linux/drivers/infiniband/hw/hfi1/chip.c
   1/*
   2 * Copyright(c) 2015 - 2018 Intel Corporation.
   3 *
   4 * This file is provided under a dual BSD/GPLv2 license.  When using or
   5 * redistributing this file, you may do so under either license.
   6 *
   7 * GPL LICENSE SUMMARY
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of version 2 of the GNU General Public License as
  11 * published by the Free Software Foundation.
  12 *
  13 * This program is distributed in the hope that it will be useful, but
  14 * WITHOUT ANY WARRANTY; without even the implied warranty of
  15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  16 * General Public License for more details.
  17 *
  18 * BSD LICENSE
  19 *
  20 * Redistribution and use in source and binary forms, with or without
  21 * modification, are permitted provided that the following conditions
  22 * are met:
  23 *
  24 *  - Redistributions of source code must retain the above copyright
  25 *    notice, this list of conditions and the following disclaimer.
  26 *  - Redistributions in binary form must reproduce the above copyright
  27 *    notice, this list of conditions and the following disclaimer in
  28 *    the documentation and/or other materials provided with the
  29 *    distribution.
  30 *  - Neither the name of Intel Corporation nor the names of its
  31 *    contributors may be used to endorse or promote products derived
  32 *    from this software without specific prior written permission.
  33 *
  34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  45 *
  46 */
  47
  48/*
  49 * This file contains all of the code that is specific to the HFI chip
  50 */
  51
  52#include <linux/pci.h>
  53#include <linux/delay.h>
  54#include <linux/interrupt.h>
  55#include <linux/module.h>
  56
  57#include "hfi.h"
  58#include "trace.h"
  59#include "mad.h"
  60#include "pio.h"
  61#include "sdma.h"
  62#include "eprom.h"
  63#include "efivar.h"
  64#include "platform.h"
  65#include "aspm.h"
  66#include "affinity.h"
  67#include "debugfs.h"
  68#include "fault.h"
  69
  70#define NUM_IB_PORTS 1
  71
  72uint kdeth_qp;
  73module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
  74MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
  75
  76uint num_vls = HFI1_MAX_VLS_SUPPORTED;
  77module_param(num_vls, uint, S_IRUGO);
  78MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
  79
  80/*
  81 * Default time to aggregate two 10K packets from the idle state
  82 * (timer not running). The timer starts at the end of the first packet,
  83 * so only the time for one 10K packet and header plus a bit extra is needed.
  84 * 10 * 1024 + 64 header bytes = 10304 bytes
  85 * 10304 bytes / 12.5 GB/s = 824.32 ns
  86 */
  87uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
  88module_param(rcv_intr_timeout, uint, S_IRUGO);
  89MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
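/*
 * Worked arithmetic behind the default above (illustrative only; the
 * 12.5 GB/s line rate and the 16 ns coalescing allowance come straight
 * from the comment, they are not re-measured here):
 *
 *   payload + header = 10 * 1024 + 64           = 10304 bytes
 *   wire time        = 10304 bytes / 12.5 GB/s  = 824.32 ns
 *   default          = 824 ns + 16 ns coalescing margin = 840 ns
 */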
  90
  91uint rcv_intr_count = 16; /* same as qib */
  92module_param(rcv_intr_count, uint, S_IRUGO);
  93MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
  94
  95ushort link_crc_mask = SUPPORTED_CRCS;
  96module_param(link_crc_mask, ushort, S_IRUGO);
  97MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
  98
  99uint loopback;
 100module_param_named(loopback, loopback, uint, S_IRUGO);
 101MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
 102
 103/* Other driver tunables */
 104uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
 105static ushort crc_14b_sideband = 1;
 106static uint use_flr = 1;
 107uint quick_linkup; /* skip LNI */
 108
 109struct flag_table {
 110        u64 flag;       /* the flag */
 111        char *str;      /* description string */
 112        u16 extra;      /* extra information */
 113        u16 unused0;
 114        u32 unused1;
 115};
 116
 117/* str must be a string constant */
 118#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
 119#define FLAG_ENTRY0(str, flag) {flag, str, 0}
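/*
 * Expansion sketch: the initializers above fill flag/str/extra in struct
 * flag_table order and leave the trailing members zeroed, e.g.
 *
 *   FLAG_ENTRY0("CceCsrParityErr", CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK)
 *
 * becomes
 *
 *   { CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK, "CceCsrParityErr", 0 }
 */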
 120
 121/* Send Error Consequences */
 122#define SEC_WRITE_DROPPED       0x1
 123#define SEC_PACKET_DROPPED      0x2
 124#define SEC_SC_HALTED           0x4     /* per-context only */
 125#define SEC_SPC_FREEZE          0x8     /* per-HFI only */
 126
 127#define DEFAULT_KRCVQS            2
 128#define MIN_KERNEL_KCTXTS         2
 129#define FIRST_KERNEL_KCTXT        1
 130
 131/*
 132 * RSM instance allocation
 133 *   0 - Verbs
 134 *   1 - User Fecn Handling
 135 *   2 - Vnic
 136 */
 137#define RSM_INS_VERBS             0
 138#define RSM_INS_FECN              1
 139#define RSM_INS_VNIC              2
 140
 141/* Bit offset into the GUID which carries HFI id information */
 142#define GUID_HFI_INDEX_SHIFT     39
 143
 144/* extract the emulation revision */
 145#define emulator_rev(dd) ((dd)->irev >> 8)
 146/* parallel and serial emulation versions are 3 and 4 respectively */
 147#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
 148#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
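/*
 * Example: an irev of 0x0203 decodes as emulation revision 2
 * (emulator_rev() == 0x0203 >> 8), with the low nibble 0x3 marking the
 * parallel flavor (is_emulator_p() true, is_emulator_s() false).
 */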
 149
 150/* RSM fields for Verbs */
 151/* packet type */
 152#define IB_PACKET_TYPE         2ull
 153#define QW_SHIFT               6ull
 154/* QPN[7..1] */
 155#define QPN_WIDTH              7ull
 156
 157/* LRH.BTH: QW 0, OFFSET 48 - for match */
 158#define LRH_BTH_QW             0ull
 159#define LRH_BTH_BIT_OFFSET     48ull
 160#define LRH_BTH_OFFSET(off)    ((LRH_BTH_QW << QW_SHIFT) | (off))
 161#define LRH_BTH_MATCH_OFFSET   LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
 162#define LRH_BTH_SELECT
 163#define LRH_BTH_MASK           3ull
 164#define LRH_BTH_VALUE          2ull
 165
 166/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
 167#define LRH_SC_QW              0ull
 168#define LRH_SC_BIT_OFFSET      56ull
 169#define LRH_SC_OFFSET(off)     ((LRH_SC_QW << QW_SHIFT) | (off))
 170#define LRH_SC_MATCH_OFFSET    LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
 171#define LRH_SC_MASK            128ull
 172#define LRH_SC_VALUE           0ull
 173
 174/* SC[n..0] QW 0, OFFSET 60 - for select */
 175#define LRH_SC_SELECT_OFFSET  ((LRH_SC_QW << QW_SHIFT) | (60ull))
 176
 177/* QPN[m+n:1] QW 1, OFFSET 1 */
 178#define QPN_SELECT_OFFSET      ((1ull << QW_SHIFT) | (1ull))
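/*
 * Offset encoding sketch: each match/select offset packs a quad-word index
 * above QW_SHIFT and a bit offset within that QW below it, so with the
 * values defined here:
 *
 *   LRH_BTH_MATCH_OFFSET = (0 << 6) | 48 = 48   (QW 0, bit 48)
 *   LRH_SC_MATCH_OFFSET  = (0 << 6) | 56 = 56   (QW 0, bit 56)
 *   QPN_SELECT_OFFSET    = (1 << 6) |  1 = 65   (QW 1, bit 1)
 */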
 179
 180/* RSM fields for Vnic */
 181/* L2_TYPE: QW 0, OFFSET 61 - for match */
 182#define L2_TYPE_QW             0ull
 183#define L2_TYPE_BIT_OFFSET     61ull
 184#define L2_TYPE_OFFSET(off)    ((L2_TYPE_QW << QW_SHIFT) | (off))
 185#define L2_TYPE_MATCH_OFFSET   L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
 186#define L2_TYPE_MASK           3ull
 187#define L2_16B_VALUE           2ull
 188
 189/* L4_TYPE QW 1, OFFSET 0 - for match */
 190#define L4_TYPE_QW              1ull
 191#define L4_TYPE_BIT_OFFSET      0ull
 192#define L4_TYPE_OFFSET(off)     ((L4_TYPE_QW << QW_SHIFT) | (off))
 193#define L4_TYPE_MATCH_OFFSET    L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
 194#define L4_16B_TYPE_MASK        0xFFull
 195#define L4_16B_ETH_VALUE        0x78ull
 196
 197/* 16B VESWID - for select */
 198#define L4_16B_HDR_VESWID_OFFSET  ((2 << QW_SHIFT) | (16ull))
 199/* 16B ENTROPY - for select */
 200#define L2_16B_ENTROPY_OFFSET     ((1 << QW_SHIFT) | (32ull))
 201
 202/* defines to build power on SC2VL table */
 203#define SC2VL_VAL( \
 204        num, \
 205        sc0, sc0val, \
 206        sc1, sc1val, \
 207        sc2, sc2val, \
 208        sc3, sc3val, \
 209        sc4, sc4val, \
 210        sc5, sc5val, \
 211        sc6, sc6val, \
 212        sc7, sc7val) \
 213( \
 214        ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
 215        ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
 216        ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
 217        ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
 218        ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
 219        ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
 220        ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
 221        ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT)   \
 222)
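/*
 * Usage sketch (hypothetical values): each (sc, val) pair names a service
 * class and the VL it maps to in the SEND_SC2VLT<num> CSR. Assuming the
 * driver's usual write_csr() helper and the SEND_SC2VLT0 register name,
 * mapping SC0..SC7 all to VL0 would look like:
 *
 *   write_csr(dd, SEND_SC2VLT0,
 *             SC2VL_VAL(0, 0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0));
 */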
 223
 224#define DC_SC_VL_VAL( \
 225        range, \
 226        e0, e0val, \
 227        e1, e1val, \
 228        e2, e2val, \
 229        e3, e3val, \
 230        e4, e4val, \
 231        e5, e5val, \
 232        e6, e6val, \
 233        e7, e7val, \
 234        e8, e8val, \
 235        e9, e9val, \
 236        e10, e10val, \
 237        e11, e11val, \
 238        e12, e12val, \
 239        e13, e13val, \
 240        e14, e14val, \
 241        e15, e15val) \
 242( \
 243        ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
 244        ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
 245        ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
 246        ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
 247        ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
 248        ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
 249        ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
 250        ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
 251        ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
 252        ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
 253        ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
 254        ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
 255        ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
 256        ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
 257        ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
 258        ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
 259)
 260
 261/* all CceStatus sub-block freeze bits */
 262#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
 263                        | CCE_STATUS_RXE_FROZE_SMASK \
 264                        | CCE_STATUS_TXE_FROZE_SMASK \
 265                        | CCE_STATUS_TXE_PIO_FROZE_SMASK)
 266/* all CceStatus sub-block TXE pause bits */
 267#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
 268                        | CCE_STATUS_TXE_PAUSED_SMASK \
 269                        | CCE_STATUS_SDMA_PAUSED_SMASK)
 270/* all CceStatus sub-block RXE pause bits */
 271#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
 272
 273#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
 274#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
 275
 276/*
 277 * CCE Error flags.
 278 */
 279static struct flag_table cce_err_status_flags[] = {
 280/* 0*/  FLAG_ENTRY0("CceCsrParityErr",
 281                CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
 282/* 1*/  FLAG_ENTRY0("CceCsrReadBadAddrErr",
 283                CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
 284/* 2*/  FLAG_ENTRY0("CceCsrWriteBadAddrErr",
 285                CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
 286/* 3*/  FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
 287                CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
 288/* 4*/  FLAG_ENTRY0("CceTrgtAccessErr",
 289                CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
 290/* 5*/  FLAG_ENTRY0("CceRspdDataParityErr",
 291                CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
 292/* 6*/  FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
 293                CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
 294/* 7*/  FLAG_ENTRY0("CceCsrCfgBusParityErr",
 295                CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
 296/* 8*/  FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
 297                CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
 298/* 9*/  FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
 299            CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
 300/*10*/  FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
 301            CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
 302/*11*/  FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
 303            CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
 304/*12*/  FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
 305                CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
 306/*13*/  FLAG_ENTRY0("PcicRetryMemCorErr",
 307                CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
 308/*14*/  FLAG_ENTRY0("PcicRetrySotMemCorErr",
 309                CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
 310/*15*/  FLAG_ENTRY0("PcicPostHdQCorErr",
 311                CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
 312/*16*/  FLAG_ENTRY0("PcicPostDatQCorErr",
 313                CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
 314/*17*/  FLAG_ENTRY0("PcicCplHdQCorErr",
 315                CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
 316/*18*/  FLAG_ENTRY0("PcicCplDatQCorErr",
 317                CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
 318/*19*/  FLAG_ENTRY0("PcicNPostHQParityErr",
 319                CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
 320/*20*/  FLAG_ENTRY0("PcicNPostDatQParityErr",
 321                CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
 322/*21*/  FLAG_ENTRY0("PcicRetryMemUncErr",
 323                CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
 324/*22*/  FLAG_ENTRY0("PcicRetrySotMemUncErr",
 325                CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
 326/*23*/  FLAG_ENTRY0("PcicPostHdQUncErr",
 327                CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
 328/*24*/  FLAG_ENTRY0("PcicPostDatQUncErr",
 329                CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
 330/*25*/  FLAG_ENTRY0("PcicCplHdQUncErr",
 331                CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
 332/*26*/  FLAG_ENTRY0("PcicCplDatQUncErr",
 333                CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
 334/*27*/  FLAG_ENTRY0("PcicTransmitFrontParityErr",
 335                CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
 336/*28*/  FLAG_ENTRY0("PcicTransmitBackParityErr",
 337                CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
 338/*29*/  FLAG_ENTRY0("PcicReceiveParityErr",
 339                CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
 340/*30*/  FLAG_ENTRY0("CceTrgtCplTimeoutErr",
 341                CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
 342/*31*/  FLAG_ENTRY0("LATriggered",
 343                CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
 344/*32*/  FLAG_ENTRY0("CceSegReadBadAddrErr",
 345                CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
 346/*33*/  FLAG_ENTRY0("CceSegWriteBadAddrErr",
 347                CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
 348/*34*/  FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
 349                CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
 350/*35*/  FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
 351                CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
 352/*36*/  FLAG_ENTRY0("CceMsixTableCorErr",
 353                CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
 354/*37*/  FLAG_ENTRY0("CceMsixTableUncErr",
 355                CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
 356/*38*/  FLAG_ENTRY0("CceIntMapCorErr",
 357                CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
 358/*39*/  FLAG_ENTRY0("CceIntMapUncErr",
 359                CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
 360/*40*/  FLAG_ENTRY0("CceMsixCsrParityErr",
 361                CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
 362/*41-63 reserved*/
 363};
 364
 365/*
 366 * Misc Error flags
 367 */
 368#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
 369static struct flag_table misc_err_status_flags[] = {
 370/* 0*/  FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
 371/* 1*/  FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
 372/* 2*/  FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
 373/* 3*/  FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
 374/* 4*/  FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
 375/* 5*/  FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
 376/* 6*/  FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
 377/* 7*/  FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
 378/* 8*/  FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
 379/* 9*/  FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
 380/*10*/  FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
 381/*11*/  FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
 382/*12*/  FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
 383};
 384
 385/*
 386 * TXE PIO Error flags and consequences
 387 */
 388static struct flag_table pio_err_status_flags[] = {
 389/* 0*/  FLAG_ENTRY("PioWriteBadCtxt",
 390        SEC_WRITE_DROPPED,
 391        SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
 392/* 1*/  FLAG_ENTRY("PioWriteAddrParity",
 393        SEC_SPC_FREEZE,
 394        SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
 395/* 2*/  FLAG_ENTRY("PioCsrParity",
 396        SEC_SPC_FREEZE,
 397        SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
 398/* 3*/  FLAG_ENTRY("PioSbMemFifo0",
 399        SEC_SPC_FREEZE,
 400        SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
 401/* 4*/  FLAG_ENTRY("PioSbMemFifo1",
 402        SEC_SPC_FREEZE,
 403        SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
 404/* 5*/  FLAG_ENTRY("PioPccFifoParity",
 405        SEC_SPC_FREEZE,
 406        SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
 407/* 6*/  FLAG_ENTRY("PioPecFifoParity",
 408        SEC_SPC_FREEZE,
 409        SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
 410/* 7*/  FLAG_ENTRY("PioSbrdctlCrrelParity",
 411        SEC_SPC_FREEZE,
 412        SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
 413/* 8*/  FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
 414        SEC_SPC_FREEZE,
 415        SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
 416/* 9*/  FLAG_ENTRY("PioPktEvictFifoParityErr",
 417        SEC_SPC_FREEZE,
 418        SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
 419/*10*/  FLAG_ENTRY("PioSmPktResetParity",
 420        SEC_SPC_FREEZE,
 421        SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
 422/*11*/  FLAG_ENTRY("PioVlLenMemBank0Unc",
 423        SEC_SPC_FREEZE,
 424        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
 425/*12*/  FLAG_ENTRY("PioVlLenMemBank1Unc",
 426        SEC_SPC_FREEZE,
 427        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
 428/*13*/  FLAG_ENTRY("PioVlLenMemBank0Cor",
 429        0,
 430        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
 431/*14*/  FLAG_ENTRY("PioVlLenMemBank1Cor",
 432        0,
 433        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
 434/*15*/  FLAG_ENTRY("PioCreditRetFifoParity",
 435        SEC_SPC_FREEZE,
 436        SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
 437/*16*/  FLAG_ENTRY("PioPpmcPblFifo",
 438        SEC_SPC_FREEZE,
 439        SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
 440/*17*/  FLAG_ENTRY("PioInitSmIn",
 441        0,
 442        SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
 443/*18*/  FLAG_ENTRY("PioPktEvictSmOrArbSm",
 444        SEC_SPC_FREEZE,
 445        SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
 446/*19*/  FLAG_ENTRY("PioHostAddrMemUnc",
 447        SEC_SPC_FREEZE,
 448        SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
 449/*20*/  FLAG_ENTRY("PioHostAddrMemCor",
 450        0,
 451        SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
 452/*21*/  FLAG_ENTRY("PioWriteDataParity",
 453        SEC_SPC_FREEZE,
 454        SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
 455/*22*/  FLAG_ENTRY("PioStateMachine",
 456        SEC_SPC_FREEZE,
 457        SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
 458/*23*/  FLAG_ENTRY("PioWriteQwValidParity",
 459        SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
 460        SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
 461/*24*/  FLAG_ENTRY("PioBlockQwCountParity",
 462        SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
 463        SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
 464/*25*/  FLAG_ENTRY("PioVlfVlLenParity",
 465        SEC_SPC_FREEZE,
 466        SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
 467/*26*/  FLAG_ENTRY("PioVlfSopParity",
 468        SEC_SPC_FREEZE,
 469        SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
 470/*27*/  FLAG_ENTRY("PioVlFifoParity",
 471        SEC_SPC_FREEZE,
 472        SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
 473/*28*/  FLAG_ENTRY("PioPpmcBqcMemParity",
 474        SEC_SPC_FREEZE,
 475        SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
 476/*29*/  FLAG_ENTRY("PioPpmcSopLen",
 477        SEC_SPC_FREEZE,
 478        SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
 479/*30-31 reserved*/
 480/*32*/  FLAG_ENTRY("PioCurrentFreeCntParity",
 481        SEC_SPC_FREEZE,
 482        SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
 483/*33*/  FLAG_ENTRY("PioLastReturnedCntParity",
 484        SEC_SPC_FREEZE,
 485        SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
 486/*34*/  FLAG_ENTRY("PioPccSopHeadParity",
 487        SEC_SPC_FREEZE,
 488        SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
 489/*35*/  FLAG_ENTRY("PioPecSopHeadParityErr",
 490        SEC_SPC_FREEZE,
 491        SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
 492/*36-63 reserved*/
 493};
 494
 495/* TXE PIO errors that cause an SPC freeze */
 496#define ALL_PIO_FREEZE_ERR \
 497        (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
 498        | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
 499        | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
 500        | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
 501        | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
 502        | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
 503        | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
 504        | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
 505        | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
 506        | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
 507        | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
 508        | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
 509        | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
 510        | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
 511        | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
 512        | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
 513        | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
 514        | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
 515        | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
 516        | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
 517        | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
 518        | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
 519        | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
 520        | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
 521        | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
 522        | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
 523        | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
 524        | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
 525        | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
 526
 527/*
 528 * TXE SDMA Error flags
 529 */
 530static struct flag_table sdma_err_status_flags[] = {
 531/* 0*/  FLAG_ENTRY0("SDmaRpyTagErr",
 532                SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
 533/* 1*/  FLAG_ENTRY0("SDmaCsrParityErr",
 534                SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
 535/* 2*/  FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
 536                SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
 537/* 3*/  FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
 538                SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
 539/*04-63 reserved*/
 540};
 541
 542/* TXE SDMA errors that cause an SPC freeze */
 543#define ALL_SDMA_FREEZE_ERR  \
 544                (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
 545                | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
 546                | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
 547
 548/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
 549#define PORT_DISCARD_EGRESS_ERRS \
 550        (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
 551        | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
 552        | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
 553
 554/*
 555 * TXE Egress Error flags
 556 */
 557#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
 558static struct flag_table egress_err_status_flags[] = {
 559/* 0*/  FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
 560/* 1*/  FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
 561/* 2 reserved */
 562/* 3*/  FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
 563                SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
 564/* 4*/  FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
 565/* 5*/  FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
 566/* 6 reserved */
 567/* 7*/  FLAG_ENTRY0("TxPioLaunchIntfParityErr",
 568                SEES(TX_PIO_LAUNCH_INTF_PARITY)),
 569/* 8*/  FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
 570                SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
 571/* 9-10 reserved */
 572/*11*/  FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
 573                SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
 574/*12*/  FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
 575/*13*/  FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
 576/*14*/  FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
 577/*15*/  FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
 578/*16*/  FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
 579                SEES(TX_SDMA0_DISALLOWED_PACKET)),
 580/*17*/  FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
 581                SEES(TX_SDMA1_DISALLOWED_PACKET)),
 582/*18*/  FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
 583                SEES(TX_SDMA2_DISALLOWED_PACKET)),
 584/*19*/  FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
 585                SEES(TX_SDMA3_DISALLOWED_PACKET)),
 586/*20*/  FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
 587                SEES(TX_SDMA4_DISALLOWED_PACKET)),
 588/*21*/  FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
 589                SEES(TX_SDMA5_DISALLOWED_PACKET)),
 590/*22*/  FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
 591                SEES(TX_SDMA6_DISALLOWED_PACKET)),
 592/*23*/  FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
 593                SEES(TX_SDMA7_DISALLOWED_PACKET)),
 594/*24*/  FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
 595                SEES(TX_SDMA8_DISALLOWED_PACKET)),
 596/*25*/  FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
 597                SEES(TX_SDMA9_DISALLOWED_PACKET)),
 598/*26*/  FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
 599                SEES(TX_SDMA10_DISALLOWED_PACKET)),
 600/*27*/  FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
 601                SEES(TX_SDMA11_DISALLOWED_PACKET)),
 602/*28*/  FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
 603                SEES(TX_SDMA12_DISALLOWED_PACKET)),
 604/*29*/  FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
 605                SEES(TX_SDMA13_DISALLOWED_PACKET)),
 606/*30*/  FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
 607                SEES(TX_SDMA14_DISALLOWED_PACKET)),
 608/*31*/  FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
 609                SEES(TX_SDMA15_DISALLOWED_PACKET)),
 610/*32*/  FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
 611                SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
 612/*33*/  FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
 613                SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
 614/*34*/  FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
 615                SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
 616/*35*/  FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
 617                SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
 618/*36*/  FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
 619                SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
 620/*37*/  FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
 621                SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
 622/*38*/  FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
 623                SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
 624/*39*/  FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
 625                SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
 626/*40*/  FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
 627                SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
 628/*41*/  FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
 629/*42*/  FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
 630/*43*/  FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
 631/*44*/  FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
 632/*45*/  FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
 633/*46*/  FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
 634/*47*/  FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
 635/*48*/  FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
 636/*49*/  FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
 637/*50*/  FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
 638/*51*/  FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
 639/*52*/  FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
 640/*53*/  FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
 641/*54*/  FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
 642/*55*/  FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
 643/*56*/  FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
 644/*57*/  FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
 645/*58*/  FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
 646/*59*/  FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
 647/*60*/  FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
 648/*61*/  FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
 649/*62*/  FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
 650                SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
 651/*63*/  FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
 652                SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
 653};
 654
 655/*
 656 * TXE Egress Error Info flags
 657 */
 658#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
 659static struct flag_table egress_err_info_flags[] = {
 660/* 0*/  FLAG_ENTRY0("Reserved", 0ull),
 661/* 1*/  FLAG_ENTRY0("VLErr", SEEI(VL)),
 662/* 2*/  FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
 663/* 3*/  FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
 664/* 4*/  FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
 665/* 5*/  FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
 666/* 6*/  FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
 667/* 7*/  FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
 668/* 8*/  FLAG_ENTRY0("RawErr", SEEI(RAW)),
 669/* 9*/  FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
 670/*10*/  FLAG_ENTRY0("GRHErr", SEEI(GRH)),
 671/*11*/  FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
 672/*12*/  FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
 673/*13*/  FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
 674/*14*/  FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
 675/*15*/  FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
 676/*16*/  FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
 677/*17*/  FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
 678/*18*/  FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
 679/*19*/  FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
 680/*20*/  FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
 681/*21*/  FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
 682};
 683
 684/* TXE Egress errors that cause an SPC freeze */
 685#define ALL_TXE_EGRESS_FREEZE_ERR \
 686        (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
 687        | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
 688        | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
 689        | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
 690        | SEES(TX_LAUNCH_CSR_PARITY) \
 691        | SEES(TX_SBRD_CTL_CSR_PARITY) \
 692        | SEES(TX_CONFIG_PARITY) \
 693        | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
 694        | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
 695        | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
 696        | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
 697        | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
 698        | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
 699        | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
 700        | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
 701        | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
 702        | SEES(TX_CREDIT_RETURN_PARITY))
 703
 704/*
 705 * TXE Send error flags
 706 */
 707#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
 708static struct flag_table send_err_status_flags[] = {
 709/* 0*/  FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
 710/* 1*/  FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
 711/* 2*/  FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
 712};
 713
 714/*
 715 * TXE Send Context Error flags and consequences
 716 */
 717static struct flag_table sc_err_status_flags[] = {
 718/* 0*/  FLAG_ENTRY("InconsistentSop",
 719                SEC_PACKET_DROPPED | SEC_SC_HALTED,
 720                SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
 721/* 1*/  FLAG_ENTRY("DisallowedPacket",
 722                SEC_PACKET_DROPPED | SEC_SC_HALTED,
 723                SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
 724/* 2*/  FLAG_ENTRY("WriteCrossesBoundary",
 725                SEC_WRITE_DROPPED | SEC_SC_HALTED,
 726                SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
 727/* 3*/  FLAG_ENTRY("WriteOverflow",
 728                SEC_WRITE_DROPPED | SEC_SC_HALTED,
 729                SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
 730/* 4*/  FLAG_ENTRY("WriteOutOfBounds",
 731                SEC_WRITE_DROPPED | SEC_SC_HALTED,
 732                SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
 733/* 5-63 reserved*/
 734};
 735
 736/*
 737 * RXE Receive Error flags
 738 */
 739#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
 740static struct flag_table rxe_err_status_flags[] = {
 741/* 0*/  FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
 742/* 1*/  FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
 743/* 2*/  FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
 744/* 3*/  FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
 745/* 4*/  FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
 746/* 5*/  FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
 747/* 6*/  FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
 748/* 7*/  FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
 749/* 8*/  FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
 750/* 9*/  FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
 751/*10*/  FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
 752/*11*/  FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
 753/*12*/  FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
 754/*13*/  FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
 755/*14*/  FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
 756/*15*/  FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
 757/*16*/  FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
 758                RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
 759/*17*/  FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
 760/*18*/  FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
 761/*19*/  FLAG_ENTRY0("RxRbufBlockListReadUncErr",
 762                RXES(RBUF_BLOCK_LIST_READ_UNC)),
 763/*20*/  FLAG_ENTRY0("RxRbufBlockListReadCorErr",
 764                RXES(RBUF_BLOCK_LIST_READ_COR)),
 765/*21*/  FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
 766                RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
 767/*22*/  FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
 768                RXES(RBUF_CSR_QENT_CNT_PARITY)),
 769/*23*/  FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
 770                RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
 771/*24*/  FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
 772                RXES(RBUF_CSR_QVLD_BIT_PARITY)),
 773/*25*/  FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
 774/*26*/  FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
 775/*27*/  FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
 776                RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
 777/*28*/  FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
 778/*29*/  FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
 779/*30*/  FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
 780/*31*/  FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
 781/*32*/  FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
 782/*33*/  FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
 783/*34*/  FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
 784/*35*/  FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
 785                RXES(RBUF_FL_INITDONE_PARITY)),
 786/*36*/  FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
 787                RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
 788/*37*/  FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
 789/*38*/  FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
 790/*39*/  FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
 791/*40*/  FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
 792                RXES(LOOKUP_DES_PART1_UNC_COR)),
 793/*41*/  FLAG_ENTRY0("RxLookupDesPart2ParityErr",
 794                RXES(LOOKUP_DES_PART2_PARITY)),
 795/*42*/  FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
 796/*43*/  FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
 797/*44*/  FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
 798/*45*/  FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
 799/*46*/  FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
 800/*47*/  FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
 801/*48*/  FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
 802/*49*/  FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
 803/*50*/  FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
 804/*51*/  FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
 805/*52*/  FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
 806/*53*/  FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
 807/*54*/  FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
 808/*55*/  FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
 809/*56*/  FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
 810/*57*/  FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
 811/*58*/  FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
 812/*59*/  FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
 813/*60*/  FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
 814/*61*/  FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
 815/*62*/  FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
 816/*63*/  FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
 817};
 818
 819/* RXE errors that will trigger an SPC freeze */
 820#define ALL_RXE_FREEZE_ERR  \
 821        (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
 822        | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
 823        | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
 824        | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
 825        | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
 826        | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
 827        | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
 828        | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
 829        | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
 830        | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
 831        | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
 832        | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
 833        | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
 834        | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
 835        | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
 836        | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
 837        | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
 838        | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
 839        | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
 840        | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
 841        | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
 842        | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
 843        | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
 844        | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
 845        | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
 846        | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
 847        | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
 848        | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
 849        | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
 850        | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
 851        | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
 852        | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
 853        | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
 854        | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
 855        | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
 856        | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
 857        | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
 858        | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
 859        | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
 860        | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
 861        | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
 862        | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
 863        | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
 864        | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
 865
 866#define RXE_FREEZE_ABORT_MASK \
 867        (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
 868        RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
 869        RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
 870
 871/*
 872 * DCC Error Flags
 873 */
 874#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
 875static struct flag_table dcc_err_flags[] = {
 876        FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
 877        FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
 878        FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
 879        FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
 880        FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
 881        FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
 882        FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
 883        FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
 884        FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
 885        FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
 886        FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
 887        FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
 888        FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
 889        FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
 890        FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
 891        FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
 892        FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
 893        FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
 894        FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
 895        FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
 896        FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
 897        FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
 898        FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
 899        FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
 900        FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
 901        FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
 902        FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
 903        FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
 904        FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
 905        FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
 906        FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
 907        FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
 908        FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
 909        FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
 910        FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
 911        FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
 912        FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
 913        FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
 914        FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
 915        FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
 916        FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
 917        FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
 918        FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
 919        FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
 920        FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
 921        FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
 922};
 923
 924/*
 925 * LCB error flags
 926 */
 927#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
 928static struct flag_table lcb_err_flags[] = {
 929/* 0*/  FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
 930/* 1*/  FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
 931/* 2*/  FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
 932/* 3*/  FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
 933                LCBE(ALL_LNS_FAILED_REINIT_TEST)),
 934/* 4*/  FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
 935/* 5*/  FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
 936/* 6*/  FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
 937/* 7*/  FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
 938/* 8*/  FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
 939/* 9*/  FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
 940/*10*/  FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
 941/*11*/  FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
 942/*12*/  FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
 943/*13*/  FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
 944                LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
 945/*14*/  FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
 946/*15*/  FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
 947/*16*/  FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
 948/*17*/  FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
 949/*18*/  FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
 950/*19*/  FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
 951                LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
 952/*20*/  FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
 953/*21*/  FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
 954/*22*/  FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
 955/*23*/  FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
 956/*24*/  FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
 957/*25*/  FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
 958/*26*/  FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
 959                LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
 960/*27*/  FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
 961/*28*/  FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
 962                LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
 963/*29*/  FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
 964                LCBE(REDUNDANT_FLIT_PARITY_ERR))
 965};
 966
 967/*
 968 * DC8051 Error Flags
 969 */
 970#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
 971static struct flag_table dc8051_err_flags[] = {
 972        FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
 973        FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
 974        FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
 975        FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
 976        FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
 977        FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
 978        FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
 979        FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
 980        FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
 981                    D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
 982        FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
 983};
 984
 985/*
 986 * DC8051 Information Error flags
 987 *
 988 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 989 */
 990static struct flag_table dc8051_info_err_flags[] = {
 991        FLAG_ENTRY0("Spico ROM check failed",  SPICO_ROM_FAILED),
 992        FLAG_ENTRY0("Unknown frame received",  UNKNOWN_FRAME),
 993        FLAG_ENTRY0("Target BER not met",      TARGET_BER_NOT_MET),
 994        FLAG_ENTRY0("Serdes internal loopback failure",
 995                    FAILED_SERDES_INTERNAL_LOOPBACK),
 996        FLAG_ENTRY0("Failed SerDes init",      FAILED_SERDES_INIT),
 997        FLAG_ENTRY0("Failed LNI(Polling)",     FAILED_LNI_POLLING),
 998        FLAG_ENTRY0("Failed LNI(Debounce)",    FAILED_LNI_DEBOUNCE),
 999        FLAG_ENTRY0("Failed LNI(EstbComm)",    FAILED_LNI_ESTBCOMM),
1000        FLAG_ENTRY0("Failed LNI(OptEq)",       FAILED_LNI_OPTEQ),
1001        FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
1002        FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
1003        FLAG_ENTRY0("Failed LNI(ConfigLT)",    FAILED_LNI_CONFIGLT),
1004        FLAG_ENTRY0("Host Handshake Timeout",  HOST_HANDSHAKE_TIMEOUT),
1005        FLAG_ENTRY0("External Device Request Timeout",
1006                    EXTERNAL_DEVICE_REQ_TIMEOUT),
1007};
1008
1009/*
1010 * DC8051 Information Host Information flags
1011 *
1012 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
1013 */
1014static struct flag_table dc8051_info_host_msg_flags[] = {
1015        FLAG_ENTRY0("Host request done", 0x0001),
1016        FLAG_ENTRY0("BC PWR_MGM message", 0x0002),
1017        FLAG_ENTRY0("BC SMA message", 0x0004),
1018        FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
1019        FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
1020        FLAG_ENTRY0("External device config request", 0x0020),
1021        FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
1022        FLAG_ENTRY0("LinkUp achieved", 0x0080),
1023        FLAG_ENTRY0("Link going down", 0x0100),
1024        FLAG_ENTRY0("Link width downgraded", 0x0200),
1025};
1026
1027static u32 encoded_size(u32 size);
1028static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
1029static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
1030static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
1031                               u8 *continuous);
1032static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
1033                                  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
1034static void read_vc_remote_link_width(struct hfi1_devdata *dd,
1035                                      u8 *remote_tx_rate, u16 *link_widths);
1036static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
1037                                    u8 *flag_bits, u16 *link_widths);
1038static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
1039                                  u8 *device_rev);
1040static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
1041static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
1042                            u8 *tx_polarity_inversion,
1043                            u8 *rx_polarity_inversion, u8 *max_rate);
1044static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1045                                unsigned int context, u64 err_status);
1046static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1047static void handle_dcc_err(struct hfi1_devdata *dd,
1048                           unsigned int context, u64 err_status);
1049static void handle_lcb_err(struct hfi1_devdata *dd,
1050                           unsigned int context, u64 err_status);
1051static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1052static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1053static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1054static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1055static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1056static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1057static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1058static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1059static void set_partition_keys(struct hfi1_pportdata *ppd);
1060static const char *link_state_name(u32 state);
1061static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1062                                          u32 state);
1063static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1064                           u64 *out_data);
1065static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1066static int thermal_init(struct hfi1_devdata *dd);
1067
1068static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
1069static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
1070                                            int msecs);
1071static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1072                                  int msecs);
1073static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
1074static void log_physical_state(struct hfi1_pportdata *ppd, u32 state);
1075static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1076                                   int msecs);
1077static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
1078static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
1079static void handle_temp_err(struct hfi1_devdata *dd);
1080static void dc_shutdown(struct hfi1_devdata *dd);
1081static void dc_start(struct hfi1_devdata *dd);
1082static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
1083                           unsigned int *np);
1084static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
1085static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
1086static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);
1087static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width);
1088
1089/*
1090 * Error interrupt table entry.  This is used as input to the interrupt
1091 * "clear down" routine used for all second tier error interrupt register.
1092 * Second tier interrupt registers have a single bit representing them
1093 * in the top-level CceIntStatus.
1094 */
1095struct err_reg_info {
1096        u32 status;             /* status CSR offset */
1097        u32 clear;              /* clear CSR offset */
1098        u32 mask;               /* mask CSR offset */
1099        void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1100        const char *desc;
1101};
1102
1103#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
1104#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
1105#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
1106
1107/*
1108 * Helpers for building HFI and DC error interrupt table entries.  Different
1109 * helpers are needed because of inconsistent register names.
1110 */
1111#define EE(reg, handler, desc) \
1112        { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1113                handler, desc }
1114#define DC_EE1(reg, handler, desc) \
1115        { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1116#define DC_EE2(reg, handler, desc) \
1117        { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
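/*
 * For example, EE(CCE_ERR, handle_cce_err, "CceErr") expands to
 * { CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK, handle_cce_err, "CceErr" },
 * while the DC_EE1/DC_EE2 variants use the DC-style _FLG/_CLR/_EN names.
 */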
1118
1119/*
1120 * Table of the "misc" grouping of error interrupts.  Each entry refers to
1121 * another register containing more information.
1122 */
1123static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1124/* 0*/  EE(CCE_ERR,         handle_cce_err,    "CceErr"),
1125/* 1*/  EE(RCV_ERR,         handle_rxe_err,    "RxeErr"),
1126/* 2*/  EE(MISC_ERR,        handle_misc_err,   "MiscErr"),
1127/* 3*/  { 0, 0, 0, NULL }, /* reserved */
1128/* 4*/  EE(SEND_PIO_ERR,    handle_pio_err,    "PioErr"),
1129/* 5*/  EE(SEND_DMA_ERR,    handle_sdma_err,   "SDmaErr"),
1130/* 6*/  EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1131/* 7*/  EE(SEND_ERR,        handle_txe_err,    "TxeErr")
1132        /* the rest are reserved */
1133};
1134
1135/*
1136 * Index into the Various section of the interrupt sources
1137 * corresponding to the Critical Temperature interrupt.
1138 */
1139#define TCRIT_INT_SOURCE 4
1140
1141/*
1142 * SDMA error interrupt entry - refers to another register containing more
1143 * information.
1144 */
1145static const struct err_reg_info sdma_eng_err =
1146        EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1147
1148static const struct err_reg_info various_err[NUM_VARIOUS] = {
1149/* 0*/  { 0, 0, 0, NULL }, /* PbcInt */
1150/* 1*/  { 0, 0, 0, NULL }, /* GpioAssertInt */
1151/* 2*/  EE(ASIC_QSFP1,  handle_qsfp_int,        "QSFP1"),
1152/* 3*/  EE(ASIC_QSFP2,  handle_qsfp_int,        "QSFP2"),
1153/* 4*/  { 0, 0, 0, NULL }, /* TCritInt */
1154        /* rest are reserved */
1155};
1156
1157/*
1158 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1159 * register cannot be derived from the MTU value because 10K is not
1160 * a power of 2. Therefore, we need a constant. Everything else can
1161 * be calculated.
1162 */
1163#define DCC_CFG_PORT_MTU_CAP_10240 7
1164
1165/*
1166 * Table of the DC grouping of error interrupts.  Each entry refers to
1167 * another register containing more information.
1168 */
1169static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1170/* 0*/  DC_EE1(DCC_ERR,         handle_dcc_err,        "DCC Err"),
1171/* 1*/  DC_EE2(DC_LCB_ERR,      handle_lcb_err,        "LCB Err"),
1172/* 2*/  DC_EE2(DC_DC8051_ERR,   handle_8051_interrupt, "DC8051 Interrupt"),
1173/* 3*/  /* dc_lbm_int - special, see is_dc_int() */
1174        /* the rest are reserved */
1175};
1176
1177struct cntr_entry {
1178        /*
1179         * counter name
1180         */
1181        char *name;
1182
1183        /*
1184         * csr to read for name (if applicable)
1185         */
1186        u64 csr;
1187
1188        /*
1189         * offset into dd or ppd to store the counter's value
1190         */
1191        int offset;
1192
1193        /*
1194         * flags
1195         */
1196        u8 flags;
1197
1198        /*
1199         * accessor for the stat element; context is either dd or ppd
1200         */
1201        u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
1202                       int mode, u64 data);
1203};
1204
1205#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1206#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1207
1208#define CNTR_ELEM(name, csr, offset, flags, accessor) \
1209{ \
1210        name, \
1211        csr, \
1212        offset, \
1213        flags, \
1214        accessor \
1215}
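/*
 * CNTR_ELEM is a positional initializer for struct cntr_entry; the
 * wrapper macros below fill in the CSR address within each counter
 * array, the flags, and the matching accessor for each counter group.
 */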
1216
1217/* 32bit RXE */
1218#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1219CNTR_ELEM(#name, \
1220          (counter * 8 + RCV_COUNTER_ARRAY32), \
1221          0, flags | CNTR_32BIT, \
1222          port_access_u32_csr)
1223
1224#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1225CNTR_ELEM(#name, \
1226          (counter * 8 + RCV_COUNTER_ARRAY32), \
1227          0, flags | CNTR_32BIT, \
1228          dev_access_u32_csr)
1229
1230/* 64bit RXE */
1231#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1232CNTR_ELEM(#name, \
1233          (counter * 8 + RCV_COUNTER_ARRAY64), \
1234          0, flags, \
1235          port_access_u64_csr)
1236
1237#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1238CNTR_ELEM(#name, \
1239          (counter * 8 + RCV_COUNTER_ARRAY64), \
1240          0, flags, \
1241          dev_access_u64_csr)
1242
1243#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1244#define OVR_ELM(ctx) \
1245CNTR_ELEM("RcvHdrOvr" #ctx, \
1246          (RCV_HDR_OVFL_CNT + ctx * 0x100), \
1247          0, CNTR_NORMAL, port_access_u64_csr)
1248
1249/* 32bit TXE */
1250#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1251CNTR_ELEM(#name, \
1252          (counter * 8 + SEND_COUNTER_ARRAY32), \
1253          0, flags | CNTR_32BIT, \
1254          port_access_u32_csr)
1255
1256/* 64bit TXE */
1257#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1258CNTR_ELEM(#name, \
1259          (counter * 8 + SEND_COUNTER_ARRAY64), \
1260          0, flags, \
1261          port_access_u64_csr)
1262
1263#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1264CNTR_ELEM(#name, \
1265          counter * 8 + SEND_COUNTER_ARRAY64, \
1266          0, \
1267          flags, \
1268          dev_access_u64_csr)
1269
1270/* CCE */
1271#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1272CNTR_ELEM(#name, \
1273          (counter * 8 + CCE_COUNTER_ARRAY32), \
1274          0, flags | CNTR_32BIT, \
1275          dev_access_u32_csr)
1276
1277#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1278CNTR_ELEM(#name, \
1279          (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1280          0, flags | CNTR_32BIT, \
1281          dev_access_u32_csr)
1282
1283/* DC */
1284#define DC_PERF_CNTR(name, counter, flags) \
1285CNTR_ELEM(#name, \
1286          counter, \
1287          0, \
1288          flags, \
1289          dev_access_u64_csr)
1290
1291#define DC_PERF_CNTR_LCB(name, counter, flags) \
1292CNTR_ELEM(#name, \
1293          counter, \
1294          0, \
1295          flags, \
1296          dc_access_lcb_cntr)
1297
1298/* ibp counters */
1299#define SW_IBP_CNTR(name, cntr) \
1300CNTR_ELEM(#name, \
1301          0, \
1302          0, \
1303          CNTR_SYNTH, \
1304          access_ibp_##cntr)
1305
1306/**
1307 * hfi1_addr_from_offset - return addr for readq/writeq
1308 * @dd - the dd device
1309 * @offset - the offset of the CSR within bar0
1310 *
1311 * This routine selects the appropriate base address
1312 * based on the indicated offset.
1313 */
1314static inline void __iomem *hfi1_addr_from_offset(
1315        const struct hfi1_devdata *dd,
1316        u32 offset)
1317{
1318        if (offset >= dd->base2_start)
1319                return dd->kregbase2 + (offset - dd->base2_start);
1320        return dd->kregbase1 + offset;
1321}
1322
1323/**
1324 * read_csr - read CSR at the indicated offset
1325 * @dd - the dd device
1326 * @offset - the offset of the CSR within bar0
1327 *
1328 * Return: the value read, or all FFs if there
1329 * is no mapping
1330 */
1331u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1332{
1333        if (dd->flags & HFI1_PRESENT)
1334                return readq(hfi1_addr_from_offset(dd, offset));
1335        return -1;
1336}
1337
1338/**
1339 * write_csr - write CSR at the indicated offset
1340 * @dd - the dd device
1341 * @offset - the offset of the CSR within bar0
1342 * @value - value to write
1343 */
1344void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1345{
1346        if (dd->flags & HFI1_PRESENT) {
1347                void __iomem *base = hfi1_addr_from_offset(dd, offset);
1348
1349                /* avoid write to RcvArray */
1350                if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start))
1351                        return;
1352                writeq(value, base);
1353        }
1354}
1355
1356/**
1357 * get_csr_addr - return the iomem address for offset
1358 * @dd - the dd device
1359 * @offset - the offset of the CSR within bar0
1360 *
1361 * Return: The iomem address to use in subsequent
1362 * writeq/readq operations.
1363 */
1364void __iomem *get_csr_addr(
1365        const struct hfi1_devdata *dd,
1366        u32 offset)
1367{
1368        if (dd->flags & HFI1_PRESENT)
1369                return hfi1_addr_from_offset(dd, offset);
1370        return NULL;
1371}
1372
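/*
 * Read or write a CSR according to the counter access mode: returns
 * the value read for CNTR_MODE_R, echoes the value written for
 * CNTR_MODE_W, and returns 0 on any other mode.
 */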
1373static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1374                                 int mode, u64 value)
1375{
1376        u64 ret;
1377
1378        if (mode == CNTR_MODE_R) {
1379                ret = read_csr(dd, csr);
1380        } else if (mode == CNTR_MODE_W) {
1381                write_csr(dd, csr, value);
1382                ret = value;
1383        } else {
1384                dd_dev_err(dd, "Invalid cntr register access mode");
1385                return 0;
1386        }
1387
1388        hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1389        return ret;
1390}
1391
1392/* Dev Access */
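/*
 * Access a 32-bit device counter CSR.  For CNTR_SDMA entries the vl
 * argument is reused as an SDMA index and selects a per-engine copy
 * of the counter at a 0x100 byte stride; all other entries accept
 * only CNTR_INVALID_VL.
 */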
1393static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1394                              void *context, int vl, int mode, u64 data)
1395{
1396        struct hfi1_devdata *dd = context;
1397        u64 csr = entry->csr;
1398
1399        if (entry->flags & CNTR_SDMA) {
1400                if (vl == CNTR_INVALID_VL)
1401                        return 0;
1402                csr += 0x100 * vl;
1403        } else {
1404                if (vl != CNTR_INVALID_VL)
1405                        return 0;
1406        }
1407        return read_write_csr(dd, csr, mode, data);
1408}
1409
1410static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1411                              void *context, int idx, int mode, u64 data)
1412{
1413        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1414
1415        if (dd->per_sdma && idx < dd->num_sdma)
1416                return dd->per_sdma[idx].err_cnt;
1417        return 0;
1418}
1419
1420static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1421                              void *context, int idx, int mode, u64 data)
1422{
1423        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1424
1425        if (dd->per_sdma && idx < dd->num_sdma)
1426                return dd->per_sdma[idx].sdma_int_cnt;
1427        return 0;
1428}
1429
1430static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1431                                   void *context, int idx, int mode, u64 data)
1432{
1433        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1434
1435        if (dd->per_sdma && idx < dd->num_sdma)
1436                return dd->per_sdma[idx].idle_int_cnt;
1437        return 0;
1438}
1439
1440static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1441                                       void *context, int idx, int mode,
1442                                       u64 data)
1443{
1444        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1445
1446        if (dd->per_sdma && idx < dd->num_sdma)
1447                return dd->per_sdma[idx].progress_int_cnt;
1448        return 0;
1449}
1450
1451static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1452                              int vl, int mode, u64 data)
1453{
1454        struct hfi1_devdata *dd = context;
1455
1456        u64 val = 0;
1457        u64 csr = entry->csr;
1458
1459        if (entry->flags & CNTR_VL) {
1460                if (vl == CNTR_INVALID_VL)
1461                        return 0;
1462                csr += 8 * vl;
1463        } else {
1464                if (vl != CNTR_INVALID_VL)
1465                        return 0;
1466        }
1467
1468        val = read_write_csr(dd, csr, mode, data);
1469        return val;
1470}
1471
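/*
 * LCB counters are reached through read_lcb_csr()/write_lcb_csr(),
 * which can fail if the LCB is not accessible; failures are logged
 * and 0 is returned.
 */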
1472static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1473                              int vl, int mode, u64 data)
1474{
1475        struct hfi1_devdata *dd = context;
1476        u32 csr = entry->csr;
1477        int ret = 0;
1478
1479        if (vl != CNTR_INVALID_VL)
1480                return 0;
1481        if (mode == CNTR_MODE_R)
1482                ret = read_lcb_csr(dd, csr, &data);
1483        else if (mode == CNTR_MODE_W)
1484                ret = write_lcb_csr(dd, csr, data);
1485
1486        if (ret) {
1487                dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1488                return 0;
1489        }
1490
1491        hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1492        return data;
1493}
1494
1495/* Port Access */
1496static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1497                               int vl, int mode, u64 data)
1498{
1499        struct hfi1_pportdata *ppd = context;
1500
1501        if (vl != CNTR_INVALID_VL)
1502                return 0;
1503        return read_write_csr(ppd->dd, entry->csr, mode, data);
1504}
1505
1506static u64 port_access_u64_csr(const struct cntr_entry *entry,
1507                               void *context, int vl, int mode, u64 data)
1508{
1509        struct hfi1_pportdata *ppd = context;
1510        u64 val;
1511        u64 csr = entry->csr;
1512
1513        if (entry->flags & CNTR_VL) {
1514                if (vl == CNTR_INVALID_VL)
1515                        return 0;
1516                csr += 8 * vl;
1517        } else {
1518                if (vl != CNTR_INVALID_VL)
1519                        return 0;
1520        }
1521        val = read_write_csr(ppd->dd, csr, mode, data);
1522        return val;
1523}
1524
1525/* Software defined */
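/*
 * Read or write a plain 64-bit software counter in host memory using
 * the same CNTR_MODE_R/CNTR_MODE_W convention as read_write_csr().
 */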
1526static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1527                                u64 data)
1528{
1529        u64 ret;
1530
1531        if (mode == CNTR_MODE_R) {
1532                ret = *cntr;
1533        } else if (mode == CNTR_MODE_W) {
1534                *cntr = data;
1535                ret = data;
1536        } else {
1537                dd_dev_err(dd, "Invalid cntr sw access mode");
1538                return 0;
1539        }
1540
1541        hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1542
1543        return ret;
1544}
1545
1546static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1547                                 int vl, int mode, u64 data)
1548{
1549        struct hfi1_pportdata *ppd = context;
1550
1551        if (vl != CNTR_INVALID_VL)
1552                return 0;
1553        return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1554}
1555
1556static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1557                                 int vl, int mode, u64 data)
1558{
1559        struct hfi1_pportdata *ppd = context;
1560
1561        if (vl != CNTR_INVALID_VL)
1562                return 0;
1563        return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1564}
1565
1566static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1567                                       void *context, int vl, int mode,
1568                                       u64 data)
1569{
1570        struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1571
1572        if (vl != CNTR_INVALID_VL)
1573                return 0;
1574        return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1575}
1576
1577static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1578                                   void *context, int vl, int mode, u64 data)
1579{
1580        struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1581        u64 zero = 0;
1582        u64 *counter;
1583
1584        if (vl == CNTR_INVALID_VL)
1585                counter = &ppd->port_xmit_discards;
1586        else if (vl >= 0 && vl < C_VL_COUNT)
1587                counter = &ppd->port_xmit_discards_vl[vl];
1588        else
1589                counter = &zero;
1590
1591        return read_write_sw(ppd->dd, counter, mode, data);
1592}
1593
1594static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1595                                       void *context, int vl, int mode,
1596                                       u64 data)
1597{
1598        struct hfi1_pportdata *ppd = context;
1599
1600        if (vl != CNTR_INVALID_VL)
1601                return 0;
1602
1603        return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1604                             mode, data);
1605}
1606
1607static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1608                                      void *context, int vl, int mode, u64 data)
1609{
1610        struct hfi1_pportdata *ppd = context;
1611
1612        if (vl != CNTR_INVALID_VL)
1613                return 0;
1614
1615        return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1616                             mode, data);
1617}
1618
1619u64 get_all_cpu_total(u64 __percpu *cntr)
1620{
1621        int cpu;
1622        u64 counter = 0;
1623
1624        for_each_possible_cpu(cpu)
1625                counter += *per_cpu_ptr(cntr, cpu);
1626        return counter;
1627}
1628
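/*
 * Read or zero a per-CPU software counter.  A read returns the sum
 * across all possible CPUs minus the saved zero value *z_val; a write
 * may only zero the counter, by capturing the current total in *z_val.
 */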
1629static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1630                          u64 __percpu *cntr,
1631                          int vl, int mode, u64 data)
1632{
1633        u64 ret = 0;
1634
1635        if (vl != CNTR_INVALID_VL)
1636                return 0;
1637
1638        if (mode == CNTR_MODE_R) {
1639                ret = get_all_cpu_total(cntr) - *z_val;
1640        } else if (mode == CNTR_MODE_W) {
1641                /* A write can only zero the counter */
1642                if (data == 0)
1643                        *z_val = get_all_cpu_total(cntr);
1644                else
1645                        dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1646        } else {
1647                dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1648                return 0;
1649        }
1650
1651        return ret;
1652}
1653
1654static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1655                              void *context, int vl, int mode, u64 data)
1656{
1657        struct hfi1_devdata *dd = context;
1658
1659        return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1660                              mode, data);
1661}
1662
1663static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1664                                   void *context, int vl, int mode, u64 data)
1665{
1666        struct hfi1_devdata *dd = context;
1667
1668        return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1669                              mode, data);
1670}
1671
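/*
 * These verbs-layer wait counters are returned directly; the mode and
 * data arguments are ignored, so they cannot be zeroed through this
 * interface.
 */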
1672static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1673                              void *context, int vl, int mode, u64 data)
1674{
1675        struct hfi1_devdata *dd = context;
1676
1677        return dd->verbs_dev.n_piowait;
1678}
1679
1680static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1681                               void *context, int vl, int mode, u64 data)
1682{
1683        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1684
1685        return dd->verbs_dev.n_piodrain;
1686}
1687
1688static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1689                              void *context, int vl, int mode, u64 data)
1690{
1691        struct hfi1_devdata *dd = context;
1692
1693        return dd->verbs_dev.n_txwait;
1694}
1695
1696static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1697                               void *context, int vl, int mode, u64 data)
1698{
1699        struct hfi1_devdata *dd = context;
1700
1701        return dd->verbs_dev.n_kmem_wait;
1702}
1703
1704static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1705                                   void *context, int vl, int mode, u64 data)
1706{
1707        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1708
1709        return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1710                              mode, data);
1711}
1712
1713/* Software counters for the error status bits within MISC_ERR_STATUS */
1714static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1715                                             void *context, int vl, int mode,
1716                                             u64 data)
1717{
1718        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1719
1720        return dd->misc_err_status_cnt[12];
1721}
1722
1723static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1724                                          void *context, int vl, int mode,
1725                                          u64 data)
1726{
1727        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1728
1729        return dd->misc_err_status_cnt[11];
1730}
1731
1732static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1733                                               void *context, int vl, int mode,
1734                                               u64 data)
1735{
1736        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1737
1738        return dd->misc_err_status_cnt[10];
1739}
1740
1741static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1742                                                 void *context, int vl,
1743                                                 int mode, u64 data)
1744{
1745        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1746
1747        return dd->misc_err_status_cnt[9];
1748}
1749
1750static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1751                                           void *context, int vl, int mode,
1752                                           u64 data)
1753{
1754        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1755
1756        return dd->misc_err_status_cnt[8];
1757}
1758
1759static u64 access_misc_efuse_read_bad_addr_err_cnt(
1760                                const struct cntr_entry *entry,
1761                                void *context, int vl, int mode, u64 data)
1762{
1763        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1764
1765        return dd->misc_err_status_cnt[7];
1766}
1767
1768static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1769                                                void *context, int vl,
1770                                                int mode, u64 data)
1771{
1772        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1773
1774        return dd->misc_err_status_cnt[6];
1775}
1776
1777static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1778                                              void *context, int vl, int mode,
1779                                              u64 data)
1780{
1781        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1782
1783        return dd->misc_err_status_cnt[5];
1784}
1785
1786static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1787                                            void *context, int vl, int mode,
1788                                            u64 data)
1789{
1790        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1791
1792        return dd->misc_err_status_cnt[4];
1793}
1794
1795static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1796                                                 void *context, int vl,
1797                                                 int mode, u64 data)
1798{
1799        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1800
1801        return dd->misc_err_status_cnt[3];
1802}
1803
1804static u64 access_misc_csr_write_bad_addr_err_cnt(
1805                                const struct cntr_entry *entry,
1806                                void *context, int vl, int mode, u64 data)
1807{
1808        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1809
1810        return dd->misc_err_status_cnt[2];
1811}
1812
1813static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1814                                                 void *context, int vl,
1815                                                 int mode, u64 data)
1816{
1817        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1818
1819        return dd->misc_err_status_cnt[1];
1820}
1821
1822static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1823                                          void *context, int vl, int mode,
1824                                          u64 data)
1825{
1826        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1827
1828        return dd->misc_err_status_cnt[0];
1829}
1830
1831/*
1832 * Software counter for the aggregate of
1833 * individual CceErrStatus counters
1834 */
1835static u64 access_sw_cce_err_status_aggregated_cnt(
1836                                const struct cntr_entry *entry,
1837                                void *context, int vl, int mode, u64 data)
1838{
1839        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1840
1841        return dd->sw_cce_err_status_aggregate;
1842}
1843
1844/*
1845 * Software counters corresponding to each of the
1846 * error status bits within CceErrStatus
1847 */
1848static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1849                                              void *context, int vl, int mode,
1850                                              u64 data)
1851{
1852        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1853
1854        return dd->cce_err_status_cnt[40];
1855}
1856
1857static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1858                                          void *context, int vl, int mode,
1859                                          u64 data)
1860{
1861        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1862
1863        return dd->cce_err_status_cnt[39];
1864}
1865
1866static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1867                                          void *context, int vl, int mode,
1868                                          u64 data)
1869{
1870        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1871
1872        return dd->cce_err_status_cnt[38];
1873}
1874
1875static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1876                                             void *context, int vl, int mode,
1877                                             u64 data)
1878{
1879        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1880
1881        return dd->cce_err_status_cnt[37];
1882}
1883
1884static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1885                                             void *context, int vl, int mode,
1886                                             u64 data)
1887{
1888        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1889
1890        return dd->cce_err_status_cnt[36];
1891}
1892
1893static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1894                                const struct cntr_entry *entry,
1895                                void *context, int vl, int mode, u64 data)
1896{
1897        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1898
1899        return dd->cce_err_status_cnt[35];
1900}
1901
1902static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1903                                const struct cntr_entry *entry,
1904                                void *context, int vl, int mode, u64 data)
1905{
1906        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1907
1908        return dd->cce_err_status_cnt[34];
1909}
1910
1911static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1912                                                 void *context, int vl,
1913                                                 int mode, u64 data)
1914{
1915        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1916
1917        return dd->cce_err_status_cnt[33];
1918}
1919
1920static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1921                                                void *context, int vl, int mode,
1922                                                u64 data)
1923{
1924        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1925
1926        return dd->cce_err_status_cnt[32];
1927}
1928
1929static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1930                                   void *context, int vl, int mode, u64 data)
1931{
1932        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1933
1934        return dd->cce_err_status_cnt[31];
1935}
1936
1937static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1938                                               void *context, int vl, int mode,
1939                                               u64 data)
1940{
1941        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1942
1943        return dd->cce_err_status_cnt[30];
1944}
1945
1946static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1947                                              void *context, int vl, int mode,
1948                                              u64 data)
1949{
1950        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1951
1952        return dd->cce_err_status_cnt[29];
1953}
1954
1955static u64 access_pcic_transmit_back_parity_err_cnt(
1956                                const struct cntr_entry *entry,
1957                                void *context, int vl, int mode, u64 data)
1958{
1959        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1960
1961        return dd->cce_err_status_cnt[28];
1962}
1963
1964static u64 access_pcic_transmit_front_parity_err_cnt(
1965                                const struct cntr_entry *entry,
1966                                void *context, int vl, int mode, u64 data)
1967{
1968        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1969
1970        return dd->cce_err_status_cnt[27];
1971}
1972
1973static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1974                                             void *context, int vl, int mode,
1975                                             u64 data)
1976{
1977        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1978
1979        return dd->cce_err_status_cnt[26];
1980}
1981
1982static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1983                                            void *context, int vl, int mode,
1984                                            u64 data)
1985{
1986        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1987
1988        return dd->cce_err_status_cnt[25];
1989}
1990
1991static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1992                                              void *context, int vl, int mode,
1993                                              u64 data)
1994{
1995        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1996
1997        return dd->cce_err_status_cnt[24];
1998}
1999
2000static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
2001                                             void *context, int vl, int mode,
2002                                             u64 data)
2003{
2004        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2005
2006        return dd->cce_err_status_cnt[23];
2007}
2008
2009static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
2010                                                 void *context, int vl,
2011                                                 int mode, u64 data)
2012{
2013        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2014
2015        return dd->cce_err_status_cnt[22];
2016}
2017
2018static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
2019                                         void *context, int vl, int mode,
2020                                         u64 data)
2021{
2022        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2023
2024        return dd->cce_err_status_cnt[21];
2025}
2026
2027static u64 access_pcic_n_post_dat_q_parity_err_cnt(
2028                                const struct cntr_entry *entry,
2029                                void *context, int vl, int mode, u64 data)
2030{
2031        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2032
2033        return dd->cce_err_status_cnt[20];
2034}
2035
2036static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
2037                                                 void *context, int vl,
2038                                                 int mode, u64 data)
2039{
2040        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2041
2042        return dd->cce_err_status_cnt[19];
2043}
2044
2045static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2046                                             void *context, int vl, int mode,
2047                                             u64 data)
2048{
2049        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2050
2051        return dd->cce_err_status_cnt[18];
2052}
2053
2054static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2055                                            void *context, int vl, int mode,
2056                                            u64 data)
2057{
2058        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2059
2060        return dd->cce_err_status_cnt[17];
2061}
2062
2063static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2064                                              void *context, int vl, int mode,
2065                                              u64 data)
2066{
2067        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2068
2069        return dd->cce_err_status_cnt[16];
2070}
2071
2072static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2073                                             void *context, int vl, int mode,
2074                                             u64 data)
2075{
2076        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2077
2078        return dd->cce_err_status_cnt[15];
2079}
2080
2081static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
2082                                                 void *context, int vl,
2083                                                 int mode, u64 data)
2084{
2085        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2086
2087        return dd->cce_err_status_cnt[14];
2088}
2089
2090static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
2091                                             void *context, int vl, int mode,
2092                                             u64 data)
2093{
2094        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2095
2096        return dd->cce_err_status_cnt[13];
2097}
2098
2099static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2100                                const struct cntr_entry *entry,
2101                                void *context, int vl, int mode, u64 data)
2102{
2103        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2104
2105        return dd->cce_err_status_cnt[12];
2106}
2107
2108static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2109                                const struct cntr_entry *entry,
2110                                void *context, int vl, int mode, u64 data)
2111{
2112        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2113
2114        return dd->cce_err_status_cnt[11];
2115}
2116
2117static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2118                                const struct cntr_entry *entry,
2119                                void *context, int vl, int mode, u64 data)
2120{
2121        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2122
2123        return dd->cce_err_status_cnt[10];
2124}
2125
2126static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2127                                const struct cntr_entry *entry,
2128                                void *context, int vl, int mode, u64 data)
2129{
2130        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2131
2132        return dd->cce_err_status_cnt[9];
2133}
2134
2135static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2136                                const struct cntr_entry *entry,
2137                                void *context, int vl, int mode, u64 data)
2138{
2139        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2140
2141        return dd->cce_err_status_cnt[8];
2142}
2143
2144static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2145                                                 void *context, int vl,
2146                                                 int mode, u64 data)
2147{
2148        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2149
2150        return dd->cce_err_status_cnt[7];
2151}
2152
2153static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2154                                const struct cntr_entry *entry,
2155                                void *context, int vl, int mode, u64 data)
2156{
2157        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2158
2159        return dd->cce_err_status_cnt[6];
2160}
2161
2162static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2163                                               void *context, int vl, int mode,
2164                                               u64 data)
2165{
2166        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2167
2168        return dd->cce_err_status_cnt[5];
2169}
2170
2171static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2172                                          void *context, int vl, int mode,
2173                                          u64 data)
2174{
2175        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2176
2177        return dd->cce_err_status_cnt[4];
2178}
2179
2180static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2181                                const struct cntr_entry *entry,
2182                                void *context, int vl, int mode, u64 data)
2183{
2184        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2185
2186        return dd->cce_err_status_cnt[3];
2187}
2188
2189static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2190                                                 void *context, int vl,
2191                                                 int mode, u64 data)
2192{
2193        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2194
2195        return dd->cce_err_status_cnt[2];
2196}
2197
2198static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2199                                                void *context, int vl,
2200                                                int mode, u64 data)
2201{
2202        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2203
2204        return dd->cce_err_status_cnt[1];
2205}
2206
2207static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2208                                         void *context, int vl, int mode,
2209                                         u64 data)
2210{
2211        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2212
2213        return dd->cce_err_status_cnt[0];
2214}
2215
2216/*
2217 * Software counters corresponding to each of the
2218 * error status bits within RcvErrStatus
2219 */
2220static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2221                                        void *context, int vl, int mode,
2222                                        u64 data)
2223{
2224        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2225
2226        return dd->rcv_err_status_cnt[63];
2227}
2228
2229static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2230                                                void *context, int vl,
2231                                                int mode, u64 data)
2232{
2233        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2234
2235        return dd->rcv_err_status_cnt[62];
2236}
2237
2238static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2239                                               void *context, int vl, int mode,
2240                                               u64 data)
2241{
2242        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2243
2244        return dd->rcv_err_status_cnt[61];
2245}
2246
2247static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2248                                         void *context, int vl, int mode,
2249                                         u64 data)
2250{
2251        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2252
2253        return dd->rcv_err_status_cnt[60];
2254}
2255
2256static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2257                                                 void *context, int vl,
2258                                                 int mode, u64 data)
2259{
2260        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2261
2262        return dd->rcv_err_status_cnt[59];
2263}
2264
2265static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2266                                                 void *context, int vl,
2267                                                 int mode, u64 data)
2268{
2269        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2270
2271        return dd->rcv_err_status_cnt[58];
2272}
2273
2274static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2275                                            void *context, int vl, int mode,
2276                                            u64 data)
2277{
2278        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2279
2280        return dd->rcv_err_status_cnt[57];
2281}
2282
2283static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2284                                           void *context, int vl, int mode,
2285                                           u64 data)
2286{
2287        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2288
2289        return dd->rcv_err_status_cnt[56];
2290}
2291
2292static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2293                                           void *context, int vl, int mode,
2294                                           u64 data)
2295{
2296        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2297
2298        return dd->rcv_err_status_cnt[55];
2299}
2300
2301static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2302                                const struct cntr_entry *entry,
2303                                void *context, int vl, int mode, u64 data)
2304{
2305        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2306
2307        return dd->rcv_err_status_cnt[54];
2308}
2309
2310static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2311                                const struct cntr_entry *entry,
2312                                void *context, int vl, int mode, u64 data)
2313{
2314        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2315
2316        return dd->rcv_err_status_cnt[53];
2317}
2318
2319static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2320                                                 void *context, int vl,
2321                                                 int mode, u64 data)
2322{
2323        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2324
2325        return dd->rcv_err_status_cnt[52];
2326}
2327
2328static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2329                                                 void *context, int vl,
2330                                                 int mode, u64 data)
2331{
2332        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2333
2334        return dd->rcv_err_status_cnt[51];
2335}
2336
2337static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2338                                                 void *context, int vl,
2339                                                 int mode, u64 data)
2340{
2341        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2342
2343        return dd->rcv_err_status_cnt[50];
2344}
2345
2346static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2347                                                 void *context, int vl,
2348                                                 int mode, u64 data)
2349{
2350        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2351
2352        return dd->rcv_err_status_cnt[49];
2353}
2354
2355static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2356                                                 void *context, int vl,
2357                                                 int mode, u64 data)
2358{
2359        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2360
2361        return dd->rcv_err_status_cnt[48];
2362}
2363
2364static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2365                                                 void *context, int vl,
2366                                                 int mode, u64 data)
2367{
2368        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2369
2370        return dd->rcv_err_status_cnt[47];
2371}
2372
2373static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2374                                         void *context, int vl, int mode,
2375                                         u64 data)
2376{
2377        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2378
2379        return dd->rcv_err_status_cnt[46];
2380}
2381
2382static u64 access_rx_hq_intr_csr_parity_err_cnt(
2383                                const struct cntr_entry *entry,
2384                                void *context, int vl, int mode, u64 data)
2385{
2386        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2387
2388        return dd->rcv_err_status_cnt[45];
2389}
2390
2391static u64 access_rx_lookup_csr_parity_err_cnt(
2392                                const struct cntr_entry *entry,
2393                                void *context, int vl, int mode, u64 data)
2394{
2395        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2396
2397        return dd->rcv_err_status_cnt[44];
2398}
2399
2400static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2401                                const struct cntr_entry *entry,
2402                                void *context, int vl, int mode, u64 data)
2403{
2404        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2405
2406        return dd->rcv_err_status_cnt[43];
2407}
2408
2409static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2410                                const struct cntr_entry *entry,
2411                                void *context, int vl, int mode, u64 data)
2412{
2413        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2414
2415        return dd->rcv_err_status_cnt[42];
2416}
2417
2418static u64 access_rx_lookup_des_part2_parity_err_cnt(
2419                                const struct cntr_entry *entry,
2420                                void *context, int vl, int mode, u64 data)
2421{
2422        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2423
2424        return dd->rcv_err_status_cnt[41];
2425}
2426
2427static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2428                                const struct cntr_entry *entry,
2429                                void *context, int vl, int mode, u64 data)
2430{
2431        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2432
2433        return dd->rcv_err_status_cnt[40];
2434}
2435
2436static u64 access_rx_lookup_des_part1_unc_err_cnt(
2437                                const struct cntr_entry *entry,
2438                                void *context, int vl, int mode, u64 data)
2439{
2440        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2441
2442        return dd->rcv_err_status_cnt[39];
2443}
2444
2445static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2446                                const struct cntr_entry *entry,
2447                                void *context, int vl, int mode, u64 data)
2448{
2449        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2450
2451        return dd->rcv_err_status_cnt[38];
2452}
2453
2454static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2455                                const struct cntr_entry *entry,
2456                                void *context, int vl, int mode, u64 data)
2457{
2458        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2459
2460        return dd->rcv_err_status_cnt[37];
2461}
2462
2463static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2464                                const struct cntr_entry *entry,
2465                                void *context, int vl, int mode, u64 data)
2466{
2467        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2468
2469        return dd->rcv_err_status_cnt[36];
2470}
2471
2472static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2473                                const struct cntr_entry *entry,
2474                                void *context, int vl, int mode, u64 data)
2475{
2476        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2477
2478        return dd->rcv_err_status_cnt[35];
2479}
2480
2481static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2482                                const struct cntr_entry *entry,
2483                                void *context, int vl, int mode, u64 data)
2484{
2485        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2486
2487        return dd->rcv_err_status_cnt[34];
2488}
2489
2490static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2491                                const struct cntr_entry *entry,
2492                                void *context, int vl, int mode, u64 data)
2493{
2494        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2495
2496        return dd->rcv_err_status_cnt[33];
2497}
2498
2499static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2500                                        void *context, int vl, int mode,
2501                                        u64 data)
2502{
2503        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2504
2505        return dd->rcv_err_status_cnt[32];
2506}
2507
2508static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2509                                       void *context, int vl, int mode,
2510                                       u64 data)
2511{
2512        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2513
2514        return dd->rcv_err_status_cnt[31];
2515}
2516
2517static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2518                                          void *context, int vl, int mode,
2519                                          u64 data)
2520{
2521        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2522
2523        return dd->rcv_err_status_cnt[30];
2524}
2525
2526static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2527                                             void *context, int vl, int mode,
2528                                             u64 data)
2529{
2530        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2531
2532        return dd->rcv_err_status_cnt[29];
2533}
2534
2535static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2536                                                 void *context, int vl,
2537                                                 int mode, u64 data)
2538{
2539        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2540
2541        return dd->rcv_err_status_cnt[28];
2542}
2543
2544static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2545                                const struct cntr_entry *entry,
2546                                void *context, int vl, int mode, u64 data)
2547{
2548        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2549
2550        return dd->rcv_err_status_cnt[27];
2551}
2552
2553static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2554                                const struct cntr_entry *entry,
2555                                void *context, int vl, int mode, u64 data)
2556{
2557        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2558
2559        return dd->rcv_err_status_cnt[26];
2560}
2561
2562static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2563                                const struct cntr_entry *entry,
2564                                void *context, int vl, int mode, u64 data)
2565{
2566        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2567
2568        return dd->rcv_err_status_cnt[25];
2569}
2570
2571static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2572                                const struct cntr_entry *entry,
2573                                void *context, int vl, int mode, u64 data)
2574{
2575        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2576
2577        return dd->rcv_err_status_cnt[24];
2578}
2579
2580static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2581                                const struct cntr_entry *entry,
2582                                void *context, int vl, int mode, u64 data)
2583{
2584        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2585
2586        return dd->rcv_err_status_cnt[23];
2587}
2588
2589static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2590                                const struct cntr_entry *entry,
2591                                void *context, int vl, int mode, u64 data)
2592{
2593        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2594
2595        return dd->rcv_err_status_cnt[22];
2596}
2597
2598static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2599                                const struct cntr_entry *entry,
2600                                void *context, int vl, int mode, u64 data)
2601{
2602        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2603
2604        return dd->rcv_err_status_cnt[21];
2605}
2606
2607static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2608                                const struct cntr_entry *entry,
2609                                void *context, int vl, int mode, u64 data)
2610{
2611        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2612
2613        return dd->rcv_err_status_cnt[20];
2614}
2615
2616static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2617                                const struct cntr_entry *entry,
2618                                void *context, int vl, int mode, u64 data)
2619{
2620        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2621
2622        return dd->rcv_err_status_cnt[19];
2623}
2624
2625static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2626                                                 void *context, int vl,
2627                                                 int mode, u64 data)
2628{
2629        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2630
2631        return dd->rcv_err_status_cnt[18];
2632}
2633
2634static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2635                                                 void *context, int vl,
2636                                                 int mode, u64 data)
2637{
2638        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2639
2640        return dd->rcv_err_status_cnt[17];
2641}
2642
2643static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2644                                const struct cntr_entry *entry,
2645                                void *context, int vl, int mode, u64 data)
2646{
2647        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2648
2649        return dd->rcv_err_status_cnt[16];
2650}
2651
2652static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2653                                const struct cntr_entry *entry,
2654                                void *context, int vl, int mode, u64 data)
2655{
2656        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2657
2658        return dd->rcv_err_status_cnt[15];
2659}
2660
2661static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2662                                                void *context, int vl,
2663                                                int mode, u64 data)
2664{
2665        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2666
2667        return dd->rcv_err_status_cnt[14];
2668}
2669
2670static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2671                                                void *context, int vl,
2672                                                int mode, u64 data)
2673{
2674        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2675
2676        return dd->rcv_err_status_cnt[13];
2677}
2678
2679static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2680                                              void *context, int vl, int mode,
2681                                              u64 data)
2682{
2683        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2684
2685        return dd->rcv_err_status_cnt[12];
2686}
2687
2688static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2689                                          void *context, int vl, int mode,
2690                                          u64 data)
2691{
2692        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2693
2694        return dd->rcv_err_status_cnt[11];
2695}
2696
2697static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2698                                          void *context, int vl, int mode,
2699                                          u64 data)
2700{
2701        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2702
2703        return dd->rcv_err_status_cnt[10];
2704}
2705
2706static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2707                                               void *context, int vl, int mode,
2708                                               u64 data)
2709{
2710        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2711
2712        return dd->rcv_err_status_cnt[9];
2713}
2714
2715static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2716                                            void *context, int vl, int mode,
2717                                            u64 data)
2718{
2719        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2720
2721        return dd->rcv_err_status_cnt[8];
2722}
2723
2724static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2725                                const struct cntr_entry *entry,
2726                                void *context, int vl, int mode, u64 data)
2727{
2728        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2729
2730        return dd->rcv_err_status_cnt[7];
2731}
2732
2733static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2734                                const struct cntr_entry *entry,
2735                                void *context, int vl, int mode, u64 data)
2736{
2737        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2738
2739        return dd->rcv_err_status_cnt[6];
2740}
2741
2742static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2743                                          void *context, int vl, int mode,
2744                                          u64 data)
2745{
2746        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2747
2748        return dd->rcv_err_status_cnt[5];
2749}
2750
2751static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2752                                          void *context, int vl, int mode,
2753                                          u64 data)
2754{
2755        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2756
2757        return dd->rcv_err_status_cnt[4];
2758}
2759
2760static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2761                                         void *context, int vl, int mode,
2762                                         u64 data)
2763{
2764        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2765
2766        return dd->rcv_err_status_cnt[3];
2767}
2768
2769static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2770                                         void *context, int vl, int mode,
2771                                         u64 data)
2772{
2773        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2774
2775        return dd->rcv_err_status_cnt[2];
2776}
2777
2778static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2779                                            void *context, int vl, int mode,
2780                                            u64 data)
2781{
2782        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2783
2784        return dd->rcv_err_status_cnt[1];
2785}
2786
2787static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2788                                         void *context, int vl, int mode,
2789                                         u64 data)
2790{
2791        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2792
2793        return dd->rcv_err_status_cnt[0];
2794}
2795
2796/*
2797 * Software counters corresponding to each of the
2798 * error status bits within SendPioErrStatus
2799 */
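/*
 * As with the RcvErrStatus accessors above, each function in this and
 * the following groups simply reports the software count kept for one
 * named bit of the hardware error-status register; the array index is
 * the bit position within that register.  The counts themselves are
 * bumped by the error-interrupt handling code elsewhere in this file
 * when the matching status bit is observed.
 */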
2800static u64 access_pio_pec_sop_head_parity_err_cnt(
2801                                const struct cntr_entry *entry,
2802                                void *context, int vl, int mode, u64 data)
2803{
2804        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2805
2806        return dd->send_pio_err_status_cnt[35];
2807}
2808
2809static u64 access_pio_pcc_sop_head_parity_err_cnt(
2810                                const struct cntr_entry *entry,
2811                                void *context, int vl, int mode, u64 data)
2812{
2813        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2814
2815        return dd->send_pio_err_status_cnt[34];
2816}
2817
2818static u64 access_pio_last_returned_cnt_parity_err_cnt(
2819                                const struct cntr_entry *entry,
2820                                void *context, int vl, int mode, u64 data)
2821{
2822        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2823
2824        return dd->send_pio_err_status_cnt[33];
2825}
2826
2827static u64 access_pio_current_free_cnt_parity_err_cnt(
2828                                const struct cntr_entry *entry,
2829                                void *context, int vl, int mode, u64 data)
2830{
2831        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2832
2833        return dd->send_pio_err_status_cnt[32];
2834}
2835
2836static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2837                                          void *context, int vl, int mode,
2838                                          u64 data)
2839{
2840        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2841
2842        return dd->send_pio_err_status_cnt[31];
2843}
2844
2845static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2846                                          void *context, int vl, int mode,
2847                                          u64 data)
2848{
2849        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2850
2851        return dd->send_pio_err_status_cnt[30];
2852}
2853
2854static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2855                                           void *context, int vl, int mode,
2856                                           u64 data)
2857{
2858        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2859
2860        return dd->send_pio_err_status_cnt[29];
2861}
2862
2863static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2864                                const struct cntr_entry *entry,
2865                                void *context, int vl, int mode, u64 data)
2866{
2867        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2868
2869        return dd->send_pio_err_status_cnt[28];
2870}
2871
2872static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2873                                             void *context, int vl, int mode,
2874                                             u64 data)
2875{
2876        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2877
2878        return dd->send_pio_err_status_cnt[27];
2879}
2880
2881static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2882                                             void *context, int vl, int mode,
2883                                             u64 data)
2884{
2885        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2886
2887        return dd->send_pio_err_status_cnt[26];
2888}
2889
2890static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2891                                                void *context, int vl,
2892                                                int mode, u64 data)
2893{
2894        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2895
2896        return dd->send_pio_err_status_cnt[25];
2897}
2898
2899static u64 access_pio_block_qw_count_parity_err_cnt(
2900                                const struct cntr_entry *entry,
2901                                void *context, int vl, int mode, u64 data)
2902{
2903        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2904
2905        return dd->send_pio_err_status_cnt[24];
2906}
2907
2908static u64 access_pio_write_qw_valid_parity_err_cnt(
2909                                const struct cntr_entry *entry,
2910                                void *context, int vl, int mode, u64 data)
2911{
2912        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2913
2914        return dd->send_pio_err_status_cnt[23];
2915}
2916
2917static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2918                                            void *context, int vl, int mode,
2919                                            u64 data)
2920{
2921        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2922
2923        return dd->send_pio_err_status_cnt[22];
2924}
2925
2926static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2927                                                void *context, int vl,
2928                                                int mode, u64 data)
2929{
2930        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2931
2932        return dd->send_pio_err_status_cnt[21];
2933}
2934
2935static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2936                                                void *context, int vl,
2937                                                int mode, u64 data)
2938{
2939        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2940
2941        return dd->send_pio_err_status_cnt[20];
2942}
2943
2944static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2945                                                void *context, int vl,
2946                                                int mode, u64 data)
2947{
2948        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2949
2950        return dd->send_pio_err_status_cnt[19];
2951}
2952
2953static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2954                                const struct cntr_entry *entry,
2955                                void *context, int vl, int mode, u64 data)
2956{
2957        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2958
2959        return dd->send_pio_err_status_cnt[18];
2960}
2961
2962static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2963                                         void *context, int vl, int mode,
2964                                         u64 data)
2965{
2966        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2967
2968        return dd->send_pio_err_status_cnt[17];
2969}
2970
2971static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2972                                            void *context, int vl, int mode,
2973                                            u64 data)
2974{
2975        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2976
2977        return dd->send_pio_err_status_cnt[16];
2978}
2979
2980static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2981                                const struct cntr_entry *entry,
2982                                void *context, int vl, int mode, u64 data)
2983{
2984        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2985
2986        return dd->send_pio_err_status_cnt[15];
2987}
2988
2989static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2990                                const struct cntr_entry *entry,
2991                                void *context, int vl, int mode, u64 data)
2992{
2993        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2994
2995        return dd->send_pio_err_status_cnt[14];
2996}
2997
2998static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2999                                const struct cntr_entry *entry,
3000                                void *context, int vl, int mode, u64 data)
3001{
3002        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3003
3004        return dd->send_pio_err_status_cnt[13];
3005}
3006
3007static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
3008                                const struct cntr_entry *entry,
3009                                void *context, int vl, int mode, u64 data)
3010{
3011        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3012
3013        return dd->send_pio_err_status_cnt[12];
3014}
3015
3016static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
3017                                const struct cntr_entry *entry,
3018                                void *context, int vl, int mode, u64 data)
3019{
3020        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3021
3022        return dd->send_pio_err_status_cnt[11];
3023}
3024
3025static u64 access_pio_sm_pkt_reset_parity_err_cnt(
3026                                const struct cntr_entry *entry,
3027                                void *context, int vl, int mode, u64 data)
3028{
3029        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3030
3031        return dd->send_pio_err_status_cnt[10];
3032}
3033
3034static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
3035                                const struct cntr_entry *entry,
3036                                void *context, int vl, int mode, u64 data)
3037{
3038        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3039
3040        return dd->send_pio_err_status_cnt[9];
3041}
3042
3043static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
3044                                const struct cntr_entry *entry,
3045                                void *context, int vl, int mode, u64 data)
3046{
3047        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3048
3049        return dd->send_pio_err_status_cnt[8];
3050}
3051
3052static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
3053                                const struct cntr_entry *entry,
3054                                void *context, int vl, int mode, u64 data)
3055{
3056        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3057
3058        return dd->send_pio_err_status_cnt[7];
3059}
3060
3061static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
3062                                              void *context, int vl, int mode,
3063                                              u64 data)
3064{
3065        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3066
3067        return dd->send_pio_err_status_cnt[6];
3068}
3069
3070static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
3071                                              void *context, int vl, int mode,
3072                                              u64 data)
3073{
3074        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3075
3076        return dd->send_pio_err_status_cnt[5];
3077}
3078
3079static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
3080                                           void *context, int vl, int mode,
3081                                           u64 data)
3082{
3083        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3084
3085        return dd->send_pio_err_status_cnt[4];
3086}
3087
3088static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
3089                                           void *context, int vl, int mode,
3090                                           u64 data)
3091{
3092        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3093
3094        return dd->send_pio_err_status_cnt[3];
3095}
3096
3097static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3098                                         void *context, int vl, int mode,
3099                                         u64 data)
3100{
3101        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3102
3103        return dd->send_pio_err_status_cnt[2];
3104}
3105
3106static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3107                                                void *context, int vl,
3108                                                int mode, u64 data)
3109{
3110        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3111
3112        return dd->send_pio_err_status_cnt[1];
3113}
3114
3115static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3116                                             void *context, int vl, int mode,
3117                                             u64 data)
3118{
3119        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3120
3121        return dd->send_pio_err_status_cnt[0];
3122}
3123
3124/*
3125 * Software counters corresponding to each of the
3126 * error status bits within SendDmaErrStatus
3127 */
3128static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3129                                const struct cntr_entry *entry,
3130                                void *context, int vl, int mode, u64 data)
3131{
3132        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3133
3134        return dd->send_dma_err_status_cnt[3];
3135}
3136
3137static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3138                                const struct cntr_entry *entry,
3139                                void *context, int vl, int mode, u64 data)
3140{
3141        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3142
3143        return dd->send_dma_err_status_cnt[2];
3144}
3145
3146static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3147                                          void *context, int vl, int mode,
3148                                          u64 data)
3149{
3150        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3151
3152        return dd->send_dma_err_status_cnt[1];
3153}
3154
3155static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3156                                       void *context, int vl, int mode,
3157                                       u64 data)
3158{
3159        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3160
3161        return dd->send_dma_err_status_cnt[0];
3162}
3163
3164/*
3165 * Software counters corresponding to each of the
3166 * error status bits within SendEgressErrStatus
3167 */
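/*
 * SendEgressErrStatus is a full 64-bit register; the accessors below
 * walk its bits from 63 down to 0.  The reserved bits (10, 9, 6 and 2)
 * also get accessors so that every bit position can be reported.
 */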
3168static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3169                                const struct cntr_entry *entry,
3170                                void *context, int vl, int mode, u64 data)
3171{
3172        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3173
3174        return dd->send_egress_err_status_cnt[63];
3175}
3176
3177static u64 access_tx_read_sdma_memory_csr_err_cnt(
3178                                const struct cntr_entry *entry,
3179                                void *context, int vl, int mode, u64 data)
3180{
3181        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3182
3183        return dd->send_egress_err_status_cnt[62];
3184}
3185
3186static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3187                                             void *context, int vl, int mode,
3188                                             u64 data)
3189{
3190        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3191
3192        return dd->send_egress_err_status_cnt[61];
3193}
3194
3195static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3196                                                 void *context, int vl,
3197                                                 int mode, u64 data)
3198{
3199        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3200
3201        return dd->send_egress_err_status_cnt[60];
3202}
3203
3204static u64 access_tx_read_sdma_memory_cor_err_cnt(
3205                                const struct cntr_entry *entry,
3206                                void *context, int vl, int mode, u64 data)
3207{
3208        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3209
3210        return dd->send_egress_err_status_cnt[59];
3211}
3212
3213static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3214                                        void *context, int vl, int mode,
3215                                        u64 data)
3216{
3217        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3218
3219        return dd->send_egress_err_status_cnt[58];
3220}
3221
3222static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3223                                            void *context, int vl, int mode,
3224                                            u64 data)
3225{
3226        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3227
3228        return dd->send_egress_err_status_cnt[57];
3229}
3230
3231static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3232                                              void *context, int vl, int mode,
3233                                              u64 data)
3234{
3235        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3236
3237        return dd->send_egress_err_status_cnt[56];
3238}
3239
3240static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3241                                              void *context, int vl, int mode,
3242                                              u64 data)
3243{
3244        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3245
3246        return dd->send_egress_err_status_cnt[55];
3247}
3248
3249static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3250                                              void *context, int vl, int mode,
3251                                              u64 data)
3252{
3253        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3254
3255        return dd->send_egress_err_status_cnt[54];
3256}
3257
3258static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3259                                              void *context, int vl, int mode,
3260                                              u64 data)
3261{
3262        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3263
3264        return dd->send_egress_err_status_cnt[53];
3265}
3266
3267static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3268                                              void *context, int vl, int mode,
3269                                              u64 data)
3270{
3271        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3272
3273        return dd->send_egress_err_status_cnt[52];
3274}
3275
3276static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3277                                              void *context, int vl, int mode,
3278                                              u64 data)
3279{
3280        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3281
3282        return dd->send_egress_err_status_cnt[51];
3283}
3284
3285static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3286                                              void *context, int vl, int mode,
3287                                              u64 data)
3288{
3289        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3290
3291        return dd->send_egress_err_status_cnt[50];
3292}
3293
3294static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3295                                              void *context, int vl, int mode,
3296                                              u64 data)
3297{
3298        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3299
3300        return dd->send_egress_err_status_cnt[49];
3301}
3302
3303static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3304                                              void *context, int vl, int mode,
3305                                              u64 data)
3306{
3307        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3308
3309        return dd->send_egress_err_status_cnt[48];
3310}
3311
3312static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3313                                              void *context, int vl, int mode,
3314                                              u64 data)
3315{
3316        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3317
3318        return dd->send_egress_err_status_cnt[47];
3319}
3320
3321static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3322                                            void *context, int vl, int mode,
3323                                            u64 data)
3324{
3325        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3326
3327        return dd->send_egress_err_status_cnt[46];
3328}
3329
3330static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3331                                             void *context, int vl, int mode,
3332                                             u64 data)
3333{
3334        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3335
3336        return dd->send_egress_err_status_cnt[45];
3337}
3338
3339static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3340                                                 void *context, int vl,
3341                                                 int mode, u64 data)
3342{
3343        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3344
3345        return dd->send_egress_err_status_cnt[44];
3346}
3347
3348static u64 access_tx_read_sdma_memory_unc_err_cnt(
3349                                const struct cntr_entry *entry,
3350                                void *context, int vl, int mode, u64 data)
3351{
3352        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3353
3354        return dd->send_egress_err_status_cnt[43];
3355}
3356
3357static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3358                                        void *context, int vl, int mode,
3359                                        u64 data)
3360{
3361        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3362
3363        return dd->send_egress_err_status_cnt[42];
3364}
3365
3366static u64 access_tx_credit_return_partiy_err_cnt(
3367                                const struct cntr_entry *entry,
3368                                void *context, int vl, int mode, u64 data)
3369{
3370        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3371
3372        return dd->send_egress_err_status_cnt[41];
3373}
3374
3375static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3376                                const struct cntr_entry *entry,
3377                                void *context, int vl, int mode, u64 data)
3378{
3379        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3380
3381        return dd->send_egress_err_status_cnt[40];
3382}
3383
3384static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3385                                const struct cntr_entry *entry,
3386                                void *context, int vl, int mode, u64 data)
3387{
3388        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3389
3390        return dd->send_egress_err_status_cnt[39];
3391}
3392
3393static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3394                                const struct cntr_entry *entry,
3395                                void *context, int vl, int mode, u64 data)
3396{
3397        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3398
3399        return dd->send_egress_err_status_cnt[38];
3400}
3401
3402static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3403                                const struct cntr_entry *entry,
3404                                void *context, int vl, int mode, u64 data)
3405{
3406        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3407
3408        return dd->send_egress_err_status_cnt[37];
3409}
3410
3411static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3412                                const struct cntr_entry *entry,
3413                                void *context, int vl, int mode, u64 data)
3414{
3415        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3416
3417        return dd->send_egress_err_status_cnt[36];
3418}
3419
3420static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3421                                const struct cntr_entry *entry,
3422                                void *context, int vl, int mode, u64 data)
3423{
3424        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3425
3426        return dd->send_egress_err_status_cnt[35];
3427}
3428
3429static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3430                                const struct cntr_entry *entry,
3431                                void *context, int vl, int mode, u64 data)
3432{
3433        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3434
3435        return dd->send_egress_err_status_cnt[34];
3436}
3437
3438static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3439                                const struct cntr_entry *entry,
3440                                void *context, int vl, int mode, u64 data)
3441{
3442        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3443
3444        return dd->send_egress_err_status_cnt[33];
3445}
3446
3447static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3448                                const struct cntr_entry *entry,
3449                                void *context, int vl, int mode, u64 data)
3450{
3451        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3452
3453        return dd->send_egress_err_status_cnt[32];
3454}
3455
3456static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3457                                const struct cntr_entry *entry,
3458                                void *context, int vl, int mode, u64 data)
3459{
3460        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3461
3462        return dd->send_egress_err_status_cnt[31];
3463}
3464
3465static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3466                                const struct cntr_entry *entry,
3467                                void *context, int vl, int mode, u64 data)
3468{
3469        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3470
3471        return dd->send_egress_err_status_cnt[30];
3472}
3473
3474static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3475                                const struct cntr_entry *entry,
3476                                void *context, int vl, int mode, u64 data)
3477{
3478        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3479
3480        return dd->send_egress_err_status_cnt[29];
3481}
3482
3483static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3484                                const struct cntr_entry *entry,
3485                                void *context, int vl, int mode, u64 data)
3486{
3487        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3488
3489        return dd->send_egress_err_status_cnt[28];
3490}
3491
3492static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3493                                const struct cntr_entry *entry,
3494                                void *context, int vl, int mode, u64 data)
3495{
3496        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3497
3498        return dd->send_egress_err_status_cnt[27];
3499}
3500
3501static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3502                                const struct cntr_entry *entry,
3503                                void *context, int vl, int mode, u64 data)
3504{
3505        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3506
3507        return dd->send_egress_err_status_cnt[26];
3508}
3509
3510static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3511                                const struct cntr_entry *entry,
3512                                void *context, int vl, int mode, u64 data)
3513{
3514        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3515
3516        return dd->send_egress_err_status_cnt[25];
3517}
3518
3519static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3520                                const struct cntr_entry *entry,
3521                                void *context, int vl, int mode, u64 data)
3522{
3523        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3524
3525        return dd->send_egress_err_status_cnt[24];
3526}
3527
3528static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3529                                const struct cntr_entry *entry,
3530                                void *context, int vl, int mode, u64 data)
3531{
3532        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3533
3534        return dd->send_egress_err_status_cnt[23];
3535}
3536
3537static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3538                                const struct cntr_entry *entry,
3539                                void *context, int vl, int mode, u64 data)
3540{
3541        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3542
3543        return dd->send_egress_err_status_cnt[22];
3544}
3545
3546static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3547                                const struct cntr_entry *entry,
3548                                void *context, int vl, int mode, u64 data)
3549{
3550        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3551
3552        return dd->send_egress_err_status_cnt[21];
3553}
3554
3555static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3556                                const struct cntr_entry *entry,
3557                                void *context, int vl, int mode, u64 data)
3558{
3559        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3560
3561        return dd->send_egress_err_status_cnt[20];
3562}
3563
3564static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3565                                const struct cntr_entry *entry,
3566                                void *context, int vl, int mode, u64 data)
3567{
3568        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3569
3570        return dd->send_egress_err_status_cnt[19];
3571}
3572
3573static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3574                                const struct cntr_entry *entry,
3575                                void *context, int vl, int mode, u64 data)
3576{
3577        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3578
3579        return dd->send_egress_err_status_cnt[18];
3580}
3581
3582static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3583                                const struct cntr_entry *entry,
3584                                void *context, int vl, int mode, u64 data)
3585{
3586        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3587
3588        return dd->send_egress_err_status_cnt[17];
3589}
3590
3591static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3592                                const struct cntr_entry *entry,
3593                                void *context, int vl, int mode, u64 data)
3594{
3595        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3596
3597        return dd->send_egress_err_status_cnt[16];
3598}
3599
3600static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3601                                           void *context, int vl, int mode,
3602                                           u64 data)
3603{
3604        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3605
3606        return dd->send_egress_err_status_cnt[15];
3607}
3608
3609static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3610                                                 void *context, int vl,
3611                                                 int mode, u64 data)
3612{
3613        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3614
3615        return dd->send_egress_err_status_cnt[14];
3616}
3617
3618static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3619                                               void *context, int vl, int mode,
3620                                               u64 data)
3621{
3622        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3623
3624        return dd->send_egress_err_status_cnt[13];
3625}
3626
3627static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3628                                        void *context, int vl, int mode,
3629                                        u64 data)
3630{
3631        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3632
3633        return dd->send_egress_err_status_cnt[12];
3634}
3635
3636static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3637                                const struct cntr_entry *entry,
3638                                void *context, int vl, int mode, u64 data)
3639{
3640        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3641
3642        return dd->send_egress_err_status_cnt[11];
3643}
3644
3645static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3646                                             void *context, int vl, int mode,
3647                                             u64 data)
3648{
3649        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3650
3651        return dd->send_egress_err_status_cnt[10];
3652}
3653
3654static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3655                                            void *context, int vl, int mode,
3656                                            u64 data)
3657{
3658        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3659
3660        return dd->send_egress_err_status_cnt[9];
3661}
3662
3663static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3664                                const struct cntr_entry *entry,
3665                                void *context, int vl, int mode, u64 data)
3666{
3667        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3668
3669        return dd->send_egress_err_status_cnt[8];
3670}
3671
3672static u64 access_tx_pio_launch_intf_parity_err_cnt(
3673                                const struct cntr_entry *entry,
3674                                void *context, int vl, int mode, u64 data)
3675{
3676        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3677
3678        return dd->send_egress_err_status_cnt[7];
3679}
3680
3681static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3682                                            void *context, int vl, int mode,
3683                                            u64 data)
3684{
3685        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3686
3687        return dd->send_egress_err_status_cnt[6];
3688}
3689
3690static u64 access_tx_incorrect_link_state_err_cnt(
3691                                const struct cntr_entry *entry,
3692                                void *context, int vl, int mode, u64 data)
3693{
3694        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3695
3696        return dd->send_egress_err_status_cnt[5];
3697}
3698
3699static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3700                                      void *context, int vl, int mode,
3701                                      u64 data)
3702{
3703        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3704
3705        return dd->send_egress_err_status_cnt[4];
3706}
3707
3708static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3709                                const struct cntr_entry *entry,
3710                                void *context, int vl, int mode, u64 data)
3711{
3712        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3713
3714        return dd->send_egress_err_status_cnt[3];
3715}
3716
3717static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3718                                            void *context, int vl, int mode,
3719                                            u64 data)
3720{
3721        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3722
3723        return dd->send_egress_err_status_cnt[2];
3724}
3725
3726static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3727                                const struct cntr_entry *entry,
3728                                void *context, int vl, int mode, u64 data)
3729{
3730        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3731
3732        return dd->send_egress_err_status_cnt[1];
3733}
3734
3735static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3736                                const struct cntr_entry *entry,
3737                                void *context, int vl, int mode, u64 data)
3738{
3739        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3740
3741        return dd->send_egress_err_status_cnt[0];
3742}
3743
3744/*
3745 * Software counters corresponding to each of the
3746 * error status bits within SendErrStatus
3747 */
3748static u64 access_send_csr_write_bad_addr_err_cnt(
3749                                const struct cntr_entry *entry,
3750                                void *context, int vl, int mode, u64 data)
3751{
3752        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3753
3754        return dd->send_err_status_cnt[2];
3755}
3756
3757static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3758                                                 void *context, int vl,
3759                                                 int mode, u64 data)
3760{
3761        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3762
3763        return dd->send_err_status_cnt[1];
3764}
3765
3766static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3767                                      void *context, int vl, int mode,
3768                                      u64 data)
3769{
3770        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3771
3772        return dd->send_err_status_cnt[0];
3773}
3774
3775/*
3776 * Software counters corresponding to each of the
3777 * error status bits within SendCtxtErrStatus
3778 */
3779static u64 access_pio_write_out_of_bounds_err_cnt(
3780                                const struct cntr_entry *entry,
3781                                void *context, int vl, int mode, u64 data)
3782{
3783        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3784
3785        return dd->sw_ctxt_err_status_cnt[4];
3786}
3787
3788static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3789                                             void *context, int vl, int mode,
3790                                             u64 data)
3791{
3792        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3793
3794        return dd->sw_ctxt_err_status_cnt[3];
3795}
3796
3797static u64 access_pio_write_crosses_boundary_err_cnt(
3798                                const struct cntr_entry *entry,
3799                                void *context, int vl, int mode, u64 data)
3800{
3801        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3802
3803        return dd->sw_ctxt_err_status_cnt[2];
3804}
3805
3806static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3807                                                void *context, int vl,
3808                                                int mode, u64 data)
3809{
3810        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3811
3812        return dd->sw_ctxt_err_status_cnt[1];
3813}
3814
3815static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3816                                               void *context, int vl, int mode,
3817                                               u64 data)
3818{
3819        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3820
3821        return dd->sw_ctxt_err_status_cnt[0];
3822}
3823
3824/*
3825 * Software counters corresponding to each of the
3826 * error status bits within SendDmaEngErrStatus
3827 */
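/*
 * SendDmaEngErrStatus is a per-engine register, but the software
 * counts below (bits 23..0) are kept once per device in
 * dd->sw_send_dma_eng_err_status_cnt[], so they aggregate across the
 * SDMA engines.
 */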
3828static u64 access_sdma_header_request_fifo_cor_err_cnt(
3829                                const struct cntr_entry *entry,
3830                                void *context, int vl, int mode, u64 data)
3831{
3832        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3833
3834        return dd->sw_send_dma_eng_err_status_cnt[23];
3835}
3836
3837static u64 access_sdma_header_storage_cor_err_cnt(
3838                                const struct cntr_entry *entry,
3839                                void *context, int vl, int mode, u64 data)
3840{
3841        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3842
3843        return dd->sw_send_dma_eng_err_status_cnt[22];
3844}
3845
3846static u64 access_sdma_packet_tracking_cor_err_cnt(
3847                                const struct cntr_entry *entry,
3848                                void *context, int vl, int mode, u64 data)
3849{
3850        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3851
3852        return dd->sw_send_dma_eng_err_status_cnt[21];
3853}
3854
3855static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3856                                            void *context, int vl, int mode,
3857                                            u64 data)
3858{
3859        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3860
3861        return dd->sw_send_dma_eng_err_status_cnt[20];
3862}
3863
3864static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3865                                              void *context, int vl, int mode,
3866                                              u64 data)
3867{
3868        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3869
3870        return dd->sw_send_dma_eng_err_status_cnt[19];
3871}
3872
3873static u64 access_sdma_header_request_fifo_unc_err_cnt(
3874                                const struct cntr_entry *entry,
3875                                void *context, int vl, int mode, u64 data)
3876{
3877        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3878
3879        return dd->sw_send_dma_eng_err_status_cnt[18];
3880}
3881
3882static u64 access_sdma_header_storage_unc_err_cnt(
3883                                const struct cntr_entry *entry,
3884                                void *context, int vl, int mode, u64 data)
3885{
3886        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3887
3888        return dd->sw_send_dma_eng_err_status_cnt[17];
3889}
3890
3891static u64 access_sdma_packet_tracking_unc_err_cnt(
3892                                const struct cntr_entry *entry,
3893                                void *context, int vl, int mode, u64 data)
3894{
3895        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3896
3897        return dd->sw_send_dma_eng_err_status_cnt[16];
3898}
3899
3900static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3901                                            void *context, int vl, int mode,
3902                                            u64 data)
3903{
3904        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3905
3906        return dd->sw_send_dma_eng_err_status_cnt[15];
3907}
3908
3909static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3910                                              void *context, int vl, int mode,
3911                                              u64 data)
3912{
3913        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3914
3915        return dd->sw_send_dma_eng_err_status_cnt[14];
3916}
3917
3918static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3919                                       void *context, int vl, int mode,
3920                                       u64 data)
3921{
3922        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3923
3924        return dd->sw_send_dma_eng_err_status_cnt[13];
3925}
3926
3927static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3928                                             void *context, int vl, int mode,
3929                                             u64 data)
3930{
3931        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3932
3933        return dd->sw_send_dma_eng_err_status_cnt[12];
3934}
3935
3936static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3937                                              void *context, int vl, int mode,
3938                                              u64 data)
3939{
3940        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3941
3942        return dd->sw_send_dma_eng_err_status_cnt[11];
3943}
3944
3945static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3946                                             void *context, int vl, int mode,
3947                                             u64 data)
3948{
3949        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3950
3951        return dd->sw_send_dma_eng_err_status_cnt[10];
3952}
3953
3954static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3955                                          void *context, int vl, int mode,
3956                                          u64 data)
3957{
3958        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3959
3960        return dd->sw_send_dma_eng_err_status_cnt[9];
3961}
3962
3963static u64 access_sdma_packet_desc_overflow_err_cnt(
3964                                const struct cntr_entry *entry,
3965                                void *context, int vl, int mode, u64 data)
3966{
3967        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3968
3969        return dd->sw_send_dma_eng_err_status_cnt[8];
3970}
3971
3972static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3973                                               void *context, int vl,
3974                                               int mode, u64 data)
3975{
3976        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3977
3978        return dd->sw_send_dma_eng_err_status_cnt[7];
3979}
3980
3981static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3982                                    void *context, int vl, int mode, u64 data)
3983{
3984        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3985
3986        return dd->sw_send_dma_eng_err_status_cnt[6];
3987}
3988
3989static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3990                                        void *context, int vl, int mode,
3991                                        u64 data)
3992{
3993        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3994
3995        return dd->sw_send_dma_eng_err_status_cnt[5];
3996}
3997
3998static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3999                                          void *context, int vl, int mode,
4000                                          u64 data)
4001{
4002        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4003
4004        return dd->sw_send_dma_eng_err_status_cnt[4];
4005}
4006
4007static u64 access_sdma_tail_out_of_bounds_err_cnt(
4008                                const struct cntr_entry *entry,
4009                                void *context, int vl, int mode, u64 data)
4010{
4011        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4012
4013        return dd->sw_send_dma_eng_err_status_cnt[3];
4014}
4015
4016static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
4017                                        void *context, int vl, int mode,
4018                                        u64 data)
4019{
4020        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4021
4022        return dd->sw_send_dma_eng_err_status_cnt[2];
4023}
4024
4025static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
4026                                            void *context, int vl, int mode,
4027                                            u64 data)
4028{
4029        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4030
4031        return dd->sw_send_dma_eng_err_status_cnt[1];
4032}
4033
4034static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
4035                                        void *context, int vl, int mode,
4036                                        u64 data)
4037{
4038        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4039
4040        return dd->sw_send_dma_eng_err_status_cnt[0];
4041}
4042
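/*
 * The DC receive error counter is the one accessor here that does more
 * than return an array element: a read fetches the CSR value via
 * read_write_csr() and adds the software-counted bypass packet errors,
 * saturating at CNTR_MAX; a write clears the software portion.
 */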
4043static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
4044                                 void *context, int vl, int mode,
4045                                 u64 data)
4046{
4047        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4048
4049        u64 val = 0;
4050        u64 csr = entry->csr;
4051
4052        val = read_write_csr(dd, csr, mode, data);
4053        if (mode == CNTR_MODE_R) {
4054                val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
4055                        CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
4056        } else if (mode == CNTR_MODE_W) {
4057                dd->sw_rcv_bypass_packet_errors = 0;
4058        } else {
4059                dd_dev_err(dd, "Invalid cntr register access mode\n");
4060                return 0;
4061        }
4062        return val;
4063}
4064
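/*
 * Generate an accessor for a per-CPU software port counter kept in
 * ppd->ibport_data.rvp.  For example, def_access_sw_cpu(rc_acks) emits
 * access_sw_cpu_rc_acks(), which hands the rc_acks per-CPU counter and
 * its z_rc_acks baseline to read_write_cpu().
 */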
4065#define def_access_sw_cpu(cntr) \
4066static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry,               \
4067                              void *context, int vl, int mode, u64 data)      \
4068{                                                                             \
4069        struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;        \
4070        return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr,       \
4071                              ppd->ibport_data.rvp.cntr, vl,                  \
4072                              mode, data);                                    \
4073}
4074
4075def_access_sw_cpu(rc_acks);
4076def_access_sw_cpu(rc_qacks);
4077def_access_sw_cpu(rc_delayed_comp);
4078
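/*
 * Generate an accessor for a per-port software IB counter kept in
 * ppd->ibport_data.rvp.  These counters are not maintained per VL, so a
 * per-VL query returns 0; otherwise the rvp.n_<cntr> field is handed to
 * read_write_sw().
 */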
4079#define def_access_ibp_counter(cntr) \
4080static u64 access_ibp_##cntr(const struct cntr_entry *entry,                  \
4081                                void *context, int vl, int mode, u64 data)    \
4082{                                                                             \
4083        struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;        \
4084                                                                              \
4085        if (vl != CNTR_INVALID_VL)                                            \
4086                return 0;                                                     \
4087                                                                              \
4088        return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr,        \
4089                             mode, data);                                     \
4090}
4091
4092def_access_ibp_counter(loop_pkts);
4093def_access_ibp_counter(rc_resends);
4094def_access_ibp_counter(rnr_naks);
4095def_access_ibp_counter(other_naks);
4096def_access_ibp_counter(rc_timeouts);
4097def_access_ibp_counter(pkt_drops);
4098def_access_ibp_counter(dmawait);
4099def_access_ibp_counter(rc_seqnak);
4100def_access_ibp_counter(rc_dupreq);
4101def_access_ibp_counter(rdma_seq);
4102def_access_ibp_counter(unaligned);
4103def_access_ibp_counter(seq_naks);
4104
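/*
 * Device counter table, indexed by the C_* device counter enum values.
 * Each entry names the counter, identifies the backing CSR (0 for
 * counters kept purely in software) and supplies the routine used to
 * access it.
 */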
4105static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
4106[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
4107[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
4108                        CNTR_NORMAL),
4109[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
4110                        CNTR_NORMAL),
4111[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
4112                        RCV_TID_FLOW_GEN_MISMATCH_CNT,
4113                        CNTR_NORMAL),
4114[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
4115                        CNTR_NORMAL),
4116[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4117                        RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4118[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4119                        CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4120[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4121                        CNTR_NORMAL),
4122[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4123                        CNTR_NORMAL),
4124[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4125                        CNTR_NORMAL),
4126[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4127                        CNTR_NORMAL),
4128[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4129                        CNTR_NORMAL),
4130[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4131                        CNTR_NORMAL),
4132[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4133                        CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4134[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4135                        CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4136[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4137                              CNTR_SYNTH),
4138[C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
4139                            access_dc_rcv_err_cnt),
4140[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4141                                 CNTR_SYNTH),
4142[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4143                                  CNTR_SYNTH),
4144[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4145                                  CNTR_SYNTH),
4146[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4147                                   DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4148[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4149                                  DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4150                                  CNTR_SYNTH),
4151[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4152                                DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4153[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4154                               CNTR_SYNTH),
4155[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4156                              CNTR_SYNTH),
4157[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4158                               CNTR_SYNTH),
4159[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4160                                 CNTR_SYNTH),
4161[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4162                                CNTR_SYNTH),
4163[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4164                                CNTR_SYNTH),
4165[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4166                               CNTR_SYNTH),
4167[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4168                                 CNTR_SYNTH | CNTR_VL),
4169[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4170                                CNTR_SYNTH | CNTR_VL),
4171[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4172[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4173                                 CNTR_SYNTH | CNTR_VL),
4174[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4175[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4176                                 CNTR_SYNTH | CNTR_VL),
4177[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4178                              CNTR_SYNTH),
4179[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4180                                 CNTR_SYNTH | CNTR_VL),
4181[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4182                                CNTR_SYNTH),
4183[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4184                                   CNTR_SYNTH | CNTR_VL),
4185[C_DC_TOTAL_CRC] =
4186        DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4187                         CNTR_SYNTH),
4188[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4189                                  CNTR_SYNTH),
4190[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4191                                  CNTR_SYNTH),
4192[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4193                                  CNTR_SYNTH),
4194[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4195                                  CNTR_SYNTH),
4196[C_DC_CRC_MULT_LN] =
4197        DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4198                         CNTR_SYNTH),
4199[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4200                                    CNTR_SYNTH),
4201[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4202                                    CNTR_SYNTH),
4203[C_DC_SEQ_CRC_CNT] =
4204        DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4205                         CNTR_SYNTH),
4206[C_DC_ESC0_ONLY_CNT] =
4207        DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4208                         CNTR_SYNTH),
4209[C_DC_ESC0_PLUS1_CNT] =
4210        DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4211                         CNTR_SYNTH),
4212[C_DC_ESC0_PLUS2_CNT] =
4213        DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4214                         CNTR_SYNTH),
4215[C_DC_REINIT_FROM_PEER_CNT] =
4216        DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4217                         CNTR_SYNTH),
4218[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4219                                  CNTR_SYNTH),
4220[C_DC_MISC_FLG_CNT] =
4221        DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4222                         CNTR_SYNTH),
4223[C_DC_PRF_GOOD_LTP_CNT] =
4224        DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4225[C_DC_PRF_ACCEPTED_LTP_CNT] =
4226        DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4227                         CNTR_SYNTH),
4228[C_DC_PRF_RX_FLIT_CNT] =
4229        DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4230[C_DC_PRF_TX_FLIT_CNT] =
4231        DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4232[C_DC_PRF_CLK_CNTR] =
4233        DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4234[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4235        DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4236[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4237        DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4238                         CNTR_SYNTH),
4239[C_DC_PG_STS_TX_SBE_CNT] =
4240        DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4241[C_DC_PG_STS_TX_MBE_CNT] =
4242        DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4243                         CNTR_SYNTH),
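/* driver software counters */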
4244[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4245                            access_sw_cpu_intr),
4246[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4247                            access_sw_cpu_rcv_limit),
4248[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4249                            access_sw_vtx_wait),
4250[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4251                            access_sw_pio_wait),
4252[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4253                            access_sw_pio_drain),
4254[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4255                            access_sw_kmem_wait),
4256[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4257                            access_sw_send_schedule),
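/* per-SDMA-engine counters */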
4258[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4259                                      SEND_DMA_DESC_FETCHED_CNT, 0,
4260                                      CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4261                                      dev_access_u32_csr),
4262[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4263                             CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4264                             access_sde_int_cnt),
4265[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4266                             CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4267                             access_sde_err_cnt),
4268[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4269                                  CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4270                                  access_sde_idle_int_cnt),
4271[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4272                                      CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4273                                      access_sde_progress_int_cnt),
4274/* MISC_ERR_STATUS */
4275[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4276                                CNTR_NORMAL,
4277                                access_misc_pll_lock_fail_err_cnt),
4278[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4279                                CNTR_NORMAL,
4280                                access_misc_mbist_fail_err_cnt),
4281[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4282                                CNTR_NORMAL,
4283                                access_misc_invalid_eep_cmd_err_cnt),
4284[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4285                                CNTR_NORMAL,
4286                                access_misc_efuse_done_parity_err_cnt),
4287[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4288                                CNTR_NORMAL,
4289                                access_misc_efuse_write_err_cnt),
4290[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4291                                0, CNTR_NORMAL,
4292                                access_misc_efuse_read_bad_addr_err_cnt),
4293[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4294                                CNTR_NORMAL,
4295                                access_misc_efuse_csr_parity_err_cnt),
4296[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4297                                CNTR_NORMAL,
4298                                access_misc_fw_auth_failed_err_cnt),
4299[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4300                                CNTR_NORMAL,
4301                                access_misc_key_mismatch_err_cnt),
4302[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4303                                CNTR_NORMAL,
4304                                access_misc_sbus_write_failed_err_cnt),
4305[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4306                                CNTR_NORMAL,
4307                                access_misc_csr_write_bad_addr_err_cnt),
4308[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4309                                CNTR_NORMAL,
4310                                access_misc_csr_read_bad_addr_err_cnt),
4311[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4312                                CNTR_NORMAL,
4313                                access_misc_csr_parity_err_cnt),
4314/* CceErrStatus */
4315[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4316                                CNTR_NORMAL,
4317                                access_sw_cce_err_status_aggregated_cnt),
4318[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4319                                CNTR_NORMAL,
4320                                access_cce_msix_csr_parity_err_cnt),
4321[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4322                                CNTR_NORMAL,
4323                                access_cce_int_map_unc_err_cnt),
4324[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4325                                CNTR_NORMAL,
4326                                access_cce_int_map_cor_err_cnt),
4327[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4328                                CNTR_NORMAL,
4329                                access_cce_msix_table_unc_err_cnt),
4330[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4331                                CNTR_NORMAL,
4332                                access_cce_msix_table_cor_err_cnt),
4333[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4334                                0, CNTR_NORMAL,
4335                                access_cce_rxdma_conv_fifo_parity_err_cnt),
4336[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4337                                0, CNTR_NORMAL,
4338                                access_cce_rcpl_async_fifo_parity_err_cnt),
4339[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4340                                CNTR_NORMAL,
4341                                access_cce_seg_write_bad_addr_err_cnt),
4342[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4343                                CNTR_NORMAL,
4344                                access_cce_seg_read_bad_addr_err_cnt),
4345[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4346                                CNTR_NORMAL,
4347                                access_la_triggered_cnt),
4348[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4349                                CNTR_NORMAL,
4350                                access_cce_trgt_cpl_timeout_err_cnt),
4351[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4352                                CNTR_NORMAL,
4353                                access_pcic_receive_parity_err_cnt),
4354[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4355                                CNTR_NORMAL,
4356                                access_pcic_transmit_back_parity_err_cnt),
4357[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4358                                0, CNTR_NORMAL,
4359                                access_pcic_transmit_front_parity_err_cnt),
4360[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4361                                CNTR_NORMAL,
4362                                access_pcic_cpl_dat_q_unc_err_cnt),
4363[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4364                                CNTR_NORMAL,
4365                                access_pcic_cpl_hd_q_unc_err_cnt),
4366[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4367                                CNTR_NORMAL,
4368                                access_pcic_post_dat_q_unc_err_cnt),
4369[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4370                                CNTR_NORMAL,
4371                                access_pcic_post_hd_q_unc_err_cnt),
4372[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4373                                CNTR_NORMAL,
4374                                access_pcic_retry_sot_mem_unc_err_cnt),
4375[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4376                                CNTR_NORMAL,
4377                                access_pcic_retry_mem_unc_err),
4378[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4379                                CNTR_NORMAL,
4380                                access_pcic_n_post_dat_q_parity_err_cnt),
4381[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4382                                CNTR_NORMAL,
4383                                access_pcic_n_post_h_q_parity_err_cnt),
4384[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4385                                CNTR_NORMAL,
4386                                access_pcic_cpl_dat_q_cor_err_cnt),
4387[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4388                                CNTR_NORMAL,
4389                                access_pcic_cpl_hd_q_cor_err_cnt),
4390[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4391                                CNTR_NORMAL,
4392                                access_pcic_post_dat_q_cor_err_cnt),
4393[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4394                                CNTR_NORMAL,
4395                                access_pcic_post_hd_q_cor_err_cnt),
4396[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4397                                CNTR_NORMAL,
4398                                access_pcic_retry_sot_mem_cor_err_cnt),
4399[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4400                                CNTR_NORMAL,
4401                                access_pcic_retry_mem_cor_err_cnt),
4402[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4403                                "CceCli1AsyncFifoDbgParityError", 0, 0,
4404                                CNTR_NORMAL,
4405                                access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4406[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4407                                "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4408                                CNTR_NORMAL,
4409                                access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4410                                ),
4411[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4412                        "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4413                        CNTR_NORMAL,
4414                        access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4415[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4416                        "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4417                        CNTR_NORMAL,
4418                        access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4419[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4420                        0, CNTR_NORMAL,
4421                        access_cce_cli2_async_fifo_parity_err_cnt),
4422[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4423                        CNTR_NORMAL,
4424                        access_cce_csr_cfg_bus_parity_err_cnt),
4425[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4426                        0, CNTR_NORMAL,
4427                        access_cce_cli0_async_fifo_parity_err_cnt),
4428[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4429                        CNTR_NORMAL,
4430                        access_cce_rspd_data_parity_err_cnt),
4431[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4432                        CNTR_NORMAL,
4433                        access_cce_trgt_access_err_cnt),
4434[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4435                        0, CNTR_NORMAL,
4436                        access_cce_trgt_async_fifo_parity_err_cnt),
4437[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4438                        CNTR_NORMAL,
4439                        access_cce_csr_write_bad_addr_err_cnt),
4440[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4441                        CNTR_NORMAL,
4442                        access_cce_csr_read_bad_addr_err_cnt),
4443[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4444                        CNTR_NORMAL,
4445                        access_ccs_csr_parity_err_cnt),
4446
4447/* RcvErrStatus */
4448[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4449                        CNTR_NORMAL,
4450                        access_rx_csr_parity_err_cnt),
4451[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4452                        CNTR_NORMAL,
4453                        access_rx_csr_write_bad_addr_err_cnt),
4454[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4455                        CNTR_NORMAL,
4456                        access_rx_csr_read_bad_addr_err_cnt),
4457[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4458                        CNTR_NORMAL,
4459                        access_rx_dma_csr_unc_err_cnt),
4460[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4461                        CNTR_NORMAL,
4462                        access_rx_dma_dq_fsm_encoding_err_cnt),
4463[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4464                        CNTR_NORMAL,
4465                        access_rx_dma_eq_fsm_encoding_err_cnt),
4466[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4467                        CNTR_NORMAL,
4468                        access_rx_dma_csr_parity_err_cnt),
4469[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4470                        CNTR_NORMAL,
4471                        access_rx_rbuf_data_cor_err_cnt),
4472[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4473                        CNTR_NORMAL,
4474                        access_rx_rbuf_data_unc_err_cnt),
4475[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4476                        CNTR_NORMAL,
4477                        access_rx_dma_data_fifo_rd_cor_err_cnt),
4478[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4479                        CNTR_NORMAL,
4480                        access_rx_dma_data_fifo_rd_unc_err_cnt),
4481[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4482                        CNTR_NORMAL,
4483                        access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4484[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4485                        CNTR_NORMAL,
4486                        access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4487[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4488                        CNTR_NORMAL,
4489                        access_rx_rbuf_desc_part2_cor_err_cnt),
4490[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4491                        CNTR_NORMAL,
4492                        access_rx_rbuf_desc_part2_unc_err_cnt),
4493[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4494                        CNTR_NORMAL,
4495                        access_rx_rbuf_desc_part1_cor_err_cnt),
4496[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4497                        CNTR_NORMAL,
4498                        access_rx_rbuf_desc_part1_unc_err_cnt),
4499[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4500                        CNTR_NORMAL,
4501                        access_rx_hq_intr_fsm_err_cnt),
4502[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4503                        CNTR_NORMAL,
4504                        access_rx_hq_intr_csr_parity_err_cnt),
4505[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4506                        CNTR_NORMAL,
4507                        access_rx_lookup_csr_parity_err_cnt),
4508[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4509                        CNTR_NORMAL,
4510                        access_rx_lookup_rcv_array_cor_err_cnt),
4511[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4512                        CNTR_NORMAL,
4513                        access_rx_lookup_rcv_array_unc_err_cnt),
4514[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4515                        0, CNTR_NORMAL,
4516                        access_rx_lookup_des_part2_parity_err_cnt),
4517[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4518                        0, CNTR_NORMAL,
4519                        access_rx_lookup_des_part1_unc_cor_err_cnt),
4520[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4521                        CNTR_NORMAL,
4522                        access_rx_lookup_des_part1_unc_err_cnt),
4523[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4524                        CNTR_NORMAL,
4525                        access_rx_rbuf_next_free_buf_cor_err_cnt),
4526[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4527                        CNTR_NORMAL,
4528                        access_rx_rbuf_next_free_buf_unc_err_cnt),
4529[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4530                        "RxRbufFlInitWrAddrParityErr", 0, 0,
4531                        CNTR_NORMAL,
4532                        access_rbuf_fl_init_wr_addr_parity_err_cnt),
4533[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4534                        0, CNTR_NORMAL,
4535                        access_rx_rbuf_fl_initdone_parity_err_cnt),
4536[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4537                        0, CNTR_NORMAL,
4538                        access_rx_rbuf_fl_write_addr_parity_err_cnt),
4539[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4540                        CNTR_NORMAL,
4541                        access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4542[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4543                        CNTR_NORMAL,
4544                        access_rx_rbuf_empty_err_cnt),
4545[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4546                        CNTR_NORMAL,
4547                        access_rx_rbuf_full_err_cnt),
4548[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4549                        CNTR_NORMAL,
4550                        access_rbuf_bad_lookup_err_cnt),
4551[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4552                        CNTR_NORMAL,
4553                        access_rbuf_ctx_id_parity_err_cnt),
4554[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4555                        CNTR_NORMAL,
4556                        access_rbuf_csr_qeopdw_parity_err_cnt),
4557[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4558                        "RxRbufCsrQNumOfPktParityErr", 0, 0,
4559                        CNTR_NORMAL,
4560                        access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4561[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4562                        "RxRbufCsrQTlPtrParityErr", 0, 0,
4563                        CNTR_NORMAL,
4564                        access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4565[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4566                        0, CNTR_NORMAL,
4567                        access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4568[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4569                        0, CNTR_NORMAL,
4570                        access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4571[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4572                        0, 0, CNTR_NORMAL,
4573                        access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4574[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4575                        0, CNTR_NORMAL,
4576                        access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4577[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4578                        "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4579                        CNTR_NORMAL,
4580                        access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4581[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4582                        0, CNTR_NORMAL,
4583                        access_rx_rbuf_block_list_read_cor_err_cnt),
4584[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4585                        0, CNTR_NORMAL,
4586                        access_rx_rbuf_block_list_read_unc_err_cnt),
4587[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4588                        CNTR_NORMAL,
4589                        access_rx_rbuf_lookup_des_cor_err_cnt),
4590[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4591                        CNTR_NORMAL,
4592                        access_rx_rbuf_lookup_des_unc_err_cnt),
4593[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4594                        "RxRbufLookupDesRegUncCorErr", 0, 0,
4595                        CNTR_NORMAL,
4596                        access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4597[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4598                        CNTR_NORMAL,
4599                        access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4600[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4601                        CNTR_NORMAL,
4602                        access_rx_rbuf_free_list_cor_err_cnt),
4603[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4604                        CNTR_NORMAL,
4605                        access_rx_rbuf_free_list_unc_err_cnt),
4606[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4607                        CNTR_NORMAL,
4608                        access_rx_rcv_fsm_encoding_err_cnt),
4609[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4610                        CNTR_NORMAL,
4611                        access_rx_dma_flag_cor_err_cnt),
4612[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4613                        CNTR_NORMAL,
4614                        access_rx_dma_flag_unc_err_cnt),
4615[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4616                        CNTR_NORMAL,
4617                        access_rx_dc_sop_eop_parity_err_cnt),
4618[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4619                        CNTR_NORMAL,
4620                        access_rx_rcv_csr_parity_err_cnt),
4621[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4622                        CNTR_NORMAL,
4623                        access_rx_rcv_qp_map_table_cor_err_cnt),
4624[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4625                        CNTR_NORMAL,
4626                        access_rx_rcv_qp_map_table_unc_err_cnt),
4627[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4628                        CNTR_NORMAL,
4629                        access_rx_rcv_data_cor_err_cnt),
4630[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4631                        CNTR_NORMAL,
4632                        access_rx_rcv_data_unc_err_cnt),
4633[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4634                        CNTR_NORMAL,
4635                        access_rx_rcv_hdr_cor_err_cnt),
4636[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4637                        CNTR_NORMAL,
4638                        access_rx_rcv_hdr_unc_err_cnt),
4639[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4640                        CNTR_NORMAL,
4641                        access_rx_dc_intf_parity_err_cnt),
4642[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4643                        CNTR_NORMAL,
4644                        access_rx_dma_csr_cor_err_cnt),
4645/* SendPioErrStatus */
4646[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4647                        CNTR_NORMAL,
4648                        access_pio_pec_sop_head_parity_err_cnt),
4649[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4650                        CNTR_NORMAL,
4651                        access_pio_pcc_sop_head_parity_err_cnt),
4652[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4653                        0, 0, CNTR_NORMAL,
4654                        access_pio_last_returned_cnt_parity_err_cnt),
4655[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4656                        0, CNTR_NORMAL,
4657                        access_pio_current_free_cnt_parity_err_cnt),
4658[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4659                        CNTR_NORMAL,
4660                        access_pio_reserved_31_err_cnt),
4661[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4662                        CNTR_NORMAL,
4663                        access_pio_reserved_30_err_cnt),
4664[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4665                        CNTR_NORMAL,
4666                        access_pio_ppmc_sop_len_err_cnt),
4667[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4668                        CNTR_NORMAL,
4669                        access_pio_ppmc_bqc_mem_parity_err_cnt),
4670[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4671                        CNTR_NORMAL,
4672                        access_pio_vl_fifo_parity_err_cnt),
4673[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4674                        CNTR_NORMAL,
4675                        access_pio_vlf_sop_parity_err_cnt),
4676[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4677                        CNTR_NORMAL,
4678                        access_pio_vlf_v1_len_parity_err_cnt),
4679[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4680                        CNTR_NORMAL,
4681                        access_pio_block_qw_count_parity_err_cnt),
4682[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4683                        CNTR_NORMAL,
4684                        access_pio_write_qw_valid_parity_err_cnt),
4685[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4686                        CNTR_NORMAL,
4687                        access_pio_state_machine_err_cnt),
4688[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4689                        CNTR_NORMAL,
4690                        access_pio_write_data_parity_err_cnt),
4691[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4692                        CNTR_NORMAL,
4693                        access_pio_host_addr_mem_cor_err_cnt),
4694[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4695                        CNTR_NORMAL,
4696                        access_pio_host_addr_mem_unc_err_cnt),
4697[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4698                        CNTR_NORMAL,
4699                        access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4700[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4701                        CNTR_NORMAL,
4702                        access_pio_init_sm_in_err_cnt),
4703[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4704                        CNTR_NORMAL,
4705                        access_pio_ppmc_pbl_fifo_err_cnt),
4706[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4707                        0, CNTR_NORMAL,
4708                        access_pio_credit_ret_fifo_parity_err_cnt),
4709[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4710                        CNTR_NORMAL,
4711                        access_pio_v1_len_mem_bank1_cor_err_cnt),
4712[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4713                        CNTR_NORMAL,
4714                        access_pio_v1_len_mem_bank0_cor_err_cnt),
4715[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4716                        CNTR_NORMAL,
4717                        access_pio_v1_len_mem_bank1_unc_err_cnt),
4718[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4719                        CNTR_NORMAL,
4720                        access_pio_v1_len_mem_bank0_unc_err_cnt),
4721[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4722                        CNTR_NORMAL,
4723                        access_pio_sm_pkt_reset_parity_err_cnt),
4724[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4725                        CNTR_NORMAL,
4726                        access_pio_pkt_evict_fifo_parity_err_cnt),
4727[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4728                        "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4729                        CNTR_NORMAL,
4730                        access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4731[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4732                        CNTR_NORMAL,
4733                        access_pio_sbrdctl_crrel_parity_err_cnt),
4734[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4735                        CNTR_NORMAL,
4736                        access_pio_pec_fifo_parity_err_cnt),
4737[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4738                        CNTR_NORMAL,
4739                        access_pio_pcc_fifo_parity_err_cnt),
4740[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4741                        CNTR_NORMAL,
4742                        access_pio_sb_mem_fifo1_err_cnt),
4743[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4744                        CNTR_NORMAL,
4745                        access_pio_sb_mem_fifo0_err_cnt),
4746[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4747                        CNTR_NORMAL,
4748                        access_pio_csr_parity_err_cnt),
4749[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4750                        CNTR_NORMAL,
4751                        access_pio_write_addr_parity_err_cnt),
4752[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4753                        CNTR_NORMAL,
4754                        access_pio_write_bad_ctxt_err_cnt),
4755/* SendDmaErrStatus */
4756[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4757                        0, CNTR_NORMAL,
4758                        access_sdma_pcie_req_tracking_cor_err_cnt),
4759[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4760                        0, CNTR_NORMAL,
4761                        access_sdma_pcie_req_tracking_unc_err_cnt),
4762[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4763                        CNTR_NORMAL,
4764                        access_sdma_csr_parity_err_cnt),
4765[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4766                        CNTR_NORMAL,
4767                        access_sdma_rpy_tag_err_cnt),
4768/* SendEgressErrStatus */
4769[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4770                        CNTR_NORMAL,
4771                        access_tx_read_pio_memory_csr_unc_err_cnt),
4772[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4773                        0, CNTR_NORMAL,
4774                        access_tx_read_sdma_memory_csr_err_cnt),
4775[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4776                        CNTR_NORMAL,
4777                        access_tx_egress_fifo_cor_err_cnt),
4778[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4779                        CNTR_NORMAL,
4780                        access_tx_read_pio_memory_cor_err_cnt),
4781[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4782                        CNTR_NORMAL,
4783                        access_tx_read_sdma_memory_cor_err_cnt),
4784[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4785                        CNTR_NORMAL,
4786                        access_tx_sb_hdr_cor_err_cnt),
4787[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4788                        CNTR_NORMAL,
4789                        access_tx_credit_overrun_err_cnt),
4790[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4791                        CNTR_NORMAL,
4792                        access_tx_launch_fifo8_cor_err_cnt),
4793[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4794                        CNTR_NORMAL,
4795                        access_tx_launch_fifo7_cor_err_cnt),
4796[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4797                        CNTR_NORMAL,
4798                        access_tx_launch_fifo6_cor_err_cnt),
4799[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4800                        CNTR_NORMAL,
4801                        access_tx_launch_fifo5_cor_err_cnt),
4802[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4803                        CNTR_NORMAL,
4804                        access_tx_launch_fifo4_cor_err_cnt),
4805[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4806                        CNTR_NORMAL,
4807                        access_tx_launch_fifo3_cor_err_cnt),
4808[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4809                        CNTR_NORMAL,
4810                        access_tx_launch_fifo2_cor_err_cnt),
4811[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4812                        CNTR_NORMAL,
4813                        access_tx_launch_fifo1_cor_err_cnt),
4814[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4815                        CNTR_NORMAL,
4816                        access_tx_launch_fifo0_cor_err_cnt),
4817[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4818                        CNTR_NORMAL,
4819                        access_tx_credit_return_vl_err_cnt),
4820[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4821                        CNTR_NORMAL,
4822                        access_tx_hcrc_insertion_err_cnt),
4823[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4824                        CNTR_NORMAL,
4825                        access_tx_egress_fifo_unc_err_cnt),
4826[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4827                        CNTR_NORMAL,
4828                        access_tx_read_pio_memory_unc_err_cnt),
4829[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4830                        CNTR_NORMAL,
4831                        access_tx_read_sdma_memory_unc_err_cnt),
4832[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4833                        CNTR_NORMAL,
4834                        access_tx_sb_hdr_unc_err_cnt),
4835[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4836                        CNTR_NORMAL,
4837                        access_tx_credit_return_partiy_err_cnt),
4838[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4839                        0, 0, CNTR_NORMAL,
4840                        access_tx_launch_fifo8_unc_or_parity_err_cnt),
4841[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4842                        0, 0, CNTR_NORMAL,
4843                        access_tx_launch_fifo7_unc_or_parity_err_cnt),
4844[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4845                        0, 0, CNTR_NORMAL,
4846                        access_tx_launch_fifo6_unc_or_parity_err_cnt),
4847[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4848                        0, 0, CNTR_NORMAL,
4849                        access_tx_launch_fifo5_unc_or_parity_err_cnt),
4850[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4851                        0, 0, CNTR_NORMAL,
4852                        access_tx_launch_fifo4_unc_or_parity_err_cnt),
4853[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4854                        0, 0, CNTR_NORMAL,
4855                        access_tx_launch_fifo3_unc_or_parity_err_cnt),
4856[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4857                        0, 0, CNTR_NORMAL,
4858                        access_tx_launch_fifo2_unc_or_parity_err_cnt),
4859[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4860                        0, 0, CNTR_NORMAL,
4861                        access_tx_launch_fifo1_unc_or_parity_err_cnt),
4862[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4863                        0, 0, CNTR_NORMAL,
4864                        access_tx_launch_fifo0_unc_or_parity_err_cnt),
4865[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4866                        0, 0, CNTR_NORMAL,
4867                        access_tx_sdma15_disallowed_packet_err_cnt),
4868[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4869                        0, 0, CNTR_NORMAL,
4870                        access_tx_sdma14_disallowed_packet_err_cnt),
4871[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4872                        0, 0, CNTR_NORMAL,
4873                        access_tx_sdma13_disallowed_packet_err_cnt),
4874[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4875                        0, 0, CNTR_NORMAL,
4876                        access_tx_sdma12_disallowed_packet_err_cnt),
4877[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4878                        0, 0, CNTR_NORMAL,
4879                        access_tx_sdma11_disallowed_packet_err_cnt),
4880[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4881                        0, 0, CNTR_NORMAL,
4882                        access_tx_sdma10_disallowed_packet_err_cnt),
4883[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4884                        0, 0, CNTR_NORMAL,
4885                        access_tx_sdma9_disallowed_packet_err_cnt),
4886[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4887                        0, 0, CNTR_NORMAL,
4888                        access_tx_sdma8_disallowed_packet_err_cnt),
4889[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4890                        0, 0, CNTR_NORMAL,
4891                        access_tx_sdma7_disallowed_packet_err_cnt),
4892[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4893                        0, 0, CNTR_NORMAL,
4894                        access_tx_sdma6_disallowed_packet_err_cnt),
4895[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4896                        0, 0, CNTR_NORMAL,
4897                        access_tx_sdma5_disallowed_packet_err_cnt),
4898[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4899                        0, 0, CNTR_NORMAL,
4900                        access_tx_sdma4_disallowed_packet_err_cnt),
4901[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4902                        0, 0, CNTR_NORMAL,
4903                        access_tx_sdma3_disallowed_packet_err_cnt),
4904[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4905                        0, 0, CNTR_NORMAL,
4906                        access_tx_sdma2_disallowed_packet_err_cnt),
4907[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4908                        0, 0, CNTR_NORMAL,
4909                        access_tx_sdma1_disallowed_packet_err_cnt),
4910[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4911                        0, 0, CNTR_NORMAL,
4912                        access_tx_sdma0_disallowed_packet_err_cnt),
4913[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4914                        CNTR_NORMAL,
4915                        access_tx_config_parity_err_cnt),
4916[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4917                        CNTR_NORMAL,
4918                        access_tx_sbrd_ctl_csr_parity_err_cnt),
4919[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4920                        CNTR_NORMAL,
4921                        access_tx_launch_csr_parity_err_cnt),
4922[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4923                        CNTR_NORMAL,
4924                        access_tx_illegal_vl_err_cnt),
4925[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4926                        "TxSbrdCtlStateMachineParityErr", 0, 0,
4927                        CNTR_NORMAL,
4928                        access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4929[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4930                        CNTR_NORMAL,
4931                        access_egress_reserved_10_err_cnt),
4932[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4933                        CNTR_NORMAL,
4934                        access_egress_reserved_9_err_cnt),
4935[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4936                        0, 0, CNTR_NORMAL,
4937                        access_tx_sdma_launch_intf_parity_err_cnt),
4938[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4939                        CNTR_NORMAL,
4940                        access_tx_pio_launch_intf_parity_err_cnt),
4941[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4942                        CNTR_NORMAL,
4943                        access_egress_reserved_6_err_cnt),
4944[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4945                        CNTR_NORMAL,
4946                        access_tx_incorrect_link_state_err_cnt),
4947[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4948                        CNTR_NORMAL,
4949                        access_tx_linkdown_err_cnt),
4950[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4951                        "EgressFifoUnderrunOrParityErr", 0, 0,
4952                        CNTR_NORMAL,
4953                        access_tx_egress_fifi_underrun_or_parity_err_cnt),
4954[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4955                        CNTR_NORMAL,
4956                        access_egress_reserved_2_err_cnt),
4957[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4958                        CNTR_NORMAL,
4959                        access_tx_pkt_integrity_mem_unc_err_cnt),
4960[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4961                        CNTR_NORMAL,
4962                        access_tx_pkt_integrity_mem_cor_err_cnt),
4963/* SendErrStatus */
4964[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4965                        CNTR_NORMAL,
4966                        access_send_csr_write_bad_addr_err_cnt),
4967[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4968                        CNTR_NORMAL,
4969                        access_send_csr_read_bad_addr_err_cnt),
4970[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4971                        CNTR_NORMAL,
4972                        access_send_csr_parity_cnt),
4973/* SendCtxtErrStatus */
4974[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4975                        CNTR_NORMAL,
4976                        access_pio_write_out_of_bounds_err_cnt),
4977[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4978                        CNTR_NORMAL,
4979                        access_pio_write_overflow_err_cnt),
4980[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4981                        0, 0, CNTR_NORMAL,
4982                        access_pio_write_crosses_boundary_err_cnt),
4983[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4984                        CNTR_NORMAL,
4985                        access_pio_disallowed_packet_err_cnt),
4986[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4987                        CNTR_NORMAL,
4988                        access_pio_inconsistent_sop_err_cnt),
4989/* SendDmaEngErrStatus */
4990[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4991                        0, 0, CNTR_NORMAL,
4992                        access_sdma_header_request_fifo_cor_err_cnt),
4993[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4994                        CNTR_NORMAL,
4995                        access_sdma_header_storage_cor_err_cnt),
4996[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4997                        CNTR_NORMAL,
4998                        access_sdma_packet_tracking_cor_err_cnt),
4999[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
5000                        CNTR_NORMAL,
5001                        access_sdma_assembly_cor_err_cnt),
5002[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
5003                        CNTR_NORMAL,
5004                        access_sdma_desc_table_cor_err_cnt),
5005[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
5006                        0, 0, CNTR_NORMAL,
5007                        access_sdma_header_request_fifo_unc_err_cnt),
5008[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
5009                        CNTR_NORMAL,
5010                        access_sdma_header_storage_unc_err_cnt),
5011[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
5012                        CNTR_NORMAL,
5013                        access_sdma_packet_tracking_unc_err_cnt),
5014[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
5015                        CNTR_NORMAL,
5016                        access_sdma_assembly_unc_err_cnt),
5017[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
5018                        CNTR_NORMAL,
5019                        access_sdma_desc_table_unc_err_cnt),
5020[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
5021                        CNTR_NORMAL,
5022                        access_sdma_timeout_err_cnt),
5023[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
5024                        CNTR_NORMAL,
5025                        access_sdma_header_length_err_cnt),
5026[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
5027                        CNTR_NORMAL,
5028                        access_sdma_header_address_err_cnt),
5029[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
5030                        CNTR_NORMAL,
5031                        access_sdma_header_select_err_cnt),
5032[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
5033                        CNTR_NORMAL,
5034                        access_sdma_reserved_9_err_cnt),
5035[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
5036                        CNTR_NORMAL,
5037                        access_sdma_packet_desc_overflow_err_cnt),
5038[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
5039                        CNTR_NORMAL,
5040                        access_sdma_length_mismatch_err_cnt),
5041[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
5042                        CNTR_NORMAL,
5043                        access_sdma_halt_err_cnt),
5044[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
5045                        CNTR_NORMAL,
5046                        access_sdma_mem_read_err_cnt),
5047[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
5048                        CNTR_NORMAL,
5049                        access_sdma_first_desc_err_cnt),
5050[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
5051                        CNTR_NORMAL,
5052                        access_sdma_tail_out_of_bounds_err_cnt),
5053[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
5054                        CNTR_NORMAL,
5055                        access_sdma_too_long_err_cnt),
5056[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
5057                        CNTR_NORMAL,
5058                        access_sdma_gen_mismatch_err_cnt),
5059[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
5060                        CNTR_NORMAL,
5061                        access_sdma_wrong_dw_err_cnt),
5062};
5063
5064static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
5065[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
5066                        CNTR_NORMAL),
5067[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
5068                        CNTR_NORMAL),
5069[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
5070                        CNTR_NORMAL),
5071[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
5072                        CNTR_NORMAL),
5073[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
5074                        CNTR_NORMAL),
5075[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
5076                        CNTR_NORMAL),
5077[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
5078                        CNTR_NORMAL),
5079[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
5080[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
5081[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
5082[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
5083                                      CNTR_SYNTH | CNTR_VL),
5084[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
5085                                     CNTR_SYNTH | CNTR_VL),
5086[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
5087                                      CNTR_SYNTH | CNTR_VL),
5088[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
5089[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
5090[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5091                             access_sw_link_dn_cnt),
5092[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5093                           access_sw_link_up_cnt),
5094[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
5095                                 access_sw_unknown_frame_cnt),
5096[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5097                             access_sw_xmit_discards),
5098[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
5099                                CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
5100                                access_sw_xmit_discards),
5101[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
5102                                 access_xmit_constraint_errs),
5103[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
5104                                access_rcv_constraint_errs),
5105[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
5106[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
5107[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
5108[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
5109[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
5110[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
5111[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
5112[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
5113[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
5114[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
5115[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
5116[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
5117[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
5118                               access_sw_cpu_rc_acks),
5119[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
5120                                access_sw_cpu_rc_qacks),
5121[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
5122                                       access_sw_cpu_rc_delayed_comp),
5123[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5124[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5125[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5126[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5127[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5128[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5129[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5130[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5131[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5132[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5133[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5134[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5135[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5136[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5137[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5138[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5139[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5140[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5141[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5142[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5143[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5144[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5145[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5146[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5147[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5148[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5149[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5150[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5151[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5152[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5153[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5154[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5155[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5156[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5157[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5158[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5159[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5160[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5161[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5162[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5163[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5164[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5165[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5166[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5167[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5168[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5169[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5170[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5171[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5172[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5173[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5174[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5175[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5176[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5177[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5178[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5179[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5180[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5181[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5182[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5183[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5184[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5185[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5186[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5187[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5188[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5189[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5190[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5191[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5192[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5193[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5194[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5195[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5196[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5197[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5198[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5199[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5200[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5201[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5202[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5203};
5204
5205/* ======================================================================== */
5206
5207/* return true if this is chip revision A */
5208int is_ax(struct hfi1_devdata *dd)
5209{
5210        u8 chip_rev_minor =
5211                dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5212                        & CCE_REVISION_CHIP_REV_MINOR_MASK;
5213        return (chip_rev_minor & 0xf0) == 0;
5214}
5215
5216/* return true if this is chip revision B */
5217int is_bx(struct hfi1_devdata *dd)
5218{
5219        u8 chip_rev_minor =
5220                dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5221                        & CCE_REVISION_CHIP_REV_MINOR_MASK;
5222        return (chip_rev_minor & 0xF0) == 0x10;
5223}
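
/*
 * Illustrative note (not part of the driver logic): the checks above read
 * the chip step from the high nibble of the minor revision field.  For
 * example, a minor value of 0x01 gives (0x01 & 0xf0) == 0x00, so is_ax()
 * returns true; a minor value of 0x10 gives (0x10 & 0xf0) == 0x10, so
 * is_bx() returns true.
 */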
5224
5225/*
5226 * Append string s to buffer buf.  Arguments curp and lenp are the current
5227 * position and remaining length, respectively.
5228 *
5229 * Return 0 on success, 1 if out of room.
5230 */
5231static int append_str(char *buf, char **curp, int *lenp, const char *s)
5232{
5233        char *p = *curp;
5234        int len = *lenp;
5235        int result = 0; /* success */
5236        char c;
5237
5238        /* add a comma if this is not the first entry in the buffer */
5239        if (p != buf) {
5240                if (len == 0) {
5241                        result = 1; /* out of room */
5242                        goto done;
5243                }
5244                *p++ = ',';
5245                len--;
5246        }
5247
5248        /* copy the string */
5249        while ((c = *s++) != 0) {
5250                if (len == 0) {
5251                        result = 1; /* out of room */
5252                        goto done;
5253                }
5254                *p++ = c;
5255                len--;
5256        }
5257
5258done:
5259        /* write return values */
5260        *curp = p;
5261        *lenp = len;
5262
5263        return result;
5264}
5265
5266/*
5267 * Using the given flag table, print a comma separated string into
5268 * the buffer.  End in '*' if the buffer is too short.
5269 */
5270static char *flag_string(char *buf, int buf_len, u64 flags,
5271                         struct flag_table *table, int table_size)
5272{
5273        char extra[32];
5274        char *p = buf;
5275        int len = buf_len;
5276        int no_room = 0;
5277        int i;
5278
5279        /* make sure there are at least 2 bytes so we can form "*" */
5280        if (len < 2)
5281                return "";
5282
5283        len--;  /* leave room for a nul */
5284        for (i = 0; i < table_size; i++) {
5285                if (flags & table[i].flag) {
5286                        no_room = append_str(buf, &p, &len, table[i].str);
5287                        if (no_room)
5288                                break;
5289                        flags &= ~table[i].flag;
5290                }
5291        }
5292
5293        /* any undocumented bits left? */
5294        if (!no_room && flags) {
5295                snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5296                no_room = append_str(buf, &p, &len, extra);
5297        }
5298
5299        /* add '*' if we ran out of room */
5300        if (no_room) {
5301                /* may need to back up to add space for a '*' */
5302                if (len == 0)
5303                        --p;
5304                *p++ = '*';
5305        }
5306
5307        /* add final nul - space already allocated above */
5308        *p = 0;
5309        return buf;
5310}
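
/*
 * Usage sketch for flag_string() -- the table and values below are
 * hypothetical and for illustration only:
 *
 *	static struct flag_table demo_flags[] = {
 *		{ .flag = 0x1ull, .str = "ErrA" },
 *		{ .flag = 0x4ull, .str = "ErrB" }
 *	};
 *	char buf[32];
 *
 *	flag_string(buf, sizeof(buf), 0x7, demo_flags,
 *		    ARRAY_SIZE(demo_flags));
 *
 * would fill buf with "ErrA,ErrB,bits 0x2": known bits are named, any
 * leftover bits are reported via the "bits 0x..." suffix, and a trailing
 * '*' is appended only when the buffer runs out of room.
 */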
5311
5312/* first 8 CCE error interrupt source names */
5313static const char * const cce_misc_names[] = {
5314        "CceErrInt",            /* 0 */
5315        "RxeErrInt",            /* 1 */
5316        "MiscErrInt",           /* 2 */
5317        "Reserved3",            /* 3 */
5318        "PioErrInt",            /* 4 */
5319        "SDmaErrInt",           /* 5 */
5320        "EgressErrInt",         /* 6 */
5321        "TxeErrInt"             /* 7 */
5322};
5323
5324/*
5325 * Return the miscellaneous error interrupt name.
5326 */
5327static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5328{
5329        if (source < ARRAY_SIZE(cce_misc_names))
5330                strncpy(buf, cce_misc_names[source], bsize);
5331        else
5332                snprintf(buf, bsize, "Reserved%u",
5333                         source + IS_GENERAL_ERR_START);
5334
5335        return buf;
5336}
5337
5338/*
5339 * Return the SDMA engine error interrupt name.
5340 */
5341static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5342{
5343        snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5344        return buf;
5345}
5346
5347/*
5348 * Return the send context error interrupt name.
5349 */
5350static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5351{
5352        snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5353        return buf;
5354}
5355
5356static const char * const various_names[] = {
5357        "PbcInt",
5358        "GpioAssertInt",
5359        "Qsfp1Int",
5360        "Qsfp2Int",
5361        "TCritInt"
5362};
5363
5364/*
5365 * Return the various interrupt name.
5366 */
5367static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5368{
5369        if (source < ARRAY_SIZE(various_names))
5370                strncpy(buf, various_names[source], bsize);
5371        else
5372                snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
5373        return buf;
5374}
5375
5376/*
5377 * Return the DC interrupt name.
5378 */
5379static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5380{
5381        static const char * const dc_int_names[] = {
5382                "common",
5383                "lcb",
5384                "8051",
5385                "lbm"   /* local block merge */
5386        };
5387
5388        if (source < ARRAY_SIZE(dc_int_names))
5389                snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5390        else
5391                snprintf(buf, bsize, "DCInt%u", source);
5392        return buf;
5393}
5394
5395static const char * const sdma_int_names[] = {
5396        "SDmaInt",
5397        "SdmaIdleInt",
5398        "SdmaProgressInt",
5399};
5400
5401/*
5402 * Return the SDMA engine interrupt name.
5403 */
5404static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5405{
5406        /* what interrupt */
5407        unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
5408        /* which engine */
5409        unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5410
5411        if (likely(what < 3))
5412                snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5413        else
5414                snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5415        return buf;
5416}
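
/*
 * Worked example: with TXE_NUM_SDMA_ENGINES == 16, a source value of 17
 * decodes as what = 17 / 16 = 1 and which = 17 % 16 = 1, producing the
 * name "SdmaIdleInt1".  Any source whose "what" component is 3 or more
 * falls into the "Invalid SDMA interrupt" case.
 */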
5417
5418/*
5419 * Return the receive available interrupt name.
5420 */
5421static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5422{
5423        snprintf(buf, bsize, "RcvAvailInt%u", source);
5424        return buf;
5425}
5426
5427/*
5428 * Return the receive urgent interrupt name.
5429 */
5430static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5431{
5432        snprintf(buf, bsize, "RcvUrgentInt%u", source);
5433        return buf;
5434}
5435
5436/*
5437 * Return the send credit interrupt name.
5438 */
5439static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5440{
5441        snprintf(buf, bsize, "SendCreditInt%u", source);
5442        return buf;
5443}
5444
5445/*
5446 * Return the reserved interrupt name.
5447 */
5448static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5449{
5450        snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5451        return buf;
5452}
5453
5454static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5455{
5456        return flag_string(buf, buf_len, flags,
5457                           cce_err_status_flags,
5458                           ARRAY_SIZE(cce_err_status_flags));
5459}
5460
5461static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5462{
5463        return flag_string(buf, buf_len, flags,
5464                           rxe_err_status_flags,
5465                           ARRAY_SIZE(rxe_err_status_flags));
5466}
5467
5468static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5469{
5470        return flag_string(buf, buf_len, flags, misc_err_status_flags,
5471                           ARRAY_SIZE(misc_err_status_flags));
5472}
5473
5474static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5475{
5476        return flag_string(buf, buf_len, flags,
5477                           pio_err_status_flags,
5478                           ARRAY_SIZE(pio_err_status_flags));
5479}
5480
5481static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5482{
5483        return flag_string(buf, buf_len, flags,
5484                           sdma_err_status_flags,
5485                           ARRAY_SIZE(sdma_err_status_flags));
5486}
5487
5488static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5489{
5490        return flag_string(buf, buf_len, flags,
5491                           egress_err_status_flags,
5492                           ARRAY_SIZE(egress_err_status_flags));
5493}
5494
5495static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5496{
5497        return flag_string(buf, buf_len, flags,
5498                           egress_err_info_flags,
5499                           ARRAY_SIZE(egress_err_info_flags));
5500}
5501
5502static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5503{
5504        return flag_string(buf, buf_len, flags,
5505                           send_err_status_flags,
5506                           ARRAY_SIZE(send_err_status_flags));
5507}
5508
5509static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5510{
5511        char buf[96];
5512        int i = 0;
5513
5514        /*
5515         * For most of these errors, there is nothing that can be done except
5516         * report or record it.
5517         */
5518        dd_dev_info(dd, "CCE Error: %s\n",
5519                    cce_err_status_string(buf, sizeof(buf), reg));
5520
5521        if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5522            is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5523                /* this error requires a manual drop into SPC freeze mode */
5524                /* then a fix up */
5525                start_freeze_handling(dd->pport, FREEZE_SELF);
5526        }
5527
5528        for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5529                if (reg & (1ull << i)) {
5530                        incr_cntr64(&dd->cce_err_status_cnt[i]);
5531                        /* maintain a counter over all cce_err_status errors */
5532                        incr_cntr64(&dd->sw_cce_err_status_aggregate);
5533                }
5534        }
5535}
5536
5537/*
5538 * Check counters for receive errors that do not have an interrupt
5539 * associated with them.
5540 */
5541#define RCVERR_CHECK_TIME 10
5542static void update_rcverr_timer(struct timer_list *t)
5543{
5544        struct hfi1_devdata *dd = from_timer(dd, t, rcverr_timer);
5545        struct hfi1_pportdata *ppd = dd->pport;
5546        u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5547
5548        if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5549            ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5550                dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5551                set_link_down_reason(
5552                        ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5553                        OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5554                queue_work(ppd->link_wq, &ppd->link_bounce_work);
5555        }
5556        dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
5557
5558        mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5559}
5560
5561static int init_rcverr(struct hfi1_devdata *dd)
5562{
5563        timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0);
5564        /* Assume the hardware counter has been reset */
5565        dd->rcv_ovfl_cnt = 0;
5566        return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5567}
5568
5569static void free_rcverr(struct hfi1_devdata *dd)
5570{
5571        if (dd->rcverr_timer.function)
5572                del_timer_sync(&dd->rcverr_timer);
5573}
5574
5575static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5576{
5577        char buf[96];
5578        int i = 0;
5579
5580        dd_dev_info(dd, "Receive Error: %s\n",
5581                    rxe_err_status_string(buf, sizeof(buf), reg));
5582
5583        if (reg & ALL_RXE_FREEZE_ERR) {
5584                int flags = 0;
5585
5586                /*
5587                 * Freeze mode recovery is disabled for the errors
5588                 * in RXE_FREEZE_ABORT_MASK
5589                 */
5590                if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5591                        flags = FREEZE_ABORT;
5592
5593                start_freeze_handling(dd->pport, flags);
5594        }
5595
5596        for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5597                if (reg & (1ull << i))
5598                        incr_cntr64(&dd->rcv_err_status_cnt[i]);
5599        }
5600}
5601
5602static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5603{
5604        char buf[96];
5605        int i = 0;
5606
5607        dd_dev_info(dd, "Misc Error: %s",
5608                    misc_err_status_string(buf, sizeof(buf), reg));
5609        for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5610                if (reg & (1ull << i))
5611                        incr_cntr64(&dd->misc_err_status_cnt[i]);
5612        }
5613}
5614
5615static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5616{
5617        char buf[96];
5618        int i = 0;
5619
5620        dd_dev_info(dd, "PIO Error: %s\n",
5621                    pio_err_status_string(buf, sizeof(buf), reg));
5622
5623        if (reg & ALL_PIO_FREEZE_ERR)
5624                start_freeze_handling(dd->pport, 0);
5625
5626        for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5627                if (reg & (1ull << i))
5628                        incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5629        }
5630}
5631
5632static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5633{
5634        char buf[96];
5635        int i = 0;
5636
5637        dd_dev_info(dd, "SDMA Error: %s\n",
5638                    sdma_err_status_string(buf, sizeof(buf), reg));
5639
5640        if (reg & ALL_SDMA_FREEZE_ERR)
5641                start_freeze_handling(dd->pport, 0);
5642
5643        for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5644                if (reg & (1ull << i))
5645                        incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5646        }
5647}
5648
5649static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5650{
5651        incr_cntr64(&ppd->port_xmit_discards);
5652}
5653
5654static void count_port_inactive(struct hfi1_devdata *dd)
5655{
5656        __count_port_discards(dd->pport);
5657}
5658
5659/*
5660 * We have had a "disallowed packet" error during egress. Determine the
5661 * integrity check which failed, and update the relevant error counter, etc.
5662 *
5663 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5664 * bit of state per integrity check, and so we can miss the reason for an
5665 * egress error if more than one packet fails the same integrity check
5666 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5667 */
5668static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5669                                        int vl)
5670{
5671        struct hfi1_pportdata *ppd = dd->pport;
5672        u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5673        u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5674        char buf[96];
5675
5676        /* clear down all observed info as quickly as possible after read */
5677        write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5678
5679        dd_dev_info(dd,
5680                    "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5681                    info, egress_err_info_string(buf, sizeof(buf), info), src);
5682
5683        /* Eventually add other counters for each bit */
5684        if (info & PORT_DISCARD_EGRESS_ERRS) {
5685                int weight, i;
5686
5687                /*
5688                 * Count all applicable bits as individual errors and
5689                 * attribute them to the packet that triggered this handler.
5690                 * This may not be completely accurate due to limitations
5691                 * on the available hardware error information.  There is
5692                 * a single information register and any number of error
5693                 * packets may have occurred and contributed to it before
5694                 * this routine is called.  This means that:
5695                 * a) If multiple packets with the same error occur before
5696                 *    this routine is called, earlier packets are missed.
5697                 *    There is only a single bit for each error type.
5698                 * b) Errors may not be attributed to the correct VL.
5699                 *    The driver is attributing all bits in the info register
5700                 *    to the packet that triggered this call, but bits
5701                 *    could be an accumulation of different packets with
5702                 *    different VLs.
5703                 * c) A single error packet may have multiple counts attached
5704                 *    to it.  There is no way for the driver to know if
5705                 *    multiple bits set in the info register are due to a
5706                 *    single packet or multiple packets.  The driver assumes
5707                 *    multiple packets.
5708                 */
5709                weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
5710                for (i = 0; i < weight; i++) {
5711                        __count_port_discards(ppd);
5712                        if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5713                                incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5714                        else if (vl == 15)
5715                                incr_cntr64(&ppd->port_xmit_discards_vl
5716                                            [C_VL_15]);
5717                }
5718        }
5719}
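
/*
 * Counting sketch (illustrative): if SEND_EGRESS_ERR_INFO reads back with
 * three of the PORT_DISCARD_EGRESS_ERRS bits set, hweight64() yields 3
 * and the loop above bumps port_xmit_discards three times; when the
 * caller supplied a valid data VL (or VL 15), the matching per-VL discard
 * counter is advanced by the same amount.
 */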
5720
5721/*
5722 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5723 * register. Does it represent a 'port inactive' error?
5724 */
5725static inline int port_inactive_err(u64 posn)
5726{
5727        return (posn >= SEES(TX_LINKDOWN) &&
5728                posn <= SEES(TX_INCORRECT_LINK_STATE));
5729}
5730
5731/*
5732 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5733 * register. Does it represent a 'disallowed packet' error?
5734 */
5735static inline int disallowed_pkt_err(int posn)
5736{
5737        return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5738                posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5739}
5740
5741/*
5742 * Input value is a bit position of one of the SDMA engine disallowed
5743 * packet errors.  Return which engine.  Use of this must be guarded by
5744 * disallowed_pkt_err().
5745 */
5746static inline int disallowed_pkt_engine(int posn)
5747{
5748        return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5749}
5750
5751/*
5752 * Translate an SDMA engine to a VL.  Return -1 if the translation cannot
5753 * be done.
5754 */
5755static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5756{
5757        struct sdma_vl_map *m;
5758        int vl;
5759
5760        /* range check */
5761        if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5762                return -1;
5763
5764        rcu_read_lock();
5765        m = rcu_dereference(dd->sdma_map);
5766        vl = m->engine_to_vl[engine];
5767        rcu_read_unlock();
5768
5769        return vl;
5770}
5771
5772/*
5773 * Translate the send context (software index) into a VL.  Return -1 if the
5774 * translation cannot be done.
5775 */
5776static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5777{
5778        struct send_context_info *sci;
5779        struct send_context *sc;
5780        int i;
5781
5782        sci = &dd->send_contexts[sw_index];
5783
5784        /* there is no information for user (PSM) and ack contexts */
5785        if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
5786                return -1;
5787
5788        sc = sci->sc;
5789        if (!sc)
5790                return -1;
5791        if (dd->vld[15].sc == sc)
5792                return 15;
5793        for (i = 0; i < num_vls; i++)
5794                if (dd->vld[i].sc == sc)
5795                        return i;
5796
5797        return -1;
5798}
5799
5800static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5801{
5802        u64 reg_copy = reg, handled = 0;
5803        char buf[96];
5804        int i = 0;
5805
5806        if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5807                start_freeze_handling(dd->pport, 0);
5808        else if (is_ax(dd) &&
5809                 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5810                 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5811                start_freeze_handling(dd->pport, 0);
5812
5813        while (reg_copy) {
5814                int posn = fls64(reg_copy);
5815                /* fls64() returns a 1-based offset, we want it zero based */
5816                int shift = posn - 1;
5817                u64 mask = 1ULL << shift;
5818
5819                if (port_inactive_err(shift)) {
5820                        count_port_inactive(dd);
5821                        handled |= mask;
5822                } else if (disallowed_pkt_err(shift)) {
5823                        int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5824
5825                        handle_send_egress_err_info(dd, vl);
5826                        handled |= mask;
5827                }
5828                reg_copy &= ~mask;
5829        }
5830
5831        reg &= ~handled;
5832
5833        if (reg)
5834                dd_dev_info(dd, "Egress Error: %s\n",
5835                            egress_err_status_string(buf, sizeof(buf), reg));
5836
5837        for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5838                if (reg & (1ull << i))
5839                        incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5840        }
5841}
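
/*
 * Bit-scan sketch (illustrative values): if reg arrives as 0x9 (bits 0
 * and 3 set), fls64() first returns 4, so bit 3 is examined and cleared
 * from reg_copy, then fls64() returns 1 for bit 0.  Bits recognized as
 * port-inactive or disallowed-packet errors accumulate in "handled" and
 * are excluded from the generic "Egress Error" message after the loop.
 */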
5842
5843static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5844{
5845        char buf[96];
5846        int i = 0;
5847
5848        dd_dev_info(dd, "Send Error: %s\n",
5849                    send_err_status_string(buf, sizeof(buf), reg));
5850
5851        for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5852                if (reg & (1ull << i))
5853                        incr_cntr64(&dd->send_err_status_cnt[i]);
5854        }
5855}
5856
5857/*
5858 * The maximum number of times the error clear down will loop before
5859 * blocking a repeating error.  This value is arbitrary.
5860 */
5861#define MAX_CLEAR_COUNT 20
5862
5863/*
5864 * Clear and handle an error register.  All error interrupts are funneled
5865 * through here to have a central location to correctly handle single-
5866 * or multi-shot errors.
5867 *
5868 * For non per-context registers, call this routine with a context value
5869 * of 0 so the per-context offset is zero.
5870 *
5871 * If the handler loops too many times, assume that something is wrong
5872 * and can't be fixed, so mask the error bits.
5873 */
5874static void interrupt_clear_down(struct hfi1_devdata *dd,
5875                                 u32 context,
5876                                 const struct err_reg_info *eri)
5877{
5878        u64 reg;
5879        u32 count;
5880
5881        /* read in a loop until no more errors are seen */
5882        count = 0;
5883        while (1) {
5884                reg = read_kctxt_csr(dd, context, eri->status);
5885                if (reg == 0)
5886                        break;
5887                write_kctxt_csr(dd, context, eri->clear, reg);
5888                if (likely(eri->handler))
5889                        eri->handler(dd, context, reg);
5890                count++;
5891                if (count > MAX_CLEAR_COUNT) {
5892                        u64 mask;
5893
5894                        dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5895                                   eri->desc, reg);
5896                        /*
5897                         * Read-modify-write so any other masked bits
5898                         * remain masked.
5899                         */
5900                        mask = read_kctxt_csr(dd, context, eri->mask);
5901                        mask &= ~reg;
5902                        write_kctxt_csr(dd, context, eri->mask, mask);
5903                        break;
5904                }
5905        }
5906}
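
/*
 * For reference, the err_reg_info fields consumed above (status, clear,
 * mask, handler, desc) describe one second-tier error register: the
 * status CSR to read, the clear CSR the observed bits are written back
 * to, the mask CSR used to block a bit that keeps re-asserting, plus the
 * handler callback and a human-readable description.  A hypothetical
 * entry might look like:
 *
 *	{ .status = SOME_ERR_STATUS, .clear = SOME_ERR_CLEAR,
 *	  .mask = SOME_ERR_MASK, .handler = handle_some_err,
 *	  .desc = "SomeErr" }
 *
 * where the register names and handler are placeholders, not real CSRs.
 */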
5907
5908/*
5909 * CCE block "misc" interrupt.  Source is < 16.
5910 */
5911static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5912{
5913        const struct err_reg_info *eri = &misc_errs[source];
5914
5915        if (eri->handler) {
5916                interrupt_clear_down(dd, 0, eri);
5917        } else {
5918                dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5919                           source);
5920        }
5921}
5922
5923static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5924{
5925        return flag_string(buf, buf_len, flags,
5926                           sc_err_status_flags,
5927                           ARRAY_SIZE(sc_err_status_flags));
5928}
5929
5930/*
5931 * Send context error interrupt.  Source (hw_context) is < 160.
5932 *
5933 * All send context errors cause the send context to halt.  The normal
5934 * clear-down mechanism cannot be used because we cannot clear the
5935 * error bits until several other long-running items are done first.
5936 * This is OK because with the context halted, nothing else is going
5937 * to happen on it anyway.
5938 */
5939static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5940                                unsigned int hw_context)
5941{
5942        struct send_context_info *sci;
5943        struct send_context *sc;
5944        char flags[96];
5945        u64 status;
5946        u32 sw_index;
5947        int i = 0;
5948        unsigned long irq_flags;
5949
5950        sw_index = dd->hw_to_sw[hw_context];
5951        if (sw_index >= dd->num_send_contexts) {
5952                dd_dev_err(dd,
5953                           "out of range sw index %u for send context %u\n",
5954                           sw_index, hw_context);
5955                return;
5956        }
5957        sci = &dd->send_contexts[sw_index];
5958        spin_lock_irqsave(&dd->sc_lock, irq_flags);
5959        sc = sci->sc;
5960        if (!sc) {
5961                dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5962                           sw_index, hw_context);
5963                spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
5964                return;
5965        }
5966
5967        /* tell the software that a halt has begun */
5968        sc_stop(sc, SCF_HALTED);
5969
5970        status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5971
5972        dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5973                    send_context_err_status_string(flags, sizeof(flags),
5974                                                   status));
5975
5976        if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5977                handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
5978
5979        /*
5980         * Automatically restart halted kernel contexts out of interrupt
5981         * context.  User contexts must ask the driver to restart the context.
5982         */
5983        if (sc->type != SC_USER)
5984                queue_work(dd->pport->hfi1_wq, &sc->halt_work);
5985        spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
5986
5987        /*
5988         * Update the counters for the corresponding status bits.
5989         * Note that these particular counters are aggregated over all
5990         * 160 contexts.
5991         */
5992        for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5993                if (status & (1ull << i))
5994                        incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5995        }
5996}
5997
5998static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5999                                unsigned int source, u64 status)
6000{
6001        struct sdma_engine *sde;
6002        int i = 0;
6003
6004        sde = &dd->per_sdma[source];
6005#ifdef CONFIG_SDMA_VERBOSITY
6006        dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6007                   slashstrip(__FILE__), __LINE__, __func__);
6008        dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
6009                   sde->this_idx, source, (unsigned long long)status);
6010#endif
6011        sde->err_cnt++;
6012        sdma_engine_error(sde, status);
6013
6014        /*
6015         * Update the counters for the corresponding status bits.
6016         * Note that these particular counters are aggregated over
6017         * all 16 DMA engines.
6018         */
6019        for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
6020                if (status & (1ull << i))
6021                        incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
6022        }
6023}
6024
6025/*
6026 * CCE block SDMA error interrupt.  Source is < 16.
6027 */
6028static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
6029{
6030#ifdef CONFIG_SDMA_VERBOSITY
6031        struct sdma_engine *sde = &dd->per_sdma[source];
6032
6033        dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6034                   slashstrip(__FILE__), __LINE__, __func__);
6035        dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
6036                   source);
6037        sdma_dumpstate(sde);
6038#endif
6039        interrupt_clear_down(dd, source, &sdma_eng_err);
6040}
6041
6042/*
6043 * CCE block "various" interrupt.  Source is < 8.
6044 */
6045static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
6046{
6047        const struct err_reg_info *eri = &various_err[source];
6048
6049        /*
6050         * TCritInt cannot go through interrupt_clear_down()
6051         * because it is not a second tier interrupt. The handler
6052         * should be called directly.
6053         */
6054        if (source == TCRIT_INT_SOURCE)
6055                handle_temp_err(dd);
6056        else if (eri->handler)
6057                interrupt_clear_down(dd, 0, eri);
6058        else
6059                dd_dev_info(dd,
6060                            "%s: Unimplemented/reserved interrupt %d\n",
6061                            __func__, source);
6062}
6063
6064static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
6065{
6066        /* src_ctx is always zero */
6067        struct hfi1_pportdata *ppd = dd->pport;
6068        unsigned long flags;
6069        u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
6070
6071        if (reg & QSFP_HFI0_MODPRST_N) {
6072                if (!qsfp_mod_present(ppd)) {
6073                        dd_dev_info(dd, "%s: QSFP module removed\n",
6074                                    __func__);
6075
6076                        ppd->driver_link_ready = 0;
6077                        /*
6078                         * Cable removed, reset all our information about the
6079                         * cache and cable capabilities
6080                         */
6081
6082                        spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6083                        /*
6084                         * We don't set cache_refresh_required here as we expect
6085                         * an interrupt when a cable is inserted
6086                         */
6087                        ppd->qsfp_info.cache_valid = 0;
6088                        ppd->qsfp_info.reset_needed = 0;
6089                        ppd->qsfp_info.limiting_active = 0;
6090                        spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6091                                               flags);
6092                        /* Invert the ModPresent pin now to detect plug-in */
6093                        write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6094                                  ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6095
6096                        if ((ppd->offline_disabled_reason >
6097                          HFI1_ODR_MASK(
6098                          OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
6099                          (ppd->offline_disabled_reason ==
6100                          HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
6101                                ppd->offline_disabled_reason =
6102                                HFI1_ODR_MASK(
6103                                OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
6104
6105                        if (ppd->host_link_state == HLS_DN_POLL) {
6106                                /*
6107                                 * The link is still in POLL. This means
6108                                 * that the normal link down processing
6109                                 * will not happen. We have to do it here
6110                                 * before turning the DC off.
6111                                 */
6112                                queue_work(ppd->link_wq, &ppd->link_down_work);
6113                        }
6114                } else {
6115                        dd_dev_info(dd, "%s: QSFP module inserted\n",
6116                                    __func__);
6117
6118                        spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6119                        ppd->qsfp_info.cache_valid = 0;
6120                        ppd->qsfp_info.cache_refresh_required = 1;
6121                        spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6122                                               flags);
6123
6124                        /*
6125                         * Stop inversion of ModPresent pin to detect
6126                         * removal of the cable
6127                         */
6128                        qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
6129                        write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6130                                  ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6131
6132                        ppd->offline_disabled_reason =
6133                                HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
6134                }
6135        }
6136
6137        if (reg & QSFP_HFI0_INT_N) {
6138                dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
6139                            __func__);
6140                spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6141                ppd->qsfp_info.check_interrupt_flags = 1;
6142                spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6143        }
6144
6145        /* Schedule the QSFP work only if there is a cable attached. */
6146        if (qsfp_mod_present(ppd))
6147                queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work);
6148}
6149
6150static int request_host_lcb_access(struct hfi1_devdata *dd)
6151{
6152        int ret;
6153
6154        ret = do_8051_command(dd, HCMD_MISC,
6155                              (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6156                              LOAD_DATA_FIELD_ID_SHIFT, NULL);
6157        if (ret != HCMD_SUCCESS) {
6158                dd_dev_err(dd, "%s: command failed with error %d\n",
6159                           __func__, ret);
6160        }
6161        return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6162}
6163
6164static int request_8051_lcb_access(struct hfi1_devdata *dd)
6165{
6166        int ret;
6167
6168        ret = do_8051_command(dd, HCMD_MISC,
6169                              (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6170                              LOAD_DATA_FIELD_ID_SHIFT, NULL);
6171        if (ret != HCMD_SUCCESS) {
6172                dd_dev_err(dd, "%s: command failed with error %d\n",
6173                           __func__, ret);
6174        }
6175        return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6176}
6177
6178/*
6179 * Set the LCB selector - allow host access.  The DCC selector always
6180 * points to the host.
6181 */
6182static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6183{
6184        write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6185                  DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6186                  DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6187}
6188
6189/*
6190 * Clear the LCB selector - allow 8051 access.  The DCC selector always
6191 * points to the host.
6192 */
6193static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6194{
6195        write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6196                  DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6197}
6198
6199/*
6200 * Acquire LCB access from the 8051.  If the host already has access,
6201 * just increment a counter.  Otherwise, inform the 8051 that the
6202 * host is taking access.
6203 *
6204 * Returns:
6205 *      0 on success
6206 *      -EBUSY if the 8051 has control and cannot be disturbed
6207 *      -errno if unable to acquire access from the 8051
6208 */
6209int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6210{
6211        struct hfi1_pportdata *ppd = dd->pport;
6212        int ret = 0;
6213
6214        /*
6215         * Use the host link state lock so the operation of this routine
6216         * { link state check, selector change, count increment } can occur
6217         * as a unit against a link state change.  Otherwise there is a
6218         * race between the state change and the count increment.
6219         */
6220        if (sleep_ok) {
6221                mutex_lock(&ppd->hls_lock);
6222        } else {
6223                while (!mutex_trylock(&ppd->hls_lock))
6224                        udelay(1);
6225        }
6226
6227        /* this access is valid only when the link is up */
6228        if (ppd->host_link_state & HLS_DOWN) {
6229                dd_dev_info(dd, "%s: link state %s not up\n",
6230                            __func__, link_state_name(ppd->host_link_state));
6231                ret = -EBUSY;
6232                goto done;
6233        }
6234
6235        if (dd->lcb_access_count == 0) {
6236                ret = request_host_lcb_access(dd);
6237                if (ret) {
6238                        dd_dev_err(dd,
6239                                   "%s: unable to acquire LCB access, err %d\n",
6240                                   __func__, ret);
6241                        goto done;
6242                }
6243                set_host_lcb_access(dd);
6244        }
6245        dd->lcb_access_count++;
6246done:
6247        mutex_unlock(&ppd->hls_lock);
6248        return ret;
6249}
6250
6251/*
6252 * Release LCB access by decrementing the use count.  If the count is moving
6253 * from 1 to 0, inform the 8051 that it has control back.
6254 *
6255 * Returns:
6256 *      0 on success
6257 *      -errno if unable to release access to the 8051
6258 */
6259int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6260{
6261        int ret = 0;
6262
6263        /*
6264         * Use the host link state lock because the acquire needed it.
6265         * Here, we only need to keep { selector change, count decrement }
6266         * as a unit.
6267         */
6268        if (sleep_ok) {
6269                mutex_lock(&dd->pport->hls_lock);
6270        } else {
6271                while (!mutex_trylock(&dd->pport->hls_lock))
6272                        udelay(1);
6273        }
6274
6275        if (dd->lcb_access_count == 0) {
6276                dd_dev_err(dd, "%s: LCB access count is zero.  Skipping.\n",
6277                           __func__);
6278                goto done;
6279        }
6280
6281        if (dd->lcb_access_count == 1) {
6282                set_8051_lcb_access(dd);
6283                ret = request_8051_lcb_access(dd);
6284                if (ret) {
6285                        dd_dev_err(dd,
6286                                   "%s: unable to release LCB access, err %d\n",
6287                                   __func__, ret);
6288                        /* restore host access if the grant didn't work */
6289                        set_host_lcb_access(dd);
6290                        goto done;
6291                }
6292        }
6293        dd->lcb_access_count--;
6294done:
6295        mutex_unlock(&dd->pport->hls_lock);
6296        return ret;
6297}
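
/*
 * Typical pairing, shown as an illustrative sketch only (not a real call
 * site in this driver):
 *
 *	if (!acquire_lcb_access(dd, 1)) {
 *		... read or write LCB CSRs ...
 *		release_lcb_access(dd, 1);
 *	}
 *
 * A sleep_ok value of 1 allows the routines to block on the hls_lock
 * mutex; with sleep_ok == 0 they spin with udelay(1) instead, for use in
 * contexts that cannot sleep.
 */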
6298
6299/*
6300 * Initialize LCB access variables and state.  Called during driver load,
6301 * after most of the initialization is finished.
6302 *
6303 * The DC default is LCB access on for the host.  The driver defaults to
6304 * leaving access to the 8051.  Assign access now - this constrains the call
6305 * to this routine to be after all LCB set-up is done.  In particular, after
6306 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6307 */
6308static void init_lcb_access(struct hfi1_devdata *dd)
6309{
6310        dd->lcb_access_count = 0;
6311}
6312
6313/*
6314 * Write a response back to an 8051 request.
6315 */
6316static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6317{
6318        write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6319                  DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6320                  (u64)return_code <<
6321                  DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6322                  (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6323}
6324
6325/*
6326 * Handle host requests from the 8051.
6327 */
6328static void handle_8051_request(struct hfi1_pportdata *ppd)
6329{
6330        struct hfi1_devdata *dd = ppd->dd;
6331        u64 reg;
6332        u16 data = 0;
6333        u8 type;
6334
6335        reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6336        if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6337                return; /* no request */
6338
6339        /* zero out COMPLETED so the response is seen */
6340        write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6341
6342        /* extract request details */
6343        type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6344                        & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6345        data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6346                        & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6347
6348        switch (type) {
6349        case HREQ_LOAD_CONFIG:
6350        case HREQ_SAVE_CONFIG:
6351        case HREQ_READ_CONFIG:
6352        case HREQ_SET_TX_EQ_ABS:
6353        case HREQ_SET_TX_EQ_REL:
6354        case HREQ_ENABLE:
6355                dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6356                            type);
6357                hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6358                break;
6359        case HREQ_LCB_RESET:
6360                /* Put the LCB, RX FPE and TX FPE into reset */
6361                write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_INTO_RESET);
6362                /* Make sure the write completed */
6363                (void)read_csr(dd, DCC_CFG_RESET);
6364                /* Hold the reset long enough to take effect */
6365                udelay(1);
6366                /* Take the LCB, RX FPE and TX FPE out of reset */
6367                write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
6368                hreq_response(dd, HREQ_SUCCESS, 0);
6369
6370                break;
6371        case HREQ_CONFIG_DONE:
6372                hreq_response(dd, HREQ_SUCCESS, 0);
6373                break;
6374
6375        case HREQ_INTERFACE_TEST:
6376                hreq_response(dd, HREQ_SUCCESS, data);
6377                break;
6378        default:
6379                dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6380                hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6381                break;
6382        }
6383}
6384
6385/*
6386 * Set up the allocation unit value.
6387 */
6388void set_up_vau(struct hfi1_devdata *dd, u8 vau)
6389{
6390        u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6391
6392        /* do not modify other values in the register */
6393        reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
6394        reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
6395        write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6396}
6397
6398/*
6399 * Set up initial VL15 credits of the remote.  Assumes the rest of
6400 * the CM credit registers are zero from a previous global or credit reset.
6401 * Shared limit for VL15 will always be 0.
6402 */
6403void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
6404{
6405        u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6406
6407        /* set initial values for total and shared credit limit */
6408        reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
6409                 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
6410
6411        /*
6412         * Set total limit to be equal to VL15 credits.
6413         * Leave shared limit at 0.
6414         */
6415        reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
6416        write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6417
6418        write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6419                  << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6420}
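/*
 * Usage note (sketch of the flow later in this file): handle_verify_cap()
 * programs set_up_vl15(dd, 0) and caches the peer's value in
 * dd->vl15buf_cached; handle_link_up() then applies the cached value with
 * set_up_vl15(dd, dd->vl15buf_cached), except for quick linkup or the
 * functional simulator, where handle_linkup_change() sets it instead.
 */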
6421
6422/*
6423 * Zero all credit details from the previous connection and
6424 * reset the CM manager's internal counters.
6425 */
6426void reset_link_credits(struct hfi1_devdata *dd)
6427{
6428        int i;
6429
6430        /* remove all previous VL credit limits */
6431        for (i = 0; i < TXE_NUM_DATA_VL; i++)
6432                write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6433        write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6434        write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
6435        /* reset the CM block */
6436        pio_send_control(dd, PSC_CM_RESET);
6437        /* reset cached value */
6438        dd->vl15buf_cached = 0;
6439}
6440
6441/* convert a vCU to a CU */
6442static u32 vcu_to_cu(u8 vcu)
6443{
6444        return 1 << vcu;
6445}
6446
6447/* convert a CU to a vCU */
6448static u8 cu_to_vcu(u32 cu)
6449{
6450        return ilog2(cu);
6451}
6452
6453/* convert a vAU to an AU */
6454static u32 vau_to_au(u8 vau)
6455{
6456        return 8 * (1 << vau);
6457}
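/*
 * Worked example (illustrative only): these credit-management encodings
 * are simple powers of two.  For a peer advertising vAU = 1 and vCU = 3:
 *
 *	vau_to_au(1) == 16	(bytes per allocation unit, 8 << 1)
 *	vcu_to_cu(3) == 8	(credits per credit unit, 1 << 3)
 *	cu_to_vcu(8) == 3	(inverse mapping via ilog2())
 *
 * A vAU of 0 would mean AU == 8, which is why handle_verify_cap() below
 * bumps a peer vAU of 0 up to 1 (AU == 16).
 */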
6458
6459static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6460{
6461        ppd->sm_trap_qp = 0x0;
6462        ppd->sa_qp = 0x1;
6463}
6464
6465/*
6466 * Graceful LCB shutdown.  This leaves the LCB FIFOs in reset.
6467 */
6468static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6469{
6470        u64 reg;
6471
6472        /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6473        write_csr(dd, DC_LCB_CFG_RUN, 0);
6474        /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6475        write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6476                  1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6477        /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6478        dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6479        reg = read_csr(dd, DCC_CFG_RESET);
6480        write_csr(dd, DCC_CFG_RESET, reg |
6481                  DCC_CFG_RESET_RESET_LCB | DCC_CFG_RESET_RESET_RX_FPE);
6482        (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6483        if (!abort) {
6484                udelay(1);    /* must hold for the longer of 16cclks or 20ns */
6485                write_csr(dd, DCC_CFG_RESET, reg);
6486                write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6487        }
6488}
6489
6490/*
6491 * This routine should be called after the link has been transitioned to
6492 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6493 * reset).
6494 *
6495 * The expectation is that the caller of this routine would have taken
6496 * care of properly transitioning the link into the correct state.
6497 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6498 *       before calling this function.
6499 */
6500static void _dc_shutdown(struct hfi1_devdata *dd)
6501{
6502        lockdep_assert_held(&dd->dc8051_lock);
6503
6504        if (dd->dc_shutdown)
6505                return;
6506
6507        dd->dc_shutdown = 1;
6508        /* Shutdown the LCB */
6509        lcb_shutdown(dd, 1);
6510        /*
6511         * Going to OFFLINE would have caused the 8051 to put the
6512         * SerDes into reset already.  Just need to shut down the 8051
6513         * itself.
6514         */
6515        write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6516}
6517
6518static void dc_shutdown(struct hfi1_devdata *dd)
6519{
6520        mutex_lock(&dd->dc8051_lock);
6521        _dc_shutdown(dd);
6522        mutex_unlock(&dd->dc8051_lock);
6523}
6524
6525/*
6526 * Calling this after the DC has been brought out of reset should not
6527 * do any damage.
6528 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6529 *       before calling this function.
6530 */
6531static void _dc_start(struct hfi1_devdata *dd)
6532{
6533        lockdep_assert_held(&dd->dc8051_lock);
6534
6535        if (!dd->dc_shutdown)
6536                return;
6537
6538        /* Take the 8051 out of reset */
6539        write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6540        /* Wait until 8051 is ready */
6541        if (wait_fm_ready(dd, TIMEOUT_8051_START))
6542                dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6543                           __func__);
6544
6545        /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6546        write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
6547        /* lcb_shutdown() with abort=1 does not restore these */
6548        write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6549        dd->dc_shutdown = 0;
6550}
6551
6552static void dc_start(struct hfi1_devdata *dd)
6553{
6554        mutex_lock(&dd->dc8051_lock);
6555        _dc_start(dd);
6556        mutex_unlock(&dd->dc8051_lock);
6557}
6558
6559/*
6560 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6561 */
6562static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6563{
6564        u64 rx_radr, tx_radr;
6565        u32 version;
6566
6567        if (dd->icode != ICODE_FPGA_EMULATION)
6568                return;
6569
6570        /*
6571         * These LCB defaults on emulator _s are good, nothing to do here:
6572         *      LCB_CFG_TX_FIFOS_RADR
6573         *      LCB_CFG_RX_FIFOS_RADR
6574         *      LCB_CFG_LN_DCLK
6575         *      LCB_CFG_IGNORE_LOST_RCLK
6576         */
6577        if (is_emulator_s(dd))
6578                return;
6579        /* else this is _p */
6580
6581        version = emulator_rev(dd);
6582        if (!is_ax(dd))
6583                version = 0x2d; /* all B0 use 0x2d or higher settings */
6584
6585        if (version <= 0x12) {
6586                /* release 0x12 and below */
6587
6588                /*
6589                 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6590                 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6591                 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6592                 */
6593                rx_radr =
6594                      0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6595                    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6596                    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6597                /*
6598                 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6599                 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6600                 */
6601                tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6602        } else if (version <= 0x18) {
6603                /* release 0x13 up to 0x18 */
6604                /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6605                rx_radr =
6606                      0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6607                    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6608                    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6609                tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6610        } else if (version == 0x19) {
6611                /* release 0x19 */
6612                /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6613                rx_radr =
6614                      0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6615                    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6616                    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6617                tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6618        } else if (version == 0x1a) {
6619                /* release 0x1a */
6620                /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6621                rx_radr =
6622                      0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6623                    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6624                    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6625                tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6626                write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6627        } else {
6628                /* release 0x1b and higher */
6629                /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6630                rx_radr =
6631                      0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6632                    | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6633                    | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6634                tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6635        }
6636
6637        write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6638        /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6639        write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6640                  DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6641        write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6642}
6643
6644/*
6645 * Handle an SMA idle message
6646 *
6647 * This is a work-queue function outside of the interrupt.
6648 */
6649void handle_sma_message(struct work_struct *work)
6650{
6651        struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6652                                                        sma_message_work);
6653        struct hfi1_devdata *dd = ppd->dd;
6654        u64 msg;
6655        int ret;
6656
6657        /*
6658         * msg is bytes 1-4 of the 40-bit idle message - the command code
6659         * is stripped off
6660         */
6661        ret = read_idle_sma(dd, &msg);
6662        if (ret)
6663                return;
6664        dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6665        /*
6666         * React to the SMA message.  Byte[1] (0 for us) is the command.
6667         */
6668        switch (msg & 0xff) {
6669        case SMA_IDLE_ARM:
6670                /*
6671                 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6672                 * State Transitions
6673                 *
6674                 * Only expected in INIT or ARMED, discard otherwise.
6675                 */
6676                if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6677                        ppd->neighbor_normal = 1;
6678                break;
6679        case SMA_IDLE_ACTIVE:
6680                /*
6681                 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6682                 * State Transitions
6683                 *
6684                 * Can activate the node.  Discard otherwise.
6685                 */
6686                if (ppd->host_link_state == HLS_UP_ARMED &&
6687                    ppd->is_active_optimize_enabled) {
6688                        ppd->neighbor_normal = 1;
6689                        ret = set_link_state(ppd, HLS_UP_ACTIVE);
6690                        if (ret)
6691                                dd_dev_err(
6692                                        dd,
6693                                        "%s: received Active SMA idle message, couldn't set link to Active\n",
6694                                        __func__);
6695                }
6696                break;
6697        default:
6698                dd_dev_err(dd,
6699                           "%s: received unexpected SMA idle message 0x%llx\n",
6700                           __func__, msg);
6701                break;
6702        }
6703}
6704
6705static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6706{
6707        u64 rcvctrl;
6708        unsigned long flags;
6709
6710        spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6711        rcvctrl = read_csr(dd, RCV_CTRL);
6712        rcvctrl |= add;
6713        rcvctrl &= ~clear;
6714        write_csr(dd, RCV_CTRL, rcvctrl);
6715        spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6716}
6717
6718static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6719{
6720        adjust_rcvctrl(dd, add, 0);
6721}
6722
6723static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6724{
6725        adjust_rcvctrl(dd, 0, clear);
6726}
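/*
 * Illustrative usage: both helpers funnel through adjust_rcvctrl(), so the
 * RCV_CTRL read-modify-write is always done under rcvctrl_lock.  The port
 * enable bit is toggled this way elsewhere in this file, e.g.
 *
 *	add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);	(enable port)
 *	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);	(disable port)
 *
 * as done in rxe_kernel_unfreeze(), rxe_freeze(), and handle_link_down().
 */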
6727
6728/*
6729 * Called from all interrupt handlers to start handling an SPC freeze.
6730 */
6731void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6732{
6733        struct hfi1_devdata *dd = ppd->dd;
6734        struct send_context *sc;
6735        int i;
6736        int sc_flags;
6737
6738        if (flags & FREEZE_SELF)
6739                write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6740
6741        /* enter frozen mode */
6742        dd->flags |= HFI1_FROZEN;
6743
6744        /* notify all SDMA engines that they are going into a freeze */
6745        sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6746
6747        sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
6748                                              SCF_LINK_DOWN : 0);
6749        /* do halt pre-handling on all enabled send contexts */
6750        for (i = 0; i < dd->num_send_contexts; i++) {
6751                sc = dd->send_contexts[i].sc;
6752                if (sc && (sc->flags & SCF_ENABLED))
6753                        sc_stop(sc, sc_flags);
6754        }
6755
6756        /* Send contexts are frozen. Notify user space */
6757        hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6758
6759        if (flags & FREEZE_ABORT) {
6760                dd_dev_err(dd,
6761                           "Aborted freeze recovery. Please REBOOT system\n");
6762                return;
6763        }
6764        /* queue non-interrupt handler */
6765        queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6766}
6767
6768/*
6769 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6770 * depending on the "freeze" parameter.
6771 *
6772 * No need to return an error if it times out, our only option
6773 * is to proceed anyway.
6774 */
6775static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6776{
6777        unsigned long timeout;
6778        u64 reg;
6779
6780        timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6781        while (1) {
6782                reg = read_csr(dd, CCE_STATUS);
6783                if (freeze) {
6784                        /* waiting until all indicators are set */
6785                        if ((reg & ALL_FROZE) == ALL_FROZE)
6786                                return; /* all done */
6787                } else {
6788                        /* waiting until all indicators are clear */
6789                        if ((reg & ALL_FROZE) == 0)
6790                                return; /* all done */
6791                }
6792
6793                if (time_after(jiffies, timeout)) {
6794                        dd_dev_err(dd,
6795                                   "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6796                                   freeze ? "" : "un", reg & ALL_FROZE,
6797                                   freeze ? ALL_FROZE : 0ull);
6798                        return;
6799                }
6800                usleep_range(80, 120);
6801        }
6802}
6803
6804/*
6805 * Do all freeze handling for the RXE block.
6806 */
6807static void rxe_freeze(struct hfi1_devdata *dd)
6808{
6809        int i;
6810        struct hfi1_ctxtdata *rcd;
6811
6812        /* disable port */
6813        clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6814
6815        /* disable all receive contexts */
6816        for (i = 0; i < dd->num_rcv_contexts; i++) {
6817                rcd = hfi1_rcd_get_by_index(dd, i);
6818                hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd);
6819                hfi1_rcd_put(rcd);
6820        }
6821}
6822
6823/*
6824 * Unfreeze handling for the RXE block - kernel contexts only.
6825 * This will also enable the port.  User contexts will do unfreeze
6826 * handling on a per-context basis as they call into the driver.
6827 *
6828 */
6829static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6830{
6831        u32 rcvmask;
6832        u16 i;
6833        struct hfi1_ctxtdata *rcd;
6834
6835        /* enable all kernel contexts */
6836        for (i = 0; i < dd->num_rcv_contexts; i++) {
6837                rcd = hfi1_rcd_get_by_index(dd, i);
6838
6839                /* Ensure all non-user contexts (including vnic) are enabled */
6840                if (!rcd ||
6841                    (i >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic)) {
6842                        hfi1_rcd_put(rcd);
6843                        continue;
6844                }
6845                rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6846                /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6847                rcvmask |= rcd->rcvhdrtail_kvaddr ?
6848                        HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6849                hfi1_rcvctrl(dd, rcvmask, rcd);
6850                hfi1_rcd_put(rcd);
6851        }
6852
6853        /* enable port */
6854        add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6855}
6856
6857/*
6858 * Non-interrupt SPC freeze handling.
6859 *
6860 * This is a work-queue function outside of the triggering interrupt.
6861 */
6862void handle_freeze(struct work_struct *work)
6863{
6864        struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6865                                                                freeze_work);
6866        struct hfi1_devdata *dd = ppd->dd;
6867
6868        /* wait for freeze indicators on all affected blocks */
6869        wait_for_freeze_status(dd, 1);
6870
6871        /* SPC is now frozen */
6872
6873        /* do send PIO freeze steps */
6874        pio_freeze(dd);
6875
6876        /* do send DMA freeze steps */
6877        sdma_freeze(dd);
6878
6879        /* do send egress freeze steps - nothing to do */
6880
6881        /* do receive freeze steps */
6882        rxe_freeze(dd);
6883
6884        /*
6885         * Unfreeze the hardware - clear the freeze, wait for each
6886         * block's frozen bit to clear, then clear the frozen flag.
6887         */
6888        write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6889        wait_for_freeze_status(dd, 0);
6890
6891        if (is_ax(dd)) {
6892                write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6893                wait_for_freeze_status(dd, 1);
6894                write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6895                wait_for_freeze_status(dd, 0);
6896        }
6897
6898        /* do send PIO unfreeze steps for kernel contexts */
6899        pio_kernel_unfreeze(dd);
6900
6901        /* do send DMA unfreeze steps */
6902        sdma_unfreeze(dd);
6903
6904        /* do send egress unfreeze steps - nothing to do */
6905
6906        /* do receive unfreeze steps for kernel contexts */
6907        rxe_kernel_unfreeze(dd);
6908
6909        /*
6910         * The unfreeze procedure touches global device registers when
6911         * it disables and re-enables RXE. Mark the device unfrozen
6912         * after all that is done so other parts of the driver waiting
6913         * for the device to unfreeze don't do things out of order.
6914         *
6915         * The above implies that the meaning of HFI1_FROZEN flag is
6916         * "Device has gone into freeze mode and freeze mode handling
6917         * is still in progress."
6918         *
6919         * The flag will be removed when freeze mode processing has
6920         * completed.
6921         */
6922        dd->flags &= ~HFI1_FROZEN;
6923        wake_up(&dd->event_queue);
6924
6925        /* no longer frozen */
6926}
6927
6928/**
6929 * update_xmit_counters - update PortXmitWait/PortVlXmitWait
6930 * counters.
6931 * @ppd: info of physical Hfi port
6932 * @link_width: new link width after link up or downgrade
6933 *
6934 * Update the PortXmitWait and PortVlXmitWait counters after
6935 * a link up or downgrade event to reflect a link width change.
6936 */
6937static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width)
6938{
6939        int i;
6940        u16 tx_width;
6941        u16 link_speed;
6942
6943        tx_width = tx_link_width(link_width);
6944        link_speed = get_link_speed(ppd->link_speed_active);
6945
6946        /*
6947         * There are C_VL_COUNT number of PortVLXmitWait counters.
6948         * Adding 1 to C_VL_COUNT to include the PortXmitWait counter.
6949         */
6950        for (i = 0; i < C_VL_COUNT + 1; i++)
6951                get_xmit_wait_counters(ppd, tx_width, link_speed, i);
6952}
6953
6954/*
6955 * Handle a link up interrupt from the 8051.
6956 *
6957 * This is a work-queue function outside of the interrupt.
6958 */
6959void handle_link_up(struct work_struct *work)
6960{
6961        struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6962                                                  link_up_work);
6963        struct hfi1_devdata *dd = ppd->dd;
6964
6965        set_link_state(ppd, HLS_UP_INIT);
6966
6967        /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6968        read_ltp_rtt(dd);
6969        /*
6970         * OPA specifies that certain counters are cleared on a transition
6971         * to link up, so do that.
6972         */
6973        clear_linkup_counters(dd);
6974        /*
6975         * And (re)set link up default values.
6976         */
6977        set_linkup_defaults(ppd);
6978
6979        /*
6980         * Set VL15 credits. Use cached value from verify cap interrupt.
6981         * In case of quick linkup or simulator, vl15 value will be set by
6982         * handle_linkup_change. VerifyCap interrupt handler will not be
6983         * called in those scenarios.
6984         */
6985        if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
6986                set_up_vl15(dd, dd->vl15buf_cached);
6987
6988        /* enforce link speed enabled */
6989        if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6990                /* oops - current speed is not enabled, bounce */
6991                dd_dev_err(dd,
6992                           "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6993                           ppd->link_speed_active, ppd->link_speed_enabled);
6994                set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
6995                                     OPA_LINKDOWN_REASON_SPEED_POLICY);
6996                set_link_state(ppd, HLS_DN_OFFLINE);
6997                start_link(ppd);
6998        }
6999}
7000
7001/*
7002 * Several pieces of LNI information were cached for SMA in ppd.
7003 * Reset these on link down.
7004 */
7005static void reset_neighbor_info(struct hfi1_pportdata *ppd)
7006{
7007        ppd->neighbor_guid = 0;
7008        ppd->neighbor_port_number = 0;
7009        ppd->neighbor_type = 0;
7010        ppd->neighbor_fm_security = 0;
7011}
7012
7013static const char * const link_down_reason_strs[] = {
7014        [OPA_LINKDOWN_REASON_NONE] = "None",
7015        [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
7016        [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
7017        [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
7018        [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
7019        [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
7020        [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
7021        [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
7022        [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
7023        [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
7024        [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
7025        [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
7026        [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
7027        [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
7028        [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
7029        [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
7030        [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
7031        [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
7032        [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
7033        [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
7034        [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
7035        [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
7036        [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
7037        [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
7038        [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
7039        [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
7040        [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
7041        [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
7042        [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
7043        [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
7044        [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
7045        [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
7046        [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
7047                                        "Excessive buffer overrun",
7048        [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
7049        [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
7050        [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
7051        [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
7052        [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
7053        [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
7054        [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
7055        [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
7056                                        "Local media not installed",
7057        [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
7058        [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
7059        [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
7060                                        "End to end not installed",
7061        [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
7062        [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
7063        [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
7064        [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
7065        [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
7066        [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
7067};
7068
7069/* return the neighbor link down reason string */
7070static const char *link_down_reason_str(u8 reason)
7071{
7072        const char *str = NULL;
7073
7074        if (reason < ARRAY_SIZE(link_down_reason_strs))
7075                str = link_down_reason_strs[reason];
7076        if (!str)
7077                str = "(invalid)";
7078
7079        return str;
7080}
7081
7082/*
7083 * Handle a link down interrupt from the 8051.
7084 *
7085 * This is a work-queue function outside of the interrupt.
7086 */
7087void handle_link_down(struct work_struct *work)
7088{
7089        u8 lcl_reason, neigh_reason = 0;
7090        u8 link_down_reason;
7091        struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7092                                                  link_down_work);
7093        int was_up;
7094        static const char ldr_str[] = "Link down reason: ";
7095
7096        if ((ppd->host_link_state &
7097             (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
7098             ppd->port_type == PORT_TYPE_FIXED)
7099                ppd->offline_disabled_reason =
7100                        HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
7101
7102        /* Go offline first, then deal with reading/writing through 8051 */
7103        was_up = !!(ppd->host_link_state & HLS_UP);
7104        set_link_state(ppd, HLS_DN_OFFLINE);
7105        xchg(&ppd->is_link_down_queued, 0);
7106
7107        if (was_up) {
7108                lcl_reason = 0;
7109                /* link down reason is only valid if the link was up */
7110                read_link_down_reason(ppd->dd, &link_down_reason);
7111                switch (link_down_reason) {
7112                case LDR_LINK_TRANSFER_ACTIVE_LOW:
7113                        /* the link went down, no idle message reason */
7114                        dd_dev_info(ppd->dd, "%sUnexpected link down\n",
7115                                    ldr_str);
7116                        break;
7117                case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
7118                        /*
7119                         * The neighbor reason is only valid if an idle message
7120                         * was received for it.
7121                         */
7122                        read_planned_down_reason_code(ppd->dd, &neigh_reason);
7123                        dd_dev_info(ppd->dd,
7124                                    "%sNeighbor link down message %d, %s\n",
7125                                    ldr_str, neigh_reason,
7126                                    link_down_reason_str(neigh_reason));
7127                        break;
7128                case LDR_RECEIVED_HOST_OFFLINE_REQ:
7129                        dd_dev_info(ppd->dd,
7130                                    "%sHost requested link to go offline\n",
7131                                    ldr_str);
7132                        break;
7133                default:
7134                        dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
7135                                    ldr_str, link_down_reason);
7136                        break;
7137                }
7138
7139                /*
7140                 * If no reason, assume peer-initiated but missed
7141                 * LinkGoingDown idle flits.
7142                 */
7143                if (neigh_reason == 0)
7144                        lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
7145        } else {
7146                /* went down while polling or going up */
7147                lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
7148        }
7149
7150        set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
7151
7152        /* inform the SMA when the link transitions from up to down */
7153        if (was_up && ppd->local_link_down_reason.sma == 0 &&
7154            ppd->neigh_link_down_reason.sma == 0) {
7155                ppd->local_link_down_reason.sma =
7156                                        ppd->local_link_down_reason.latest;
7157                ppd->neigh_link_down_reason.sma =
7158                                        ppd->neigh_link_down_reason.latest;
7159        }
7160
7161        reset_neighbor_info(ppd);
7162
7163        /* disable the port */
7164        clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
7165
7166        /*
7167         * If there is no cable attached, turn the DC off. Otherwise,
7168         * start the link bring up.
7169         */
7170        if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
7171                dc_shutdown(ppd->dd);
7172        else
7173                start_link(ppd);
7174}
7175
7176void handle_link_bounce(struct work_struct *work)
7177{
7178        struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7179                                                        link_bounce_work);
7180
7181        /*
7182         * Only do something if the link is currently up.
7183         */
7184        if (ppd->host_link_state & HLS_UP) {
7185                set_link_state(ppd, HLS_DN_OFFLINE);
7186                start_link(ppd);
7187        } else {
7188                dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
7189                            __func__, link_state_name(ppd->host_link_state));
7190        }
7191}
7192
7193/*
7194 * Mask conversion: Capability exchange to Port LTP.  The capability
7195 * exchange has an implicit 16b CRC that is mandatory.
7196 */
7197static int cap_to_port_ltp(int cap)
7198{
7199        int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7200
7201        if (cap & CAP_CRC_14B)
7202                port_ltp |= PORT_LTP_CRC_MODE_14;
7203        if (cap & CAP_CRC_48B)
7204                port_ltp |= PORT_LTP_CRC_MODE_48;
7205        if (cap & CAP_CRC_12B_16B_PER_LANE)
7206                port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7207
7208        return port_ltp;
7209}
7210
7211/*
7212 * Convert an OPA Port LTP mask to capability mask
7213 */
7214int port_ltp_to_cap(int port_ltp)
7215{
7216        int cap_mask = 0;
7217
7218        if (port_ltp & PORT_LTP_CRC_MODE_14)
7219                cap_mask |= CAP_CRC_14B;
7220        if (port_ltp & PORT_LTP_CRC_MODE_48)
7221                cap_mask |= CAP_CRC_48B;
7222        if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7223                cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7224
7225        return cap_mask;
7226}
7227
7228/*
7229 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7230 */
7231static int lcb_to_port_ltp(int lcb_crc)
7232{
7233        int port_ltp = 0;
7234
7235        if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7236                port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7237        else if (lcb_crc == LCB_CRC_48B)
7238                port_ltp = PORT_LTP_CRC_MODE_48;
7239        else if (lcb_crc == LCB_CRC_14B)
7240                port_ltp = PORT_LTP_CRC_MODE_14;
7241        else
7242                port_ltp = PORT_LTP_CRC_MODE_16;
7243
7244        return port_ltp;
7245}
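/*
 * Worked example (illustrative only) of the CRC mode mask conversions:
 *
 *	cap_to_port_ltp(CAP_CRC_14B)
 *		== PORT_LTP_CRC_MODE_16 | PORT_LTP_CRC_MODE_14
 *
 * since the mandatory 16b mode is always included, while the reverse
 * direction drops it: port_ltp_to_cap(PORT_LTP_CRC_MODE_16) == 0.
 * A single negotiated LCB mode converts one-to-one, e.g.
 * lcb_to_port_ltp(LCB_CRC_14B) == PORT_LTP_CRC_MODE_14.
 */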
7246
7247static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7248{
7249        if (ppd->pkeys[2] != 0) {
7250                ppd->pkeys[2] = 0;
7251                (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7252                hfi1_event_pkey_change(ppd->dd, ppd->port);
7253        }
7254}
7255
7256/*
7257 * Convert the given link width to the OPA link width bitmask.
7258 */
7259static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7260{
7261        switch (width) {
7262        case 0:
7263                /*
7264                 * Simulator and quick linkup do not set the width.
7265                 * Just set it to 4x without complaint.
7266                 */
7267                if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7268                        return OPA_LINK_WIDTH_4X;
7269                return 0; /* no lanes up */
7270        case 1: return OPA_LINK_WIDTH_1X;
7271        case 2: return OPA_LINK_WIDTH_2X;
7272        case 3: return OPA_LINK_WIDTH_3X;
7273        default:
7274                dd_dev_info(dd, "%s: invalid width %d, using 4\n",
7275                            __func__, width);
7276                /* fall through */
7277        case 4: return OPA_LINK_WIDTH_4X;
7278        }
7279}
7280
7281/*
7282 * Do a population count on the bottom nibble.
7283 */
7284static const u8 bit_counts[16] = {
7285        0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7286};
7287
7288static inline u8 nibble_to_count(u8 nibble)
7289{
7290        return bit_counts[nibble & 0xf];
7291}
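/*
 * Illustrative example: get_link_widths() below feeds the enable_lane_tx
 * and enable_lane_rx nibbles through this popcount, so a fully enabled
 * lane mask of 0xf gives
 *
 *	nibble_to_count(0xf) == 4
 *	link_width_to_bits(dd, 4) == OPA_LINK_WIDTH_4X
 *
 * while an asymmetric link (e.g. tx 0xf, rx 0x3) reports 4X/2X.
 */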
7292
7293/*
7294 * Read the active lane information from the 8051 registers and return
7295 * their widths.
7296 *
7297 * Active lane information is found in these 8051 registers:
7298 *      enable_lane_tx
7299 *      enable_lane_rx
7300 */
7301static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7302                            u16 *rx_width)
7303{
7304        u16 tx, rx;
7305        u8 enable_lane_rx;
7306        u8 enable_lane_tx;
7307        u8 tx_polarity_inversion;
7308        u8 rx_polarity_inversion;
7309        u8 max_rate;
7310
7311        /* read the active lanes */
7312        read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7313                         &rx_polarity_inversion, &max_rate);
7314        read_local_lni(dd, &enable_lane_rx);
7315
7316        /* convert to counts */
7317        tx = nibble_to_count(enable_lane_tx);
7318        rx = nibble_to_count(enable_lane_rx);
7319
7320        /*
7321         * Set link_speed_active here, overriding what was set in
7322         * handle_verify_cap().  The ASIC 8051 firmware does not correctly
7323         * set the max_rate field in handle_verify_cap until v0.19.
7324         */
7325        if ((dd->icode == ICODE_RTL_SILICON) &&
7326            (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
7327                /* max_rate: 0 = 12.5G, 1 = 25G */
7328                switch (max_rate) {
7329                case 0:
7330                        dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7331                        break;
7332                default:
7333                        dd_dev_err(dd,
7334                                   "%s: unexpected max rate %d, using 25Gb\n",
7335                                   __func__, (int)max_rate);
7336                        /* fall through */
7337                case 1:
7338                        dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7339                        break;
7340                }
7341        }
7342
7343        dd_dev_info(dd,
7344                    "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7345                    enable_lane_tx, tx, enable_lane_rx, rx);
7346        *tx_width = link_width_to_bits(dd, tx);
7347        *rx_width = link_width_to_bits(dd, rx);
7348}
7349
7350/*
7351 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7352 * Valid after the end of VerifyCap and during LinkUp.  Does not change
7353 * after link up.  I.e. look elsewhere for downgrade information.
7354 *
7355 * Bits are:
7356 *      + bits [7:4] contain the number of active transmitters
7357 *      + bits [3:0] contain the number of active receivers
7358 * These are numbers 1 through 4 and can be different values if the
7359 * link is asymmetric.
7360 *
7361 * verify_cap_local_fm_link_width[0] retains its original value.
7362 */
7363static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7364                              u16 *rx_width)
7365{
7366        u16 widths, tx, rx;
7367        u8 misc_bits, local_flags;
7368        u16 active_tx, active_rx;
7369
7370        read_vc_local_link_mode(dd, &misc_bits, &local_flags, &widths);
7371        tx = widths >> 12;
7372        rx = (widths >> 8) & 0xf;
7373
7374        *tx_width = link_width_to_bits(dd, tx);
7375        *rx_width = link_width_to_bits(dd, rx);
7376
7377        /* print the active widths */
7378        get_link_widths(dd, &active_tx, &active_rx);
7379}
7380
7381/*
7382 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7383 * hardware information when the link first comes up.
7384 *
7385 * The link width is not available until after VerifyCap.AllFramesReceived
7386 * (the trigger for handle_verify_cap), so this is outside that routine
7387 * and should be called when the 8051 signals linkup.
7388 */
7389void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7390{
7391        u16 tx_width, rx_width;
7392
7393        /* get end-of-LNI link widths */
7394        get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7395
7396        /* use tx_width as the link is supposed to be symmetric on link up */
7397        ppd->link_width_active = tx_width;
7398        /* link width downgrade active (LWD.A) starts out matching LW.A */
7399        ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7400        ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7401        /* per OPA spec, on link up LWD.E resets to LWD.S */
7402        ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7403        /* cache the active egress rate (units [10^6 bits/sec]) */
7404        ppd->current_egress_rate = active_egress_rate(ppd);
7405}
7406
7407/*
7408 * Handle a verify capabilities interrupt from the 8051.
7409 *
7410 * This is a work-queue function outside of the interrupt.
7411 */
7412void handle_verify_cap(struct work_struct *work)
7413{
7414        struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7415                                                                link_vc_work);
7416        struct hfi1_devdata *dd = ppd->dd;
7417        u64 reg;
7418        u8 power_management;
7419        u8 continuous;
7420        u8 vcu;
7421        u8 vau;
7422        u8 z;
7423        u16 vl15buf;
7424        u16 link_widths;
7425        u16 crc_mask;
7426        u16 crc_val;
7427        u16 device_id;
7428        u16 active_tx, active_rx;
7429        u8 partner_supported_crc;
7430        u8 remote_tx_rate;
7431        u8 device_rev;
7432
7433        set_link_state(ppd, HLS_VERIFY_CAP);
7434
7435        lcb_shutdown(dd, 0);
7436        adjust_lcb_for_fpga_serdes(dd);
7437
7438        read_vc_remote_phy(dd, &power_management, &continuous);
7439        read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7440                              &partner_supported_crc);
7441        read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7442        read_remote_device_id(dd, &device_id, &device_rev);
7443
7444        /* print the active widths */
7445        get_link_widths(dd, &active_tx, &active_rx);
7446        dd_dev_info(dd,
7447                    "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7448                    (int)power_management, (int)continuous);
7449        dd_dev_info(dd,
7450                    "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7451                    (int)vau, (int)z, (int)vcu, (int)vl15buf,
7452                    (int)partner_supported_crc);
7453        dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7454                    (u32)remote_tx_rate, (u32)link_widths);
7455        dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7456                    (u32)device_id, (u32)device_rev);
7457        /*
7458         * The peer vAU value just read is the peer receiver value.  HFI does
7459         * not support a transmit vAU of 0 (AU == 8).  We advertised that
7460         * with Z=1 in the fabric capabilities sent to the peer.  The peer
7461         * will see our Z=1, and, if it advertised a vAU of 0, will move its
7462         * receive to vAU of 1 (AU == 16).  Do the same here.  We do not care
7463         * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7464         * subject to the Z value exception.
7465         */
7466        if (vau == 0)
7467                vau = 1;
7468        set_up_vau(dd, vau);
7469
7470        /*
7471         * Set VL15 credits to 0 in global credit register. Cache remote VL15
7472         * credits value and wait for the link-up interrupt to set it.
7473         */
7474        set_up_vl15(dd, 0);
7475        dd->vl15buf_cached = vl15buf;
7476
7477        /* set up the LCB CRC mode */
7478        crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7479
7480        /* order is important: use the lowest bit in common */
7481        if (crc_mask & CAP_CRC_14B)
7482                crc_val = LCB_CRC_14B;
7483        else if (crc_mask & CAP_CRC_48B)
7484                crc_val = LCB_CRC_48B;
7485        else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7486                crc_val = LCB_CRC_12B_16B_PER_LANE;
7487        else
7488                crc_val = LCB_CRC_16B;
7489
7490        dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7491        write_csr(dd, DC_LCB_CFG_CRC_MODE,
7492                  (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7493
7494        /* set (14b only) or clear sideband credit */
7495        reg = read_csr(dd, SEND_CM_CTRL);
7496        if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7497                write_csr(dd, SEND_CM_CTRL,
7498                          reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7499        } else {
7500                write_csr(dd, SEND_CM_CTRL,
7501                          reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7502        }
7503
7504        ppd->link_speed_active = 0;     /* invalid value */
7505        if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
7506                /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7507                switch (remote_tx_rate) {
7508                case 0:
7509                        ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7510                        break;
7511                case 1:
7512                        ppd->link_speed_active = OPA_LINK_SPEED_25G;
7513                        break;
7514                }
7515        } else {
7516                /* actual rate is highest bit of the ANDed rates */
7517                u8 rate = remote_tx_rate & ppd->local_tx_rate;
7518
7519                if (rate & 2)
7520                        ppd->link_speed_active = OPA_LINK_SPEED_25G;
7521                else if (rate & 1)
7522                        ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7523        }
7524        if (ppd->link_speed_active == 0) {
7525                dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7526                           __func__, (int)remote_tx_rate);
7527                ppd->link_speed_active = OPA_LINK_SPEED_25G;
7528        }
7529
7530        /*
7531         * Cache the values of the supported, enabled, and active
7532         * LTP CRC modes to return in 'portinfo' queries. But the bit
7533         * flags that are returned in the portinfo query differ from
7534         * what's in the link_crc_mask, crc_sizes, and crc_val
7535         * variables. Convert these here.
7536         */
7537        ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7538                /* supported crc modes */
7539        ppd->port_ltp_crc_mode |=
7540                cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7541                /* enabled crc modes */
7542        ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7543                /* active crc mode */
7544
7545        /* set up the remote credit return table */
7546        assign_remote_cm_au_table(dd, vcu);
7547
7548        /*
7549         * The LCB is reset on entry to handle_verify_cap(), so this must
7550         * be applied on every link up.
7551         *
7552         * Adjust LCB error kill enable to kill the link if
7553         * these RBUF errors are seen:
7554         *      REPLAY_BUF_MBE_SMASK
7555         *      FLIT_INPUT_BUF_MBE_SMASK
7556         */
7557        if (is_ax(dd)) {                        /* fixed in B0 */
7558                reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7559                reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7560                        | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7561                write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7562        }
7563
7564        /* pull LCB fifos out of reset - all fifo clocks must be stable */
7565        write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7566
7567        /* give 8051 access to the LCB CSRs */
7568        write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7569        set_8051_lcb_access(dd);
7570
7571        /* tell the 8051 to go to LinkUp */
7572        set_link_state(ppd, HLS_GOING_UP);
7573}
7574
7575/**
7576 * apply_link_downgrade_policy - Apply the link width downgrade enabled
7577 * policy against the current active link widths.
7578 * @ppd: info of physical Hfi port
7579 * @refresh_widths: True indicates link downgrade event
7580 * @return: True indicates a successful link downgrade. False indicates
7581 *          link downgrade event failed and the link will bounce back to
7582 *          default link width.
7583 *
7584 * Called when the enabled policy changes or the active link widths
7585 * change.
7586 * Refresh_widths indicates that a link downgrade occurred. The
7587 * link_downgraded variable is set by refresh_widths and
7588 * determines the success/failure of the policy application.
7589 */
7590bool apply_link_downgrade_policy(struct hfi1_pportdata *ppd,
7591                                 bool refresh_widths)
7592{
7593        int do_bounce = 0;
7594        int tries;
7595        u16 lwde;
7596        u16 tx, rx;
7597        bool link_downgraded = refresh_widths;
7598
7599        /* use the hls lock to avoid a race with actual link up */
7600        tries = 0;
7601retry:
7602        mutex_lock(&ppd->hls_lock);
7603        /* only apply if the link is up */
7604        if (ppd->host_link_state & HLS_DOWN) {
7605                /* still going up; wait and retry */
7606                if (ppd->host_link_state & HLS_GOING_UP) {
7607                        if (++tries < 1000) {
7608                                mutex_unlock(&ppd->hls_lock);
7609                                usleep_range(100, 120); /* arbitrary */
7610                                goto retry;
7611                        }
7612                        dd_dev_err(ppd->dd,
7613                                   "%s: giving up waiting for link state change\n",
7614                                   __func__);
7615                }
7616                goto done;
7617        }
7618
7619        lwde = ppd->link_width_downgrade_enabled;
7620
7621        if (refresh_widths) {
7622                get_link_widths(ppd->dd, &tx, &rx);
7623                ppd->link_width_downgrade_tx_active = tx;
7624                ppd->link_width_downgrade_rx_active = rx;
7625        }
7626
7627        if (ppd->link_width_downgrade_tx_active == 0 ||
7628            ppd->link_width_downgrade_rx_active == 0) {
7629                /* the 8051 reported a dead link as a downgrade */
7630                dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7631                link_downgraded = false;
7632        } else if (lwde == 0) {
7633                /* downgrade is disabled */
7634
7635                /* bounce if not at starting active width */
7636                if ((ppd->link_width_active !=
7637                     ppd->link_width_downgrade_tx_active) ||
7638                    (ppd->link_width_active !=
7639                     ppd->link_width_downgrade_rx_active)) {
7640                        dd_dev_err(ppd->dd,
7641                                   "Link downgrade is disabled and link has downgraded, downing link\n");
7642                        dd_dev_err(ppd->dd,
7643                                   "  original 0x%x, tx active 0x%x, rx active 0x%x\n",
7644                                   ppd->link_width_active,
7645                                   ppd->link_width_downgrade_tx_active,
7646                                   ppd->link_width_downgrade_rx_active);
7647                        do_bounce = 1;
7648                        link_downgraded = false;
7649                }
7650        } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7651                   (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7652                /* Tx or Rx is outside the enabled policy */
7653                dd_dev_err(ppd->dd,
7654                           "Link is outside of downgrade allowed, downing link\n");
7655                dd_dev_err(ppd->dd,
7656                           "  enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7657                           lwde, ppd->link_width_downgrade_tx_active,
7658                           ppd->link_width_downgrade_rx_active);
7659                do_bounce = 1;
7660                link_downgraded = false;
7661        }
7662
7663done:
7664        mutex_unlock(&ppd->hls_lock);
7665
7666        if (do_bounce) {
7667                set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7668                                     OPA_LINKDOWN_REASON_WIDTH_POLICY);
7669                set_link_state(ppd, HLS_DN_OFFLINE);
7670                start_link(ppd);
7671        }
7672
7673        return link_downgraded;
7674}
7675
7676/*
7677 * Handle a link downgrade interrupt from the 8051.
7678 *
7679 * This is a work-queue function outside of the interrupt.
7680 */
7681void handle_link_downgrade(struct work_struct *work)
7682{
7683        struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7684                                                        link_downgrade_work);
7685
7686        dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7687        if (apply_link_downgrade_policy(ppd, true))
7688                update_xmit_counters(ppd, ppd->link_width_downgrade_tx_active);
7689}
7690
7691static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7692{
7693        return flag_string(buf, buf_len, flags, dcc_err_flags,
7694                ARRAY_SIZE(dcc_err_flags));
7695}
7696
7697static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7698{
7699        return flag_string(buf, buf_len, flags, lcb_err_flags,
7700                ARRAY_SIZE(lcb_err_flags));
7701}
7702
7703static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7704{
7705        return flag_string(buf, buf_len, flags, dc8051_err_flags,
7706                ARRAY_SIZE(dc8051_err_flags));
7707}
7708
7709static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7710{
7711        return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7712                ARRAY_SIZE(dc8051_info_err_flags));
7713}
7714
7715static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7716{
7717        return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7718                ARRAY_SIZE(dc8051_info_host_msg_flags));
7719}
7720
7721static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7722{
7723        struct hfi1_pportdata *ppd = dd->pport;
7724        u64 info, err, host_msg;
7725        int queue_link_down = 0;
7726        char buf[96];
7727
7728        /* look at the flags */
7729        if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7730                /* 8051 information set by firmware */
7731                /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7732                info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7733                err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7734                        & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7735                host_msg = (info >>
7736                        DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7737                        & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7738
7739                /*
7740                 * Handle error flags.
7741                 */
7742                if (err & FAILED_LNI) {
7743                        /*
7744                         * LNI error indications are cleared by the 8051
7745                         * only when starting polling.  Only pay attention
7746                         * to them when in the states that occur during
7747                         * LNI.
7748                         */
7749                        if (ppd->host_link_state
7750                            & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7751                                queue_link_down = 1;
7752                                dd_dev_info(dd, "Link error: %s\n",
7753                                            dc8051_info_err_string(buf,
7754                                                                   sizeof(buf),
7755                                                                   err &
7756                                                                   FAILED_LNI));
7757                        }
7758                        err &= ~(u64)FAILED_LNI;
7759                }
7760                /* unknown frames can happen during LNI, just count */
7761                if (err & UNKNOWN_FRAME) {
7762                        ppd->unknown_frame_count++;
7763                        err &= ~(u64)UNKNOWN_FRAME;
7764                }
7765                if (err) {
7766                        /* report remaining errors, but do not do anything */
7767                        dd_dev_err(dd, "8051 info error: %s\n",
7768                                   dc8051_info_err_string(buf, sizeof(buf),
7769                                                          err));
7770                }
7771
7772                /*
7773                 * Handle host message flags.
7774                 */
7775                if (host_msg & HOST_REQ_DONE) {
7776                        /*
7777                         * Presently, the driver does a busy wait for
7778                         * host requests to complete.  This is only an
7779                         * informational message.
7780                         * NOTE: The 8051 clears the host message
7781                         * information *on the next 8051 command*.
7782                         * Therefore, when linkup is achieved,
7783                         * this flag will still be set.
7784                         */
7785                        host_msg &= ~(u64)HOST_REQ_DONE;
7786                }
7787                if (host_msg & BC_SMA_MSG) {
7788                        queue_work(ppd->link_wq, &ppd->sma_message_work);
7789                        host_msg &= ~(u64)BC_SMA_MSG;
7790                }
7791                if (host_msg & LINKUP_ACHIEVED) {
7792                        dd_dev_info(dd, "8051: Link up\n");
7793                        queue_work(ppd->link_wq, &ppd->link_up_work);
7794                        host_msg &= ~(u64)LINKUP_ACHIEVED;
7795                }
7796                if (host_msg & EXT_DEVICE_CFG_REQ) {
7797                        handle_8051_request(ppd);
7798                        host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7799                }
7800                if (host_msg & VERIFY_CAP_FRAME) {
7801                        queue_work(ppd->link_wq, &ppd->link_vc_work);
7802                        host_msg &= ~(u64)VERIFY_CAP_FRAME;
7803                }
7804                if (host_msg & LINK_GOING_DOWN) {
7805                        const char *extra = "";
7806                        /* no downgrade action needed if going down */
7807                        if (host_msg & LINK_WIDTH_DOWNGRADED) {
7808                                host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7809                                extra = " (ignoring downgrade)";
7810                        }
7811                        dd_dev_info(dd, "8051: Link down%s\n", extra);
7812                        queue_link_down = 1;
7813                        host_msg &= ~(u64)LINK_GOING_DOWN;
7814                }
7815                if (host_msg & LINK_WIDTH_DOWNGRADED) {
7816                        queue_work(ppd->link_wq, &ppd->link_downgrade_work);
7817                        host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7818                }
7819                if (host_msg) {
7820                        /* report remaining messages, but do not do anything */
7821                        dd_dev_info(dd, "8051 info host message: %s\n",
7822                                    dc8051_info_host_msg_string(buf,
7823                                                                sizeof(buf),
7824                                                                host_msg));
7825                }
7826
7827                reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7828        }
7829        if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7830                /*
7831                 * Lost the 8051 heartbeat.  If this happens, we
7832                 * receive constant interrupts about it.  Disable
7833                 * the interrupt after the first.
7834                 */
7835                dd_dev_err(dd, "Lost 8051 heartbeat\n");
7836                write_csr(dd, DC_DC8051_ERR_EN,
7837                          read_csr(dd, DC_DC8051_ERR_EN) &
7838                          ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7839
7840                reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7841        }
7842        if (reg) {
7843                /* report the error, but do not do anything */
7844                dd_dev_err(dd, "8051 error: %s\n",
7845                           dc8051_err_string(buf, sizeof(buf), reg));
7846        }
7847
7848        if (queue_link_down) {
7849                /*
7850                 * If the link is already going down or disabled, do not
7851                 * queue a link down request.  If one is already queued,
7852                 * do not queue another.
7853                 */
7854                if ((ppd->host_link_state &
7855                    (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7856                    ppd->link_enabled == 0) {
7857                        dd_dev_info(dd, "%s: not queuing link down. host_link_state %x, link_enabled %x\n",
7858                                    __func__, ppd->host_link_state,
7859                                    ppd->link_enabled);
7860                } else {
7861                        if (xchg(&ppd->is_link_down_queued, 1) == 1)
7862                                dd_dev_info(dd,
7863                                            "%s: link down request already queued\n",
7864                                            __func__);
7865                        else
7866                                queue_work(ppd->link_wq, &ppd->link_down_work);
7867                }
7868        }
7869}
7870
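/*
 * Descriptions for the DCC FMConfig error codes reported in
 * DCC_ERR_INFO_FMCONFIG; code 7 is not used.
 */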
7871static const char * const fm_config_txt[] = {
7872[0] =
7873        "BadHeadDist: Distance violation between two head flits",
7874[1] =
7875        "BadTailDist: Distance violation between two tail flits",
7876[2] =
7877        "BadCtrlDist: Distance violation between two credit control flits",
7878[3] =
7879        "BadCrdAck: Credits return for unsupported VL",
7880[4] =
7881        "UnsupportedVLMarker: Received VL Marker",
7882[5] =
7883        "BadPreempt: Exceeded the preemption nesting level",
7884[6] =
7885        "BadControlFlit: Received unsupported control flit",
7886/* no 7 */
7887[8] =
7888        "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7889};
7890
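/*
 * Descriptions for the DCC PortRcv error codes reported in
 * DCC_ERR_INFO_PORTRCV.
 */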
7891static const char * const port_rcv_txt[] = {
7892[1] =
7893        "BadPktLen: Illegal PktLen",
7894[2] =
7895        "PktLenTooLong: Packet longer than PktLen",
7896[3] =
7897        "PktLenTooShort: Packet shorter than PktLen",
7898[4] =
7899        "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7900[5] =
7901        "BadDLID: Illegal DLID (0, doesn't match HFI)",
7902[6] =
7903        "BadL2: Illegal L2 opcode",
7904[7] =
7905        "BadSC: Unsupported SC",
7906[9] =
7907        "BadRC: Illegal RC",
7908[11] =
7909        "PreemptError: Preempting with same VL",
7910[12] =
7911        "PreemptVL15: Preempting a VL15 packet",
7912};
7913
7914#define OPA_LDR_FMCONFIG_OFFSET 16
7915#define OPA_LDR_PORTRCV_OFFSET 0
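/*
 * Handle a DCC error interrupt.
 *
 * Record the first uncorrectable, FMConfig, and PortRcv error details so
 * they can be reported later, log the errors (rate limited), and bounce
 * the link when the PortErrorAction mask requests it.  The OPA_LDR_*
 * offsets map an FMConfig/PortRcv error code to its bit position within
 * PortErrorAction.
 */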
7916static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7917{
7918        u64 info, hdr0, hdr1;
7919        const char *extra;
7920        char buf[96];
7921        struct hfi1_pportdata *ppd = dd->pport;
7922        u8 lcl_reason = 0;
7923        int do_bounce = 0;
7924
7925        if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7926                if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7927                        info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7928                        dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7929                        /* set status bit */
7930                        dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7931                }
7932                reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7933        }
7934
7935        if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7936                struct hfi1_pportdata *ppd = dd->pport;
7937                /* this counter saturates at (2^32) - 1 */
7938                if (ppd->link_downed < (u32)UINT_MAX)
7939                        ppd->link_downed++;
7940                reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7941        }
7942
7943        if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7944                u8 reason_valid = 1;
7945
7946                info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7947                if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7948                        dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7949                        /* set status bit */
7950                        dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7951                }
7952                switch (info) {
7953                case 0:
7954                case 1:
7955                case 2:
7956                case 3:
7957                case 4:
7958                case 5:
7959                case 6:
7960                        extra = fm_config_txt[info];
7961                        break;
7962                case 8:
7963                        extra = fm_config_txt[info];
7964                        if (ppd->port_error_action &
7965                            OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7966                                do_bounce = 1;
7967                                /*
7968                                 * lcl_reason cannot be derived from info
7969                                 * for this error
7970                                 */
7971                                lcl_reason =
7972                                  OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7973                        }
7974                        break;
7975                default:
7976                        reason_valid = 0;
7977                        snprintf(buf, sizeof(buf), "reserved%lld", info);
7978                        extra = buf;
7979                        break;
7980                }
7981
7982                if (reason_valid && !do_bounce) {
7983                        do_bounce = ppd->port_error_action &
7984                                        (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7985                        lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7986                }
7987
7988                /* just report this */
7989                dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
7990                                        extra);
7991                reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7992        }
7993
7994        if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7995                u8 reason_valid = 1;
7996
7997                info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7998                hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7999                hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
8000                if (!(dd->err_info_rcvport.status_and_code &
8001                      OPA_EI_STATUS_SMASK)) {
8002                        dd->err_info_rcvport.status_and_code =
8003                                info & OPA_EI_CODE_SMASK;
8004                        /* set status bit */
8005                        dd->err_info_rcvport.status_and_code |=
8006                                OPA_EI_STATUS_SMASK;
8007                        /*
8008                         * save first 2 flits in the packet that caused
8009                         * the error
8010                         */
8011                        dd->err_info_rcvport.packet_flit1 = hdr0;
8012                        dd->err_info_rcvport.packet_flit2 = hdr1;
8013                }
8014                switch (info) {
8015                case 1:
8016                case 2:
8017                case 3:
8018                case 4:
8019                case 5:
8020                case 6:
8021                case 7:
8022                case 9:
8023                case 11:
8024                case 12:
8025                        extra = port_rcv_txt[info];
8026                        break;
8027                default:
8028                        reason_valid = 0;
8029                        snprintf(buf, sizeof(buf), "reserved%lld", info);
8030                        extra = buf;
8031                        break;
8032                }
8033
8034                if (reason_valid && !do_bounce) {
8035                        do_bounce = ppd->port_error_action &
8036                                        (1 << (OPA_LDR_PORTRCV_OFFSET + info));
8037                        lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
8038                }
8039
8040                /* just report this */
8041                dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
8042                                        "               hdr0 0x%llx, hdr1 0x%llx\n",
8043                                        extra, hdr0, hdr1);
8044
8045                reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
8046        }
8047
8048        if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
8049                /* informative only */
8050                dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
8051                reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
8052        }
8053        if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
8054                /* informative only */
8055                dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
8056                reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
8057        }
8058
8059        if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
8060                reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK;
8061
8062        /* report any remaining errors */
8063        if (reg)
8064                dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
8065                                        dcc_err_string(buf, sizeof(buf), reg));
8066
8067        if (lcl_reason == 0)
8068                lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
8069
8070        if (do_bounce) {
8071                dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
8072                                        __func__);
8073                set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
8074                queue_work(ppd->link_wq, &ppd->link_bounce_work);
8075        }
8076}
8077
8078static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
8079{
8080        char buf[96];
8081
8082        dd_dev_info(dd, "LCB Error: %s\n",
8083                    lcb_err_string(buf, sizeof(buf), reg));
8084}
8085
8086/*
8087 * CCE block DC interrupt.  Source is < 8.
8088 */
8089static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
8090{
8091        const struct err_reg_info *eri = &dc_errs[source];
8092
8093        if (eri->handler) {
8094                interrupt_clear_down(dd, 0, eri);
8095        } else if (source == 3 /* dc_lbm_int */) {
8096                /*
8097                 * This indicates that a parity error has occurred on the
8098                 * address/control lines presented to the LBM.  The error
8099                 * is a single pulse, there is no associated error flag,
8100                 * and it is non-maskable.  This is because if a parity
8101                 * error occurs on the request, the request is dropped.
8102                 * This should never occur, but it is nice to know if it
8103                 * ever does.
8104                 */
8105                dd_dev_err(dd, "Parity error in DC LBM block\n");
8106        } else {
8107                dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
8108        }
8109}
8110
8111/*
8112 * TX block send credit interrupt.  Source is < 160.
8113 */
8114static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
8115{
8116        sc_group_release_update(dd, source);
8117}
8118
8119/*
8120 * TX block SDMA interrupt.  Source is < 48.
8121 *
8122 * SDMA interrupts are grouped by type:
8123 *
8124 *       0 -  N-1 = SDma
8125 *       N - 2N-1 = SDmaProgress
8126 *      2N - 3N-1 = SDmaIdle
8127 */
8128static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
8129{
8130        /* what interrupt */
8131        unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
8132        /* which engine */
8133        unsigned int which = source % TXE_NUM_SDMA_ENGINES;
8134
8135#ifdef CONFIG_SDMA_VERBOSITY
8136        dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
8137                   slashstrip(__FILE__), __LINE__, __func__);
8138        sdma_dumpstate(&dd->per_sdma[which]);
8139#endif
8140
8141        if (likely(what < 3 && which < dd->num_sdma)) {
8142                sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
8143        } else {
8144                /* should not happen */
8145                dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
8146        }
8147}
8148
8149/**
8150 * is_rcv_avail_int() - User receive context available IRQ handler
8151 * @dd: valid dd
8152 * @source: logical IRQ source (offset from IS_RCVAVAIL_START)
8153 *
8154 * RX block receive available interrupt.  Source is < 160.
8155 *
8156 * This is the general interrupt handler for user (PSM) receive contexts,
8157 * and can only be used for non-threaded IRQs.
8158 */
8159static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
8160{
8161        struct hfi1_ctxtdata *rcd;
8162        char *err_detail;
8163
8164        if (likely(source < dd->num_rcv_contexts)) {
8165                rcd = hfi1_rcd_get_by_index(dd, source);
8166                if (rcd) {
8167                        handle_user_interrupt(rcd);
8168                        hfi1_rcd_put(rcd);
8169                        return; /* OK */
8170                }
8171                /* received an interrupt, but no rcd */
8172                err_detail = "dataless";
8173        } else {
8174                /* received an interrupt, but are not using that context */
8175                err_detail = "out of range";
8176        }
8177        dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
8178                   err_detail, source);
8179}
8180
8181/**
8182 * is_rcv_urgent_int() - User receive context urgent IRQ handler
8183 * @dd: valid dd
8184 * @source: logical IRQ source (offset from IS_RCVURGENT_START)
8185 *
8186 * RX block receive urgent interrupt.  Source is < 160.
8187 *
8188 * NOTE: kernel receive contexts specifically do NOT enable this IRQ.
8189 */
8190static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8191{
8192        struct hfi1_ctxtdata *rcd;
8193        char *err_detail;
8194
8195        if (likely(source < dd->num_rcv_contexts)) {
8196                rcd = hfi1_rcd_get_by_index(dd, source);
8197                if (rcd) {
8198                        handle_user_interrupt(rcd);
8199                        hfi1_rcd_put(rcd);
8200                        return; /* OK */
8201                }
8202                /* received an interrupt, but no rcd */
8203                err_detail = "dataless";
8204        } else {
8205                /* received an interrupt, but are not using that context */
8206                err_detail = "out of range";
8207        }
8208        dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
8209                   err_detail, source);
8210}
8211
8212/*
8213 * Reserved range interrupt.  Should not be called in normal operation.
8214 */
8215static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8216{
8217        char name[64];
8218
8219        dd_dev_err(dd, "unexpected %s interrupt\n",
8220                   is_reserved_name(name, sizeof(name), source));
8221}
8222
8223static const struct is_table is_table[] = {
8224/*
8225 * start                 end
8226 *                              name func               interrupt func
8227 */
8228{ IS_GENERAL_ERR_START,  IS_GENERAL_ERR_END,
8229                                is_misc_err_name,       is_misc_err_int },
8230{ IS_SDMAENG_ERR_START,  IS_SDMAENG_ERR_END,
8231                                is_sdma_eng_err_name,   is_sdma_eng_err_int },
8232{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8233                                is_sendctxt_err_name,   is_sendctxt_err_int },
8234{ IS_SDMA_START,             IS_SDMA_END,
8235                                is_sdma_eng_name,       is_sdma_eng_int },
8236{ IS_VARIOUS_START,          IS_VARIOUS_END,
8237                                is_various_name,        is_various_int },
8238{ IS_DC_START,       IS_DC_END,
8239                                is_dc_name,             is_dc_int },
8240{ IS_RCVAVAIL_START,     IS_RCVAVAIL_END,
8241                                is_rcv_avail_name,      is_rcv_avail_int },
8242{ IS_RCVURGENT_START,    IS_RCVURGENT_END,
8243                                is_rcv_urgent_name,     is_rcv_urgent_int },
8244{ IS_SENDCREDIT_START,   IS_SENDCREDIT_END,
8245                                is_send_credit_name,    is_send_credit_int},
8246{ IS_RESERVED_START,     IS_RESERVED_END,
8247                                is_reserved_name,       is_reserved_int},
8248};
8249
8250/*
8251 * Interrupt source interrupt - called when the given source has an interrupt.
8252 * Source is a bit index into an array of 64-bit integers.
8253 */
8254static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8255{
8256        const struct is_table *entry;
8257
8258        /* avoids a double compare by walking the table in-order */
8259        for (entry = &is_table[0]; entry->is_name; entry++) {
8260                if (source < entry->end) {
8261                        trace_hfi1_interrupt(dd, entry, source);
8262                        entry->is_int(dd, source - entry->start);
8263                        return;
8264                }
8265        }
8266        /* fell off the end */
8267        dd_dev_err(dd, "invalid interrupt source %u\n", source);
8268}
8269
8270/**
8271 * general_interrupt() - General interrupt handler
8272 * @irq: MSIx IRQ vector
8273 * @data: hfi1 devdata
8274 *
8275 * This is able to correctly handle all non-threaded interrupts.  Receive
8276 * context DATA IRQs are threaded and are not supported by this handler.
8277 *
8278 */
8279static irqreturn_t general_interrupt(int irq, void *data)
8280{
8281        struct hfi1_devdata *dd = data;
8282        u64 regs[CCE_NUM_INT_CSRS];
8283        u32 bit;
8284        int i;
8285        irqreturn_t handled = IRQ_NONE;
8286
8287        this_cpu_inc(*dd->int_counter);
8288
8289        /* phase 1: scan and clear all handled interrupts */
8290        for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8291                if (dd->gi_mask[i] == 0) {
8292                        regs[i] = 0;    /* used later */
8293                        continue;
8294                }
8295                regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8296                                dd->gi_mask[i];
8297                /* only clear if anything is set */
8298                if (regs[i])
8299                        write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8300        }
8301
8302        /* phase 2: call the appropriate handler */
8303        for_each_set_bit(bit, (unsigned long *)&regs[0],
8304                         CCE_NUM_INT_CSRS * 64) {
8305                is_interrupt(dd, bit);
8306                handled = IRQ_HANDLED;
8307        }
8308
8309        return handled;
8310}
8311
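/*
 * Per-engine SDMA IRQ handler.  Read and clear this engine's bits in the
 * CCE interrupt status CSR, then pass the status on to the SDMA engine
 * interrupt handler.
 */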
8312static irqreturn_t sdma_interrupt(int irq, void *data)
8313{
8314        struct sdma_engine *sde = data;
8315        struct hfi1_devdata *dd = sde->dd;
8316        u64 status;
8317
8318#ifdef CONFIG_SDMA_VERBOSITY
8319        dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8320                   slashstrip(__FILE__), __LINE__, __func__);
8321        sdma_dumpstate(sde);
8322#endif
8323
8324        this_cpu_inc(*dd->int_counter);
8325
8326        /* This read_csr is really bad in the hot path */
8327        status = read_csr(dd,
8328                          CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8329                          & sde->imask;
8330        if (likely(status)) {
8331                /* clear the interrupt(s) */
8332                write_csr(dd,
8333                          CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8334                          status);
8335
8336                /* handle the interrupt(s) */
8337                sdma_engine_interrupt(sde, status);
8338        } else {
8339                dd_dev_info_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n",
8340                                        sde->this_idx);
8341        }
8342        return IRQ_HANDLED;
8343}
8344
8345/*
8346 * Clear the receive interrupt.  Use a read of the interrupt clear CSR
8347 * to ensure that the write completed.  This does NOT guarantee that
8348 * queued DMA writes to memory from the chip are pushed.
8349 */
8350static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8351{
8352        struct hfi1_devdata *dd = rcd->dd;
8353        u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8354
8355        mmiowb();       /* make sure everything before is written */
8356        write_csr(dd, addr, rcd->imask);
8357        /* force the above write on the chip and get a value back */
8358        (void)read_csr(dd, addr);
8359}
8360
8361/* force the receive interrupt */
8362void force_recv_intr(struct hfi1_ctxtdata *rcd)
8363{
8364        write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8365}
8366
8367/*
8368 * Return non-zero if a packet is present.
8369 *
8370 * This routine is called when rechecking for packets after the RcvAvail
8371 * interrupt has been cleared down.  First, do a quick check of memory for
8372 * a packet present.  If not found, use an expensive CSR read of the context
8373 * tail to determine the actual tail.  The CSR read is necessary because there
8374 * is no method to push pending DMAs to memory other than an interrupt and we
8375 * are trying to determine if we need to force an interrupt.
8376 */
8377static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8378{
8379        u32 tail;
8380        int present;
8381
8382        if (!rcd->rcvhdrtail_kvaddr)
8383                present = (rcd->seq_cnt ==
8384                                rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8385        else /* is RDMA rtail */
8386                present = (rcd->head != get_rcvhdrtail(rcd));
8387
8388        if (present)
8389                return 1;
8390
8391        /* fall back to a CSR read, correct independent of DMA_RTAIL */
8392        tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8393        return rcd->head != tail;
8394}
8395
8396/*
8397 * Receive packet IRQ handler.  This routine expects to be on its own IRQ.
8398 * This routine will try to handle packets immediately (latency), but if
8399 * it finds too many, it will invoke the thread handler (bandwidth).  The
8400 * chip receive interrupt is *not* cleared down until this or the thread (if
8401 * invoked) is finished.  The intent is to avoid extra interrupts while we
8402 * are processing packets anyway.
8403 */
8404static irqreturn_t receive_context_interrupt(int irq, void *data)
8405{
8406        struct hfi1_ctxtdata *rcd = data;
8407        struct hfi1_devdata *dd = rcd->dd;
8408        int disposition;
8409        int present;
8410
8411        trace_hfi1_receive_interrupt(dd, rcd);
8412        this_cpu_inc(*dd->int_counter);
8413        aspm_ctx_disable(rcd);
8414
8415        /* receive interrupt remains blocked while processing packets */
8416        disposition = rcd->do_interrupt(rcd, 0);
8417
8418        /*
8419         * Too many packets were seen while processing packets in this
8420         * IRQ handler.  Invoke the handler thread.  The receive interrupt
8421         * remains blocked.
8422         */
8423        if (disposition == RCV_PKT_LIMIT)
8424                return IRQ_WAKE_THREAD;
8425
8426        /*
8427         * The packet processor detected no more packets.  Clear the receive
8428         * interrupt and recheck for a packet that may have arrived
8429         * after the previous check and interrupt clear.  If a packet arrived,
8430         * force another interrupt.
8431         */
8432        clear_recv_intr(rcd);
8433        present = check_packet_present(rcd);
8434        if (present)
8435                force_recv_intr(rcd);
8436
8437        return IRQ_HANDLED;
8438}
8439
8440/*
8441 * Receive packet thread handler.  This expects to be invoked with the
8442 * receive interrupt still blocked.
8443 */
8444static irqreturn_t receive_context_thread(int irq, void *data)
8445{
8446        struct hfi1_ctxtdata *rcd = data;
8447        int present;
8448
8449        /* receive interrupt is still blocked from the IRQ handler */
8450        (void)rcd->do_interrupt(rcd, 1);
8451
8452        /*
8453         * The packet processor will only return if it detected no more
8454         * packets.  Hold IRQs here so we can safely clear the interrupt and
8455         * recheck for a packet that may have arrived after the previous
8456         * check and the interrupt clear.  If a packet arrived, force another
8457         * interrupt.
8458         */
8459        local_irq_disable();
8460        clear_recv_intr(rcd);
8461        present = check_packet_present(rcd);
8462        if (present)
8463                force_recv_intr(rcd);
8464        local_irq_enable();
8465
8466        return IRQ_HANDLED;
8467}
8468
8469/* ========================================================================= */
8470
8471u32 read_physical_state(struct hfi1_devdata *dd)
8472{
8473        u64 reg;
8474
8475        reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8476        return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8477                                & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8478}
8479
8480u32 read_logical_state(struct hfi1_devdata *dd)
8481{
8482        u64 reg;
8483
8484        reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8485        return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8486                                & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8487}
8488
8489static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8490{
8491        u64 reg;
8492
8493        reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8494        /* clear current state, set new state */
8495        reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8496        reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8497        write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8498}
8499
8500/*
8501 * Use the 8051 to read an LCB CSR.
8502 */
8503static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8504{
8505        u32 regno;
8506        int ret;
8507
8508        if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8509                if (acquire_lcb_access(dd, 0) == 0) {
8510                        *data = read_csr(dd, addr);
8511                        release_lcb_access(dd, 0);
8512                        return 0;
8513                }
8514                return -EBUSY;
8515        }
8516
8517        /* register is an index of LCB registers: (offset - base) / 8 */
8518        regno = (addr - DC_LCB_CFG_RUN) >> 3;
8519        ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8520        if (ret != HCMD_SUCCESS)
8521                return -EBUSY;
8522        return 0;
8523}
8524
8525/*
8526 * Provide a cache for some of the LCB registers in case the LCB is
8527 * unavailable.
8528 * (The LCB is unavailable in certain link states, for example.)
8529 */
8530struct lcb_datum {
8531        u32 off;
8532        u64 val;
8533};
8534
8535static struct lcb_datum lcb_cache[] = {
8536        { DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0},
8537        { DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 },
8538        { DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 },
8539};
8540
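/*
 * Refresh the cached LCB values.  Entries that cannot be read right now
 * (-EBUSY) keep their previously cached value.
 */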
8541static void update_lcb_cache(struct hfi1_devdata *dd)
8542{
8543        int i;
8544        int ret;
8545        u64 val;
8546
8547        for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8548                ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
8549
8550                /* Update if we get good data */
8551                if (likely(ret != -EBUSY))
8552                        lcb_cache[i].val = val;
8553        }
8554}
8555
8556static int read_lcb_cache(u32 off, u64 *val)
8557{
8558        int i;
8559
8560        for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8561                if (lcb_cache[i].off == off) {
8562                        *val = lcb_cache[i].val;
8563                        return 0;
8564                }
8565        }
8566
8567        pr_warn("%s bad offset 0x%x\n", __func__, off);
8568        return -1;
8569}
8570
8571/*
8572 * Read an LCB CSR.  Access may not be in host control, so check.
8573 * Return 0 on success, -EBUSY on failure.
8574 */
8575int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8576{
8577        struct hfi1_pportdata *ppd = dd->pport;
8578
8579        /* if up, go through the 8051 for the value */
8580        if (ppd->host_link_state & HLS_UP)
8581                return read_lcb_via_8051(dd, addr, data);
8582        /* if going up or down, check the cache, otherwise, no access */
8583        if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
8584                if (read_lcb_cache(addr, data))
8585                        return -EBUSY;
8586                return 0;
8587        }
8588
8589        /* otherwise, host has access */
8590        *data = read_csr(dd, addr);
8591        return 0;
8592}
8593
8594/*
8595 * Use the 8051 to write an LCB CSR.
8596 */
8597static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8598{
8599        u32 regno;
8600        int ret;
8601
8602        if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8603            (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
8604                if (acquire_lcb_access(dd, 0) == 0) {
8605                        write_csr(dd, addr, data);
8606                        release_lcb_access(dd, 0);
8607                        return 0;
8608                }
8609                return -EBUSY;
8610        }
8611
8612        /* register is an index of LCB registers: (offset - base) / 8 */
8613        regno = (addr - DC_LCB_CFG_RUN) >> 3;
8614        ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8615        if (ret != HCMD_SUCCESS)
8616                return -EBUSY;
8617        return 0;
8618}
8619
8620/*
8621 * Write an LCB CSR.  Access may not be in host control, so check.
8622 * Return 0 on success, -EBUSY on failure.
8623 */
8624int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8625{
8626        struct hfi1_pportdata *ppd = dd->pport;
8627
8628        /* if up, go through the 8051 for the value */
8629        if (ppd->host_link_state & HLS_UP)
8630                return write_lcb_via_8051(dd, addr, data);
8631        /* if going up or down, no access */
8632        if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8633                return -EBUSY;
8634        /* otherwise, host has access */
8635        write_csr(dd, addr, data);
8636        return 0;
8637}
8638
8639/*
8640 * Returns:
8641 *      < 0 = Linux error, not able to get access
8642 *      > 0 = 8051 command RETURN_CODE
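 *
 * Access to the 8051 command interface is serialized by dd->dc8051_lock.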
8643 */
8644static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
8645                           u64 *out_data)
8646{
8647        u64 reg, completed;
8648        int return_code;
8649        unsigned long timeout;
8650
8651        hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8652
8653        mutex_lock(&dd->dc8051_lock);
8654
8655        /* We can't send any commands to the 8051 if it's in reset */
8656        if (dd->dc_shutdown) {
8657                return_code = -ENODEV;
8658                goto fail;
8659        }
8660
8661        /*
8662         * If an 8051 host command timed out previously, then the 8051 is
8663         * stuck.
8664         *
8665         * On first timeout, attempt to reset and restart the entire DC
8666         * block (including 8051). (Is this too big of a hammer?)
8667         *
8668         * If the 8051 times out a second time, the reset did not bring it
8669         * back to healthy life. In that case, fail any subsequent commands.
8670         */
8671        if (dd->dc8051_timed_out) {
8672                if (dd->dc8051_timed_out > 1) {
8673                        dd_dev_err(dd,
8674                                   "Previous 8051 host command timed out, skipping command %u\n",
8675                                   type);
8676                        return_code = -ENXIO;
8677                        goto fail;
8678                }
8679                _dc_shutdown(dd);
8680                _dc_start(dd);
8681        }
8682
8683        /*
8684         * If there is no timeout, then the 8051 command interface is
8685         * waiting for a command.
8686         */
8687
8688        /*
8689         * When writing an LCB CSR, out_data contains the full value
8690         * to be written, while in_data contains the relative LCB
8691         * address in 7:0.  Do the work here, rather than in the caller,
8692         * of distributing the write data to where it needs to go:
8693         *
8694         * Write data
8695         *   39:00 -> in_data[47:8]
8696         *   47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8697         *   63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8698         */
8699        if (type == HCMD_WRITE_LCB_CSR) {
8700                in_data |= ((*out_data) & 0xffffffffffull) << 8;
8701                /* must preserve COMPLETED - it is tied to hardware */
8702                reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
8703                reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
8704                reg |= ((((*out_data) >> 40) & 0xff) <<
8705                                DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8706                      | ((((*out_data) >> 48) & 0xffff) <<
8707                                DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8708                write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8709        }
8710
8711        /*
8712         * Do two writes: the first to stabilize the type and req_data, the
8713         * second to activate.
8714         */
8715        reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8716                        << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8717                | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8718                        << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8719        write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8720        reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8721        write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8722
8723        /* wait for completion, alternate: interrupt */
8724        timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8725        while (1) {
8726                reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8727                completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8728                if (completed)
8729                        break;
8730                if (time_after(jiffies, timeout)) {
8731                        dd->dc8051_timed_out++;
8732                        dd_dev_err(dd, "8051 host command %u timeout\n", type);
8733                        if (out_data)
8734                                *out_data = 0;
8735                        return_code = -ETIMEDOUT;
8736                        goto fail;
8737                }
8738                udelay(2);
8739        }
8740
8741        if (out_data) {
8742                *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8743                                & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8744                if (type == HCMD_READ_LCB_CSR) {
8745                        /* top 16 bits are in a different register */
8746                        *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8747                                & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8748                                << (48
8749                                    - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8750                }
8751        }
8752        return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8753                                & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8754        dd->dc8051_timed_out = 0;
8755        /*
8756         * Clear command for next user.
8757         */
8758        write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8759
8760fail:
8761        mutex_unlock(&dd->dc8051_lock);
8762        return return_code;
8763}
8764
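/* ask the 8051 to move the physical link to the given state */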
8765static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8766{
8767        return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8768}
8769
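/*
 * Write a 32-bit configuration value into the given 8051 field and lane
 * via the LOAD_CONFIG_DATA host command.
 */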
8770int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8771                     u8 lane_id, u32 config_data)
8772{
8773        u64 data;
8774        int ret;
8775
8776        data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8777                | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8778                | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8779        ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8780        if (ret != HCMD_SUCCESS) {
8781                dd_dev_err(dd,
8782                           "load 8051 config: field id %d, lane %d, err %d\n",
8783                           (int)field_id, (int)lane_id, ret);
8784        }
8785        return ret;
8786}
8787
8788/*
8789 * Read the 8051 firmware "registers".  Use the RAM directly.  Always
8790 * set the result, even on error.
8791 * Return 0 on success, -errno on failure
8792 */
8793int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8794                     u32 *result)
8795{
8796        u64 big_data;
8797        u32 addr;
8798        int ret;
8799
8800        /* address start depends on the lane_id */
8801        if (lane_id < 4)
8802                addr = (4 * NUM_GENERAL_FIELDS)
8803                        + (lane_id * 4 * NUM_LANE_FIELDS);
8804        else
8805                addr = 0;
8806        addr += field_id * 4;
8807
8808        /* read is in 8-byte chunks, hardware will truncate the address down */
8809        ret = read_8051_data(dd, addr, 8, &big_data);
8810
8811        if (ret == 0) {
8812                /* extract the 4 bytes we want */
8813                if (addr & 0x4)
8814                        *result = (u32)(big_data >> 32);
8815                else
8816                        *result = (u32)big_data;
8817        } else {
8818                *result = 0;
8819                dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8820                           __func__, lane_id, field_id);
8821        }
8822
8823        return ret;
8824}
8825
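/*
 * The helpers below pack and unpack 8051 configuration frames (verify
 * capability, device ID, link mode, TX settings, etc.) using the shift
 * and mask definitions for each field.
 */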
8826static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8827                              u8 continuous)
8828{
8829        u32 frame;
8830
8831        frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8832                | power_management << POWER_MANAGEMENT_SHIFT;
8833        return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8834                                GENERAL_CONFIG, frame);
8835}
8836
8837static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8838                                 u16 vl15buf, u8 crc_sizes)
8839{
8840        u32 frame;
8841
8842        frame = (u32)vau << VAU_SHIFT
8843                | (u32)z << Z_SHIFT
8844                | (u32)vcu << VCU_SHIFT
8845                | (u32)vl15buf << VL15BUF_SHIFT
8846                | (u32)crc_sizes << CRC_SIZES_SHIFT;
8847        return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8848                                GENERAL_CONFIG, frame);
8849}
8850
8851static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
8852                                    u8 *flag_bits, u16 *link_widths)
8853{
8854        u32 frame;
8855
8856        read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
8857                         &frame);
8858        *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8859        *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8860        *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8861}
8862
8863static int write_vc_local_link_mode(struct hfi1_devdata *dd,
8864                                    u8 misc_bits,
8865                                    u8 flag_bits,
8866                                    u16 link_widths)
8867{
8868        u32 frame;
8869
8870        frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8871                | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8872                | (u32)link_widths << LINK_WIDTH_SHIFT;
8873        return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
8874                     frame);
8875}
8876
8877static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8878                                 u8 device_rev)
8879{
8880        u32 frame;
8881
8882        frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8883                | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8884        return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8885}
8886
8887static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8888                                  u8 *device_rev)
8889{
8890        u32 frame;
8891
8892        read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8893        *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8894        *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8895                        & REMOTE_DEVICE_REV_MASK;
8896}
8897
8898int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
8899{
8900        u32 frame;
8901        u32 mask;
8902
8903        mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
8904        read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
8905        /* Clear, then set field */
8906        frame &= ~mask;
8907        frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
8908        return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
8909                                frame);
8910}
8911
8912void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
8913                      u8 *ver_patch)
8914{
8915        u32 frame;
8916
8917        read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8918        *ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) &
8919                STS_FM_VERSION_MAJOR_MASK;
8920        *ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) &
8921                STS_FM_VERSION_MINOR_MASK;
8922
8923        read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
8924        *ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) &
8925                STS_FM_VERSION_PATCH_MASK;
8926}
8927
8928static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8929                               u8 *continuous)
8930{
8931        u32 frame;
8932
8933        read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8934        *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8935                                        & POWER_MANAGEMENT_MASK;
8936        *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8937                                        & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8938}
8939
8940static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8941                                  u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8942{
8943        u32 frame;
8944
8945        read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8946        *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8947        *z = (frame >> Z_SHIFT) & Z_MASK;
8948        *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8949        *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8950        *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8951}
8952
8953static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8954                                      u8 *remote_tx_rate,
8955                                      u16 *link_widths)
8956{
8957        u32 frame;
8958
8959        read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8960                         &frame);
8961        *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8962                                & REMOTE_TX_RATE_MASK;
8963        *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8964}
8965
8966static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8967{
8968        u32 frame;
8969
8970        read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8971        *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8972}
8973
8974static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8975{
8976        read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8977}
8978
8979static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8980{
8981        read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8982}
8983
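/*
 * Read the 8051's link quality indication.  Reports 0 if the link is not
 * up or the read fails.
 */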
8984void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8985{
8986        u32 frame;
8987        int ret;
8988
8989        *link_quality = 0;
8990        if (dd->pport->host_link_state & HLS_UP) {
8991                ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
8992                                       &frame);
8993                if (ret == 0)
8994                        *link_quality = (frame >> LINK_QUALITY_SHIFT)
8995                                                & LINK_QUALITY_MASK;
8996        }
8997}
8998
8999static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
9000{
9001        u32 frame;
9002
9003        read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
9004        *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
9005}
9006
9007static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
9008{
9009        u32 frame;
9010
9011        read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
9012        *ldr = (frame & 0xff);
9013}
9014
9015static int read_tx_settings(struct hfi1_devdata *dd,
9016                            u8 *enable_lane_tx,
9017                            u8 *tx_polarity_inversion,
9018                            u8 *rx_polarity_inversion,
9019                            u8 *max_rate)
9020{
9021        u32 frame;
9022        int ret;
9023
9024        ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
9025        *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
9026                                & ENABLE_LANE_TX_MASK;
9027        *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
9028                                & TX_POLARITY_INVERSION_MASK;
9029        *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
9030                                & RX_POLARITY_INVERSION_MASK;
9031        *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
9032        return ret;
9033}
9034
9035static int write_tx_settings(struct hfi1_devdata *dd,
9036                             u8 enable_lane_tx,
9037                             u8 tx_polarity_inversion,
9038                             u8 rx_polarity_inversion,
9039                             u8 max_rate)
9040{
9041        u32 frame;
9042
9043        /* no need to mask, all variable sizes match field widths */
9044        frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
9045                | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
9046                | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
9047                | max_rate << MAX_RATE_SHIFT;
9048        return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
9049}
9050
9051/*
9052 * Read an idle LCB message.
9053 *
9054 * Returns 0 on success, -EINVAL on error
9055 */
9056static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
9057{
9058        int ret;
9059
9060        ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
9061        if (ret != HCMD_SUCCESS) {
9062                dd_dev_err(dd, "read idle message: type %d, err %d\n",
9063                           (u32)type, ret);
9064                return -EINVAL;
9065        }
9066        dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
9067        /* return only the payload as we already know the type */
9068        *data_out >>= IDLE_PAYLOAD_SHIFT;
9069        return 0;
9070}
9071
9072/*
9073 * Read an idle SMA message.  To be done in response to a notification from
9074 * the 8051.
9075 *
9076 * Returns 0 on success, -EINVAL on error
9077 */
9078static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
9079{
9080        return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
9081                                 data);
9082}
9083
9084/*
9085 * Send an idle LCB message.
9086 *
9087 * Returns 0 on success, -EINVAL on error
9088 */
9089static int send_idle_message(struct hfi1_devdata *dd, u64 data)
9090{
9091        int ret;
9092
9093        dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
9094        ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
9095        if (ret != HCMD_SUCCESS) {
9096                dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
9097                           data, ret);
9098                return -EINVAL;
9099        }
9100        return 0;
9101}
9102
9103/*
9104 * Send an idle SMA message.
9105 *
9106 * Returns 0 on success, -EINVAL on error
9107 */
9108int send_idle_sma(struct hfi1_devdata *dd, u64 message)
9109{
9110        u64 data;
9111
9112        data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
9113                ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
9114        return send_idle_message(dd, data);
9115}
9116
9117/*
9118 * Initialize the LCB then do a quick link up.  This may or may not be
9119 * in loopback.
9120 *
9121 * return 0 on success, -errno on error
9122 */
9123static int do_quick_linkup(struct hfi1_devdata *dd)
9124{
9125        int ret;
9126
9127        lcb_shutdown(dd, 0);
9128
9129        if (loopback) {
9130                /* LCB_CFG_LOOPBACK.VAL = 2 */
9131                /* LCB_CFG_LANE_WIDTH.VAL = 0 */
9132                write_csr(dd, DC_LCB_CFG_LOOPBACK,
9133                          IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
9134                write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
9135        }
9136
9137        /* start the LCBs */
9138        /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
9139        write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
9140
9141        /* simulator only loopback steps */
9142        if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
9143                /* LCB_CFG_RUN.EN = 1 */
9144                write_csr(dd, DC_LCB_CFG_RUN,
9145                          1ull << DC_LCB_CFG_RUN_EN_SHIFT);
9146
9147                ret = wait_link_transfer_active(dd, 10);
9148                if (ret)
9149                        return ret;
9150
9151                write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
9152                          1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
9153        }
9154
9155        if (!loopback) {
9156                /*
9157                 * When doing quick linkup and not in loopback, both
9158                 * sides must be done with LCB set-up before either
9159                 * starts the quick linkup.  Put a delay here so that
9160                 * both sides can be started and have a chance to be
9161                 * done with LCB set up before resuming.
9162                 */
9163                dd_dev_err(dd,
9164                           "Pausing for peer to be finished with LCB set up\n");
9165                msleep(5000);
9166                dd_dev_err(dd, "Continuing with quick linkup\n");
9167        }
9168
9169        write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
9170        set_8051_lcb_access(dd);
9171
9172        /*
9173         * State "quick" LinkUp request sets the physical link state to
9174         * LinkUp without a verify capability sequence.
9175         * This state is in simulator v37 and later.
9176         */
9177        ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
9178        if (ret != HCMD_SUCCESS) {
9179                dd_dev_err(dd,
9180                           "%s: set physical link state to quick LinkUp failed with return %d\n",
9181                           __func__, ret);
9182
9183                set_host_lcb_access(dd);
9184                write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9185
9186                if (ret >= 0)
9187                        ret = -EINVAL;
9188                return ret;
9189        }
9190
9191        return 0; /* success */
9192}
9193
9194/*
9195 * Do all special steps to set up loopback.
9196 */
9197static int init_loopback(struct hfi1_devdata *dd)
9198{
9199        dd_dev_info(dd, "Entering loopback mode\n");
9200
9201        /* all loopbacks should disable self GUID check */
9202        write_csr(dd, DC_DC8051_CFG_MODE,
9203                  (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
9204
9205        /*
9206         * The simulator has only one loopback option - LCB.  Switch
9207         * to that option, which includes quick link up.
9208         *
9209         * Accept all valid loopback values.
9210         */
9211        if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9212            (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9213             loopback == LOOPBACK_CABLE)) {
9214                loopback = LOOPBACK_LCB;
9215                quick_linkup = 1;
9216                return 0;
9217        }
9218
9219        /*
9220         * SerDes loopback init sequence is handled in set_local_link_attributes
9221         */
9222        if (loopback == LOOPBACK_SERDES)
9223                return 0;
9224
9225        /* LCB loopback - handled at poll time */
9226        if (loopback == LOOPBACK_LCB) {
9227                quick_linkup = 1; /* LCB is always quick linkup */
9228
9229                /* not supported in emulation due to emulation RTL changes */
9230                if (dd->icode == ICODE_FPGA_EMULATION) {
9231                        dd_dev_err(dd,
9232                                   "LCB loopback not supported in emulation\n");
9233                        return -EINVAL;
9234                }
9235                return 0;
9236        }
9237
9238        /* external cable loopback requires no extra steps */
9239        if (loopback == LOOPBACK_CABLE)
9240                return 0;
9241
9242        dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9243        return -EINVAL;
9244}
9245
9246/*
9247 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9248 * used in the Verify Capability link width attribute.
9249 */
9250static u16 opa_to_vc_link_widths(u16 opa_widths)
9251{
9252        int i;
9253        u16 result = 0;
9254
9255        static const struct link_bits {
9256                u16 from;
9257                u16 to;
9258        } opa_link_xlate[] = {
9259                { OPA_LINK_WIDTH_1X, 1 << (1 - 1)  },
9260                { OPA_LINK_WIDTH_2X, 1 << (2 - 1)  },
9261                { OPA_LINK_WIDTH_3X, 1 << (3 - 1)  },
9262                { OPA_LINK_WIDTH_4X, 1 << (4 - 1)  },
9263        };
9264
9265        for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9266                if (opa_widths & opa_link_xlate[i].from)
9267                        result |= opa_link_xlate[i].to;
9268        }
9269        return result;
9270}
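
/*
 * Worked example of the translation above, using only the table entries in
 * opa_to_vc_link_widths(): an FM width mask of
 *
 *	OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_4X
 *
 * maps to (1 << 0) | (1 << 3) == 0x9, i.e. bit (N - 1) is set in the
 * result for each enabled OPA width of N lanes.
 */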
9271
9272/*
9273 * Set link attributes before moving to polling.
9274 */
9275static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9276{
9277        struct hfi1_devdata *dd = ppd->dd;
9278        u8 enable_lane_tx;
9279        u8 tx_polarity_inversion;
9280        u8 rx_polarity_inversion;
9281        int ret;
9282        u32 misc_bits = 0;
9283        /* reset our fabric serdes to clear any lingering problems */
9284        fabric_serdes_reset(dd);
9285
9286        /* set the local tx rate - need to read-modify-write */
9287        ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
9288                               &rx_polarity_inversion, &ppd->local_tx_rate);
9289        if (ret)
9290                goto set_local_link_attributes_fail;
9291
9292        if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
9293                /* set the tx rate to the fastest enabled */
9294                if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9295                        ppd->local_tx_rate = 1;
9296                else
9297                        ppd->local_tx_rate = 0;
9298        } else {
9299                /* set the tx rate to all enabled */
9300                ppd->local_tx_rate = 0;
9301                if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9302                        ppd->local_tx_rate |= 2;
9303                if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9304                        ppd->local_tx_rate |= 1;
9305        }
9306
9307        enable_lane_tx = 0xF; /* enable all four lanes */
9308        ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
9309                                rx_polarity_inversion, ppd->local_tx_rate);
9310        if (ret != HCMD_SUCCESS)
9311                goto set_local_link_attributes_fail;
9312
9313        ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
9314        if (ret != HCMD_SUCCESS) {
9315                dd_dev_err(dd,
9316                           "Failed to set host interface version, return 0x%x\n",
9317                           ret);
9318                goto set_local_link_attributes_fail;
9319        }
9320
9321        /*
9322         * DC supports continuous updates.
9323         */
9324        ret = write_vc_local_phy(dd,
9325                                 0 /* no power management */,
9326                                 1 /* continuous updates */);
9327        if (ret != HCMD_SUCCESS)
9328                goto set_local_link_attributes_fail;
9329
9330        /* z=1 in the next call: AU of 0 is not supported by the hardware */
9331        ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9332                                    ppd->port_crc_mode_enabled);
9333        if (ret != HCMD_SUCCESS)
9334                goto set_local_link_attributes_fail;
9335
9336        /*
9337         * SerDes loopback init sequence requires
9338         * setting bit 0 of MISC_CONFIG_BITS
9339         */
9340        if (loopback == LOOPBACK_SERDES)
9341                misc_bits |= 1 << LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT;
9342
9343        /*
9344         * An external device configuration request is used to reset the LCB
9345         * to retry obtaining operational lanes when the first attempt is
9346         * unsuccessful.
9347         */
9348        if (dd->dc8051_ver >= dc8051_ver(1, 25, 0))
9349                misc_bits |= 1 << EXT_CFG_LCB_RESET_SUPPORTED_SHIFT;
9350
9351        ret = write_vc_local_link_mode(dd, misc_bits, 0,
9352                                       opa_to_vc_link_widths(
9353                                                ppd->link_width_enabled));
9354        if (ret != HCMD_SUCCESS)
9355                goto set_local_link_attributes_fail;
9356
9357        /* let peer know who we are */
9358        ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9359        if (ret == HCMD_SUCCESS)
9360                return 0;
9361
9362set_local_link_attributes_fail:
9363        dd_dev_err(dd,
9364                   "Failed to set local link attributes, return 0x%x\n",
9365                   ret);
9366        return ret;
9367}
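
/*
 * Note on the local_tx_rate encoding used in set_local_link_attributes():
 * with 8051 firmware 0.20.0 or newer the value is a bit mask (bit 1 =
 * 25 Gb/s, bit 0 = 12.5 Gb/s), so a port with both speeds enabled
 * programs local_tx_rate = 0x3.  Older firmware takes a single selector
 * instead: 1 if 25 Gb/s is enabled, otherwise 0.
 */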
9368
9369/*
9370 * Call this to start the link.
9371 * Do not do anything if the link is disabled.
9372 * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
9373 */
9374int start_link(struct hfi1_pportdata *ppd)
9375{
9376        /*
9377         * Tune the SerDes to a ballpark setting for optimal signal and bit
9378         * error rate.  Needs to be done before starting the link.
9379         */
9380        tune_serdes(ppd);
9381
9382        if (!ppd->driver_link_ready) {
9383                dd_dev_info(ppd->dd,
9384                            "%s: stopping link start because driver is not ready\n",
9385                            __func__);
9386                return 0;
9387        }
9388
9389        /*
9390         * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9391         * pkey table can be configured properly if the HFI unit is connected
9392         * to a switch port with MgmtAllowed=NO
9393         */
9394        clear_full_mgmt_pkey(ppd);
9395
9396        return set_link_state(ppd, HLS_DN_POLL);
9397}
9398
9399static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9400{
9401        struct hfi1_devdata *dd = ppd->dd;
9402        u64 mask;
9403        unsigned long timeout;
9404
9405        /*
9406         * Some QSFP cables have a quirk that asserts the IntN line as a side
9407         * effect of power up on plug-in. We ignore this false positive
9408         * interrupt until the module has finished powering up by waiting for
9409         * a minimum timeout of the module inrush initialization time of
9410         * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
9411         * module have stabilized.
9412         */
9413        msleep(500);
9414
9415        /*
9416         * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
9417         */
9418        timeout = jiffies + msecs_to_jiffies(2000);
9419        while (1) {
9420                mask = read_csr(dd, dd->hfi1_id ?
9421                                ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9422                if (!(mask & QSFP_HFI0_INT_N))
9423                        break;
9424                if (time_after(jiffies, timeout)) {
9425                        dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9426                                    __func__);
9427                        break;
9428                }
9429                udelay(2);
9430        }
9431}
9432
9433static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9434{
9435        struct hfi1_devdata *dd = ppd->dd;
9436        u64 mask;
9437
9438        mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9439        if (enable) {
9440                /*
9441                 * Clear the status register to avoid an immediate interrupt
9442                 * when we re-enable the IntN pin
9443                 */
9444                write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9445                          QSFP_HFI0_INT_N);
9446                mask |= (u64)QSFP_HFI0_INT_N;
9447        } else {
9448                mask &= ~(u64)QSFP_HFI0_INT_N;
9449        }
9450        write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9451}
9452
9453int reset_qsfp(struct hfi1_pportdata *ppd)
9454{
9455        struct hfi1_devdata *dd = ppd->dd;
9456        u64 mask, qsfp_mask;
9457
9458        /* Disable INT_N from triggering QSFP interrupts */
9459        set_qsfp_int_n(ppd, 0);
9460
9461        /* Reset the QSFP */
9462        mask = (u64)QSFP_HFI0_RESET_N;
9463
9464        qsfp_mask = read_csr(dd,
9465                             dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
9466        qsfp_mask &= ~mask;
9467        write_csr(dd,
9468                  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9469
9470        udelay(10);
9471
9472        qsfp_mask |= mask;
9473        write_csr(dd,
9474                  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9475
9476        wait_for_qsfp_init(ppd);
9477
9478        /*
9479         * Allow INT_N to trigger the QSFP interrupt to watch
9480         * for alarms and warnings
9481         */
9482        set_qsfp_int_n(ppd, 1);
9483
9484        /*
9485         * After the reset, AOC transmitters are enabled by default. They need
9486         * to be turned off to complete the QSFP setup before they can be
9487         * enabled again.
9488         */
9489        return set_qsfp_tx(ppd, 0);
9490}
9491
9492static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9493                                        u8 *qsfp_interrupt_status)
9494{
9495        struct hfi1_devdata *dd = ppd->dd;
9496
9497        if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9498            (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9499                dd_dev_err(dd, "%s: QSFP cable temperature too high\n",
9500                           __func__);
9501
9502        if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9503            (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9504                dd_dev_err(dd, "%s: QSFP cable temperature too low\n",
9505                           __func__);
9506
9507        /*
9508         * The remaining alarms/warnings don't matter if the link is down.
9509         */
9510        if (ppd->host_link_state & HLS_DOWN)
9511                return 0;
9512
9513        if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9514            (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9515                dd_dev_err(dd, "%s: QSFP supply voltage too high\n",
9516                           __func__);
9517
9518        if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9519            (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9520                dd_dev_err(dd, "%s: QSFP supply voltage too low\n",
9521                           __func__);
9522
9523        /* Byte 2 is vendor specific */
9524
9525        if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9526            (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9527                dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n",
9528                           __func__);
9529
9530        if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9531            (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9532                dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n",
9533                           __func__);
9534
9535        if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9536            (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9537                dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n",
9538                           __func__);
9539
9540        if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9541            (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9542                dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n",
9543                           __func__);
9544
9545        if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9546            (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9547                dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n",
9548                           __func__);
9549
9550        if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9551            (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9552                dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n",
9553                           __func__);
9554
9555        if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9556            (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9557                dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n",
9558                           __func__);
9559
9560        if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9561            (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9562                dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n",
9563                           __func__);
9564
9565        if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9566            (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9567                dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n",
9568                           __func__);
9569
9570        if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9571            (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9572                dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n",
9573                           __func__);
9574
9575        if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9576            (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9577                dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n",
9578                           __func__);
9579
9580        if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9581            (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9582                dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n",
9583                           __func__);
9584
9585        /* Bytes 9-10 and 11-12 are reserved */
9586        /* Bytes 13-15 are vendor specific */
9587
9588        return 0;
9589}
9590
9591/* This routine is only scheduled if the QSFP module present signal is asserted */
9592void qsfp_event(struct work_struct *work)
9593{
9594        struct qsfp_data *qd;
9595        struct hfi1_pportdata *ppd;
9596        struct hfi1_devdata *dd;
9597
9598        qd = container_of(work, struct qsfp_data, qsfp_work);
9599        ppd = qd->ppd;
9600        dd = ppd->dd;
9601
9602        /* Sanity check */
9603        if (!qsfp_mod_present(ppd))
9604                return;
9605
9606        if (ppd->host_link_state == HLS_DN_DISABLE) {
9607                dd_dev_info(ppd->dd,
9608                            "%s: stopping link start because link is disabled\n",
9609                            __func__);
9610                return;
9611        }
9612
9613        /*
9614         * Turn DC back on after cable has been re-inserted. Up until
9615         * now, the DC has been in reset to save power.
9616         */
9617        dc_start(dd);
9618
9619        if (qd->cache_refresh_required) {
9620                set_qsfp_int_n(ppd, 0);
9621
9622                wait_for_qsfp_init(ppd);
9623
9624                /*
9625                 * Allow INT_N to trigger the QSFP interrupt to watch
9626                 * for alarms and warnings
9627                 */
9628                set_qsfp_int_n(ppd, 1);
9629
9630                start_link(ppd);
9631        }
9632
9633        if (qd->check_interrupt_flags) {
9634                u8 qsfp_interrupt_status[16] = {0,};
9635
9636                if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9637                                  &qsfp_interrupt_status[0], 16) != 16) {
9638                        dd_dev_info(dd,
9639                                    "%s: Failed to read status of QSFP module\n",
9640                                    __func__);
9641                } else {
9642                        unsigned long flags;
9643
9644                        handle_qsfp_error_conditions(
9645                                        ppd, qsfp_interrupt_status);
9646                        spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9647                        ppd->qsfp_info.check_interrupt_flags = 0;
9648                        spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9649                                               flags);
9650                }
9651        }
9652}
9653
9654static void init_qsfp_int(struct hfi1_devdata *dd)
9655{
9656        struct hfi1_pportdata *ppd = dd->pport;
9657        u64 qsfp_mask, cce_int_mask;
9658        const int qsfp1_int_smask = QSFP1_INT % 64;
9659        const int qsfp2_int_smask = QSFP2_INT % 64;
9660
9661        /*
9662         * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
9663         * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9664         * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9665         * the index of the appropriate CSR in the CCEIntMask CSR array
9666         */
9667        cce_int_mask = read_csr(dd, CCE_INT_MASK +
9668                                (8 * (QSFP1_INT / 64)));
9669        if (dd->hfi1_id) {
9670                cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9671                write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9672                          cce_int_mask);
9673        } else {
9674                cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9675                write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9676                          cce_int_mask);
9677        }
9678
9679        qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9680        /* Clear current status to avoid spurious interrupts */
9681        write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9682                  qsfp_mask);
9683        write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9684                  qsfp_mask);
9685
9686        set_qsfp_int_n(ppd, 0);
9687
9688        /* Handle active low nature of INT_N and MODPRST_N pins */
9689        if (qsfp_mod_present(ppd))
9690                qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9691        write_csr(dd,
9692                  dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9693                  qsfp_mask);
9694}
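
/*
 * For illustration of the CCE_INT_MASK addressing in init_qsfp_int(): an
 * interrupt source number is split into a CSR index (source / 64, with
 * 8 bytes per 64-bit CSR) and a bit position within that CSR (source % 64).
 * A hypothetical source number of 70 would therefore be masked by clearing
 * bit 6 of CCE_INT_MASK + 8.
 */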
9695
9696/*
9697 * Do a one-time initialize of the LCB block.
9698 */
9699static void init_lcb(struct hfi1_devdata *dd)
9700{
9701        /* simulator does not correctly handle LCB cclk loopback, skip */
9702        if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9703                return;
9704
9705        /* the DC has been reset earlier in the driver load */
9706
9707        /* set LCB for cclk loopback on the port */
9708        write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9709        write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9710        write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9711        write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9712        write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9713        write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9714        write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9715}
9716
9717/*
9718 * Perform a test read on the QSFP.  Return 0 on success, -ERRNO
9719 * on error.
9720 */
9721static int test_qsfp_read(struct hfi1_pportdata *ppd)
9722{
9723        int ret;
9724        u8 status;
9725
9726        /*
9727         * Report success if this is not a QSFP port or, if it is a QSFP,
9728         * the cable is not present.
9729         */
9730        if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
9731                return 0;
9732
9733        /* read byte 2, the status byte */
9734        ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
9735        if (ret < 0)
9736                return ret;
9737        if (ret != 1)
9738                return -EIO;
9739
9740        return 0; /* success */
9741}
9742
9743/*
9744 * Values for QSFP retry.
9745 *
9746 * Give up after 10s (20 x 500ms).  The overall timeout was empirically
9747 * arrived at from experience on a large cluster.
9748 */
9749#define MAX_QSFP_RETRIES 20
9750#define QSFP_RETRY_WAIT 500 /* msec */
9751
9752/*
9753 * Try a QSFP read.  If it fails, schedule a retry for later.
9754 * Called on first link activation after driver load.
9755 */
9756static void try_start_link(struct hfi1_pportdata *ppd)
9757{
9758        if (test_qsfp_read(ppd)) {
9759                /* read failed */
9760                if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
9761                        dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
9762                        return;
9763                }
9764                dd_dev_info(ppd->dd,
9765                            "QSFP not responding, waiting and retrying %d\n",
9766                            (int)ppd->qsfp_retry_count);
9767                ppd->qsfp_retry_count++;
9768                queue_delayed_work(ppd->link_wq, &ppd->start_link_work,
9769                                   msecs_to_jiffies(QSFP_RETRY_WAIT));
9770                return;
9771        }
9772        ppd->qsfp_retry_count = 0;
9773
9774        start_link(ppd);
9775}
9776
9777/*
9778 * Workqueue function to start the link after a delay.
9779 */
9780void handle_start_link(struct work_struct *work)
9781{
9782        struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
9783                                                  start_link_work.work);
9784        try_start_link(ppd);
9785}
9786
9787int bringup_serdes(struct hfi1_pportdata *ppd)
9788{
9789        struct hfi1_devdata *dd = ppd->dd;
9790        u64 guid;
9791        int ret;
9792
9793        if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9794                add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9795
9796        guid = ppd->guids[HFI1_PORT_GUID_INDEX];
9797        if (!guid) {
9798                if (dd->base_guid)
9799                        guid = dd->base_guid + ppd->port - 1;
9800                ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
9801        }
9802
9803        /* Set linkinit_reason on power up per OPA spec */
9804        ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9805
9806        /* one-time init of the LCB */
9807        init_lcb(dd);
9808
9809        if (loopback) {
9810                ret = init_loopback(dd);
9811                if (ret < 0)
9812                        return ret;
9813        }
9814
9815        get_port_type(ppd);
9816        if (ppd->port_type == PORT_TYPE_QSFP) {
9817                set_qsfp_int_n(ppd, 0);
9818                wait_for_qsfp_init(ppd);
9819                set_qsfp_int_n(ppd, 1);
9820        }
9821
9822        try_start_link(ppd);
9823        return 0;
9824}
9825
9826void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9827{
9828        struct hfi1_devdata *dd = ppd->dd;
9829
9830        /*
9831         * Shut down the link and keep it down.  First clear the flag that
9832         * says the driver wants the link to be up (driver_link_ready).
9833         * Then make sure the link is not automatically restarted
9834         * (link_enabled).  Cancel any pending restart.  And finally
9835         * go offline.
9836         */
9837        ppd->driver_link_ready = 0;
9838        ppd->link_enabled = 0;
9839
9840        ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
9841        flush_delayed_work(&ppd->start_link_work);
9842        cancel_delayed_work_sync(&ppd->start_link_work);
9843
9844        ppd->offline_disabled_reason =
9845                        HFI1_ODR_MASK(OPA_LINKDOWN_REASON_REBOOT);
9846        set_link_down_reason(ppd, OPA_LINKDOWN_REASON_REBOOT, 0,
9847                             OPA_LINKDOWN_REASON_REBOOT);
9848        set_link_state(ppd, HLS_DN_OFFLINE);
9849
9850        /* disable the port */
9851        clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9852}
9853
9854static inline int init_cpu_counters(struct hfi1_devdata *dd)
9855{
9856        struct hfi1_pportdata *ppd;
9857        int i;
9858
9859        ppd = (struct hfi1_pportdata *)(dd + 1);
9860        for (i = 0; i < dd->num_pports; i++, ppd++) {
9861                ppd->ibport_data.rvp.rc_acks = NULL;
9862                ppd->ibport_data.rvp.rc_qacks = NULL;
9863                ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9864                ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9865                ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9866                if (!ppd->ibport_data.rvp.rc_acks ||
9867                    !ppd->ibport_data.rvp.rc_delayed_comp ||
9868                    !ppd->ibport_data.rvp.rc_qacks)
9869                        return -ENOMEM;
9870        }
9871
9872        return 0;
9873}
9874
9875/*
9876 * index is the index into the receive array
9877 */
9878void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9879                  u32 type, unsigned long pa, u16 order)
9880{
9881        u64 reg;
9882
9883        if (!(dd->flags & HFI1_PRESENT))
9884                goto done;
9885
9886        if (type == PT_INVALID || type == PT_INVALID_FLUSH) {
9887                pa = 0;
9888                order = 0;
9889        } else if (type > PT_INVALID) {
9890                dd_dev_err(dd,
9891                           "unexpected receive array type %u for index %u, not handled\n",
9892                           type, index);
9893                goto done;
9894        }
9895        trace_hfi1_put_tid(dd, index, type, pa, order);
9896
9897#define RT_ADDR_SHIFT 12        /* 4KB kernel address boundary */
9898        reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9899                | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9900                | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9901                                        << RCV_ARRAY_RT_ADDR_SHIFT;
9902        trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg);
9903        writeq(reg, dd->rcvarray_wc + (index * 8));
9904
9905        if (type == PT_EAGER || type == PT_INVALID_FLUSH || (index & 3) == 3)
9906                /*
9907                 * Eager entries are written and flushed
9908                 *
9909                 * Expected entries are flushed every 4 writes
9910                 */
9911                flush_wc();
9912done:
9913        return;
9914}
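
/*
 * Illustrative example of the receive array encoding in hfi1_put_tid(),
 * using a made-up 4 KB-aligned physical address of 0x123456000: the address
 * field written to the entry is (0x123456000 >> RT_ADDR_SHIFT) = 0x123456,
 * with "order" selecting the buffer size field.  Eager and invalidate-flush
 * writes are flushed immediately; expected-TID writes are batched and only
 * flushed on every fourth index ((index & 3) == 3).
 */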
9915
9916void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9917{
9918        struct hfi1_devdata *dd = rcd->dd;
9919        u32 i;
9920
9921        /* this could be optimized */
9922        for (i = rcd->eager_base; i < rcd->eager_base +
9923                     rcd->egrbufs.alloced; i++)
9924                hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9925
9926        for (i = rcd->expected_base;
9927                        i < rcd->expected_base + rcd->expected_count; i++)
9928                hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9929}
9930
9931static const char * const ib_cfg_name_strings[] = {
9932        "HFI1_IB_CFG_LIDLMC",
9933        "HFI1_IB_CFG_LWID_DG_ENB",
9934        "HFI1_IB_CFG_LWID_ENB",
9935        "HFI1_IB_CFG_LWID",
9936        "HFI1_IB_CFG_SPD_ENB",
9937        "HFI1_IB_CFG_SPD",
9938        "HFI1_IB_CFG_RXPOL_ENB",
9939        "HFI1_IB_CFG_LREV_ENB",
9940        "HFI1_IB_CFG_LINKLATENCY",
9941        "HFI1_IB_CFG_HRTBT",
9942        "HFI1_IB_CFG_OP_VLS",
9943        "HFI1_IB_CFG_VL_HIGH_CAP",
9944        "HFI1_IB_CFG_VL_LOW_CAP",
9945        "HFI1_IB_CFG_OVERRUN_THRESH",
9946        "HFI1_IB_CFG_PHYERR_THRESH",
9947        "HFI1_IB_CFG_LINKDEFAULT",
9948        "HFI1_IB_CFG_PKEYS",
9949        "HFI1_IB_CFG_MTU",
9950        "HFI1_IB_CFG_LSTATE",
9951        "HFI1_IB_CFG_VL_HIGH_LIMIT",
9952        "HFI1_IB_CFG_PMA_TICKS",
9953        "HFI1_IB_CFG_PORT"
9954};
9955
9956static const char *ib_cfg_name(int which)
9957{
9958        if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9959                return "invalid";
9960        return ib_cfg_name_strings[which];
9961}
9962
9963int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9964{
9965        struct hfi1_devdata *dd = ppd->dd;
9966        int val = 0;
9967
9968        switch (which) {
9969        case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9970                val = ppd->link_width_enabled;
9971                break;
9972        case HFI1_IB_CFG_LWID: /* currently active Link-width */
9973                val = ppd->link_width_active;
9974                break;
9975        case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9976                val = ppd->link_speed_enabled;
9977                break;
9978        case HFI1_IB_CFG_SPD: /* current Link speed */
9979                val = ppd->link_speed_active;
9980                break;
9981
9982        case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9983        case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9984        case HFI1_IB_CFG_LINKLATENCY:
9985                goto unimplemented;
9986
9987        case HFI1_IB_CFG_OP_VLS:
9988                val = ppd->actual_vls_operational;
9989                break;
9990        case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9991                val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9992                break;
9993        case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9994                val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9995                break;
9996        case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9997                val = ppd->overrun_threshold;
9998                break;
9999        case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10000                val = ppd->phy_error_threshold;
10001                break;
10002        case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10003                val = HLS_DEFAULT;
10004                break;
10005
10006        case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
10007        case HFI1_IB_CFG_PMA_TICKS:
10008        default:
10009unimplemented:
10010                if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10011                        dd_dev_info(
10012                                dd,
10013                                "%s: which %s: not implemented\n",
10014                                __func__,
10015                                ib_cfg_name(which));
10016                break;
10017        }
10018
10019        return val;
10020}
10021
10022/*
10023 * The largest MAD packet size.
10024 */
10025#define MAX_MAD_PACKET 2048
10026
10027/*
10028 * Return the maximum header bytes that can go on the _wire_
10029 * for this device. This count includes the ICRC which is
10030 * not part of the packet held in memory but is appended
10031 * by the HW.
10032 * This is dependent on the device's receive header entry size.
10033 * HFI allows this to be set per-receive context, but the
10034 * driver presently enforces a global value.
10035 */
10036u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
10037{
10038        /*
10039         * The maximum non-payload (MTU) bytes in LRH.PktLen are
10040         * the Receive Header Entry Size minus the PBC (or RHF) size
10041         * plus one DW for the ICRC appended by HW.
10042         *
10043         * dd->rcd[0]->rcvhdrqentsize is in DW.
10044         * We use rcd[0] as all contexts will have the same value. Also,
10045         * the first kernel context would have been allocated by now so
10046         * we are guaranteed a valid value.
10047         */
10048        return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
10049}
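
/*
 * Worked example of the calculation above, assuming a hypothetical receive
 * header entry size of 32 DW: the maximum wire header is
 * (32 - 2 + 1) << 2 = 124 bytes, including the ICRC DW appended by hardware.
 */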
10050
10051/*
10052 * Set Send Length
10053 * @ppd - per port data
10054 *
10055 * Set the MTU by limiting how many DWs may be sent.  The SendLenCheck*
10056 * registers compare against LRH.PktLen, so use the max bytes included
10057 * in the LRH.
10058 *
10059 * This routine changes all VL values except VL15, which it maintains at
10060 * the same value.
10061 */
10062static void set_send_length(struct hfi1_pportdata *ppd)
10063{
10064        struct hfi1_devdata *dd = ppd->dd;
10065        u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
10066        u32 maxvlmtu = dd->vld[15].mtu;
10067        u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
10068                              & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
10069                SEND_LEN_CHECK1_LEN_VL15_SHIFT;
10070        int i, j;
10071        u32 thres;
10072
10073        for (i = 0; i < ppd->vls_supported; i++) {
10074                if (dd->vld[i].mtu > maxvlmtu)
10075                        maxvlmtu = dd->vld[i].mtu;
10076                if (i <= 3)
10077                        len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
10078                                 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
10079                                ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
10080                else
10081                        len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
10082                                 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
10083                                ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
10084        }
10085        write_csr(dd, SEND_LEN_CHECK0, len1);
10086        write_csr(dd, SEND_LEN_CHECK1, len2);
10087        /* adjust kernel credit return thresholds based on new MTUs */
10088        /* all kernel receive contexts have the same hdrqentsize */
10089        for (i = 0; i < ppd->vls_supported; i++) {
10090                thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
10091                            sc_mtu_to_threshold(dd->vld[i].sc,
10092                                                dd->vld[i].mtu,
10093                                                dd->rcd[0]->rcvhdrqentsize));
10094                for (j = 0; j < INIT_SC_PER_VL; j++)
10095                        sc_set_cr_threshold(
10096                                        pio_select_send_context_vl(dd, j, i),
10097                                        thres);
10098        }
10099        thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
10100                    sc_mtu_to_threshold(dd->vld[15].sc,
10101                                        dd->vld[15].mtu,
10102                                        dd->rcd[0]->rcvhdrqentsize));
10103        sc_set_cr_threshold(dd->vld[15].sc, thres);
10104
10105        /* Adjust maximum MTU for the port in DC */
10106        dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
10107                (ilog2(maxvlmtu >> 8) + 1);
10108        len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
10109        len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
10110        len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
10111                DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
10112        write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
10113}
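
/*
 * Worked examples of the DC MTU_CAP encoding in set_send_length(): a
 * maximum VL MTU of 10240 bytes programs the dedicated
 * DCC_CFG_PORT_MTU_CAP_10240 value, while power-of-two MTUs use
 * ilog2(mtu >> 8) + 1, e.g. 2048 -> 4, 4096 -> 5, 8192 -> 6.
 */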
10114
10115static void set_lidlmc(struct hfi1_pportdata *ppd)
10116{
10117        int i;
10118        u64 sreg = 0;
10119        struct hfi1_devdata *dd = ppd->dd;
10120        u32 mask = ~((1U << ppd->lmc) - 1);
10121        u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
10122        u32 lid;
10123
10124        /*
10125         * Program 0 into the CSR if the port LID is extended.  This prevents
10126         * 9B packets from being sent out for large LIDs.
10127         */
10128        lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 0 : ppd->lid;
10129        c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
10130                | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
10131        c1 |= ((lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
10132                        << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
10133              ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
10134                        << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
10135        write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
10136
10137        /*
10138         * Iterate over all the send contexts and set their SLID check
10139         */
10140        sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
10141                        SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
10142               (((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
10143                        SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
10144
10145        for (i = 0; i < chip_send_contexts(dd); i++) {
10146                hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
10147                          i, (u32)sreg);
10148                write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
10149        }
10150
10151        /* Now we have to do the same thing for the sdma engines */
10152        sdma_update_lmc(dd, mask, lid);
10153}
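
/*
 * For illustration of the LMC masking in set_lidlmc(): with ppd->lmc == 2
 * the mask is ~((1 << 2) - 1) = ~0x3, so the DLID/SLID checks ignore the
 * low two LID bits and a base LID of, say, 0x10 covers LIDs 0x10-0x13.
 */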
10154
10155static const char *state_completed_string(u32 completed)
10156{
10157        static const char * const state_completed[] = {
10158                "EstablishComm",
10159                "OptimizeEQ",
10160                "VerifyCap"
10161        };
10162
10163        if (completed < ARRAY_SIZE(state_completed))
10164                return state_completed[completed];
10165
10166        return "unknown";
10167}
10168
10169static const char all_lanes_dead_timeout_expired[] =
10170        "All lanes were inactive – was the interconnect media removed?";
10171static const char tx_out_of_policy[] =
10172        "Passing lanes on local port do not meet the local link width policy";
10173static const char no_state_complete[] =
10174        "State timeout occurred before link partner completed the state";
10175static const char * const state_complete_reasons[] = {
10176        [0x00] = "Reason unknown",
10177        [0x01] = "Link was halted by driver, refer to LinkDownReason",
10178        [0x02] = "Link partner reported failure",
10179        [0x10] = "Unable to achieve frame sync on any lane",
10180        [0x11] =
10181          "Unable to find a common bit rate with the link partner",
10182        [0x12] =
10183          "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
10184        [0x13] =
10185          "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
10186        [0x14] = no_state_complete,
10187        [0x15] =
10188          "State timeout occurred before link partner identified equalization presets",
10189        [0x16] =
10190          "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
10191        [0x17] = tx_out_of_policy,
10192        [0x20] = all_lanes_dead_timeout_expired,
10193        [0x21] =
10194          "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
10195        [0x22] = no_state_complete,
10196        [0x23] =
10197          "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
10198        [0x24] = tx_out_of_policy,
10199        [0x30] = all_lanes_dead_timeout_expired,
10200        [0x31] =
10201          "State timeout occurred waiting for host to process received frames",
10202        [0x32] = no_state_complete,
10203        [0x33] =
10204          "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
10205        [0x34] = tx_out_of_policy,
10206        [0x35] = "Negotiated link width is mutually exclusive",
10207        [0x36] =
10208          "Timed out before receiving verifycap frames in VerifyCap.Exchange",
10209        [0x37] = "Unable to resolve secure data exchange",
10210};
10211
10212static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
10213                                                     u32 code)
10214{
10215        const char *str = NULL;
10216
10217        if (code < ARRAY_SIZE(state_complete_reasons))
10218                str = state_complete_reasons[code];
10219
10220        if (str)
10221                return str;
10222        return "Reserved";
10223}
10224
10225/* describe the given last state complete frame */
10226static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
10227                                  const char *prefix)
10228{
10229        struct hfi1_devdata *dd = ppd->dd;
10230        u32 success;
10231        u32 state;
10232        u32 reason;
10233        u32 lanes;
10234
10235        /*
10236         * Decode frame:
10237         *  [ 0: 0] - success
10238         *  [ 3: 1] - state
10239         *  [ 7: 4] - next state timeout
10240         *  [15: 8] - reason code
10241         *  [31:16] - lanes
10242         */
10243        success = frame & 0x1;
10244        state = (frame >> 1) & 0x7;
10245        reason = (frame >> 8) & 0xff;
10246        lanes = (frame >> 16) & 0xffff;
10247
10248        dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
10249                   prefix, frame);
10250        dd_dev_err(dd, "    last reported state: %s (0x%x)\n",
10251                   state_completed_string(state), state);
10252        dd_dev_err(dd, "    state successfully completed: %s\n",
10253                   success ? "yes" : "no");
10254        dd_dev_err(dd, "    fail reason 0x%x: %s\n",
10255                   reason, state_complete_reason_code_string(ppd, reason));
10256        dd_dev_err(dd, "    passing lane mask: 0x%x", lanes);
10257}
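
/*
 * Example decode of a hypothetical frame value 0x00002002 using the bit
 * layout documented in decode_state_complete(): success = 0, state = 1
 * ("OptimizeEQ"), reason = 0x20 ("All lanes were inactive ..."), and a
 * passing lane mask of 0x0000.
 */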
10258
10259/*
10260 * Read the last state complete frames and explain them.  This routine
10261 * expects to be called if the link went down during link negotiation
10262 * and initialization (LNI).  That is, anywhere between polling and link up.
10263 */
10264static void check_lni_states(struct hfi1_pportdata *ppd)
10265{
10266        u32 last_local_state;
10267        u32 last_remote_state;
10268
10269        read_last_local_state(ppd->dd, &last_local_state);
10270        read_last_remote_state(ppd->dd, &last_remote_state);
10271
10272        /*
10273         * Don't report anything if there is nothing to report.  A value of
10274         * 0 means the link was taken down while polling and there was no
10275         * training in-process.
10276         */
10277        if (last_local_state == 0 && last_remote_state == 0)
10278                return;
10279
10280        decode_state_complete(ppd, last_local_state, "transmitted");
10281        decode_state_complete(ppd, last_remote_state, "received");
10282}
10283
10284/* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */
10285static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
10286{
10287        u64 reg;
10288        unsigned long timeout;
10289
10290        /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
10291        timeout = jiffies + msecs_to_jiffies(wait_ms);
10292        while (1) {
10293                reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
10294                if (reg)
10295                        break;
10296                if (time_after(jiffies, timeout)) {
10297                        dd_dev_err(dd,
10298                                   "timeout waiting for LINK_TRANSFER_ACTIVE\n");
10299                        return -ETIMEDOUT;
10300                }
10301                udelay(2);
10302        }
10303        return 0;
10304}
10305
10306/* called when the logical link state is not down as it should be */
10307static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
10308{
10309        struct hfi1_devdata *dd = ppd->dd;
10310
10311        /*
10312         * Bring link up in LCB loopback
10313         */
10314        write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10315        write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
10316                  DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
10317
10318        write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
10319        write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
10320        write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
10321        write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);
10322
10323        write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
10324        (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
10325        udelay(3);
10326        write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
10327        write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
10328
10329        wait_link_transfer_active(dd, 100);
10330
10331        /*
10332         * Bring the link down again.
10333         */
10334        write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10335        write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
10336        write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
10337
10338        dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n");
10339}
10340
10341/*
10342 * Helper for set_link_state().  Do not call except from that routine.
10343 * Expects ppd->hls_mutex to be held.
10344 *
10345 * @rem_reason value to be sent to the neighbor
10346 *
10347 * LinkDownReasons only set if transition succeeds.
10348 */
10349static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10350{
10351        struct hfi1_devdata *dd = ppd->dd;
10352        u32 previous_state;
10353        int offline_state_ret;
10354        int ret;
10355
10356        update_lcb_cache(dd);
10357
10358        previous_state = ppd->host_link_state;
10359        ppd->host_link_state = HLS_GOING_OFFLINE;
10360
10361        /* start offline transition */
10362        ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE);
10363
10364        if (ret != HCMD_SUCCESS) {
10365                dd_dev_err(dd,
10366                           "Failed to transition to Offline link state, return %d\n",
10367                           ret);
10368                return -EINVAL;
10369        }
10370        if (ppd->offline_disabled_reason ==
10371                        HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
10372                ppd->offline_disabled_reason =
10373                HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
10374
10375        offline_state_ret = wait_phys_link_offline_substates(ppd, 10000);
10376        if (offline_state_ret < 0)
10377                return offline_state_ret;
10378
10379        /* Disabling AOC transmitters */
10380        if (ppd->port_type == PORT_TYPE_QSFP &&
10381            ppd->qsfp_info.limiting_active &&
10382            qsfp_mod_present(ppd)) {
10383                int ret;
10384
10385                ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
10386                if (ret == 0) {
10387                        set_qsfp_tx(ppd, 0);
10388                        release_chip_resource(dd, qsfp_resource(dd));
10389                } else {
10390                        /* not fatal, but should warn */
10391                        dd_dev_err(dd,
10392                                   "Unable to acquire lock to turn off QSFP TX\n");
10393                }
10394        }
10395
10396        /*
10397         * Wait for the offline.Quiet transition if it hasn't happened yet. It
10398         * can take a while for the link to go down.
10399         */
10400        if (offline_state_ret != PLS_OFFLINE_QUIET) {
10401                ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000);
10402                if (ret < 0)
10403                        return ret;
10404        }
10405
10406        /*
10407         * Now in charge of LCB - must be after the physical state is
10408         * offline.quiet and before host_link_state is changed.
10409         */
10410        set_host_lcb_access(dd);
10411        write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
10412
10413        /* make sure the logical state is also down */
10414        ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10415        if (ret)
10416                force_logical_link_state_down(ppd);
10417
10418        ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10419        update_statusp(ppd, IB_PORT_DOWN);
10420
10421        /*
10422         * The LNI has a mandatory wait time after the physical state
10423         * moves to Offline.Quiet.  The wait time may be different
10424         * depending on how the link went down.  The 8051 firmware
10425         * will observe the needed wait time and only move to ready
10426         * when that is completed.  The largest of the quiet timeouts
10427         * is 6s, so wait that long and then at least 0.5s more for
10428         * other transitions, and another 0.5s for a buffer.
10429         */
10430        ret = wait_fm_ready(dd, 7000);
10431        if (ret) {
10432                dd_dev_err(dd,
10433                           "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
10434                /* state is really offline, so make it so */
10435                ppd->host_link_state = HLS_DN_OFFLINE;
10436                return ret;
10437        }
10438
10439        /*
10440         * The state is now offline and the 8051 is ready to accept host
10441         * requests.
10442         *      - change our state
10443         *      - notify others if we were previously in a linkup state
10444         */
10445        ppd->host_link_state = HLS_DN_OFFLINE;
10446        if (previous_state & HLS_UP) {
10447                /* went down while link was up */
10448                handle_linkup_change(dd, 0);
10449        } else if (previous_state
10450                        & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10451                /* went down while attempting link up */
10452                check_lni_states(ppd);
10453
10454                /* The QSFP doesn't need to be reset on LNI failure */
10455                ppd->qsfp_info.reset_needed = 0;
10456        }
10457
10458        /* the active link width (downgrade) is 0 on link down */
10459        ppd->link_width_active = 0;
10460        ppd->link_width_downgrade_tx_active = 0;
10461        ppd->link_width_downgrade_rx_active = 0;
10462        ppd->current_egress_rate = 0;
10463        return 0;
10464}
10465
10466/* return the link state name */
10467static const char *link_state_name(u32 state)
10468{
10469        const char *name;
10470        int n = ilog2(state);
10471        static const char * const names[] = {
10472                [__HLS_UP_INIT_BP]       = "INIT",
10473                [__HLS_UP_ARMED_BP]      = "ARMED",
10474                [__HLS_UP_ACTIVE_BP]     = "ACTIVE",
10475                [__HLS_DN_DOWNDEF_BP]    = "DOWNDEF",
10476                [__HLS_DN_POLL_BP]       = "POLL",
10477                [__HLS_DN_DISABLE_BP]    = "DISABLE",
10478                [__HLS_DN_OFFLINE_BP]    = "OFFLINE",
10479                [__HLS_VERIFY_CAP_BP]    = "VERIFY_CAP",
10480                [__HLS_GOING_UP_BP]      = "GOING_UP",
10481                [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10482                [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10483        };
10484
10485        name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10486        return name ? name : "unknown";
10487}
10488
10489/* return the link state reason name */
10490static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10491{
10492        if (state == HLS_UP_INIT) {
10493                switch (ppd->linkinit_reason) {
10494                case OPA_LINKINIT_REASON_LINKUP:
10495                        return "(LINKUP)";
10496                case OPA_LINKINIT_REASON_FLAPPING:
10497                        return "(FLAPPING)";
10498                case OPA_LINKINIT_OUTSIDE_POLICY:
10499                        return "(OUTSIDE_POLICY)";
10500                case OPA_LINKINIT_QUARANTINED:
10501                        return "(QUARANTINED)";
10502                case OPA_LINKINIT_INSUFIC_CAPABILITY:
10503                        return "(INSUFIC_CAPABILITY)";
10504                default:
10505                        break;
10506                }
10507        }
10508        return "";
10509}
10510
10511/*
10512 * driver_pstate - convert the driver's notion of a port's
10513 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10514 * Return -1 (converted to a u32) to indicate error.
10515 */
10516u32 driver_pstate(struct hfi1_pportdata *ppd)
10517{
10518        switch (ppd->host_link_state) {
10519        case HLS_UP_INIT:
10520        case HLS_UP_ARMED:
10521        case HLS_UP_ACTIVE:
10522                return IB_PORTPHYSSTATE_LINKUP;
10523        case HLS_DN_POLL:
10524                return IB_PORTPHYSSTATE_POLLING;
10525        case HLS_DN_DISABLE:
10526                return IB_PORTPHYSSTATE_DISABLED;
10527        case HLS_DN_OFFLINE:
10528                return OPA_PORTPHYSSTATE_OFFLINE;
10529        case HLS_VERIFY_CAP:
10530                return IB_PORTPHYSSTATE_TRAINING;
10531        case HLS_GOING_UP:
10532                return IB_PORTPHYSSTATE_TRAINING;
10533        case HLS_GOING_OFFLINE:
10534                return OPA_PORTPHYSSTATE_OFFLINE;
10535        case HLS_LINK_COOLDOWN:
10536                return OPA_PORTPHYSSTATE_OFFLINE;
10537        case HLS_DN_DOWNDEF:
10538        default:
10539                dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10540                           ppd->host_link_state);
10541                return  -1;
10542        }
10543}
10544
10545/*
10546 * driver_lstate - convert the driver's notion of a port's
10547 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
10548 * (converted to a u32) to indicate error.
10549 */
10550u32 driver_lstate(struct hfi1_pportdata *ppd)
10551{
10552        if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
10553                return IB_PORT_DOWN;
10554
10555        switch (ppd->host_link_state & HLS_UP) {
10556        case HLS_UP_INIT:
10557                return IB_PORT_INIT;
10558        case HLS_UP_ARMED:
10559                return IB_PORT_ARMED;
10560        case HLS_UP_ACTIVE:
10561                return IB_PORT_ACTIVE;
10562        default:
10563                dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10564                           ppd->host_link_state);
10565                return -1;
10566        }
10567}
10568
10569void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10570                          u8 neigh_reason, u8 rem_reason)
10571{
10572        if (ppd->local_link_down_reason.latest == 0 &&
10573            ppd->neigh_link_down_reason.latest == 0) {
10574                ppd->local_link_down_reason.latest = lcl_reason;
10575                ppd->neigh_link_down_reason.latest = neigh_reason;
10576                ppd->remote_link_down_reason = rem_reason;
10577        }
10578}
10579
10580/*
10581 * Return true if the BCT (buffer control table) for data VLs is non-zero.
10582 */
10583static inline bool data_vls_operational(struct hfi1_pportdata *ppd)
10584{
10585        return !!ppd->actual_vls_operational;
10586}
10587
10588/*
10589 * Change the physical and/or logical link state.
10590 *
10591 * Do not call this routine while inside an interrupt.  It contains
10592 * calls to routines that can take multiple seconds to finish.
10593 *
10594 * Returns 0 on success, -errno on failure.
10595 */
10596int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10597{
10598        struct hfi1_devdata *dd = ppd->dd;
10599        struct ib_event event = {.device = NULL};
10600        int ret1, ret = 0;
10601        int orig_new_state, poll_bounce;
10602
10603        mutex_lock(&ppd->hls_lock);
10604
10605        orig_new_state = state;
10606        if (state == HLS_DN_DOWNDEF)
10607                state = HLS_DEFAULT;
10608
10609        /* interpret poll -> poll as a link bounce */
10610        poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10611                      state == HLS_DN_POLL;
10612
10613        dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10614                    link_state_name(ppd->host_link_state),
10615                    link_state_name(orig_new_state),
10616                    poll_bounce ? "(bounce) " : "",
10617                    link_state_reason_name(ppd, state));
10618
10619        /*
10620         * If we're going to a (HLS_*) link state that implies the logical
10621         * link state is neither IB_PORT_ARMED nor IB_PORT_ACTIVE, then
10622         * reset is_sm_config_started to 0.
10623         */
10624        if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10625                ppd->is_sm_config_started = 0;
10626
10627        /*
10628         * Do nothing if the states match.  Let a poll-to-poll link bounce
10629         * go through.
10630         */
10631        if (ppd->host_link_state == state && !poll_bounce)
10632                goto done;
10633
10634        switch (state) {
10635        case HLS_UP_INIT:
10636                if (ppd->host_link_state == HLS_DN_POLL &&
10637                    (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10638                        /*
10639                         * Quick link up jumps from polling to here.
10640                         *
10641                         * Whether in normal or loopback mode, the
10642                         * simulator jumps from polling to link up.
10643                         * Accept that here.
10644                         */
10645                        /* OK */
10646                } else if (ppd->host_link_state != HLS_GOING_UP) {
10647                        goto unexpected;
10648                }
10649
10650                /*
10651                 * Wait for Link_Up physical state.
10652                 * Physical and Logical states should already be
10653                 * transitioned to LinkUp and LinkInit, respectively.
10654                 */
10655                ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
10656                if (ret) {
10657                        dd_dev_err(dd,
10658                                   "%s: physical state did not change to LINK-UP\n",
10659                                   __func__);
10660                        break;
10661                }
10662
10663                ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10664                if (ret) {
10665                        dd_dev_err(dd,
10666                                   "%s: logical state did not change to INIT\n",
10667                                   __func__);
10668                        break;
10669                }
10670
10671                /* clear old transient LINKINIT_REASON code */
10672                if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10673                        ppd->linkinit_reason =
10674                                OPA_LINKINIT_REASON_LINKUP;
10675
10676                /* enable the port */
10677                add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10678
10679                handle_linkup_change(dd, 1);
10680                pio_kernel_linkup(dd);
10681
10682                /*
10683                 * After link up, a new link width will have been set.
10684                 * Update the xmit counters with regard to the new
10685                 * link width.
10686                 */
10687                update_xmit_counters(ppd, ppd->link_width_active);
10688
10689                ppd->host_link_state = HLS_UP_INIT;
10690                update_statusp(ppd, IB_PORT_INIT);
10691                break;
10692        case HLS_UP_ARMED:
10693                if (ppd->host_link_state != HLS_UP_INIT)
10694                        goto unexpected;
10695
10696                if (!data_vls_operational(ppd)) {
10697                        dd_dev_err(dd,
10698                                   "%s: data VLs not operational\n", __func__);
10699                        ret = -EINVAL;
10700                        break;
10701                }
10702
10703                set_logical_state(dd, LSTATE_ARMED);
10704                ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10705                if (ret) {
10706                        dd_dev_err(dd,
10707                                   "%s: logical state did not change to ARMED\n",
10708                                   __func__);
10709                        break;
10710                }
10711                ppd->host_link_state = HLS_UP_ARMED;
10712                update_statusp(ppd, IB_PORT_ARMED);
10713                /*
10714                 * The simulator does not currently implement SMA messages,
10715                 * so neighbor_normal is not set.  Set it here when we first
10716                 * move to Armed.
10717                 */
10718                if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10719                        ppd->neighbor_normal = 1;
10720                break;
10721        case HLS_UP_ACTIVE:
10722                if (ppd->host_link_state != HLS_UP_ARMED)
10723                        goto unexpected;
10724
10725                set_logical_state(dd, LSTATE_ACTIVE);
10726                ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10727                if (ret) {
10728                        dd_dev_err(dd,
10729                                   "%s: logical state did not change to ACTIVE\n",
10730                                   __func__);
10731                } else {
10732                        /* tell all engines to go running */
10733                        sdma_all_running(dd);
10734                        ppd->host_link_state = HLS_UP_ACTIVE;
10735                        update_statusp(ppd, IB_PORT_ACTIVE);
10736
10737                        /* Signal the IB layer that the port has gone active */
10738                        event.device = &dd->verbs_dev.rdi.ibdev;
10739                        event.element.port_num = ppd->port;
10740                        event.event = IB_EVENT_PORT_ACTIVE;
10741                }
10742                break;
10743        case HLS_DN_POLL:
10744                if ((ppd->host_link_state == HLS_DN_DISABLE ||
10745                     ppd->host_link_state == HLS_DN_OFFLINE) &&
10746                    dd->dc_shutdown)
10747                        dc_start(dd);
10748                /* Hand LED control to the DC */
10749                write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10750
10751                if (ppd->host_link_state != HLS_DN_OFFLINE) {
10752                        u8 tmp = ppd->link_enabled;
10753
10754                        ret = goto_offline(ppd, ppd->remote_link_down_reason);
10755                        if (ret) {
10756                                ppd->link_enabled = tmp;
10757                                break;
10758                        }
10759                        ppd->remote_link_down_reason = 0;
10760
10761                        if (ppd->driver_link_ready)
10762                                ppd->link_enabled = 1;
10763                }
10764
10765                set_all_slowpath(ppd->dd);
10766                ret = set_local_link_attributes(ppd);
10767                if (ret)
10768                        break;
10769
10770                ppd->port_error_action = 0;
10771                ppd->host_link_state = HLS_DN_POLL;
10772
10773                if (quick_linkup) {
10774                        /* quick linkup does not go into polling */
10775                        ret = do_quick_linkup(dd);
10776                } else {
10777                        ret1 = set_physical_link_state(dd, PLS_POLLING);
10778                        if (ret1 != HCMD_SUCCESS) {
10779                                dd_dev_err(dd,
10780                                           "Failed to transition to Polling link state, return 0x%x\n",
10781                                           ret1);
10782                                ret = -EINVAL;
10783                        }
10784                }
10785                ppd->offline_disabled_reason =
10786                        HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10787                /*
10788                 * If an error occurred above, go back to offline.  The
10789                 * caller may reschedule another attempt.
10790                 */
10791                if (ret)
10792                        goto_offline(ppd, 0);
10793                else
10794                        log_physical_state(ppd, PLS_POLLING);
10795                break;
10796        case HLS_DN_DISABLE:
10797                /* link is disabled */
10798                ppd->link_enabled = 0;
10799
10800                /* allow any state to transition to disabled */
10801
10802                /* must transition to offline first */
10803                if (ppd->host_link_state != HLS_DN_OFFLINE) {
10804                        ret = goto_offline(ppd, ppd->remote_link_down_reason);
10805                        if (ret)
10806                                break;
10807                        ppd->remote_link_down_reason = 0;
10808                }
10809
10810                if (!dd->dc_shutdown) {
10811                        ret1 = set_physical_link_state(dd, PLS_DISABLED);
10812                        if (ret1 != HCMD_SUCCESS) {
10813                                dd_dev_err(dd,
10814                                           "Failed to transition to Disabled link state, return 0x%x\n",
10815                                           ret1);
10816                                ret = -EINVAL;
10817                                break;
10818                        }
10819                        ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000);
10820                        if (ret) {
10821                                dd_dev_err(dd,
10822                                           "%s: physical state did not change to DISABLED\n",
10823                                           __func__);
10824                                break;
10825                        }
10826                        dc_shutdown(dd);
10827                }
10828                ppd->host_link_state = HLS_DN_DISABLE;
10829                break;
10830        case HLS_DN_OFFLINE:
10831                if (ppd->host_link_state == HLS_DN_DISABLE)
10832                        dc_start(dd);
10833
10834                /* allow any state to transition to offline */
10835                ret = goto_offline(ppd, ppd->remote_link_down_reason);
10836                if (!ret)
10837                        ppd->remote_link_down_reason = 0;
10838                break;
10839        case HLS_VERIFY_CAP:
10840                if (ppd->host_link_state != HLS_DN_POLL)
10841                        goto unexpected;
10842                ppd->host_link_state = HLS_VERIFY_CAP;
10843                log_physical_state(ppd, PLS_CONFIGPHY_VERIFYCAP);
10844                break;
10845        case HLS_GOING_UP:
10846                if (ppd->host_link_state != HLS_VERIFY_CAP)
10847                        goto unexpected;
10848
10849                ret1 = set_physical_link_state(dd, PLS_LINKUP);
10850                if (ret1 != HCMD_SUCCESS) {
10851                        dd_dev_err(dd,
10852                                   "Failed to transition to link up state, return 0x%x\n",
10853                                   ret1);
10854                        ret = -EINVAL;
10855                        break;
10856                }
10857                ppd->host_link_state = HLS_GOING_UP;
10858                break;
10859
10860        case HLS_GOING_OFFLINE:         /* transient within goto_offline() */
10861        case HLS_LINK_COOLDOWN:         /* transient within goto_offline() */
10862        default:
10863                dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10864                            __func__, state);
10865                ret = -EINVAL;
10866                break;
10867        }
10868
10869        goto done;
10870
10871unexpected:
10872        dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10873                   __func__, link_state_name(ppd->host_link_state),
10874                   link_state_name(state));
10875        ret = -EINVAL;
10876
10877done:
10878        mutex_unlock(&ppd->hls_lock);
10879
10880        if (event.device)
10881                ib_dispatch_event(&event);
10882
10883        return ret;
10884}
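
     /*
      * Illustrative sketch (not part of the driver): the order of states a
      * caller requests through set_link_state() when bringing the link to
      * Active.  Error handling is elided, and each call may sleep, so this
      * must run in process context (see the comment above set_link_state()).
      *
      *        set_link_state(ppd, HLS_DN_POLL);      // begin polling/training
      *        // ... the driver's link-state handling then steps through
      *        // HLS_VERIFY_CAP, HLS_GOING_UP and HLS_UP_INIT as training
      *        // completes ...
      *        set_link_state(ppd, HLS_UP_ARMED);     // needs operational data VLs
      *        set_link_state(ppd, HLS_UP_ACTIVE);    // posts IB_EVENT_PORT_ACTIVE
      */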
10885
10886int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10887{
10888        u64 reg;
10889        int ret = 0;
10890
10891        switch (which) {
10892        case HFI1_IB_CFG_LIDLMC:
10893                set_lidlmc(ppd);
10894                break;
10895        case HFI1_IB_CFG_VL_HIGH_LIMIT:
10896                /*
10897                 * The VL Arbitrator high limit is sent in units of 4k
10898                 * bytes, while HFI stores it in units of 64 bytes.
10899                 */
10900                val *= 4096 / 64;
10901                reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10902                        << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10903                write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10904                break;
10905        case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10906                /* HFI only supports POLL as the default link down state */
10907                if (val != HLS_DN_POLL)
10908                        ret = -EINVAL;
10909                break;
10910        case HFI1_IB_CFG_OP_VLS:
10911                if (ppd->vls_operational != val) {
10912                        ppd->vls_operational = val;
10913                        if (!ppd->port)
10914                                ret = -EINVAL;
10915                }
10916                break;
10917        /*
10918         * For link width, link width downgrade, and speed enable, always AND
10919         * the setting with what is actually supported.  This has two benefits.
10920         * First, enabled can't have unsupported values, no matter what the
10921         * SM or FM might want.  Second, the ALL_SUPPORTED wildcards that mean
10922         * "fill in with your supported value" have all the bits in the
10923         * field set, so simply ANDing with supported has the desired result.
10924         */
10925        case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10926                ppd->link_width_enabled = val & ppd->link_width_supported;
10927                break;
10928        case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10929                ppd->link_width_downgrade_enabled =
10930                                val & ppd->link_width_downgrade_supported;
10931                break;
10932        case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10933                ppd->link_speed_enabled = val & ppd->link_speed_supported;
10934                break;
10935        case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10936                /*
10937                 * HFI does not follow IB specs, save this value
10938                 * so we can report it, if asked.
10939                 */
10940                ppd->overrun_threshold = val;
10941                break;
10942        case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10943                /*
10944                 * HFI does not follow IB specs, save this value
10945                 * so we can report it, if asked.
10946                 */
10947                ppd->phy_error_threshold = val;
10948                break;
10949
10950        case HFI1_IB_CFG_MTU:
10951                set_send_length(ppd);
10952                break;
10953
10954        case HFI1_IB_CFG_PKEYS:
10955                if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10956                        set_partition_keys(ppd);
10957                break;
10958
10959        default:
10960                if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10961                        dd_dev_info(ppd->dd,
10962                                    "%s: which %s, val 0x%x: not implemented\n",
10963                                    __func__, ib_cfg_name(which), val);
10964                break;
10965        }
10966        return ret;
10967}
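
     /*
      * Worked example (illustrative only) of the HFI1_IB_CFG_VL_HIGH_LIMIT
      * conversion above: a VL arbitration high limit of 3 units of 4 KB from
      * the FM becomes 3 * (4096 / 64) = 192 units of 64 bytes before being
      * written to SEND_HIGH_PRIORITY_LIMIT.
      */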
10968
10969/* begin functions related to vl arbitration table caching */
10970static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10971{
10972        int i;
10973
10974        BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10975                        VL_ARB_LOW_PRIO_TABLE_SIZE);
10976        BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10977                        VL_ARB_HIGH_PRIO_TABLE_SIZE);
10978
10979        /*
10980         * Note that we always return values directly from the
10981         * 'vl_arb_cache' (and do no CSR reads) in response to a
10982         * 'Get(VLArbTable)'. This is obviously correct after a
10983         * 'Set(VLArbTable)', since the cache will then be up to
10984         * date. But it's also correct prior to any 'Set(VLArbTable)'
10985         * since then both the cache, and the relevant h/w registers
10986         * will be zeroed.
10987         */
10988
10989        for (i = 0; i < MAX_PRIO_TABLE; i++)
10990                spin_lock_init(&ppd->vl_arb_cache[i].lock);
10991}
10992
10993/*
10994 * vl_arb_lock_cache
10995 *
10996 * All other vl_arb_* functions should be called only after locking
10997 * the cache.
10998 */
10999static inline struct vl_arb_cache *
11000vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
11001{
11002        if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
11003                return NULL;
11004        spin_lock(&ppd->vl_arb_cache[idx].lock);
11005        return &ppd->vl_arb_cache[idx];
11006}
11007
11008static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
11009{
11010        spin_unlock(&ppd->vl_arb_cache[idx].lock);
11011}
11012
11013static void vl_arb_get_cache(struct vl_arb_cache *cache,
11014                             struct ib_vl_weight_elem *vl)
11015{
11016        memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
11017}
11018
11019static void vl_arb_set_cache(struct vl_arb_cache *cache,
11020                             struct ib_vl_weight_elem *vl)
11021{
11022        memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
11023}
11024
11025static int vl_arb_match_cache(struct vl_arb_cache *cache,
11026                              struct ib_vl_weight_elem *vl)
11027{
11028        return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
11029}
11030
11031/* end functions related to vl arbitration table caching */
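
     /*
      * Illustrative sketch (not part of the driver): the lock/operate/unlock
      * pattern expected of the cache helpers above.  "vl" is assumed to be a
      * caller-provided array of at least VL_ARB_TABLE_SIZE
      * struct ib_vl_weight_elem entries.
      *
      *        struct vl_arb_cache *vlc;
      *
      *        vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
      *        if (vlc) {
      *                vl_arb_get_cache(vlc, vl);      // copy cache into vl
      *                vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
      *        }
      */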
11032
11033static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
11034                          u32 size, struct ib_vl_weight_elem *vl)
11035{
11036        struct hfi1_devdata *dd = ppd->dd;
11037        u64 reg;
11038        unsigned int i, is_up = 0;
11039        int drain, ret = 0;
11040
11041        mutex_lock(&ppd->hls_lock);
11042
11043        if (ppd->host_link_state & HLS_UP)
11044                is_up = 1;
11045
11046        drain = !is_ax(dd) && is_up;
11047
11048        if (drain)
11049                /*
11050                 * Before adjusting VL arbitration weights, empty per-VL
11051                 * FIFOs, otherwise a packet whose VL weight is being
11052                 * set to 0 could get stuck in a FIFO with no chance to
11053                 * egress.
11054                 */
11055                ret = stop_drain_data_vls(dd);
11056
11057        if (ret) {
11058                dd_dev_err(
11059                        dd,
11060                        "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
11061                        __func__);
11062                goto err;
11063        }
11064
11065        for (i = 0; i < size; i++, vl++) {
11066                /*
11067                 * NOTE: The low priority shift and mask are used here, but
11068                 * they are the same for both the low and high registers.
11069                 */
11070                reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
11071                                << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
11072                      | (((u64)vl->weight
11073                                & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
11074                                << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
11075                write_csr(dd, target + (i * 8), reg);
11076        }
11077        pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
11078
11079        if (drain)
11080                open_fill_data_vls(dd); /* reopen all VLs */
11081
11082err:
11083        mutex_unlock(&ppd->hls_lock);
11084
11085        return ret;
11086}
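
     /*
      * Illustrative sketch (not part of the driver): a caller only supplies
      * the CSR base and the element array; the drain/refill bracketing is
      * handled inside set_vl_weights().  The table contents below are made
      * up for the example.
      *
      *        struct ib_vl_weight_elem tbl[VL_ARB_LOW_PRIO_TABLE_SIZE] = {
      *                { .vl = 0, .weight = 8 },       // VL0: weight 8
      *                { .vl = 1, .weight = 4 },       // VL1: weight 4
      *                // remaining entries are left zero
      *        };
      *
      *        set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
      *                       VL_ARB_LOW_PRIO_TABLE_SIZE, tbl);
      *
      * set_vl_weights() returns 0 on success or -errno if the data VLs could
      * not be stopped and drained first.
      */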
11087
11088/*
11089 * Read one credit merge VL register.
11090 */
11091static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
11092                           struct vl_limit *vll)
11093{
11094        u64 reg = read_csr(dd, csr);
11095
11096        vll->dedicated = cpu_to_be16(
11097                (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
11098                & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
11099        vll->shared = cpu_to_be16(
11100                (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
11101                & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
11102}
11103
11104/*
11105 * Read the current credit merge limits.
11106 */
11107static int get_buffer_control(struct hfi1_devdata *dd,
11108                              struct buffer_control *bc, u16 *overall_limit)
11109{
11110        u64 reg;
11111        int i;
11112
11113        /* not all entries are filled in */
11114        memset(bc, 0, sizeof(*bc));
11115
11116        /* OPA and HFI have a 1-1 mapping */
11117        for (i = 0; i < TXE_NUM_DATA_VL; i++)
11118                read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
11119
11120        /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
11121        read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
11122
11123        reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11124        bc->overall_shared_limit = cpu_to_be16(
11125                (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
11126                & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
11127        if (overall_limit)
11128                *overall_limit = (reg
11129                        >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
11130                        & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
11131        return sizeof(struct buffer_control);
11132}
11133
11134static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11135{
11136        u64 reg;
11137        int i;
11138
11139        /* each register contains 16 SC->VLnt mappings, 4 bits each */
11140        reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
11141        for (i = 0; i < sizeof(u64); i++) {
11142                u8 byte = *(((u8 *)&reg) + i);
11143
11144                dp->vlnt[2 * i] = byte & 0xf;
11145                dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
11146        }
11147
11148        reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
11149        for (i = 0; i < sizeof(u64); i++) {
11150                u8 byte = *(((u8 *)&reg) + i);
11151
11152                dp->vlnt[16 + (2 * i)] = byte & 0xf;
11153                dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
11154        }
11155        return sizeof(struct sc2vlnt);
11156}
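
     /*
      * Worked example (illustrative only) of the unpacking above: if the
      * first byte of the DCC_CFG_SC_VL_TABLE_15_0 value is 0x21, then
      * dp->vlnt[0] (SC0) is 1 from the low nibble and dp->vlnt[1] (SC1) is
      * 2 from the high nibble.
      */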
11157
11158static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
11159                              struct ib_vl_weight_elem *vl)
11160{
11161        unsigned int i;
11162
11163        for (i = 0; i < nelems; i++, vl++) {
11164                vl->vl = 0xf;
11165                vl->weight = 0;
11166        }
11167}
11168
11169static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11170{
11171        write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
11172                  DC_SC_VL_VAL(15_0,
11173                               0, dp->vlnt[0] & 0xf,
11174                               1, dp->vlnt[1] & 0xf,
11175                               2, dp->vlnt[2] & 0xf,
11176                               3, dp->vlnt[3] & 0xf,
11177                               4, dp->vlnt[4] & 0xf,
11178                               5, dp->vlnt[5] & 0xf,
11179                               6, dp->vlnt[6] & 0xf,
11180                               7, dp->vlnt[7] & 0xf,
11181                               8, dp->vlnt[8] & 0xf,
11182                               9, dp->vlnt[9] & 0xf,
11183                               10, dp->vlnt[10] & 0xf,
11184                               11, dp->vlnt[11] & 0xf,
11185                               12, dp->vlnt[12] & 0xf,
11186                               13, dp->vlnt[13] & 0xf,
11187                               14, dp->vlnt[14] & 0xf,
11188                               15, dp->vlnt[15] & 0xf));
11189        write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
11190                  DC_SC_VL_VAL(31_16,
11191                               16, dp->vlnt[16] & 0xf,
11192                               17, dp->vlnt[17] & 0xf,
11193                               18, dp->vlnt[18] & 0xf,
11194                               19, dp->vlnt[19] & 0xf,
11195                               20, dp->vlnt[20] & 0xf,
11196                               21, dp->vlnt[21] & 0xf,
11197                               22, dp->vlnt[22] & 0xf,
11198                               23, dp->vlnt[23] & 0xf,
11199                               24, dp->vlnt[24] & 0xf,
11200                               25, dp->vlnt[25] & 0xf,
11201                               26, dp->vlnt[26] & 0xf,
11202                               27, dp->vlnt[27] & 0xf,
11203                               28, dp->vlnt[28] & 0xf,
11204                               29, dp->vlnt[29] & 0xf,
11205                               30, dp->vlnt[30] & 0xf,
11206                               31, dp->vlnt[31] & 0xf));
11207}
11208
11209static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
11210                        u16 limit)
11211{
11212        if (limit != 0)
11213                dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
11214                            what, (int)limit, idx);
11215}
11216
11217/* change only the shared limit portion of SendCmGlobalCredit */
11218static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
11219{
11220        u64 reg;
11221
11222        reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11223        reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
11224        reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
11225        write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11226}
11227
11228/* change only the total credit limit portion of SendCmGlobalCredit */
11229static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
11230{
11231        u64 reg;
11232
11233        reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11234        reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
11235        reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
11236        write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11237}
11238
11239/* set the given per-VL shared limit */
11240static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
11241{
11242        u64 reg;
11243        u32 addr;
11244
11245        if (vl < TXE_NUM_DATA_VL)
11246                addr = SEND_CM_CREDIT_VL + (8 * vl);
11247        else
11248                addr = SEND_CM_CREDIT_VL15;
11249
11250        reg = read_csr(dd, addr);
11251        reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
11252        reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
11253        write_csr(dd, addr, reg);
11254}
11255
11256/* set the given per-VL dedicated limit */
11257static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
11258{
11259        u64 reg;
11260        u32 addr;
11261
11262        if (vl < TXE_NUM_DATA_VL)
11263                addr = SEND_CM_CREDIT_VL + (8 * vl);
11264        else
11265                addr = SEND_CM_CREDIT_VL15;
11266
11267        reg = read_csr(dd, addr);
11268        reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
11269        reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
11270        write_csr(dd, addr, reg);
11271}
11272
11273/* spin until the given per-VL status mask bits clear */
11274static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
11275                                     const char *which)
11276{
11277        unsigned long timeout;
11278        u64 reg;
11279
11280        timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
11281        while (1) {
11282                reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
11283
11284                if (reg == 0)
11285                        return; /* success */
11286                if (time_after(jiffies, timeout))
11287                        break;          /* timed out */
11288                udelay(1);
11289        }
11290
11291        dd_dev_err(dd,
11292                   "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
11293                   which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
11294        /*
11295         * If this occurs, it is likely there was a credit loss on the link.
11296         * The only recovery from that is a link bounce.
11297         */
11298        dd_dev_err(dd,
11299                   "Continuing anyway.  A credit loss may occur.  Suggest a link bounce\n");
11300}
11301
11302/*
11303 * The number of credits on the VLs may be changed while everything
11304 * is "live", but the following algorithm must be followed due to
11305 * how the hardware is actually implemented.  In particular,
11306 * Return_Credit_Status[] is the only correct status check.
11307 *
11308 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
11309 *     set Global_Shared_Credit_Limit = 0
11310 *     use_all_vl = 1
11311 * mask0 = all VLs that are changing either dedicated or shared limits
11312 * set Shared_Limit[mask0] = 0
11313 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
11314 * if (changing any dedicated limit)
11315 *     mask1 = all VLs that are lowering dedicated limits
11316 *     lower Dedicated_Limit[mask1]
11317 *     spin until Return_Credit_Status[mask1] == 0
11318 *     raise Dedicated_Limits
11319 * raise Shared_Limits
11320 * raise Global_Shared_Credit_Limit
11321 *
11322 * lower = if the new limit is lower, set the limit to the new value
11323 * raise = if the new limit is higher than the current value (may be changed
11324 *      earlier in the algorithm), set the new limit to the new value
11325 */
11326int set_buffer_control(struct hfi1_pportdata *ppd,
11327                       struct buffer_control *new_bc)
11328{
11329        struct hfi1_devdata *dd = ppd->dd;
11330        u64 changing_mask, ld_mask, stat_mask;
11331        int change_count;
11332        int i, use_all_mask;
11333        int this_shared_changing;
11334        int vl_count = 0, ret;
11335        /*
11336         * A0 hardware requires the extra variable any_shared_limit_changing
11337         * below and in the algorithm above; remove it if A0 support is dropped.
11338         */
11339        int any_shared_limit_changing;
11340        struct buffer_control cur_bc;
11341        u8 changing[OPA_MAX_VLS];
11342        u8 lowering_dedicated[OPA_MAX_VLS];
11343        u16 cur_total;
11344        u32 new_total = 0;
11345        const u64 all_mask =
11346        SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
11347         | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
11348         | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
11349         | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
11350         | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
11351         | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
11352         | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
11353         | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
11354         | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
11355
11356#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
11357#define NUM_USABLE_VLS 16       /* look at VL15 and less */
11358
11359        /* find the new total credits, do sanity check on unused VLs */
11360        for (i = 0; i < OPA_MAX_VLS; i++) {
11361                if (valid_vl(i)) {
11362                        new_total += be16_to_cpu(new_bc->vl[i].dedicated);
11363                        continue;
11364                }
11365                nonzero_msg(dd, i, "dedicated",
11366                            be16_to_cpu(new_bc->vl[i].dedicated));
11367                nonzero_msg(dd, i, "shared",
11368                            be16_to_cpu(new_bc->vl[i].shared));
11369                new_bc->vl[i].dedicated = 0;
11370                new_bc->vl[i].shared = 0;
11371        }
11372        new_total += be16_to_cpu(new_bc->overall_shared_limit);
11373
11374        /* fetch the current values */
11375        get_buffer_control(dd, &cur_bc, &cur_total);
11376
11377        /*
11378         * Create the masks we will use.
11379         */
11380        memset(changing, 0, sizeof(changing));
11381        memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
11382        /*
11383         * NOTE: Assumes that the individual VL bits are adjacent and in
11384         * increasing order
11385         */
11386        stat_mask =
11387                SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
11388        changing_mask = 0;
11389        ld_mask = 0;
11390        change_count = 0;
11391        any_shared_limit_changing = 0;
11392        for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
11393                if (!valid_vl(i))
11394                        continue;
11395                this_shared_changing = new_bc->vl[i].shared
11396                                                != cur_bc.vl[i].shared;
11397                if (this_shared_changing)
11398                        any_shared_limit_changing = 1;
11399                if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
11400                    this_shared_changing) {
11401                        changing[i] = 1;
11402                        changing_mask |= stat_mask;
11403                        change_count++;
11404                }
11405                if (be16_to_cpu(new_bc->vl[i].dedicated) <
11406                                        be16_to_cpu(cur_bc.vl[i].dedicated)) {
11407                        lowering_dedicated[i] = 1;
11408                        ld_mask |= stat_mask;
11409                }
11410        }
11411
11412        /* bracket the credit change with a total adjustment */
11413        if (new_total > cur_total)
11414                set_global_limit(dd, new_total);
11415
11416        /*
11417         * Start the credit change algorithm.
11418         */
11419        use_all_mask = 0;
11420        if ((be16_to_cpu(new_bc->overall_shared_limit) <
11421             be16_to_cpu(cur_bc.overall_shared_limit)) ||
11422            (is_ax(dd) && any_shared_limit_changing)) {
11423                set_global_shared(dd, 0);
11424                cur_bc.overall_shared_limit = 0;
11425                use_all_mask = 1;
11426        }
11427
11428        for (i = 0; i < NUM_USABLE_VLS; i++) {
11429                if (!valid_vl(i))
11430                        continue;
11431
11432                if (changing[i]) {
11433                        set_vl_shared(dd, i, 0);
11434                        cur_bc.vl[i].shared = 0;
11435                }
11436        }
11437
11438        wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
11439                                 "shared");
11440
11441        if (change_count > 0) {
11442                for (i = 0; i < NUM_USABLE_VLS; i++) {
11443                        if (!valid_vl(i))
11444                                continue;
11445
11446                        if (lowering_dedicated[i]) {
11447                                set_vl_dedicated(dd, i,
11448                                                 be16_to_cpu(new_bc->
11449                                                             vl[i].dedicated));
11450                                cur_bc.vl[i].dedicated =
11451                                                new_bc->vl[i].dedicated;
11452                        }
11453                }
11454
11455                wait_for_vl_status_clear(dd, ld_mask, "dedicated");
11456
11457                /* now raise all dedicated that are going up */
11458                for (i = 0; i < NUM_USABLE_VLS; i++) {
11459                        if (!valid_vl(i))
11460                                continue;
11461
11462                        if (be16_to_cpu(new_bc->vl[i].dedicated) >
11463                                        be16_to_cpu(cur_bc.vl[i].dedicated))
11464                                set_vl_dedicated(dd, i,
11465                                                 be16_to_cpu(new_bc->
11466                                                             vl[i].dedicated));
11467                }
11468        }
11469
11470        /* next raise all shared that are going up */
11471        for (i = 0; i < NUM_USABLE_VLS; i++) {
11472                if (!valid_vl(i))
11473                        continue;
11474
11475                if (be16_to_cpu(new_bc->vl[i].shared) >
11476                                be16_to_cpu(cur_bc.vl[i].shared))
11477                        set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11478        }
11479
11480        /* finally raise the global shared */
11481        if (be16_to_cpu(new_bc->overall_shared_limit) >
11482            be16_to_cpu(cur_bc.overall_shared_limit))
11483                set_global_shared(dd,
11484                                  be16_to_cpu(new_bc->overall_shared_limit));
11485
11486        /* bracket the credit change with a total adjustment */
11487        if (new_total < cur_total)
11488                set_global_limit(dd, new_total);
11489
11490        /*
11491         * Determine the actual number of operational VLS using the number of
11492         * dedicated and shared credits for each VL.
11493         */
11494        if (change_count > 0) {
11495                for (i = 0; i < TXE_NUM_DATA_VL; i++)
11496                        if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11497                            be16_to_cpu(new_bc->vl[i].shared) > 0)
11498                                vl_count++;
11499                ppd->actual_vls_operational = vl_count;
11500                ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11501                                    ppd->actual_vls_operational :
11502                                    ppd->vls_operational,
11503                                    NULL);
11504                if (ret == 0)
11505                        ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11506                                           ppd->actual_vls_operational :
11507                                           ppd->vls_operational, NULL);
11508                if (ret)
11509                        return ret;
11510        }
11511        return 0;
11512}
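
     /*
      * Worked example (illustrative only) of the ordering enforced above:
      * moving VL0 from dedicated=100/shared=50 to dedicated=60/shared=80
      * first zeroes VL0's shared limit and waits for Return_Credit_Status to
      * clear, then lowers the dedicated limit to 60 and waits again, and only
      * then raises the shared limit to 80; credits are always returned before
      * any limit is raised.
      */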
11513
11514/*
11515 * Read the given fabric manager table. Return the size of the
11516 * table (in bytes) on success, and a negative error code on
11517 * failure.
11518 */
11519int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11520
11521{
11522        int size;
11523        struct vl_arb_cache *vlc;
11524
11525        switch (which) {
11526        case FM_TBL_VL_HIGH_ARB:
11527                size = 256;
11528                /*
11529                 * OPA specifies 128 elements (of 2 bytes each), though
11530                 * HFI supports only 16 elements in h/w.
11531                 */
11532                vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11533                vl_arb_get_cache(vlc, t);
11534                vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11535                break;
11536        case FM_TBL_VL_LOW_ARB:
11537                size = 256;
11538                /*
11539                 * OPA specifies 128 elements (of 2 bytes each), though
11540                 * HFI supports only 16 elements in h/w.
11541                 */
11542                vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11543                vl_arb_get_cache(vlc, t);
11544                vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11545                break;
11546        case FM_TBL_BUFFER_CONTROL:
11547                size = get_buffer_control(ppd->dd, t, NULL);
11548                break;
11549        case FM_TBL_SC2VLNT:
11550                size = get_sc2vlnt(ppd->dd, t);
11551                break;
11552        case FM_TBL_VL_PREEMPT_ELEMS:
11553                size = 256;
11554                /* OPA specifies 128 elements, of 2 bytes each */
11555                get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11556                break;
11557        case FM_TBL_VL_PREEMPT_MATRIX:
11558                size = 256;
11559                /*
11560                 * OPA specifies that this is the same size as the VL
11561                 * arbitration tables (i.e., 256 bytes).
11562                 */
11563                break;
11564        default:
11565                return -EINVAL;
11566        }
11567        return size;
11568}
11569
11570/*
11571 * Write the given fabric manager table.
11572 */
11573int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11574{
11575        int ret = 0;
11576        struct vl_arb_cache *vlc;
11577
11578        switch (which) {
11579        case FM_TBL_VL_HIGH_ARB:
11580                vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11581                if (vl_arb_match_cache(vlc, t)) {
11582                        vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11583                        break;
11584                }
11585                vl_arb_set_cache(vlc, t);
11586                vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11587                ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11588                                     VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11589                break;
11590        case FM_TBL_VL_LOW_ARB:
11591                vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11592                if (vl_arb_match_cache(vlc, t)) {
11593                        vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11594                        break;
11595                }
11596                vl_arb_set_cache(vlc, t);
11597                vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11598                ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11599                                     VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11600                break;
11601        case FM_TBL_BUFFER_CONTROL:
11602                ret = set_buffer_control(ppd, t);
11603                break;
11604        case FM_TBL_SC2VLNT:
11605                set_sc2vlnt(ppd->dd, t);
11606                break;
11607        default:
11608                ret = -EINVAL;
11609        }
11610        return ret;
11611}
11612
11613/*
11614 * Disable all data VLs.
11615 *
11616 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11617 */
11618static int disable_data_vls(struct hfi1_devdata *dd)
11619{
11620        if (is_ax(dd))
11621                return 1;
11622
11623        pio_send_control(dd, PSC_DATA_VL_DISABLE);
11624
11625        return 0;
11626}
11627
11628/*
11629 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11630 * Just re-enables all data VLs (the "fill" part happens
11631 * automatically - the name was chosen for symmetry with
11632 * stop_drain_data_vls()).
11633 *
11634 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11635 */
11636int open_fill_data_vls(struct hfi1_devdata *dd)
11637{
11638        if (is_ax(dd))
11639                return 1;
11640
11641        pio_send_control(dd, PSC_DATA_VL_ENABLE);
11642
11643        return 0;
11644}
11645
11646/*
11647 * drain_data_vls() - assumes that disable_data_vls() has been called;
11648 * waits for the occupancy of the per-VL FIFOs (for all contexts) and the
11649 * SDMA engines to drop to 0.
11650 */
11651static void drain_data_vls(struct hfi1_devdata *dd)
11652{
11653        sc_wait(dd);
11654        sdma_wait(dd);
11655        pause_for_credit_return(dd);
11656}
11657
11658/*
11659 * stop_drain_data_vls() - disable, then drain all per-VL FIFOs.
11660 *
11661 * Use open_fill_data_vls() to resume using data VLs.  This pair is
11662 * meant to be used like this:
11663 *
11664 * stop_drain_data_vls(dd);
11665 * // do things with per-VL resources
11666 * open_fill_data_vls(dd);
11667 */
11668int stop_drain_data_vls(struct hfi1_devdata *dd)
11669{
11670        int ret;
11671
11672        ret = disable_data_vls(dd);
11673        if (ret == 0)
11674                drain_data_vls(dd);
11675
11676        return ret;
11677}
11678
11679/*
11680 * Convert a nanosecond time to a cclock count.  No matter how slow
11681 * the cclock, a non-zero ns will always have a non-zero result.
11682 */
11683u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11684{
11685        u32 cclocks;
11686
11687        if (dd->icode == ICODE_FPGA_EMULATION)
11688                cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11689        else  /* simulation pretends to be ASIC */
11690                cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11691        if (ns && !cclocks)     /* if ns nonzero, must be at least 1 */
11692                cclocks = 1;
11693        return cclocks;
11694}
11695
11696/*
11697 * Convert a cclock count to nanoseconds.  No matter how slow
11698 * the cclock, a non-zero cclocks will always have a non-zero result.
11699 */
11700u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11701{
11702        u32 ns;
11703
11704        if (dd->icode == ICODE_FPGA_EMULATION)
11705                ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11706        else  /* simulation pretends to be ASIC */
11707                ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11708        if (cclocks && !ns)
11709                ns = 1;
11710        return ns;
11711}
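
     /*
      * Worked example (illustrative only; the 1250 ps period below is
      * hypothetical, not the actual ASIC_CCLOCK_PS): with a 1250 ps cclock,
      * ns_to_cclock() maps 10 ns to (10 * 1000) / 1250 = 8 cclocks, while
      * 1 ns computes to 0 and is clamped up to 1 so a non-zero request never
      * becomes a zero count.  cclock_to_ns() applies the same clamp in the
      * other direction.
      */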
11712
11713/*
11714 * Dynamically adjust the receive interrupt timeout for a context based on
11715 * incoming packet rate.
11716 *
11717 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11718 */
11719static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11720{
11721        struct hfi1_devdata *dd = rcd->dd;
11722        u32 timeout = rcd->rcvavail_timeout;
11723
11724        /*
11725         * This algorithm doubles or halves the timeout depending on whether
11726         * the number of packets received in this interrupt was less than or
11727         * greater than or equal to the interrupt count.
11728         *
11729         * The calculations below do not allow a steady state to be achieved.
11730         * Only at the endpoints is it possible to have an unchanging
11731         * timeout.
11732         */
11733        if (npkts < rcv_intr_count) {
11734                /*
11735                 * Not enough packets arrived before the timeout, adjust
11736                 * timeout downward.
11737                 */
11738                if (timeout < 2) /* already at minimum? */
11739                        return;
11740                timeout >>= 1;
11741        } else {
11742                /*
11743                 * More than enough packets arrived before the timeout, adjust
11744                 * timeout upward.
11745                 */
11746                if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11747                        return;
11748                timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11749        }
11750
11751        rcd->rcvavail_timeout = timeout;
11752        /*
11753         * timeout cannot be larger than rcv_intr_timeout_csr which has already
11754         * been verified to be in range
11755         */
11756        write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11757                        (u64)timeout <<
11758                        RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11759}
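
     /*
      * Worked example (illustrative only): with rcv_intr_count = 8 and a
      * current timeout of 16, an interrupt that handled only 3 packets halves
      * the timeout to 8, while one that handled 20 packets doubles it to 32
      * (capped at dd->rcv_intr_timeout_csr).  Halving stops once the timeout
      * drops below 2, so the value never reaches zero.
      */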
11760
11761void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11762                    u32 intr_adjust, u32 npkts)
11763{
11764        struct hfi1_devdata *dd = rcd->dd;
11765        u64 reg;
11766        u32 ctxt = rcd->ctxt;
11767
11768        /*
11769         * Need to write timeout register before updating RcvHdrHead to ensure
11770         * that a new value is used when the HW decides to restart counting.
11771         */
11772        if (intr_adjust)
11773                adjust_rcv_timeout(rcd, npkts);
11774        if (updegr) {
11775                reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11776                        << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11777                write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11778        }
11779        mmiowb();
11780        reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11781                (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11782                        << RCV_HDR_HEAD_HEAD_SHIFT);
11783        write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11784        mmiowb();
11785}
11786
11787u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11788{
11789        u32 head, tail;
11790
11791        head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11792                & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11793
11794        if (rcd->rcvhdrtail_kvaddr)
11795                tail = get_rcvhdrtail(rcd);
11796        else
11797                tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11798
11799        return head == tail;
11800}
11801
11802/*
11803 * Context Control and Receive Array encoding for buffer size:
11804 *      0x0 invalid
11805 *      0x1   4 KB
11806 *      0x2   8 KB
11807 *      0x3  16 KB
11808 *      0x4  32 KB
11809 *      0x5  64 KB
11810 *      0x6 128 KB
11811 *      0x7 256 KB
11812 *      0x8 512 KB (Receive Array only)
11813 *      0x9   1 MB (Receive Array only)
11814 *      0xa   2 MB (Receive Array only)
11815 *
11816 *      0xb-0xf reserved (Receive Array only)
11817 *
11818 *
11819 * This routine assumes that the value has already been sanity checked.
11820 */
11821static u32 encoded_size(u32 size)
11822{
11823        switch (size) {
11824        case   4 * 1024: return 0x1;
11825        case   8 * 1024: return 0x2;
11826        case  16 * 1024: return 0x3;
11827        case  32 * 1024: return 0x4;
11828        case  64 * 1024: return 0x5;
11829        case 128 * 1024: return 0x6;
11830        case 256 * 1024: return 0x7;
11831        case 512 * 1024: return 0x8;
11832        case   1 * 1024 * 1024: return 0x9;
11833        case   2 * 1024 * 1024: return 0xa;
11834        }
11835        return 0x1;     /* if invalid, go with the minimum size */
11836}
11837
11838void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
11839                  struct hfi1_ctxtdata *rcd)
11840{
11841        u64 rcvctrl, reg;
11842        int did_enable = 0;
11843        u16 ctxt;
11844
11845        if (!rcd)
11846                return;
11847
11848        ctxt = rcd->ctxt;
11849
11850        hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11851
11852        rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11853        /* if the context is already enabled, don't do the extra steps */
11854        if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11855            !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11856                /* reset the tail and hdr addresses, and sequence count */
11857                write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11858                                rcd->rcvhdrq_dma);
11859                if (rcd->rcvhdrtail_kvaddr)
11860                        write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11861                                        rcd->rcvhdrqtailaddr_dma);
11862                rcd->seq_cnt = 1;
11863
11864                /* reset the cached receive header queue head value */
11865                rcd->head = 0;
11866
11867                /*
11868                 * Zero the receive header queue so we don't get false
11869                 * positives when checking the sequence number.  The
11870                 * sequence numbers could land exactly on the same spot.
11871                 * E.g., an rcd restart before the receive header queue wrapped.
11872                 */
11873                memset(rcd->rcvhdrq, 0, rcvhdrq_size(rcd));
11874
11875                /* starting timeout */
11876                rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11877
11878                /* enable the context */
11879                rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11880
11881                /* clean the egr buffer size first */
11882                rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11883                rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11884                                & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11885                                        << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11886
11887                /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11888                write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11889                did_enable = 1;
11890
11891                /* zero RcvEgrIndexHead */
11892                write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11893
11894                /* set eager count and base index */
11895                reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11896                        & RCV_EGR_CTRL_EGR_CNT_MASK)
11897                       << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11898                        (((rcd->eager_base >> RCV_SHIFT)
11899                          & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11900                         << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11901                write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11902
11903                /*
11904                 * Set TID (expected) count and base index.
11905                 * rcd->expected_count is set to individual RcvArray entries,
11906                 * not pairs, and the CSR takes a pair-count in groups of
11907                 * four, so divide by 8.
11908                 */
11909                reg = (((rcd->expected_count >> RCV_SHIFT)
11910                                        & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11911                                << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11912                      (((rcd->expected_base >> RCV_SHIFT)
11913                                        & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11914                                << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11915                write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
11916                if (ctxt == HFI1_CTRL_CTXT)
11917                        write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11918        }
11919        if (op & HFI1_RCVCTRL_CTXT_DIS) {
11920                write_csr(dd, RCV_VL15, 0);
11921                /*
11922                 * When a receive context is being disabled, turn on tail
11923                 * update with a dummy tail address and then disable the
11924                 * receive context.
11925                 */
11926                if (dd->rcvhdrtail_dummy_dma) {
11927                        write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11928                                        dd->rcvhdrtail_dummy_dma);
11929                        /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
11930                        rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11931                }
11932
11933                rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11934        }
11935        if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11936                rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11937        if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11938                rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11939        if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr)
11940                rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11941        if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11942                /* See comment on RcvCtxtCtrl.TailUpd above */
11943                if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11944                        rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11945        }
11946        if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11947                rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11948        if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11949                rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11950        if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11951                /*
11952                 * In one-packet-per-eager mode, the size comes from
11953                 * the RcvArray entry.
11954                 */
11955                rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11956                rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11957        }
11958        if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11959                rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11960        if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11961                rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11962        if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11963                rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11964        if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11965                rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11966        if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11967                rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11968        hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11969        write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl);
11970
11971        /* work around sticky RcvCtxtStatus.BlockedRHQFull */
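        /*
         * If the just-enabled context already reports a non-zero status,
         * nudge the hardware by bumping RcvHdrHead to a non-zero value and
         * writing it back to zero, then re-read the status to see whether
         * the context is still blocked.
         */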
11972        if (did_enable &&
11973            (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
11974                reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11975                if (reg != 0) {
11976                        dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
11977                                    ctxt, reg);
11978                        read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11979                        write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11980                        write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11981                        read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11982                        reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11983                        dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
11984                                    ctxt, reg, reg == 0 ? "not" : "still");
11985                }
11986        }
11987
11988        if (did_enable) {
11989                /*
11990                 * The interrupt timeout and count must be set after
11991                 * the context is enabled to take effect.
11992                 */
11993                /* set interrupt timeout */
11994                write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
11995                                (u64)rcd->rcvavail_timeout <<
11996                                RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11997
11998                /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11999                reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
12000                write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
12001        }
12002
12003        if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
12004                /*
12005                 * If the context has been disabled and the Tail Update has
12006                 * been cleared, point the RCV_HDR_TAIL_ADDR CSR at the dummy
12007                 * address so it does not hold an invalid address.
12008                 */
12009                write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
12010                                dd->rcvhdrtail_dummy_dma);
12011}
12012
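/*
 * Used by sysfs to read the device counters.  When namep is non-NULL only
 * the counter name block and its length are returned; otherwise every
 * enabled device counter (expanded per VL or per SDMA engine as needed) is
 * read into dd->cntrs and the size of that block is returned.
 */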
12013u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
12014{
12015        int ret;
12016        u64 val = 0;
12017
12018        if (namep) {
12019                ret = dd->cntrnameslen;
12020                *namep = dd->cntrnames;
12021        } else {
12022                const struct cntr_entry *entry;
12023                int i, j;
12024
12025                ret = (dd->ndevcntrs) * sizeof(u64);
12026
12027                /* Get the start of the block of counters */
12028                *cntrp = dd->cntrs;
12029
12030                /*
12031                 * Now go and fill in each counter in the block.
12032                 */
12033                for (i = 0; i < DEV_CNTR_LAST; i++) {
12034                        entry = &dev_cntrs[i];
12035                        hfi1_cdbg(CNTR, "reading %s", entry->name);
12036                        if (entry->flags & CNTR_DISABLED) {
12037                                /* Nothing */
12038                                hfi1_cdbg(CNTR, "\tDisabled\n");
12039                        } else {
12040                                if (entry->flags & CNTR_VL) {
12041                                        hfi1_cdbg(CNTR, "\tPer VL\n");
12042                                        for (j = 0; j < C_VL_COUNT; j++) {
12043                                                val = entry->rw_cntr(entry,
12044                                                                  dd, j,
12045                                                                  CNTR_MODE_R,
12046                                                                  0);
12047                                                hfi1_cdbg(
12048                                                   CNTR,
12049                                                   "\t\tRead 0x%llx for %d\n",
12050                                                   val, j);
12051                                                dd->cntrs[entry->offset + j] =
12052                                                                            val;
12053                                        }
12054                                } else if (entry->flags & CNTR_SDMA) {
12055                                        hfi1_cdbg(CNTR,
12056                                                  "\t Per SDMA Engine\n");
12057                                        for (j = 0; j < chip_sdma_engines(dd);
12058                                             j++) {
12059                                                val =
12060                                                entry->rw_cntr(entry, dd, j,
12061                                                               CNTR_MODE_R, 0);
12062                                                hfi1_cdbg(CNTR,
12063                                                          "\t\tRead 0x%llx for %d\n",
12064                                                          val, j);
12065                                                dd->cntrs[entry->offset + j] =
12066                                                                        val;
12067                                        }
12068                                } else {
12069                                        val = entry->rw_cntr(entry, dd,
12070                                                        CNTR_INVALID_VL,
12071                                                        CNTR_MODE_R, 0);
12072                                        dd->cntrs[entry->offset] = val;
12073                                        hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12074                                }
12075                        }
12076                }
12077        }
12078        return ret;
12079}
12080
12081/*
12082 * Used by sysfs to create files for hfi stats to read
12083 */
12084u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
12085{
12086        int ret;
12087        u64 val = 0;
12088
12089        if (namep) {
12090                ret = ppd->dd->portcntrnameslen;
12091                *namep = ppd->dd->portcntrnames;
12092        } else {
12093                const struct cntr_entry *entry;
12094                int i, j;
12095
12096                ret = ppd->dd->nportcntrs * sizeof(u64);
12097                *cntrp = ppd->cntrs;
12098
12099                for (i = 0; i < PORT_CNTR_LAST; i++) {
12100                        entry = &port_cntrs[i];
12101                        hfi1_cdbg(CNTR, "reading %s", entry->name);
12102                        if (entry->flags & CNTR_DISABLED) {
12103                                /* Nothing */
12104                                hfi1_cdbg(CNTR, "\tDisabled\n");
12105                                continue;
12106                        }
12107
12108                        if (entry->flags & CNTR_VL) {
12109                                hfi1_cdbg(CNTR, "\tPer VL");
12110                                for (j = 0; j < C_VL_COUNT; j++) {
12111                                        val = entry->rw_cntr(entry, ppd, j,
12112                                                               CNTR_MODE_R,
12113                                                               0);
12114                                        hfi1_cdbg(
12115                                           CNTR,
12116                                           "\t\tRead 0x%llx for %d",
12117                                           val, j);
12118                                        ppd->cntrs[entry->offset + j] = val;
12119                                }
12120                        } else {
12121                                val = entry->rw_cntr(entry, ppd,
12122                                                       CNTR_INVALID_VL,
12123                                                       CNTR_MODE_R,
12124                                                       0);
12125                                ppd->cntrs[entry->offset] = val;
12126                                hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12127                        }
12128                }
12129        }
12130        return ret;
12131}
12132
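/*
 * Undo init_cntrs(): stop the synthetic stats timer, free the per-port
 * counter arrays and percpu IB stats, free the device counter and name
 * blocks, and destroy the counter update workqueue.
 */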
12133static void free_cntrs(struct hfi1_devdata *dd)
12134{
12135        struct hfi1_pportdata *ppd;
12136        int i;
12137
12138        if (dd->synth_stats_timer.function)
12139                del_timer_sync(&dd->synth_stats_timer);
12140        ppd = (struct hfi1_pportdata *)(dd + 1);
12141        for (i = 0; i < dd->num_pports; i++, ppd++) {
12142                kfree(ppd->cntrs);
12143                kfree(ppd->scntrs);
12144                free_percpu(ppd->ibport_data.rvp.rc_acks);
12145                free_percpu(ppd->ibport_data.rvp.rc_qacks);
12146                free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
12147                ppd->cntrs = NULL;
12148                ppd->scntrs = NULL;
12149                ppd->ibport_data.rvp.rc_acks = NULL;
12150                ppd->ibport_data.rvp.rc_qacks = NULL;
12151                ppd->ibport_data.rvp.rc_delayed_comp = NULL;
12152        }
12153        kfree(dd->portcntrnames);
12154        dd->portcntrnames = NULL;
12155        kfree(dd->cntrs);
12156        dd->cntrs = NULL;
12157        kfree(dd->scntrs);
12158        dd->scntrs = NULL;
12159        kfree(dd->cntrnames);
12160        dd->cntrnames = NULL;
12161        if (dd->update_cntr_wq) {
12162                destroy_workqueue(dd->update_cntr_wq);
12163                dd->update_cntr_wq = NULL;
12164        }
12165}
12166
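/*
 * Common read helper for device and port counters.  The hardware value is
 * read and, for synthetic counters, reconciled with the saved software
 * value *psval: 32-bit counters are widened using the saved upper half,
 * and a rollover of a 64-bit synthetic counter saturates at CNTR_MAX.
 */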
12167static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
12168                              u64 *psval, void *context, int vl)
12169{
12170        u64 val;
12171        u64 sval = *psval;
12172
12173        if (entry->flags & CNTR_DISABLED) {
12174                dd_dev_err(dd, "Counter %s not enabled", entry->name);
12175                return 0;
12176        }
12177
12178        hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12179
12180        val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
12181
12182        /* If it's a synthetic counter there is more work we need to do */
12183        if (entry->flags & CNTR_SYNTH) {
12184                if (sval == CNTR_MAX) {
12185                        /* No need to read, already saturated */
12186                        return CNTR_MAX;
12187                }
12188
12189                if (entry->flags & CNTR_32BIT) {
12190                        /* 32bit counters can wrap multiple times */
12191                        u64 upper = sval >> 32;
12192                        u64 lower = (sval << 32) >> 32;
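                        /*
                         * Worked example (hypothetical values): if sval is
                         * 0x1_8000_0000 (upper = 0x1, lower = 0x8000_0000)
                         * and the hardware now reads val = 0x10, then
                         * lower > val, so the counter wrapped; upper becomes
                         * 0x2 and the new 64-bit value is 0x2_0000_0010.
                         */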
12193
12194                        if (lower > val) { /* hw wrapped */
12195                                if (upper == CNTR_32BIT_MAX)
12196                                        val = CNTR_MAX;
12197                                else
12198                                        upper++;
12199                        }
12200
12201                        if (val != CNTR_MAX)
12202                                val = (upper << 32) | val;
12203
12204                } else {
12205                        /* If we rolled we are saturated */
12206                        if ((val < sval) || (val > CNTR_MAX))
12207                                val = CNTR_MAX;
12208                }
12209        }
12210
12211        *psval = val;
12212
12213        hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12214
12215        return val;
12216}
12217
12218static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
12219                               struct cntr_entry *entry,
12220                               u64 *psval, void *context, int vl, u64 data)
12221{
12222        u64 val;
12223
12224        if (entry->flags & CNTR_DISABLED) {
12225                dd_dev_err(dd, "Counter %s not enabled", entry->name);
12226                return 0;
12227        }
12228
12229        hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12230
12231        if (entry->flags & CNTR_SYNTH) {
12232                *psval = data;
12233                if (entry->flags & CNTR_32BIT) {
12234                        val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12235                                             (data << 32) >> 32);
12236                        val = data; /* return the full 64bit value */
12237                } else {
12238                        val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12239                                             data);
12240                }
12241        } else {
12242                val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
12243        }
12244
12245        *psval = val;
12246
12247        hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12248
12249        return val;
12250}
12251
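/*
 * Thin wrappers over read/write_dev_port_cntr() that look up the entry in
 * the dev_cntrs/port_cntrs tables and offset the saved-value pointer by the
 * VL index for per-VL counters.  Port overflow counters for receive
 * contexts that do not exist on this device are reported as zero.
 */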
12252u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
12253{
12254        struct cntr_entry *entry;
12255        u64 *sval;
12256
12257        entry = &dev_cntrs[index];
12258        sval = dd->scntrs + entry->offset;
12259
12260        if (vl != CNTR_INVALID_VL)
12261                sval += vl;
12262
12263        return read_dev_port_cntr(dd, entry, sval, dd, vl);
12264}
12265
12266u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
12267{
12268        struct cntr_entry *entry;
12269        u64 *sval;
12270
12271        entry = &dev_cntrs[index];
12272        sval = dd->scntrs + entry->offset;
12273
12274        if (vl != CNTR_INVALID_VL)
12275                sval += vl;
12276
12277        return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
12278}
12279
12280u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
12281{
12282        struct cntr_entry *entry;
12283        u64 *sval;
12284
12285        entry = &port_cntrs[index];
12286        sval = ppd->scntrs + entry->offset;
12287
12288        if (vl != CNTR_INVALID_VL)
12289                sval += vl;
12290
12291        if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12292            (index <= C_RCV_HDR_OVF_LAST)) {
12293                /* We do not want to bother for disabled contexts */
12294                return 0;
12295        }
12296
12297        return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
12298}
12299
12300u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
12301{
12302        struct cntr_entry *entry;
12303        u64 *sval;
12304
12305        entry = &port_cntrs[index];
12306        sval = ppd->scntrs + entry->offset;
12307
12308        if (vl != CNTR_INVALID_VL)
12309                sval += vl;
12310
12311        if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12312            (index <= C_RCV_HDR_OVF_LAST)) {
12313                /* We do not want to bother for disabled contexts */
12314                return 0;
12315        }
12316
12317        return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
12318}
12319
12320static void do_update_synth_timer(struct work_struct *work)
12321{
12322        u64 cur_tx;
12323        u64 cur_rx;
12324        u64 total_flits;
12325        u8 update = 0;
12326        int i, j, vl;
12327        struct hfi1_pportdata *ppd;
12328        struct cntr_entry *entry;
12329        struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
12330                                               update_cntr_work);
12331
12332        /*
12333         * Rather than keep beating on the CSRs, pick a minimal set that we
12334         * can check to watch for potential rollover. We do this by looking at
12335         * the number of flits sent/received. If the total exceeds 32 bits then
12336         * we have to iterate over all the counters and update them.
12337         */
12338        entry = &dev_cntrs[C_DC_RCV_FLITS];
12339        cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12340
12341        entry = &dev_cntrs[C_DC_XMIT_FLITS];
12342        cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12343
12344        hfi1_cdbg(
12345            CNTR,
12346            "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
12347            dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
12348
12349        if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
12350                /*
12351                 * May not be strictly necessary to update but it won't hurt and
12352                 * simplifies the logic here.
12353                 */
12354                update = 1;
12355                hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
12356                          dd->unit);
12357        } else {
12358                total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
12359                hfi1_cdbg(CNTR,
12360                          "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
12361                          total_flits, (u64)CNTR_32BIT_MAX);
12362                if (total_flits >= CNTR_32BIT_MAX) {
12363                        hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
12364                                  dd->unit);
12365                        update = 1;
12366                }
12367        }
12368
12369        if (update) {
12370                hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
12371                for (i = 0; i < DEV_CNTR_LAST; i++) {
12372                        entry = &dev_cntrs[i];
12373                        if (entry->flags & CNTR_VL) {
12374                                for (vl = 0; vl < C_VL_COUNT; vl++)
12375                                        read_dev_cntr(dd, i, vl);
12376                        } else {
12377                                read_dev_cntr(dd, i, CNTR_INVALID_VL);
12378                        }
12379                }
12380                ppd = (struct hfi1_pportdata *)(dd + 1);
12381                for (i = 0; i < dd->num_pports; i++, ppd++) {
12382                        for (j = 0; j < PORT_CNTR_LAST; j++) {
12383                                entry = &port_cntrs[j];
12384                                if (entry->flags & CNTR_VL) {
12385                                        for (vl = 0; vl < C_VL_COUNT; vl++)
12386                                                read_port_cntr(ppd, j, vl);
12387                                } else {
12388                                        read_port_cntr(ppd, j, CNTR_INVALID_VL);
12389                                }
12390                        }
12391                }
12392
12393                /*
12394                 * We want the value in the register. The goal is to keep track
12395                 * of the number of "ticks" not the counter value. In other
12396                 * words if the register rolls we want to notice it and go ahead
12397                 * and force an update.
12398                 */
12399                entry = &dev_cntrs[C_DC_XMIT_FLITS];
12400                dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12401                                                CNTR_MODE_R, 0);
12402
12403                entry = &dev_cntrs[C_DC_RCV_FLITS];
12404                dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12405                                                CNTR_MODE_R, 0);
12406
12407                hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
12408                          dd->unit, dd->last_tx, dd->last_rx);
12409
12410        } else {
12411                hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
12412        }
12413}
12414
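/*
 * Timer callback: defer the CSR-heavy counter update to the ordered
 * workqueue and re-arm the timer for the next SYNTH_CNT_TIME interval.
 */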
12415static void update_synth_timer(struct timer_list *t)
12416{
12417        struct hfi1_devdata *dd = from_timer(dd, t, synth_stats_timer);
12418
12419        queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
12420        mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12421}
12422
12423#define C_MAX_NAME 16 /* 15 chars + one for \0 */
12424static int init_cntrs(struct hfi1_devdata *dd)
12425{
12426        int i, rcv_ctxts, j;
12427        size_t sz;
12428        char *p;
12429        char name[C_MAX_NAME];
12430        struct hfi1_pportdata *ppd;
12431        const char *bit_type_32 = ",32";
12432        const int bit_type_32_sz = strlen(bit_type_32);
12433        u32 sdma_engines = chip_sdma_engines(dd);
12434
12435        /* set up the stats timer; the add_timer is done at the end */
12436        timer_setup(&dd->synth_stats_timer, update_synth_timer, 0);
12437
12438        /***********************/
12439        /* per device counters */
12440        /***********************/
12441
12442        /* size names and determine how many we have */
12443        dd->ndevcntrs = 0;
12444        sz = 0;
12445
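        /*
         * Each enabled counter contributes one u64 slot to the value block
         * and one name of the form "<name>[<vl or engine>][,32]\n" to the
         * name block; per-VL and per-SDMA-engine counters expand to one
         * entry per VL or engine.
         */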
12446        for (i = 0; i < DEV_CNTR_LAST; i++) {
12447                if (dev_cntrs[i].flags & CNTR_DISABLED) {
12448                        hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
12449                        continue;
12450                }
12451
12452                if (dev_cntrs[i].flags & CNTR_VL) {
12453                        dev_cntrs[i].offset = dd->ndevcntrs;
12454                        for (j = 0; j < C_VL_COUNT; j++) {
12455                                snprintf(name, C_MAX_NAME, "%s%d",
12456                                         dev_cntrs[i].name, vl_from_idx(j));
12457                                sz += strlen(name);
12458                                /* Add ",32" for 32-bit counters */
12459                                if (dev_cntrs[i].flags & CNTR_32BIT)
12460                                        sz += bit_type_32_sz;
12461                                sz++;
12462                                dd->ndevcntrs++;
12463                        }
12464                } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12465                        dev_cntrs[i].offset = dd->ndevcntrs;
12466                        for (j = 0; j < sdma_engines; j++) {
12467                                snprintf(name, C_MAX_NAME, "%s%d",
12468                                         dev_cntrs[i].name, j);
12469                                sz += strlen(name);
12470                                /* Add ",32" for 32-bit counters */
12471                                if (dev_cntrs[i].flags & CNTR_32BIT)
12472                                        sz += bit_type_32_sz;
12473                                sz++;
12474                                dd->ndevcntrs++;
12475                        }
12476                } else {
12477                        /* +1 for newline. */
12478                        sz += strlen(dev_cntrs[i].name) + 1;
12479                        /* Add ",32" for 32-bit counters */
12480                        if (dev_cntrs[i].flags & CNTR_32BIT)
12481                                sz += bit_type_32_sz;
12482                        dev_cntrs[i].offset = dd->ndevcntrs;
12483                        dd->ndevcntrs++;
12484                }
12485        }
12486
12487        /* allocate space for the counter values */
12488        dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12489        if (!dd->cntrs)
12490                goto bail;
12491
12492        dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12493        if (!dd->scntrs)
12494                goto bail;
12495
12496        /* allocate space for the counter names */
12497        dd->cntrnameslen = sz;
12498        dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12499        if (!dd->cntrnames)
12500                goto bail;
12501
12502        /* fill in the names */
12503        for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
12504                if (dev_cntrs[i].flags & CNTR_DISABLED) {
12505                        /* Nothing */
12506                } else if (dev_cntrs[i].flags & CNTR_VL) {
12507                        for (j = 0; j < C_VL_COUNT; j++) {
12508                                snprintf(name, C_MAX_NAME, "%s%d",
12509                                         dev_cntrs[i].name,
12510                                         vl_from_idx(j));
12511                                memcpy(p, name, strlen(name));
12512                                p += strlen(name);
12513
12514                                /* Counter is 32 bits */
12515                                if (dev_cntrs[i].flags & CNTR_32BIT) {
12516                                        memcpy(p, bit_type_32, bit_type_32_sz);
12517                                        p += bit_type_32_sz;
12518                                }
12519
12520                                *p++ = '\n';
12521                        }
12522                } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12523                        for (j = 0; j < sdma_engines; j++) {
12524                                snprintf(name, C_MAX_NAME, "%s%d",
12525                                         dev_cntrs[i].name, j);
12526                                memcpy(p, name, strlen(name));
12527                                p += strlen(name);
12528
12529                                /* Counter is 32 bits */
12530                                if (dev_cntrs[i].flags & CNTR_32BIT) {
12531                                        memcpy(p, bit_type_32, bit_type_32_sz);
12532                                        p += bit_type_32_sz;
12533                                }
12534
12535                                *p++ = '\n';
12536                        }
12537                } else {
12538                        memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12539                        p += strlen(dev_cntrs[i].name);
12540
12541                        /* Counter is 32 bits */
12542                        if (dev_cntrs[i].flags & CNTR_32BIT) {
12543                                memcpy(p, bit_type_32, bit_type_32_sz);
12544                                p += bit_type_32_sz;
12545                        }
12546
12547                        *p++ = '\n';
12548                }
12549        }
12550
12551        /*********************/
12552        /* per port counters */
12553        /*********************/
12554
12555        /*
12556         * Go through the counters for the overflows and disable the ones we
12557         * don't need. This varies based on platform so we need to do it
12558         * dynamically here.
12559         */
12560        rcv_ctxts = dd->num_rcv_contexts;
12561        for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12562             i <= C_RCV_HDR_OVF_LAST; i++) {
12563                port_cntrs[i].flags |= CNTR_DISABLED;
12564        }
12565
12566        /* size port counter names and determine how many we have */
12567        sz = 0;
12568        dd->nportcntrs = 0;
12569        for (i = 0; i < PORT_CNTR_LAST; i++) {
12570                if (port_cntrs[i].flags & CNTR_DISABLED) {
12571                        hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12572                        continue;
12573                }
12574
12575                if (port_cntrs[i].flags & CNTR_VL) {
12576                        port_cntrs[i].offset = dd->nportcntrs;
12577                        for (j = 0; j < C_VL_COUNT; j++) {
12578                                snprintf(name, C_MAX_NAME, "%s%d",
12579                                         port_cntrs[i].name, vl_from_idx(j));
12580                                sz += strlen(name);
12581                                /* Add ",32" for 32-bit counters */
12582                                if (port_cntrs[i].flags & CNTR_32BIT)
12583                                        sz += bit_type_32_sz;
12584                                sz++;
12585                                dd->nportcntrs++;
12586                        }
12587                } else {
12588                        /* +1 for newline */
12589                        sz += strlen(port_cntrs[i].name) + 1;
12590                        /* Add ",32" for 32-bit counters */
12591                        if (port_cntrs[i].flags & CNTR_32BIT)
12592                                sz += bit_type_32_sz;
12593                        port_cntrs[i].offset = dd->nportcntrs;
12594                        dd->nportcntrs++;
12595                }
12596        }
12597
12598        /* allocate space for the counter names */
12599        dd->portcntrnameslen = sz;
12600        dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12601        if (!dd->portcntrnames)
12602                goto bail;
12603
12604        /* fill in port cntr names */
12605        for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12606                if (port_cntrs[i].flags & CNTR_DISABLED)
12607                        continue;
12608
12609                if (port_cntrs[i].flags & CNTR_VL) {
12610                        for (j = 0; j < C_VL_COUNT; j++) {
12611                                snprintf(name, C_MAX_NAME, "%s%d",
12612                                         port_cntrs[i].name, vl_from_idx(j));
12613                                memcpy(p, name, strlen(name));
12614                                p += strlen(name);
12615
12616                                /* Counter is 32 bits */
12617                                if (port_cntrs[i].flags & CNTR_32BIT) {
12618                                        memcpy(p, bit_type_32, bit_type_32_sz);
12619                                        p += bit_type_32_sz;
12620                                }
12621
12622                                *p++ = '\n';
12623                        }
12624                } else {
12625                        memcpy(p, port_cntrs[i].name,
12626                               strlen(port_cntrs[i].name));
12627                        p += strlen(port_cntrs[i].name);
12628
12629                        /* Counter is 32 bits */
12630                        if (port_cntrs[i].flags & CNTR_32BIT) {
12631                                memcpy(p, bit_type_32, bit_type_32_sz);
12632                                p += bit_type_32_sz;
12633                        }
12634
12635                        *p++ = '\n';
12636                }
12637        }
12638
12639        /* allocate per port storage for counter values */
12640        ppd = (struct hfi1_pportdata *)(dd + 1);
12641        for (i = 0; i < dd->num_pports; i++, ppd++) {
12642                ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12643                if (!ppd->cntrs)
12644                        goto bail;
12645
12646                ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12647                if (!ppd->scntrs)
12648                        goto bail;
12649        }
12650
12651        /* CPU counters need to be allocated and zeroed */
12652        if (init_cpu_counters(dd))
12653                goto bail;
12654
12655        dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
12656                                                     WQ_MEM_RECLAIM, dd->unit);
12657        if (!dd->update_cntr_wq)
12658                goto bail;
12659
12660        INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);
12661
12662        mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12663        return 0;
12664bail:
12665        free_cntrs(dd);
12666        return -ENOMEM;
12667}
12668
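/*
 * Map a chip logical link state onto the corresponding IB port logical
 * state, defaulting to IB_PORT_DOWN for unrecognized values.
 */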
12669static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12670{
12671        switch (chip_lstate) {
12672        default:
12673                dd_dev_err(dd,
12674                           "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12675                           chip_lstate);
12676                /* fall through */
12677        case LSTATE_DOWN:
12678                return IB_PORT_DOWN;
12679        case LSTATE_INIT:
12680                return IB_PORT_INIT;
12681        case LSTATE_ARMED:
12682                return IB_PORT_ARMED;
12683        case LSTATE_ACTIVE:
12684                return IB_PORT_ACTIVE;
12685        }
12686}
12687
12688u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12689{
12690        /* look at the HFI meta-states only */
12691        switch (chip_pstate & 0xf0) {
12692        default:
12693                dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12694                           chip_pstate);
12695                /* fall through */
12696        case PLS_DISABLED:
12697                return IB_PORTPHYSSTATE_DISABLED;
12698        case PLS_OFFLINE:
12699                return OPA_PORTPHYSSTATE_OFFLINE;
12700        case PLS_POLLING:
12701                return IB_PORTPHYSSTATE_POLLING;
12702        case PLS_CONFIGPHY:
12703                return IB_PORTPHYSSTATE_TRAINING;
12704        case PLS_LINKUP:
12705                return IB_PORTPHYSSTATE_LINKUP;
12706        case PLS_PHYTEST:
12707                return IB_PORTPHYSSTATE_PHY_TEST;
12708        }
12709}
12710
12711/* return the OPA port logical state name */
12712const char *opa_lstate_name(u32 lstate)
12713{
12714        static const char * const port_logical_names[] = {
12715                "PORT_NOP",
12716                "PORT_DOWN",
12717                "PORT_INIT",
12718                "PORT_ARMED",
12719                "PORT_ACTIVE",
12720                "PORT_ACTIVE_DEFER",
12721        };
12722        if (lstate < ARRAY_SIZE(port_logical_names))
12723                return port_logical_names[lstate];
12724        return "unknown";
12725}
12726
12727/* return the OPA port physical state name */
12728const char *opa_pstate_name(u32 pstate)
12729{
12730        static const char * const port_physical_names[] = {
12731                "PHYS_NOP",
12732                "reserved1",
12733                "PHYS_POLL",
12734                "PHYS_DISABLED",
12735                "PHYS_TRAINING",
12736                "PHYS_LINKUP",
12737                "PHYS_LINK_ERR_RECOVER",
12738                "PHYS_PHY_TEST",
12739                "reserved8",
12740                "PHYS_OFFLINE",
12741                "PHYS_GANGED",
12742                "PHYS_TEST",
12743        };
12744        if (pstate < ARRAY_SIZE(port_physical_names))
12745                return port_physical_names[pstate];
12746        return "unknown";
12747}
12748
12749/**
12750 * update_statusp - Update userspace status flag
12751 * @ppd: Port data structure
12752 * @state: port state information
12753 *
12754 * Actual port status is determined by the host_link_state value
12755 * in the ppd.
12756 *
12757 * host_link_state MUST be updated before updating the user space
12758 * statusp.
12759 */
12760static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
12761{
12762        /*
12763         * Set port status flags in the page mapped into userspace
12764         * memory. Do it here to ensure a reliable state - this is
12765         * the only function called by all state handling code.
12766         * Always set the flags because the cached value might
12767         * have been changed explicitly outside of this
12768         * function.
12769         */
12770        if (ppd->statusp) {
12771                switch (state) {
12772                case IB_PORT_DOWN:
12773                case IB_PORT_INIT:
12774                        *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12775                                           HFI1_STATUS_IB_READY);
12776                        break;
12777                case IB_PORT_ARMED:
12778                        *ppd->statusp |= HFI1_STATUS_IB_CONF;
12779                        break;
12780                case IB_PORT_ACTIVE:
12781                        *ppd->statusp |= HFI1_STATUS_IB_READY;
12782                        break;
12783                }
12784        }
12785        dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
12786                    opa_lstate_name(state), state);
12787}
12788
12789/**
12790 * wait_logical_linkstate - wait for an IB link state change to occur
12791 * @ppd: port device
12792 * @state: the state to wait for
12793 * @msecs: the number of milliseconds to wait
12794 *
12795 * Wait up to msecs milliseconds for IB link state change to occur.
12796 * For now, take the easy polling route.
12797 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12798 */
12799static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12800                                  int msecs)
12801{
12802        unsigned long timeout;
12803        u32 new_state;
12804
12805        timeout = jiffies + msecs_to_jiffies(msecs);
12806        while (1) {
12807                new_state = chip_to_opa_lstate(ppd->dd,
12808                                               read_logical_state(ppd->dd));
12809                if (new_state == state)
12810                        break;
12811                if (time_after(jiffies, timeout)) {
12812                        dd_dev_err(ppd->dd,
12813                                   "timeout waiting for link state 0x%x\n",
12814                                   state);
12815                        return -ETIMEDOUT;
12816                }
12817                msleep(20);
12818        }
12819
12820        return 0;
12821}
12822
12823static void log_state_transition(struct hfi1_pportdata *ppd, u32 state)
12824{
12825        u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state);
12826
12827        dd_dev_info(ppd->dd,
12828                    "physical state changed to %s (0x%x), phy 0x%x\n",
12829                    opa_pstate_name(ib_pstate), ib_pstate, state);
12830}
12831
12832/*
12833 * Read the physical hardware link state and check if it matches host
12834 * driver's anticipated state.
12835 */
12836static void log_physical_state(struct hfi1_pportdata *ppd, u32 state)
12837{
12838        u32 read_state = read_physical_state(ppd->dd);
12839
12840        if (read_state == state) {
12841                log_state_transition(ppd, state);
12842        } else {
12843                dd_dev_err(ppd->dd,
12844                           "anticipated phy link state 0x%x, read 0x%x\n",
12845                           state, read_state);
12846        }
12847}
12848
12849/*
12850 * wait_physical_linkstate - wait for a physical link state change to occur
12851 * @ppd: port device
12852 * @state: the state to wait for
12853 * @msecs: the number of milliseconds to wait
12854 *
12855 * Wait up to msecs milliseconds for a physical link state change to occur.
12856 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12857 */
12858static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12859                                   int msecs)
12860{
12861        u32 read_state;
12862        unsigned long timeout;
12863
12864        timeout = jiffies + msecs_to_jiffies(msecs);
12865        while (1) {
12866                read_state = read_physical_state(ppd->dd);
12867                if (read_state == state)
12868                        break;
12869                if (time_after(jiffies, timeout)) {
12870                        dd_dev_err(ppd->dd,
12871                                   "timeout waiting for phy link state 0x%x\n",
12872                                   state);
12873                        return -ETIMEDOUT;
12874                }
12875                usleep_range(1950, 2050); /* sleep 2ms-ish */
12876        }
12877
12878        log_state_transition(ppd, state);
12879        return 0;
12880}
12881
12882/*
12883 * wait_phys_link_offline_substates - wait for any offline substate
12884 * @ppd: port device
12885 * @msecs: the number of milliseconds to wait
12886 *
12887 * Wait up to msecs milliseconds for any offline physical link
12888 * state change to occur.
12889 * Returns the read physical state on success, otherwise -ETIMEDOUT.
12890 */
12891static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
12892                                            int msecs)
12893{
12894        u32 read_state;
12895        unsigned long timeout;
12896
12897        timeout = jiffies + msecs_to_jiffies(msecs);
12898        while (1) {
12899                read_state = read_physical_state(ppd->dd);
12900                if ((read_state & 0xF0) == PLS_OFFLINE)
12901                        break;
12902                if (time_after(jiffies, timeout)) {
12903                        dd_dev_err(ppd->dd,
12904                                   "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n",
12905                                   read_state, msecs);
12906                        return -ETIMEDOUT;
12907                }
12908                usleep_range(1950, 2050); /* sleep 2ms-ish */
12909        }
12910
12911        log_state_transition(ppd, read_state);
12912        return read_state;
12913}
12914
12915#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12916(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12917
12918#define SET_STATIC_RATE_CONTROL_SMASK(r) \
12919(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12920
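/*
 * Program the static rate control check for a send context: if the
 * STATIC_RATE_CTRL capability is set for this context type (user vs.
 * kernel), clear the "disallow PBC static rate control" bit in
 * SEND_CTXT_CHECK_ENABLE, otherwise set it.
 */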
12921void hfi1_init_ctxt(struct send_context *sc)
12922{
12923        if (sc) {
12924                struct hfi1_devdata *dd = sc->dd;
12925                u64 reg;
12926                u8 set = (sc->type == SC_USER ?
12927                          HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12928                          HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12929                reg = read_kctxt_csr(dd, sc->hw_context,
12930                                     SEND_CTXT_CHECK_ENABLE);
12931                if (set)
12932                        CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12933                else
12934                        SET_STATIC_RATE_CONTROL_SMASK(reg);
12935                write_kctxt_csr(dd, sc->hw_context,
12936                                SEND_CTXT_CHECK_ENABLE, reg);
12937        }
12938}
12939
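/*
 * Read the on-die thermal sensor (ASIC_STS_THERM) and unpack the current
 * temperature, the low/high/critical limits, and the trigger bits into
 * *temp.  Only supported on RTL silicon; otherwise returns -EINVAL.
 */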
12940int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12941{
12942        int ret = 0;
12943        u64 reg;
12944
12945        if (dd->icode != ICODE_RTL_SILICON) {
12946                if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12947                        dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12948                                    __func__);
12949                return -EINVAL;
12950        }
12951        reg = read_csr(dd, ASIC_STS_THERM);
12952        temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12953                      ASIC_STS_THERM_CURR_TEMP_MASK);
12954        temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12955                        ASIC_STS_THERM_LO_TEMP_MASK);
12956        temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12957                        ASIC_STS_THERM_HI_TEMP_MASK);
12958        temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12959                          ASIC_STS_THERM_CRIT_TEMP_MASK);
12960        /* triggers is a 3-bit value - 1 bit per trigger. */
12961        temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12962
12963        return ret;
12964}
12965
12966/**
12967 * get_int_mask - get 64 bit int mask
12968 * @dd: the devdata
12969 * @i: the csr (relative to CCE_INT_MASK)
12970 *
12971 * Returns the mask with the urgent interrupt mask
12972 * bit clear for kernel receive contexts.
12973 */
12974static u64 get_int_mask(struct hfi1_devdata *dd, u32 i)
12975{
12976        u64 mask = U64_MAX; /* default to no change */
12977
12978        if (i >= (IS_RCVURGENT_START / 64) && i < (IS_RCVURGENT_END / 64)) {
12979                int j = (i - (IS_RCVURGENT_START / 64)) * 64;
12980                int k = !j ? IS_RCVURGENT_START % 64 : 0;
12981
12982                if (j)
12983                        j -= IS_RCVURGENT_START % 64;
12984                /* j = 0..dd->first_dyn_alloc_ctxt - 1, k = 0..63 */
12985                for (; j < dd->first_dyn_alloc_ctxt && k < 64; j++, k++)
12986                        /* convert to bit in mask and clear */
12987                        mask &= ~BIT_ULL(k);
12988        }
12989        return mask;
12990}
12991
12992/* ========================================================================= */
12993
12994/*
12995 * Enable/disable chip from delivering interrupts.
12996 */
12997void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12998{
12999        int i;
13000
13001        /*
13002         * In HFI, the mask needs to be 1 to allow interrupts.
13003         */
13004        if (enable) {
13005                /* enable all interrupts but urgent on kernel contexts */
13006                for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13007                        u64 mask = get_int_mask(dd, i);
13008
13009                        write_csr(dd, CCE_INT_MASK + (8 * i), mask);
13010                }
13011
13012                init_qsfp_int(dd);
13013        } else {
13014                for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13015                        write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
13016        }
13017}
13018
13019/*
13020 * Clear all interrupt sources on the chip.
13021 */
13022static void clear_all_interrupts(struct hfi1_devdata *dd)
13023{
13024        int i;
13025
13026        for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13027                write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
13028
13029        write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
13030        write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
13031        write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
13032        write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
13033        write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
13034        write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
13035        write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
13036        for (i = 0; i < chip_send_contexts(dd); i++)
13037                write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
13038        for (i = 0; i < chip_sdma_engines(dd); i++)
13039                write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
13040
13041        write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
13042        write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
13043        write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
13044}
13045
13046/**
13047 * hfi1_clean_up_interrupts() - Free all IRQ resources
13048 * @dd: valid device data structure
13049 *
13050 * Free the MSIx and associated PCI resources, if they have been allocated.
13051 */
13052void hfi1_clean_up_interrupts(struct hfi1_devdata *dd)
13053{
13054        int i;
13055        struct hfi1_msix_entry *me = dd->msix_entries;
13056
13057        /* remove irqs - must happen before disabling/turning off */
13058        for (i = 0; i < dd->num_msix_entries; i++, me++) {
13059                if (!me->arg) /* => no irq, no affinity */
13060                        continue;
13061                hfi1_put_irq_affinity(dd, me);
13062                pci_free_irq(dd->pcidev, i, me->arg);
13063        }
13064
13065        /* clean structures */
13066        kfree(dd->msix_entries);
13067        dd->msix_entries = NULL;
13068        dd->num_msix_entries = 0;
13069
13070        pci_free_irq_vectors(dd->pcidev);
13071}
13072
13073/*
13074 * Remap the interrupt source from the general handler to the given MSI-X
13075 * interrupt.
13076 */
13077static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
13078{
13079        u64 reg;
13080        int m, n;
13081
13082        /* clear from the handled mask of the general interrupt */
13083        m = isrc / 64;
13084        n = isrc % 64;
13085        if (likely(m < CCE_NUM_INT_CSRS)) {
13086                dd->gi_mask[m] &= ~((u64)1 << n);
13087        } else {
13088                dd_dev_err(dd, "remap interrupt err\n");
13089                return;
13090        }
13091
13092        /* direct the chip source to the given MSI-X interrupt */
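        /*
         * Each CCE_INT_MAP CSR packs eight 8-bit map entries: byte n of
         * CSR m selects the MSI-X vector for chip interrupt source
         * m * 8 + n.
         */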
13093        m = isrc / 8;
13094        n = isrc % 8;
13095        reg = read_csr(dd, CCE_INT_MAP + (8 * m));
13096        reg &= ~((u64)0xff << (8 * n));
13097        reg |= ((u64)msix_intr & 0xff) << (8 * n);
13098        write_csr(dd, CCE_INT_MAP + (8 * m), reg);
13099}
13100
13101static void remap_sdma_interrupts(struct hfi1_devdata *dd,
13102                                  int engine, int msix_intr)
13103{
13104        /*
13105         * SDMA engine interrupt sources are grouped by type, rather than
13106         * by engine.  Per-engine interrupts are as follows:
13107         *      SDMA
13108         *      SDMAProgress
13109         *      SDMAIdle
13110         */
13111        remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
13112                   msix_intr);
13113        remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
13114                   msix_intr);
13115        remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
13116                   msix_intr);
13117}
13118
13119static int request_msix_irqs(struct hfi1_devdata *dd)
13120{
13121        int first_general, last_general;
13122        int first_sdma, last_sdma;
13123        int first_rx, last_rx;
13124        int i, ret = 0;
13125
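        /*
         * MSI-X vector layout: vector 0 is the general "slow path"
         * interrupt, followed by one vector per used SDMA engine, then one
         * per kernel receive context, and finally one per VNIC context.
         */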
13126        /* calculate the ranges we are going to use */
13127        first_general = 0;
13128        last_general = first_general + 1;
13129        first_sdma = last_general;
13130        last_sdma = first_sdma + dd->num_sdma;
13131        first_rx = last_sdma;
13132        last_rx = first_rx + dd->n_krcv_queues + dd->num_vnic_contexts;
13133
13134        /* VNIC MSIx interrupts get mapped when VNIC contexts are created */
13135        dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues;
13136
13137        /*
13138         * Sanity check - the code expects all SDMA chip source
13139         * interrupts to be in the same CSR, starting at bit 0.  Verify
13140         * that this is true by checking the bit location of the start.
13141         */
13142        BUILD_BUG_ON(IS_SDMA_START % 64);
13143
13144        for (i = 0; i < dd->num_msix_entries; i++) {
13145                struct hfi1_msix_entry *me = &dd->msix_entries[i];
13146                const char *err_info;
13147                irq_handler_t handler;
13148                irq_handler_t thread = NULL;
13149                void *arg = NULL;
13150                int idx;
13151                struct hfi1_ctxtdata *rcd = NULL;
13152                struct sdma_engine *sde = NULL;
13153                char name[MAX_NAME_SIZE];
13154
13155                /* obtain the arguments to pci_request_irq */
13156                if (first_general <= i && i < last_general) {
13157                        idx = i - first_general;
13158                        handler = general_interrupt;
13159                        arg = dd;
13160                        snprintf(name, sizeof(name),
13161                                 DRIVER_NAME "_%d", dd->unit);
13162                        err_info = "general";
13163                        me->type = IRQ_GENERAL;
13164                } else if (first_sdma <= i && i < last_sdma) {
13165                        idx = i - first_sdma;
13166                        sde = &dd->per_sdma[idx];
13167                        handler = sdma_interrupt;
13168                        arg = sde;
13169                        snprintf(name, sizeof(name),
13170                                 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
13171                        err_info = "sdma";
13172                        remap_sdma_interrupts(dd, idx, i);
13173                        me->type = IRQ_SDMA;
13174                } else if (first_rx <= i && i < last_rx) {
13175                        idx = i - first_rx;
13176                        rcd = hfi1_rcd_get_by_index_safe(dd, idx);
13177                        if (rcd) {
13178                                /*
13179                                 * Set the interrupt register and mask for this
13180                                 * context's interrupt.
13181                                 */
13182                                rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
13183                                rcd->imask = ((u64)1) <<
13184                                          ((IS_RCVAVAIL_START + idx) % 64);
13185                                handler = receive_context_interrupt;
13186                                thread = receive_context_thread;
13187                                arg = rcd;
13188                                snprintf(name, sizeof(name),
13189                                         DRIVER_NAME "_%d kctxt%d",
13190                                         dd->unit, idx);
13191                                err_info = "receive context";
13192                                remap_intr(dd, IS_RCVAVAIL_START + idx, i);
13193                                me->type = IRQ_RCVCTXT;
13194                                rcd->msix_intr = i;
13195                                hfi1_rcd_put(rcd);
13196                        }
13197                } else {
13198                        /* not in our expected range - complain, then
13199                         * ignore it
13200                         */
13201                        dd_dev_err(dd,
13202                                   "Unexpected extra MSI-X interrupt %d\n", i);
13203                        continue;
13204                }
13205                /* no argument, no interrupt */
13206                if (!arg)
13207                        continue;
13208                /* make sure the name is terminated */
13209                name[sizeof(name) - 1] = 0;
13210                me->irq = pci_irq_vector(dd->pcidev, i);
13211                ret = pci_request_irq(dd->pcidev, i, handler, thread, arg,
13212                                      name);
13213                if (ret) {
13214                        dd_dev_err(dd,
13215                                   "unable to allocate %s interrupt, irq %d, index %d, err %d\n",
13216                                   err_info, me->irq, idx, ret);
13217                        return ret;
13218                }
13219                /*
13220                 * assign arg after pci_request_irq call, so it will be
13221                 * cleaned up
13222                 */
13223                me->arg = arg;
13224
13225                ret = hfi1_get_irq_affinity(dd, me);
13226                if (ret)
13227                        dd_dev_err(dd, "unable to pin IRQ %d\n", ret);
13228        }
13229
13230        return ret;
13231}
13232
13233void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd)
13234{
13235        int i;
13236
13237        for (i = 0; i < dd->vnic.num_ctxt; i++) {
13238                struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
13239                struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
13240
13241                synchronize_irq(me->irq);
13242        }
13243}
13244
13245void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd)
13246{
13247        struct hfi1_devdata *dd = rcd->dd;
13248        struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
13249
13250        if (!me->arg) /* => no irq, no affinity */
13251                return;
13252
13253        hfi1_put_irq_affinity(dd, me);
13254        pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg);
13255
13256        me->arg = NULL;
13257}
13258
13259void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
13260{
13261        struct hfi1_devdata *dd = rcd->dd;
13262        struct hfi1_msix_entry *me;
13263        int idx = rcd->ctxt;
13264        void *arg = rcd;
13265        int ret;
13266
13267        rcd->msix_intr = dd->vnic.msix_idx++;
13268        me = &dd->msix_entries[rcd->msix_intr];
13269
13270        /*
13271         * Set the interrupt register and mask for this
13272         * context's interrupt.
13273         */
13274        rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
13275        rcd->imask = ((u64)1) <<
13276                  ((IS_RCVAVAIL_START + idx) % 64);
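        /*
         * Worked example (hypothetical value): if IS_RCVAVAIL_START + idx
         * were 130, then ireg = 130 / 64 = 2 and
         * imask = 1ull << (130 % 64) = 1ull << 2.
         */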
13277        me->type = IRQ_RCVCTXT;
13278        me->irq = pci_irq_vector(dd->pcidev, rcd->msix_intr);
13279        remap_intr(dd, IS_RCVAVAIL_START + idx, rcd->msix_intr);
13280
13281        ret = pci_request_irq(dd->pcidev, rcd->msix_intr,
13282                              receive_context_interrupt,
13283                              receive_context_thread, arg,
13284                              DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
13285        if (ret) {
13286                dd_dev_err(dd, "vnic irq request (irq %d, idx %d) fail %d\n",
13287                           me->irq, idx, ret);
13288                return;
13289        }
13290        /*
13291         * assign arg after pci_request_irq call, so it will be
13292         * cleaned up
13293         */
13294        me->arg = arg;
13295
13296        ret = hfi1_get_irq_affinity(dd, me);
13297        if (ret) {
13298                dd_dev_err(dd,
13299                           "unable to pin IRQ %d\n", ret);
13300                pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg);
13301        }
13302}
13303
13304/*
13305 * Set the general handler to accept all interrupts, remap all
13306 * chip interrupts back to MSI-X 0.
13307 */
13308static void reset_interrupts(struct hfi1_devdata *dd)
13309{
13310        int i;
13311
13312        /* all interrupts handled by the general handler */
13313        for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13314                dd->gi_mask[i] = ~(u64)0;
13315
13316        /* all chip interrupts map to MSI-X 0 */
13317        for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13318                write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13319}
13320
13321static int set_up_interrupts(struct hfi1_devdata *dd)
13322{
13323        u32 total;
13324        int ret, request;
13325
13326        /*
13327         * Interrupt count:
13328         *      1 general, "slow path" interrupt (includes the SDMA engines
13329         *              slow source, SDMACleanupDone)
13330         *      N interrupts - one per used SDMA engine
13331         *      M interrupts - one per kernel receive context
13332         *      V interrupts - one for each VNIC context
13333         */
13334        total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_vnic_contexts;
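        /*
         * Example with hypothetical counts: 16 SDMA engines, 9 kernel
         * receive contexts and 1 VNIC context give
         * total = 1 + 16 + 9 + 1 = 27 MSI-X vectors requested.
         */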
13335
13336        /* ask for MSI-X interrupts */
13337        request = request_msix(dd, total);
13338        if (request < 0) {
13339                ret = request;
13340                goto fail;
13341        } else {
13342                dd->msix_entries = kcalloc(total, sizeof(*dd->msix_entries),
13343                                           GFP_KERNEL);
13344                if (!dd->msix_entries) {
13345                        ret = -ENOMEM;
13346                        goto fail;
13347                }
13348                /* using MSI-X */
13349                dd->num_msix_entries = total;
13350                dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
13351        }
13352
13353        /* mask all interrupts */
13354        set_intr_state(dd, 0);
13355        /* clear all pending interrupts */
13356        clear_all_interrupts(dd);
13357
13358        /* reset general handler mask, chip MSI-X mappings */
13359        reset_interrupts(dd);
13360
13361        ret = request_msix_irqs(dd);
13362        if (ret)
13363                goto fail;
13364
13365        return 0;
13366
13367fail:
13368        hfi1_clean_up_interrupts(dd);
13369        return ret;
13370}
13371
13372/*
13373 * Set up context values in dd.  Sets:
13374 *
13375 *      num_rcv_contexts - number of contexts being used
13376 *      n_krcv_queues - number of kernel contexts
13377 *      first_dyn_alloc_ctxt - first dynamically allocated context
13378 *                             in array of contexts
13379 *      freectxts  - number of free user contexts
13380 *      num_send_contexts - number of PIO send contexts being used
13381 *      num_vnic_contexts - number of contexts reserved for VNIC
13382 */
13383static int set_up_context_variables(struct hfi1_devdata *dd)
13384{
13385        unsigned long num_kernel_contexts;
13386        u16 num_vnic_contexts = HFI1_NUM_VNIC_CTXT;
13387        int total_contexts;
13388        int ret;
13389        unsigned ngroups;
13390        int qos_rmt_count;
13391        int user_rmt_reduced;
13392        u32 n_usr_ctxts;
13393        u32 send_contexts = chip_send_contexts(dd);
13394        u32 rcv_contexts = chip_rcv_contexts(dd);
13395
13396        /*
13397         * Kernel receive contexts:
13398         * - Context 0 - control context (VL15/multicast/error)
13399         * - Context 1 - first kernel context
13400         * - Context 2 - second kernel context
13401         * ...
13402         */
13403        if (n_krcvqs)
13404                /*
13405                 * n_krcvqs is the sum of module parameter kernel receive
13406                 * contexts, krcvqs[].  It does not include the control
13407                 * context, so add that.
13408                 */
13409                num_kernel_contexts = n_krcvqs + 1;
13410        else
13411                num_kernel_contexts = DEFAULT_KRCVQS + 1;
13412        /*
13413         * Every kernel receive context needs an ACK send context.
13414         * One send context is allocated for each VL{0-7} and VL15.
13415         */
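        /*
         * For illustration (hypothetical sizes): with 160 send contexts
         * and num_vls = 8, at most 160 - 8 - 1 = 151 kernel receive
         * contexts can each be backed by an ACK send context.
         */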
13416        if (num_kernel_contexts > (send_contexts - num_vls - 1)) {
13417                dd_dev_err(dd,
13418                           "Reducing # kernel rcv contexts to: %d, from %lu\n",
13419                           send_contexts - num_vls - 1,
13420                           num_kernel_contexts);
13421                num_kernel_contexts = send_contexts - num_vls - 1;
13422        }
13423
13424        /* Accommodate VNIC contexts if possible */
13425        if ((num_kernel_contexts + num_vnic_contexts) > rcv_contexts) {
13426                dd_dev_err(dd, "No receive contexts available for VNIC\n");
13427                num_vnic_contexts = 0;
13428        }
13429        total_contexts = num_kernel_contexts + num_vnic_contexts;
13430
13431        /*
13432         * User contexts:
13433         *      - default to 1 user context per real (non-HT) CPU core if
13434         *        num_user_contexts is negative
13435         */
13436        if (num_user_contexts < 0)
13437                n_usr_ctxts = cpumask_weight(&node_affinity.real_cpu_mask);
13438        else
13439                n_usr_ctxts = num_user_contexts;
13440        /*
13441         * Adjust the counts given a global max.
13442         */
13443        if (total_contexts + n_usr_ctxts > rcv_contexts) {
13444                dd_dev_err(dd,
13445                           "Reducing # user receive contexts to: %d, from %u\n",
13446                           rcv_contexts - total_contexts,
13447                           n_usr_ctxts);
13448                /* recalculate */
13449                n_usr_ctxts = rcv_contexts - total_contexts;
13450        }
13451
13452        /* each user context requires an entry in the RMT */
13453        qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
13454        if (qos_rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
13455                user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
13456                dd_dev_err(dd,
13457                           "RMT size is reducing the number of user receive contexts from %u to %d\n",
13458                           n_usr_ctxts,
13459                           user_rmt_reduced);
13460                /* recalculate */
13461                n_usr_ctxts = user_rmt_reduced;
13462        }
13463
13464        total_contexts += n_usr_ctxts;
13465
13466        /* the first N are kernel contexts, the rest are user/vnic contexts */
13467        dd->num_rcv_contexts = total_contexts;
13468        dd->n_krcv_queues = num_kernel_contexts;
13469        dd->first_dyn_alloc_ctxt = num_kernel_contexts;
13470        dd->num_vnic_contexts = num_vnic_contexts;
13471        dd->num_user_contexts = n_usr_ctxts;
13472        dd->freectxts = n_usr_ctxts;
13473        dd_dev_info(dd,
13474                    "rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n",
13475                    rcv_contexts,
13476                    (int)dd->num_rcv_contexts,
13477                    (int)dd->n_krcv_queues,
13478                    dd->num_vnic_contexts,
13479                    dd->num_user_contexts);
13480
13481        /*
13482         * Receive array allocation:
13483         *   All RcvArray entries are divided into groups of 8. This
13484         *   is required by the hardware and will speed up writes to
13485         *   consecutive entries by using write-combining of the entire
13486         *   cacheline.
13487         *
13488         *   The number of groups is evenly divided among all contexts;
13489         *   any left-over groups will be given to the first N user
13490         *   contexts.
13491         */
13492        dd->rcv_entries.group_size = RCV_INCREMENT;
13493        ngroups = chip_rcv_array_count(dd) / dd->rcv_entries.group_size;
13494        dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
13495        dd->rcv_entries.nctxt_extra = ngroups -
13496                (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
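        /*
         * Worked example (hypothetical sizes): 8192 RcvArray entries in
         * groups of 8 give 1024 groups; with 24 receive contexts each
         * context gets 1024 / 24 = 42 groups and the remaining
         * 1024 - 24 * 42 = 16 groups become nctxt_extra.
         */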
13497        dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
13498                    dd->rcv_entries.ngroups,
13499                    dd->rcv_entries.nctxt_extra);
13500        if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
13501            MAX_EAGER_ENTRIES * 2) {
13502                dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
13503                        dd->rcv_entries.group_size;
13504                dd_dev_info(dd,
13505                            "RcvArray group count too high, change to %u\n",
13506                            dd->rcv_entries.ngroups);
13507                dd->rcv_entries.nctxt_extra = 0;
13508        }
13509        /*
13510         * PIO send contexts
13511         */
13512        ret = init_sc_pools_and_sizes(dd);
13513        if (ret >= 0) { /* success */
13514                dd->num_send_contexts = ret;
13515                dd_dev_info(
13516                        dd,
13517                        "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
13518                        send_contexts,
13519                        dd->num_send_contexts,
13520                        dd->sc_sizes[SC_KERNEL].count,
13521                        dd->sc_sizes[SC_ACK].count,
13522                        dd->sc_sizes[SC_USER].count,
13523                        dd->sc_sizes[SC_VL15].count);
13524                ret = 0;        /* success */
13525        }
13526
13527        return ret;
13528}
13529
13530/*
13531 * Set the device/port partition key table. The MAD code
13532 * will ensure that, at least, the partial management
13533 * partition key is present in the table.
13534 */
13535static void set_partition_keys(struct hfi1_pportdata *ppd)
13536{
13537        struct hfi1_devdata *dd = ppd->dd;
13538        u64 reg = 0;
13539        int i;
13540
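        /*
         * Packing sketch: each 64-bit RCV_PARTITION_KEY register holds
         * four 16-bit PKeys.  PKey i is shifted into slot (i % 4) and,
         * once the fourth slot is filled (i % 4 == 3), the register at
         * CSR offset ((i - 3) * 2) is written; e.g. PKeys 4-7 land at
         * offset (7 - 3) * 2 = 8 bytes past RCV_PARTITION_KEY.
         */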
13541        dd_dev_info(dd, "Setting partition keys\n");
13542        for (i = 0; i < hfi1_get_npkeys(dd); i++) {
13543                reg |= (ppd->pkeys[i] &
13544                        RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
13545                        ((i % 4) *
13546                         RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
13547                /* Each register holds 4 PKey values. */
13548                if ((i % 4) == 3) {
13549                        write_csr(dd, RCV_PARTITION_KEY +
13550                                  ((i - 3) * 2), reg);
13551                        reg = 0;
13552                }
13553        }
13554
13555        /* Always enable HW pkeys check when pkeys table is set */
13556        add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
13557}
13558
13559/*
13560 * These CSRs and memories are uninitialized on reset and must be
13561 * written before reading to set the ECC/parity bits.
13562 *
13563 * NOTE: All user context CSRs that are not mmapped write-only
13564 * (e.g. the TID flows) must be initialized even if the driver never
13565 * reads them.
13566 */
13567static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
13568{
13569        int i, j;
13570
13571        /* CceIntMap */
13572        for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13573                write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13574
13575        /* SendCtxtCreditReturnAddr */
13576        for (i = 0; i < chip_send_contexts(dd); i++)
13577                write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13578
13579        /* PIO Send buffers */
13580        /* SDMA Send buffers */
13581        /*
13582         * These are not normally read, and (presently) have no method
13583         * to be read, so are not pre-initialized
13584         */
13585
13586        /* RcvHdrAddr */
13587        /* RcvHdrTailAddr */
13588        /* RcvTidFlowTable */
13589        for (i = 0; i < chip_rcv_contexts(dd); i++) {
13590                write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13591                write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13592                for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
13593                        write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
13594        }
13595
13596        /* RcvArray */
13597        for (i = 0; i < chip_rcv_array_count(dd); i++)
13598                hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0);
13599
13600        /* RcvQPMapTable */
13601        for (i = 0; i < 32; i++)
13602                write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13603}
13604
13605/*
13606 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
13607 */
13608static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
13609                             u64 ctrl_bits)
13610{
13611        unsigned long timeout;
13612        u64 reg;
13613
13614        /* is the condition present? */
13615        reg = read_csr(dd, CCE_STATUS);
13616        if ((reg & status_bits) == 0)
13617                return;
13618
13619        /* clear the condition */
13620        write_csr(dd, CCE_CTRL, ctrl_bits);
13621
13622        /* wait for the condition to clear */
13623        timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13624        while (1) {
13625                reg = read_csr(dd, CCE_STATUS);
13626                if ((reg & status_bits) == 0)
13627                        return;
13628                if (time_after(jiffies, timeout)) {
13629                        dd_dev_err(dd,
13630                                   "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13631                                   status_bits, reg & status_bits);
13632                        return;
13633                }
13634                udelay(1);
13635        }
13636}
13637
13638/* set CCE CSRs to chip reset defaults */
13639static void reset_cce_csrs(struct hfi1_devdata *dd)
13640{
13641        int i;
13642
13643        /* CCE_REVISION read-only */
13644        /* CCE_REVISION2 read-only */
13645        /* CCE_CTRL - bits clear automatically */
13646        /* CCE_STATUS read-only, use CceCtrl to clear */
13647        clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13648        clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13649        clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13650        for (i = 0; i < CCE_NUM_SCRATCH; i++)
13651                write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13652        /* CCE_ERR_STATUS read-only */
13653        write_csr(dd, CCE_ERR_MASK, 0);
13654        write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13655        /* CCE_ERR_FORCE leave alone */
13656        for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13657                write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13658        write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13659        /* CCE_PCIE_CTRL leave alone */
13660        for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13661                write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13662                write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
13663                          CCE_MSIX_TABLE_UPPER_RESETCSR);
13664        }
13665        for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13666                /* CCE_MSIX_PBA read-only */
13667                write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13668                write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13669        }
13670        for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13671                write_csr(dd, CCE_INT_MAP, 0);
13672        for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13673                /* CCE_INT_STATUS read-only */
13674                write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13675                write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13676                /* CCE_INT_FORCE leave alone */
13677                /* CCE_INT_BLOCKED read-only */
13678        }
13679        for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13680                write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13681}
13682
13683/* set MISC CSRs to chip reset defaults */
13684static void reset_misc_csrs(struct hfi1_devdata *dd)
13685{
13686        int i;
13687
13688        for (i = 0; i < 32; i++) {
13689                write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13690                write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13691                write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13692        }
13693        /*
13694         * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13695         * only be written in 128-byte chunks
13696         */
13697        /* init RSA engine to clear lingering errors */
13698        write_csr(dd, MISC_CFG_RSA_CMD, 1);
13699        write_csr(dd, MISC_CFG_RSA_MU, 0);
13700        write_csr(dd, MISC_CFG_FW_CTRL, 0);
13701        /* MISC_STS_8051_DIGEST read-only */
13702        /* MISC_STS_SBM_DIGEST read-only */
13703        /* MISC_STS_PCIE_DIGEST read-only */
13704        /* MISC_STS_FAB_DIGEST read-only */
13705        /* MISC_ERR_STATUS read-only */
13706        write_csr(dd, MISC_ERR_MASK, 0);
13707        write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13708        /* MISC_ERR_FORCE leave alone */
13709}
13710
13711/* set TXE CSRs to chip reset defaults */
13712static void reset_txe_csrs(struct hfi1_devdata *dd)
13713{
13714        int i;
13715
13716        /*
13717         * TXE Kernel CSRs
13718         */
13719        write_csr(dd, SEND_CTRL, 0);
13720        __cm_reset(dd, 0);      /* reset CM internal state */
13721        /* SEND_CONTEXTS read-only */
13722        /* SEND_DMA_ENGINES read-only */
13723        /* SEND_PIO_MEM_SIZE read-only */
13724        /* SEND_DMA_MEM_SIZE read-only */
13725        write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13726        pio_reset_all(dd);      /* SEND_PIO_INIT_CTXT */
13727        /* SEND_PIO_ERR_STATUS read-only */
13728        write_csr(dd, SEND_PIO_ERR_MASK, 0);
13729        write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13730        /* SEND_PIO_ERR_FORCE leave alone */
13731        /* SEND_DMA_ERR_STATUS read-only */
13732        write_csr(dd, SEND_DMA_ERR_MASK, 0);
13733        write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13734        /* SEND_DMA_ERR_FORCE leave alone */
13735        /* SEND_EGRESS_ERR_STATUS read-only */
13736        write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13737        write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13738        /* SEND_EGRESS_ERR_FORCE leave alone */
13739        write_csr(dd, SEND_BTH_QP, 0);
13740        write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13741        write_csr(dd, SEND_SC2VLT0, 0);
13742        write_csr(dd, SEND_SC2VLT1, 0);
13743        write_csr(dd, SEND_SC2VLT2, 0);
13744        write_csr(dd, SEND_SC2VLT3, 0);
13745        write_csr(dd, SEND_LEN_CHECK0, 0);
13746        write_csr(dd, SEND_LEN_CHECK1, 0);
13747        /* SEND_ERR_STATUS read-only */
13748        write_csr(dd, SEND_ERR_MASK, 0);
13749        write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13750        /* SEND_ERR_FORCE read-only */
13751        for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13752                write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
13753        for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13754                write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13755        for (i = 0; i < chip_send_contexts(dd) / NUM_CONTEXTS_PER_SET; i++)
13756                write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
13757        for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13758                write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
13759        for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13760                write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
13761        write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13762        write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
13763        /* SEND_CM_CREDIT_USED_STATUS read-only */
13764        write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13765        write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13766        write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13767        write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13768        write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13769        for (i = 0; i < TXE_NUM_DATA_VL; i++)
13770                write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
13771        write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13772        /* SEND_CM_CREDIT_USED_VL read-only */
13773        /* SEND_CM_CREDIT_USED_VL15 read-only */
13774        /* SEND_EGRESS_CTXT_STATUS read-only */
13775        /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13776        write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13777        /* SEND_EGRESS_ERR_INFO read-only */
13778        /* SEND_EGRESS_ERR_SOURCE read-only */
13779
13780        /*
13781         * TXE Per-Context CSRs
13782         */
13783        for (i = 0; i < chip_send_contexts(dd); i++) {
13784                write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13785                write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13786                write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13787                write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13788                write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13789                write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13790                write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13791                write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13792                write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13793                write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13794                write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13795                write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13796        }
13797
13798        /*
13799         * TXE Per-SDMA CSRs
13800         */
13801        for (i = 0; i < chip_sdma_engines(dd); i++) {
13802                write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13803                /* SEND_DMA_STATUS read-only */
13804                write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13805                write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13806                write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13807                /* SEND_DMA_HEAD read-only */
13808                write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13809                write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13810                /* SEND_DMA_IDLE_CNT read-only */
13811                write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13812                write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13813                /* SEND_DMA_DESC_FETCHED_CNT read-only */
13814                /* SEND_DMA_ENG_ERR_STATUS read-only */
13815                write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13816                write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13817                /* SEND_DMA_ENG_ERR_FORCE leave alone */
13818                write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13819                write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13820                write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13821                write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13822                write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13823                write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13824                write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13825        }
13826}
13827
13828/*
13829 * Expect on entry:
13830 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13831 */
13832static void init_rbufs(struct hfi1_devdata *dd)
13833{
13834        u64 reg;
13835        int count;
13836
13837        /*
13838         * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13839         * clear.
13840         */
13841        count = 0;
13842        while (1) {
13843                reg = read_csr(dd, RCV_STATUS);
13844                if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13845                            | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13846                        break;
13847                /*
13848                 * Give up after 1ms - maximum wait time.
13849                 *
13850                 * RBuf size is 136KiB.  Slowest possible is PCIe Gen1 x1 at
13851                 * 250MB/s bandwidth.  Lower rate to 66% for overhead to get:
13852                 *      136 KB / (66% * 250MB/s) = 844us
13853                 */
13854                if (count++ > 500) {
13855                        dd_dev_err(dd,
13856                                   "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13857                                   __func__, reg);
13858                        break;
13859                }
13860                udelay(2); /* do not busy-wait the CSR */
13861        }
13862
13863        /* start the init - expect RcvCtrl to be 0 */
13864        write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13865
13866        /*
13867         * Read to force the write of RcvCtrl.RxRbufInit.  There is a brief
13868         * period after the write before RcvStatus.RxRbufInitDone is valid.
13869         * The delay in the first run through the loop below is sufficient and
13870         * required before the first read of RcvStatus.RxRbufInitDone.
13871         */
13872        read_csr(dd, RCV_CTRL);
13873
13874        /* wait for the init to finish */
13875        count = 0;
13876        while (1) {
13877                /* delay is required first time through - see above */
13878                udelay(2); /* do not busy-wait the CSR */
13879                reg = read_csr(dd, RCV_STATUS);
13880                if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13881                        break;
13882
13883                /* give up after 100us - slowest possible at 33MHz is 73us */
13884                if (count++ > 50) {
13885                        dd_dev_err(dd,
13886                                   "%s: RcvStatus.RxRbufInit not set, continuing\n",
13887                                   __func__);
13888                        break;
13889                }
13890        }
13891}
13892
13893/* set RXE CSRs to chip reset defaults */
13894static void reset_rxe_csrs(struct hfi1_devdata *dd)
13895{
13896        int i, j;
13897
13898        /*
13899         * RXE Kernel CSRs
13900         */
13901        write_csr(dd, RCV_CTRL, 0);
13902        init_rbufs(dd);
13903        /* RCV_STATUS read-only */
13904        /* RCV_CONTEXTS read-only */
13905        /* RCV_ARRAY_CNT read-only */
13906        /* RCV_BUF_SIZE read-only */
13907        write_csr(dd, RCV_BTH_QP, 0);
13908        write_csr(dd, RCV_MULTICAST, 0);
13909        write_csr(dd, RCV_BYPASS, 0);
13910        write_csr(dd, RCV_VL15, 0);
13911        /* this is a clear-down */
13912        write_csr(dd, RCV_ERR_INFO,
13913                  RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13914        /* RCV_ERR_STATUS read-only */
13915        write_csr(dd, RCV_ERR_MASK, 0);
13916        write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13917        /* RCV_ERR_FORCE leave alone */
13918        for (i = 0; i < 32; i++)
13919                write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13920        for (i = 0; i < 4; i++)
13921                write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13922        for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13923                write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13924        for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13925                write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13926        for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
13927                clear_rsm_rule(dd, i);
13928        for (i = 0; i < 32; i++)
13929                write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13930
13931        /*
13932         * RXE Kernel and User Per-Context CSRs
13933         */
13934        for (i = 0; i < chip_rcv_contexts(dd); i++) {
13935                /* kernel */
13936                write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13937                /* RCV_CTXT_STATUS read-only */
13938                write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13939                write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13940                write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13941                write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13942                write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13943                write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13944                write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13945                write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13946                write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13947                write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13948
13949                /* user */
13950                /* RCV_HDR_TAIL read-only */
13951                write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13952                /* RCV_EGR_INDEX_TAIL read-only */
13953                write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13954                /* RCV_EGR_OFFSET_TAIL read-only */
13955                for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13956                        write_uctxt_csr(dd, i,
13957                                        RCV_TID_FLOW_TABLE + (8 * j), 0);
13958                }
13959        }
13960}
13961
13962/*
13963 * Set sc2vl tables.
13964 *
13965 * They power on to zeros, so to avoid send context errors
13966 * they need to be set:
13967 *
13968 * SC 0-7 -> VL 0-7 (respectively)
13969 * SC 15  -> VL 15
13970 * otherwise
13971 *        -> VL 0
13972 */
13973static void init_sc2vl_tables(struct hfi1_devdata *dd)
13974{
13975        int i;
13976        /* init per architecture spec, constrained by hardware capability */
13977
13978        /* HFI maps sent packets */
13979        write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13980                0,
13981                0, 0, 1, 1,
13982                2, 2, 3, 3,
13983                4, 4, 5, 5,
13984                6, 6, 7, 7));
13985        write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13986                1,
13987                8, 0, 9, 0,
13988                10, 0, 11, 0,
13989                12, 0, 13, 0,
13990                14, 0, 15, 15));
13991        write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13992                2,
13993                16, 0, 17, 0,
13994                18, 0, 19, 0,
13995                20, 0, 21, 0,
13996                22, 0, 23, 0));
13997        write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13998                3,
13999                24, 0, 25, 0,
14000                26, 0, 27, 0,
14001                28, 0, 29, 0,
14002                30, 0, 31, 0));
14003
14004        /* DC maps received packets */
14005        write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
14006                15_0,
14007                0, 0, 1, 1,  2, 2,  3, 3,  4, 4,  5, 5,  6, 6,  7,  7,
14008                8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
14009        write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
14010                31_16,
14011                16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
14012                24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
14013
14014        /* initialize the cached sc2vl values consistently with h/w */
14015        for (i = 0; i < 32; i++) {
14016                if (i < 8 || i == 15)
14017                        *((u8 *)(dd->sc2vl) + i) = (u8)i;
14018                else
14019                        *((u8 *)(dd->sc2vl) + i) = 0;
14020        }
14021}
14022
14023/*
14024 * Read chip sizes and then reset parts to sane, disabled, values.  We cannot
14025 * depend on the chip going through a power-on reset - a driver may be loaded
14026 * and unloaded many times.
14027 *
14028 * Do not write any CSR values to the chip in this routine - there may be
14029 * a reset following the (possible) FLR in this routine.
14030 *
14031 */
14032static int init_chip(struct hfi1_devdata *dd)
14033{
14034        int i;
14035        int ret = 0;
14036
14037        /*
14038         * Put the HFI CSRs in a known state.
14039         * Combine this with a DC reset.
14040         *
14041         * Stop the device from doing anything while we do a
14042         * reset.  We know there are no other active users of
14043         * the device since we are now in charge.  Turn off
14044         * all outbound and inbound traffic and make sure
14045         * the device does not generate any interrupts.
14046         */
14047
14048        /* disable send contexts and SDMA engines */
14049        write_csr(dd, SEND_CTRL, 0);
14050        for (i = 0; i < chip_send_contexts(dd); i++)
14051                write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
14052        for (i = 0; i < chip_sdma_engines(dd); i++)
14053                write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
14054        /* disable port (turn off RXE inbound traffic) and contexts */
14055        write_csr(dd, RCV_CTRL, 0);
14056        for (i = 0; i < chip_rcv_contexts(dd); i++)
14057                write_csr(dd, RCV_CTXT_CTRL, 0);
14058        /* mask all interrupt sources */
14059        for (i = 0; i < CCE_NUM_INT_CSRS; i++)
14060                write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
14061
14062        /*
14063         * DC Reset: do a full DC reset before the register clear.
14064         * A recommended length of time to hold is one CSR read,
14065         * so reread the CceDcCtrl.  Then, hold the DC in reset
14066         * across the clear.
14067         */
14068        write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
14069        (void)read_csr(dd, CCE_DC_CTRL);
14070
14071        if (use_flr) {
14072                /*
14073                 * A FLR will reset the SPC core and part of the PCIe.
14074                 * The parts that need to be restored have already been
14075                 * saved.
14076                 */
14077                dd_dev_info(dd, "Resetting CSRs with FLR\n");
14078
14079                /* do the FLR, the DC reset will remain */
14080                pcie_flr(dd->pcidev);
14081
14082                /* restore command and BARs */
14083                ret = restore_pci_variables(dd);
14084                if (ret) {
14085                        dd_dev_err(dd, "%s: Could not restore PCI variables\n",
14086                                   __func__);
14087                        return ret;
14088                }
14089
14090                if (is_ax(dd)) {
14091                        dd_dev_info(dd, "Resetting CSRs with FLR\n");
14092                        pcie_flr(dd->pcidev);
14093                        ret = restore_pci_variables(dd);
14094                        if (ret) {
14095                                dd_dev_err(dd, "%s: Could not restore PCI variables\n",
14096                                           __func__);
14097                                return ret;
14098                        }
14099                }
14100        } else {
14101                dd_dev_info(dd, "Resetting CSRs with writes\n");
14102                reset_cce_csrs(dd);
14103                reset_txe_csrs(dd);
14104                reset_rxe_csrs(dd);
14105                reset_misc_csrs(dd);
14106        }
14107        /* clear the DC reset */
14108        write_csr(dd, CCE_DC_CTRL, 0);
14109
14110        /* Set the LED off */
14111        setextled(dd, 0);
14112
14113        /*
14114         * Clear the QSFP reset.
14115         * An FLR enforces a 0 on all out pins. The driver does not touch
14116         * ASIC_QSFPn_OUT otherwise.  This leaves RESET_N low and
14117         * holds anything plugged in constantly in reset, if it pays attention
14118         * to RESET_N.
14119         * Prime examples of this are optical cables. Set all pins high.
14120         * I2CCLK and I2CDAT will change per direction, and INT_N and
14121         * MODPRS_N are input only and their value is ignored.
14122         */
14123        write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
14124        write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
14125        init_chip_resources(dd);
14126        return ret;
14127}
14128
14129static void init_early_variables(struct hfi1_devdata *dd)
14130{
14131        int i;
14132
14133        /* assign link credit variables */
14134        dd->vau = CM_VAU;
14135        dd->link_credits = CM_GLOBAL_CREDITS;
14136        if (is_ax(dd))
14137                dd->link_credits--;
14138        dd->vcu = cu_to_vcu(hfi1_cu);
14139        /* enough room for 8 MAD packets plus header - 17K */
14140        dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
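        /*
         * e.g. 8 * (2048 + 128) = 17408 bytes; with a hypothetical AU
         * size of 64 bytes this works out to 17408 / 64 = 272 credits.
         */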
14141        if (dd->vl15_init > dd->link_credits)
14142                dd->vl15_init = dd->link_credits;
14143
14144        write_uninitialized_csrs_and_memories(dd);
14145
14146        if (HFI1_CAP_IS_KSET(PKEY_CHECK))
14147                for (i = 0; i < dd->num_pports; i++) {
14148                        struct hfi1_pportdata *ppd = &dd->pport[i];
14149
14150                        set_partition_keys(ppd);
14151                }
14152        init_sc2vl_tables(dd);
14153}
14154
14155static void init_kdeth_qp(struct hfi1_devdata *dd)
14156{
14157        /* user changed the KDETH_QP */
14158        if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
14159                /* out of range or illegal value */
14160                dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
14161                kdeth_qp = 0;
14162        }
14163        if (kdeth_qp == 0)      /* not set, or failed range check */
14164                kdeth_qp = DEFAULT_KDETH_QP;
14165
14166        write_csr(dd, SEND_BTH_QP,
14167                  (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
14168                  SEND_BTH_QP_KDETH_QP_SHIFT);
14169
14170        write_csr(dd, RCV_BTH_QP,
14171                  (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
14172                  RCV_BTH_QP_KDETH_QP_SHIFT);
14173}
14174
14175/**
14176 * init_qpmap_table
14177 * @dd - device data
14178 * @first_ctxt - first context
14179 * @last_ctxt - last context
14180 *
14181 * This routine sets the qpn mapping table that
14182 * is indexed by qpn[8:1].
14183 *
14184 * The routine will round robin the 256 settings
14185 * from first_ctxt to last_ctxt.
14186 *
14187 * The first/last looks ahead to having specialized
14188 * receive contexts for mgmt and bypass.  Normal
14189 * verbs traffic is assumed to be on a range
14190 * of receive contexts.
14191 */
14192static void init_qpmap_table(struct hfi1_devdata *dd,
14193                             u32 first_ctxt,
14194                             u32 last_ctxt)
14195{
14196        u64 reg = 0;
14197        u64 regno = RCV_QP_MAP_TABLE;
14198        int i;
14199        u64 ctxt = first_ctxt;
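        /*
         * Packing sketch: eight 8-bit context numbers are packed per
         * 64-bit map register.  With first_ctxt = 1 and last_ctxt = 3
         * (hypothetical), the 256 qpn[8:1] slots are filled round-robin
         * as 1, 2, 3, 1, 2, 3, ..., eight slots per write_csr().
         */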
14200
14201        for (i = 0; i < 256; i++) {
14202                reg |= ctxt << (8 * (i % 8));
14203                ctxt++;
14204                if (ctxt > last_ctxt)
14205                        ctxt = first_ctxt;
14206                if (i % 8 == 7) {
14207                        write_csr(dd, regno, reg);
14208                        reg = 0;
14209                        regno += 8;
14210                }
14211        }
14212
14213        add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
14214                        | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
14215}
14216
14217struct rsm_map_table {
14218        u64 map[NUM_MAP_REGS];
14219        unsigned int used;
14220};
14221
14222struct rsm_rule_data {
14223        u8 offset;
14224        u8 pkt_type;
14225        u32 field1_off;
14226        u32 field2_off;
14227        u32 index1_off;
14228        u32 index1_width;
14229        u32 index2_off;
14230        u32 index2_width;
14231        u32 mask1;
14232        u32 value1;
14233        u32 mask2;
14234        u32 value2;
14235};
14236
14237/*
14238 * Return an initialized RMT map table for users to fill in.  OK if it
14239 * returns NULL, indicating no table.
14240 */
14241static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
14242{
14243        struct rsm_map_table *rmt;
14244        u8 rxcontext = is_ax(dd) ? 0 : 0xff;  /* 0 is default if a0 ver. */
14245
14246        rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
14247        if (rmt) {
14248                memset(rmt->map, rxcontext, sizeof(rmt->map));
14249                rmt->used = 0;
14250        }
14251
14252        return rmt;
14253}
14254
14255/*
14256 * Write the final RMT map table to the chip and free the table.  OK if
14257 * table is NULL.
14258 */
14259static void complete_rsm_map_table(struct hfi1_devdata *dd,
14260                                   struct rsm_map_table *rmt)
14261{
14262        int i;
14263
14264        if (rmt) {
14265                /* write table to chip */
14266                for (i = 0; i < NUM_MAP_REGS; i++)
14267                        write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
14268
14269                /* enable RSM */
14270                add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14271        }
14272}
14273
14274/*
14275 * Add a receive side mapping rule.
14276 */
14277static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
14278                         struct rsm_rule_data *rrd)
14279{
14280        write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
14281                  (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
14282                  1ull << rule_index | /* enable bit */
14283                  (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
14284        write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
14285                  (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
14286                  (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
14287                  (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
14288                  (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
14289                  (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
14290                  (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
14291        write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
14292                  (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
14293                  (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
14294                  (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
14295                  (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
14296}
14297
14298/*
14299 * Clear a receive side mapping rule.
14300 */
14301static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
14302{
14303        write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
14304        write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
14305        write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
14306}
14307
14308/* return the number of RSM map table entries that will be used for QOS */
14309static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
14310                           unsigned int *np)
14311{
14312        int i;
14313        unsigned int m, n;
14314        u8 max_by_vl = 0;
14315
14316        /* is QOS active at all? */
14317        if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
14318            num_vls == 1 ||
14319            krcvqsset <= 1)
14320                goto no_qos;
14321
14322        /* determine bits for qpn */
14323        for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
14324                if (krcvqs[i] > max_by_vl)
14325                        max_by_vl = krcvqs[i];
14326        if (max_by_vl > 32)
14327                goto no_qos;
14328        m = ilog2(__roundup_pow_of_two(max_by_vl));
14329
14330        /* determine bits for vl */
14331        n = ilog2(__roundup_pow_of_two(num_vls));
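        /*
         * Example (hypothetical module parameters): krcvqs = {4, 2} with
         * num_vls = 2 gives max_by_vl = 4, so m = 2 and n = 1 and the
         * rule would consume 1 << (2 + 1) = 8 RSM map table entries.
         */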
14332
14333        /* reject if too much is used */
14334        if ((m + n) > 7)
14335                goto no_qos;
14336
14337        if (mp)
14338                *mp = m;
14339        if (np)
14340                *np = n;
14341
14342        return 1 << (m + n);
14343
14344no_qos:
14345        if (mp)
14346                *mp = 0;
14347        if (np)
14348                *np = 0;
14349        return 0;
14350}
14351
14352/**
14353 * init_qos - init RX qos
14354 * @dd - device data
14355 * @rmt - RSM map table
14356 *
14357 * This routine initializes Rule 0 and the RSM map table to implement
14358 * quality of service (qos).
14359 *
14360 * If all of the limit tests succeed, qos is applied based on the array
14361 * interpretation of krcvqs where entry 0 is VL0.
14362 *
14363 * The number of vl bits (n) and the number of qpn bits (m) are computed to
14364 * feed both the RSM map table and the single rule.
14365 */
14366static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
14367{
14368        struct rsm_rule_data rrd;
14369        unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
14370        unsigned int rmt_entries;
14371        u64 reg;
14372
14373        if (!rmt)
14374                goto bail;
14375        rmt_entries = qos_rmt_entries(dd, &m, &n);
14376        if (rmt_entries == 0)
14377                goto bail;
14378        qpns_per_vl = 1 << m;
14379
14380        /* enough room in the map table? */
14381        rmt_entries = 1 << (m + n);
14382        if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
14383                goto bail;
14384
14385        /* add qos entries to the RSM map table */
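        /*
         * Index sketch with m = 2, n = 1 (hypothetical): qpns_per_vl = 4
         * and idx = rmt->used + ((qpn << 1) ^ i), so VL0 fills entries
         * used+0,2,4,6 and VL1 fills used+1,3,5,7, interleaving the two
         * VLs across the eight new map entries.
         */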
14386        for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
14387                unsigned tctxt;
14388
14389                for (qpn = 0, tctxt = ctxt;
14390                     krcvqs[i] && qpn < qpns_per_vl; qpn++) {
14391                        unsigned idx, regoff, regidx;
14392
14393                        /* generate the index the hardware will produce */
14394                        idx = rmt->used + ((qpn << n) ^ i);
14395                        regoff = (idx % 8) * 8;
14396                        regidx = idx / 8;
14397                        /* replace default with context number */
14398                        reg = rmt->map[regidx];
14399                        reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
14400                                << regoff);
14401                        reg |= (u64)(tctxt++) << regoff;
14402                        rmt->map[regidx] = reg;
14403                        if (tctxt == ctxt + krcvqs[i])
14404                                tctxt = ctxt;
14405                }
14406                ctxt += krcvqs[i];
14407        }
14408
14409        rrd.offset = rmt->used;
14410        rrd.pkt_type = 2;
14411        rrd.field1_off = LRH_BTH_MATCH_OFFSET;
14412        rrd.field2_off = LRH_SC_MATCH_OFFSET;
14413        rrd.index1_off = LRH_SC_SELECT_OFFSET;
14414        rrd.index1_width = n;
14415        rrd.index2_off = QPN_SELECT_OFFSET;
14416        rrd.index2_width = m + n;
14417        rrd.mask1 = LRH_BTH_MASK;
14418        rrd.value1 = LRH_BTH_VALUE;
14419        rrd.mask2 = LRH_SC_MASK;
14420        rrd.value2 = LRH_SC_VALUE;
14421
14422        /* add rule 0 */
14423        add_rsm_rule(dd, RSM_INS_VERBS, &rrd);
14424
14425        /* mark RSM map entries as used */
14426        rmt->used += rmt_entries;
14427        /* map everything else to the mcast/err/vl15 context */
14428        init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
14429        dd->qos_shift = n + 1;
14430        return;
14431bail:
14432        dd->qos_shift = 1;
14433        init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
14434}
14435
14436static void init_user_fecn_handling(struct hfi1_devdata *dd,
14437                                    struct rsm_map_table *rmt)
14438{
14439        struct rsm_rule_data rrd;
14440        u64 reg;
14441        int i, idx, regoff, regidx;
14442        u8 offset;
14443
14444        /* there needs to be enough room in the map table */
14445        if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
14446                dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
14447                return;
14448        }
14449
14450        /*
14451         * RSM will extract the destination context as an index into the
14452         * map table.  The destination contexts are a sequential block
14453         * in the range first_dyn_alloc_ctxt...num_rcv_contexts-1 (inclusive).
14454         * Map entries are accessed as offset + extracted value.  Adjust
14455         * the added offset so this sequence can be placed anywhere in
14456         * the table - as long as the entries themselves do not wrap.
14457         * There are only enough bits in offset for the table size, so
14458         * start with that to allow for a "negative" offset.
14459         */
14460        offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
14461                                                (int)dd->first_dyn_alloc_ctxt);
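        /*
         * Example (hypothetical counts): with 256 map entries,
         * rmt->used = 8 and first_dyn_alloc_ctxt = 9, offset becomes
         * (u8)(256 + 8 - 9) = 255, so extracted context 9 indexes entry
         * (255 + 9) mod 256 = 8, the first free map slot.
         */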
14462
14463        for (i = dd->first_dyn_alloc_ctxt, idx = rmt->used;
14464                                i < dd->num_rcv_contexts; i++, idx++) {
14465                /* replace with identity mapping */
14466                regoff = (idx % 8) * 8;
14467                regidx = idx / 8;
14468                reg = rmt->map[regidx];
14469                reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
14470                reg |= (u64)i << regoff;
14471                rmt->map[regidx] = reg;
14472        }
14473
14474        /*
14475         * For RSM intercept of Expected FECN packets:
14476         * o packet type 0 - expected
14477         * o match on F (bit 95), using select/match 1, and
14478         * o match on SH (bit 133), using select/match 2.
14479         *
14480         * Use index 1 to extract the 8-bit receive context from DestQP
14481         * (start at bit 64).  Use that as the RSM map table index.
14482         */
14483        rrd.offset = offset;
14484        rrd.pkt_type = 0;
14485        rrd.field1_off = 95;
14486        rrd.field2_off = 133;
14487        rrd.index1_off = 64;
14488        rrd.index1_width = 8;
14489        rrd.index2_off = 0;
14490        rrd.index2_width = 0;
14491        rrd.mask1 = 1;
14492        rrd.value1 = 1;
14493        rrd.mask2 = 1;
14494        rrd.value2 = 1;
14495
14496        /* add rule 1 */
14497        add_rsm_rule(dd, RSM_INS_FECN, &rrd);
14498
14499        rmt->used += dd->num_user_contexts;
14500}
14501
14502/* Initialize RSM for VNIC */
14503void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
14504{
14505        u8 i, j;
14506        u8 ctx_id = 0;
14507        u64 reg;
14508        u32 regoff;
14509        struct rsm_rule_data rrd;
14510
14511        if (hfi1_vnic_is_rsm_full(dd, NUM_VNIC_MAP_ENTRIES)) {
14512                dd_dev_err(dd, "Vnic RSM disabled, rmt entries used = %d\n",
14513                           dd->vnic.rmt_start);
14514                return;
14515        }
14516
14517        dev_dbg(&(dd)->pcidev->dev, "Vnic rsm start = %d, end %d\n",
14518                dd->vnic.rmt_start,
14519                dd->vnic.rmt_start + NUM_VNIC_MAP_ENTRIES);
14520
14521        /* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
14522        regoff = RCV_RSM_MAP_TABLE + (dd->vnic.rmt_start / 8) * 8;
14523        reg = read_csr(dd, regoff);
14524        for (i = 0; i < NUM_VNIC_MAP_ENTRIES; i++) {
14525                /* Update map register with vnic context */
14526                j = (dd->vnic.rmt_start + i) % 8;
14527                reg &= ~(0xffllu << (j * 8));
14528                reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8);
14529                /* Wrap up vnic ctx index */
14530                ctx_id %= dd->vnic.num_ctxt;
14531                /* Write back map register */
14532                if (j == 7 || ((i + 1) == NUM_VNIC_MAP_ENTRIES)) {
14533                        dev_dbg(&(dd)->pcidev->dev,
14534                                "Vnic rsm map reg[%d] =0x%llx\n",
14535                                regoff - RCV_RSM_MAP_TABLE, reg);
14536
14537                        write_csr(dd, regoff, reg);
14538                        regoff += 8;
14539                        if (i < (NUM_VNIC_MAP_ENTRIES - 1))
14540                                reg = read_csr(dd, regoff);
14541                }
14542        }
14543
14544        /* Add rule for vnic */
14545        rrd.offset = dd->vnic.rmt_start;
14546        rrd.pkt_type = 4;
14547        /* Match 16B packets */
14548        rrd.field1_off = L2_TYPE_MATCH_OFFSET;
14549        rrd.mask1 = L2_TYPE_MASK;
14550        rrd.value1 = L2_16B_VALUE;
14551        /* Match ETH L4 packets */
14552        rrd.field2_off = L4_TYPE_MATCH_OFFSET;
14553        rrd.mask2 = L4_16B_TYPE_MASK;
14554        rrd.value2 = L4_16B_ETH_VALUE;
14555        /* Calc context from veswid and entropy */
14556        rrd.index1_off = L4_16B_HDR_VESWID_OFFSET;
14557        rrd.index1_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14558        rrd.index2_off = L2_16B_ENTROPY_OFFSET;
14559        rrd.index2_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14560        add_rsm_rule(dd, RSM_INS_VNIC, &rrd);
14561
14562        /* Enable RSM if not already enabled */
14563        add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14564}
14565
14566void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
14567{
14568        clear_rsm_rule(dd, RSM_INS_VNIC);
14569
14570        /* Disable RSM if used only by vnic */
14571        if (dd->vnic.rmt_start == 0)
14572                clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14573}
14574
14575static void init_rxe(struct hfi1_devdata *dd)
14576{
14577        struct rsm_map_table *rmt;
14578        u64 val;
14579
14580        /* enable all receive errors */
14581        write_csr(dd, RCV_ERR_MASK, ~0ull);
14582
14583        rmt = alloc_rsm_map_table(dd);
14584        /* set up QOS, including the QPN map table */
14585        init_qos(dd, rmt);
14586        init_user_fecn_handling(dd, rmt);
14587        complete_rsm_map_table(dd, rmt);
14588        /* record number of used rsm map entries for vnic */
14589        dd->vnic.rmt_start = rmt->used;
14590        kfree(rmt);
14591
14592        /*
14593         * make sure RcvCtrl.RcvWcb <= PCIe Device Control
14594         * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
14595         * space, PciCfgCap2.MaxPayloadSize in HFI).  There is only one
14596         * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
14597         * Max_PayLoad_Size set to its minimum of 128.
14598         *
14599         * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
14600         * (64 bytes).  Max_Payload_Size is possibly modified upward in
14601         * tune_pcie_caps() which is called after this routine.
14602         */
14603
14604        /* Have 16 bytes (4DW) of bypass header available in header queue */
14605        val = read_csr(dd, RCV_BYPASS);
14606        val &= ~RCV_BYPASS_HDR_SIZE_SMASK;
14607        val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
14608                RCV_BYPASS_HDR_SIZE_SHIFT);
14609        write_csr(dd, RCV_BYPASS, val);
14610}
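
/*
 * Illustration (not driver code): the RCV_BYPASS update above is the usual
 * read-modify-write of a packed CSR field -- clear the field with its
 * shifted mask, then OR in the new value masked to the field width.  The
 * field name and bit positions below are invented for the sketch, not the
 * real RCV_BYPASS layout.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_HDR_SIZE_SHIFT	8
#define SKETCH_HDR_SIZE_MASK	0xfull
#define SKETCH_HDR_SIZE_SMASK	(SKETCH_HDR_SIZE_MASK << SKETCH_HDR_SIZE_SHIFT)

static uint64_t sketch_set_hdr_size(uint64_t csr, uint64_t dwords)
{
	csr &= ~SKETCH_HDR_SIZE_SMASK;		/* clear the field */
	csr |= (dwords & SKETCH_HDR_SIZE_MASK) << SKETCH_HDR_SIZE_SHIFT;
	return csr;
}

int main(void)
{
	uint64_t csr = 0xdeadbeefull;

	csr = sketch_set_hdr_size(csr, 4);	/* 4 DW of bypass header */
	printf("csr = 0x%llx\n", (unsigned long long)csr);
	return 0;
}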
14611
14612static void init_other(struct hfi1_devdata *dd)
14613{
14614        /* enable all CCE errors */
14615        write_csr(dd, CCE_ERR_MASK, ~0ull);
14616        /* enable *some* Misc errors */
14617        write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
14618        /* enable all DC errors, except LCB */
14619        write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
14620        write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
14621}
14622
14623/*
14624 * Fill out the given AU table using the given CU.  A CU is defined in terms
14625 * of AUs.  The table is an encoding: given the index, how many AUs does that
14626 * represent?
14627 *
14628 * NOTE: Assumes that the register layout is the same for the
14629 * local and remote tables.
14630 */
14631static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
14632                               u32 csr0to3, u32 csr4to7)
14633{
14634        write_csr(dd, csr0to3,
14635                  0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
14636                  1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
14637                  2ull * cu <<
14638                  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
14639                  4ull * cu <<
14640                  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
14641        write_csr(dd, csr4to7,
14642                  8ull * cu <<
14643                  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
14644                  16ull * cu <<
14645                  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
14646                  32ull * cu <<
14647                  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
14648                  64ull * cu <<
14649                  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
14650}
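
/*
 * Illustration (not driver code): the table written above encodes, per
 * index, how many AUs that index represents -- 0, 1, then 2*CU, 4*CU,
 * 8*CU, 16*CU, 32*CU and 64*CU for indices 2..7.  A standalone sketch
 * that prints the eight values for an example CU (in the driver the CU
 * comes from vcu_to_cu() and the values are written into the two CSRs).
 */
#include <stdint.h>
#include <stdio.h>

/* Mirror of the AU table encoding: index -> number of AUs. */
static uint64_t sketch_au_table_entry(unsigned int index, uint32_t cu)
{
	if (index == 0)
		return 0;
	if (index == 1)
		return 1;
	/* indices 2..7 -> 2*cu, 4*cu, 8*cu, 16*cu, 32*cu, 64*cu */
	return (uint64_t)cu << (index - 1);
}

int main(void)
{
	uint32_t cu = 2;	/* example CU value */

	for (unsigned int i = 0; i < 8; i++)
		printf("AU_TABLE%u = %llu AUs\n", i,
		       (unsigned long long)sketch_au_table_entry(i, cu));
	return 0;
}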
14651
14652static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14653{
14654        assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
14655                           SEND_CM_LOCAL_AU_TABLE4_TO7);
14656}
14657
14658void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14659{
14660        assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
14661                           SEND_CM_REMOTE_AU_TABLE4_TO7);
14662}
14663
14664static void init_txe(struct hfi1_devdata *dd)
14665{
14666        int i;
14667
14668        /* enable all PIO, SDMA, general, and Egress errors */
14669        write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
14670        write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
14671        write_csr(dd, SEND_ERR_MASK, ~0ull);
14672        write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
14673
14674        /* enable all per-context and per-SDMA engine errors */
14675        for (i = 0; i < chip_send_contexts(dd); i++)
14676                write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
14677        for (i = 0; i < chip_sdma_engines(dd); i++)
14678                write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
14679
14680        /* set the local CU to AU mapping */
14681        assign_local_cm_au_table(dd, dd->vcu);
14682
14683        /*
14684         * Set reasonable default for Credit Return Timer
14685         * Don't set on Simulator - causes it to choke.
14686         */
14687        if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
14688                write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
14689}
14690
14691int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14692                       u16 jkey)
14693{
14694        u8 hw_ctxt;
14695        u64 reg;
14696
14697        if (!rcd || !rcd->sc)
14698                return -EINVAL;
14699
14700        hw_ctxt = rcd->sc->hw_context;
14701        reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
14702                ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
14703                 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
14704        /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
14705        if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
14706                reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
14707        write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
14708        /*
14709         * Enable send-side J_KEY integrity check, unless this is A0 h/w
14710         */
14711        if (!is_ax(dd)) {
14712                reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14713                reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14714                write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14715        }
14716
14717        /* Enable J_KEY check on receive context. */
14718        reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
14719                ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
14720                 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
14721        write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg);
14722
14723        return 0;
14724}
14725
14726int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
14727{
14728        u8 hw_ctxt;
14729        u64 reg;
14730
14731        if (!rcd || !rcd->sc)
14732                return -EINVAL;
14733
14734        hw_ctxt = rcd->sc->hw_context;
14735        write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
14736        /*
14737         * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14738         * This check would not have been enabled for A0 h/w, see
14739         * hfi1_set_ctxt_jkey().
14740         */
14741        if (!is_ax(dd)) {
14742                reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14743                reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14744                write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14745        }
14746        /* Turn off the J_KEY on the receive side */
14747        write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0);
14748
14749        return 0;
14750}
14751
14752int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14753                       u16 pkey)
14754{
14755        u8 hw_ctxt;
14756        u64 reg;
14757
14758        if (!rcd || !rcd->sc)
14759                return -EINVAL;
14760
14761        hw_ctxt = rcd->sc->hw_context;
14762        reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14763                SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14764        write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14765        reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14766        reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14767        reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
14768        write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14769
14770        return 0;
14771}
14772
14773int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
14774{
14775        u8 hw_ctxt;
14776        u64 reg;
14777
14778        if (!ctxt || !ctxt->sc)
14779                return -EINVAL;
14780
14781        hw_ctxt = ctxt->sc->hw_context;
14782        reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14783        reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14784        write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14785        write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14786
14787        return 0;
14788}
14789
14790/*
14791 * Start cleaning up the chip.  Our clean up happens in multiple
14792 * stages and this is just the first.
14793 */
14794void hfi1_start_cleanup(struct hfi1_devdata *dd)
14795{
14796        aspm_exit(dd);
14797        free_cntrs(dd);
14798        free_rcverr(dd);
14799        finish_chip_resources(dd);
14800}
14801
14802#define HFI_BASE_GUID(dev) \
14803        ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
14804
14805/*
14806 * Information can be shared between the two HFIs on the same ASIC
14807 * in the same OS.  This function finds the peer device and sets
14808 * up a shared structure.
14809 */
14810static int init_asic_data(struct hfi1_devdata *dd)
14811{
14812        unsigned long flags;
14813        struct hfi1_devdata *tmp, *peer = NULL;
14814        struct hfi1_asic_data *asic_data;
14815        int ret = 0;
14816
14817        /* pre-allocate the asic structure in case we are the first device */
14818        asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14819        if (!asic_data)
14820                return -ENOMEM;
14821
14822        spin_lock_irqsave(&hfi1_devs_lock, flags);
14823        /* Find our peer device */
14824        list_for_each_entry(tmp, &hfi1_dev_list, list) {
14825                if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
14826                    dd->unit != tmp->unit) {
14827                        peer = tmp;
14828                        break;
14829                }
14830        }
14831
14832        if (peer) {
14833                /* use already allocated structure */
14834                dd->asic_data = peer->asic_data;
14835                kfree(asic_data);
14836        } else {
14837                dd->asic_data = asic_data;
14838                mutex_init(&dd->asic_data->asic_resource_mutex);
14839        }
14840        dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
14841        spin_unlock_irqrestore(&hfi1_devs_lock, flags);
14842
14843        /* first one through - set up i2c devices */
14844        if (!peer)
14845                ret = set_up_i2c(dd, dd->asic_data);
14846
14847        return ret;
14848}
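
/*
 * Illustration (not driver code): the peer lookup above works because the
 * two HFIs on one ASIC have base GUIDs that differ only in the HFI-index
 * bit, which HFI_BASE_GUID() masks off.  The GUID values and the index-bit
 * position used below are made-up assumptions for the sketch; the driver
 * uses GUID_HFI_INDEX_SHIFT.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_HFI_INDEX_SHIFT 39	/* assumed bit position */

static uint64_t sketch_base_guid(uint64_t guid)
{
	return guid & ~(1ULL << SKETCH_HFI_INDEX_SHIFT);
}

int main(void)
{
	/* Two hypothetical HFIs on one ASIC: only the index bit differs. */
	uint64_t hfi0_guid = 0x0011750101234567ULL;
	uint64_t hfi1_guid = hfi0_guid | (1ULL << SKETCH_HFI_INDEX_SHIFT);

	printf("same ASIC: %s\n",
	       sketch_base_guid(hfi0_guid) == sketch_base_guid(hfi1_guid) ?
	       "yes" : "no");
	return 0;
}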
14849
14850/*
14851 * Set dd->boardname.  Use a generic name if a name is not returned from
14852 * EFI variable space.
14853 *
14854 * Return 0 on success, -ENOMEM if space could not be allocated.
14855 */
14856static int obtain_boardname(struct hfi1_devdata *dd)
14857{
14858        /* generic board description */
14859        const char generic[] =
14860                "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14861        unsigned long size;
14862        int ret;
14863
14864        ret = read_hfi1_efi_var(dd, "description", &size,
14865                                (void **)&dd->boardname);
14866        if (ret) {
14867                dd_dev_info(dd, "Board description not found\n");
14868                /* use generic description */
14869                dd->boardname = kstrdup(generic, GFP_KERNEL);
14870                if (!dd->boardname)
14871                        return -ENOMEM;
14872        }
14873        return 0;
14874}
14875
14876/*
14877 * Check the interrupt registers to make sure that they are mapped correctly.
14878 * It is intended to help the user identify any mismapping by the VMM when
14879 * the driver is running in a VM. This function should only be called before
14880 * interrupts are set up properly.
14881 *
14882 * Return 0 on success, -EINVAL on failure.
14883 */
14884static int check_int_registers(struct hfi1_devdata *dd)
14885{
14886        u64 reg;
14887        u64 all_bits = ~(u64)0;
14888        u64 mask;
14889
14890        /* Clear CceIntMask[0] to avoid raising any interrupts */
14891        mask = read_csr(dd, CCE_INT_MASK);
14892        write_csr(dd, CCE_INT_MASK, 0ull);
14893        reg = read_csr(dd, CCE_INT_MASK);
14894        if (reg)
14895                goto err_exit;
14896
14897        /* Clear all interrupt status bits */
14898        write_csr(dd, CCE_INT_CLEAR, all_bits);
14899        reg = read_csr(dd, CCE_INT_STATUS);
14900        if (reg)
14901                goto err_exit;
14902
14903        /* Set all interrupt status bits */
14904        write_csr(dd, CCE_INT_FORCE, all_bits);
14905        reg = read_csr(dd, CCE_INT_STATUS);
14906        if (reg != all_bits)
14907                goto err_exit;
14908
14909        /* Restore the interrupt mask */
14910        write_csr(dd, CCE_INT_CLEAR, all_bits);
14911        write_csr(dd, CCE_INT_MASK, mask);
14912
14913        return 0;
14914err_exit:
14915        write_csr(dd, CCE_INT_MASK, mask);
14916        dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14917        return -EINVAL;
14918}
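
/*
 * Illustration (not driver code): the check above relies on three
 * observable behaviours -- a mask written as 0 must read back 0, clearing
 * every status bit must leave status 0, and forcing every bit must make
 * status read all ones.  A minimal sketch of the same sequence against a
 * mock register pair standing in for the CCE_INT_* CSRs.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

static uint64_t mock_int_mask;
static uint64_t mock_int_status;

static void mock_int_clear(uint64_t v) { mock_int_status &= ~v; }
static void mock_int_force(uint64_t v) { mock_int_status |= v; }

static bool sketch_check_int_registers(void)
{
	const uint64_t all_bits = ~(uint64_t)0;
	uint64_t saved = mock_int_mask;
	bool ok = true;

	mock_int_mask = 0;			/* mask must read back as 0 */
	if (mock_int_mask)
		ok = false;
	mock_int_clear(all_bits);		/* status must clear to 0 */
	if (mock_int_status)
		ok = false;
	mock_int_force(all_bits);		/* status must force to ~0 */
	if (mock_int_status != all_bits)
		ok = false;
	mock_int_clear(all_bits);		/* restore quiescent state */
	mock_int_mask = saved;
	return ok;
}

int main(void)
{
	printf("interrupt registers mapped: %s\n",
	       sketch_check_int_registers() ? "ok" : "bad");
	return 0;
}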
14919
14920/**
14921 * hfi1_init_dd() - Allocate and initialize the device structure for the hfi.
14922 * @pdev: the pci_dev for hfi1_ib device
14923 * @ent: pci_device_id struct for this dev
14924 *
14925 * Also allocates, initializes, and returns the devdata struct for this
14926 * device instance
14927 *
14928 * This is global, and is called directly at init to set up the
14929 * chip-specific function pointers for later use.
14930 */
14931struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
14932                                  const struct pci_device_id *ent)
14933{
14934        struct hfi1_devdata *dd;
14935        struct hfi1_pportdata *ppd;
14936        u64 reg;
14937        int i, ret;
14938        static const char * const inames[] = { /* implementation names */
14939                "RTL silicon",
14940                "RTL VCS simulation",
14941                "RTL FPGA emulation",
14942                "Functional simulator"
14943        };
14944        struct pci_dev *parent = pdev->bus->self;
14945        u32 sdma_engines;
14946
14947        dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
14948                                sizeof(struct hfi1_pportdata));
14949        if (IS_ERR(dd))
14950                goto bail;
14951        sdma_engines = chip_sdma_engines(dd);
14952        ppd = dd->pport;
14953        for (i = 0; i < dd->num_pports; i++, ppd++) {
14954                int vl;
14955                /* init common fields */
14956                hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14957                /* DC supports 4 link widths */
14958                ppd->link_width_supported =
14959                        OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14960                        OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14961                ppd->link_width_downgrade_supported =
14962                        ppd->link_width_supported;
14963                /* start out enabling only 4X */
14964                ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14965                ppd->link_width_downgrade_enabled =
14966                                        ppd->link_width_downgrade_supported;
14967                /* link width active is 0 when link is down */
14968                /* link width downgrade active is 0 when link is down */
14969
14970                if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
14971                    num_vls > HFI1_MAX_VLS_SUPPORTED) {
14972                        dd_dev_err(dd, "Invalid num_vls %u, using %u VLs\n",
14973                                   num_vls, HFI1_MAX_VLS_SUPPORTED);
14974                        num_vls = HFI1_MAX_VLS_SUPPORTED;
14975                }
14976                ppd->vls_supported = num_vls;
14977                ppd->vls_operational = ppd->vls_supported;
14978                /* Set the default MTU. */
14979                for (vl = 0; vl < num_vls; vl++)
14980                        dd->vld[vl].mtu = hfi1_max_mtu;
14981                dd->vld[15].mtu = MAX_MAD_PACKET;
14982                /*
14983                 * Set the initial values to reasonable default, will be set
14984                 * for real when link is up.
14985                 */
14986                ppd->overrun_threshold = 0x4;
14987                ppd->phy_error_threshold = 0xf;
14988                ppd->port_crc_mode_enabled = link_crc_mask;
14989                /* initialize supported LTP CRC mode */
14990                ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14991                /* initialize enabled LTP CRC mode */
14992                ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14993                /* start in offline */
14994                ppd->host_link_state = HLS_DN_OFFLINE;
14995                init_vl_arb_caches(ppd);
14996        }
14997
14998        /*
14999         * Do remaining PCIe setup and save PCIe values in dd.
15000         * Any error printing is already done by the init code.
15001         * On return, we have the chip mapped.
15002         */
15003        ret = hfi1_pcie_ddinit(dd, pdev);
15004        if (ret < 0)
15005                goto bail_free;
15006
15007        /* Save PCI space registers to rewrite after device reset */
15008        ret = save_pci_variables(dd);
15009        if (ret < 0)
15010                goto bail_cleanup;
15011
15012        dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
15013                        & CCE_REVISION_CHIP_REV_MAJOR_MASK;
15014        dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
15015                        & CCE_REVISION_CHIP_REV_MINOR_MASK;
15016
15017        /*
15018         * Check interrupt registers mapping if the driver has no access to
15019         * the upstream component. In this case, it is likely that the driver
15020         * is running in a VM.
15021         */
15022        if (!parent) {
15023                ret = check_int_registers(dd);
15024                if (ret)
15025                        goto bail_cleanup;
15026        }
15027
15028        /*
15029         * obtain the hardware ID - NOT related to unit, which is a
15030         * software enumeration
15031         */
15032        reg = read_csr(dd, CCE_REVISION2);
15033        dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
15034                                        & CCE_REVISION2_HFI_ID_MASK;
15035        /* the variable size will remove unwanted bits */
15036        dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
15037        dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
15038        dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
15039                    dd->icode < ARRAY_SIZE(inames) ?
15040                    inames[dd->icode] : "unknown", (int)dd->irev);
15041
15042        /* speeds the hardware can support */
15043        dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
15044        /* speeds allowed to run at */
15045        dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
15046        /* give a reasonable active value, will be set on link up */
15047        dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
15048
15049        /* fix up link widths for emulation _p */
15050        ppd = dd->pport;
15051        if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
15052                ppd->link_width_supported =
15053                        ppd->link_width_enabled =
15054                        ppd->link_width_downgrade_supported =
15055                        ppd->link_width_downgrade_enabled =
15056                                OPA_LINK_WIDTH_1X;
15057        }
15058        /* ensure num_vls isn't larger than the number of sdma engines */
15059        if (HFI1_CAP_IS_KSET(SDMA) && num_vls > sdma_engines) {
15060                dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
15061                           num_vls, sdma_engines);
15062                num_vls = sdma_engines;
15063                ppd->vls_supported = sdma_engines;
15064                ppd->vls_operational = ppd->vls_supported;
15065        }
15066
15067        /*
15068         * Convert the ns parameter to the 64 * cclocks used in the CSR.
15069         * Limit the max if larger than the field holds.  If timeout is
15070         * non-zero, then the calculated field will be at least 1.
15071         *
15072         * Must be after icode is set up - the cclock rate depends
15073         * on knowing the hardware being used.
15074         */
15075        dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
15076        if (dd->rcv_intr_timeout_csr >
15077                        RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
15078                dd->rcv_intr_timeout_csr =
15079                        RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
15080        else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
15081                dd->rcv_intr_timeout_csr = 1;
15082
15083        /* needs to be done before we look for the peer device */
15084        read_guid(dd);
15085
15086        /* set up shared ASIC data with peer device */
15087        ret = init_asic_data(dd);
15088        if (ret)
15089                goto bail_cleanup;
15090
15091        /* obtain chip sizes, reset chip CSRs */
15092        ret = init_chip(dd);
15093        if (ret)
15094                goto bail_cleanup;
15095
15096        /* read in the PCIe link speed information */
15097        ret = pcie_speeds(dd);
15098        if (ret)
15099                goto bail_cleanup;
15100
15101        /* call before get_platform_config(), after init_chip_resources() */
15102        ret = eprom_init(dd);
15103        if (ret)
15104                goto bail_free_rcverr;
15105
15106        /* Needs to be called before hfi1_firmware_init */
15107        get_platform_config(dd);
15108
15109        /* read in firmware */
15110        ret = hfi1_firmware_init(dd);
15111        if (ret)
15112                goto bail_cleanup;
15113
15114        /*
15115         * In general, the PCIe Gen3 transition must occur after the
15116         * chip has been idled (so it won't initiate any PCIe transactions
15117         * e.g. an interrupt) and before the driver changes any registers
15118         * (the transition will reset the registers).
15119         *
15120         * In particular, place this call after:
15121         * - init_chip()     - the chip will not initiate any PCIe transactions
15122         * - pcie_speeds()   - reads the current link speed
15123         * - hfi1_firmware_init() - the needed firmware is ready to be
15124         *                          downloaded
15125         */
15126        ret = do_pcie_gen3_transition(dd);
15127        if (ret)
15128                goto bail_cleanup;
15129
15130        /* start setting dd values and adjusting CSRs */
15131        init_early_variables(dd);
15132
15133        parse_platform_config(dd);
15134
15135        ret = obtain_boardname(dd);
15136        if (ret)
15137                goto bail_cleanup;
15138
15139        snprintf(dd->boardversion, BOARD_VERS_MAX,
15140                 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
15141                 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
15142                 (u32)dd->majrev,
15143                 (u32)dd->minrev,
15144                 (dd->revision >> CCE_REVISION_SW_SHIFT)
15145                    & CCE_REVISION_SW_MASK);
15146
15147        ret = set_up_context_variables(dd);
15148        if (ret)
15149                goto bail_cleanup;
15150
15151        /* set initial RXE CSRs */
15152        init_rxe(dd);
15153        /* set initial TXE CSRs */
15154        init_txe(dd);
15155        /* set initial non-RXE, non-TXE CSRs */
15156        init_other(dd);
15157        /* set up KDETH QP prefix in both RX and TX CSRs */
15158        init_kdeth_qp(dd);
15159
15160        ret = hfi1_dev_affinity_init(dd);
15161        if (ret)
15162                goto bail_cleanup;
15163
15164        /* send contexts must be set up before receive contexts */
15165        ret = init_send_contexts(dd);
15166        if (ret)
15167                goto bail_cleanup;
15168
15169        ret = hfi1_create_kctxts(dd);
15170        if (ret)
15171                goto bail_cleanup;
15172
15173        /*
15174         * Initialize aspm, to be done after gen3 transition and setting up
15175         * contexts and before enabling interrupts
15176         */
15177        aspm_init(dd);
15178
15179        ret = init_pervl_scs(dd);
15180        if (ret)
15181                goto bail_cleanup;
15182
15183        /* sdma init */
15184        for (i = 0; i < dd->num_pports; ++i) {
15185                ret = sdma_init(dd, i);
15186                if (ret)
15187                        goto bail_cleanup;
15188        }
15189
15190        /* use contexts created by hfi1_create_kctxts */
15191        ret = set_up_interrupts(dd);
15192        if (ret)
15193                goto bail_cleanup;
15194
15195        ret = hfi1_comp_vectors_set_up(dd);
15196        if (ret)
15197                goto bail_clear_intr;
15198
15199        /* set up LCB access - must be after set_up_interrupts() */
15200        init_lcb_access(dd);
15201
15202        /*
15203         * Serial number is created from the base guid:
15204         * [27:24] = base guid [38:35]
15205         * [23: 0] = base guid [23: 0]
15206         */
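        /*
         * Shifting the GUID right by 11 moves guid bit 35 down to bit 24,
         * so the 0xF000000 mask below picks up guid bits [38:35] as serial
         * bits [27:24], while 0xFFFFFF keeps guid bits [23:0] in place.
         */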
15207        snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
15208                 (dd->base_guid & 0xFFFFFF) |
15209                     ((dd->base_guid >> 11) & 0xF000000));
15210
15211        dd->oui1 = dd->base_guid >> 56 & 0xFF;
15212        dd->oui2 = dd->base_guid >> 48 & 0xFF;
15213        dd->oui3 = dd->base_guid >> 40 & 0xFF;
15214
15215        ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
15216        if (ret)
15217                goto bail_clear_intr;
15218
15219        thermal_init(dd);
15220
15221        ret = init_cntrs(dd);
15222        if (ret)
15223                goto bail_clear_intr;
15224
15225        ret = init_rcverr(dd);
15226        if (ret)
15227                goto bail_free_cntrs;
15228
15229        init_completion(&dd->user_comp);
15230
15231        /* The user refcount starts with one to indicate an active device */
15232        atomic_set(&dd->user_refcount, 1);
15233
15234        goto bail;
15235
15236bail_free_rcverr:
15237        free_rcverr(dd);
15238bail_free_cntrs:
15239        free_cntrs(dd);
15240bail_clear_intr:
15241        hfi1_comp_vectors_clean_up(dd);
15242        hfi1_clean_up_interrupts(dd);
15243bail_cleanup:
15244        hfi1_pcie_ddcleanup(dd);
15245bail_free:
15246        hfi1_free_devdata(dd);
15247        dd = ERR_PTR(ret);
15248bail:
15249        return dd;
15250}
15251
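/*
 * Return the number of additional egress cycles needed so that a packet of
 * dw_len dwords goes out no faster than desired_egress_rate (in Mb/s),
 * given the port's current_egress_rate.  Returns 0 when no slowdown is
 * required (rate unset, or the desired rate is at least the current rate).
 */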
15252static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
15253                        u32 dw_len)
15254{
15255        u32 delta_cycles;
15256        u32 current_egress_rate = ppd->current_egress_rate;
15257        /* rates here are in units of 10^6 bits/sec */
15258
15259        if (desired_egress_rate == -1)
15260                return 0; /* shouldn't happen */
15261
15262        if (desired_egress_rate >= current_egress_rate)
15263                return 0; /* we can't help go faster, only slower */
15264
15265        delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
15266                        egress_cycles(dw_len * 4, current_egress_rate);
15267
15268        return (u16)delta_cycles;
15269}
15270
15271/**
15272 * create_pbc - build a pbc for transmission
15273 * @flags: special case flags or-ed in built pbc
15274 * @srate_mbs: static rate in Mb/s
15275 * @vl: vl
15276 * @dw_len: dword length (header words + data words + pbc words)
15277 *
15278 * Create a PBC with the given flags, rate, VL, and length.
15279 *
15280 * NOTE: The PBC created will not insert any HCRC - all callers but one are
15281 * for verbs, which does not use this PSM feature.  The lone other caller
15282 * is for the diagnostic interface which calls this if the user does not
15283 * supply their own PBC.
15284 */
15285u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
15286               u32 dw_len)
15287{
15288        u64 pbc, delay = 0;
15289
15290        if (unlikely(srate_mbs))
15291                delay = delay_cycles(ppd, srate_mbs, dw_len);
15292
15293        pbc = flags
15294                | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
15295                | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
15296                | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
15297                | (dw_len & PBC_LENGTH_DWS_MASK)
15298                        << PBC_LENGTH_DWS_SHIFT;
15299
15300        return pbc;
15301}
15302
15303#define SBUS_THERMAL    0x4f
15304#define SBUS_THERM_MONITOR_MODE 0x1
15305
15306#define THERM_FAILURE(dev, ret, reason) \
15307        dd_dev_err((dd),                                                \
15308                   "Thermal sensor initialization failed: %s (%d)\n",   \
15309                   (reason), (ret))
15310
15311/*
15312 * Initialize the thermal sensor.
15313 *
15314 * After initialization, enable polling of thermal sensor through
15315 * SBus interface. For this to work, the SBus Master firmware
15316 * has to be loaded because the HW polling logic uses SBus
15317 * interrupts, which are not supported by the default firmware.
15318 * Otherwise, no data will be returned through
15319 * the ASIC_STS_THERM CSR.
15320 */
15321static int thermal_init(struct hfi1_devdata *dd)
15322{
15323        int ret = 0;
15324
15325        if (dd->icode != ICODE_RTL_SILICON ||
15326            check_chip_resource(dd, CR_THERM_INIT, NULL))
15327                return ret;
15328
15329        ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
15330        if (ret) {
15331                THERM_FAILURE(dd, ret, "Acquire SBus");
15332                return ret;
15333        }
15334
15335        dd_dev_info(dd, "Initializing thermal sensor\n");
15336        /* Disable polling of thermal readings */
15337        write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
15338        msleep(100);
15339        /* Thermal Sensor Initialization */
15340        /*    Step 1: Reset the Thermal SBus Receiver */
15341        ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15342                                RESET_SBUS_RECEIVER, 0);
15343        if (ret) {
15344                THERM_FAILURE(dd, ret, "Bus Reset");
15345                goto done;
15346        }
15347        /*    Step 2: Set Reset bit in Thermal block */
15348        ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15349                                WRITE_SBUS_RECEIVER, 0x1);
15350        if (ret) {
15351                THERM_FAILURE(dd, ret, "Therm Block Reset");
15352                goto done;
15353        }
15354        /*    Step 3: Write clock divider value (100MHz -> 2MHz) */
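        /*    (0x32 = 50 decimal, presumably a divide-by-50: 100 MHz / 50 = 2 MHz) */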
15355        ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
15356                                WRITE_SBUS_RECEIVER, 0x32);
15357        if (ret) {
15358                THERM_FAILURE(dd, ret, "Write Clock Div");
15359                goto done;
15360        }
15361        /*    Step 4: Select temperature mode */
15362        ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
15363                                WRITE_SBUS_RECEIVER,
15364                                SBUS_THERM_MONITOR_MODE);
15365        if (ret) {
15366                THERM_FAILURE(dd, ret, "Write Mode Sel");
15367                goto done;
15368        }
15369        /*    Step 5: De-assert block reset and start conversion */
15370        ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15371                                WRITE_SBUS_RECEIVER, 0x2);
15372        if (ret) {
15373                THERM_FAILURE(dd, ret, "Write Reset Deassert");
15374                goto done;
15375        }
15376        /*    Step 5.1: Wait for first conversion (21.5ms per spec) */
15377        msleep(22);
15378
15379        /* Enable polling of thermal readings */
15380        write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
15381
15382        /* Set initialized flag */
15383        ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
15384        if (ret)
15385                THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
15386
15387done:
15388        release_chip_resource(dd, CR_SBUS);
15389        return ret;
15390}
15391
15392static void handle_temp_err(struct hfi1_devdata *dd)
15393{
15394        struct hfi1_pportdata *ppd = &dd->pport[0];
15395        /*
15396         * Thermal Critical Interrupt
15397         * Put the device into forced freeze mode, take link down to
15398         * offline, and put DC into reset.
15399         */
15400        dd_dev_emerg(dd,
15401                     "Critical temperature reached! Forcing device into freeze mode!\n");
15402        dd->flags |= HFI1_FORCED_FREEZE;
15403        start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
15404        /*
15405         * Shut DC down as much and as quickly as possible.
15406         *
15407         * Step 1: Take the link down to OFFLINE. This will cause the
15408         *         8051 to put the Serdes in reset. However, we don't want to
15409         *         go through the entire link state machine since we want to
15410         *         shut down ASAP. Furthermore, this is not a graceful shutdown
15411         *         but rather an attempt to save the chip.
15412         *         Code below is almost the same as quiet_serdes() but avoids
15413         *         all the extra work and the sleeps.
15414         */
15415        ppd->driver_link_ready = 0;
15416        ppd->link_enabled = 0;
15417        set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
15418                                PLS_OFFLINE);
15419        /*
15420         * Step 2: Shutdown LCB and 8051
15421         *         After shutdown, do not restore DC_CFG_RESET value.
15422         */
15423        dc_shutdown(dd);
15424}
15425