linux/drivers/infiniband/hw/qib/qib_iba7220.c
/*
 * Copyright (c) 2011 - 2017 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*
 * This file contains all of the code that is specific to the
 * QLogic_IB 7220 chip (except that specific to the SerDes)
 */

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/io.h>
#include <rdma/ib_verbs.h>

#include "qib.h"
#include "qib_7220.h"

static void qib_setup_7220_setextled(struct qib_pportdata *, u32);
static void qib_7220_handle_hwerrors(struct qib_devdata *, char *, size_t);
static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op);
static u32 qib_7220_iblink_state(u64);
static u8 qib_7220_phys_portstate(u64);
static void qib_sdma_update_7220_tail(struct qib_pportdata *, u16);
static void qib_set_ib_7220_lstate(struct qib_pportdata *, u16, u16);

/*
 * This file contains almost all the chip-specific register information and
 * access functions for the QLogic_IB 7220 PCI-Express chip, with the
 * exception of SerDes support, which is in qib_sd7220.c.
 */

/* Below uses machine-generated qib_chipnum_regs.h file */
#define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64))
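
/*
 * For illustration only (the offset value here is hypothetical, not taken
 * from the generated header): if QIB_7220_Control_OFFS were 0x10, then
 * KREG_IDX(Control) would be 0x10 / sizeof(u64) == 2; the kernel register
 * space is treated as an array of 64-bit registers, e.g.:
 *
 *      u64 control = qib_read_kreg64(dd, kr_control);
 */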

/* Use defines to tie machine-generated names to lower-case names */
#define kr_control KREG_IDX(Control)
#define kr_counterregbase KREG_IDX(CntrRegBase)
#define kr_errclear KREG_IDX(ErrClear)
#define kr_errmask KREG_IDX(ErrMask)
#define kr_errstatus KREG_IDX(ErrStatus)
#define kr_extctrl KREG_IDX(EXTCtrl)
#define kr_extstatus KREG_IDX(EXTStatus)
#define kr_gpio_clear KREG_IDX(GPIOClear)
#define kr_gpio_mask KREG_IDX(GPIOMask)
#define kr_gpio_out KREG_IDX(GPIOOut)
#define kr_gpio_status KREG_IDX(GPIOStatus)
#define kr_hrtbt_guid KREG_IDX(HRTBT_GUID)
#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
#define kr_hwerrclear KREG_IDX(HwErrClear)
#define kr_hwerrmask KREG_IDX(HwErrMask)
#define kr_hwerrstatus KREG_IDX(HwErrStatus)
#define kr_ibcctrl KREG_IDX(IBCCtrl)
#define kr_ibcddrctrl KREG_IDX(IBCDDRCtrl)
#define kr_ibcddrstatus KREG_IDX(IBCDDRStatus)
#define kr_ibcstatus KREG_IDX(IBCStatus)
#define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl)
#define kr_intclear KREG_IDX(IntClear)
#define kr_intmask KREG_IDX(IntMask)
#define kr_intstatus KREG_IDX(IntStatus)
#define kr_ncmodectrl KREG_IDX(IBNCModeCtrl)
#define kr_palign KREG_IDX(PageAlign)
#define kr_partitionkey KREG_IDX(RcvPartitionKey)
#define kr_portcnt KREG_IDX(PortCnt)
#define kr_rcvbthqp KREG_IDX(RcvBTHQP)
#define kr_rcvctrl KREG_IDX(RcvCtrl)
#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
#define kr_rcvpktledcnt KREG_IDX(RcvPktLEDCnt)
#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
#define kr_revision KREG_IDX(Revision)
#define kr_scratch KREG_IDX(Scratch)
#define kr_sendbuffererror KREG_IDX(SendBufErr0)
#define kr_sendctrl KREG_IDX(SendCtrl)
#define kr_senddmabase KREG_IDX(SendDmaBase)
#define kr_senddmabufmask0 KREG_IDX(SendDmaBufMask0)
#define kr_senddmabufmask1 (KREG_IDX(SendDmaBufMask0) + 1)
#define kr_senddmabufmask2 (KREG_IDX(SendDmaBufMask0) + 2)
#define kr_senddmahead KREG_IDX(SendDmaHead)
#define kr_senddmaheadaddr KREG_IDX(SendDmaHeadAddr)
#define kr_senddmalengen KREG_IDX(SendDmaLenGen)
#define kr_senddmastatus KREG_IDX(SendDmaStatus)
#define kr_senddmatail KREG_IDX(SendDmaTail)
#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
#define kr_sendpiobufbase KREG_IDX(SendBufBase)
#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
#define kr_sendpiosize KREG_IDX(SendBufSize)
#define kr_sendregbase KREG_IDX(SendRegBase)
#define kr_userregbase KREG_IDX(UserRegBase)
#define kr_xgxs_cfg KREG_IDX(XGXSCfg)

/* These must only be written via qib_write_kreg_ctxt() */
#define kr_rcvhdraddr KREG_IDX(RcvHdrAddr0)
#define kr_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)


#define CREG_IDX(regname) ((QIB_7220_##regname##_OFFS - \
                        QIB_7220_LBIntCnt_OFFS) / sizeof(u64))

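/*
 * A usage sketch: counter registers are indexed relative to LBIntCnt,
 * the first counter register, so cr_lbint below is index 0.  Counters
 * are read through the cregbase mapping via the helpers defined later
 * in this file, e.g.:
 *
 *      u64 pkts = read_7220_creg(dd, cr_pktsend);
 */
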
#define cr_badformat CREG_IDX(RxVersionErrCnt)
#define cr_erricrc CREG_IDX(RxICRCErrCnt)
#define cr_errlink CREG_IDX(RxLinkMalformCnt)
#define cr_errlpcrc CREG_IDX(RxLPCRCErrCnt)
#define cr_errpkey CREG_IDX(RxPKeyMismatchCnt)
#define cr_rcvflowctrl_err CREG_IDX(RxFlowCtrlViolCnt)
#define cr_err_rlen CREG_IDX(RxLenErrCnt)
#define cr_errslen CREG_IDX(TxLenErrCnt)
#define cr_errtidfull CREG_IDX(RxTIDFullErrCnt)
#define cr_errtidvalid CREG_IDX(RxTIDValidErrCnt)
#define cr_errvcrc CREG_IDX(RxVCRCErrCnt)
#define cr_ibstatuschange CREG_IDX(IBStatusChangeCnt)
#define cr_lbint CREG_IDX(LBIntCnt)
#define cr_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
#define cr_invalidslen CREG_IDX(TxMaxMinLenErrCnt)
#define cr_lbflowstall CREG_IDX(LBFlowStallCnt)
#define cr_pktrcv CREG_IDX(RxDataPktCnt)
#define cr_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
#define cr_pktsend CREG_IDX(TxDataPktCnt)
#define cr_pktsendflow CREG_IDX(TxFlowPktCnt)
#define cr_portovfl CREG_IDX(RxP0HdrEgrOvflCnt)
#define cr_rcvebp CREG_IDX(RxEBPCnt)
#define cr_rcvovfl CREG_IDX(RxBufOvflCnt)
#define cr_senddropped CREG_IDX(TxDroppedPktCnt)
#define cr_sendstall CREG_IDX(TxFlowStallCnt)
#define cr_sendunderrun CREG_IDX(TxUnderrunCnt)
#define cr_wordrcv CREG_IDX(RxDwordCnt)
#define cr_wordsend CREG_IDX(TxDwordCnt)
#define cr_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
#define cr_rxdroppkt CREG_IDX(RxDroppedPktCnt)
#define cr_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
#define cr_iblinkdown CREG_IDX(IBLinkDownedCnt)
#define cr_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
#define cr_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
#define cr_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
#define cr_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
#define cr_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
#define cr_rxvlerr CREG_IDX(RxVlErrCnt)
#define cr_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
#define cr_psstat CREG_IDX(PSStat)
#define cr_psstart CREG_IDX(PSStart)
#define cr_psinterval CREG_IDX(PSInterval)
#define cr_psrcvdatacount CREG_IDX(PSRcvDataCount)
#define cr_psrcvpktscount CREG_IDX(PSRcvPktsCount)
#define cr_psxmitdatacount CREG_IDX(PSXmitDataCount)
#define cr_psxmitpktscount CREG_IDX(PSXmitPktsCount)
#define cr_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
#define cr_txsdmadesc CREG_IDX(TxSDmaDescCnt)
#define cr_pcieretrydiag CREG_IDX(PcieRetryBufDiagQwordCnt)

#define SYM_RMASK(regname, fldname) ((u64)              \
        QIB_7220_##regname##_##fldname##_RMASK)
#define SYM_MASK(regname, fldname) ((u64)               \
        QIB_7220_##regname##_##fldname##_RMASK <<       \
         QIB_7220_##regname##_##fldname##_LSB)
#define SYM_LSB(regname, fldname) (QIB_7220_##regname##_##fldname##_LSB)
#define SYM_FIELD(value, regname, fldname) ((u64) \
        (((value) >> SYM_LSB(regname, fldname)) & \
         SYM_RMASK(regname, fldname)))
#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)

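/*
 * For illustration: SYM_MASK() builds an in-place mask by shifting the
 * right-justified RMASK up to the field's LSB, and SYM_FIELD() inverts
 * that to extract a field from a register value, as used later in this
 * file:
 *
 *      u8 lts = (u8)SYM_FIELD(ibcst, IBCStatus, LinkTrainingState);
 */
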
/* ibcctrl bits */
#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
/* cycle through TS1/TS2 till OK */
#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
/* wait for TS1, then go on */
#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16

#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1           /* move to 0x11 */
#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2          /* move to 0x21 */
#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */

#define BLOB_7220_IBCHG 0x81

/*
 * We could have a single register get/put routine that takes a group type,
 * but this is somewhat clearer and cleaner.  It also gives us some error
 * checking.  64 bit register reads should always work, but are inefficient
 * on Opteron (the northbridge always generates 2 separate HT 32 bit reads),
 * so we use kreg32 wherever possible.  User register and counter register
 * reads are always 32 bit reads, so there is only one form of those routines.
 */

/**
 * qib_read_ureg32 - read 32-bit virtualized per-context register
 * @dd: device
 * @regno: register number
 * @ctxt: context number
 *
 * Return the contents of a register that is virtualized to be per context.
 * Returns 0 on errors (not distinguishable from a valid 0 at runtime;
 * we may add a separate error variable at some point).
 */
static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
                                  enum qib_ureg regno, int ctxt)
{
        if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
                return 0;

        if (dd->userbase)
                return readl(regno + (u64 __iomem *)
                             ((char __iomem *)dd->userbase +
                              dd->ureg_align * ctxt));
        else
                return readl(regno + (u64 __iomem *)
                             (dd->uregbase +
                              (char __iomem *)dd->kregbase +
                              dd->ureg_align * ctxt));
}

/**
 * qib_write_ureg - write a 64-bit virtualized per-context register
 * @dd: device
 * @regno: register number
 * @value: value
 * @ctxt: context
 *
 * Write the contents of a register that is virtualized to be per context.
 */
static inline void qib_write_ureg(const struct qib_devdata *dd,
                                  enum qib_ureg regno, u64 value, int ctxt)
{
        u64 __iomem *ubase;

        if (dd->userbase)
                ubase = (u64 __iomem *)
                        ((char __iomem *) dd->userbase +
                         dd->ureg_align * ctxt);
        else
                ubase = (u64 __iomem *)
                        (dd->uregbase +
                         (char __iomem *) dd->kregbase +
                         dd->ureg_align * ctxt);

        if (dd->kregbase && (dd->flags & QIB_PRESENT))
                writeq(value, &ubase[regno]);
}

/**
 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
 * @dd: the qlogic_ib device
 * @regno: the register number to write
 * @ctxt: the context containing the register
 * @value: the value to write
 */
static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
                                       const u16 regno, unsigned ctxt,
                                       u64 value)
{
        qib_write_kreg(dd, regno + ctxt, value);
}
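
/*
 * Per-context kernel registers are laid out as consecutive 64-bit slots,
 * so context N's copy of a register is simply (base regno + N).  A usage
 * sketch (field names from this driver's context data):
 *
 *      qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, rcd->rcvhdrq_phys);
 */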

static inline void write_7220_creg(const struct qib_devdata *dd,
                                   u16 regno, u64 value)
{
        if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT))
                writeq(value, &dd->cspec->cregbase[regno]);
}

static inline u64 read_7220_creg(const struct qib_devdata *dd, u16 regno)
{
        if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
                return 0;
        return readq(&dd->cspec->cregbase[regno]);
}

static inline u32 read_7220_creg32(const struct qib_devdata *dd, u16 regno)
{
        if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
                return 0;
        return readl(&dd->cspec->cregbase[regno]);
}

/* kr_revision bits */
#define QLOGIC_IB_R_EMULATORREV_MASK ((1ULL << 22) - 1)
#define QLOGIC_IB_R_EMULATORREV_SHIFT 40

/* kr_control bits */
#define QLOGIC_IB_C_RESET (1U << 7)

/* kr_intstatus, kr_intclear, kr_intmask bits */
#define QLOGIC_IB_I_RCVURG_MASK ((1ULL << 17) - 1)
#define QLOGIC_IB_I_RCVURG_SHIFT 32
#define QLOGIC_IB_I_RCVAVAIL_MASK ((1ULL << 17) - 1)
#define QLOGIC_IB_I_RCVAVAIL_SHIFT 0
#define QLOGIC_IB_I_SERDESTRIMDONE (1ULL << 27)

/* more kr_control bits */
#define QLOGIC_IB_C_FREEZEMODE 0x00000002
#define QLOGIC_IB_C_LINKENABLE 0x00000004

#define QLOGIC_IB_I_SDMAINT             0x8000000000000000ULL
#define QLOGIC_IB_I_SDMADISABLED        0x4000000000000000ULL
#define QLOGIC_IB_I_ERROR               0x0000000080000000ULL
#define QLOGIC_IB_I_SPIOSENT            0x0000000040000000ULL
#define QLOGIC_IB_I_SPIOBUFAVAIL        0x0000000020000000ULL
#define QLOGIC_IB_I_GPIO                0x0000000010000000ULL

/* masks for sanity checking interrupt and error bits */
#define QLOGIC_IB_I_BITSEXTANT \
                (QLOGIC_IB_I_SDMAINT | QLOGIC_IB_I_SDMADISABLED | \
                (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT) | \
                (QLOGIC_IB_I_RCVAVAIL_MASK << \
                 QLOGIC_IB_I_RCVAVAIL_SHIFT) | \
                QLOGIC_IB_I_ERROR | QLOGIC_IB_I_SPIOSENT | \
                QLOGIC_IB_I_SPIOBUFAVAIL | QLOGIC_IB_I_GPIO | \
                QLOGIC_IB_I_SERDESTRIMDONE)

#define IB_HWE_BITSEXTANT \
               (HWE_MASK(RXEMemParityErr) | \
                HWE_MASK(TXEMemParityErr) | \
                (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<  \
                 QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) | \
                QLOGIC_IB_HWE_PCIE1PLLFAILED | \
                QLOGIC_IB_HWE_PCIE0PLLFAILED | \
                QLOGIC_IB_HWE_PCIEPOISONEDTLP | \
                QLOGIC_IB_HWE_PCIECPLTIMEOUT | \
                QLOGIC_IB_HWE_PCIEBUSPARITYXTLH | \
                QLOGIC_IB_HWE_PCIEBUSPARITYXADM | \
                QLOGIC_IB_HWE_PCIEBUSPARITYRADM | \
                HWE_MASK(PowerOnBISTFailed) |     \
                QLOGIC_IB_HWE_COREPLL_FBSLIP | \
                QLOGIC_IB_HWE_COREPLL_RFSLIP | \
                QLOGIC_IB_HWE_SERDESPLLFAILED | \
                HWE_MASK(IBCBusToSPCParityErr) | \
                HWE_MASK(IBCBusFromSPCParityErr) | \
                QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR | \
                QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR | \
                QLOGIC_IB_HWE_SDMAMEMREADERR | \
                QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED | \
                QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT | \
                QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT | \
                QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT | \
                QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT | \
                QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR | \
                QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR | \
                QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR | \
                QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR)

#define IB_E_BITSEXTANT                                                 \
        (ERR_MASK(RcvFormatErr) | ERR_MASK(RcvVCRCErr) |                \
         ERR_MASK(RcvICRCErr) | ERR_MASK(RcvMinPktLenErr) |             \
         ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvLongPktLenErr) |       \
         ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvUnexpectedCharErr) | \
         ERR_MASK(RcvUnsupportedVLErr) | ERR_MASK(RcvEBPErr) |          \
         ERR_MASK(RcvIBFlowErr) | ERR_MASK(RcvBadVersionErr) |          \
         ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) |            \
         ERR_MASK(RcvBadTidErr) | ERR_MASK(RcvHdrLenErr) |              \
         ERR_MASK(RcvHdrErr) | ERR_MASK(RcvIBLostLinkErr) |             \
         ERR_MASK(SendSpecialTriggerErr) |                              \
         ERR_MASK(SDmaDisabledErr) | ERR_MASK(SendMinPktLenErr) |       \
         ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnderRunErr) |       \
         ERR_MASK(SendPktLenErr) | ERR_MASK(SendDroppedSmpPktErr) |     \
         ERR_MASK(SendDroppedDataPktErr) |                              \
         ERR_MASK(SendPioArmLaunchErr) |                                \
         ERR_MASK(SendUnexpectedPktNumErr) |                            \
         ERR_MASK(SendUnsupportedVLErr) | ERR_MASK(SendBufMisuseErr) |  \
         ERR_MASK(SDmaGenMismatchErr) | ERR_MASK(SDmaOutOfBoundErr) |   \
         ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) |      \
         ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) |           \
         ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) |           \
         ERR_MASK(SDmaUnexpDataErr) |                                   \
         ERR_MASK(IBStatusChanged) | ERR_MASK(InvalidAddrErr) |         \
         ERR_MASK(ResetNegated) | ERR_MASK(HardwareErr) |               \
         ERR_MASK(SDmaDescAddrMisalignErr) |                            \
         ERR_MASK(InvalidEEPCmd))

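/*
 * A usage sketch: the "extant" masks cover every interrupt/error bit the
 * driver knows about, so unknown bits can be flagged, as the error
 * handler later in this file does:
 *
 *      if (errs & ~IB_E_BITSEXTANT)
 *              qib_dev_err(dd,
 *                      "error interrupt with unknown errors %llx set\n",
 *                      (unsigned long long) (errs & ~IB_E_BITSEXTANT));
 */
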
/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus bits */
#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK  0x00000000000000ffULL
#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT 0
#define QLOGIC_IB_HWE_PCIEPOISONEDTLP      0x0000000010000000ULL
#define QLOGIC_IB_HWE_PCIECPLTIMEOUT       0x0000000020000000ULL
#define QLOGIC_IB_HWE_PCIEBUSPARITYXTLH    0x0000000040000000ULL
#define QLOGIC_IB_HWE_PCIEBUSPARITYXADM    0x0000000080000000ULL
#define QLOGIC_IB_HWE_PCIEBUSPARITYRADM    0x0000000100000000ULL
#define QLOGIC_IB_HWE_COREPLL_FBSLIP       0x0080000000000000ULL
#define QLOGIC_IB_HWE_COREPLL_RFSLIP       0x0100000000000000ULL
#define QLOGIC_IB_HWE_PCIE1PLLFAILED       0x0400000000000000ULL
#define QLOGIC_IB_HWE_PCIE0PLLFAILED       0x0800000000000000ULL
#define QLOGIC_IB_HWE_SERDESPLLFAILED      0x1000000000000000ULL
/* specific to this chip */
#define QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR         0x0000000000000040ULL
#define QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR          0x0000000000000080ULL
#define QLOGIC_IB_HWE_SDMAMEMREADERR              0x0000000010000000ULL
#define QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED          0x2000000000000000ULL
#define QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT   0x0100000000000000ULL
#define QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT   0x0200000000000000ULL
#define QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT   0x0400000000000000ULL
#define QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT   0x0800000000000000ULL
#define QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR       0x0000008000000000ULL
#define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR        0x0000004000000000ULL
#define QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR 0x0000001000000000ULL
#define QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR 0x0000002000000000ULL

#define IBA7220_IBCC_LINKCMD_SHIFT 19

/* kr_ibcddrctrl bits */
#define IBA7220_IBC_DLIDLMC_MASK        0xFFFFFFFFUL
#define IBA7220_IBC_DLIDLMC_SHIFT       32

#define IBA7220_IBC_HRTBT_MASK  (SYM_RMASK(IBCDDRCtrl, HRTBT_AUTO) | \
                                 SYM_RMASK(IBCDDRCtrl, HRTBT_ENB))
#define IBA7220_IBC_HRTBT_SHIFT SYM_LSB(IBCDDRCtrl, HRTBT_ENB)

#define IBA7220_IBC_LANE_REV_SUPPORTED (1<<8)
#define IBA7220_IBC_LREV_MASK   1
#define IBA7220_IBC_LREV_SHIFT  8
#define IBA7220_IBC_RXPOL_MASK  1
#define IBA7220_IBC_RXPOL_SHIFT 7
#define IBA7220_IBC_WIDTH_SHIFT 5
#define IBA7220_IBC_WIDTH_MASK  0x3
#define IBA7220_IBC_WIDTH_1X_ONLY       (0 << IBA7220_IBC_WIDTH_SHIFT)
#define IBA7220_IBC_WIDTH_4X_ONLY       (1 << IBA7220_IBC_WIDTH_SHIFT)
#define IBA7220_IBC_WIDTH_AUTONEG       (2 << IBA7220_IBC_WIDTH_SHIFT)
#define IBA7220_IBC_SPEED_AUTONEG       (1 << 1)
#define IBA7220_IBC_SPEED_SDR           (1 << 2)
#define IBA7220_IBC_SPEED_DDR           (1 << 3)
#define IBA7220_IBC_SPEED_AUTONEG_MASK  (0x7 << 1)
#define IBA7220_IBC_IBTA_1_2_MASK       (1)

/* kr_ibcddrstatus */
/* link latency shift is 0, don't bother defining */
#define IBA7220_DDRSTAT_LINKLAT_MASK    0x3ffffff

/* kr_extstatus bits */
#define QLOGIC_IB_EXTS_FREQSEL 0x2
#define QLOGIC_IB_EXTS_SERDESSEL 0x4
#define QLOGIC_IB_EXTS_MEMBIST_ENDTEST     0x0000000000004000
#define QLOGIC_IB_EXTS_MEMBIST_DISABLED    0x0000000000008000

/* kr_xgxsconfig bits */
#define QLOGIC_IB_XGXS_RESET          0x5ULL
#define QLOGIC_IB_XGXS_FC_SAFE        (1ULL << 63)

/* kr_rcvpktledcnt */
#define IBA7220_LEDBLINK_ON_SHIFT 32 /* 4ns period on after packet */
#define IBA7220_LEDBLINK_OFF_SHIFT 0 /* 4ns period off before next on */

#define _QIB_GPIO_SDA_NUM 1
#define _QIB_GPIO_SCL_NUM 0
#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7220 cards. */
#define QIB_TWSI_TEMP_DEV 0x98

/* HW counter clock is at 4nsec */
#define QIB_7220_PSXMITWAIT_CHECK_RATE 4000

#define IBA7220_R_INTRAVAIL_SHIFT 17
#define IBA7220_R_PKEY_DIS_SHIFT 34
#define IBA7220_R_TAILUPD_SHIFT 35
#define IBA7220_R_CTXTCFG_SHIFT 36

#define IBA7220_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */

/*
 * the size bits give us 2^N, in KB units.  0 marks as invalid,
 * and 7 is reserved.  We currently use only 2KB and 4KB
 */
#define IBA7220_TID_SZ_SHIFT 37 /* shift to 3bit size selector */
#define IBA7220_TID_SZ_2K (1UL << IBA7220_TID_SZ_SHIFT) /* 2KB */
#define IBA7220_TID_SZ_4K (2UL << IBA7220_TID_SZ_SHIFT) /* 4KB */
#define IBA7220_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
#define PBC_7220_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
#define PBC_7220_VL15_SEND_CTRL (1ULL << 31) /* control version of same */

#define AUTONEG_TRIES 5 /* sequential retries to negotiate DDR */

/* packet rate matching delay multiplier */
static u8 rate_to_delay[2][2] = {
        /* 1x, 4x */
        {   8, 2 }, /* SDR */
        {   4, 1 }  /* DDR */
};

static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
        [IB_RATE_2_5_GBPS] = 8,
        [IB_RATE_5_GBPS] = 4,
        [IB_RATE_10_GBPS] = 2,
        [IB_RATE_20_GBPS] = 1
};

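/*
 * How the two tables relate (a sketch; the index expressions below are
 * illustrative, not lifted from this driver): rate_to_delay is indexed
 * by [DDR?][4X?], so SDR/1X (2.5 Gbps) gets the largest multiplier and
 * DDR/4X (20 Gbps) the smallest, matching the per-IB-rate table above:
 *
 *      u8 delay = rate_to_delay[speed == QIB_IB_DDR][width == IB_WIDTH_4X];
 */
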
#define IBA7220_LINKSPEED_SHIFT SYM_LSB(IBCStatus, LinkSpeedActive)
#define IBA7220_LINKWIDTH_SHIFT SYM_LSB(IBCStatus, LinkWidthActive)

/* link training states, from IBC */
#define IB_7220_LT_STATE_DISABLED        0x00
#define IB_7220_LT_STATE_LINKUP          0x01
#define IB_7220_LT_STATE_POLLACTIVE      0x02
#define IB_7220_LT_STATE_POLLQUIET       0x03
#define IB_7220_LT_STATE_SLEEPDELAY      0x04
#define IB_7220_LT_STATE_SLEEPQUIET      0x05
#define IB_7220_LT_STATE_CFGDEBOUNCE     0x08
#define IB_7220_LT_STATE_CFGRCVFCFG      0x09
#define IB_7220_LT_STATE_CFGWAITRMT      0x0a
#define IB_7220_LT_STATE_CFGIDLE 0x0b
#define IB_7220_LT_STATE_RECOVERRETRAIN  0x0c
#define IB_7220_LT_STATE_RECOVERWAITRMT  0x0e
#define IB_7220_LT_STATE_RECOVERIDLE     0x0f

/* link state machine states from IBC */
#define IB_7220_L_STATE_DOWN             0x0
#define IB_7220_L_STATE_INIT             0x1
#define IB_7220_L_STATE_ARM              0x2
#define IB_7220_L_STATE_ACTIVE           0x3
#define IB_7220_L_STATE_ACT_DEFER        0x4

static const u8 qib_7220_physportstate[0x20] = {
        [IB_7220_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
        [IB_7220_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
        [IB_7220_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
        [IB_7220_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
        [IB_7220_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
        [IB_7220_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
        [IB_7220_LT_STATE_CFGDEBOUNCE] =
                IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7220_LT_STATE_CFGRCVFCFG] =
                IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7220_LT_STATE_CFGWAITRMT] =
                IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7220_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7220_LT_STATE_RECOVERRETRAIN] =
                IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
        [IB_7220_LT_STATE_RECOVERWAITRMT] =
                IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
        [IB_7220_LT_STATE_RECOVERIDLE] =
                IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
        [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
};

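/*
 * A minimal sketch (hypothetical helper; the real qib_7220_phys_portstate()
 * is defined later in this file) of how the table is used: extract the
 * link training state from IBCStatus and index the array:
 *
 *      static u8 example_phys_portstate(u64 ibcs)
 *      {
 *              u8 lts = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState);
 *
 *              return qib_7220_physportstate[lts];
 *      }
 */
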
int qib_special_trigger;
module_param_named(special_trigger, qib_special_trigger, int, S_IRUGO);
MODULE_PARM_DESC(special_trigger, "Enable SpecialTrigger arm/launch");

#define IBCBUSFRSPCPARITYERR HWE_MASK(IBCBusFromSPCParityErr)
#define IBCBUSTOSPCPARITYERR HWE_MASK(IBCBusToSPCParityErr)

#define SYM_MASK_BIT(regname, fldname, bit) ((u64) \
        (1ULL << (SYM_LSB(regname, fldname) + (bit))))

#define TXEMEMPARITYERR_PIOBUF \
        SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 0)
#define TXEMEMPARITYERR_PIOPBC \
        SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 1)
#define TXEMEMPARITYERR_PIOLAUNCHFIFO \
        SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 2)

#define RXEMEMPARITYERR_RCVBUF \
        SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 0)
#define RXEMEMPARITYERR_LOOKUPQ \
        SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 1)
#define RXEMEMPARITYERR_EXPTID \
        SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 2)
#define RXEMEMPARITYERR_EAGERTID \
        SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 3)
#define RXEMEMPARITYERR_FLAGBUF \
        SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 4)
#define RXEMEMPARITYERR_DATAINFO \
        SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 5)
#define RXEMEMPARITYERR_HDRINFO \
        SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 6)

/* 7220 specific hardware errors... */
static const struct qib_hwerror_msgs qib_7220_hwerror_msgs[] = {
        /* generic hardware errors */
        QLOGIC_IB_HWE_MSG(IBCBUSFRSPCPARITYERR, "QIB2IB Parity"),
        QLOGIC_IB_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2QIB Parity"),

        QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOBUF,
                          "TXE PIOBUF Memory Parity"),
        QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOPBC,
                          "TXE PIOPBC Memory Parity"),
        QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOLAUNCHFIFO,
                          "TXE PIOLAUNCHFIFO Memory Parity"),

        QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_RCVBUF,
                          "RXE RCVBUF Memory Parity"),
        QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_LOOKUPQ,
                          "RXE LOOKUPQ Memory Parity"),
        QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EAGERTID,
                          "RXE EAGERTID Memory Parity"),
        QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EXPTID,
                          "RXE EXPTID Memory Parity"),
        QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_FLAGBUF,
                          "RXE FLAGBUF Memory Parity"),
        QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_DATAINFO,
                          "RXE DATAINFO Memory Parity"),
        QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_HDRINFO,
                          "RXE HDRINFO Memory Parity"),

        /* chip-specific hardware errors */
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEPOISONEDTLP,
                          "PCIe Poisoned TLP"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT,
                          "PCIe completion timeout"),
        /*
         * In practice, it's unlikely that we'll see PCIe PLL, or bus
         * parity or memory parity error failures, because most likely we
         * won't be able to talk to the core of the chip.  Nonetheless, we
         * might see them, if they are in parts of the PCIe core that aren't
         * essential.
         */
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE1PLLFAILED,
                          "PCIePLL1"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE0PLLFAILED,
                          "PCIePLL0"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXTLH,
                          "PCIe XTLH core parity"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXADM,
                          "PCIe ADM TX core parity"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYRADM,
                          "PCIe ADM RX core parity"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SERDESPLLFAILED,
                          "SerDes PLL"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR,
                          "PCIe cpl data queue"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR,
                          "PCIe cpl header queue"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SDMAMEMREADERR,
                          "Send DMA memory read"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED,
                          "uC PLL clock not locked"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT,
                          "PCIe serdes Q0 no clock"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT,
                          "PCIe serdes Q1 no clock"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT,
                          "PCIe serdes Q2 no clock"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT,
                          "PCIe serdes Q3 no clock"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR,
                          "DDS RXEQ memory parity"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR,
                          "IB uC memory parity"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR,
                          "PCIe uC oct0 memory parity"),
        QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR,
                          "PCIe uC oct1 memory parity"),
};

#define RXE_PARITY (RXEMEMPARITYERR_EAGERTID|RXEMEMPARITYERR_EXPTID)

#define QLOGIC_IB_E_PKTERRS (\
                ERR_MASK(SendPktLenErr) |                               \
                ERR_MASK(SendDroppedDataPktErr) |                       \
                ERR_MASK(RcvVCRCErr) |                                  \
                ERR_MASK(RcvICRCErr) |                                  \
                ERR_MASK(RcvShortPktLenErr) |                           \
                ERR_MASK(RcvEBPErr))

/* Convenience for decoding Send DMA errors */
#define QLOGIC_IB_E_SDMAERRS ( \
                ERR_MASK(SDmaGenMismatchErr) |                          \
                ERR_MASK(SDmaOutOfBoundErr) |                           \
                ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \
                ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) |    \
                ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) |    \
                ERR_MASK(SDmaUnexpDataErr) |                            \
                ERR_MASK(SDmaDescAddrMisalignErr) |                     \
                ERR_MASK(SDmaDisabledErr) |                             \
                ERR_MASK(SendBufMisuseErr))

/* These are all rcv-related errors which we want to count for stats */
#define E_SUM_PKTERRS \
        (ERR_MASK(RcvHdrLenErr) | ERR_MASK(RcvBadTidErr) |              \
         ERR_MASK(RcvBadVersionErr) | ERR_MASK(RcvHdrErr) |             \
         ERR_MASK(RcvLongPktLenErr) | ERR_MASK(RcvShortPktLenErr) |     \
         ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvMinPktLenErr) |        \
         ERR_MASK(RcvFormatErr) | ERR_MASK(RcvUnsupportedVLErr) |       \
         ERR_MASK(RcvUnexpectedCharErr) | ERR_MASK(RcvEBPErr))

/* These are all send-related errors which we want to count for stats */
#define E_SUM_ERRS \
        (ERR_MASK(SendPioArmLaunchErr) | ERR_MASK(SendUnexpectedPktNumErr) | \
         ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
         ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnsupportedVLErr) |  \
         ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) |         \
         ERR_MASK(InvalidAddrErr))

/*
 * This is similar to E_SUM_ERRS, but we can't ignore armlaunch errors
 * or errors not related to freeze and cancelling buffers.  Armlaunch
 * can't be ignored because more could arrive while we are still
 * cleaning up, and those need to be cancelled as they happen.
 */
#define E_SPKT_ERRS_IGNORE \
        (ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
         ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendMinPktLenErr) |      \
         ERR_MASK(SendPktLenErr))

/*
 * These are errors that can occur when the link changes state while
 * a packet is being sent or received.  This doesn't cover things
 * like EBP or VCRC that can be the result of the sender seeing the
 * link change state, so that we receive a "known bad" packet.
 */
#define E_SUM_LINK_PKTERRS \
        (ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
         ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) |         \
         ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvMinPktLenErr) |      \
         ERR_MASK(RcvUnexpectedCharErr))

static void autoneg_7220_work(struct work_struct *);
static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *, u64, u32 *);

/*
 * Called when we might have an error that is specific to a particular
 * PIO buffer, and may need to cancel that buffer, so it can be re-used;
 * we don't need to force the update of pioavail.
 */
static void qib_disarm_7220_senderrbufs(struct qib_pportdata *ppd)
{
        unsigned long sbuf[3];
        struct qib_devdata *dd = ppd->dd;

        /*
         * It's possible that sendbuffererror could have bits set; might
         * have already done this as a result of hardware error handling.
         */
        /* read these before writing errorclear */
        sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
        sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
        sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2);

        if (sbuf[0] || sbuf[1] || sbuf[2])
                qib_disarm_piobufs_set(dd, sbuf,
                                       dd->piobcnt2k + dd->piobcnt4k);
}

static void qib_7220_txe_recover(struct qib_devdata *dd)
{
        qib_devinfo(dd->pcidev, "Recovering from TXE PIO parity error\n");
        qib_disarm_7220_senderrbufs(dd->pport);
}

/*
 * This is called with interrupts disabled and sdma_lock held.
 */
static void qib_7220_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
{
        struct qib_devdata *dd = ppd->dd;
        u64 set_sendctrl = 0;
        u64 clr_sendctrl = 0;

        if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
                set_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable);
        else
                clr_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable);

        if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
                set_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable);
        else
                clr_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable);

        if (op & QIB_SDMA_SENDCTRL_OP_HALT)
                set_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt);
        else
                clr_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt);

        spin_lock(&dd->sendctrl_lock);

        dd->sendctrl |= set_sendctrl;
        dd->sendctrl &= ~clr_sendctrl;

        qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
        qib_write_kreg(dd, kr_scratch, 0);

        spin_unlock(&dd->sendctrl_lock);
}
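
/*
 * A usage sketch (flags from qib.h; this mirrors what the driver's sdma
 * state machine does through the per-chip sendctrl hook): enable the
 * engine and its interrupt without halting:
 *
 *      qib_7220_sdma_sendctrl(ppd, QIB_SDMA_SENDCTRL_OP_ENABLE |
 *                                  QIB_SDMA_SENDCTRL_OP_INTENABLE);
 */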

static void qib_decode_7220_sdma_errs(struct qib_pportdata *ppd,
                                      u64 err, char *buf, size_t blen)
{
        static const struct {
                u64 err;
                const char *msg;
        } errs[] = {
                { ERR_MASK(SDmaGenMismatchErr),
                  "SDmaGenMismatch" },
                { ERR_MASK(SDmaOutOfBoundErr),
                  "SDmaOutOfBound" },
                { ERR_MASK(SDmaTailOutOfBoundErr),
                  "SDmaTailOutOfBound" },
                { ERR_MASK(SDmaBaseErr),
                  "SDmaBase" },
                { ERR_MASK(SDma1stDescErr),
                  "SDma1stDesc" },
                { ERR_MASK(SDmaRpyTagErr),
                  "SDmaRpyTag" },
                { ERR_MASK(SDmaDwEnErr),
                  "SDmaDwEn" },
                { ERR_MASK(SDmaMissingDwErr),
                  "SDmaMissingDw" },
                { ERR_MASK(SDmaUnexpDataErr),
                  "SDmaUnexpData" },
                { ERR_MASK(SDmaDescAddrMisalignErr),
                  "SDmaDescAddrMisalign" },
                { ERR_MASK(SendBufMisuseErr),
                  "SendBufMisuse" },
                { ERR_MASK(SDmaDisabledErr),
                  "SDmaDisabled" },
        };
        int i;
        size_t bidx = 0;

        for (i = 0; i < ARRAY_SIZE(errs); i++) {
                if (err & errs[i].err)
                        bidx += scnprintf(buf + bidx, blen - bidx,
                                         "%s ", errs[i].msg);
        }
}

/*
 * This is called as part of link down clean up, so disarm and flush
 * all send buffers, so that SMP packets can be sent.
 */
static void qib_7220_sdma_hw_clean_up(struct qib_pportdata *ppd)
{
        /* This will trigger the Abort interrupt */
        sendctrl_7220_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH |
                          QIB_SENDCTRL_AVAIL_BLIP);
        ppd->dd->upd_pio_shadow = 1; /* update our idea of what's busy */
}

static void qib_sdma_7220_setlengen(struct qib_pportdata *ppd)
{
        /*
         * Set SendDmaLenGen, then clear and set the MSB of the
         * generation count, to enable generation checking and load the
         * internal generation counter.
         */
        qib_write_kreg(ppd->dd, kr_senddmalengen, ppd->sdma_descq_cnt);
        qib_write_kreg(ppd->dd, kr_senddmalengen,
                       ppd->sdma_descq_cnt |
                       (1ULL << QIB_7220_SendDmaLenGen_Generation_MSB));
}

static void qib_7220_sdma_hw_start_up(struct qib_pportdata *ppd)
{
        qib_sdma_7220_setlengen(ppd);
        qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */
        ppd->sdma_head_dma[0] = 0;
}

#define DISABLES_SDMA (                                                 \
                ERR_MASK(SDmaDisabledErr) |                             \
                ERR_MASK(SDmaBaseErr) |                                 \
                ERR_MASK(SDmaTailOutOfBoundErr) |                       \
                ERR_MASK(SDmaOutOfBoundErr) |                           \
                ERR_MASK(SDma1stDescErr) |                              \
                ERR_MASK(SDmaRpyTagErr) |                               \
                ERR_MASK(SDmaGenMismatchErr) |                          \
                ERR_MASK(SDmaDescAddrMisalignErr) |                     \
                ERR_MASK(SDmaMissingDwErr) |                            \
                ERR_MASK(SDmaDwEnErr))

static void sdma_7220_errors(struct qib_pportdata *ppd, u64 errs)
{
        unsigned long flags;
        struct qib_devdata *dd = ppd->dd;
        char *msg;

        errs &= QLOGIC_IB_E_SDMAERRS;

        msg = dd->cspec->sdmamsgbuf;
        qib_decode_7220_sdma_errs(ppd, errs, msg,
                sizeof(dd->cspec->sdmamsgbuf));
        spin_lock_irqsave(&ppd->sdma_lock, flags);

        if (errs & ERR_MASK(SendBufMisuseErr)) {
                unsigned long sbuf[3];

                sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
                sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
                sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2);

                qib_dev_err(ppd->dd,
                            "IB%u:%u SendBufMisuse: %04lx %016lx %016lx\n",
                            ppd->dd->unit, ppd->port, sbuf[2], sbuf[1],
                            sbuf[0]);
        }

        if (errs & ERR_MASK(SDmaUnexpDataErr))
                qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", ppd->dd->unit,
                            ppd->port);

        switch (ppd->sdma_state.current_state) {
        case qib_sdma_state_s00_hw_down:
                /* not expecting any interrupts */
                break;

        case qib_sdma_state_s10_hw_start_up_wait:
                /* handled in intr path */
                break;

        case qib_sdma_state_s20_idle:
                /* not expecting any interrupts */
                break;

        case qib_sdma_state_s30_sw_clean_up_wait:
                /* not expecting any interrupts */
                break;

        case qib_sdma_state_s40_hw_clean_up_wait:
                if (errs & ERR_MASK(SDmaDisabledErr))
                        __qib_sdma_process_event(ppd,
                                qib_sdma_event_e50_hw_cleaned);
                break;

        case qib_sdma_state_s50_hw_halt_wait:
                /* handled in intr path */
                break;

        case qib_sdma_state_s99_running:
                if (errs & DISABLES_SDMA)
                        __qib_sdma_process_event(ppd,
                                qib_sdma_event_e7220_err_halted);
                break;
        }

        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}

/*
 * Decode the error status into strings, deciding whether to always
 * print it or not depending on "normal packet errors" vs everything
 * else.  Return 1 if "real" errors, otherwise 0 if only packet
 * errors, so caller can decide what to print with the string.
 */
static int qib_decode_7220_err(struct qib_devdata *dd, char *buf, size_t blen,
                               u64 err)
{
        int iserr = 1;

        *buf = '\0';
        if (err & QLOGIC_IB_E_PKTERRS) {
                if (!(err & ~QLOGIC_IB_E_PKTERRS))
                        iserr = 0;
                if ((err & ERR_MASK(RcvICRCErr)) &&
                    !(err & (ERR_MASK(RcvVCRCErr) | ERR_MASK(RcvEBPErr))))
                        strlcat(buf, "CRC ", blen);
                if (!iserr)
                        goto done;
        }
        if (err & ERR_MASK(RcvHdrLenErr))
                strlcat(buf, "rhdrlen ", blen);
        if (err & ERR_MASK(RcvBadTidErr))
                strlcat(buf, "rbadtid ", blen);
        if (err & ERR_MASK(RcvBadVersionErr))
                strlcat(buf, "rbadversion ", blen);
        if (err & ERR_MASK(RcvHdrErr))
                strlcat(buf, "rhdr ", blen);
        if (err & ERR_MASK(SendSpecialTriggerErr))
                strlcat(buf, "sendspecialtrigger ", blen);
        if (err & ERR_MASK(RcvLongPktLenErr))
                strlcat(buf, "rlongpktlen ", blen);
        if (err & ERR_MASK(RcvMaxPktLenErr))
                strlcat(buf, "rmaxpktlen ", blen);
        if (err & ERR_MASK(RcvMinPktLenErr))
                strlcat(buf, "rminpktlen ", blen);
        if (err & ERR_MASK(SendMinPktLenErr))
                strlcat(buf, "sminpktlen ", blen);
        if (err & ERR_MASK(RcvFormatErr))
                strlcat(buf, "rformaterr ", blen);
        if (err & ERR_MASK(RcvUnsupportedVLErr))
                strlcat(buf, "runsupvl ", blen);
        if (err & ERR_MASK(RcvUnexpectedCharErr))
                strlcat(buf, "runexpchar ", blen);
        if (err & ERR_MASK(RcvIBFlowErr))
                strlcat(buf, "ribflow ", blen);
        if (err & ERR_MASK(SendUnderRunErr))
                strlcat(buf, "sunderrun ", blen);
        if (err & ERR_MASK(SendPioArmLaunchErr))
                strlcat(buf, "spioarmlaunch ", blen);
        if (err & ERR_MASK(SendUnexpectedPktNumErr))
                strlcat(buf, "sunexperrpktnum ", blen);
        if (err & ERR_MASK(SendDroppedSmpPktErr))
                strlcat(buf, "sdroppedsmppkt ", blen);
        if (err & ERR_MASK(SendMaxPktLenErr))
                strlcat(buf, "smaxpktlen ", blen);
        if (err & ERR_MASK(SendUnsupportedVLErr))
                strlcat(buf, "sunsupVL ", blen);
        if (err & ERR_MASK(InvalidAddrErr))
                strlcat(buf, "invalidaddr ", blen);
        if (err & ERR_MASK(RcvEgrFullErr))
                strlcat(buf, "rcvegrfull ", blen);
        if (err & ERR_MASK(RcvHdrFullErr))
                strlcat(buf, "rcvhdrfull ", blen);
        if (err & ERR_MASK(IBStatusChanged))
                strlcat(buf, "ibcstatuschg ", blen);
        if (err & ERR_MASK(RcvIBLostLinkErr))
                strlcat(buf, "riblostlink ", blen);
        if (err & ERR_MASK(HardwareErr))
                strlcat(buf, "hardware ", blen);
        if (err & ERR_MASK(ResetNegated))
                strlcat(buf, "reset ", blen);
        if (err & QLOGIC_IB_E_SDMAERRS)
                qib_decode_7220_sdma_errs(dd->pport, err, buf, blen);
        if (err & ERR_MASK(InvalidEEPCmd))
                strlcat(buf, "invalideepromcmd ", blen);
done:
        return iserr;
}

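/*
 * A usage sketch (mirroring handle_7220_errors() below): decode into the
 * shared buffer and log loudly only when "real" errors are present:
 *
 *      if (qib_decode_7220_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs))
 *              qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
 */
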
static void reenable_7220_chase(struct timer_list *t)
{
        struct qib_chippport_specific *cpspec = from_timer(cpspec, t,
                                                         chase_timer);
        struct qib_pportdata *ppd = &cpspec->pportdata;

        ppd->cpspec->chase_timer.expires = 0;
        qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
                QLOGIC_IB_IBCC_LINKINITCMD_POLL);
}

static void handle_7220_chase(struct qib_pportdata *ppd, u64 ibcst)
{
        u8 ibclt;
        unsigned long tnow;

        ibclt = (u8)SYM_FIELD(ibcst, IBCStatus, LinkTrainingState);

        /*
         * Detect and handle the state chase issue, where we can
         * get stuck if we are unlucky on timing on both sides of
         * the link.  If we are, we disable, set a timer, and
         * then re-enable.
         */
        switch (ibclt) {
        case IB_7220_LT_STATE_CFGRCVFCFG:
        case IB_7220_LT_STATE_CFGWAITRMT:
        case IB_7220_LT_STATE_TXREVLANES:
        case IB_7220_LT_STATE_CFGENH:
                tnow = jiffies;
                if (ppd->cpspec->chase_end &&
                    time_after(tnow, ppd->cpspec->chase_end)) {
                        ppd->cpspec->chase_end = 0;
                        qib_set_ib_7220_lstate(ppd,
                                QLOGIC_IB_IBCC_LINKCMD_DOWN,
                                QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
                        ppd->cpspec->chase_timer.expires = jiffies +
                                QIB_CHASE_DIS_TIME;
                        add_timer(&ppd->cpspec->chase_timer);
                } else if (!ppd->cpspec->chase_end)
                        ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
                break;

        default:
                ppd->cpspec->chase_end = 0;
                break;
        }
}

static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
{
        char *msg;
        u64 ignore_this_time = 0;
        u64 iserr = 0;
        struct qib_pportdata *ppd = dd->pport;
        u64 mask;

        /* don't report errors that are masked */
        errs &= dd->cspec->errormask;
        msg = dd->cspec->emsgbuf;

        /* do these first, they are most important */
        if (errs & ERR_MASK(HardwareErr))
                qib_7220_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));

        if (errs & QLOGIC_IB_E_SDMAERRS)
                sdma_7220_errors(ppd, errs);

        if (errs & ~IB_E_BITSEXTANT)
                qib_dev_err(dd,
                        "error interrupt with unknown errors %llx set\n",
                        (unsigned long long) (errs & ~IB_E_BITSEXTANT));

        if (errs & E_SUM_ERRS) {
                qib_disarm_7220_senderrbufs(ppd);
                if ((errs & E_SUM_LINK_PKTERRS) &&
                    !(ppd->lflags & QIBL_LINKACTIVE)) {
                        /*
                         * This can happen when trying to bring the link
                         * up, but the IB link changes state at the "wrong"
                         * time. The IB logic then complains that the packet
                         * isn't valid.  We don't want to confuse people, so
                         * we just don't print them, except at debug level.
                         */
                        ignore_this_time = errs & E_SUM_LINK_PKTERRS;
                }
        } else if ((errs & E_SUM_LINK_PKTERRS) &&
                   !(ppd->lflags & QIBL_LINKACTIVE)) {
                /*
                 * This can happen when SMA is trying to bring the link
                 * up, but the IB link changes state at the "wrong" time.
                 * The IB logic then complains that the packet isn't
                 * valid.  We don't want to confuse people, so we just
                 * don't print them, except at debug level.
                 */
                ignore_this_time = errs & E_SUM_LINK_PKTERRS;
        }

        qib_write_kreg(dd, kr_errclear, errs);

        errs &= ~ignore_this_time;
        if (!errs)
                goto done;

        /*
         * The ones we mask off are handled specially below
         * or above.  Also mask SDMADISABLED by default as it
         * is too chatty.
         */
        mask = ERR_MASK(IBStatusChanged) |
                ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) |
                ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr);

        qib_decode_7220_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask);

        if (errs & E_SUM_PKTERRS)
                qib_stats.sps_rcverrs++;
        if (errs & E_SUM_ERRS)
                qib_stats.sps_txerrs++;
        iserr = errs & ~(E_SUM_PKTERRS | QLOGIC_IB_E_PKTERRS |
                         ERR_MASK(SDmaDisabledErr));

        if (errs & ERR_MASK(IBStatusChanged)) {
                u64 ibcs;

                ibcs = qib_read_kreg64(dd, kr_ibcstatus);
                if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
                        handle_7220_chase(ppd, ibcs);

                /* Update our picture of width and speed from chip */
                ppd->link_width_active =
                        ((ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1) ?
                            IB_WIDTH_4X : IB_WIDTH_1X;
                ppd->link_speed_active =
                        ((ibcs >> IBA7220_LINKSPEED_SHIFT) & 1) ?
                            QIB_IB_DDR : QIB_IB_SDR;

                /*
                 * Since going into a recovery state causes the link state
                 * to go down and since recovery is transitory, it is better
                 * if we "miss" ever seeing the link training state go into
                 * recovery (i.e., ignore this transition for link state
                 * special handling purposes) without updating lastibcstat.
                 */
                if (qib_7220_phys_portstate(ibcs) !=
                                            IB_PHYSPORTSTATE_LINK_ERR_RECOVER)
                        qib_handle_e_ibstatuschanged(ppd, ibcs);
        }

        if (errs & ERR_MASK(ResetNegated)) {
                qib_dev_err(dd,
                        "Got reset, requires re-init (unload and reload driver)\n");
                dd->flags &= ~QIB_INITTED;  /* needs re-init */
                /* mark as having had error */
                *dd->devstatusp |= QIB_STATUS_HWERROR;
                *dd->pport->statusp &= ~QIB_STATUS_IB_CONF;
        }

        if (*msg && iserr)
                qib_dev_porterr(dd, ppd->port, "%s error\n", msg);

        if (ppd->state_wanted & ppd->lflags)
                wake_up_interruptible(&ppd->state_wait);

        /*
         * If there were hdrq or egrfull errors, wake up any processes
         * waiting in poll.  We used to try to check which contexts had
         * the overflow, but given the cost of that and the chip reads
         * to support it, it's better to just wake everybody up if we
         * get an overflow; waiters can poll again if it's not them.
         */
        if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
                qib_handle_urcv(dd, ~0U);
                if (errs & ERR_MASK(RcvEgrFullErr))
                        qib_stats.sps_buffull++;
                else
                        qib_stats.sps_hdrfull++;
        }
done:
        return;
}

1227/* Enable or disable delivery of interrupts from the chip */
1228static void qib_7220_set_intr_state(struct qib_devdata *dd, u32 enable)
1229{
1230        if (enable) {
1231                if (dd->flags & QIB_BADINTR)
1232                        return;
1233                qib_write_kreg(dd, kr_intmask, ~0ULL);
1234                /* force re-interrupt of any pending interrupts. */
1235                qib_write_kreg(dd, kr_intclear, 0ULL);
1236        } else
1237                qib_write_kreg(dd, kr_intmask, 0ULL);
1238}
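
/*
 * Illustrative sketch (not part of the driver): callers typically mask
 * chip interrupts across a risky operation and then unmask, relying on
 * the kr_intclear write of zero above to replay anything that became
 * pending in between; qib_7220_clear_freeze() below does this by hand.
 * The helper name and the callback are hypothetical.
 */
static inline void example_7220_with_intr_masked(struct qib_devdata *dd,
                                        void (*op)(struct qib_devdata *))
{
        qib_7220_set_intr_state(dd, 0); /* chip delivers no interrupts */
        op(dd);                         /* e.g. freeze cleanup or a reset */
        qib_7220_set_intr_state(dd, 1); /* unmask; pending ints replay */
}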
1239
1240/*
1241 * Try to cleanup as much as possible for anything that might have gone
1242 * wrong while in freeze mode, such as pio buffers being written by user
1243 * processes (causing armlaunch), send errors due to going into freeze mode,
1244 * etc., and try to avoid causing extra interrupts while doing so.
1245 * Forcibly update the in-memory pioavail register copies after cleanup
1246 * because the chip won't do it while in freeze mode (the register values
1247 * themselves are kept correct).
1248 * Make sure that we don't lose any important interrupts by using the chip
1249 * feature that says that writing 0 to a bit in *clear that is set in
1250 * *status will cause an interrupt to be generated again (if allowed by
1251 * the *mask value).
1252 * This is in chip-specific code because of all of the register accesses,
1253 * even though the details are similar on most chips.
1254 */
1255static void qib_7220_clear_freeze(struct qib_devdata *dd)
1256{
1257        /* disable error interrupts, to avoid confusion */
1258        qib_write_kreg(dd, kr_errmask, 0ULL);
1259
1260        /* also disable interrupts; errormask is sometimes overwritten */
1261        qib_7220_set_intr_state(dd, 0);
1262
1263        qib_cancel_sends(dd->pport);
1264
1265        /* clear the freeze, and be sure chip saw it */
1266        qib_write_kreg(dd, kr_control, dd->control);
1267        qib_read_kreg32(dd, kr_scratch);
1268
1269        /* force in-memory update now we are out of freeze */
1270        qib_force_pio_avail_update(dd);
1271
1272        /*
1273         * force new interrupt if any hwerr, error or interrupt bits are
1274         * still set, and clear "safe" send packet errors related to freeze
1275         * and cancelling sends.  Re-enable error interrupts before possible
1276         * force of re-interrupt on pending interrupts.
1277         */
1278        qib_write_kreg(dd, kr_hwerrclear, 0ULL);
1279        qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
1280        qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1281        qib_7220_set_intr_state(dd, 1);
1282}
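
/*
 * Note on the replay feature relied on above (a sketch of the semantics,
 * not new logic): a write to a *clear register only clears the bits that
 * are 1 in the written value.  Any bit written as 0 whose *status bit is
 * still set is re-reported as a fresh interrupt once the corresponding
 * *mask bit allows it, so the 0ULL written to kr_hwerrclear above clears
 * nothing but forces re-delivery of anything still pending.
 */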
1283
1284/**
1285 * qib_7220_handle_hwerrors - display hardware errors.
1286 * @dd: the qlogic_ib device
1287 * @msg: the output buffer
1288 * @msgl: the size of the output buffer
1289 *
1290 * Most hardware errors are catastrophic, but for right now,
1291 * we'll print them and continue.  We reuse the same message
1292 * buffer as handle_7220_errors() to avoid excessive stack
1293 * usage.
1294 */
1295static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
1296                                     size_t msgl)
1297{
1298        u64 hwerrs;
1299        u32 bits, ctrl;
1300        int isfatal = 0;
1301        char *bitsmsg;
1302
1303        hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
1304        if (!hwerrs)
1305                goto bail;
1306        if (hwerrs == ~0ULL) {
1307                qib_dev_err(dd,
1308                        "Read of hardware error status failed (all bits set); ignoring\n");
1309                goto bail;
1310        }
1311        qib_stats.sps_hwerrs++;
1312
1313        /*
1314         * Always clear the error status register, except MEMBISTFAIL,
1315         * regardless of whether we continue or stop using the chip.
1316         * We want that set so we know it failed, even across driver reload.
1317         * We'll still ignore it in the hwerrmask.  We do this partly for
1318         * diagnostics, but also for support.
1319         */
1320        qib_write_kreg(dd, kr_hwerrclear,
1321                       hwerrs & ~HWE_MASK(PowerOnBISTFailed));
1322
1323        hwerrs &= dd->cspec->hwerrmask;
1324
1325        if (hwerrs & ~(TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC |
1326                       RXE_PARITY))
1327                qib_devinfo(dd->pcidev,
1328                        "Hardware error: hwerr=0x%llx (cleared)\n",
1329                        (unsigned long long) hwerrs);
1330
1331        if (hwerrs & ~IB_HWE_BITSEXTANT)
1332                qib_dev_err(dd,
1333                        "hwerror interrupt with unknown errors %llx set\n",
1334                        (unsigned long long) (hwerrs & ~IB_HWE_BITSEXTANT));
1335
1336        if (hwerrs & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR)
1337                qib_sd7220_clr_ibpar(dd);
1338
1339        ctrl = qib_read_kreg32(dd, kr_control);
1340        if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
1341                /*
1342                 * Parity errors in send memory are recoverable by h/w;
1343                 * just do housekeeping, exit freeze mode and continue.
1344                 */
1345                if (hwerrs & (TXEMEMPARITYERR_PIOBUF |
1346                              TXEMEMPARITYERR_PIOPBC)) {
1347                        qib_7220_txe_recover(dd);
1348                        hwerrs &= ~(TXEMEMPARITYERR_PIOBUF |
1349                                    TXEMEMPARITYERR_PIOPBC);
1350                }
1351                if (hwerrs)
1352                        isfatal = 1;
1353                else
1354                        qib_7220_clear_freeze(dd);
1355        }
1356
1357        *msg = '\0';
1358
1359        if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
1360                isfatal = 1;
1361                strlcat(msg,
1362                        "[Memory BIST test failed, InfiniPath hardware unusable]",
1363                        msgl);
1364                /* ignore from now on, so disable until driver reloaded */
1365                dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
1366                qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
1367        }
1368
1369        qib_format_hwerrors(hwerrs, qib_7220_hwerror_msgs,
1370                            ARRAY_SIZE(qib_7220_hwerror_msgs), msg, msgl);
1371
1372        bitsmsg = dd->cspec->bitsmsgbuf;
1373        if (hwerrs & (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<
1374                      QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT)) {
1375                bits = (u32) ((hwerrs >>
1376                               QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
1377                              QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
1378                snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
1379                         "[PCIe Mem Parity Errs %x] ", bits);
1380                strlcat(msg, bitsmsg, msgl);
1381        }
1382
1383#define _QIB_PLL_FAIL (QLOGIC_IB_HWE_COREPLL_FBSLIP |   \
1384                         QLOGIC_IB_HWE_COREPLL_RFSLIP)
1385
1386        if (hwerrs & _QIB_PLL_FAIL) {
1387                isfatal = 1;
1388                snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
1389                         "[PLL failed (%llx), InfiniPath hardware unusable]",
1390                         (unsigned long long) hwerrs & _QIB_PLL_FAIL);
1391                strlcat(msg, bitsmsg, msgl);
1392                /* ignore from now on, so disable until driver reloaded */
1393                dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL);
1394                qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
1395        }
1396
1397        if (hwerrs & QLOGIC_IB_HWE_SERDESPLLFAILED) {
1398                /*
1399                 * If it occurs, it is left masked since the external
1400                 * interface is unused.
1401                 */
1402                dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED;
1403                qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
1404        }
1405
1406        qib_dev_err(dd, "%s hardware error\n", msg);
1407
1408        if (isfatal && !dd->diag_client) {
1409                qib_dev_err(dd,
1410                        "Fatal Hardware Error, no longer usable, SN %.16s\n",
1411                        dd->serial);
1412                /*
1413                 * For /sys status file and user programs to print; if no
1414                 * trailing brace is copied, we'll know it was truncated.
1415                 */
1416                if (dd->freezemsg)
1417                        snprintf(dd->freezemsg, dd->freezelen,
1418                                 "{%s}", msg);
1419                qib_disable_after_error(dd);
1420        }
1421bail:;
1422}
1423
1424/**
1425 * qib_7220_init_hwerrors - enable hardware errors
1426 * @dd: the qlogic_ib device
1427 *
1428 * Now that we have finished initializing everything that might reasonably
1429 * cause a hardware error, and cleared those error bits as they occur,
1430 * we can enable hardware errors in the mask (potentially enabling
1431 * freeze mode), and enable hardware errors as errors (along with
1432 * everything else) in errormask.
1433 */
1434static void qib_7220_init_hwerrors(struct qib_devdata *dd)
1435{
1436        u64 val;
1437        u64 extsval;
1438
1439        extsval = qib_read_kreg64(dd, kr_extstatus);
1440
1441        if (!(extsval & (QLOGIC_IB_EXTS_MEMBIST_ENDTEST |
1442                         QLOGIC_IB_EXTS_MEMBIST_DISABLED)))
1443                qib_dev_err(dd, "MemBIST did not complete!\n");
1444        if (extsval & QLOGIC_IB_EXTS_MEMBIST_DISABLED)
1445                qib_devinfo(dd->pcidev, "MemBIST is disabled.\n");
1446
1447        val = ~0ULL;    /* default to all hwerrors becoming interrupts */
1448
1449        val &= ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR;
1450        dd->cspec->hwerrmask = val;
1451
1452        qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
1453        qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
1454
1455        /* clear all */
1456        qib_write_kreg(dd, kr_errclear, ~0ULL);
1457        /* enable errors that are masked, at least this first time. */
1458        qib_write_kreg(dd, kr_errmask, ~0ULL);
1459        dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
1460        /* clear any interrupts up to this point (ints still not enabled) */
1461        qib_write_kreg(dd, kr_intclear, ~0ULL);
1462}
1463
1464/*
1465 * Enable or disable the armlaunch error.  Used for PIO bandwidth testing
1466 * on chips that are count-based, rather than trigger-based.  There is no
1467 * reference counting, but that's also fine, given the intended use.
1468 * Only chip-specific because it's all register accesses.
1469 */
1470static void qib_set_7220_armlaunch(struct qib_devdata *dd, u32 enable)
1471{
1472        if (enable) {
1473                qib_write_kreg(dd, kr_errclear, ERR_MASK(SendPioArmLaunchErr));
1474                dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr);
1475        } else
1476                dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr);
1477        qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1478}
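
/*
 * Hypothetical usage sketch for the helper above: a PIO bandwidth test
 * would bracket its hot loop like this (run_pio_bw_test() is made up
 * for illustration; there is no reference counting, so such brackets
 * must not be nested):
 *
 *      qib_set_7220_armlaunch(dd, 0);
 *      run_pio_bw_test(dd);
 *      qib_set_7220_armlaunch(dd, 1);
 */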
1479
1480/*
1481 * Formerly took parameter <which> in pre-shifted,
1482 * pre-merged form with LinkCmd and LinkInitCmd
1483 * together, and assuming the zero was NOP.
1484 */
1485static void qib_set_ib_7220_lstate(struct qib_pportdata *ppd, u16 linkcmd,
1486                                   u16 linitcmd)
1487{
1488        u64 mod_wd;
1489        struct qib_devdata *dd = ppd->dd;
1490        unsigned long flags;
1491
1492        if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
1493                /*
1494                 * If we are told to disable, note that so link-recovery
1495                 * code does not attempt to bring us back up.
1496                 */
1497                spin_lock_irqsave(&ppd->lflags_lock, flags);
1498                ppd->lflags |= QIBL_IB_LINK_DISABLED;
1499                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
1500        } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
1501                /*
1502                 * Any other linkinitcmd will lead to LINKDOWN and then
1503                 * to INIT (if all is well), so clear flag to let
1504                 * link-recovery code attempt to bring us back up.
1505                 */
1506                spin_lock_irqsave(&ppd->lflags_lock, flags);
1507                ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
1508                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
1509        }
1510
1511        mod_wd = (linkcmd << IBA7220_IBCC_LINKCMD_SHIFT) |
1512                (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
1513
1514        qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl | mod_wd);
1515        /* write to chip to prevent back-to-back writes of ibc reg */
1516        qib_write_kreg(dd, kr_scratch, 0);
1517}
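
/*
 * Illustrative call (a sketch mirroring qib_7220_quiet_serdes() below):
 * take the link down for unload and keep it disabled, so link-recovery
 * code will not try to bring it back up:
 *
 *      qib_set_ib_7220_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
 *
 * A linkcmd of zero issues no link command, only the linkinitcmd, per
 * the note above the function.
 */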
1518
1519/*
1520 * All detailed interaction with the SerDes has been moved to qib_sd7220.c
1521 *
1522 * The portion of IBA7220-specific bringup_serdes() that actually deals with
1523 * registers and memory within the SerDes itself is qib_sd7220_init().
1524 */
1525
1526/**
1527 * qib_7220_bringup_serdes - bring up the serdes
1528 * @ppd: physical port on the qlogic_ib device
1529 */
1530static int qib_7220_bringup_serdes(struct qib_pportdata *ppd)
1531{
1532        struct qib_devdata *dd = ppd->dd;
1533        u64 val, prev_val, guid, ibc;
1534        int ret = 0;
1535
1536        /* Put IBC in reset, sends disabled */
1537        dd->control &= ~QLOGIC_IB_C_LINKENABLE;
1538        qib_write_kreg(dd, kr_control, 0ULL);
1539
1540        if (qib_compat_ddr_negotiate) {
1541                ppd->cpspec->ibdeltainprog = 1;
1542                ppd->cpspec->ibsymsnap = read_7220_creg32(dd, cr_ibsymbolerr);
1543                ppd->cpspec->iblnkerrsnap =
1544                        read_7220_creg32(dd, cr_iblinkerrrecov);
1545        }
1546
1547        /* flowcontrolwatermark is in units of KBytes */
1548        ibc = 0x5ULL << SYM_LSB(IBCCtrl, FlowCtrlWaterMark);
1549        /*
1550         * How often flowctrl is sent.  More or less in usecs; balance against
1551         * watermark value, so that in theory senders always get a flow
1552         * control update in time to not let the IB link go idle.
1553         */
1554        ibc |= 0x3ULL << SYM_LSB(IBCCtrl, FlowCtrlPeriod);
1555        /* max error tolerance */
1556        ibc |= 0xfULL << SYM_LSB(IBCCtrl, PhyerrThreshold);
1557        /* use "real" buffer space for IB credit flow control */
1558        ibc |= 4ULL << SYM_LSB(IBCCtrl, CreditScale);
1559        /* IB overrun threshold */
1560        ibc |= 0xfULL << SYM_LSB(IBCCtrl, OverrunThreshold);
1561        /*
1562         * set initial max size pkt IBC will send, including ICRC; in dwords,
1563         * it's the buffer size less the qword PBC, plus 1 for ICRC; see qib_set_mtu()
1564         */
1565        ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << SYM_LSB(IBCCtrl, MaxPktLen);
1566        ppd->cpspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */
1567
1568        /* initially come up waiting for TS1, without sending anything. */
1569        val = ppd->cpspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
1570                QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
1571        qib_write_kreg(dd, kr_ibcctrl, val);
1572
1573        if (!ppd->cpspec->ibcddrctrl) {
1574                /* not on re-init after reset */
1575                ppd->cpspec->ibcddrctrl = qib_read_kreg64(dd, kr_ibcddrctrl);
1576
1577                if (ppd->link_speed_enabled == (QIB_IB_SDR | QIB_IB_DDR))
1578                        ppd->cpspec->ibcddrctrl |=
1579                                IBA7220_IBC_SPEED_AUTONEG_MASK |
1580                                IBA7220_IBC_IBTA_1_2_MASK;
1581                else
1582                        ppd->cpspec->ibcddrctrl |=
1583                                ppd->link_speed_enabled == QIB_IB_DDR ?
1584                                IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
1585                if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
1586                    (IB_WIDTH_1X | IB_WIDTH_4X))
1587                        ppd->cpspec->ibcddrctrl |= IBA7220_IBC_WIDTH_AUTONEG;
1588                else
1589                        ppd->cpspec->ibcddrctrl |=
1590                                ppd->link_width_enabled == IB_WIDTH_4X ?
1591                                IBA7220_IBC_WIDTH_4X_ONLY :
1592                                IBA7220_IBC_WIDTH_1X_ONLY;
1593
1594                /* always enable these on driver reload, not sticky */
1595                ppd->cpspec->ibcddrctrl |=
1596                        IBA7220_IBC_RXPOL_MASK << IBA7220_IBC_RXPOL_SHIFT;
1597                ppd->cpspec->ibcddrctrl |=
1598                        IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
1599
1600                /* enable automatic lane reversal detection for receive */
1601                ppd->cpspec->ibcddrctrl |= IBA7220_IBC_LANE_REV_SUPPORTED;
1602        } else
1603                /* write to chip to prevent back-to-back writes of ibc reg */
1604                qib_write_kreg(dd, kr_scratch, 0);
1605
1606        qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
1607        qib_write_kreg(dd, kr_scratch, 0);
1608
1609        qib_write_kreg(dd, kr_ncmodectrl, 0ULL);
1610        qib_write_kreg(dd, kr_scratch, 0);
1611
1612        ret = qib_sd7220_init(dd);
1613
1614        val = qib_read_kreg64(dd, kr_xgxs_cfg);
1615        prev_val = val;
1616        val |= QLOGIC_IB_XGXS_FC_SAFE;
1617        if (val != prev_val) {
1618                qib_write_kreg(dd, kr_xgxs_cfg, val);
1619                qib_read_kreg32(dd, kr_scratch);
1620        }
1621        if (val & QLOGIC_IB_XGXS_RESET)
1622                val &= ~QLOGIC_IB_XGXS_RESET;
1623        if (val != prev_val)
1624                qib_write_kreg(dd, kr_xgxs_cfg, val);
1625
1626        /* first time through, set port guid */
1627        if (!ppd->guid)
1628                ppd->guid = dd->base_guid;
1629        guid = be64_to_cpu(ppd->guid);
1630
1631        qib_write_kreg(dd, kr_hrtbt_guid, guid);
1632        if (!ret) {
1633                dd->control |= QLOGIC_IB_C_LINKENABLE;
1634                qib_write_kreg(dd, kr_control, dd->control);
1635        } else
1636                /* write to chip to prevent back-to-back writes of ibc reg */
1637                qib_write_kreg(dd, kr_scratch, 0);
1638        return ret;
1639}
1640
1641/**
1642 * qib_7220_quiet_serdes - set serdes to txidle
1643 * @ppd: physical port of the qlogic_ib device
1644 * Called when driver is being unloaded
1645 */
1646static void qib_7220_quiet_serdes(struct qib_pportdata *ppd)
1647{
1648        u64 val;
1649        struct qib_devdata *dd = ppd->dd;
1650        unsigned long flags;
1651
1652        /* disable IBC */
1653        dd->control &= ~QLOGIC_IB_C_LINKENABLE;
1654        qib_write_kreg(dd, kr_control,
1655                       dd->control | QLOGIC_IB_C_FREEZEMODE);
1656
1657        ppd->cpspec->chase_end = 0;
1658        if (ppd->cpspec->chase_timer.function) /* if initted */
1659                del_timer_sync(&ppd->cpspec->chase_timer);
1660
1661        if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
1662            ppd->cpspec->ibdeltainprog) {
1663                u64 diagc;
1664
1665                /* enable counter writes */
1666                diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
1667                qib_write_kreg(dd, kr_hwdiagctrl,
1668                               diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
1669
1670                if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
1671                        val = read_7220_creg32(dd, cr_ibsymbolerr);
1672                        if (ppd->cpspec->ibdeltainprog)
1673                                val -= val - ppd->cpspec->ibsymsnap;
1674                        val -= ppd->cpspec->ibsymdelta;
1675                        write_7220_creg(dd, cr_ibsymbolerr, val);
1676                }
1677                if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
1678                        val = read_7220_creg32(dd, cr_iblinkerrrecov);
1679                        if (ppd->cpspec->ibdeltainprog)
1680                                val -= val - ppd->cpspec->iblnkerrsnap;
1681                        val -= ppd->cpspec->iblnkerrdelta;
1682                        write_7220_creg(dd, cr_iblinkerrrecov, val);
1683                }
1684
1685                /* and disable counter writes */
1686                qib_write_kreg(dd, kr_hwdiagctrl, diagc);
1687        }
1688        qib_set_ib_7220_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1689
1690        spin_lock_irqsave(&ppd->lflags_lock, flags);
1691        ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
1692        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
1693        wake_up(&ppd->cpspec->autoneg_wait);
1694        cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
1695
1696        shutdown_7220_relock_poll(ppd->dd);
1697        val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
1698        val |= QLOGIC_IB_XGXS_RESET;
1699        qib_write_kreg(ppd->dd, kr_xgxs_cfg, val);
1700}
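
/*
 * Worked example for the counter rollback above (illustrative numbers):
 * if cr_ibsymbolerr reads 10, the snapshot taken at bringup (ibsymsnap)
 * was 4, and 2 earlier errors were already attributed to link recovery
 * (ibsymdelta == 2), then "val -= val - ppd->cpspec->ibsymsnap" leaves 4,
 * and the delta subtraction writes back 4 - 2 = 2, so symbol errors
 * accumulated during bringup and recovery are not charged to the link.
 */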
1701
1702/**
1703 * qib_setup_7220_setextled - set the state of the two external LEDs
1704 * @ppd: the qlogic_ib port data
1705 * @on: whether the link is up or not
1706 *
1707 * The exact combo of LEDs if on is true is determined by looking
1708 * at the ibcstatus.
1709 *
1710 * These LEDs indicate the physical and logical state of IB link.
1711 * For this chip (at least with recommended board pinouts), LED1
1712 * is Yellow (logical state) and LED2 is Green (physical state).
1713 *
1714 * Note:  We try to match the Mellanox HCA LED behavior as best
1715 * we can.  Green indicates physical link state is OK (something is
1716 * plugged in, and we can train).
1717 * Amber indicates the link is logically up (ACTIVE).
1718 * Mellanox further blinks the amber LED to indicate data packet
1719 * activity, but we have no hardware support for that, so it would
1720 * require waking up every 10-20 msecs and checking the counters
1721 * on the chip, and then turning the LED off if appropriate.  That's
1722 * visible overhead, so not something we will do.
1723 *
1724 */
1725static void qib_setup_7220_setextled(struct qib_pportdata *ppd, u32 on)
1726{
1727        struct qib_devdata *dd = ppd->dd;
1728        u64 extctl, ledblink = 0, val, lst, ltst;
1729        unsigned long flags;
1730
1731        /*
1732         * The diags use the LED to indicate diag info, so we leave
1733         * the external LED alone when the diags are running.
1734         */
1735        if (dd->diag_client)
1736                return;
1737
1738        if (ppd->led_override) {
1739                ltst = (ppd->led_override & QIB_LED_PHYS) ?
1740                        IB_PHYSPORTSTATE_LINKUP : IB_PHYSPORTSTATE_DISABLED;
1741                lst = (ppd->led_override & QIB_LED_LOG) ?
1742                        IB_PORT_ACTIVE : IB_PORT_DOWN;
1743        } else if (on) {
1744                val = qib_read_kreg64(dd, kr_ibcstatus);
1745                ltst = qib_7220_phys_portstate(val);
1746                lst = qib_7220_iblink_state(val);
1747        } else {
1748                ltst = 0;
1749                lst = 0;
1750        }
1751
1752        spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
1753        extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) |
1754                                 SYM_MASK(EXTCtrl, LEDPriPortYellowOn));
1755        if (ltst == IB_PHYSPORTSTATE_LINKUP) {
1756                extctl |= SYM_MASK(EXTCtrl, LEDPriPortGreenOn);
1757                /*
1758                 * counts are in chip clock (4ns) periods.
1759                 * This is 1/16 sec (66.6ms) on,
1760                 * 3/16 sec (187.5 ms) off, with packets rcvd
1761                 */
1762                ledblink = ((66600 * 1000UL / 4) << IBA7220_LEDBLINK_ON_SHIFT)
1763                        | ((187500 * 1000UL / 4) << IBA7220_LEDBLINK_OFF_SHIFT);
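                /*
                 * Arithmetic check (illustrative): 66600 us * 1000 / 4 ns
                 * gives 16,650,000 ticks (66.6 ms on), and 187500 us *
                 * 1000 / 4 ns gives 46,875,000 ticks (187.5 ms off).
                 */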
1764        }
1765        if (lst == IB_PORT_ACTIVE)
1766                extctl |= SYM_MASK(EXTCtrl, LEDPriPortYellowOn);
1767        dd->cspec->extctrl = extctl;
1768        qib_write_kreg(dd, kr_extctrl, extctl);
1769        spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
1770
1771        if (ledblink) /* blink the LED on packet receive */
1772                qib_write_kreg(dd, kr_rcvpktledcnt, ledblink);
1773}
1774
1775/*
1776 * qib_setup_7220_cleanup - clean up any chip-specific stuff
1777 * @dd: the qlogic_ib device
1778 *
1779 * This is called during driver unload.
1780 *
1781 */
1782static void qib_setup_7220_cleanup(struct qib_devdata *dd)
1783{
1784        qib_free_irq(dd);
1785        kfree(dd->cspec->cntrs);
1786        kfree(dd->cspec->portcntrs);
1787}
1788
1789/*
1790 * This is only called for SDmaInt.
1791 * SDmaDisabled is handled on the error path.
1792 */
1793static void sdma_7220_intr(struct qib_pportdata *ppd, u64 istat)
1794{
1795        unsigned long flags;
1796
1797        spin_lock_irqsave(&ppd->sdma_lock, flags);
1798
1799        switch (ppd->sdma_state.current_state) {
1800        case qib_sdma_state_s00_hw_down:
1801                break;
1802
1803        case qib_sdma_state_s10_hw_start_up_wait:
1804                __qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
1805                break;
1806
1807        case qib_sdma_state_s20_idle:
1808                break;
1809
1810        case qib_sdma_state_s30_sw_clean_up_wait:
1811                break;
1812
1813        case qib_sdma_state_s40_hw_clean_up_wait:
1814                break;
1815
1816        case qib_sdma_state_s50_hw_halt_wait:
1817                __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
1818                break;
1819
1820        case qib_sdma_state_s99_running:
1821                /* too chatty to print here */
1822                __qib_sdma_intr(ppd);
1823                break;
1824        }
1825        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1826}
1827
1828static void qib_wantpiobuf_7220_intr(struct qib_devdata *dd, u32 needint)
1829{
1830        unsigned long flags;
1831
1832        spin_lock_irqsave(&dd->sendctrl_lock, flags);
1833        if (needint) {
1834                if (!(dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
1835                        goto done;
1836                /*
1837                 * blip the availupd off, next write will be on, so
1838                 * we ensure an avail update, regardless of threshold or
1839                 * buffers becoming free, whenever we want an interrupt
1840                 */
1841                qib_write_kreg(dd, kr_sendctrl, dd->sendctrl &
1842                        ~SYM_MASK(SendCtrl, SendBufAvailUpd));
1843                qib_write_kreg(dd, kr_scratch, 0ULL);
1844                dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
1845        } else
1846                dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
1847        qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
1848        qib_write_kreg(dd, kr_scratch, 0ULL);
1849done:
1850        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
1851}
1852
1853/*
1854 * Handle errors and unusual events first; kept in a separate function
1855 * to improve cache hits for fast path interrupt handling.
1856 */
1857static noinline void unlikely_7220_intr(struct qib_devdata *dd, u64 istat)
1858{
1859        if (unlikely(istat & ~QLOGIC_IB_I_BITSEXTANT))
1860                qib_dev_err(dd,
1861                            "interrupt with unknown interrupts %Lx set\n",
1862                            istat & ~QLOGIC_IB_I_BITSEXTANT);
1863
1864        if (istat & QLOGIC_IB_I_GPIO) {
1865                u32 gpiostatus;
1866
1867                /*
1868                 * Boards for this chip currently don't use GPIO interrupts,
1869                 * so clear by writing GPIOstatus to GPIOclear, and complain
1870                 * to alert developer. To avoid endless repeats, clear
1871                 * the bits in the mask, since there is some kind of
1872                 * programming error or chip problem.
1873                 */
1874                gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
1875                /*
1876                 * In theory, writing GPIOstatus to GPIOclear could
1877                 * have a bad side-effect on some diagnostic that wanted
1878                 * to poll for a status-change, but the various shadows
1879                 * make that problematic at best. Diags will just suppress
1880                 * all GPIO interrupts during such tests.
1881                 */
1882                qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
1883
1884                if (gpiostatus) {
1885                        const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
1886                        u32 gpio_irq = mask & gpiostatus;
1887
1888                        /*
1889                         * A bit set in both the status and (chip) mask
1890                         * registers would cause an interrupt. Since we
1891                         * are not expecting any, report it. Also check
1892                         * that the chip reflects our shadow, report any
1893                         * issues, and refresh the chip from the shadow:
1894                         * clear the troublemakers from the shadow mask
1895                         * copy, then write the shadow back to the chip
1896                         * so a spurious source cannot re-trigger
1897                         * endlessly.
1898                         */
1899                        dd->cspec->gpio_mask &= ~gpio_irq;
1900                        qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
1901                }
1902        }
1903
1904        if (istat & QLOGIC_IB_I_ERROR) {
1905                u64 estat;
1906
1907                qib_stats.sps_errints++;
1908                estat = qib_read_kreg64(dd, kr_errstatus);
1909                if (!estat)
1910                        qib_devinfo(dd->pcidev,
1911                                "error interrupt (%Lx), but no error bits set!\n",
1912                                istat);
1913                else
1914                        handle_7220_errors(dd, estat);
1915        }
1916}
1917
1918static irqreturn_t qib_7220intr(int irq, void *data)
1919{
1920        struct qib_devdata *dd = data;
1921        irqreturn_t ret;
1922        u64 istat;
1923        u64 ctxtrbits;
1924        u64 rmask;
1925        unsigned i;
1926
1927        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
1928                /*
1929                 * This return value is not great, but we do not want the
1930                 * interrupt core code to remove our interrupt handler
1931                 * because we don't appear to be handling an interrupt
1932                 * during a chip reset.
1933                 */
1934                ret = IRQ_HANDLED;
1935                goto bail;
1936        }
1937
1938        istat = qib_read_kreg64(dd, kr_intstatus);
1939
1940        if (unlikely(!istat)) {
1941                ret = IRQ_NONE; /* not our interrupt, or already handled */
1942                goto bail;
1943        }
1944        if (unlikely(istat == -1)) {
1945                qib_bad_intrstatus(dd);
1946                /* don't know if it was our interrupt or not */
1947                ret = IRQ_NONE;
1948                goto bail;
1949        }
1950
1951        this_cpu_inc(*dd->int_counter);
1952        if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT |
1953                              QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR)))
1954                unlikely_7220_intr(dd, istat);
1955
1956        /*
1957         * Clear the interrupt bits we found set, relatively early, so we
1958         * "know" the chip will have seen this by the time we process
1959         * the queue, and will re-interrupt if necessary.  The processor
1960         * itself won't take the interrupt again until we return.
1961         */
1962        qib_write_kreg(dd, kr_intclear, istat);
1963
1964        /*
1965         * Handle kernel receive queues before checking for pio buffers
1966         * available since receives can overflow; piobuf waiters can afford
1967         * a few extra cycles, since they were waiting anyway.
1968         */
1969        ctxtrbits = istat &
1970                ((QLOGIC_IB_I_RCVAVAIL_MASK << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
1971                 (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT));
1972        if (ctxtrbits) {
1973                rmask = (1ULL << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
1974                        (1ULL << QLOGIC_IB_I_RCVURG_SHIFT);
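                /*
                 * rmask holds the avail+urg interrupt bit pair for kernel
                 * context 0; shifting it left once per iteration walks the
                 * adjacent per-context bit pairs, one position per context.
                 */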
1975                for (i = 0; i < dd->first_user_ctxt; i++) {
1976                        if (ctxtrbits & rmask) {
1977                                ctxtrbits &= ~rmask;
1978                                qib_kreceive(dd->rcd[i], NULL, NULL);
1979                        }
1980                        rmask <<= 1;
1981                }
1982                if (ctxtrbits) {
1983                        ctxtrbits =
1984                                (ctxtrbits >> QLOGIC_IB_I_RCVAVAIL_SHIFT) |
1985                                (ctxtrbits >> QLOGIC_IB_I_RCVURG_SHIFT);
1986                        qib_handle_urcv(dd, ctxtrbits);
1987                }
1988        }
1989
1990        /* only call for SDmaInt */
1991        if (istat & QLOGIC_IB_I_SDMAINT)
1992                sdma_7220_intr(dd->pport, istat);
1993
1994        if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
1995                qib_ib_piobufavail(dd);
1996
1997        ret = IRQ_HANDLED;
1998bail:
1999        return ret;
2000}
2001
2002/*
2003 * Set up our chip-specific interrupt handler.
2004 * The interrupt type has already been set up, so
2005 * we just need to do the registration and error checking.
2006 * If we are using MSI interrupts, we may fall back to
2007 * INTx later, if the interrupt handler doesn't get called
2008 * within 1/2 second (see verify_interrupt()).
2009 */
2010static void qib_setup_7220_interrupt(struct qib_devdata *dd)
2011{
2012        int ret;
2013
2014        ret = pci_request_irq(dd->pcidev, 0, qib_7220intr, NULL, dd,
2015                              QIB_DRV_NAME);
2016        if (ret)
2017                qib_dev_err(dd, "Couldn't setup %s interrupt (irq=%d): %d\n",
2018                            dd->pcidev->msi_enabled ?  "MSI" : "INTx",
2019                            pci_irq_vector(dd->pcidev, 0), ret);
2020}
2021
2022/**
2023 * qib_7220_boardname - fill in the board name
2024 * @dd: the qlogic_ib device
2025 *
2026 * info is based on the board revision register
2027 */
2028static void qib_7220_boardname(struct qib_devdata *dd)
2029{
2030        u32 boardid;
2031
2032        boardid = SYM_FIELD(dd->revision, Revision,
2033                            BoardID);
2034
2035        switch (boardid) {
2036        case 1:
2037                dd->boardname = "InfiniPath_QLE7240";
2038                break;
2039        case 2:
2040                dd->boardname = "InfiniPath_QLE7280";
2041                break;
2042        default:
2043                qib_dev_err(dd, "Unknown 7220 board with ID %u\n", boardid);
2044                dd->boardname = "Unknown_InfiniPath_7220";
2045                break;
2046        }
2047
2048        if (dd->majrev != 5 || !dd->minrev || dd->minrev > 2)
2049                qib_dev_err(dd,
2050                            "Unsupported InfiniPath hardware revision %u.%u!\n",
2051                            dd->majrev, dd->minrev);
2052
2053        snprintf(dd->boardversion, sizeof(dd->boardversion),
2054                 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
2055                 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
2056                 (unsigned int)SYM_FIELD(dd->revision, Revision_R, Arch),
2057                 dd->majrev, dd->minrev,
2058                 (unsigned int)SYM_FIELD(dd->revision, Revision_R, SW));
2059}
2060
2061/*
2062 * This routine sleeps, so it can only be called from user context, not
2063 * from interrupt context.
2064 */
2065static int qib_setup_7220_reset(struct qib_devdata *dd)
2066{
2067        u64 val;
2068        int i;
2069        int ret;
2070        u16 cmdval;
2071        u8 int_line, clinesz;
2072        unsigned long flags;
2073
2074        qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
2075
2076        /* Use dev_err so it shows up in logs, etc. */
2077        qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
2078
2079        /* no interrupts till re-initted */
2080        qib_7220_set_intr_state(dd, 0);
2081
2082        dd->pport->cpspec->ibdeltainprog = 0;
2083        dd->pport->cpspec->ibsymdelta = 0;
2084        dd->pport->cpspec->iblnkerrdelta = 0;
2085
2086        /*
2087         * Keep chip from being accessed until we are ready.  Use
2088         * writeq() directly, to allow the write even though QIB_PRESENT
2089         * isn't set.
2090         */
2091        dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
2092        /* so we check interrupts work again */
2093        dd->z_int_counter = qib_int_counter(dd);
2094        val = dd->control | QLOGIC_IB_C_RESET;
2095        writeq(val, &dd->kregbase[kr_control]);
2096        mb(); /* prevent compiler reordering around actual reset */
2097
2098        for (i = 1; i <= 5; i++) {
2099                /*
2100                 * Allow MBIST, etc. to complete; longer on each retry.
2101                 * We sometimes get machine checks from bus timeout if no
2102                 * response, so for now, make it *really* long.
2103                 */
2104                msleep(1000 + (1 + i) * 2000);
2105
2106                qib_pcie_reenable(dd, cmdval, int_line, clinesz);
2107
2108                /*
2109                 * Use readq directly, so we don't need to mark it as PRESENT
2110                 * until we get a successful indication that all is well.
2111                 */
2112                val = readq(&dd->kregbase[kr_revision]);
2113                if (val == dd->revision) {
2114                        dd->flags |= QIB_PRESENT; /* it's back */
2115                        ret = qib_reinit_intr(dd);
2116                        goto bail;
2117                }
2118        }
2119        ret = 0; /* failed */
2120
2121bail:
2122        if (ret) {
2123                if (qib_pcie_params(dd, dd->lbus_width, NULL))
2124                        qib_dev_err(dd,
2125                                "Reset failed to setup PCIe or interrupts; continuing anyway\n");
2126
2127                /* hold IBC in reset, no sends, etc till later */
2128                qib_write_kreg(dd, kr_control, 0ULL);
2129
2130                /* clear the reset error, init error/hwerror mask */
2131                qib_7220_init_hwerrors(dd);
2132
2133                /* do setup similar to speed or link-width changes */
2134                if (dd->pport->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK)
2135                        dd->cspec->presets_needed = 1;
2136                spin_lock_irqsave(&dd->pport->lflags_lock, flags);
2137                dd->pport->lflags |= QIBL_IB_FORCE_NOTIFY;
2138                dd->pport->lflags &= ~QIBL_IB_AUTONEG_FAILED;
2139                spin_unlock_irqrestore(&dd->pport->lflags_lock, flags);
2140        }
2141
2142        return ret;
2143}
2144
2145/**
2146 * qib_7220_put_tid - write a TID to the chip
2147 * @dd: the qlogic_ib device
2148 * @tidptr: pointer to the expected TID (in chip) to update
2149 * @type: RCVHQ_RCV_TYPE_EAGER or RCVHQ_RCV_TYPE_EXPECTED
2150 * @pa: physical address of in memory buffer; tidinvalid if freeing
2151 */
2152static void qib_7220_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
2153                             u32 type, unsigned long pa)
2154{
2155        if (pa != dd->tidinvalid) {
2156                u64 chippa = pa >> IBA7220_TID_PA_SHIFT;
2157
2158                /* paranoia checks */
2159                if (pa != (chippa << IBA7220_TID_PA_SHIFT)) {
2160                        qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
2161                                    pa);
2162                        return;
2163                }
2164                if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) {
2165                        qib_dev_err(dd,
2166                                "Physical page address 0x%lx larger than supported\n",
2167                                pa);
2168                        return;
2169                }
2170
2171                if (type == RCVHQ_RCV_TYPE_EAGER)
2172                        chippa |= dd->tidtemplate;
2173                else /* for now, always full 4KB page */
2174                        chippa |= IBA7220_TID_SZ_4K;
2175                pa = chippa;
2176        }
2177        writeq(pa, tidptr);
2178        mmiowb();
2179}
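
/*
 * Worked example (illustrative, assuming IBA7220_TID_PA_SHIFT is 11,
 * i.e. 2KB alignment, as the check above implies): an eager buffer at
 * physical address 0x1234800 packs as 0x1234800 >> 11 == 0x2469, is
 * ORed with dd->tidtemplate (the buffer-size bits), and lands in the
 * TID slot via the single 64-bit writeq() above.
 */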
2180
2181/**
2182 * qib_7220_clear_tids - clear all TID entries for a ctxt, expected and eager
2183 * @dd: the qlogic_ib device
2184 * @rcd: the ctxt data
2185 *
2186 * clear all TID entries for a ctxt, expected and eager.
2187 * Used from qib_close().  On this chip, TIDs are only 32 bits,
2188 * not 64, but they are still on 64 bit boundaries, so tidbase
2189 * is declared as u64 * for the pointer math, even though we write 32 bits
2190 */
2191static void qib_7220_clear_tids(struct qib_devdata *dd,
2192                                struct qib_ctxtdata *rcd)
2193{
2194        u64 __iomem *tidbase;
2195        unsigned long tidinv;
2196        u32 ctxt;
2197        int i;
2198
2199        if (!dd->kregbase || !rcd)
2200                return;
2201
2202        ctxt = rcd->ctxt;
2203
2204        tidinv = dd->tidinvalid;
2205        tidbase = (u64 __iomem *)
2206                ((char __iomem *)(dd->kregbase) +
2207                 dd->rcvtidbase +
2208                 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
2209
2210        for (i = 0; i < dd->rcvtidcnt; i++)
2211                qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
2212                                 tidinv);
2213
2214        tidbase = (u64 __iomem *)
2215                ((char __iomem *)(dd->kregbase) +
2216                 dd->rcvegrbase +
2217                 rcd->rcvegr_tid_base * sizeof(*tidbase));
2218
2219        for (i = 0; i < rcd->rcvegrcnt; i++)
2220                qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
2221                                 tidinv);
2222}
2223
2224/**
2225 * qib_7220_tidtemplate - setup constants for TID updates
2226 * @dd: the qlogic_ib device
2227 *
2228 * We set up values that we use a lot, to avoid recalculating them each time
2229 */
2230static void qib_7220_tidtemplate(struct qib_devdata *dd)
2231{
2232        if (dd->rcvegrbufsize == 2048)
2233                dd->tidtemplate = IBA7220_TID_SZ_2K;
2234        else if (dd->rcvegrbufsize == 4096)
2235                dd->tidtemplate = IBA7220_TID_SZ_4K;
2236        dd->tidinvalid = 0;
2237}
2238
2239/**
2240 * qib_init_7220_get_base_info - set chip-specific flags for user code
2241 * @rcd: the qlogic_ib ctxt
2242 * @kinfo: qib_base_info pointer
2243 *
2244 * We set the PCIE flag because the lower bandwidth on PCIe vs
2245 * HyperTransport can affect some user packet algorithms.
2246 */
2247static int qib_7220_get_base_info(struct qib_ctxtdata *rcd,
2248                                  struct qib_base_info *kinfo)
2249{
2250        kinfo->spi_runtime_flags |= QIB_RUNTIME_PCIE |
2251                QIB_RUNTIME_NODMA_RTAIL | QIB_RUNTIME_SDMA;
2252
2253        if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
2254                kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
2255
2256        return 0;
2257}
2258
2259static struct qib_message_header *
2260qib_7220_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
2261{
2262        u32 offset = qib_hdrget_offset(rhf_addr);
2263
2264        return (struct qib_message_header *)
2265                (rhf_addr - dd->rhf_offset + offset);
2266}
2267
2268static void qib_7220_config_ctxts(struct qib_devdata *dd)
2269{
2270        unsigned long flags;
2271        u32 nchipctxts;
2272
2273        nchipctxts = qib_read_kreg32(dd, kr_portcnt);
2274        dd->cspec->numctxts = nchipctxts;
2275        if (qib_n_krcv_queues > 1) {
2276                dd->qpn_mask = 0x3e;
2277                dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
2278                if (dd->first_user_ctxt > nchipctxts)
2279                        dd->first_user_ctxt = nchipctxts;
2280        } else
2281                dd->first_user_ctxt = dd->num_pports;
2282        dd->n_krcv_queues = dd->first_user_ctxt;
2283
2284        if (!qib_cfgctxts) {
2285                int nctxts = dd->first_user_ctxt + num_online_cpus();
2286
2287                if (nctxts <= 5)
2288                        dd->ctxtcnt = 5;
2289                else if (nctxts <= 9)
2290                        dd->ctxtcnt = 9;
2291                else if (nctxts <= nchipctxts)
2292                        dd->ctxtcnt = nchipctxts;
2293        } else if (qib_cfgctxts <= nchipctxts)
2294                dd->ctxtcnt = qib_cfgctxts;
2295        if (!dd->ctxtcnt) /* none of the above, set to max */
2296                dd->ctxtcnt = nchipctxts;
2297
2298        /*
2299         * Chip can be configured for 5, 9, or 17 ctxts, and choice
2300         * affects number of eager TIDs per ctxt (1K, 2K, 4K).
2301         * Lock to be paranoid about later motion, etc.
2302         */
2303        spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2304        if (dd->ctxtcnt > 9)
2305                dd->rcvctrl |= 2ULL << IBA7220_R_CTXTCFG_SHIFT;
2306        else if (dd->ctxtcnt > 5)
2307                dd->rcvctrl |= 1ULL << IBA7220_R_CTXTCFG_SHIFT;
2308        /* else configure for default 5 receive ctxts */
2309        if (dd->qpn_mask)
2310                dd->rcvctrl |= 1ULL << QIB_7220_RcvCtrl_RcvQPMapEnable_LSB;
2311        qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
2312        spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2313
2314        /* kr_rcvegrcnt changes based on the number of contexts enabled */
2315        dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
2316        dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, IBA7220_KRCVEGRCNT);
2317}
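
/*
 * Worked example (illustrative): with qib_n_krcv_queues == 2 on a
 * single-port board, first_user_ctxt is 2; if qib_cfgctxts is 0 and six
 * CPUs are online, nctxts is 8, so ctxtcnt becomes 9 and CTXTCFG is
 * written as 1 (the chip's 9-context configuration), which also
 * determines the kr_rcvegrcnt value read back at the end above.
 */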
2318
2319static int qib_7220_get_ib_cfg(struct qib_pportdata *ppd, int which)
2320{
2321        int lsb, ret = 0;
2322        u64 maskr; /* right-justified mask */
2323
2324        switch (which) {
2325        case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
2326                ret = ppd->link_width_enabled;
2327                goto done;
2328
2329        case QIB_IB_CFG_LWID: /* Get currently active Link-width */
2330                ret = ppd->link_width_active;
2331                goto done;
2332
2333        case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
2334                ret = ppd->link_speed_enabled;
2335                goto done;
2336
2337        case QIB_IB_CFG_SPD: /* Get current Link spd */
2338                ret = ppd->link_speed_active;
2339                goto done;
2340
2341        case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
2342                lsb = IBA7220_IBC_RXPOL_SHIFT;
2343                maskr = IBA7220_IBC_RXPOL_MASK;
2344                break;
2345
2346        case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
2347                lsb = IBA7220_IBC_LREV_SHIFT;
2348                maskr = IBA7220_IBC_LREV_MASK;
2349                break;
2350
2351        case QIB_IB_CFG_LINKLATENCY:
2352                ret = qib_read_kreg64(ppd->dd, kr_ibcddrstatus)
2353                        & IBA7220_DDRSTAT_LINKLAT_MASK;
2354                goto done;
2355
2356        case QIB_IB_CFG_OP_VLS:
2357                ret = ppd->vls_operational;
2358                goto done;
2359
2360        case QIB_IB_CFG_VL_HIGH_CAP:
2361                ret = 0;
2362                goto done;
2363
2364        case QIB_IB_CFG_VL_LOW_CAP:
2365                ret = 0;
2366                goto done;
2367
2368        case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
2369                ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
2370                                OverrunThreshold);
2371                goto done;
2372
2373        case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
2374                ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
2375                                PhyerrThreshold);
2376                goto done;
2377
2378        case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
2379                /* will only take effect when the link state changes */
2380                ret = (ppd->cpspec->ibcctrl &
2381                       SYM_MASK(IBCCtrl, LinkDownDefaultState)) ?
2382                        IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
2383                goto done;
2384
2385        case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
2386                lsb = IBA7220_IBC_HRTBT_SHIFT;
2387                maskr = IBA7220_IBC_HRTBT_MASK;
2388                break;
2389
2390        case QIB_IB_CFG_PMA_TICKS:
2391                /*
2392                 * 0x00 = 10x link transfer rate, or 4 nsec. for 2.5Gbs.
2393                 * Since the clock is always 250MHz, the value is 1 (DDR) or 0 (SDR).
2394                 */
2395                ret = (ppd->link_speed_active == QIB_IB_DDR);
2396                goto done;
2397
2398        default:
2399                ret = -EINVAL;
2400                goto done;
2401        }
2402        ret = (int)((ppd->cpspec->ibcddrctrl >> lsb) & maskr);
2403done:
2404        return ret;
2405}
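
/*
 * The shift/mask cases above all reduce to one read pattern; an
 * equivalent standalone sketch for the heartbeat field:
 *
 *      u64 hrtbt = (ppd->cpspec->ibcddrctrl >> IBA7220_IBC_HRTBT_SHIFT) &
 *                      IBA7220_IBC_HRTBT_MASK;
 *
 * i.e. shift the cached IBCDDRCtrl shadow down to the field's LSB and
 * apply the right-justified field mask.
 */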
2406
2407static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
2408{
2409        struct qib_devdata *dd = ppd->dd;
2410        u64 maskr; /* right-justified mask */
2411        int lsb, ret = 0, setforce = 0;
2412        u16 lcmd, licmd;
2413        unsigned long flags;
2414        u32 tmp = 0;
2415
2416        switch (which) {
2417        case QIB_IB_CFG_LIDLMC:
2418                /*
2419                 * Set LID and LMC. Combined to avoid a possible hazard;
2420                 * the caller puts LMC in the 16 MSbits, DLID in the 16 LSbits of val.
2421                 */
2422                lsb = IBA7220_IBC_DLIDLMC_SHIFT;
2423                maskr = IBA7220_IBC_DLIDLMC_MASK;
2424                break;
2425
2426        case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
2427                /*
2428                 * As with speed, only write the actual register if
2429                 * the link is currently down, otherwise takes effect
2430                 * on next link change.
2431                 */
2432                ppd->link_width_enabled = val;
2433                if (!(ppd->lflags & QIBL_LINKDOWN))
2434                        goto bail;
2435                /*
2436                 * We set the QIBL_IB_FORCE_NOTIFY bit so updown
2437                 * will get called because we want to update
2438                 * link_width_active, and the change may not take
2439                 * effect for some time (if we are in POLL), so this
2440                 * flag will force the updown routine to be called
2441                 * on the next ibstatuschange down interrupt, even
2442                 * if it's not a down->up transition.
2443                 */
2444                val--; /* convert from IB to chip */
2445                maskr = IBA7220_IBC_WIDTH_MASK;
2446                lsb = IBA7220_IBC_WIDTH_SHIFT;
2447                setforce = 1;
2448                break;
2449
2450        case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
2451                /*
2452                 * If we turn off IB1.2, we need to preset SerDes defaults,
2453                 * but not right now. Set a flag for the next time
2454                 * we command the link down.  As with width, only write the
2455                 * actual register if the link is currently down, otherwise
2456                 * takes effect on next link change.  Since setting is being
2457                 * explicitly requested (via MAD or sysfs), clear autoneg
2458                 * failure status if speed autoneg is enabled.
2459                 */
2460                ppd->link_speed_enabled = val;
2461                if ((ppd->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK) &&
2462                    !(val & (val - 1)))
2463                        dd->cspec->presets_needed = 1;
2464                if (!(ppd->lflags & QIBL_LINKDOWN))
2465                        goto bail;
2466                /*
2467                 * We set the QIBL_IB_FORCE_NOTIFY bit so updown
2468                 * will get called because we want to update
2469                 * link_speed_active, and the change may not take
2470                 * effect for some time (if we are in POLL), so this
2471                 * flag will force the updown routine to be called
2472                 * on the next ibstatuschange down interrupt, even
2473                 * if it's not a down->up transition.
2474                 */
2475                if (val == (QIB_IB_SDR | QIB_IB_DDR)) {
2476                        val = IBA7220_IBC_SPEED_AUTONEG_MASK |
2477                                IBA7220_IBC_IBTA_1_2_MASK;
2478                        spin_lock_irqsave(&ppd->lflags_lock, flags);
2479                        ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
2480                        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2481                } else
2482                        val = val == QIB_IB_DDR ?
2483                                IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
2484                maskr = IBA7220_IBC_SPEED_AUTONEG_MASK |
2485                        IBA7220_IBC_IBTA_1_2_MASK;
2486                /* IBTA 1.2 mode + speed bits are contiguous */
2487                lsb = SYM_LSB(IBCDDRCtrl, IB_ENHANCED_MODE);
2488                setforce = 1;
2489                break;
2490
2491        case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
2492                lsb = IBA7220_IBC_RXPOL_SHIFT;
2493                maskr = IBA7220_IBC_RXPOL_MASK;
2494                break;
2495
2496        case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
2497                lsb = IBA7220_IBC_LREV_SHIFT;
2498                maskr = IBA7220_IBC_LREV_MASK;
2499                break;
2500
2501        case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
2502                maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
2503                                  OverrunThreshold);
2504                if (maskr != val) {
2505                        ppd->cpspec->ibcctrl &=
2506                                ~SYM_MASK(IBCCtrl, OverrunThreshold);
2507                        ppd->cpspec->ibcctrl |= (u64) val <<
2508                                SYM_LSB(IBCCtrl, OverrunThreshold);
2509                        qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
2510                        qib_write_kreg(dd, kr_scratch, 0);
2511                }
2512                goto bail;
2513
2514        case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
2515                maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
2516                                  PhyerrThreshold);
2517                if (maskr != val) {
2518                        ppd->cpspec->ibcctrl &=
2519                                ~SYM_MASK(IBCCtrl, PhyerrThreshold);
2520                        ppd->cpspec->ibcctrl |= (u64) val <<
2521                                SYM_LSB(IBCCtrl, PhyerrThreshold);
2522                        qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
2523                        qib_write_kreg(dd, kr_scratch, 0);
2524                }
2525                goto bail;
2526
2527        case QIB_IB_CFG_PKEYS: /* update pkeys */
2528                maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
2529                        ((u64) ppd->pkeys[2] << 32) |
2530                        ((u64) ppd->pkeys[3] << 48);
2531                qib_write_kreg(dd, kr_partitionkey, maskr);
2532                goto bail;
2533
2534        case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
2535                /* will only take effect when the link state changes */
2536                if (val == IB_LINKINITCMD_POLL)
2537                        ppd->cpspec->ibcctrl &=
2538                                ~SYM_MASK(IBCCtrl, LinkDownDefaultState);
2539                else /* SLEEP */
2540                        ppd->cpspec->ibcctrl |=
2541                                SYM_MASK(IBCCtrl, LinkDownDefaultState);
2542                qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
2543                qib_write_kreg(dd, kr_scratch, 0);
2544                goto bail;
2545
2546        case QIB_IB_CFG_MTU: /* update the MTU in IBC */
2547                /*
2548                 * Update our housekeeping variables, and set the IBC max
2549                 * packet size, same as the init code; max IBC is the max we
2550                 * allow in the buffer, less the qword pbc, plus 1 for ICRC,
2551                 * in dwords.  Set it even if unchanged; print a debug
2552                 * message only on changes.
2553                 */
2554                val = (ppd->ibmaxlen >> 2) + 1;
2555                ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen);
2556                ppd->cpspec->ibcctrl |= (u64)val << SYM_LSB(IBCCtrl, MaxPktLen);
2557                qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
2558                qib_write_kreg(dd, kr_scratch, 0);
2559                goto bail;
2560
2561        case QIB_IB_CFG_LSTATE: /* set the IB link state */
2562                switch (val & 0xffff0000) {
2563                case IB_LINKCMD_DOWN:
2564                        lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
2565                        if (!ppd->cpspec->ibdeltainprog &&
2566                            qib_compat_ddr_negotiate) {
2567                                ppd->cpspec->ibdeltainprog = 1;
2568                                ppd->cpspec->ibsymsnap =
2569                                        read_7220_creg32(dd, cr_ibsymbolerr);
2570                                ppd->cpspec->iblnkerrsnap =
2571                                        read_7220_creg32(dd, cr_iblinkerrrecov);
2572                        }
2573                        break;
2574
2575                case IB_LINKCMD_ARMED:
2576                        lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
2577                        break;
2578
2579                case IB_LINKCMD_ACTIVE:
2580                        lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
2581                        break;
2582
2583                default:
2584                        ret = -EINVAL;
2585                        qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
2586                        goto bail;
2587                }
2588                switch (val & 0xffff) {
2589                case IB_LINKINITCMD_NOP:
2590                        licmd = 0;
2591                        break;
2592
2593                case IB_LINKINITCMD_POLL:
2594                        licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
2595                        break;
2596
2597                case IB_LINKINITCMD_SLEEP:
2598                        licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
2599                        break;
2600
2601                case IB_LINKINITCMD_DISABLE:
2602                        licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
2603                        ppd->cpspec->chase_end = 0;
2604                        /*
2605                         * Stop the state-chase counter and timer, if running;
2606                         * wait for the pending timer, but don't clear .data (ppd)!
2607                         */
2608                        if (ppd->cpspec->chase_timer.expires) {
2609                                del_timer_sync(&ppd->cpspec->chase_timer);
2610                                ppd->cpspec->chase_timer.expires = 0;
2611                        }
2612                        break;
2613
2614                default:
2615                        ret = -EINVAL;
2616                        qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
2617                                    val & 0xffff);
2618                        goto bail;
2619                }
2620                qib_set_ib_7220_lstate(ppd, lcmd, licmd);
2621
2622                maskr = IBA7220_IBC_WIDTH_MASK;
2623                lsb = IBA7220_IBC_WIDTH_SHIFT;
2624                tmp = (ppd->cpspec->ibcddrctrl >> lsb) & maskr;
2625                /* If the width active on the chip does not match the
2626                 * width in the shadow register, write the new active
2627                 * width to the chip.
2628                 * We don't have to worry about speed as the speed is taken
2629                 * care of by set_7220_ibspeed_fast called by ib_updown.
2630                 */
2631                if (ppd->link_width_enabled-1 != tmp) {
2632                        ppd->cpspec->ibcddrctrl &= ~(maskr << lsb);
2633                        ppd->cpspec->ibcddrctrl |=
2634                                (((u64)(ppd->link_width_enabled-1) & maskr) <<
2635                                 lsb);
2636                        qib_write_kreg(dd, kr_ibcddrctrl,
2637                                       ppd->cpspec->ibcddrctrl);
2638                        qib_write_kreg(dd, kr_scratch, 0);
2639                        spin_lock_irqsave(&ppd->lflags_lock, flags);
2640                        ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
2641                        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2642                }
2643                goto bail;
2644
2645        case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
2646                if (val > IBA7220_IBC_HRTBT_MASK) {
2647                        ret = -EINVAL;
2648                        goto bail;
2649                }
2650                lsb = IBA7220_IBC_HRTBT_SHIFT;
2651                maskr = IBA7220_IBC_HRTBT_MASK;
2652                break;
2653
2654        default:
2655                ret = -EINVAL;
2656                goto bail;
2657        }
2658        ppd->cpspec->ibcddrctrl &= ~(maskr << lsb);
2659        ppd->cpspec->ibcddrctrl |= (((u64) val & maskr) << lsb);
2660        qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
2661        qib_write_kreg(dd, kr_scratch, 0);
2662        if (setforce) {
2663                spin_lock_irqsave(&ppd->lflags_lock, flags);
2664                ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
2665                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2666        }
2667bail:
2668        return ret;
2669}
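
/*
 * Worked example for the QIB_IB_CFG_PKEYS case above (values are
 * hypothetical): with ppd->pkeys = { 0xffff, 0x8001, 0, 0 }, the four
 * 16-bit pkeys pack into a single 64-bit register value:
 *
 *	maskr = 0xffff | (0x8001ULL << 16) | (0ULL << 32) | (0ULL << 48)
 *	      = 0x000000008001ffffULL;
 *	qib_write_kreg(dd, kr_partitionkey, maskr);
 */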
2670
2671static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what)
2672{
2673        int ret = 0;
2674        u64 val, ddr;
2675
2676        if (!strncmp(what, "ibc", 3)) {
2677                ppd->cpspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
2678                val = 0; /* disable heart beat, so link will come up */
2679                qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
2680                         ppd->dd->unit, ppd->port);
2681        } else if (!strncmp(what, "off", 3)) {
2682                ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
2683                /* enable heart beat again */
2684                val = IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
2685                qib_devinfo(ppd->dd->pcidev,
2686                        "Disabling IB%u:%u IBC loopback (normal)\n",
2687                        ppd->dd->unit, ppd->port);
2688        } else
2689                ret = -EINVAL;
2690        if (!ret) {
2691                qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
2692                ddr = ppd->cpspec->ibcddrctrl & ~(IBA7220_IBC_HRTBT_MASK
2693                                             << IBA7220_IBC_HRTBT_SHIFT);
2694                ppd->cpspec->ibcddrctrl = ddr | val;
2695                qib_write_kreg(ppd->dd, kr_ibcddrctrl,
2696                               ppd->cpspec->ibcddrctrl);
2697                qib_write_kreg(ppd->dd, kr_scratch, 0);
2698        }
2699        return ret;
2700}
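
/*
 * Usage sketch (illustrative; not an actual call site in this file):
 * loopback mode is selected by string, so a diag/debug caller would do
 * something like
 *
 *	ret = qib_7220_set_loopback(ppd, "ibc");   - enable IBC loopback
 *	ret = qib_7220_set_loopback(ppd, "off");   - back to normal
 *
 * Any other string returns -EINVAL.  Note that "ibc" also clears the
 * heartbeat field so the looped-back link can come up.
 */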
2701
2702static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
2703                                    u32 updegr, u32 egrhd, u32 npkts)
2704{
2705        if (updegr)
2706                qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
2707        mmiowb();
2708        qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
2709        mmiowb();
2710}
2711
2712static u32 qib_7220_hdrqempty(struct qib_ctxtdata *rcd)
2713{
2714        u32 head, tail;
2715
2716        head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
2717        if (rcd->rcvhdrtail_kvaddr)
2718                tail = qib_get_rcvhdrtail(rcd);
2719        else
2720                tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
2721        return head == tail;
2722}
2723
2724/*
2725 * Modify the RCVCTRL register in a chip-specific way. This
2726 * is a function because bit positions and (future) register
2727 * location are chip-specific, but the needed operations are
2728 * generic. <op> is a bit-mask because we often want to
2729 * do multiple modifications.
2730 */
2731static void rcvctrl_7220_mod(struct qib_pportdata *ppd, unsigned int op,
2732                             int ctxt)
2733{
2734        struct qib_devdata *dd = ppd->dd;
2735        u64 mask, val;
2736        unsigned long flags;
2737
2738        spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2739        if (op & QIB_RCVCTRL_TAILUPD_ENB)
2740                dd->rcvctrl |= (1ULL << IBA7220_R_TAILUPD_SHIFT);
2741        if (op & QIB_RCVCTRL_TAILUPD_DIS)
2742                dd->rcvctrl &= ~(1ULL << IBA7220_R_TAILUPD_SHIFT);
2743        if (op & QIB_RCVCTRL_PKEY_ENB)
2744                dd->rcvctrl &= ~(1ULL << IBA7220_R_PKEY_DIS_SHIFT);
2745        if (op & QIB_RCVCTRL_PKEY_DIS)
2746                dd->rcvctrl |= (1ULL << IBA7220_R_PKEY_DIS_SHIFT);
2747        if (ctxt < 0)
2748                mask = (1ULL << dd->ctxtcnt) - 1;
2749        else
2750                mask = (1ULL << ctxt);
2751        if (op & QIB_RCVCTRL_CTXT_ENB) {
2752                /* always done for specific ctxt */
2753                dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable));
2754                if (!(dd->flags & QIB_NODMA_RTAIL))
2755                        dd->rcvctrl |= 1ULL << IBA7220_R_TAILUPD_SHIFT;
2756                /* Write these registers before the context is enabled. */
2757                qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
2758                        dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
2759                qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
2760                        dd->rcd[ctxt]->rcvhdrq_phys);
2761                dd->rcd[ctxt]->seq_cnt = 1;
2762        }
2763        if (op & QIB_RCVCTRL_CTXT_DIS)
2764                dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable));
2765        if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
2766                dd->rcvctrl |= (mask << IBA7220_R_INTRAVAIL_SHIFT);
2767        if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
2768                dd->rcvctrl &= ~(mask << IBA7220_R_INTRAVAIL_SHIFT);
2769        qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
2770        if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) {
2771                /* arm rcv interrupt */
2772                val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
2773                        dd->rhdrhead_intr_off;
2774                qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
2775        }
2776        if (op & QIB_RCVCTRL_CTXT_ENB) {
2777                /*
2778                 * Init the context registers also; if we were
2779                 * disabled, tail and head should both be zero
2780                 * already from the enable, but since we don't
2781                 * know, we have to do it explicitly.
2782                 */
2783                val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
2784                qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
2785
2786                val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
2787                dd->rcd[ctxt]->head = val;
2788                /* If kctxt, interrupt on next receive. */
2789                if (ctxt < dd->first_user_ctxt)
2790                        val |= dd->rhdrhead_intr_off;
2791                qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
2792        }
2793        if (op & QIB_RCVCTRL_CTXT_DIS) {
2794                if (ctxt >= 0) {
2795                        qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt, 0);
2796                        qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, 0);
2797                } else {
2798                        unsigned i;
2799
2800                        for (i = 0; i < dd->cfgctxts; i++) {
2801                                qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr,
2802                                                    i, 0);
2803                                qib_write_kreg_ctxt(dd, kr_rcvhdraddr, i, 0);
2804                        }
2805                }
2806        }
2807        spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2808}
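
/*
 * Because <op> is a bit-mask, several modifications can be applied in
 * one locked pass.  A hypothetical caller enabling a context together
 * with its packet-available interrupt would do
 *
 *	rcvctrl_7220_mod(ppd, QIB_RCVCTRL_CTXT_ENB |
 *			 QIB_RCVCTRL_INTRAVAIL_ENB, ctxt);
 *
 * and passing ctxt < 0 applies the per-context bits to all contexts.
 */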
2809
2810/*
2811 * Modify the SENDCTRL register in a chip-specific way. This
2812 * is a function because there may be multiple such registers with
2813 * slightly different layouts. To start, we assume the
2814 * "canonical" register layout of the first chips.
2815 * The chip tolerates no back-to-back sendctrl writes, so the
2816 * scratch register is written after each sendctrl write.
2817 */
2818static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op)
2819{
2820        struct qib_devdata *dd = ppd->dd;
2821        u64 tmp_dd_sendctrl;
2822        unsigned long flags;
2823
2824        spin_lock_irqsave(&dd->sendctrl_lock, flags);
2825
2826        /* First the ones that are "sticky", saved in shadow */
2827        if (op & QIB_SENDCTRL_CLEAR)
2828                dd->sendctrl = 0;
2829        if (op & QIB_SENDCTRL_SEND_DIS)
2830                dd->sendctrl &= ~SYM_MASK(SendCtrl, SPioEnable);
2831        else if (op & QIB_SENDCTRL_SEND_ENB) {
2832                dd->sendctrl |= SYM_MASK(SendCtrl, SPioEnable);
2833                if (dd->flags & QIB_USE_SPCL_TRIG)
2834                        dd->sendctrl |= SYM_MASK(SendCtrl,
2835                                                 SSpecialTriggerEn);
2836        }
2837        if (op & QIB_SENDCTRL_AVAIL_DIS)
2838                dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
2839        else if (op & QIB_SENDCTRL_AVAIL_ENB)
2840                dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
2841
2842        if (op & QIB_SENDCTRL_DISARM_ALL) {
2843                u32 i, last;
2844
2845                tmp_dd_sendctrl = dd->sendctrl;
2846                /*
2847                 * disarm any that are not yet launched, disabling sends
2848                 * and updates until done.
2849                 */
2850                last = dd->piobcnt2k + dd->piobcnt4k;
2851                tmp_dd_sendctrl &=
2852                        ~(SYM_MASK(SendCtrl, SPioEnable) |
2853                          SYM_MASK(SendCtrl, SendBufAvailUpd));
2854                for (i = 0; i < last; i++) {
2855                        qib_write_kreg(dd, kr_sendctrl,
2856                                       tmp_dd_sendctrl |
2857                                       SYM_MASK(SendCtrl, Disarm) | i);
2858                        qib_write_kreg(dd, kr_scratch, 0);
2859                }
2860        }
2861
2862        tmp_dd_sendctrl = dd->sendctrl;
2863
2864        if (op & QIB_SENDCTRL_FLUSH)
2865                tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Abort);
2866        if (op & QIB_SENDCTRL_DISARM)
2867                tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
2868                        ((op & QIB_7220_SendCtrl_DisarmPIOBuf_RMASK) <<
2869                         SYM_LSB(SendCtrl, DisarmPIOBuf));
2870        if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
2871            (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
2872                tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
2873
2874        qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
2875        qib_write_kreg(dd, kr_scratch, 0);
2876
2877        if (op & QIB_SENDCTRL_AVAIL_BLIP) {
2878                qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
2879                qib_write_kreg(dd, kr_scratch, 0);
2880        }
2881
2882        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2883
2884        if (op & QIB_SENDCTRL_FLUSH) {
2885                u32 v;
2886                /*
2887                 * ensure writes have hit chip, then do a few
2888                 * more reads, to allow DMA of pioavail registers
2889                 * to occur, so in-memory copy is in sync with
2890                 * the chip.  Not always safe to sleep.
2891                 */
2892                v = qib_read_kreg32(dd, kr_scratch);
2893                qib_write_kreg(dd, kr_scratch, v);
2894                v = qib_read_kreg32(dd, kr_scratch);
2895                qib_write_kreg(dd, kr_scratch, v);
2896                qib_read_kreg32(dd, kr_scratch);
2897        }
2898}
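
/*
 * Illustrative call: to disarm a single PIO buffer pnum, the buffer
 * number rides in the low bits of <op> alongside the DISARM flag:
 *
 *	sendctrl_7220_mod(ppd, QIB_SENDCTRL_DISARM_BUF(pnum));
 *
 * as done in autoneg_7220_sendpkt() below.  Each sendctrl write above
 * is followed by a scratch write, honoring the no-back-to-back rule.
 */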
2899
2900/**
2901 * qib_portcntr_7220 - read a per-port counter
2902 * @ppd: the qlogic_ib pport data
2903 * @reg: the counter to snapshot
2904 */
2905static u64 qib_portcntr_7220(struct qib_pportdata *ppd, u32 reg)
2906{
2907        u64 ret = 0ULL;
2908        struct qib_devdata *dd = ppd->dd;
2909        u16 creg;
2910        /* 0xffff for unimplemented or synthesized counters */
2911        static const u16 xlator[] = {
2912                [QIBPORTCNTR_PKTSEND] = cr_pktsend,
2913                [QIBPORTCNTR_WORDSEND] = cr_wordsend,
2914                [QIBPORTCNTR_PSXMITDATA] = cr_psxmitdatacount,
2915                [QIBPORTCNTR_PSXMITPKTS] = cr_psxmitpktscount,
2916                [QIBPORTCNTR_PSXMITWAIT] = cr_psxmitwaitcount,
2917                [QIBPORTCNTR_SENDSTALL] = cr_sendstall,
2918                [QIBPORTCNTR_PKTRCV] = cr_pktrcv,
2919                [QIBPORTCNTR_PSRCVDATA] = cr_psrcvdatacount,
2920                [QIBPORTCNTR_PSRCVPKTS] = cr_psrcvpktscount,
2921                [QIBPORTCNTR_RCVEBP] = cr_rcvebp,
2922                [QIBPORTCNTR_RCVOVFL] = cr_rcvovfl,
2923                [QIBPORTCNTR_WORDRCV] = cr_wordrcv,
2924                [QIBPORTCNTR_RXDROPPKT] = cr_rxdroppkt,
2925                [QIBPORTCNTR_RXLOCALPHYERR] = cr_rxotherlocalphyerr,
2926                [QIBPORTCNTR_RXVLERR] = cr_rxvlerr,
2927                [QIBPORTCNTR_ERRICRC] = cr_erricrc,
2928                [QIBPORTCNTR_ERRVCRC] = cr_errvcrc,
2929                [QIBPORTCNTR_ERRLPCRC] = cr_errlpcrc,
2930                [QIBPORTCNTR_BADFORMAT] = cr_badformat,
2931                [QIBPORTCNTR_ERR_RLEN] = cr_err_rlen,
2932                [QIBPORTCNTR_IBSYMBOLERR] = cr_ibsymbolerr,
2933                [QIBPORTCNTR_INVALIDRLEN] = cr_invalidrlen,
2934                [QIBPORTCNTR_UNSUPVL] = cr_txunsupvl,
2935                [QIBPORTCNTR_EXCESSBUFOVFL] = cr_excessbufferovfl,
2936                [QIBPORTCNTR_ERRLINK] = cr_errlink,
2937                [QIBPORTCNTR_IBLINKDOWN] = cr_iblinkdown,
2938                [QIBPORTCNTR_IBLINKERRRECOV] = cr_iblinkerrrecov,
2939                [QIBPORTCNTR_LLI] = cr_locallinkintegrityerr,
2940                [QIBPORTCNTR_PSINTERVAL] = cr_psinterval,
2941                [QIBPORTCNTR_PSSTART] = cr_psstart,
2942                [QIBPORTCNTR_PSSTAT] = cr_psstat,
2943                [QIBPORTCNTR_VL15PKTDROP] = cr_vl15droppedpkt,
2944                [QIBPORTCNTR_ERRPKEY] = cr_errpkey,
2945                [QIBPORTCNTR_KHDROVFL] = 0xffff,
2946        };
2947
2948        if (reg >= ARRAY_SIZE(xlator)) {
2949                qib_devinfo(ppd->dd->pcidev,
2950                         "Unimplemented portcounter %u\n", reg);
2951                goto done;
2952        }
2953        creg = xlator[reg];
2954
2955        if (reg == QIBPORTCNTR_KHDROVFL) {
2956                int i;
2957
2958                /* sum over all kernel contexts */
2959                for (i = 0; i < dd->first_user_ctxt; i++)
2960                        ret += read_7220_creg32(dd, cr_portovfl + i);
2961        }
2962        if (creg == 0xffff)
2963                goto done;
2964
2965        /*
2966         * Only the fast-incrementing counters are 64 bits; use 32-bit reads
2967         * to avoid two independent reads when on an Opteron.
2968         */
2969        if ((creg == cr_wordsend || creg == cr_wordrcv ||
2970             creg == cr_pktsend || creg == cr_pktrcv))
2971                ret = read_7220_creg(dd, creg);
2972        else
2973                ret = read_7220_creg32(dd, creg);
2974        if (creg == cr_ibsymbolerr) {
2975                if (dd->pport->cpspec->ibdeltainprog)
2976                        ret -= ret - ppd->cpspec->ibsymsnap;
2977                ret -= dd->pport->cpspec->ibsymdelta;
2978        } else if (creg == cr_iblinkerrrecov) {
2979                if (dd->pport->cpspec->ibdeltainprog)
2980                        ret -= ret - ppd->cpspec->iblnkerrsnap;
2981                ret -= dd->pport->cpspec->iblnkerrdelta;
2982        }
2983done:
2984        return ret;
2985}
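
/*
 * Usage sketch: callers read counters by QIBPORTCNTR_* index, not by
 * chip register, e.g. (hypothetical snippet)
 *
 *	u64 symerrs = qib_portcntr_7220(ppd, QIBPORTCNTR_IBSYMBOLERR);
 *
 * The xlator[] table maps the index to cr_ibsymbolerr, and the tail of
 * the function applies the snapshot/delta adjustments made while link
 * state transitions were in progress.
 */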
2986
2987/*
2988 * Device counter names (not port-specific), one line per stat,
2989 * single string.  Used by utilities like ipathstats to print the stats
2990 * in a way which works for different versions of drivers, without changing
2991 * the utility.  Names need to be 12 chars or less (w/o newline), for proper
2992 * display by the utility.
2993 * Non-error counters are first.
2994 * The start of the "error" counters is indicated by a leading "E " on the
2995 * first "error" counter; it doesn't count toward the label length.
2996 * The EgrOvfl list needs to be last so we can truncate it at the configured
2997 * context count for the device.
2998 * cntr7220indices contains the corresponding register indices.
2999 */
3000static const char cntr7220names[] =
3001        "Interrupts\n"
3002        "HostBusStall\n"
3003        "E RxTIDFull\n"
3004        "RxTIDInvalid\n"
3005        "Ctxt0EgrOvfl\n"
3006        "Ctxt1EgrOvfl\n"
3007        "Ctxt2EgrOvfl\n"
3008        "Ctxt3EgrOvfl\n"
3009        "Ctxt4EgrOvfl\n"
3010        "Ctxt5EgrOvfl\n"
3011        "Ctxt6EgrOvfl\n"
3012        "Ctxt7EgrOvfl\n"
3013        "Ctxt8EgrOvfl\n"
3014        "Ctxt9EgrOvfl\n"
3015        "Ctx10EgrOvfl\n"
3016        "Ctx11EgrOvfl\n"
3017        "Ctx12EgrOvfl\n"
3018        "Ctx13EgrOvfl\n"
3019        "Ctx14EgrOvfl\n"
3020        "Ctx15EgrOvfl\n"
3021        "Ctx16EgrOvfl\n";
3022
3023static const size_t cntr7220indices[] = {
3024        cr_lbint,
3025        cr_lbflowstall,
3026        cr_errtidfull,
3027        cr_errtidvalid,
3028        cr_portovfl + 0,
3029        cr_portovfl + 1,
3030        cr_portovfl + 2,
3031        cr_portovfl + 3,
3032        cr_portovfl + 4,
3033        cr_portovfl + 5,
3034        cr_portovfl + 6,
3035        cr_portovfl + 7,
3036        cr_portovfl + 8,
3037        cr_portovfl + 9,
3038        cr_portovfl + 10,
3039        cr_portovfl + 11,
3040        cr_portovfl + 12,
3041        cr_portovfl + 13,
3042        cr_portovfl + 14,
3043        cr_portovfl + 15,
3044        cr_portovfl + 16,
3045};
3046
3047/*
3048 * Same as cntr7220names and cntr7220indices, but for port-specific counters.
3049 * portcntr7220indices is somewhat complicated by some registers needing
3050 * adjustments of various kinds; those are ORed with _PORT_VIRT_FLAG.
3051 */
3052static const char portcntr7220names[] =
3053        "TxPkt\n"
3054        "TxFlowPkt\n"
3055        "TxWords\n"
3056        "RxPkt\n"
3057        "RxFlowPkt\n"
3058        "RxWords\n"
3059        "TxFlowStall\n"
3060        "TxDmaDesc\n"  /* 7220 and 7322-only */
3061        "E RxDlidFltr\n"  /* 7220 and 7322-only */
3062        "IBStatusChng\n"
3063        "IBLinkDown\n"
3064        "IBLnkRecov\n"
3065        "IBRxLinkErr\n"
3066        "IBSymbolErr\n"
3067        "RxLLIErr\n"
3068        "RxBadFormat\n"
3069        "RxBadLen\n"
3070        "RxBufOvrfl\n"
3071        "RxEBP\n"
3072        "RxFlowCtlErr\n"
3073        "RxICRCerr\n"
3074        "RxLPCRCerr\n"
3075        "RxVCRCerr\n"
3076        "RxInvalLen\n"
3077        "RxInvalPKey\n"
3078        "RxPktDropped\n"
3079        "TxBadLength\n"
3080        "TxDropped\n"
3081        "TxInvalLen\n"
3082        "TxUnderrun\n"
3083        "TxUnsupVL\n"
3084        "RxLclPhyErr\n" /* 7220 and 7322-only */
3085        "RxVL15Drop\n" /* 7220 and 7322-only */
3086        "RxVlErr\n" /* 7220 and 7322-only */
3087        "XcessBufOvfl\n" /* 7220 and 7322-only */
3088        ;
3089
3090#define _PORT_VIRT_FLAG 0x8000 /* "virtual", need adjustments */
3091static const size_t portcntr7220indices[] = {
3092        QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
3093        cr_pktsendflow,
3094        QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
3095        QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
3096        cr_pktrcvflowctrl,
3097        QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
3098        QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
3099        cr_txsdmadesc,
3100        cr_rxdlidfltr,
3101        cr_ibstatuschange,
3102        QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
3103        QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
3104        QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
3105        QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
3106        QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
3107        QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
3108        QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
3109        QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
3110        QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
3111        cr_rcvflowctrl_err,
3112        QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
3113        QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
3114        QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
3115        QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
3116        QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
3117        QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
3118        cr_invalidslen,
3119        cr_senddropped,
3120        cr_errslen,
3121        cr_sendunderrun,
3122        cr_txunsupvl,
3123        QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
3124        QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
3125        QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
3126        QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
3127};
3128
3129/* do all the setup to make the counter reads efficient later */
3130static void init_7220_cntrnames(struct qib_devdata *dd)
3131{
3132        int i, j = 0;
3133        char *s;
3134
3135        for (i = 0, s = (char *)cntr7220names; s && j <= dd->cfgctxts;
3136             i++) {
3137                /* we always have at least one counter before the egrovfl */
3138                if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
3139                        j = 1;
3140                s = strchr(s + 1, '\n');
3141                if (s && j)
3142                        j++;
3143        }
3144        dd->cspec->ncntrs = i;
3145        if (!s)
3146                /* full list; size is without terminating null */
3147                dd->cspec->cntrnamelen = sizeof(cntr7220names) - 1;
3148        else
3149                dd->cspec->cntrnamelen = 1 + s - cntr7220names;
3150        dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
3151                * sizeof(u64), GFP_KERNEL);
3152
3153        for (i = 0, s = (char *)portcntr7220names; s; i++)
3154                s = strchr(s + 1, '\n');
3155        dd->cspec->nportcntrs = i - 1;
3156        dd->cspec->portcntrnamelen = sizeof(portcntr7220names) - 1;
3157        dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
3158                * sizeof(u64), GFP_KERNEL);
3159}
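
/*
 * Worked example of the truncation above, with a hypothetical
 * dd->cfgctxts == 5: the name scan counts the 4 fixed counters plus
 * Ctxt0EgrOvfl..Ctxt4EgrOvfl, so ncntrs == 9, and cntrnamelen ends
 * just past the "Ctxt4EgrOvfl\n" entry; the remaining EgrOvfl names
 * in the string are never reported.
 */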
3160
3161static u32 qib_read_7220cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
3162                              u64 **cntrp)
3163{
3164        u32 ret;
3165
3166        if (!dd->cspec->cntrs) {
3167                ret = 0;
3168                goto done;
3169        }
3170
3171        if (namep) {
3172                *namep = (char *)cntr7220names;
3173                ret = dd->cspec->cntrnamelen;
3174                if (pos >= ret)
3175                        ret = 0; /* final read after getting everything */
3176        } else {
3177                u64 *cntr = dd->cspec->cntrs;
3178                int i;
3179
3180                ret = dd->cspec->ncntrs * sizeof(u64);
3181                if (!cntr || pos >= ret) {
3182                        /* everything read, or couldn't get memory */
3183                        ret = 0;
3184                        goto done;
3185                }
3186
3187                *cntrp = cntr;
3188                for (i = 0; i < dd->cspec->ncntrs; i++)
3189                        *cntr++ = read_7220_creg32(dd, cntr7220indices[i]);
3190        }
3191done:
3192        return ret;
3193}
3194
3195static u32 qib_read_7220portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
3196                                  char **namep, u64 **cntrp)
3197{
3198        u32 ret;
3199
3200        if (!dd->cspec->portcntrs) {
3201                ret = 0;
3202                goto done;
3203        }
3204        if (namep) {
3205                *namep = (char *)portcntr7220names;
3206                ret = dd->cspec->portcntrnamelen;
3207                if (pos >= ret)
3208                        ret = 0; /* final read after getting everything */
3209        } else {
3210                u64 *cntr = dd->cspec->portcntrs;
3211                struct qib_pportdata *ppd = &dd->pport[port];
3212                int i;
3213
3214                ret = dd->cspec->nportcntrs * sizeof(u64);
3215                if (!cntr || pos >= ret) {
3216                        /* everything read, or couldn't get memory */
3217                        ret = 0;
3218                        goto done;
3219                }
3220                *cntrp = cntr;
3221                for (i = 0; i < dd->cspec->nportcntrs; i++) {
3222                        if (portcntr7220indices[i] & _PORT_VIRT_FLAG)
3223                                *cntr++ = qib_portcntr_7220(ppd,
3224                                        portcntr7220indices[i] &
3225                                        ~_PORT_VIRT_FLAG);
3226                        else
3227                                *cntr++ = read_7220_creg32(dd,
3228                                           portcntr7220indices[i]);
3229                }
3230        }
3231done:
3232        return ret;
3233}
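
/*
 * Sketch of the two-pass read protocol both functions above implement
 * (hypothetical caller): one call with namep set fetches the name
 * string, another with cntrp set fetches the values; a read with pos
 * at or past the end returns 0.
 *
 *	char *names;
 *	u64 *vals;
 *	u32 nbytes = qib_read_7220portcntrs(dd, 0, 0, &names, NULL);
 *	u32 vbytes = qib_read_7220portcntrs(dd, 0, 0, NULL, &vals);
 *
 * yielding vbytes / sizeof(u64) values, one per line in names.
 */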
3234
3235/**
3236 * qib_get_7220_faststats - get word counters from chip before they overflow
3237 * @t: the stats timer embedded in the qib_devdata
3238 *
3239 * This needs more work; in particular, a decision on whether we really
3240 * need traffic_wds done the way it is.
3241 * Called as the stats_timer callback.
3242 */
3243static void qib_get_7220_faststats(struct timer_list *t)
3244{
3245        struct qib_devdata *dd = from_timer(dd, t, stats_timer);
3246        struct qib_pportdata *ppd = dd->pport;
3247        unsigned long flags;
3248        u64 traffic_wds;
3249
3250        /*
3251         * don't access the chip while running diags, or memory diags can
3252         * fail
3253         */
3254        if (!(dd->flags & QIB_INITTED) || dd->diag_client)
3255                /* but re-arm the timer, for the diags case; won't hurt otherwise */
3256                goto done;
3257
3258        /*
3259         * We now try to maintain an activity timer, based on traffic
3260         * exceeding a threshold, so we need to check the word-counts
3261         * even if they are 64-bit.
3262         */
3263        traffic_wds = qib_portcntr_7220(ppd, cr_wordsend) +
3264                qib_portcntr_7220(ppd, cr_wordrcv);
3265        spin_lock_irqsave(&dd->eep_st_lock, flags);
3266        traffic_wds -= dd->traffic_wds;
3267        dd->traffic_wds += traffic_wds;
3268        spin_unlock_irqrestore(&dd->eep_st_lock, flags);
3269done:
3270        mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
3271}
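
/*
 * The update above is a shadow-vs-chip delta computation.  With
 * hypothetical values: if the chip word counters now sum to 1000 and
 * dd->traffic_wds held 900 from the last poll, then
 *
 *	traffic_wds -= dd->traffic_wds;		(delta, 100)
 *	dd->traffic_wds += traffic_wds;		(shadow back to 1000)
 *
 * so dd->traffic_wds always tracks the last-seen total, under
 * eep_st_lock.
 */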
3272
3273/*
3274 * If we are using MSI, try to fall back to INTx.
3275 */
3276static int qib_7220_intr_fallback(struct qib_devdata *dd)
3277{
3278        if (!dd->msi_lo)
3279                return 0;
3280
3281        qib_devinfo(dd->pcidev,
3282                    "MSI interrupt not detected, trying INTx interrupts\n");
3283
3284        qib_free_irq(dd);
3285        dd->msi_lo = 0;
3286        if (pci_alloc_irq_vectors(dd->pcidev, 1, 1, PCI_IRQ_LEGACY) < 0)
3287                qib_dev_err(dd, "Failed to enable INTx\n");
3288        qib_setup_7220_interrupt(dd);
3289        return 1;
3290}
3291
3292/*
3293 * Reset the XGXS (between serdes and IBC).  Slightly less intrusive
3294 * than resetting the IBC or external link state, and useful in some
3295 * cases to cause some retraining.  To do this right, we reset IBC
3296 * as well.
3297 */
3298static void qib_7220_xgxs_reset(struct qib_pportdata *ppd)
3299{
3300        u64 val, prev_val;
3301        struct qib_devdata *dd = ppd->dd;
3302
3303        prev_val = qib_read_kreg64(dd, kr_xgxs_cfg);
3304        val = prev_val | QLOGIC_IB_XGXS_RESET;
3305        prev_val &= ~QLOGIC_IB_XGXS_RESET; /* be sure */
3306        qib_write_kreg(dd, kr_control,
3307                       dd->control & ~QLOGIC_IB_C_LINKENABLE);
3308        qib_write_kreg(dd, kr_xgxs_cfg, val);
3309        qib_read_kreg32(dd, kr_scratch);
3310        qib_write_kreg(dd, kr_xgxs_cfg, prev_val);
3311        qib_write_kreg(dd, kr_control, dd->control);
3312}
3313
3314/*
3315 * For this chip, we want to use the same buffer every time
3316 * when we are trying to bring the link up (they are always VL15
3317 * packets).  At that link state the packet should always go out immediately
3318 * (or at least be discarded at the tx interface if the link is down).
3319 * If it doesn't, and the buffer isn't available, that means some other
3320 * sender has gotten ahead of us, and is preventing our packet from going
3321 * out.  In that case, we flush all packets, and try again.  If that still
3322 * fails, we fail the request, and hope things work the next time around.
3323 *
3324 * We don't need very complicated heuristics on whether the packet had
3325 * time to go out or not, since even at SDR 1X, it goes out in very short
3326 * time periods, covered by the chip reads done here and as part of the
3327 * flush.
3328 */
3329static u32 __iomem *get_7220_link_buf(struct qib_pportdata *ppd, u32 *bnum)
3330{
3331        u32 __iomem *buf;
3332        u32 lbuf = ppd->dd->cspec->lastbuf_for_pio;
3333        int do_cleanup;
3334        unsigned long flags;
3335
3336        /*
3337         * always blip to get avail list updated, since it's almost
3338         * always needed, and is fairly cheap.
3339         */
3340        sendctrl_7220_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
3341        qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
3342        buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
3343        if (buf)
3344                goto done;
3345
3346        spin_lock_irqsave(&ppd->sdma_lock, flags);
3347        if (ppd->sdma_state.current_state == qib_sdma_state_s20_idle &&
3348            ppd->sdma_state.current_state != qib_sdma_state_s00_hw_down) {
3349                __qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);
3350                do_cleanup = 0;
3351        } else {
3352                do_cleanup = 1;
3353                qib_7220_sdma_hw_clean_up(ppd);
3354        }
3355        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
3356
3357        if (do_cleanup) {
3358                qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
3359                buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
3360        }
3361done:
3362        return buf;
3363}
3364
3365/*
3366 * This code for non-IBTA-compliant IB speed negotiation is only known to
3367 * work for the SDR to DDR transition, and only between an HCA and a switch
3368 * with recent firmware.  It is based on observed heuristics, rather than
3369 * actual knowledge of the non-compliant speed negotiation.
3370 * It has a number of hard-coded fields, since the hope is to rewrite this
3371 * when a spec is available on how the negotiation is intended to work.
3372 */
3373static void autoneg_7220_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
3374                                 u32 dcnt, u32 *data)
3375{
3376        int i;
3377        u64 pbc;
3378        u32 __iomem *piobuf;
3379        u32 pnum;
3380        struct qib_devdata *dd = ppd->dd;
3381
3382        i = 0;
3383        pbc = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
3384        pbc |= PBC_7220_VL15_SEND;
3385        while (!(piobuf = get_7220_link_buf(ppd, &pnum))) {
3386                if (i++ > 5)
3387                        return;
3388                udelay(2);
3389        }
3390        sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_DISARM_BUF(pnum));
3391        writeq(pbc, piobuf);
3392        qib_flush_wc();
3393        qib_pio_copy(piobuf + 2, hdr, 7);
3394        qib_pio_copy(piobuf + 9, data, dcnt);
3395        if (dd->flags & QIB_USE_SPCL_TRIG) {
3396                u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
3397
3398                qib_flush_wc();
3399                __raw_writel(0xaebecede, piobuf + spcl_off);
3400        }
3401        qib_flush_wc();
3402        qib_sendbuf_done(dd, pnum);
3403}
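
/*
 * PBC arithmetic above, worked through: a packet is 7 header dwords
 * plus dcnt data dwords plus 1 ICRC dword, so for the 0x40-dword MAD
 * payloads used by autoneg_7220_send() below, pbc = 7 + 64 + 1 = 72
 * dwords, ORed with PBC_7220_VL15_SEND so the buffer is sent as a
 * VL15 packet.
 */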
3404
3405/*
3406 * _start packet gets sent twice at start, _done gets sent twice at end
3407 */
3408static void autoneg_7220_send(struct qib_pportdata *ppd, int which)
3409{
3410        struct qib_devdata *dd = ppd->dd;
3411        static u32 swapped;
3412        u32 dw, i, hcnt, dcnt, *data;
3413        static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
3414        static u32 madpayload_start[0x40] = {
3415                0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
3416                0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
3417                0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
3418                };
3419        static u32 madpayload_done[0x40] = {
3420                0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
3421                0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
3422                0x40000001, 0x1388, 0x15e, /* rest 0's */
3423                };
3424
3425        dcnt = ARRAY_SIZE(madpayload_start);
3426        hcnt = ARRAY_SIZE(hdr);
3427        if (!swapped) {
3428                /* for maintainability, do it at runtime */
3429                for (i = 0; i < hcnt; i++) {
3430                        dw = (__force u32) cpu_to_be32(hdr[i]);
3431                        hdr[i] = dw;
3432                }
3433                for (i = 0; i < dcnt; i++) {
3434                        dw = (__force u32) cpu_to_be32(madpayload_start[i]);
3435                        madpayload_start[i] = dw;
3436                        dw = (__force u32) cpu_to_be32(madpayload_done[i]);
3437                        madpayload_done[i] = dw;
3438                }
3439                swapped = 1;
3440        }
3441
3442        data = which ? madpayload_done : madpayload_start;
3443
3444        autoneg_7220_sendpkt(ppd, hdr, dcnt, data);
3445        qib_read_kreg64(dd, kr_scratch);
3446        udelay(2);
3447        autoneg_7220_sendpkt(ppd, hdr, dcnt, data);
3448        qib_read_kreg64(dd, kr_scratch);
3449        udelay(2);
3450}
3451
3452/*
3453 * Do the absolute minimum to cause an IB speed change, and make it
3454 * ready, but don't actually trigger the change.  The caller will
3455 * do that when ready (if the link is in Polling training state, it will
3456 * happen immediately; otherwise when the link next goes down).
3457 *
3458 * This routine should only be used as part of the DDR autonegotiation
3459 * code for devices that are not compliant with IB 1.2 (or code that
3460 * fixes things up for same).
3461 *
3462 * When the link has gone down with autoneg enabled, or autoneg has
3463 * failed and we give up until next time, we set both speeds, and then
3464 * we want IBTA enabled as well as "use max enabled speed".
3465 */
3466static void set_7220_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
3467{
3468        ppd->cpspec->ibcddrctrl &= ~(IBA7220_IBC_SPEED_AUTONEG_MASK |
3469                IBA7220_IBC_IBTA_1_2_MASK);
3470
3471        if (speed == (QIB_IB_SDR | QIB_IB_DDR))
3472                ppd->cpspec->ibcddrctrl |= IBA7220_IBC_SPEED_AUTONEG_MASK |
3473                        IBA7220_IBC_IBTA_1_2_MASK;
3474        else
3475                ppd->cpspec->ibcddrctrl |= speed == QIB_IB_DDR ?
3476                        IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
3477
3478        qib_write_kreg(ppd->dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
3479        qib_write_kreg(ppd->dd, kr_scratch, 0);
3480}
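
/*
 * Illustrative call: passing both speed bits selects autonegotiation,
 *
 *	set_7220_ibspeed_fast(ppd, QIB_IB_SDR | QIB_IB_DDR);
 *
 * which sets the autoneg and IBTA-1.2 masks, while a single bit such
 * as QIB_IB_DDR forces that one speed.  The autoneg code below uses
 * exactly this to flip speeds between link retrains.
 */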
3481
3482/*
3483 * This routine is only used when we are not talking to another
3484 * IB 1.2-compliant device that we think can do DDR.
3485 * (This includes all existing switch chips as of Oct 2007.)
3486 * 1.2-compliant devices go directly to DDR prior to reaching INIT
3487 */
3488static void try_7220_autoneg(struct qib_pportdata *ppd)
3489{
3490        unsigned long flags;
3491
3492        /*
3493         * Required for older non-IB1.2 DDR switches.  Newer
3494         * non-IB-compliant switches don't need it, but so far,
3495         * aren't bothered by it either.  "Magic constant"
3496         */
3497        qib_write_kreg(ppd->dd, kr_ncmodectrl, 0x3b9dc07);
3498
3499        spin_lock_irqsave(&ppd->lflags_lock, flags);
3500        ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
3501        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3502        autoneg_7220_send(ppd, 0);
3503        set_7220_ibspeed_fast(ppd, QIB_IB_DDR);
3504
3505        toggle_7220_rclkrls(ppd->dd);
3506        /* 2 msec is minimum length of a poll cycle */
3507        queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
3508                           msecs_to_jiffies(2));
3509}
3510
3511/*
3512 * Handle the empirically determined mechanism for auto-negotiation
3513 * of DDR speed with switches.
3514 */
3515static void autoneg_7220_work(struct work_struct *work)
3516{
3517        struct qib_pportdata *ppd;
3518        struct qib_devdata *dd;
3519        u32 i;
3520        unsigned long flags;
3521
3522        ppd = &container_of(work, struct qib_chippport_specific,
3523                            autoneg_work.work)->pportdata;
3524        dd = ppd->dd;
3525
3526        /*
3527         * Busy-wait for this first part; it should take at most a
3528         * few hundred usec, since we scheduled ourselves for 2 msec.
3529         */
3530        for (i = 0; i < 25; i++) {
3531                if (SYM_FIELD(ppd->lastibcstat, IBCStatus, LinkTrainingState)
3532                     == IB_7220_LT_STATE_POLLQUIET) {
3533                        qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
3534                        break;
3535                }
3536                udelay(100);
3537        }
3538
3539        if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
3540                goto done; /* we got there early or told to stop */
3541
3542        /* we expect this to timeout */
3543        if (wait_event_timeout(ppd->cpspec->autoneg_wait,
3544                               !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
3545                               msecs_to_jiffies(90)))
3546                goto done;
3547
3548        toggle_7220_rclkrls(dd);
3549
3550        /* we expect this to timeout */
3551        if (wait_event_timeout(ppd->cpspec->autoneg_wait,
3552                               !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
3553                               msecs_to_jiffies(1700)))
3554                goto done;
3555
3556        set_7220_ibspeed_fast(ppd, QIB_IB_SDR);
3557        toggle_7220_rclkrls(dd);
3558
3559        /*
3560         * Wait up to 250 msec for link to train and get to INIT at DDR;
3561         * this should terminate early.
3562         */
3563        wait_event_timeout(ppd->cpspec->autoneg_wait,
3564                !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
3565                msecs_to_jiffies(250));
3566done:
3567        if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
3568                spin_lock_irqsave(&ppd->lflags_lock, flags);
3569                ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
3570                if (dd->cspec->autoneg_tries == AUTONEG_TRIES) {
3571                        ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
3572                        dd->cspec->autoneg_tries = 0;
3573                }
3574                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3575                set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled);
3576        }
3577}
3578
3579static u32 qib_7220_iblink_state(u64 ibcs)
3580{
3581        u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState);
3582
3583        switch (state) {
3584        case IB_7220_L_STATE_INIT:
3585                state = IB_PORT_INIT;
3586                break;
3587        case IB_7220_L_STATE_ARM:
3588                state = IB_PORT_ARMED;
3589                break;
3590        case IB_7220_L_STATE_ACTIVE:
3591                /* fall through */
3592        case IB_7220_L_STATE_ACT_DEFER:
3593                state = IB_PORT_ACTIVE;
3594                break;
3595        default: /* fall through */
3596        case IB_7220_L_STATE_DOWN:
3597                state = IB_PORT_DOWN;
3598                break;
3599        }
3600        return state;
3601}
3602
3603/* returns the IBTA port state, rather than the IBC link training state */
3604static u8 qib_7220_phys_portstate(u64 ibcs)
3605{
3606        u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState);
3607        return qib_7220_physportstate[state];
3608}
3609
3610static int qib_7220_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
3611{
3612        int ret = 0, symadj = 0;
3613        struct qib_devdata *dd = ppd->dd;
3614        unsigned long flags;
3615
3616        spin_lock_irqsave(&ppd->lflags_lock, flags);
3617        ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
3618        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3619
3620        if (!ibup) {
3621                /*
3622                 * When the link goes down we don't want AEQ running, so it
3623                 * won't interfere with IBC training, etc., and we need
3624                 * to go back to the static SerDes preset values.
3625                 */
3626                if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
3627                                     QIBL_IB_AUTONEG_INPROG)))
3628                        set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled);
3629                if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
3630                        qib_sd7220_presets(dd);
3631                        qib_cancel_sends(ppd); /* initial disarm, etc. */
3632                        spin_lock_irqsave(&ppd->sdma_lock, flags);
3633                        if (__qib_sdma_running(ppd))
3634                                __qib_sdma_process_event(ppd,
3635                                        qib_sdma_event_e70_go_idle);
3636                        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
3637                }
3638                /* this might be better in qib_sd7220_presets() */
3639                set_7220_relock_poll(dd, ibup);
3640        } else {
3641                if (qib_compat_ddr_negotiate &&
3642                    !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
3643                                     QIBL_IB_AUTONEG_INPROG)) &&
3644                    ppd->link_speed_active == QIB_IB_SDR &&
3645                    (ppd->link_speed_enabled & (QIB_IB_DDR | QIB_IB_SDR)) ==
3646                    (QIB_IB_DDR | QIB_IB_SDR) &&
3647                    dd->cspec->autoneg_tries < AUTONEG_TRIES) {
3648                        /* we are SDR, and DDR auto-negotiation enabled */
3649                        ++dd->cspec->autoneg_tries;
3650                        if (!ppd->cpspec->ibdeltainprog) {
3651                                ppd->cpspec->ibdeltainprog = 1;
3652                                ppd->cpspec->ibsymsnap = read_7220_creg32(dd,
3653                                        cr_ibsymbolerr);
3654                                ppd->cpspec->iblnkerrsnap = read_7220_creg32(dd,
3655                                        cr_iblinkerrrecov);
3656                        }
3657                        try_7220_autoneg(ppd);
3658                        ret = 1; /* no other IB status change processing */
3659                } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
3660                           ppd->link_speed_active == QIB_IB_SDR) {
3661                        autoneg_7220_send(ppd, 1);
3662                        set_7220_ibspeed_fast(ppd, QIB_IB_DDR);
3663                        udelay(2);
3664                        toggle_7220_rclkrls(dd);
3665                        ret = 1; /* no other IB status change processing */
3666                } else {
3667                        if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
3668                            (ppd->link_speed_active & QIB_IB_DDR)) {
3669                                spin_lock_irqsave(&ppd->lflags_lock, flags);
3670                                ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
3671                                                 QIBL_IB_AUTONEG_FAILED);
3672                                spin_unlock_irqrestore(&ppd->lflags_lock,
3673                                                       flags);
3674                                dd->cspec->autoneg_tries = 0;
3675                                /* re-enable SDR, for next link down */
3676                                set_7220_ibspeed_fast(ppd,
3677                                                      ppd->link_speed_enabled);
3678                                wake_up(&ppd->cpspec->autoneg_wait);
3679                                symadj = 1;
3680                        } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
3681                                /*
3682                                 * Clear autoneg failure flag, and do setup
3683                                 * so we'll try next time link goes down and
3684                                 * back to INIT (possibly connected to a
3685                                 * different device).
3686                                 */
3687                                spin_lock_irqsave(&ppd->lflags_lock, flags);
3688                                ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3689                                spin_unlock_irqrestore(&ppd->lflags_lock,
3690                                                       flags);
3691                                ppd->cpspec->ibcddrctrl |=
3692                                        IBA7220_IBC_IBTA_1_2_MASK;
3693                                qib_write_kreg(dd, kr_ncmodectrl, 0);
3694                                symadj = 1;
3695                        }
3696                }
3697
3698                if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
3699                        symadj = 1;
3700
3701                if (!ret) {
3702                        ppd->delay_mult = rate_to_delay
3703                            [(ibcs >> IBA7220_LINKSPEED_SHIFT) & 1]
3704                            [(ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1];
3705
3706                        set_7220_relock_poll(dd, ibup);
3707                        spin_lock_irqsave(&ppd->sdma_lock, flags);
3708                        /*
3709                         * Unlike 7322, the 7220 needs this, due to lack of
3710                         * interrupt in some cases when we have sdma active
3711                         * when the link goes down.
3712                         */
3713                        if (ppd->sdma_state.current_state !=
3714                            qib_sdma_state_s20_idle)
3715                                __qib_sdma_process_event(ppd,
3716                                        qib_sdma_event_e00_go_hw_down);
3717                        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
3718                }
3719        }
3720
3721        if (symadj) {
3722                if (ppd->cpspec->ibdeltainprog) {
3723                        ppd->cpspec->ibdeltainprog = 0;
3724                        ppd->cpspec->ibsymdelta += read_7220_creg32(ppd->dd,
3725                                cr_ibsymbolerr) - ppd->cpspec->ibsymsnap;
3726                        ppd->cpspec->iblnkerrdelta += read_7220_creg32(ppd->dd,
3727                                cr_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
3728                }
3729        } else if (!ibup && qib_compat_ddr_negotiate &&
3730                   !ppd->cpspec->ibdeltainprog &&
3731                        !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
3732                ppd->cpspec->ibdeltainprog = 1;
3733                ppd->cpspec->ibsymsnap = read_7220_creg32(ppd->dd,
3734                                                          cr_ibsymbolerr);
3735                ppd->cpspec->iblnkerrsnap = read_7220_creg32(ppd->dd,
3736                                                     cr_iblinkerrrecov);
3737        }
3738
3739        if (!ret)
3740                qib_setup_7220_setextled(ppd, ibup);
3741        return ret;
3742}
3743
3744/*
3745 * Does a read/modify/write of the appropriate registers to
3746 * set the output and direction bits selected by mask.
3747 * These are in their canonical positions (e.g. the lsb of
3748 * dir will end up in D48 of extctrl on existing chips).
3749 * Returns the contents of the GP Inputs.
3750 */
3751static int gpio_7220_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
3752{
3753        u64 read_val, new_out;
3754        unsigned long flags;
3755
3756        if (mask) {
3757                /* some bits being written, lock access to GPIO */
3758                dir &= mask;
3759                out &= mask;
3760                spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
3761                dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
3762                dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
3763                new_out = (dd->cspec->gpio_out & ~mask) | out;
3764
3765                qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
3766                qib_write_kreg(dd, kr_gpio_out, new_out);
3767                dd->cspec->gpio_out = new_out;
3768                spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
3769        }
3770        /*
3771         * It is unlikely that a read at this time would get valid
3772         * data on a pin whose direction line was set in the same
3773         * call to this function. We include the read here because
3774         * that allows us to potentially combine a change on one pin with
3775         * a read on another, and because the old code did something like
3776         * this.
3777         */
3778        read_val = qib_read_kreg64(dd, kr_extstatus);
3779        return SYM_FIELD(read_val, EXTStatus, GPIOIn);
3780}
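
/*
 * Usage sketches (hypothetical pin numbers): drive GPIO pin 3 high as
 * an output,
 *
 *	gpio_7220_mod(dd, 1 << 3, 1 << 3, 1 << 3);
 *
 * or sample the GP inputs without modifying anything; mask == 0 skips
 * the locked read-modify-write entirely:
 *
 *	int pins = gpio_7220_mod(dd, 0, 0, 0);
 */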
3781
3782/*
3783 * Read fundamental info we need to use the chip.  These are
3784 * the registers that describe chip capabilities, and are
3785 * saved in shadow registers.
3786 */
3787static void get_7220_chip_params(struct qib_devdata *dd)
3788{
3789        u64 val;
3790        u32 piobufs;
3791        int mtu;
3792
3793        dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
3794
3795        dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
3796        dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
3797        dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
3798        dd->palign = qib_read_kreg32(dd, kr_palign);
3799        dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
3800        dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
3801
3802        val = qib_read_kreg64(dd, kr_sendpiosize);
3803        dd->piosize2k = val & ~0U;
3804        dd->piosize4k = val >> 32;
3805
3806        mtu = ib_mtu_enum_to_int(qib_ibmtu);
3807        if (mtu == -1)
3808                mtu = QIB_DEFAULT_MTU;
3809        dd->pport->ibmtu = (u32)mtu;
3810
3811        val = qib_read_kreg64(dd, kr_sendpiobufcnt);
3812        dd->piobcnt2k = val & ~0U;
3813        dd->piobcnt4k = val >> 32;
3814        /* these may be adjusted in init_chip_wc_pat() */
3815        dd->pio2kbase = (u32 __iomem *)
3816                ((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
3817        if (dd->piobcnt4k) {
3818                dd->pio4kbase = (u32 __iomem *)
3819                        ((char __iomem *) dd->kregbase +
3820                         (dd->piobufbase >> 32));
3821                /*
3822                 * 4K buffers take 2 pages; we use roundup just to be
3823                 * paranoid; we calculate it once here, rather than on
3824                 * every buf allocate.
3825                 */
3826                dd->align4k = ALIGN(dd->piosize4k, dd->palign);
3827        }
3828
3829        piobufs = dd->piobcnt4k + dd->piobcnt2k;
3830
3831        dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
3832                (sizeof(u64) * BITS_PER_BYTE / 2);
3833}
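
/*
 * pioavregs arithmetic, worked through: sizeof(u64) * BITS_PER_BYTE / 2
 * is 32, i.e. each 64-bit pioavail register carries 2 status bits for
 * each of 32 send buffers.  With hypothetical counts piobcnt2k = 144
 * and piobcnt4k = 32, piobufs = 176, and
 *
 *	pioavregs = ALIGN(176, 32) / 32 = 192 / 32 = 6
 *
 * shadow registers are needed.
 */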
3834
3835/*
3836 * The chip base addresses in cspec and cpspec have to be set
3837 * after possible init_chip_wc_pat(), rather than in
3838 * get_7220_chip_params(), so it is split out as a separate function.
3839 */
3840static void set_7220_baseaddrs(struct qib_devdata *dd)
3841{
3842        u32 cregbase;
3843        /* init after possible re-map in init_chip_wc_pat() */
3844        cregbase = qib_read_kreg32(dd, kr_counterregbase);
3845        dd->cspec->cregbase = (u64 __iomem *)
3846                ((char __iomem *) dd->kregbase + cregbase);
3847
3848        dd->egrtidbase = (u64 __iomem *)
3849                ((char __iomem *) dd->kregbase + dd->rcvegrbase);
3850}
3851
3853#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl, SendIntBufAvail) |        \
3854                           SYM_MASK(SendCtrl, SPioEnable) |             \
3855                           SYM_MASK(SendCtrl, SSpecialTriggerEn) |      \
3856                           SYM_MASK(SendCtrl, SendBufAvailUpd) |        \
3857                           SYM_MASK(SendCtrl, AvailUpdThld) |           \
3858                           SYM_MASK(SendCtrl, SDmaEnable) |             \
3859                           SYM_MASK(SendCtrl, SDmaIntEnable) |          \
3860                           SYM_MASK(SendCtrl, SDmaHalt) |               \
3861                           SYM_MASK(SendCtrl, SDmaSingleDescriptor))
3862
3863static int sendctrl_hook(struct qib_devdata *dd,
3864                         const struct diag_observer *op,
3865                         u32 offs, u64 *data, u64 mask, int only_32)
3866{
3867        unsigned long flags;
3868        unsigned idx = offs / sizeof(u64);
3869        u64 local_data, all_bits;
3870
3871        if (idx != kr_sendctrl) {
3872                qib_dev_err(dd, "SendCtrl Hook called with offs %X, %s-bit\n",
3873                            offs, only_32 ? "32" : "64");
3874                return 0;
3875        }
3876
3877        all_bits = ~0ULL;
3878        if (only_32)
3879                all_bits >>= 32;
3880        spin_lock_irqsave(&dd->sendctrl_lock, flags);
3881        if ((mask & all_bits) != all_bits) {
3882                /*
3883                 * At least some mask bits are zero, so we need
3884                 * to read. The judgement call is whether from
3885                 * reg or shadow. First-cut: read reg, and complain
3886                 * if any bits which should be shadowed are different
3887                 * from their shadowed value.
3888                 */
3889                if (only_32)
3890                        local_data = (u64)qib_read_kreg32(dd, idx);
3891                else
3892                        local_data = qib_read_kreg64(dd, idx);
3893                qib_dev_err(dd, "Sendctrl -> %X, Shad -> %X\n",
3894                            (u32)local_data, (u32)dd->sendctrl);
3895                if ((local_data & SENDCTRL_SHADOWED) !=
3896                    (dd->sendctrl & SENDCTRL_SHADOWED))
3897                        qib_dev_err(dd, "Sendctrl read: %X shadow is %X\n",
3898                                (u32)local_data, (u32) dd->sendctrl);
3899                *data = (local_data & ~mask) | (*data & mask);
3900        }
3901        if (mask) {
3902                /*
3903                 * At least some mask bits are one, so we need
3904                 * to write, but only shadow some bits.
3905                 */
3906                u64 sval, tval; /* Shadowed, transient */
3907
3908                /*
3909                 * New shadow val is bits we don't want to touch,
3910                 * ORed with bits we do, that are intended for shadow.
3911                 */
3912                sval = (dd->sendctrl & ~mask);
3913                sval |= *data & SENDCTRL_SHADOWED & mask;
3914                dd->sendctrl = sval;
3915                tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
3916                qib_dev_err(dd, "Sendctrl <- %X, Shad <- %X\n",
3917                            (u32)tval, (u32)sval);
3918                qib_write_kreg(dd, kr_sendctrl, tval);
3919                qib_write_kreg(dd, kr_scratch, 0ULL);
3920        }
3921        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
3922
3923        return only_32 ? 4 : 8;
3924}
3925
3926static const struct diag_observer sendctrl_observer = {
3927        sendctrl_hook, kr_sendctrl * sizeof(u64),
3928        kr_sendctrl * sizeof(u64)
3929};
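
/*
 * Worked example (illustrative): suppose a diag write arrives whose mask
 * covers both a shadowed bit (e.g. SPioEnable) and a transient bit.  In
 * sendctrl_hook above:
 *
 *	sval = (dd->sendctrl & ~mask)			   untouched shadow
 *	     | (*data & SENDCTRL_SHADOWED & mask)	   new shadowed bits
 *	tval = sval | (*data & ~SENDCTRL_SHADOWED & mask)  plus transient bits
 *
 * The chip write (tval) carries both kinds of bits, while dd->sendctrl
 * records only the SENDCTRL_SHADOWED subset, so later shadow-based
 * rewrites do not replay one-shot operations.
 */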
3930
3931/*
3932 * Write the final few registers that depend on some of the
3933 * init setup.  Done late in init, just before bringing up
3934 * the serdes.
3935 */
3936static int qib_late_7220_initreg(struct qib_devdata *dd)
3937{
3938        int ret = 0;
3939        u64 val;
3940
3941        qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
3942        qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
3943        qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
3944        qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
3945        val = qib_read_kreg64(dd, kr_sendpioavailaddr);
3946        if (val != dd->pioavailregs_phys) {
3947                qib_dev_err(dd,
3948                        "Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
3949                        (unsigned long) dd->pioavailregs_phys,
3950                        (unsigned long long) val);
3951                ret = -EINVAL;
3952        }
3953        qib_register_observer(dd, &sendctrl_observer);
3954        return ret;
3955}
3956
3957static int qib_init_7220_variables(struct qib_devdata *dd)
3958{
3959        struct qib_chippport_specific *cpspec;
3960        struct qib_pportdata *ppd;
3961        int ret = 0;
3962        u32 sbufs, updthresh;
3963
3964        cpspec = (struct qib_chippport_specific *)(dd + 1);
3965        ppd = &cpspec->pportdata;
3966        dd->pport = ppd;
3967        dd->num_pports = 1;
3968
3969        dd->cspec = (struct qib_chip_specific *)(cpspec + dd->num_pports);
3970        dd->cspec->dd = dd;
3971        ppd->cpspec = cpspec;
3972
3973        spin_lock_init(&dd->cspec->sdepb_lock);
3974        spin_lock_init(&dd->cspec->rcvmod_lock);
3975        spin_lock_init(&dd->cspec->gpio_lock);
3976
3977        /* we haven't yet set QIB_PRESENT, so use read directly */
3978        dd->revision = readq(&dd->kregbase[kr_revision]);
3979
3980        if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
3981                qib_dev_err(dd,
3982                        "Revision register read failure, giving up initialization\n");
3983                ret = -ENODEV;
3984                goto bail;
3985        }
3986        dd->flags |= QIB_PRESENT;  /* now register routines work */
3987
3988        dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R,
3989                                    ChipRevMajor);
3990        dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R,
3991                                    ChipRevMinor);
3992
3993        get_7220_chip_params(dd);
3994        qib_7220_boardname(dd);
3995
3996        /*
3997         * GPIO bits for TWSI data and clock,
3998         * used for serial EEPROM.
3999         */
4000        dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
4001        dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
4002        dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
4003
4004        dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
4005                QIB_NODMA_RTAIL | QIB_HAS_THRESH_UPDATE;
4006        dd->flags |= qib_special_trigger ?
4007                QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
4008
4009        init_waitqueue_head(&cpspec->autoneg_wait);
4010        INIT_DELAYED_WORK(&cpspec->autoneg_work, autoneg_7220_work);
4011
4012        ret = qib_init_pportdata(ppd, dd, 0, 1);
4013        if (ret)
4014                goto bail;
4015        ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
4016        ppd->link_speed_supported = QIB_IB_SDR | QIB_IB_DDR;
4017
4018        ppd->link_width_enabled = ppd->link_width_supported;
4019        ppd->link_speed_enabled = ppd->link_speed_supported;
4020        /*
4021         * Set the initial values to reasonable default, will be set
4022         * for real when link is up.
4023         */
4024        ppd->link_width_active = IB_WIDTH_4X;
4025        ppd->link_speed_active = QIB_IB_SDR;
4026        ppd->delay_mult = rate_to_delay[0][1];
4027        ppd->vls_supported = IB_VL_VL0;
4028        ppd->vls_operational = ppd->vls_supported;
4029
4030        if (!qib_mini_init)
4031                qib_write_kreg(dd, kr_rcvbthqp, QIB_KD_QP);
4032
4033        timer_setup(&ppd->cpspec->chase_timer, reenable_7220_chase, 0);
4034
4035        qib_num_cfg_vls = 1; /* if any 7220s, only one VL */
4036
4037        dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
4038        dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
4039        dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
4041
4042        /* we always allocate at least 2048 bytes for eager buffers */
4043        ret = ib_mtu_enum_to_int(qib_ibmtu);
4044        dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
4045        BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
4046        dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
4047
4048        qib_7220_tidtemplate(dd);
4049
4050        /*
4051         * We can request a receive interrupt for 1 or
4052         * more packets from current offset.  For now, we set this
4053         * up for a single packet.
4054         */
4055        dd->rhdrhead_intr_off = 1ULL << 32;
4056
4057        /* setup the stats timer; the add_timer is done at end of init */
4058        timer_setup(&dd->stats_timer, qib_get_7220_faststats, 0);
4059        dd->stats_timer.expires = jiffies + ACTIVITY_TIMER * HZ;
4060
4061        /*
4062         * Control[4] has been added to change the arbitration within
4063         * the SDMA engine between favoring data fetches over descriptor
4064         * fetches.  qib_sdma_fetch_arb==0 gives data fetches priority.
4065         */
4066        if (qib_sdma_fetch_arb)
4067                dd->control |= 1 << 4;
4068
4069        dd->ureg_align = 0x10000;  /* 64KB alignment */
4070
4071        dd->piosize2kmax_dwords = (dd->piosize2k >> 2) - 1;
4072        qib_7220_config_ctxts(dd);
4073        qib_set_ctxtcnt(dd);  /* needed for PAT setup */
4074
4075        ret = init_chip_wc_pat(dd, 0);
4076        if (ret)
4077                goto bail;
4078        set_7220_baseaddrs(dd); /* set chip access pointers now */
4079
4080        ret = 0;
4081        if (qib_mini_init)
4082                goto bail;
4083
4084        ret = qib_create_ctxts(dd);
4085        init_7220_cntrnames(dd);
4086
4087        /* Use all of the 4KB buffers for kernel SDMA, zero if !SDMA.
4088         * Reserve the update threshold amount for other kernel use, such
4089         * as sending SMI, MAD, and ACKs, or 3, whichever is greater;
4090         * if we aren't enabling SDMA, the kernel instead gets to use
4091         * all the 4k bufs itself.
4092         * If the reserve were less than the update threshold, we could
4093         * wait a long time for an update.  Coded this way because we
4094         * sometimes change the update threshold for various reasons,
4095         * and we want this to remain robust.
4096         */
4097        updthresh = 8U; /* update threshold */
4098        if (dd->flags & QIB_HAS_SEND_DMA) {
4099                dd->cspec->sdmabufcnt =  dd->piobcnt4k;
4100                sbufs = updthresh > 3 ? updthresh : 3;
4101        } else {
4102                dd->cspec->sdmabufcnt = 0;
4103                sbufs = dd->piobcnt4k;
4104        }
4105
4106        dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
4107                dd->cspec->sdmabufcnt;
4108        dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
4109        dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
4110        dd->last_pio = dd->cspec->lastbuf_for_pio;
4111        dd->pbufsctxt = dd->lastctxt_piobuf /
4112                (dd->cfgctxts - dd->first_user_ctxt);
4113
4114        /*
4115         * If we are at 16 user contexts, we will have only 7 sbufs
4116         * per context, so drop the update threshold to match.  We
4117         * want to update before we actually run out at low pbufs/ctxt,
4118         * so give ourselves some margin.
4119         */
4120        if ((dd->pbufsctxt - 2) < updthresh)
4121                updthresh = dd->pbufsctxt - 2;
4122
4123        dd->cspec->updthresh_dflt = updthresh;
4124        dd->cspec->updthresh = updthresh;
4125
4126        /* before full enable, no interrupts, no locking needed */
4127        dd->sendctrl |= (updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
4128                             << SYM_LSB(SendCtrl, AvailUpdThld);
4129
4130        dd->psxmitwait_supported = 1;
4131        dd->psxmitwait_check_rate = QIB_7220_PSXMITWAIT_CHECK_RATE;
4132bail:
4133        return ret;
4134}
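
/*
 * Worked example of the send-buffer accounting above (counts are
 * hypothetical): with piobcnt2k = 144, piobcnt4k = 64 and SDMA enabled,
 * sdmabufcnt = 64 and sbufs = max(updthresh, 3) = 8, so
 *
 *	lastbuf_for_pio = 144 + 64 - 64 = 144, then 143 after the "--"
 *	lastctxt_piobuf = 144 - 8 = 136
 *	pbufsctxt       = 136 / 16 = 8		(16 user contexts)
 *
 * and since (8 - 2) < 8, updthresh drops from 8 to 6 so an avail update
 * arrives before a context's few buffers are exhausted.
 */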
4135
4136static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
4137                                        u32 *pbufnum)
4138{
4139        u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
4140        struct qib_devdata *dd = ppd->dd;
4141        u32 __iomem *buf;
4142
4143        if (((pbc >> 32) & PBC_7220_VL15_SEND_CTRL) &&
4144            !(ppd->lflags & (QIBL_IB_AUTONEG_INPROG | QIBL_LINKACTIVE))) {
4145                buf = get_7220_link_buf(ppd, pbufnum);
4146        } else {
4147                if ((plen + 1) > dd->piosize2kmax_dwords)
4148                        first = dd->piobcnt2k;
4149                else
4150                        first = 0;
4151                /* try 4k if all 2k busy, so same last for both sizes */
4152                last = dd->cspec->lastbuf_for_pio;
4153                buf = qib_getsendbuf_range(dd, pbufnum, first, last);
4154        }
4155        return buf;
4156}
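
/*
 * Selection example (hypothetical sizes): with piosize2k = 2048 bytes,
 * piosize2kmax_dwords = (2048 >> 2) - 1 = 511.  A 300-dword packet has
 * plen + 1 = 301 <= 511, so the search starts at buffer 0; a 600-dword
 * packet starts at dd->piobcnt2k, the first 4K buffer.  Both searches
 * end at lastbuf_for_pio, so 4K buffers back up the 2K pool when it is
 * busy.
 */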
4157
4158/* these 2 "counters" are really control registers, and are always RW */
4159static void qib_set_cntr_7220_sample(struct qib_pportdata *ppd, u32 intv,
4160                                     u32 start)
4161{
4162        write_7220_creg(ppd->dd, cr_psinterval, intv);
4163        write_7220_creg(ppd->dd, cr_psstart, start);
4164}
4165
4166/*
4167 * NOTE: no real attempt is made to generalize the SDMA stuff.
4168 * At some point "soon" we will have a new, more generalized
4169 * SDMA interface, and then we'll clean this up.
4170 */
4171
4172/* Must be called with sdma_lock held, or before init finished */
4173static void qib_sdma_update_7220_tail(struct qib_pportdata *ppd, u16 tail)
4174{
4175        /* Commit writes to memory and advance the tail on the chip */
4176        wmb();
4177        ppd->sdma_descq_tail = tail;
4178        qib_write_kreg(ppd->dd, kr_senddmatail, tail);
4179}
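
/*
 * Producer-side ordering sketch (illustrative; the descriptor layout is
 * hypothetical): descriptor writes must be globally visible before the
 * chip can observe the new tail, which is what the wmb() above ensures.
 *
 *	descqp[0] = cpu_to_le64(qw0);		   fill the descriptor
 *	descqp[1] = cpu_to_le64(qw1);
 *	tail = (tail + 1) % ppd->sdma_descq_cnt;
 *	qib_sdma_update_7220_tail(ppd, tail);	   barrier + tail write
 */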
4180
4181static void qib_sdma_set_7220_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
4182{
        /* nothing to do on the 7220; stub so generic SDMA code can call it */
4183}
4184
4185static struct sdma_set_state_action sdma_7220_action_table[] = {
4186        [qib_sdma_state_s00_hw_down] = {
4187                .op_enable = 0,
4188                .op_intenable = 0,
4189                .op_halt = 0,
4190                .go_s99_running_tofalse = 1,
4191        },
4192        [qib_sdma_state_s10_hw_start_up_wait] = {
4193                .op_enable = 1,
4194                .op_intenable = 1,
4195                .op_halt = 1,
4196        },
4197        [qib_sdma_state_s20_idle] = {
4198                .op_enable = 1,
4199                .op_intenable = 1,
4200                .op_halt = 1,
4201        },
4202        [qib_sdma_state_s30_sw_clean_up_wait] = {
4203                .op_enable = 0,
4204                .op_intenable = 1,
4205                .op_halt = 0,
4206        },
4207        [qib_sdma_state_s40_hw_clean_up_wait] = {
4208                .op_enable = 1,
4209                .op_intenable = 1,
4210                .op_halt = 1,
4211        },
4212        [qib_sdma_state_s50_hw_halt_wait] = {
4213                .op_enable = 1,
4214                .op_intenable = 1,
4215                .op_halt = 1,
4216        },
4217        [qib_sdma_state_s99_running] = {
4218                .op_enable = 1,
4219                .op_intenable = 1,
4220                .op_halt = 0,
4221                .go_s99_running_totrue = 1,
4222        },
4223};
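
/*
 * Consumption sketch: the generic SDMA state machine looks up the entry
 * for the state being entered and turns the flags into a sendctrl
 * operation, roughly (illustrative only; the real logic lives in the
 * shared sdma code):
 *
 *	op  = act->op_enable    ? QIB_SDMA_SENDCTRL_OP_ENABLE    : 0;
 *	op |= act->op_intenable ? QIB_SDMA_SENDCTRL_OP_INTENABLE : 0;
 *	op |= act->op_halt      ? QIB_SDMA_SENDCTRL_OP_HALT      : 0;
 *	ppd->dd->f_sdma_sendctrl(ppd, op);
 */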
4224
4225static void qib_7220_sdma_init_early(struct qib_pportdata *ppd)
4226{
4227        ppd->sdma_state.set_state_action = sdma_7220_action_table;
4228}
4229
4230static int init_sdma_7220_regs(struct qib_pportdata *ppd)
4231{
4232        struct qib_devdata *dd = ppd->dd;
4233        unsigned i, n;
4234        u64 senddmabufmask[3] = { 0 };
4235
4236        /* Set SendDmaBase */
4237        qib_write_kreg(dd, kr_senddmabase, ppd->sdma_descq_phys);
4238        qib_sdma_7220_setlengen(ppd);
4239        qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */
4240        /* Set SendDmaHeadAddr */
4241        qib_write_kreg(dd, kr_senddmaheadaddr, ppd->sdma_head_phys);
4242
4243        /*
4244         * Reserve all the former "kernel" piobufs, using high number range
4245         * so we get as many 4K buffers as possible
4246         */
4247        n = dd->piobcnt2k + dd->piobcnt4k;
4248        i = n - dd->cspec->sdmabufcnt;
4249
4250        for (; i < n; ++i) {
4251                unsigned word = i / 64;
4252                unsigned bit = i & 63;
4253
4254                BUG_ON(word >= 3);
4255                senddmabufmask[word] |= 1ULL << bit;
4256        }
4257        qib_write_kreg(dd, kr_senddmabufmask0, senddmabufmask[0]);
4258        qib_write_kreg(dd, kr_senddmabufmask1, senddmabufmask[1]);
4259        qib_write_kreg(dd, kr_senddmabufmask2, senddmabufmask[2]);
4260
4261        ppd->sdma_state.first_sendbuf = i;
4262        ppd->sdma_state.last_sendbuf = n;
4263
4264        return 0;
4265}
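
/*
 * Mask example (hypothetical counts): with n = 192 total buffers and
 * sdmabufcnt = 64, bits are set for buffers 128..191.  Buffer 128 maps
 * to word 128 / 64 = 2, bit 0, so senddmabufmask2 becomes ~0ULL while
 * masks 0 and 1 stay zero, handing the top 64 buffers to the engine.
 */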
4266
4267/* sdma_lock must be held */
4268static u16 qib_sdma_7220_gethead(struct qib_pportdata *ppd)
4269{
4270        struct qib_devdata *dd = ppd->dd;
4271        int sane;
4272        int use_dmahead;
4273        u16 swhead;
4274        u16 swtail;
4275        u16 cnt;
4276        u16 hwhead;
4277
4278        use_dmahead = __qib_sdma_running(ppd) &&
4279                (dd->flags & QIB_HAS_SDMA_TIMEOUT);
4280retry:
4281        hwhead = use_dmahead ?
4282                (u16)le64_to_cpu(*ppd->sdma_head_dma) :
4283                (u16)qib_read_kreg32(dd, kr_senddmahead);
4284
4285        swhead = ppd->sdma_descq_head;
4286        swtail = ppd->sdma_descq_tail;
4287        cnt = ppd->sdma_descq_cnt;
4288
4289        if (swhead < swtail) {
4290                /* not wrapped */
4291                sane = (hwhead >= swhead) && (hwhead <= swtail);
4292        } else if (swhead > swtail) {
4293                /* wrapped around */
4294                sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
4295                        (hwhead <= swtail);
4296        } else {
4297                /* empty */
4298                sane = (hwhead == swhead);
4299        }
4300
4301        if (unlikely(!sane)) {
4302                if (use_dmahead) {
4303                        /* try one more time, directly from the register */
4304                        use_dmahead = 0;
4305                        goto retry;
4306                }
4307                /* assume no progress */
4308                hwhead = swhead;
4309        }
4310
4311        return hwhead;
4312}
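
/*
 * Sanity-check example (hypothetical ring, cnt = 256): in the wrapped
 * case swhead = 200, swtail = 50, a valid hwhead lies in [200, 255] or
 * [0, 50].  A read of, say, 100 is insane: if it came from the DMA'd
 * head copy we retry once from the register, and if still insane we
 * return swhead, assuming no progress rather than corrupting the ring
 * accounting.
 */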
4313
4314static int qib_sdma_7220_busy(struct qib_pportdata *ppd)
4315{
4316        u64 hwstatus = qib_read_kreg64(ppd->dd, kr_senddmastatus);
4317
4318        return (hwstatus & SYM_MASK(SendDmaStatus, ScoreBoardDrainInProg)) ||
4319               (hwstatus & SYM_MASK(SendDmaStatus, AbortInProg)) ||
4320               (hwstatus & SYM_MASK(SendDmaStatus, InternalSDmaEnable)) ||
4321               !(hwstatus & SYM_MASK(SendDmaStatus, ScbEmpty));
4322}
4323
4324/*
4325 * Compute the amount of delay before sending the next packet if the
4326 * port's send rate differs from the static rate set for the QP.
4327 * Since the delay affects this packet but the amount of the delay is
4328 * based on the length of the previous packet, use the last delay computed
4329 * and save the delay count for this packet to be used next time
4330 * we get here.
4331 */
4332static u32 qib_7220_setpbc_control(struct qib_pportdata *ppd, u32 plen,
4333                                   u8 srate, u8 vl)
4334{
4335        u8 snd_mult = ppd->delay_mult;
4336        u8 rcv_mult = ib_rate_to_delay[srate];
4337        u32 ret = ppd->cpspec->last_delay_mult;
4338
4339        ppd->cpspec->last_delay_mult = (rcv_mult > snd_mult) ?
4340                (plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;
4341
4342        /* Indicate VL15, if necessary */
4343        if (vl == 15)
4344                ret |= PBC_7220_VL15_SEND_CTRL;
4345        return ret;
4346}
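
/*
 * Delay example (multiplier values hypothetical; the real mapping is in
 * ib_rate_to_delay and rate_to_delay): a DDR port (snd_mult = 1) sending
 * on a QP with static SDR rate (rcv_mult = 2) and plen = 512 dwords
 * computes (512 * (2 - 1) + 1) >> 1 = 256.  That count is saved for the
 * *next* packet, while this packet carries the previously saved count.
 */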
4347
4348static void qib_7220_initvl15_bufs(struct qib_devdata *dd)
4349{
        /* no VL15 buffer initialization needed on the 7220 */
4350}
4351
4352static void qib_7220_init_ctxt(struct qib_ctxtdata *rcd)
4353{
4354        if (!rcd->ctxt) {
4355                rcd->rcvegrcnt = IBA7220_KRCVEGRCNT;
4356                rcd->rcvegr_tid_base = 0;
4357        } else {
4358                rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
4359                rcd->rcvegr_tid_base = IBA7220_KRCVEGRCNT +
4360                        (rcd->ctxt - 1) * rcd->rcvegrcnt;
4361        }
4362}
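
/*
 * Layout example: kernel context 0 owns the first IBA7220_KRCVEGRCNT
 * eager TIDs.  With a hypothetical per-user-context rcvegrcnt of 512,
 * context 1 gets TID base IBA7220_KRCVEGRCNT and context 2 gets base
 * IBA7220_KRCVEGRCNT + 512, so user regions follow the kernel region
 * back to back.
 */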
4363
4364static void qib_7220_txchk_change(struct qib_devdata *dd, u32 start,
4365                                  u32 len, u32 which, struct qib_ctxtdata *rcd)
4366{
4367        int i;
4368        unsigned long flags;
4369
4370        switch (which) {
4371        case TXCHK_CHG_TYPE_KERN:
4372                /* see if we need to raise avail update threshold */
4373                spin_lock_irqsave(&dd->uctxt_lock, flags);
4374                for (i = dd->first_user_ctxt;
4375                     dd->cspec->updthresh != dd->cspec->updthresh_dflt
4376                     && i < dd->cfgctxts; i++)
4377                        if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
4378                           ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
4379                           < dd->cspec->updthresh_dflt)
4380                                break;
4381                spin_unlock_irqrestore(&dd->uctxt_lock, flags);
4382                if (i == dd->cfgctxts) {
4383                        spin_lock_irqsave(&dd->sendctrl_lock, flags);
4384                        dd->cspec->updthresh = dd->cspec->updthresh_dflt;
4385                        dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
4386                        dd->sendctrl |= (dd->cspec->updthresh &
4387                                         SYM_RMASK(SendCtrl, AvailUpdThld)) <<
4388                                           SYM_LSB(SendCtrl, AvailUpdThld);
4389                        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4390                        sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
4391                }
4392                break;
4393        case TXCHK_CHG_TYPE_USER:
4394                spin_lock_irqsave(&dd->sendctrl_lock, flags);
4395                if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
4396                        / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
4397                        dd->cspec->updthresh = (rcd->piocnt /
4398                                                rcd->subctxt_cnt) - 1;
4399                        dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
4400                        dd->sendctrl |= (dd->cspec->updthresh &
4401                                        SYM_RMASK(SendCtrl, AvailUpdThld))
4402                                        << SYM_LSB(SendCtrl, AvailUpdThld);
4403                        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4404                        sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
4405                } else {
4406                        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
                }
4407                break;
4408        }
4409}
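
/*
 * Threshold example: a user context with piocnt = 16 and subctxt_cnt = 4
 * leaves (16 / 4) - 1 = 3 buffers of headroom per subcontext, so a larger
 * current updthresh is lowered to 3 and AvailUpdThld is reprogrammed
 * (with an AVAIL_BLIP so the change takes effect).  Once the last such
 * context is gone, the TXCHK_CHG_TYPE_KERN case restores the default.
 */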
4410
4411static void writescratch(struct qib_devdata *dd, u32 val)
4412{
4413        qib_write_kreg(dd, kr_scratch, val);
4414}
4415
4416#define VALID_TS_RD_REG_MASK 0xBF	/* 0b10111111: bit 6 clear, reg 6 absent */
4417/**
4418 * qib_7220_tempsense_rd - read register of temp sensor via TWSI
4419 * @dd: the qlogic_ib device
4420 * @regnum: register to read from
4421 *
4422 * returns reg contents (0..255) or < 0 for error
4423 */
4424static int qib_7220_tempsense_rd(struct qib_devdata *dd, int regnum)
4425{
4426        int ret;
4427        u8 rdata;
4428
4429        if (regnum > 7) {
4430                ret = -EINVAL;
4431                goto bail;
4432        }
4433
4434        /* return a bogus value for (the one) register we do not have */
4435        if (!((1 << regnum) & VALID_TS_RD_REG_MASK)) {
4436                ret = 0;
4437                goto bail;
4438        }
4439
4440        ret = mutex_lock_interruptible(&dd->eep_lock);
4441        if (ret)
4442                goto bail;
4443
4444        ret = qib_twsi_blk_rd(dd, QIB_TWSI_TEMP_DEV, regnum, &rdata, 1);
4445        if (!ret)
4446                ret = rdata;
4447
4448        mutex_unlock(&dd->eep_lock);
4449
4450        /*
4451         * There are three possibilities here:
4452         * ret is actual value (0..255)
4453         * ret is -ENXIO or -EINVAL from twsi code or this file
4454         * ret is -EINTR from mutex_lock_interruptible.
4455         */
4456bail:
4457        return ret;
4458}
4459
4460#ifdef CONFIG_INFINIBAND_QIB_DCA
4461static int qib_7220_notify_dca(struct qib_devdata *dd, unsigned long event)
4462{
        /* DCA is not handled on the 7220; nothing to notify */
4463        return 0;
4464}
4465#endif
4466
4467/* Dummy function, as 7220 boards never disable EEPROM Write */
4468static int qib_7220_eeprom_wen(struct qib_devdata *dd, int wen)
4469{
4470        return 1;
4471}
4472
4473/**
4474 * qib_init_iba7220_funcs - set up the chip-specific function pointers
4475 * @pdev: the pci_dev for the qlogic_ib device
4476 * @ent: pci_device_id struct for this dev
4477 *
4478 * This is global, and is called directly at init to set up the
4479 * chip-specific function pointers for later use.
4480 */
4481struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev,
4482                                           const struct pci_device_id *ent)
4483{
4484        struct qib_devdata *dd;
4485        int ret;
4486        u32 boardid, minwidth;
4487
4488        dd = qib_alloc_devdata(pdev, sizeof(struct qib_chip_specific) +
4489                sizeof(struct qib_chippport_specific));
4490        if (IS_ERR(dd))
4491                goto bail;
4492
4493        dd->f_bringup_serdes    = qib_7220_bringup_serdes;
4494        dd->f_cleanup           = qib_setup_7220_cleanup;
4495        dd->f_clear_tids        = qib_7220_clear_tids;
4496        dd->f_free_irq          = qib_free_irq;
4497        dd->f_get_base_info     = qib_7220_get_base_info;
4498        dd->f_get_msgheader     = qib_7220_get_msgheader;
4499        dd->f_getsendbuf        = qib_7220_getsendbuf;
4500        dd->f_gpio_mod          = gpio_7220_mod;
4501        dd->f_eeprom_wen        = qib_7220_eeprom_wen;
4502        dd->f_hdrqempty         = qib_7220_hdrqempty;
4503        dd->f_ib_updown         = qib_7220_ib_updown;
4504        dd->f_init_ctxt         = qib_7220_init_ctxt;
4505        dd->f_initvl15_bufs     = qib_7220_initvl15_bufs;
4506        dd->f_intr_fallback     = qib_7220_intr_fallback;
4507        dd->f_late_initreg      = qib_late_7220_initreg;
4508        dd->f_setpbc_control    = qib_7220_setpbc_control;
4509        dd->f_portcntr          = qib_portcntr_7220;
4510        dd->f_put_tid           = qib_7220_put_tid;
4511        dd->f_quiet_serdes      = qib_7220_quiet_serdes;
4512        dd->f_rcvctrl           = rcvctrl_7220_mod;
4513        dd->f_read_cntrs        = qib_read_7220cntrs;
4514        dd->f_read_portcntrs    = qib_read_7220portcntrs;
4515        dd->f_reset             = qib_setup_7220_reset;
4516        dd->f_init_sdma_regs    = init_sdma_7220_regs;
4517        dd->f_sdma_busy         = qib_sdma_7220_busy;
4518        dd->f_sdma_gethead      = qib_sdma_7220_gethead;
4519        dd->f_sdma_sendctrl     = qib_7220_sdma_sendctrl;
4520        dd->f_sdma_set_desc_cnt = qib_sdma_set_7220_desc_cnt;
4521        dd->f_sdma_update_tail  = qib_sdma_update_7220_tail;
4522        dd->f_sdma_hw_clean_up  = qib_7220_sdma_hw_clean_up;
4523        dd->f_sdma_hw_start_up  = qib_7220_sdma_hw_start_up;
4524        dd->f_sdma_init_early   = qib_7220_sdma_init_early;
4525        dd->f_sendctrl          = sendctrl_7220_mod;
4526        dd->f_set_armlaunch     = qib_set_7220_armlaunch;
4527        dd->f_set_cntr_sample   = qib_set_cntr_7220_sample;
4528        dd->f_iblink_state      = qib_7220_iblink_state;
4529        dd->f_ibphys_portstate  = qib_7220_phys_portstate;
4530        dd->f_get_ib_cfg        = qib_7220_get_ib_cfg;
4531        dd->f_set_ib_cfg        = qib_7220_set_ib_cfg;
4532        dd->f_set_ib_loopback   = qib_7220_set_loopback;
4533        dd->f_set_intr_state    = qib_7220_set_intr_state;
4534        dd->f_setextled         = qib_setup_7220_setextled;
4535        dd->f_txchk_change      = qib_7220_txchk_change;
4536        dd->f_update_usrhead    = qib_update_7220_usrhead;
4537        dd->f_wantpiobuf_intr   = qib_wantpiobuf_7220_intr;
4538        dd->f_xgxs_reset        = qib_7220_xgxs_reset;
4539        dd->f_writescratch      = writescratch;
4540        dd->f_tempsense_rd      = qib_7220_tempsense_rd;
4541#ifdef CONFIG_INFINIBAND_QIB_DCA
4542        dd->f_notify_dca = qib_7220_notify_dca;
4543#endif
4544        /*
4545         * Do remaining pcie setup and save pcie values in dd.
4546         * Any error printing is already done by the init code.
4547         * On return, we have the chip mapped, but chip registers
4548         * are not set up until start of qib_init_7220_variables.
4549         */
4550        ret = qib_pcie_ddinit(dd, pdev, ent);
4551        if (ret < 0)
4552                goto bail_free;
4553
4554        /* initialize chip-specific variables */
4555        ret = qib_init_7220_variables(dd);
4556        if (ret)
4557                goto bail_cleanup;
4558
4559        if (qib_mini_init)
4560                goto bail;
4561
4562        boardid = SYM_FIELD(dd->revision, Revision, BoardID);
4564        switch (boardid) {
4565        case 0:
4566        case 2:
4567        case 10:
4568        case 12:
4569                minwidth = 16; /* x16 capable boards */
4570                break;
4571        default:
4572                minwidth = 8; /* x8 capable boards */
4573                break;
4574        }
4575        if (qib_pcie_params(dd, minwidth, NULL))
4576                qib_dev_err(dd,
4577                        "Failed to setup PCIe or interrupts; continuing anyway\n");
4578
4579        if (qib_read_kreg64(dd, kr_hwerrstatus) &
4580            QLOGIC_IB_HWE_SERDESPLLFAILED)
4581                qib_write_kreg(dd, kr_hwerrclear,
4582                               QLOGIC_IB_HWE_SERDESPLLFAILED);
4583
4584        /* setup interrupt handler (interrupt type handled above) */
4585        qib_setup_7220_interrupt(dd);
4586        qib_7220_init_hwerrors(dd);
4587
4588        /* clear diagctrl register, in case diags were running and crashed */
4589        qib_write_kreg(dd, kr_hwdiagctrl, 0);
4590
4591        goto bail;
4592
4593bail_cleanup:
4594        qib_pcie_ddcleanup(dd);
4595bail_free:
4596        qib_free_devdata(dd);
4597        dd = ERR_PTR(ret);
4598bail:
4599        return dd;
4600}
4601
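/*
 * Usage sketch: callers never invoke the 7220 routines directly; the
 * shared driver code dispatches through the per-device pointers set up
 * above, e.g. (illustrative):
 *
 *	dd = qib_init_iba7220_funcs(pdev, ent);
 *	...
 *	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
 */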