linux/drivers/infiniband/hw/qib/qib_iba7322.c
/*
 * Copyright (c) 2012 Intel Corporation.  All rights reserved.
 * Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * This file contains all of the code that is specific to the
 * InfiniPath 7322 chip
 */

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#ifdef CONFIG_INFINIBAND_QIB_DCA
#include <linux/dca.h>
#endif

#include "qib.h"
#include "qib_7322_regs.h"
#include "qib_qsfp.h"

#include "qib_mad.h"
#include "qib_verbs.h"

#undef pr_fmt
#define pr_fmt(fmt) QIB_DRV_NAME " " fmt

static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
static irqreturn_t qib_7322intr(int irq, void *data);
static irqreturn_t qib_7322bufavail(int irq, void *data);
static irqreturn_t sdma_intr(int irq, void *data);
static irqreturn_t sdma_idle_intr(int irq, void *data);
static irqreturn_t sdma_progress_intr(int irq, void *data);
static irqreturn_t sdma_cleanup_intr(int irq, void *data);
static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
                                  struct qib_ctxtdata *rcd);
static u8 qib_7322_phys_portstate(u64);
static u32 qib_7322_iblink_state(u64);
static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
                                   u16 linitcmd);
static void force_h1(struct qib_pportdata *);
static void adj_tx_serdes(struct qib_pportdata *);
static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
static void qib_7322_mini_pcs_reset(struct qib_pportdata *);

static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
static void serdes_7322_los_enable(struct qib_pportdata *, int);
static int serdes_7322_init_old(struct qib_pportdata *);
static int serdes_7322_init_new(struct qib_pportdata *);
static void dump_sdma_7322_state(struct qib_pportdata *);

#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
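/*
 * For example, BMASK(7, 4) evaluates to 0xf0: a contiguous mask of
 * bits msb..lsb inclusive.  Note the shifted constant is a plain int,
 * so this is only safe for fields below bit 31; the 64-bit variant
 * MASK_ACROSS below uses 1ULL.
 */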

/* LE2 serdes values for different cases */
#define LE2_DEFAULT 5
#define LE2_5m 4
#define LE2_QME 0

/* Below is special-purpose, so only really works for the IB SerDes blocks. */
#define IBSD(hw_pidx) (hw_pidx + 2)
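/*
 * IBSD() simply maps hardware port index 0/1 to 2/3; judging from its
 * use with ahb_mod() elsewhere in this file, those are the AHB target
 * addresses the IB SerDes blocks occupy on the 7322.
 */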

/* these are variables for documentation and experimentation purposes */
static const unsigned rcv_int_timeout = 375;
static const unsigned rcv_int_count = 16;
static const unsigned sdma_idle_cnt = 64;

/* Time to stop altering Rx Equalization parameters, after link up. */
#define RXEQ_DISABLE_MSECS 2500

/*
 * Number of VLs we are configured to use (to allow for more
 * credits per vl, etc.)
 */
ushort qib_num_cfg_vls = 2;
module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");

static ushort qib_chase = 1;
module_param_named(chase, qib_chase, ushort, S_IRUGO);
MODULE_PARM_DESC(chase, "Enable state chase handling");

static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
MODULE_PARM_DESC(long_attenuation,
                 "attenuation cutoff (dB) for long copper cable setup");

static ushort qib_singleport;
module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");

static ushort qib_krcvq01_no_msi;
module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");

/*
 * Receive header queue sizes
 */
static unsigned qib_rcvhdrcnt;
module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "receive header count");

static unsigned qib_rcvhdrsize;
module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");

static unsigned qib_rcvhdrentsize;
module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");

#define MAX_ATTEN_LEN 64 /* plenty for any real system */
/* for read back, default index is ~5m copper cable */
static char txselect_list[MAX_ATTEN_LEN] = "10";
static struct kparam_string kp_txselect = {
        .string = txselect_list,
        .maxlen = MAX_ATTEN_LEN
};
static int setup_txselect(const char *, struct kernel_param *);
module_param_call(txselect, setup_txselect, param_get_string,
                  &kp_txselect, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(txselect,
                 "Tx serdes indices (for no QSFP or invalid QSFP data)");

#define BOARD_QME7342 5
#define BOARD_QMH7342 6
#define BOARD_QMH7360 9
#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
                    BOARD_QMH7342)
#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
                    BOARD_QME7342)

#define KREG_IDX(regname)     (QIB_7322_##regname##_OFFS / sizeof(u64))

#define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))

#define MASK_ACROSS(lsb, msb) \
        (((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
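/*
 * For example, MASK_ACROSS(0, 17) == 0x3ffff, one bit per receive
 * context (the chip supports 18).  Unlike BMASK() above, the argument
 * order is (lsb, msb) and the arithmetic is done in 64 bits.
 */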

#define SYM_RMASK(regname, fldname) ((u64)              \
        QIB_7322_##regname##_##fldname##_RMASK)

#define SYM_MASK(regname, fldname) ((u64)               \
        QIB_7322_##regname##_##fldname##_RMASK <<       \
         QIB_7322_##regname##_##fldname##_LSB)

#define SYM_FIELD(value, regname, fldname) ((u64)       \
        (((value) >> SYM_LSB(regname, fldname)) &       \
         SYM_RMASK(regname, fldname)))
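/*
 * These paste together names from the machine-generated
 * qib_7322_regs.h: RMASK is a field's mask right-justified, LSB its
 * bit position within the register.  E.g. SYM_FIELD(dd->revision,
 * Revision, BoardID), used by IS_QMH()/IS_QME() above, extracts the
 * BoardID field from a Revision register value.
 */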

/* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
#define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
        (((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))

#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
#define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
#define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
#define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
/* Below because most, but not all, fields of IntMask have that full suffix */
#define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)


#define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)

/*
 * the size bits give us 2^N, in KB units.  0 marks as invalid,
 * and 7 is reserved.  We currently use only 2KB and 4KB
 */
#define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
#define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
#define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
#define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */

#define SendIBSLIDAssignMask \
        QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
#define SendIBSLMCMask \
        QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK

#define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
#define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
#define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
#define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
#define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
#define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)

#define _QIB_GPIO_SDA_NUM 1
#define _QIB_GPIO_SCL_NUM 0
#define QIB_EEPROM_WEN_NUM 14
#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */

/* HW counter clock is at 4nsec */
#define QIB_7322_PSXMITWAIT_CHECK_RATE 4000

/* full speed IB port 1 only */
#define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
#define PORT_SPD_CAP_SHIFT 3

/* full speed featuremask, both ports */
#define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))

/*
 * This file contains almost all the chip-specific register information and
 * access functions for the QLogic InfiniPath 7322 PCI-Express chip.
 */

/* Use defines to tie machine-generated names to lower-case names */
#define kr_contextcnt KREG_IDX(ContextCnt)
#define kr_control KREG_IDX(Control)
#define kr_counterregbase KREG_IDX(CntrRegBase)
#define kr_errclear KREG_IDX(ErrClear)
#define kr_errmask KREG_IDX(ErrMask)
#define kr_errstatus KREG_IDX(ErrStatus)
#define kr_extctrl KREG_IDX(EXTCtrl)
#define kr_extstatus KREG_IDX(EXTStatus)
#define kr_gpio_clear KREG_IDX(GPIOClear)
#define kr_gpio_mask KREG_IDX(GPIOMask)
#define kr_gpio_out KREG_IDX(GPIOOut)
#define kr_gpio_status KREG_IDX(GPIOStatus)
#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
#define kr_debugportval KREG_IDX(DebugPortValueReg)
#define kr_fmask KREG_IDX(feature_mask)
#define kr_act_fmask KREG_IDX(active_feature_mask)
#define kr_hwerrclear KREG_IDX(HwErrClear)
#define kr_hwerrmask KREG_IDX(HwErrMask)
#define kr_hwerrstatus KREG_IDX(HwErrStatus)
#define kr_intclear KREG_IDX(IntClear)
#define kr_intmask KREG_IDX(IntMask)
#define kr_intredirect KREG_IDX(IntRedirect0)
#define kr_intstatus KREG_IDX(IntStatus)
#define kr_pagealign KREG_IDX(PageAlign)
#define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
#define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
#define kr_revision KREG_IDX(Revision)
#define kr_scratch KREG_IDX(Scratch)
#define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
#define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
#define kr_sendctrl KREG_IDX(SendCtrl)
#define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
#define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
#define kr_sendpiobufbase KREG_IDX(SendBufBase)
#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
#define kr_sendpiosize KREG_IDX(SendBufSize)
#define kr_sendregbase KREG_IDX(SendRegBase)
#define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
#define kr_userregbase KREG_IDX(UserRegBase)
#define kr_intgranted KREG_IDX(Int_Granted)
#define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
#define kr_intblocked KREG_IDX(IntBlocked)
#define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)

/*
 * per-port kernel registers.  Access only with qib_read_kreg_port()
 * or qib_write_kreg_port()
 */
#define krp_errclear KREG_IBPORT_IDX(ErrClear)
#define krp_errmask KREG_IBPORT_IDX(ErrMask)
#define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
#define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
#define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
#define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
#define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
#define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
#define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
#define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
#define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
#define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
#define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
#define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
#define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
#define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
#define krp_psinterval KREG_IBPORT_IDX(PSInterval)
#define krp_psstart KREG_IBPORT_IDX(PSStart)
#define krp_psstat KREG_IBPORT_IDX(PSStat)
#define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
#define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
#define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
#define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
#define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
#define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
#define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
#define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
#define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
#define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
#define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
#define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
#define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
#define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
#define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
#define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
#define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
#define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
#define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
#define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
#define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
#define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
#define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
#define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
#define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
#define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
#define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
#define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
#define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
#define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
#define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)

/*
 * Per-context kernel registers.  Access only with qib_read_kreg_ctxt()
 * or qib_write_kreg_ctxt()
 */
#define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
#define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)

/*
 * TID Flow table, per context.  Reduces
 * number of hdrq updates to one per flow (or on errors).
 * context 0 and 1 share same memory, but have distinct
 * addresses.  Since for now, we never use expected sends
 * on kernel contexts, we don't worry about that (we initialize
 * those entries for ctxt 0/1 on driver load twice, for example).
 */
#define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
#define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))

/* these are the error bits in the tid flows, and are W1C */
#define TIDFLOW_ERRBITS  ( \
        (SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
        SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
        (SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
        SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))

/* Most (not all) Counters are per-IBport.
 * Requires LBIntCnt is at offset 0 in the group
 */
#define CREG_IDX(regname) \
((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
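/*
 * The counter registers form one contiguous array of u64s starting at
 * LBIntCnt, so e.g. crp_pktsend below is TxDataPktCnt's offset from
 * LBIntCnt in u64 units; read_7322_creg_port() (further down) then
 * uses that value directly as an index into cpregbase[].
 */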

#define crp_badformat CREG_IDX(RxVersionErrCnt)
#define crp_err_rlen CREG_IDX(RxLenErrCnt)
#define crp_erricrc CREG_IDX(RxICRCErrCnt)
#define crp_errlink CREG_IDX(RxLinkMalformCnt)
#define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
#define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
#define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
#define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
#define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
#define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
#define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
#define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
#define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
#define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
#define crp_pktrcv CREG_IDX(RxDataPktCnt)
#define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
#define crp_pktsend CREG_IDX(TxDataPktCnt)
#define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
#define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
#define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
#define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
#define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
#define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
#define crp_rcvebp CREG_IDX(RxEBPCnt)
#define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
#define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
#define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
#define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
#define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
#define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
#define crp_rxvlerr CREG_IDX(RxVlErrCnt)
#define crp_sendstall CREG_IDX(TxFlowStallCnt)
#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
#define crp_txlenerr CREG_IDX(TxLenErrCnt)
#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
#define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
#define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
#define crp_wordrcv CREG_IDX(RxDwordCnt)
#define crp_wordsend CREG_IDX(TxDwordCnt)
#define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)

/* these are the (few) counters that are not port-specific */
#define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
                        QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
#define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
#define cr_lbint CREG_DEVIDX(LBIntCnt)
#define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
#define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
#define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
#define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
#define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)

/* no chip register for # of IB ports supported, so define */
#define NUM_IB_PORTS 2

/* 1 VL15 buffer per hardware IB port, no register for this, so define */
#define NUM_VL15_BUFS NUM_IB_PORTS

/*
 * context 0 and 1 are special, and there is no chip register that
 * defines this value, so we have to define it here.
 * These are all allocated to either 0 or 1 for single port
 * hardware configuration, otherwise each gets half
 */
#define KCTXT0_EGRCNT 2048

/* values for vl and port fields in PBC, 7322-specific */
#define PBC_PORT_SEL_LSB 26
#define PBC_PORT_SEL_RMASK 1
#define PBC_VL_NUM_LSB 27
#define PBC_VL_NUM_RMASK 7
#define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
#define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
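/*
 * A sketch of how these are meant to combine, inferred from the field
 * definitions above (see qib_7322_setpbc_control() later in this file
 * for the real construction):
 *      pbc |= ((vl & PBC_VL_NUM_RMASK) << PBC_VL_NUM_LSB) |
 *             ((port & PBC_PORT_SEL_RMASK) << PBC_PORT_SEL_LSB);
 * with PBC_7322_VL15_SEND set instead for VL15 packets, which bypass
 * the credit check.
 */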

static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
        [IB_RATE_2_5_GBPS] = 16,
        [IB_RATE_5_GBPS] = 8,
        [IB_RATE_10_GBPS] = 4,
        [IB_RATE_20_GBPS] = 2,
        [IB_RATE_30_GBPS] = 2,
        [IB_RATE_40_GBPS] = 1
};
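/*
 * Note the pattern: each delay multiplier is roughly 40 / rate-in-Gbps,
 * i.e. a static rate is realized by stretching each packet out to the
 * time it would occupy on a 40 Gbps (4X QDR) link; rates that don't
 * divide evenly (e.g. 30 Gbps) appear to be rounded up.
 */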

#define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
#define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)

/* link training states, from IBC */
#define IB_7322_LT_STATE_DISABLED        0x00
#define IB_7322_LT_STATE_LINKUP          0x01
#define IB_7322_LT_STATE_POLLACTIVE      0x02
#define IB_7322_LT_STATE_POLLQUIET       0x03
#define IB_7322_LT_STATE_SLEEPDELAY      0x04
#define IB_7322_LT_STATE_SLEEPQUIET      0x05
#define IB_7322_LT_STATE_CFGDEBOUNCE     0x08
#define IB_7322_LT_STATE_CFGRCVFCFG      0x09
#define IB_7322_LT_STATE_CFGWAITRMT      0x0a
#define IB_7322_LT_STATE_CFGIDLE         0x0b
#define IB_7322_LT_STATE_RECOVERRETRAIN  0x0c
#define IB_7322_LT_STATE_TXREVLANES      0x0d
#define IB_7322_LT_STATE_RECOVERWAITRMT  0x0e
#define IB_7322_LT_STATE_RECOVERIDLE     0x0f
#define IB_7322_LT_STATE_CFGENH          0x10
#define IB_7322_LT_STATE_CFGTEST         0x11
#define IB_7322_LT_STATE_CFGWAITRMTTEST  0x12
#define IB_7322_LT_STATE_CFGWAITENH      0x13

/* link state machine states from IBC */
#define IB_7322_L_STATE_DOWN             0x0
#define IB_7322_L_STATE_INIT             0x1
#define IB_7322_L_STATE_ARM              0x2
#define IB_7322_L_STATE_ACTIVE           0x3
#define IB_7322_L_STATE_ACT_DEFER        0x4

static const u8 qib_7322_physportstate[0x20] = {
        [IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
        [IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
        [IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
        [IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
        [IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
        [IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
        [IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7322_LT_STATE_CFGRCVFCFG] =
                IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7322_LT_STATE_CFGWAITRMT] =
                IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
        [IB_7322_LT_STATE_RECOVERRETRAIN] =
                IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
        [IB_7322_LT_STATE_RECOVERWAITRMT] =
                IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
        [IB_7322_LT_STATE_RECOVERIDLE] =
                IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
        [IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
        [IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7322_LT_STATE_CFGWAITRMTTEST] =
                IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7322_LT_STATE_CFGWAITENH] =
                IB_PHYSPORTSTATE_CFG_WAIT_ENH,
        [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
};

#ifdef CONFIG_INFINIBAND_QIB_DCA
struct qib_irq_notify {
        int rcv;
        void *arg;
        struct irq_affinity_notify notify;
};
#endif

struct qib_chip_specific {
        u64 __iomem *cregbase;
        u64 *cntrs;
        spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
        spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
        u64 main_int_mask;      /* clear bits which have dedicated handlers */
        u64 int_enable_mask;  /* for per port interrupts in single port mode */
        u64 errormask;
        u64 hwerrmask;
        u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
        u64 gpio_mask; /* shadow the gpio mask register */
        u64 extctrl; /* shadow the gpio output enable, etc... */
        u32 ncntrs;
        u32 nportcntrs;
        u32 cntrnamelen;
        u32 portcntrnamelen;
        u32 numctxts;
        u32 rcvegrcnt;
        u32 updthresh; /* current AvailUpdThld */
        u32 updthresh_dflt; /* default AvailUpdThld */
        u32 r1;
        int irq;
        u32 num_msix_entries;
        u32 sdmabufcnt;
        u32 lastbuf_for_pio;
        u32 stay_in_freeze;
        u32 recovery_ports_initted;
#ifdef CONFIG_INFINIBAND_QIB_DCA
        u32 dca_ctrl;
        int rhdr_cpu[18];
        int sdma_cpu[2];
        u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */
#endif
        struct qib_msix_entry *msix_entries;
        unsigned long *sendchkenable;
        unsigned long *sendgrhchk;
        unsigned long *sendibchk;
        u32 rcvavail_timeout[18];
        char emsgbuf[128]; /* for device error interrupt msg buffer */
};

/* Table of Tx Emphasis entries, in "human readable" form. */
struct txdds_ent {
        u8 amp;
        u8 pre;
        u8 main;
        u8 post;
};

struct vendor_txdds_ent {
        u8 oui[QSFP_VOUI_LEN];
        u8 *partnum;
        struct txdds_ent sdr;
        struct txdds_ent ddr;
        struct txdds_ent qdr;
};

static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);

#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
#define TXDDS_EXTRA_SZ 18 /* number of extra tx settings entries */
#define TXDDS_MFG_SZ 2    /* number of mfg tx settings entries */
#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */

#define H1_FORCE_VAL 8
#define H1_FORCE_QME 1 /* may be overridden via setup_txselect() */
#define H1_FORCE_QMH 7 /* may be overridden via setup_txselect() */

/* The static and dynamic registers are paired, and the pairs indexed by spd */
#define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
        + ((spd) * 2))
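/*
 * So krp_static_adapt_dis(0) is the SDR static register,
 * krp_static_adapt_dis(1) the DDR one (two registers later), and
 * krp_static_adapt_dis(2) the QDR one; per the pairing noted above,
 * the matching dynamic register presumably sits at the odd index of
 * each pair.
 */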

#define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
#define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
#define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */

struct qib_chippport_specific {
        u64 __iomem *kpregbase;
        u64 __iomem *cpregbase;
        u64 *portcntrs;
        struct qib_pportdata *ppd;
        wait_queue_head_t autoneg_wait;
        struct delayed_work autoneg_work;
        struct delayed_work ipg_work;
        struct timer_list chase_timer;
        /*
         * these 5 fields are used to establish deltas for IB symbol
         * errors and linkrecovery errors.  They can be reported on
         * some chips during link negotiation prior to INIT, and with
         * DDR when faking DDR negotiations with non-IBTA switches.
         * The chip counters are adjusted at driver unload if there is
         * a non-zero delta.
         */
        u64 ibdeltainprog;
        u64 ibsymdelta;
        u64 ibsymsnap;
        u64 iblnkerrdelta;
        u64 iblnkerrsnap;
        u64 iblnkdownsnap;
        u64 iblnkdowndelta;
        u64 ibmalfdelta;
        u64 ibmalfsnap;
        u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
        u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
        unsigned long qdr_dfe_time;
        unsigned long chase_end;
        u32 autoneg_tries;
        u32 recovery_init;
        u32 qdr_dfe_on;
        u32 qdr_reforce;
        /*
         * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
         * entry zero is unused, to simplify indexing
         */
        u8 h1_val;
        u8 no_eep;  /* txselect table index to use if no qsfp info */
        u8 ipg_tries;
        u8 ibmalfusesnap;
        struct qib_qsfp_data qsfp_data;
        char epmsgbuf[192]; /* for port error interrupt msg buffer */
        char sdmamsgbuf[192]; /* for per-port sdma error messages */
};

static struct {
        const char *name;
        irq_handler_t handler;
        int lsb;
        int port; /* 0 if not port-specific, else port # */
        int dca;
} irq_table[] = {
        { "", qib_7322intr, -1, 0, 0 },
        { " (buf avail)", qib_7322bufavail,
                SYM_LSB(IntStatus, SendBufAvail), 0, 0 },
        { " (sdma 0)", sdma_intr,
                SYM_LSB(IntStatus, SDmaInt_0), 1, 1 },
        { " (sdma 1)", sdma_intr,
                SYM_LSB(IntStatus, SDmaInt_1), 2, 1 },
        { " (sdmaI 0)", sdma_idle_intr,
                SYM_LSB(IntStatus, SDmaIdleInt_0), 1, 1 },
        { " (sdmaI 1)", sdma_idle_intr,
                SYM_LSB(IntStatus, SDmaIdleInt_1), 2, 1 },
        { " (sdmaP 0)", sdma_progress_intr,
                SYM_LSB(IntStatus, SDmaProgressInt_0), 1, 1 },
        { " (sdmaP 1)", sdma_progress_intr,
                SYM_LSB(IntStatus, SDmaProgressInt_1), 2, 1 },
        { " (sdmaC 0)", sdma_cleanup_intr,
                SYM_LSB(IntStatus, SDmaCleanupDone_0), 1, 0 },
        { " (sdmaC 1)", sdma_cleanup_intr,
                SYM_LSB(IntStatus, SDmaCleanupDone_1), 2, 0 },
};
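/*
 * Orientation note, based on the declarations above (the MSI-X wiring
 * itself lives later in this file): entry 0 (empty name) is the
 * general handler covering the whole IntStatus register; for the
 * rest, .lsb is the IntStatus bit the vector services, a non-zero
 * .port selects which pport the handler receives as its argument, and
 * .dca flags vectors that are candidates for DCA steering.
 */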

#ifdef CONFIG_INFINIBAND_QIB_DCA

static const struct dca_reg_map {
        int     shadow_inx;
        int     lsb;
        u64     mask;
        u16     regno;
} dca_rcvhdr_reg_map[] = {
        { 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH),
           ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH), KREG_IDX(DCACtrlB) },
        { 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH),
           ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH), KREG_IDX(DCACtrlB) },
        { 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH),
           ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH), KREG_IDX(DCACtrlB) },
        { 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH),
           ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH), KREG_IDX(DCACtrlB) },
        { 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH),
           ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH), KREG_IDX(DCACtrlC) },
        { 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH),
           ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH), KREG_IDX(DCACtrlC) },
        { 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH),
           ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH), KREG_IDX(DCACtrlC) },
        { 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH),
           ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH), KREG_IDX(DCACtrlC) },
        { 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH),
           ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH), KREG_IDX(DCACtrlD) },
        { 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH),
           ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH), KREG_IDX(DCACtrlD) },
        { 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH),
           ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH), KREG_IDX(DCACtrlD) },
        { 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH),
           ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH), KREG_IDX(DCACtrlD) },
        { 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH),
           ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH), KREG_IDX(DCACtrlE) },
        { 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH),
           ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH), KREG_IDX(DCACtrlE) },
        { 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH),
           ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH), KREG_IDX(DCACtrlE) },
        { 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH),
           ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH), KREG_IDX(DCACtrlE) },
        { 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH),
           ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH), KREG_IDX(DCACtrlF) },
        { 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH),
           ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH), KREG_IDX(DCACtrlF) },
};
#endif

/* ibcctrl bits */
#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
/* cycle through TS1/TS2 till OK */
#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
/* wait for TS1, then go on */
#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16

#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1           /* move to 0x11 */
#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2          /* move to 0x21 */
#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */

#define BLOB_7322_IBCHG 0x101

static inline void qib_write_kreg(const struct qib_devdata *dd,
                                  const u32 regno, u64 value);
static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
static void write_7322_initregs(struct qib_devdata *);
static void write_7322_init_portregs(struct qib_pportdata *);
static void setup_7322_link_recovery(struct qib_pportdata *, u32);
static void check_7322_rxe_status(struct qib_pportdata *);
static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
#ifdef CONFIG_INFINIBAND_QIB_DCA
static void qib_setup_dca(struct qib_devdata *dd);
static void setup_dca_notifier(struct qib_devdata *dd,
                               struct qib_msix_entry *m);
static void reset_dca_notifier(struct qib_devdata *dd,
                               struct qib_msix_entry *m);
#endif

/**
 * qib_read_ureg32 - read 32-bit virtualized per-context register
 * @dd: device
 * @regno: register number
 * @ctxt: context number
 *
 * Return the contents of a register that is virtualized to be per context.
 * Returns 0 on error, which is not distinguishable from a valid register
 * value at runtime; we may add a separate error variable at some point.
 */
static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
                                  enum qib_ureg regno, int ctxt)
{
        if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
                return 0;
        return readl(regno + (u64 __iomem *)(
                (dd->ureg_align * ctxt) + (dd->userbase ?
                 (char __iomem *)dd->userbase :
                 (char __iomem *)dd->kregbase + dd->uregbase)));
}
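/*
 * The address math above, spelled out: each context's user registers
 * live at a stride of dd->ureg_align bytes, either in the separately
 * mapped dd->userbase or at dd->uregbase bytes past kregbase; regno
 * then indexes 64-bit registers within that context's region.  The
 * same layout is used by qib_read_ureg() and qib_write_ureg() below.
 */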

/**
 * qib_read_ureg - read virtualized per-context register
 * @dd: device
 * @regno: register number
 * @ctxt: context number
 *
 * Return the contents of a register that is virtualized to be per context.
 * Returns 0 on error, which is not distinguishable from a valid register
 * value at runtime; we may add a separate error variable at some point.
 */
static inline u64 qib_read_ureg(const struct qib_devdata *dd,
                                enum qib_ureg regno, int ctxt)
{
        if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
                return 0;
        return readq(regno + (u64 __iomem *)(
                (dd->ureg_align * ctxt) + (dd->userbase ?
                 (char __iomem *)dd->userbase :
                 (char __iomem *)dd->kregbase + dd->uregbase)));
}

/**
 * qib_write_ureg - write virtualized per-context register
 * @dd: device
 * @regno: register number
 * @value: value
 * @ctxt: context
 *
 * Write the contents of a register that is virtualized to be per context.
 */
static inline void qib_write_ureg(const struct qib_devdata *dd,
                                  enum qib_ureg regno, u64 value, int ctxt)
{
        u64 __iomem *ubase;

        if (dd->userbase)
                ubase = (u64 __iomem *)
                        ((char __iomem *) dd->userbase +
                         dd->ureg_align * ctxt);
        else
                ubase = (u64 __iomem *)
                        (dd->uregbase +
                         (char __iomem *) dd->kregbase +
                         dd->ureg_align * ctxt);

        if (dd->kregbase && (dd->flags & QIB_PRESENT))
                writeq(value, &ubase[regno]);
}

static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
                                  const u32 regno)
{
        if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
                return -1;
        return readl((u32 __iomem *) &dd->kregbase[regno]);
}

static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
                                  const u32 regno)
{
        if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
                return -1;
        return readq(&dd->kregbase[regno]);
}

static inline void qib_write_kreg(const struct qib_devdata *dd,
                                  const u32 regno, u64 value)
{
        if (dd->kregbase && (dd->flags & QIB_PRESENT))
                writeq(value, &dd->kregbase[regno]);
}

/*
 * not many sanity checks for the port-specific kernel register routines,
 * since they are only used when it's known to be safe.
 */
static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
                                     const u16 regno)
{
        if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
                return 0ULL;
        return readq(&ppd->cpspec->kpregbase[regno]);
}

static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
                                       const u16 regno, u64 value)
{
        if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
            (ppd->dd->flags & QIB_PRESENT))
                writeq(value, &ppd->cpspec->kpregbase[regno]);
}

/**
 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
 * @dd: the qlogic_ib device
 * @regno: the register number to write
 * @ctxt: the context containing the register
 * @value: the value to write
 */
static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
                                       const u16 regno, unsigned ctxt,
                                       u64 value)
{
        qib_write_kreg(dd, regno + ctxt, value);
}

static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
{
        if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
                return 0;
        return readq(&dd->cspec->cregbase[regno]);
}

static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
{
        if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
                return 0;
        return readl(&dd->cspec->cregbase[regno]);
}

static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
                                        u16 regno, u64 value)
{
        if (ppd->cpspec && ppd->cpspec->cpregbase &&
            (ppd->dd->flags & QIB_PRESENT))
                writeq(value, &ppd->cpspec->cpregbase[regno]);
}

static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
                                      u16 regno)
{
        if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
            !(ppd->dd->flags & QIB_PRESENT))
                return 0;
        return readq(&ppd->cpspec->cpregbase[regno]);
}

static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
                                        u16 regno)
{
        if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
            !(ppd->dd->flags & QIB_PRESENT))
                return 0;
        return readl(&ppd->cpspec->cpregbase[regno]);
}

/* bits in Control register */
#define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
#define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)

/* bits in general interrupt regs */
#define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
#define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
#define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
#define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
#define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
#define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
#define QIB_I_C_ERROR INT_MASK(Err)

#define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
#define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
#define QIB_I_GPIO INT_MASK(AssertGPIO)
#define QIB_I_P_SDMAINT(pidx) \
        (INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
         INT_MASK_P(SDmaProgress, pidx) | \
         INT_MASK_PM(SDmaCleanupDone, pidx))

/* Interrupt bits that are "per port" */
#define QIB_I_P_BITSEXTANT(pidx) \
        (INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
        INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
        INT_MASK_P(SDmaProgress, pidx) | \
        INT_MASK_PM(SDmaCleanupDone, pidx))

/* Interrupt bits that are common to a device */
/* currently unused: QIB_I_SPIOSENT */
#define QIB_I_C_BITSEXTANT \
        (QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
        QIB_I_SPIOSENT | \
        QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)

#define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
        QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))

/*
 * Error bits that are "per port".
 */
#define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
#define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
#define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
#define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
#define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
#define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
#define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
#define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
#define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
#define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
#define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
#define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
#define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
#define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
#define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
#define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
#define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
#define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
#define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
#define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
#define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
#define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
#define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
#define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
#define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
#define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
#define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
#define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)

#define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
#define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
#define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
#define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
#define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
#define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
#define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
#define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
#define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
#define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
#define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)

/* Error bits that are common to a device */
#define QIB_E_RESET ERR_MASK(ResetNegated)
#define QIB_E_HARDWARE ERR_MASK(HardwareErr)
#define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)

/*
 * Per chip (rather than per-port) errors.  Most either do nothing but
 * trigger a print (because they self-recover, or always occur in
 * tandem with other errors that handle the issue), or indicate errors
 * with no recovery; either way we want to know that they happened.
 */
#define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
#define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
#define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
#define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
#define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
#define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
#define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
#define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)

/* SDMA chip errors (not per port)
 * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
 * the SDMAHALT error immediately, so we just print the dup error via the
 * E_AUTO mechanism.  This is true of most of the per-port fatal errors
 * as well, but since this is port-independent, by definition, it's
 * handled a bit differently.  SDMA_VL15 and SDMA_WRONG_PORT are per
 * packet send errors, and so are handled in the same manner as other
 * per-packet errors.
 */
#define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
#define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
#define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)

/*
 * Below is functionally equivalent to legacy QLOGIC_IB_E_PKTERRS;
 * it is used to print "common" packet errors.
 */
#define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
        QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
        QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
        QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
        QIB_E_P_REBP)

/* Error bits that are packet-related (Receive, per-port) */
#define QIB_E_P_RPKTERRS (\
        QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
        QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
        QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
        QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
        QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
        QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)

/*
 * Error bits that are Send-related (per port)
 * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
 * All of these potentially need to have a buffer disarmed
 */
#define QIB_E_P_SPKTERRS (\
        QIB_E_P_SUNEXP_PKTNUM |\
        QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
        QIB_E_P_SMAXPKTLEN |\
        QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
        QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
        QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)

#define QIB_E_SPKTERRS ( \
                QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
                ERR_MASK_N(SendUnsupportedVLErr) |                      \
                QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)

#define QIB_E_P_SDMAERRS ( \
        QIB_E_P_SDMAHALT | \
        QIB_E_P_SDMADESCADDRMISALIGN | \
        QIB_E_P_SDMAUNEXPDATA | \
        QIB_E_P_SDMAMISSINGDW | \
        QIB_E_P_SDMADWEN | \
        QIB_E_P_SDMARPYTAG | \
        QIB_E_P_SDMA1STDESC | \
        QIB_E_P_SDMABASE | \
        QIB_E_P_SDMATAILOUTOFBOUND | \
        QIB_E_P_SDMAOUTOFBOUND | \
        QIB_E_P_SDMAGENMISMATCH)

/*
 * This sets some bits more than once, but makes it more obvious which
 * bits are not handled under other categories, and the repeat definition
 * is not a problem.
 */
#define QIB_E_P_BITSEXTANT ( \
        QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
        QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
        QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
        QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
        )

/*
 * These are errors that can occur when the link
 * changes state while a packet is being sent or received.  This doesn't
 * cover things like EBP or VCRC that can be the result of a sender
 * having the link change state, so we receive a "known bad" packet.
 * All of these are "per port", so renamed:
 */
#define QIB_E_P_LINK_PKTERRS (\
        QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
        QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
        QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
        QIB_E_P_RUNEXPCHAR)

/*
 * This sets some bits more than once, but makes it more obvious which
 * bits are not handled under other categories (such as QIB_E_SPKTERRS),
 * and the repeat definition is not a problem.
 */
#define QIB_E_C_BITSEXTANT (\
        QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
        QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
        QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)

/* Likewise Neuter E_SPKT_ERRS_IGNORE */
#define E_SPKT_ERRS_IGNORE 0

#define QIB_EXTS_MEMBIST_DISABLED \
        SYM_MASK(EXTStatus, MemBISTDisabled)
#define QIB_EXTS_MEMBIST_ENDTEST \
        SYM_MASK(EXTStatus, MemBISTEndTest)

#define QIB_E_SPIOARMLAUNCH \
        ERR_MASK(SendArmLaunchErr)

#define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
#define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)

/*
 * IBTA_1_2 is set when multiple speeds are enabled (normal),
 * and also if forced QDR (only QDR enabled).  It's enabled for the
 * forced QDR case so that scrambling will be enabled by the TS3
 * exchange, when supported by both sides of the link.
 */
#define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
#define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
#define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
#define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
#define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
#define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
        SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
#define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)

#define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
#define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)

#define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
#define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
#define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))

#define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
#define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
#define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
        SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
#define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
        SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
#define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)

#define IBA7322_REDIRECT_VEC_PER_REG 12

#define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
#define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
#define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
#define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
#define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)

#define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */

#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
        .msg = #fldname, .sz = sizeof(#fldname) }
#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
        fldname##Mask##_##port), .msg = #fldname, .sz = sizeof(#fldname) }
static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
        HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
        HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
        HWE_AUTO(PCIESerdesPClkNotDetect),
        HWE_AUTO(PowerOnBISTFailed),
        HWE_AUTO(TempsenseTholdReached),
        HWE_AUTO(MemoryErr),
        HWE_AUTO(PCIeBusParityErr),
        HWE_AUTO(PcieCplTimeout),
        HWE_AUTO(PciePoisonedTLP),
        HWE_AUTO_P(SDmaMemReadErr, 1),
        HWE_AUTO_P(SDmaMemReadErr, 0),
        HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
        HWE_AUTO_P(IBCBusToSPCParityErr, 1),
        HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
        HWE_AUTO(statusValidNoEop),
        HWE_AUTO(LATriggered),
        { .mask = 0, .sz = 0 }
};

#define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
        .msg = #fldname, .sz = sizeof(#fldname) }
#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
        .msg = #fldname, .sz = sizeof(#fldname) }
static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
        E_AUTO(RcvEgrFullErr),
        E_AUTO(RcvHdrFullErr),
        E_AUTO(ResetNegated),
        E_AUTO(HardwareErr),
        E_AUTO(InvalidAddrErr),
        E_AUTO(SDmaVL15Err),
        E_AUTO(SBufVL15MisUseErr),
        E_AUTO(InvalidEEPCmd),
        E_AUTO(RcvContextShareErr),
        E_AUTO(SendVLMismatchErr),
        E_AUTO(SendArmLaunchErr),
        E_AUTO(SendSpecialTriggerErr),
        E_AUTO(SDmaWrongPortErr),
        E_AUTO(SDmaBufMaskDuplicateErr),
        { .mask = 0, .sz = 0 }
};

static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
        E_P_AUTO(IBStatusChanged),
        E_P_AUTO(SHeadersErr),
        E_P_AUTO(VL15BufMisuseErr),
        /*
         * SDmaHaltErr is not really an error; make the message clearer.
         */
1246        {.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
1247                .sz = 11},
1248        E_P_AUTO(SDmaDescAddrMisalignErr),
1249        E_P_AUTO(SDmaUnexpDataErr),
1250        E_P_AUTO(SDmaMissingDwErr),
1251        E_P_AUTO(SDmaDwEnErr),
1252        E_P_AUTO(SDmaRpyTagErr),
1253        E_P_AUTO(SDma1stDescErr),
1254        E_P_AUTO(SDmaBaseErr),
1255        E_P_AUTO(SDmaTailOutOfBoundErr),
1256        E_P_AUTO(SDmaOutOfBoundErr),
1257        E_P_AUTO(SDmaGenMismatchErr),
1258        E_P_AUTO(SendBufMisuseErr),
1259        E_P_AUTO(SendUnsupportedVLErr),
1260        E_P_AUTO(SendUnexpectedPktNumErr),
1261        E_P_AUTO(SendDroppedDataPktErr),
1262        E_P_AUTO(SendDroppedSmpPktErr),
1263        E_P_AUTO(SendPktLenErr),
1264        E_P_AUTO(SendUnderRunErr),
1265        E_P_AUTO(SendMaxPktLenErr),
1266        E_P_AUTO(SendMinPktLenErr),
1267        E_P_AUTO(RcvIBLostLinkErr),
1268        E_P_AUTO(RcvHdrErr),
1269        E_P_AUTO(RcvHdrLenErr),
1270        E_P_AUTO(RcvBadTidErr),
1271        E_P_AUTO(RcvBadVersionErr),
1272        E_P_AUTO(RcvIBFlowErr),
1273        E_P_AUTO(RcvEBPErr),
1274        E_P_AUTO(RcvUnsupportedVLErr),
1275        E_P_AUTO(RcvUnexpectedCharErr),
1276        E_P_AUTO(RcvShortPktLenErr),
1277        E_P_AUTO(RcvLongPktLenErr),
1278        E_P_AUTO(RcvMaxPktLenErr),
1279        E_P_AUTO(RcvMinPktLenErr),
1280        E_P_AUTO(RcvICRCErr),
1281        E_P_AUTO(RcvVCRCErr),
1282        E_P_AUTO(RcvFormatErr),
1283        { .mask = 0, .sz = 0 }
1284};
1285
1286/*
1287 * Below generates "auto-message" for interrupts not specific to any port or
1288 * context
1289 */
1290#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
1291        .msg = #fldname, .sz = sizeof(#fldname) }
1292/* Below generates "auto-message" for interrupts specific to a port */
1293#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
1294        SYM_LSB(IntMask, fldname##Mask##_0), \
1295        SYM_LSB(IntMask, fldname##Mask##_1)), \
1296        .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
1297/* For some reason, the SerDesTrimDone bits are reversed */
1298#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
1299        SYM_LSB(IntMask, fldname##Mask##_1), \
1300        SYM_LSB(IntMask, fldname##Mask##_0)), \
1301        .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
1302/*
1303 * Below generates "auto-message" for interrupts specific to a context,
1304 * with ctxt-number appended
1305 */
1306#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
1307        SYM_LSB(IntMask, fldname##0IntMask), \
1308        SYM_LSB(IntMask, fldname##17IntMask)), \
1309        .msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
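    /*
     * MASK_ACROSS (defined earlier in this file) builds one contiguous
     * mask spanning the two given bit positions, so a single table
     * entry matches the same interrupt for either port, or for any of
     * contexts 0..17 in the INTR_AUTO_C case.
     */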
1310
1311#define TXSYMPTOM_AUTO_P(fldname) \
1312        { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
1313        .msg = #fldname, .sz = sizeof(#fldname) }
1314static const struct qib_hwerror_msgs hdrchk_msgs[] = {
1315        TXSYMPTOM_AUTO_P(NonKeyPacket),
1316        TXSYMPTOM_AUTO_P(GRHFail),
1317        TXSYMPTOM_AUTO_P(PkeyFail),
1318        TXSYMPTOM_AUTO_P(QPFail),
1319        TXSYMPTOM_AUTO_P(SLIDFail),
1320        TXSYMPTOM_AUTO_P(RawIPV6),
1321        TXSYMPTOM_AUTO_P(PacketTooSmall),
1322        { .mask = 0, .sz = 0 }
1323};
1324
1325#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
1326
1327/*
1328 * Called when we might have an error that is specific to a particular
1329 * PIO buffer, and may need to cancel that buffer so it can be re-used,
1330 * without forcing an update of pioavail.
1331 */
1332static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
1333{
1334        struct qib_devdata *dd = ppd->dd;
1335        u32 i;
1336        int any;
1337        u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
1338        u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
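            /* round up: each sendbuffererror register covers BITS_PER_LONG buffers */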
1339        unsigned long sbuf[4];
1340
1341        /*
1342         * It's possible that sendbuffererror could have bits set; might
1343         * have already done this as a result of hardware error handling.
1344         */
1345        any = 0;
1346        for (i = 0; i < regcnt; ++i) {
1347                sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
1348                if (sbuf[i]) {
1349                        any = 1;
1350                        qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
1351                }
1352        }
1353
1354        if (any)
1355                qib_disarm_piobufs_set(dd, sbuf, piobcnt);
1356}
1357
1358/* No txe_recover yet, if ever */
1359
1360/* No decode_errors yet */
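    /*
     * Example: if errs has both RcvEgrFullErr and RcvHdrFullErr set,
     * msg ends up as "RcvEgrFullErr,RcvHdrFullErr".  For multi-bit
     * masks (e.g. the INTR_AUTO_P entries) a "_<n>" suffix is added,
     * n being the bit's offset from the mask's LSB (the port number).
     */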
1361static void err_decode(char *msg, size_t len, u64 errs,
1362                       const struct qib_hwerror_msgs *msp)
1363{
1364        u64 these, lmask;
1365        int took, multi, n = 0;
1366
1367        while (errs && msp && msp->mask) {
1368                multi = (msp->mask & (msp->mask - 1));
1369                while (errs & msp->mask) {
1370                        these = (errs & msp->mask);
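                            /* x & (x - 1) clears the lowest set bit of x,
                             * so the XOR isolates that single bit in lmask */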
1371                        lmask = (these & (these - 1)) ^ these;
1372                        if (len) {
1373                                if (n++) {
1374                                        /* separate the strings */
1375                                        *msg++ = ',';
1376                                        len--;
1377                                }
1378                                BUG_ON(!msp->sz);
1379                                /* msp->sz counts the nul */
1380                                took = min_t(size_t, msp->sz - (size_t)1, len);
1381                                memcpy(msg, msp->msg, took);
1382                                len -= took;
1383                                msg += took;
1384                                if (len)
1385                                        *msg = '\0';
1386                        }
1387                        errs &= ~lmask;
1388                        if (len && multi) {
1389                                /* More than one bit this mask */
1390                                int idx = -1;
1391
1392                                while (lmask & msp->mask) {
1393                                        ++idx;
1394                                        lmask >>= 1;
1395                                }
1396                                took = scnprintf(msg, len, "_%d", idx);
1397                                len -= took;
1398                                msg += took;
1399                        }
1400                }
1401                ++msp;
1402        }
1403        /* If some bits are left, show in hex. */
1404        if (len && errs)
1405                snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
1406                        (unsigned long long) errs);
1407}
1408
1409/* only called if r1 set */
1410static void flush_fifo(struct qib_pportdata *ppd)
1411{
1412        struct qib_devdata *dd = ppd->dd;
1413        u32 __iomem *piobuf;
1414        u32 bufn;
1415        u32 *hdr;
1416        u64 pbc;
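            /* 7 dwords: LRH (2) + BTH (3) + DETH (2) of a minimal UD packet */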
1417        const unsigned hdrwords = 7;
1418        static struct ib_header ibhdr = {
1419                .lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
1420                .lrh[1] = IB_LID_PERMISSIVE,
1421                .lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
1422                .lrh[3] = IB_LID_PERMISSIVE,
1423                .u.oth.bth[0] = cpu_to_be32(
1424                        (IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
1425                .u.oth.bth[1] = cpu_to_be32(0),
1426                .u.oth.bth[2] = cpu_to_be32(0),
1427                .u.oth.u.ud.deth[0] = cpu_to_be32(0),
1428                .u.oth.u.ud.deth[1] = cpu_to_be32(0),
1429        };
1430
1431        /*
1432         * Send a dummy VL15 packet to flush the launch FIFO.
1433         * This will not actually be sent since the TxeBypassIbc bit is set.
1434         */
1435        pbc = PBC_7322_VL15_SEND |
1436                (((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
1437                (hdrwords + SIZE_OF_CRC);
1438        piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
1439        if (!piobuf)
1440                return;
1441        writeq(pbc, piobuf);
1442        hdr = (u32 *) &ibhdr;
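        /*
         * With write-combining, copy all but the last header word, flush,
         * then write the final word separately, so a partially written
         * buffer is never pushed to the chip out of order.
         */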
1443        if (dd->flags & QIB_PIO_FLUSH_WC) {
1444                qib_flush_wc();
1445                qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
1446                qib_flush_wc();
1447                __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
1448                qib_flush_wc();
1449        } else
1450                qib_pio_copy(piobuf + 2, hdr, hdrwords);
1451        qib_sendbuf_done(dd, bufn);
1452}
1453
1454/*
1455 * This is called with interrupts disabled and sdma_lock held.
1456 */
1457static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
1458{
1459        struct qib_devdata *dd = ppd->dd;
1460        u64 set_sendctrl = 0;
1461        u64 clr_sendctrl = 0;
1462
1463        if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
1464                set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1465        else
1466                clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1467
1468        if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
1469                set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1470        else
1471                clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1472
1473        if (op & QIB_SDMA_SENDCTRL_OP_HALT)
1474                set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1475        else
1476                clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1477
1478        if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
1479                set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1480                                SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1481                                SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1482        else
1483                clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1484                                SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1485                                SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1486
1487        spin_lock(&dd->sendctrl_lock);
1488
1489        /* If we are draining everything, block sends first */
1490        if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1491                ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
1492                qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1493                qib_write_kreg(dd, kr_scratch, 0);
1494        }
1495
1496        ppd->p_sendctrl |= set_sendctrl;
1497        ppd->p_sendctrl &= ~clr_sendctrl;
1498
1499        if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
1500                qib_write_kreg_port(ppd, krp_sendctrl,
1501                                    ppd->p_sendctrl |
1502                                    SYM_MASK(SendCtrl_0, SDmaCleanup));
1503        else
1504                qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1505        qib_write_kreg(dd, kr_scratch, 0);
1506
1507        if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1508                ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
1509                qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1510                qib_write_kreg(dd, kr_scratch, 0);
1511        }
1512
1513        spin_unlock(&dd->sendctrl_lock);
1514
1515        if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
1516                flush_fifo(ppd);
1517}
1518
1519static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
1520{
1521        __qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
1522}
1523
1524static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
1525{
1526        /*
1527         * Set SendDmaLenGen and clear and set
1528         * the MSB of the generation count to enable generation checking
1529         * and load the internal generation counter.
1530         */
1531        qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
1532        qib_write_kreg_port(ppd, krp_senddmalengen,
1533                            ppd->sdma_descq_cnt |
1534                            (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
1535}
1536
1537/*
1538 * Must be called with sdma_lock held, or before init finished.
1539 */
1540static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
1541{
1542        /* Commit writes to memory and advance the tail on the chip */
1543        wmb();
1544        ppd->sdma_descq_tail = tail;
1545        qib_write_kreg_port(ppd, krp_senddmatail, tail);
1546}
1547
1548/*
1549 * This is called with interrupts disabled and sdma_lock held.
1550 */
1551static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
1552{
1553        /*
1554         * Drain all FIFOs.
1555         * The hardware doesn't require this but we do it so that verbs
1556         * and user applications don't wait for link active to send stale
1557         * data.
1558         */
1559        sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
1560
1561        qib_sdma_7322_setlengen(ppd);
1562        qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
1563        ppd->sdma_head_dma[0] = 0;
1564        qib_7322_sdma_sendctrl(ppd,
1565                ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
1566}
1567
1568#define DISABLES_SDMA ( \
1569        QIB_E_P_SDMAHALT | \
1570        QIB_E_P_SDMADESCADDRMISALIGN | \
1571        QIB_E_P_SDMAMISSINGDW | \
1572        QIB_E_P_SDMADWEN | \
1573        QIB_E_P_SDMARPYTAG | \
1574        QIB_E_P_SDMA1STDESC | \
1575        QIB_E_P_SDMABASE | \
1576        QIB_E_P_SDMATAILOUTOFBOUND | \
1577        QIB_E_P_SDMAOUTOFBOUND | \
1578        QIB_E_P_SDMAGENMISMATCH)
1579
1580static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
1581{
1582        unsigned long flags;
1583        struct qib_devdata *dd = ppd->dd;
1584
1585        errs &= QIB_E_P_SDMAERRS;
1586        err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf),
1587                   errs, qib_7322p_error_msgs);
1588
1589        if (errs & QIB_E_P_SDMAUNEXPDATA)
1590                qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
1591                            ppd->port);
1592
1593        spin_lock_irqsave(&ppd->sdma_lock, flags);
1594
1595        if (errs != QIB_E_P_SDMAHALT) {
1596                /* SDMA errors have QIB_E_P_SDMAHALT and another bit set */
1597                qib_dev_porterr(dd, ppd->port,
1598                        "SDMA %s 0x%016llx %s\n",
1599                        qib_sdma_state_names[ppd->sdma_state.current_state],
1600                        errs, ppd->cpspec->sdmamsgbuf);
1601                dump_sdma_7322_state(ppd);
1602        }
1603
1604        switch (ppd->sdma_state.current_state) {
1605        case qib_sdma_state_s00_hw_down:
1606                break;
1607
1608        case qib_sdma_state_s10_hw_start_up_wait:
1609                if (errs & QIB_E_P_SDMAHALT)
1610                        __qib_sdma_process_event(ppd,
1611                                qib_sdma_event_e20_hw_started);
1612                break;
1613
1614        case qib_sdma_state_s20_idle:
1615                break;
1616
1617        case qib_sdma_state_s30_sw_clean_up_wait:
1618                break;
1619
1620        case qib_sdma_state_s40_hw_clean_up_wait:
1621                if (errs & QIB_E_P_SDMAHALT)
1622                        __qib_sdma_process_event(ppd,
1623                                qib_sdma_event_e50_hw_cleaned);
1624                break;
1625
1626        case qib_sdma_state_s50_hw_halt_wait:
1627                if (errs & QIB_E_P_SDMAHALT)
1628                        __qib_sdma_process_event(ppd,
1629                                qib_sdma_event_e60_hw_halted);
1630                break;
1631
1632        case qib_sdma_state_s99_running:
1633                __qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
1634                __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
1635                break;
1636        }
1637
1638        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1639}
1640
1641/*
1642 * handle per-device errors (not per-port errors)
1643 */
1644static noinline void handle_7322_errors(struct qib_devdata *dd)
1645{
1646        char *msg;
1647        u64 iserr = 0;
1648        u64 errs;
1649        u64 mask;
1650        int log_idx;
1651
1652        qib_stats.sps_errints++;
1653        errs = qib_read_kreg64(dd, kr_errstatus);
1654        if (!errs) {
1655                qib_devinfo(dd->pcidev,
1656                        "device error interrupt, but no error bits set!\n");
1657                goto done;
1658        }
1659
1660        /* don't report errors that are masked */
1661        errs &= dd->cspec->errormask;
1662        msg = dd->cspec->emsgbuf;
1663
1664        /* do these first, they are most important */
1665        if (errs & QIB_E_HARDWARE) {
1666                *msg = '\0';
1667                qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
1668        } else
1669                for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
1670                        if (errs & dd->eep_st_masks[log_idx].errs_to_log)
1671                                qib_inc_eeprom_err(dd, log_idx, 1);
1672
1673        if (errs & QIB_E_SPKTERRS) {
1674                qib_disarm_7322_senderrbufs(dd->pport);
1675                qib_stats.sps_txerrs++;
1676        } else if (errs & QIB_E_INVALIDADDR)
1677                qib_stats.sps_txerrs++;
1678        else if (errs & QIB_E_ARMLAUNCH) {
1679                qib_stats.sps_txerrs++;
1680                qib_disarm_7322_senderrbufs(dd->pport);
1681        }
1682        qib_write_kreg(dd, kr_errclear, errs);
1683
1684        /*
1685         * The ones we mask off are handled specially below
1686         * or above.  Also mask SDMADISABLED by default as it
1687         * is too chatty.
1688         */
1689        mask = QIB_E_HARDWARE;
1690        *msg = '\0';
1691
1692        err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask,
1693                   qib_7322error_msgs);
1694
1695        /*
1696         * Getting reset is a tragedy for all ports. Mark the device
1697 * _and_ the ports as "offline" in a way meaningful to each.
1698         */
1699        if (errs & QIB_E_RESET) {
1700                int pidx;
1701
1702                qib_dev_err(dd,
1703                        "Got reset, requires re-init (unload and reload driver)\n");
1704                dd->flags &= ~QIB_INITTED;  /* needs re-init */
1705                /* mark as having had error */
1706                *dd->devstatusp |= QIB_STATUS_HWERROR;
1707                for (pidx = 0; pidx < dd->num_pports; ++pidx)
1708                        if (dd->pport[pidx].link_speed_supported)
1709                                *dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
1710        }
1711
1712        if (*msg && iserr)
1713                qib_dev_err(dd, "%s error\n", msg);
1714
1715        /*
1716         * If there were hdrq or egrfull errors, wake up any processes
1717         * waiting in poll.  We used to try to check which contexts had
1718         * the overflow, but given the cost of that and the chip reads
1719         * to support it, it's better to just wake everybody up if we
1720         * get an overflow; waiters can poll again if it's not them.
1721         */
1722        if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
1723                qib_handle_urcv(dd, ~0U);
1724                if (errs & ERR_MASK(RcvEgrFullErr))
1725                        qib_stats.sps_buffull++;
1726                else
1727                        qib_stats.sps_hdrfull++;
1728        }
1729
1730done:
1731        return;
1732}
1733
1734static void qib_error_tasklet(unsigned long data)
1735{
1736        struct qib_devdata *dd = (struct qib_devdata *)data;
1737
1738        handle_7322_errors(dd);
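            /* re-enable error interrupts, masked off until the errors were handled */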
1739        qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1740}
1741
1742static void reenable_chase(unsigned long opaque)
1743{
1744        struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
1745
1746        ppd->cpspec->chase_timer.expires = 0;
1747        qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1748                QLOGIC_IB_IBCC_LINKINITCMD_POLL);
1749}
1750
1751static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow,
1752                u8 ibclt)
1753{
1754        ppd->cpspec->chase_end = 0;
1755
1756        if (!qib_chase)
1757                return;
1758
1759        qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1760                QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1761        ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
1762        add_timer(&ppd->cpspec->chase_timer);
1763}
1764
1765static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
1766{
1767        u8 ibclt;
1768        unsigned long tnow;
1769
1770        ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
1771
1772        /*
1773         * Detect and handle the state chase issue, where we can
1774         * get stuck if we are unlucky on timing on both sides of
1775         * the link.   If we are, we disable, set a timer, and
1776         * then re-enable.
1777         */
1778        switch (ibclt) {
1779        case IB_7322_LT_STATE_CFGRCVFCFG:
1780        case IB_7322_LT_STATE_CFGWAITRMT:
1781        case IB_7322_LT_STATE_TXREVLANES:
1782        case IB_7322_LT_STATE_CFGENH:
1783                tnow = jiffies;
1784                if (ppd->cpspec->chase_end &&
1785                     time_after(tnow, ppd->cpspec->chase_end))
1786                        disable_chase(ppd, tnow, ibclt);
1787                else if (!ppd->cpspec->chase_end)
1788                        ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
1789                break;
1790        default:
1791                ppd->cpspec->chase_end = 0;
1792                break;
1793        }
1794
1795        if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
1796              ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
1797             ibclt == IB_7322_LT_STATE_LINKUP) &&
1798            (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
1799                force_h1(ppd);
1800                ppd->cpspec->qdr_reforce = 1;
1801                if (!ppd->dd->cspec->r1)
1802                        serdes_7322_los_enable(ppd, 0);
1803        } else if (ppd->cpspec->qdr_reforce &&
1804                (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
1805                 (ibclt == IB_7322_LT_STATE_CFGENH ||
1806                ibclt == IB_7322_LT_STATE_CFGIDLE ||
1807                ibclt == IB_7322_LT_STATE_LINKUP))
1808                force_h1(ppd);
1809
1810        if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
1811            ppd->link_speed_enabled == QIB_IB_QDR &&
1812            (ibclt == IB_7322_LT_STATE_CFGTEST ||
1813             ibclt == IB_7322_LT_STATE_CFGENH ||
1814             (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
1815              ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
1816                adj_tx_serdes(ppd);
1817
1818        if (ibclt != IB_7322_LT_STATE_LINKUP) {
1819                u8 ltstate = qib_7322_phys_portstate(ibcst);
1820                u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
1821                                          LinkTrainingState);
1822                if (!ppd->dd->cspec->r1 &&
1823                    pibclt == IB_7322_LT_STATE_LINKUP &&
1824                    ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1825                    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1826                    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1827                    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1828                        /* If the link went down (but not into recovery),
1829                         * turn LOS back on */
1830                        serdes_7322_los_enable(ppd, 1);
1831                if (!ppd->cpspec->qdr_dfe_on &&
1832                    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
1833                        ppd->cpspec->qdr_dfe_on = 1;
1834                        ppd->cpspec->qdr_dfe_time = 0;
1835                        /* On link down, reenable QDR adaptation */
1836                        qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
1837                                            ppd->dd->cspec->r1 ?
1838                                            QDR_STATIC_ADAPT_DOWN_R1 :
1839                                            QDR_STATIC_ADAPT_DOWN);
1840                        pr_info(
1841                                "IB%u:%u re-enabled QDR adaptation ibclt %x\n",
1842                                ppd->dd->unit, ppd->port, ibclt);
1843                }
1844        }
1845}
1846
1847static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
1848
1849/*
1850 * This is per-pport error handling.
1851 * It will likely get its own MSIx interrupt (one for each port,
1852 * though with just a single handler).
1853 */
1854static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1855{
1856        char *msg;
1857        u64 ignore_this_time = 0, iserr = 0, errs, fmask;
1858        struct qib_devdata *dd = ppd->dd;
1859
1860        /* do this as soon as possible */
1861        fmask = qib_read_kreg64(dd, kr_act_fmask);
1862        if (!fmask)
1863                check_7322_rxe_status(ppd);
1864
1865        errs = qib_read_kreg_port(ppd, krp_errstatus);
1866        if (!errs)
1867                qib_devinfo(dd->pcidev,
1868                         "Port%d error interrupt, but no error bits set!\n",
1869                         ppd->port);
1870        if (!fmask)
1871                errs &= ~QIB_E_P_IBSTATUSCHANGED;
1872        if (!errs)
1873                goto done;
1874
1875        msg = ppd->cpspec->epmsgbuf;
1876        *msg = '\0';
1877
1878        if (errs & ~QIB_E_P_BITSEXTANT) {
1879                err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1880                           errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
1881                if (!*msg)
1882                        snprintf(msg, sizeof(ppd->cpspec->epmsgbuf),
1883                                 "no others");
1884                qib_dev_porterr(dd, ppd->port,
1885                        "error interrupt with unknown errors 0x%016Lx set (and %s)\n",
1886                        (errs & ~QIB_E_P_BITSEXTANT), msg);
1887                *msg = '\0';
1888        }
1889
1890        if (errs & QIB_E_P_SHDR) {
1891                u64 symptom;
1892
1893                /* determine cause, then write to clear */
1894                symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
1895                qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
1896                err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom,
1897                           hdrchk_msgs);
1898                *msg = '\0';
1899                /* senderrbuf cleared in SPKTERRS below */
1900        }
1901
1902        if (errs & QIB_E_P_SPKTERRS) {
1903                if ((errs & QIB_E_P_LINK_PKTERRS) &&
1904                    !(ppd->lflags & QIBL_LINKACTIVE)) {
1905                        /*
1906                         * This can happen when trying to bring the link
1907                         * up, but the IB link changes state at the "wrong"
1908                         * time. The IB logic then complains that the packet
1909                         * isn't valid.  We don't want to confuse people, so
1910                         * we just don't print them, except at debug
1911                         */
1912                        err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1913                                   (errs & QIB_E_P_LINK_PKTERRS),
1914                                   qib_7322p_error_msgs);
1915                        *msg = '\0';
1916                        ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1917                }
1918                qib_disarm_7322_senderrbufs(ppd);
1919        } else if ((errs & QIB_E_P_LINK_PKTERRS) &&
1920                   !(ppd->lflags & QIBL_LINKACTIVE)) {
1921                /*
1922                 * This can happen when SMA is trying to bring the link
1923                 * up, but the IB link changes state at the "wrong" time.
1924                 * The IB logic then complains that the packet isn't
1925                 * valid.  We don't want to confuse people, so we just
1926                 * don't print them, except at debug
1927                 */
1928                err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs,
1929                           qib_7322p_error_msgs);
1930                ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1931                *msg = '\0';
1932        }
1933
1934        qib_write_kreg_port(ppd, krp_errclear, errs);
1935
1936        errs &= ~ignore_this_time;
1937        if (!errs)
1938                goto done;
1939
1940        if (errs & QIB_E_P_RPKTERRS)
1941                qib_stats.sps_rcverrs++;
1942        if (errs & QIB_E_P_SPKTERRS)
1943                qib_stats.sps_txerrs++;
1944
1945        iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
1946
1947        if (errs & QIB_E_P_SDMAERRS)
1948                sdma_7322_p_errors(ppd, errs);
1949
1950        if (errs & QIB_E_P_IBSTATUSCHANGED) {
1951                u64 ibcs;
1952                u8 ltstate;
1953
1954                ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
1955                ltstate = qib_7322_phys_portstate(ibcs);
1956
1957                if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
1958                        handle_serdes_issues(ppd, ibcs);
1959                if (!(ppd->cpspec->ibcctrl_a &
1960                      SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
1961                        /*
1962                         * We got our interrupt, so init code should be
1963                         * happy and not try alternatives. Now squelch
1964                         * other "chatter" from link-negotiation (pre Init)
1965                         */
1966                        ppd->cpspec->ibcctrl_a |=
1967                                SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
1968                        qib_write_kreg_port(ppd, krp_ibcctrl_a,
1969                                            ppd->cpspec->ibcctrl_a);
1970                }
1971
1972                /* Update our picture of width and speed from chip */
1973                ppd->link_width_active =
1974                        (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
1975                            IB_WIDTH_4X : IB_WIDTH_1X;
1976                ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
1977                        LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
1978                          SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
1979                                   QIB_IB_DDR : QIB_IB_SDR;
1980
1981                if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
1982                    IB_PHYSPORTSTATE_DISABLED)
1983                        qib_set_ib_7322_lstate(ppd, 0,
1984                               QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1985                else
1986                        /*
1987                         * Since going into a recovery state causes the link
1988                         * state to go down and since recovery is transitory,
1989                         * it is better if we "miss" ever seeing the link
1990                         * training state go into recovery (i.e., ignore this
1991                         * transition for link state special handling purposes)
1992                         * without updating lastibcstat.
1993                         */
1994                        if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1995                            ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1996                            ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1997                            ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1998                                qib_handle_e_ibstatuschanged(ppd, ibcs);
1999        }
2000        if (*msg && iserr)
2001                qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
2002
2003        if (ppd->state_wanted & ppd->lflags)
2004                wake_up_interruptible(&ppd->state_wait);
2005done:
2006        return;
2007}
2008
2009/* enable/disable chip from delivering interrupts */
2010static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
2011{
2012        if (enable) {
2013                if (dd->flags & QIB_BADINTR)
2014                        return;
2015                qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
2016                /* cause any pending enabled interrupts to be re-delivered */
2017                qib_write_kreg(dd, kr_intclear, 0ULL);
2018                if (dd->cspec->num_msix_entries) {
2019                        /* and same for MSIx */
2020                        u64 val = qib_read_kreg64(dd, kr_intgranted);
2021
2022                        if (val)
2023                                qib_write_kreg(dd, kr_intgranted, val);
2024                }
2025        } else
2026                qib_write_kreg(dd, kr_intmask, 0ULL);
2027}
2028
2029/*
2030 * Try to cleanup as much as possible for anything that might have gone
2031 * wrong while in freeze mode, such as pio buffers being written by user
2032 * processes (causing armlaunch), send errors due to going into freeze mode,
2033 * etc., and try to avoid causing extra interrupts while doing so.
2034 * Forcibly update the in-memory pioavail register copies after cleanup
2035 * because the chip won't do it while in freeze mode (the register values
2036 * themselves are kept correct).
2037 * Make sure that we don't lose any important interrupts by using the chip
2038 * feature that says that writing 0 to a bit in *clear that is set in
2039 * *status will cause an interrupt to be generated again (if allowed by
2040 * the *mask value).
2041 * This is in chip-specific code because of all of the register accesses,
2042 * even though the details are similar on most chips.
2043 */
2044static void qib_7322_clear_freeze(struct qib_devdata *dd)
2045{
2046        int pidx;
2047
2048        /* disable error interrupts, to avoid confusion */
2049        qib_write_kreg(dd, kr_errmask, 0ULL);
2050
2051        for (pidx = 0; pidx < dd->num_pports; ++pidx)
2052                if (dd->pport[pidx].link_speed_supported)
2053                        qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2054                                            0ULL);
2055
2056        /* also disable interrupts; errormask is sometimes overwritten */
2057        qib_7322_set_intr_state(dd, 0);
2058
2059        /* clear the freeze, and be sure chip saw it */
2060        qib_write_kreg(dd, kr_control, dd->control);
2061        qib_read_kreg32(dd, kr_scratch);
2062
2063        /*
2064         * Force new interrupt if any hwerr, error or interrupt bits are
2065         * still set, and clear "safe" send packet errors related to freeze
2066         * and cancelling sends.  Re-enable error interrupts before possible
2067         * force of re-interrupt on pending interrupts.
2068         */
2069        qib_write_kreg(dd, kr_hwerrclear, 0ULL);
2070        qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
2071        qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2072        /* We need to purge per-port errs and reset mask, too */
2073        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
2074                if (!dd->pport[pidx].link_speed_supported)
2075                        continue;
2076                qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0ULL);
2077                qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0ULL);
2078        }
2079        qib_7322_set_intr_state(dd, 1);
2080}
2081
2082/* no error handling to speak of */
2083/**
2084 * qib_7322_handle_hwerrors - display hardware errors.
2085 * @dd: the qlogic_ib device
2086 * @msg: the output buffer
2087 * @msgl: the size of the output buffer
2088 *
2089 * Most hardware errors are catastrophic, but for right now we'll
2090 * just print them and continue.  We reuse the same message buffer
2091 * as qib_handle_errors(), rather than a separate one, to avoid
2092 * excessive stack use.
2093 */
2094static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
2095                                     size_t msgl)
2096{
2097        u64 hwerrs;
2098        u32 ctrl;
2099        int isfatal = 0;
2100
2101        hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
2102        if (!hwerrs)
2103                goto bail;
2104        if (hwerrs == ~0ULL) {
2105                qib_dev_err(dd,
2106                        "Read of hardware error status failed (all bits set); ignoring\n");
2107                goto bail;
2108        }
2109        qib_stats.sps_hwerrs++;
2110
2111        /* Always clear the error status register, except BIST fail */
2112        qib_write_kreg(dd, kr_hwerrclear, hwerrs &
2113                       ~HWE_MASK(PowerOnBISTFailed));
2114
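            /* keep only the errors we are configured to report */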
2115        hwerrs &= dd->cspec->hwerrmask;
2116
2117        /* no EEPROM logging, yet */
2118
2119        if (hwerrs)
2120                qib_devinfo(dd->pcidev,
2121                        "Hardware error: hwerr=0x%llx (cleared)\n",
2122                        (unsigned long long) hwerrs);
2123
2124        ctrl = qib_read_kreg32(dd, kr_control);
2125        if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
2126                /*
2127                 * No recovery yet...
2128                 */
2129                if ((hwerrs & ~HWE_MASK(LATriggered)) ||
2130                    dd->cspec->stay_in_freeze) {
2131                        /*
2132                         * If any bits are set that we aren't ignoring, only
2133                         * make the complaint once, in case it's stuck or
2134                         * recurring, and we get here multiple times.
2135                         * Force the link down, so the switch knows, and the
2136                         * LEDs are turned off.
2137                         */
2138                        if (dd->flags & QIB_INITTED)
2139                                isfatal = 1;
2140                } else
2141                        qib_7322_clear_freeze(dd);
2142        }
2143
2144        if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
2145                isfatal = 1;
2146                strlcpy(msg,
2147                        "[Memory BIST test failed, InfiniPath hardware unusable]",
2148                        msgl);
2149                /* ignore from now on, so disable until driver reloaded */
2150                dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
2151                qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2152        }
2153
2154        err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);
2155
2156        /* Ignore esoteric PLL failures et al. */
2157
2158        qib_dev_err(dd, "%s hardware error\n", msg);
2159
2160        if (hwerrs &
2161                   (SYM_MASK(HwErrMask, SDmaMemReadErrMask_0) |
2162                    SYM_MASK(HwErrMask, SDmaMemReadErrMask_1))) {
2163                int pidx = 0;
2164                int err;
2165                unsigned long flags;
2166                struct qib_pportdata *ppd = dd->pport;
2167
2168                for (; pidx < dd->num_pports; ++pidx, ppd++) {
2169                        err = 0;
2170                        if (pidx == 0 && (hwerrs &
2171                                SYM_MASK(HwErrMask, SDmaMemReadErrMask_0)))
2172                                err++;
2173                        if (pidx == 1 && (hwerrs &
2174                                SYM_MASK(HwErrMask, SDmaMemReadErrMask_1)))
2175                                err++;
2176                        if (err) {
2177                                spin_lock_irqsave(&ppd->sdma_lock, flags);
2178                                dump_sdma_7322_state(ppd);
2179                                spin_unlock_irqrestore(&ppd->sdma_lock, flags);
2180                        }
2181                }
2182        }
2183
2184        if (isfatal && !dd->diag_client) {
2185                qib_dev_err(dd,
2186                        "Fatal Hardware Error, no longer usable, SN %.16s\n",
2187                        dd->serial);
2188                /*
2189                 * for /sys status file and user programs to print; if no
2190                 * trailing brace is copied, we'll know it was truncated.
2191                 */
2192                if (dd->freezemsg)
2193                        snprintf(dd->freezemsg, dd->freezelen,
2194                                 "{%s}", msg);
2195                qib_disable_after_error(dd);
2196        }
2197bail:;
2198}
2199
2200/**
2201 * qib_7322_init_hwerrors - enable hardware errors
2202 * @dd: the qlogic_ib device
2203 *
2204 * now that we have finished initializing everything that might reasonably
2205 * cause a hardware error, and cleared those error bits as they occurred,
2206 * we can enable hardware errors in the mask (potentially enabling
2207 * freeze mode), and enable hardware errors as errors (along with
2208 * everything else) in errormask
2209 */
2210static void qib_7322_init_hwerrors(struct qib_devdata *dd)
2211{
2212        int pidx;
2213        u64 extsval;
2214
2215        extsval = qib_read_kreg64(dd, kr_extstatus);
2216        if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
2217                         QIB_EXTS_MEMBIST_ENDTEST)))
2218                qib_dev_err(dd, "MemBIST did not complete!\n");
2219
2220        /* never clear BIST failure, so reported on each driver load */
2221        qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
2222        qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2223
2224        /* clear all */
2225        qib_write_kreg(dd, kr_errclear, ~0ULL);
2226        /* enable errors that are masked, at least this first time. */
2227        qib_write_kreg(dd, kr_errmask, ~0ULL);
2228        dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
2229        for (pidx = 0; pidx < dd->num_pports; ++pidx)
2230                if (dd->pport[pidx].link_speed_supported)
2231                        qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2232                                            ~0ULL);
2233}
2234
2235/*
2236 * Disable and enable the armlaunch error.  Used for PIO bandwidth testing
2237 * on chips that are count-based, rather than trigger-based.  There is no
2238 * reference counting, but that's also fine, given the intended use.
2239 * Only chip-specific because it's all register accesses
2240 */
2241static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
2242{
2243        if (enable) {
2244                qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
2245                dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
2246        } else
2247                dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
2248        qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2249}
2250
2251/*
2252 * Formerly took parameter <which> in pre-shifted,
2253 * pre-merged form with LinkCmd and LinkInitCmd
2254 * together, and assumed that zero was a NOP.
2255 */
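    /*
     * Typical uses: (QLOGIC_IB_IBCC_LINKCMD_DOWN,
     * QLOGIC_IB_IBCC_LINKINITCMD_POLL) takes the link down and re-arms
     * polling, as in reenable_chase() above, while linitcmd
     * QLOGIC_IB_IBCC_LINKINITCMD_DISABLE shuts the link off entirely.
     */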
2256static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
2257                                   u16 linitcmd)
2258{
2259        u64 mod_wd;
2260        struct qib_devdata *dd = ppd->dd;
2261        unsigned long flags;
2262
2263        if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
2264                /*
2265                 * If we are told to disable, note that so link-recovery
2266                 * code does not attempt to bring us back up.
2267                 * Also reset everything that we can, so we start
2268                 * completely clean when re-enabled (before we
2269                 * actually issue the disable to the IBC)
2270                 */
2271                qib_7322_mini_pcs_reset(ppd);
2272                spin_lock_irqsave(&ppd->lflags_lock, flags);
2273                ppd->lflags |= QIBL_IB_LINK_DISABLED;
2274                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2275        } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
2276                /*
2277                 * Any other linkinitcmd will lead to LINKDOWN and then
2278                 * to INIT (if all is well), so clear flag to let
2279                 * link-recovery code attempt to bring us back up.
2280                 */
2281                spin_lock_irqsave(&ppd->lflags_lock, flags);
2282                ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
2283                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2284                /*
2285                 * Clear status change interrupt reduction so the
2286                 * new state is seen.
2287                 */
2288                ppd->cpspec->ibcctrl_a &=
2289                        ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
2290        }
2291
2292        mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
2293                (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2294
2295        qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
2296                            mod_wd);
2297        /* write to chip to prevent back-to-back writes of ibc reg */
2298        qib_write_kreg(dd, kr_scratch, 0);
2299
2300}
2301
2302/*
2303 * The total RCV buffer memory is 64KB, used for both ports, and is
2304 * in units of 64 bytes (same as IB flow control credit unit).
2305 * The consumedVL fields in the same registers are in 32 byte units!
2306 * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
2307 * and we can therefore allocate just 9 IB credits for 2 VL15 packets
2308 * in krp_rxcreditvl15, rather than 10.
2309 */
2310#define RCV_BUF_UNITSZ 64
2311#define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
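    /*
     * Worked example with both ports present: 65536 / (64 * 2) = 512
     * units per port.  VL15 gets (2 * 288 + 63) / 64 = 9 units; with
     * e.g. 4 operational VLs the remaining 503 split as 125 per VL,
     * the excess 3 going to VL0 (128 total).
     */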
2312
2313static void set_vls(struct qib_pportdata *ppd)
2314{
2315        int i, numvls, totcred, cred_vl, vl0extra;
2316        struct qib_devdata *dd = ppd->dd;
2317        u64 val;
2318
2319        numvls = qib_num_vls(ppd->vls_operational);
2320
2321        /*
2322         * Set up per-VL credits. Below is a kluge based on these assumptions:
2323         * 1) port is disabled at the time early_init is called.
2324         * 2) give VL15 9 credits, for two max-plausible packets.
2325         * 3) give VL0-N the rest, with any rounding excess used for VL0
2326         */
2327        /* 2 VL15 packets @ 288 bytes each (including IB headers) */
2328        totcred = NUM_RCV_BUF_UNITS(dd);
2329        cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
2330        totcred -= cred_vl;
2331        qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
2332        cred_vl = totcred / numvls;
2333        vl0extra = totcred - cred_vl * numvls;
2334        qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
2335        for (i = 1; i < numvls; i++)
2336                qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
2337        for (; i < 8; i++) /* no buffer space for other VLs */
2338                qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
2339
2340        /* Notify IBC that credits need to be recalculated */
2341        val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
2342        val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2343        qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2344        qib_write_kreg(dd, kr_scratch, 0ULL);
2345        val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2346        qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2347
2348        for (i = 0; i < numvls; i++)
2349                val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
2350        val = qib_read_kreg_port(ppd, krp_rxcreditvl15);
2351
2352        /* Change the number of operational VLs */
2353        ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
2354                                ~SYM_MASK(IBCCtrlA_0, NumVLane)) |
2355                ((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
2356        qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2357        qib_write_kreg(dd, kr_scratch, 0ULL);
2358}
2359
2360/*
2361 * The code that deals with actual SerDes is in serdes_7322_init().
2362 * Compared to the code for iba7220, it is minimal.
2363 */
2364static int serdes_7322_init(struct qib_pportdata *ppd);
2365
2366/**
2367 * qib_7322_bringup_serdes - bring up the serdes
2368 * @ppd: physical port on the qlogic_ib device
2369 */
2370static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2371{
2372        struct qib_devdata *dd = ppd->dd;
2373        u64 val, guid, ibc;
2374        unsigned long flags;
2375        int ret = 0;
2376
2377        /*
2378         * SerDes model not in Pd, but still need to
2379         * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
2380         * eventually.
2381         */
2382        /* Put IBC in reset, sends disabled (should be in reset already) */
2383        ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2384        qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2385        qib_write_kreg(dd, kr_scratch, 0ULL);
2386
2387        /* ensure previous Tx parameters are not still forced */
2388        qib_write_kreg_port(ppd, krp_tx_deemph_override,
2389                SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
2390                reset_tx_deemphasis_override));
2391
2392        if (qib_compat_ddr_negotiate) {
2393                ppd->cpspec->ibdeltainprog = 1;
2394                ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
2395                                                crp_ibsymbolerr);
2396                ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
2397                                                crp_iblinkerrrecov);
2398        }
2399
2400        /* flowcontrolwatermark is in units of KBytes */
2401        ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
2402        /*
2403         * Flow control is sent this often, even if no changes in
2404         * buffer space occur.  Units are 128ns for this chip.
2405         * Set to 3 usec (24 * 128 ns = 3.072 usec).
2406         */
2407        ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
2408        /* max error tolerance */
2409        ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
2410        /* IB credit flow control. */
2411        ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
2412        /*
2413         * set initial max size pkt IBC will send, including ICRC; it's the
2414         * PIO buffer size in dwords, less 1; also see qib_set_mtu()
2415         */
2416        ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
2417                SYM_LSB(IBCCtrlA_0, MaxPktLen);
2418        ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
2419
2420        /*
2421         * Reset the PCS interface to the serdes (and also ibc, which is still
2422         * in reset from above).  Writes new value of ibcctrl_a as last step.
2423         */
2424        qib_7322_mini_pcs_reset(ppd);
2425
2426        if (!ppd->cpspec->ibcctrl_b) {
2427                unsigned lse = ppd->link_speed_enabled;
2428
2429                /*
2430                 * Not on re-init after reset, establish shadow
2431                 * and force initial config.
2432                 */
2433                ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
2434                                                             krp_ibcctrl_b);
2435                ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
2436                                IBA7322_IBC_SPEED_DDR |
2437                                IBA7322_IBC_SPEED_SDR |
2438                                IBA7322_IBC_WIDTH_AUTONEG |
2439                                SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
2440                if (lse & (lse - 1)) /* Multiple speeds enabled */
2441                        ppd->cpspec->ibcctrl_b |=
2442                                (lse << IBA7322_IBC_SPEED_LSB) |
2443                                IBA7322_IBC_IBTA_1_2_MASK |
2444                                IBA7322_IBC_MAX_SPEED_MASK;
2445                else
2446                        ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
2447                                IBA7322_IBC_SPEED_QDR |
2448                                 IBA7322_IBC_IBTA_1_2_MASK :
2449                                (lse == QIB_IB_DDR) ?
2450                                        IBA7322_IBC_SPEED_DDR :
2451                                        IBA7322_IBC_SPEED_SDR;
2452                if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
2453                    (IB_WIDTH_1X | IB_WIDTH_4X))
2454                        ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
2455                else
2456                        ppd->cpspec->ibcctrl_b |=
2457                                ppd->link_width_enabled == IB_WIDTH_4X ?
2458                                IBA7322_IBC_WIDTH_4X_ONLY :
2459                                IBA7322_IBC_WIDTH_1X_ONLY;
2460
2461                /* always enable these on driver reload, not sticky */
2462                ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
2463                        IBA7322_IBC_HRTBT_MASK);
2464        }
2465        qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
2466
2467        /* setup so we have more time at CFGTEST to change H1 */
2468        val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
2469        val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
2470        val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
2471        qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
2472
2473        serdes_7322_init(ppd);
2474
2475        guid = be64_to_cpu(ppd->guid);
2476        if (!guid) {
2477                if (dd->base_guid)
2478                        guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
2479                ppd->guid = cpu_to_be64(guid);
2480        }
2481
2482        qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
2483        /* write to chip to prevent back-to-back writes of ibc reg */
2484        qib_write_kreg(dd, kr_scratch, 0);
2485
2486        /* Enable port */
2487        ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
2488        set_vls(ppd);
2489
2490        /* initially come up DISABLED, without sending anything. */
2491        val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
2492                                        QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2493        qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
2494        qib_write_kreg(dd, kr_scratch, 0ULL);
2495        /* clear the linkinit cmds */
2496        ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
2497
2498        /* be paranoid against later code motion, etc. */
2499        spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2500        ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
2501        qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
2502        spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2503
2504        /* Also enable IBSTATUSCHG interrupt.  */
2505        val = qib_read_kreg_port(ppd, krp_errmask);
2506        qib_write_kreg_port(ppd, krp_errmask,
2507                val | ERR_MASK_N(IBStatusChanged));
2508
2509        /* Always zero until we start messing with SerDes for real */
2510        return ret;
2511}
2512
2513/**
2514 * qib_7322_mini_quiet_serdes - set serdes to txidle
2515 * @ppd: physical port on the qlogic_ib device
2516 * Called when the driver is being unloaded
2517 */
2518static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
2519{
2520        u64 val;
2521        unsigned long flags;
2522
2523        qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
2524
2525        spin_lock_irqsave(&ppd->lflags_lock, flags);
2526        ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
2527        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2528        wake_up(&ppd->cpspec->autoneg_wait);
2529        cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
2530        if (ppd->dd->cspec->r1)
2531                cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
2532
2533        ppd->cpspec->chase_end = 0;
2534        if (ppd->cpspec->chase_timer.data) /* if initted */
2535                del_timer_sync(&ppd->cpspec->chase_timer);
2536
2537        /*
2538         * Despite the name, actually disables IBC as well. Do it when
2539         * we are as sure as possible that no more packets can be
2540         * received, following the down and the PCS reset.
2541         * The actual disabling happens in qib_7322_mini_pcs_reset(),
2542         * along with the PCS being reset.
2543         */
2544        ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2545        qib_7322_mini_pcs_reset(ppd);
2546
2547        /*
2548         * Update the adjusted counters so the adjustment persists
2549         * across driver reload.
2550         */
2551        if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
2552            ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
2553                struct qib_devdata *dd = ppd->dd;
2554                u64 diagc;
2555
2556                /* enable counter writes */
2557                diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
2558                qib_write_kreg(dd, kr_hwdiagctrl,
2559                               diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
2560
2561                if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
2562                        val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
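                            /* val -= (val - snap) rolls val back to the
                             * snapshot taken when the adjustment began */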
2563                        if (ppd->cpspec->ibdeltainprog)
2564                                val -= val - ppd->cpspec->ibsymsnap;
2565                        val -= ppd->cpspec->ibsymdelta;
2566                        write_7322_creg_port(ppd, crp_ibsymbolerr, val);
2567                }
2568                if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
2569                        val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
2570                        if (ppd->cpspec->ibdeltainprog)
2571                                val = ppd->cpspec->iblnkerrsnap;
2572                        val -= ppd->cpspec->iblnkerrdelta;
2573                        write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
2574                }
2575                if (ppd->cpspec->iblnkdowndelta) {
2576                        val = read_7322_creg32_port(ppd, crp_iblinkdown);
2577                        val += ppd->cpspec->iblnkdowndelta;
2578                        write_7322_creg_port(ppd, crp_iblinkdown, val);
2579                }
2580                /*
2581                 * No need to save ibmalfdelta since IB perfcounters
2582                 * are cleared on driver reload.
2583                 */
2584
2585                /* and disable counter writes */
2586                qib_write_kreg(dd, kr_hwdiagctrl, diagc);
2587        }
2588}
2589
2590/**
2591 * qib_setup_7322_setextled - set the state of the two external LEDs
2592 * @ppd: physical port on the qlogic_ib device
2593 * @on: whether the link is up or not
2594 *
2595 * The exact combo of LEDs when @on is true is determined by
2596 * looking at the ibcstatus.
2597 *
2598 * These LEDs indicate the physical and logical state of the IB link.
2599 * For this chip (at least with recommended board pinouts), LED1
2600 * is Yellow (logical state) and LED2 is Green (physical state).
2601 *
2602 * Note:  We try to match the Mellanox HCA LED behavior as best
2603 * we can.  Green indicates physical link state is OK (something is
2604 * plugged in, and we can train).
2605 * Amber indicates the link is logically up (ACTIVE).
2606 * Mellanox further blinks the amber LED to indicate data packet
2607 * activity, but we have no hardware support for that, so it would
2608 * require waking up every 10-20 msecs and checking the counters
2609 * on the chip, and then turning the LED off if appropriate.  That's
2610 * visible overhead, so not something we will do.
2611 */
2612static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
2613{
2614        struct qib_devdata *dd = ppd->dd;
2615        u64 extctl, ledblink = 0, val;
2616        unsigned long flags;
2617        int yel, grn;
2618
2619        /*
2620         * The diags use the LED to indicate diag info, so we leave
2621         * the external LED alone when the diags are running.
2622         */
2623        if (dd->diag_client)
2624                return;
2625
2626        /* Allow override of LED display, e.g. to locate a system in a rack */
2627        if (ppd->led_override) {
2628                grn = (ppd->led_override & QIB_LED_PHYS);
2629                yel = (ppd->led_override & QIB_LED_LOG);
2630        } else if (on) {
2631                val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
2632                grn = qib_7322_phys_portstate(val) ==
2633                        IB_PHYSPORTSTATE_LINKUP;
2634                yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
2635        } else {
2636                grn = 0;
2637                yel = 0;
2638        }
2639
2640        spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2641        extctl = dd->cspec->extctrl & (ppd->port == 1 ?
2642                ~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
2643        if (grn) {
2644                extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
2645                /*
2646                 * Counts are in chip clock (4ns) periods.
2647                 * This is ~1/15 sec (66.6 ms) on,
2648                 * 3/16 sec (187.5 ms) off, with packets rcvd.
2649                 */
2650                ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
2651                        ((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
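                /*
                 * Worked arithmetic for the on-time: 66.6 ms is
                 * 66,600,000 ns, and at 4 ns per chip clock that is
                 * 16,650,000 counts, i.e. (66600 * 1000UL / 4).
                 */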
2652        }
2653        if (yel)
2654                extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
2655        dd->cspec->extctrl = extctl;
2656        qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
2657        spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2658
2659        if (ledblink) /* blink the LED on packet receive */
2660                qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
2661}
2662
2663#ifdef CONFIG_INFINIBAND_QIB_DCA
2664
2665static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event)
2666{
2667        switch (event) {
2668        case DCA_PROVIDER_ADD:
2669                if (dd->flags & QIB_DCA_ENABLED)
2670                        break;
2671                if (!dca_add_requester(&dd->pcidev->dev)) {
2672                        qib_devinfo(dd->pcidev, "DCA enabled\n");
2673                        dd->flags |= QIB_DCA_ENABLED;
2674                        qib_setup_dca(dd);
2675                }
2676                break;
2677        case DCA_PROVIDER_REMOVE:
2678                if (dd->flags & QIB_DCA_ENABLED) {
2679                        dca_remove_requester(&dd->pcidev->dev);
2680                        dd->flags &= ~QIB_DCA_ENABLED;
2681                        dd->cspec->dca_ctrl = 0;
2682                        qib_write_kreg(dd, KREG_IDX(DCACtrlA),
2683                                dd->cspec->dca_ctrl);
2684                }
2685                break;
2686        }
2687        return 0;
2688}
2689
2690static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd, int cpu)
2691{
2692        struct qib_devdata *dd = rcd->dd;
2693        struct qib_chip_specific *cspec = dd->cspec;
2694
2695        if (!(dd->flags & QIB_DCA_ENABLED))
2696                return;
2697        if (cspec->rhdr_cpu[rcd->ctxt] != cpu) {
2698                const struct dca_reg_map *rmp;
2699
2700                cspec->rhdr_cpu[rcd->ctxt] = cpu;
2701                rmp = &dca_rcvhdr_reg_map[rcd->ctxt];
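                /*
                 * Clear this context's old tag field in the shadow,
                 * then insert the DCA tag for the new cpu at the
                 * field's lsb before writing the register back.
                 */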
2702                cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask;
2703                cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |=
2704                        (u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb;
2705                qib_devinfo(dd->pcidev,
2706                        "Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu,
2707                        (long long) cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2708                qib_write_kreg(dd, rmp->regno,
2709                               cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2710                cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable);
2711                qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2712        }
2713}
2714
2715static void qib_update_sdma_dca(struct qib_pportdata *ppd, int cpu)
2716{
2717        struct qib_devdata *dd = ppd->dd;
2718        struct qib_chip_specific *cspec = dd->cspec;
2719        unsigned pidx = ppd->port - 1;
2720
2721        if (!(dd->flags & QIB_DCA_ENABLED))
2722                return;
2723        if (cspec->sdma_cpu[pidx] != cpu) {
2724                cspec->sdma_cpu[pidx] = cpu;
2725                cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ?
2726                        SYM_MASK(DCACtrlF, SendDma1DCAOPH) :
2727                        SYM_MASK(DCACtrlF, SendDma0DCAOPH));
2728                cspec->dca_rcvhdr_ctrl[4] |=
2729                        (u64) dca3_get_tag(&dd->pcidev->dev, cpu) <<
2730                                (ppd->hw_pidx ?
2731                                        SYM_LSB(DCACtrlF, SendDma1DCAOPH) :
2732                                        SYM_LSB(DCACtrlF, SendDma0DCAOPH));
2733                qib_devinfo(dd->pcidev,
2734                        "sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu,
2735                        (long long) cspec->dca_rcvhdr_ctrl[4]);
2736                qib_write_kreg(dd, KREG_IDX(DCACtrlF),
2737                               cspec->dca_rcvhdr_ctrl[4]);
2738                cspec->dca_ctrl |= ppd->hw_pidx ?
2739                        SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) :
2740                        SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable);
2741                qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2742        }
2743}
2744
2745static void qib_setup_dca(struct qib_devdata *dd)
2746{
2747        struct qib_chip_specific *cspec = dd->cspec;
2748        int i;
2749
2750        for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++)
2751                cspec->rhdr_cpu[i] = -1;
2752        for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2753                cspec->sdma_cpu[i] = -1;
2754        cspec->dca_rcvhdr_ctrl[0] =
2755                (1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) |
2756                (1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) |
2757                (1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) |
2758                (1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt));
2759        cspec->dca_rcvhdr_ctrl[1] =
2760                (1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) |
2761                (1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) |
2762                (1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) |
2763                (1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt));
2764        cspec->dca_rcvhdr_ctrl[2] =
2765                (1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) |
2766                (1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) |
2767                (1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) |
2768                (1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt));
2769        cspec->dca_rcvhdr_ctrl[3] =
2770                (1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) |
2771                (1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) |
2772                (1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) |
2773                (1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt));
2774        cspec->dca_rcvhdr_ctrl[4] =
2775                (1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) |
2776                (1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt));
2777        for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2778                qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
2779                               cspec->dca_rcvhdr_ctrl[i]);
2780        for (i = 0; i < cspec->num_msix_entries; i++)
2781                setup_dca_notifier(dd, &cspec->msix_entries[i]);
2782}
2783
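/*
 * irq_affinity_notify callback: when the affinity of a DCA-enabled
 * MSIx vector changes, retarget the receive-header or SDMA DCA tag
 * at the first cpu in the new mask.
 */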
2784static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
2785                             const cpumask_t *mask)
2786{
2787        struct qib_irq_notify *n =
2788                container_of(notify, struct qib_irq_notify, notify);
2789        int cpu = cpumask_first(mask);
2790
2791        if (n->rcv) {
2792                struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2793
2794                qib_update_rhdrq_dca(rcd, cpu);
2795        } else {
2796                struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2797
2798                qib_update_sdma_dca(ppd, cpu);
2799        }
2800}
2801
2802static void qib_irq_notifier_release(struct kref *ref)
2803{
2804        struct qib_irq_notify *n =
2805                container_of(ref, struct qib_irq_notify, notify.kref);
2806        struct qib_devdata *dd;
2807
2808        if (n->rcv) {
2809                struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2810
2811                dd = rcd->dd;
2812        } else {
2813                struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2814
2815                dd = ppd->dd;
2816        }
2817        qib_devinfo(dd->pcidev,
2818                "release on HCA notify 0x%p n 0x%p\n", ref, n);
2819        kfree(n);
2820}
2821#endif
2822
2823/*
2824 * Disable MSIx interrupt if enabled, call generic MSIx code
2825 * to cleanup, and clear pending MSIx interrupts.
2826 * Used for fallback to INTx, after reset, and when MSIx setup fails.
2827 */
2828static void qib_7322_nomsix(struct qib_devdata *dd)
2829{
2830        u64 intgranted;
2831        int n;
2832
2833        dd->cspec->main_int_mask = ~0ULL;
2834        n = dd->cspec->num_msix_entries;
2835        if (n) {
2836                int i;
2837
2838                dd->cspec->num_msix_entries = 0;
2839                for (i = 0; i < n; i++) {
2840#ifdef CONFIG_INFINIBAND_QIB_DCA
2841                        reset_dca_notifier(dd, &dd->cspec->msix_entries[i]);
2842#endif
2843                        irq_set_affinity_hint(
2844                          dd->cspec->msix_entries[i].msix.vector, NULL);
2845                        free_cpumask_var(dd->cspec->msix_entries[i].mask);
2846                        free_irq(dd->cspec->msix_entries[i].msix.vector,
2847                           dd->cspec->msix_entries[i].arg);
2848                }
2849                qib_nomsix(dd);
2850        }
2851        /* make sure no MSIx interrupts are left pending */
2852        intgranted = qib_read_kreg64(dd, kr_intgranted);
2853        if (intgranted)
2854                qib_write_kreg(dd, kr_intgranted, intgranted);
2855}
2856
2857static void qib_7322_free_irq(struct qib_devdata *dd)
2858{
2859        if (dd->cspec->irq) {
2860                free_irq(dd->cspec->irq, dd);
2861                dd->cspec->irq = 0;
2862        }
2863        qib_7322_nomsix(dd);
2864}
2865
2866static void qib_setup_7322_cleanup(struct qib_devdata *dd)
2867{
2868        int i;
2869
2870#ifdef CONFIG_INFINIBAND_QIB_DCA
2871        if (dd->flags & QIB_DCA_ENABLED) {
2872                dca_remove_requester(&dd->pcidev->dev);
2873                dd->flags &= ~QIB_DCA_ENABLED;
2874                dd->cspec->dca_ctrl = 0;
2875                qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl);
2876        }
2877#endif
2878
2879        qib_7322_free_irq(dd);
2880        kfree(dd->cspec->cntrs);
2881        kfree(dd->cspec->sendchkenable);
2882        kfree(dd->cspec->sendgrhchk);
2883        kfree(dd->cspec->sendibchk);
2884        kfree(dd->cspec->msix_entries);
2885        for (i = 0; i < dd->num_pports; i++) {
2886                unsigned long flags;
2887                u32 mask = QSFP_GPIO_MOD_PRS_N |
2888                        (QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);
2889
2890                kfree(dd->pport[i].cpspec->portcntrs);
2891                if (dd->flags & QIB_HAS_QSFP) {
2892                        spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2893                        dd->cspec->gpio_mask &= ~mask;
2894                        qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2895                        spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2896                }
2897        }
2898}
2899
2900/* handle SDMA interrupts */
2901static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
2902{
2903        struct qib_pportdata *ppd0 = &dd->pport[0];
2904        struct qib_pportdata *ppd1 = &dd->pport[1];
2905        u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
2906                INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
2907        u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
2908                INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));
2909
2910        if (intr0)
2911                qib_sdma_intr(ppd0);
2912        if (intr1)
2913                qib_sdma_intr(ppd1);
2914
2915        if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
2916                qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
2917        if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
2918                qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
2919}
2920
2921/*
2922 * Set or clear the Send buffer available interrupt enable bit.
2923 */
2924static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
2925{
2926        unsigned long flags;
2927
2928        spin_lock_irqsave(&dd->sendctrl_lock, flags);
2929        if (needint)
2930                dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
2931        else
2932                dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
2933        qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
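        /*
         * The scratch write below follows the driver's idiom of using
         * a scratch-register write to flush the previous update to the
         * chip.
         */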
2934        qib_write_kreg(dd, kr_scratch, 0ULL);
2935        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2936}
2937
2938/*
2939 * Somehow got an interrupt with reserved bits set in interrupt status.
2940 * Print a message so we know it happened, then clear them.
2941 * Kept out of line to keep the mainline interrupt handler cache-friendly.
2942 */
2943static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
2944{
2945        u64 kills;
2947
2948        kills = istat & ~QIB_I_BITSEXTANT;
2949        qib_dev_err(dd,
2950                "Clearing reserved interrupt(s) 0x%016llx: %s\n",
2951                (unsigned long long) kills, msg);
2952        qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
2953}
2954
2955/* keep mainline interrupt handler cache-friendly */
2956static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
2957{
2958        u32 gpiostatus;
2959        int handled = 0;
2960        int pidx;
2961
2962        /*
2963         * Boards for this chip currently don't use GPIO interrupts,
2964         * so clear by writing GPIOstatus to GPIOclear, and complain
2965         * to developer.  To avoid endless repeats, clear
2966         * the bits in the mask, since there is some kind of
2967         * programming error or chip problem.
2968         */
2969        gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
2970        /*
2971         * In theory, writing GPIOstatus to GPIOclear could
2972         * have a bad side-effect on some diagnostic that wanted
2973         * to poll for a status-change, but the various shadows
2974         * make that problematic at best. Diags will just suppress
2975         * all GPIO interrupts during such tests.
2976         */
2977        qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
2978        /*
2979         * Check for QSFP MOD_PRS changes
2980         * only works for single port if IB1 != pidx1
2981         */
2982        for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
2983             ++pidx) {
2984                struct qib_pportdata *ppd;
2985                struct qib_qsfp_data *qd;
2986                u32 mask;
2987
2988                if (!dd->pport[pidx].link_speed_supported)
2989                        continue;
2990                mask = QSFP_GPIO_MOD_PRS_N;
2991                ppd = dd->pport + pidx;
2992                mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
2993                if (gpiostatus & dd->cspec->gpio_mask & mask) {
2994                        u64 pins;
2995
2996                        qd = &ppd->cpspec->qsfp_data;
2997                        gpiostatus &= ~mask;
2998                        pins = qib_read_kreg64(dd, kr_extstatus);
2999                        pins >>= SYM_LSB(EXTStatus, GPIOIn);
3000                        if (!(pins & mask)) {
3001                                ++handled;
3002                                qd->t_insert = jiffies;
3003                                queue_work(ib_wq, &qd->work);
3004                        }
3005                }
3006        }
3007
3008        if (gpiostatus && !handled) {
3009                const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
3010                u32 gpio_irq = mask & gpiostatus;
3011
3012                /*
3013                 * Clear any troublemakers, and update chip from shadow
3014                 */
3015                dd->cspec->gpio_mask &= ~gpio_irq;
3016                qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
3017        }
3018}
3019
3020/*
3021 * Handle errors and unusual events first, separate function
3022 * to improve cache hits for fast path interrupt handling.
3023 */
3024static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
3025{
3026        if (istat & ~QIB_I_BITSEXTANT)
3027                unknown_7322_ibits(dd, istat);
3028        if (istat & QIB_I_GPIO)
3029                unknown_7322_gpio_intr(dd);
3030        if (istat & QIB_I_C_ERROR) {
3031                qib_write_kreg(dd, kr_errmask, 0ULL);
3032                tasklet_schedule(&dd->error_tasklet);
3033        }
3034        if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
3035                handle_7322_p_errors(dd->rcd[0]->ppd);
3036        if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
3037                handle_7322_p_errors(dd->rcd[1]->ppd);
3038}
3039
3040/*
3041 * Dynamically adjust the rcv int timeout for a context based on incoming
3042 * packet rate.
3043 */
3044static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
3045{
3046        struct qib_devdata *dd = rcd->dd;
3047        u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
3048
3049        /*
3050         * Dynamically adjust idle timeout on chip
3051         * based on number of packets processed.
3052         */
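        /*
         * E.g. assuming defaults of rcv_int_count = 16 and
         * rcv_int_timeout = 375 (illustrative values): fewer than 16
         * packets per interrupt halves the timeout toward a floor of
         * 2, while 16 or more doubles it toward the 375 cap.
         */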
3053        if (npkts < rcv_int_count && timeout > 2)
3054                timeout >>= 1;
3055        else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
3056                timeout = min(timeout << 1, rcv_int_timeout);
3057        else
3058                return;
3059
3060        dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
3061        qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
3062}
3063
3064/*
3065 * This is the main interrupt handler.
3066 * It will normally only be used for low frequency interrupts but may
3067 * have to handle all interrupts if INTx is enabled or fewer than normal
3068 * MSIx interrupts were allocated.
3069 * This routine should ignore the interrupt bits for any of the
3070 * dedicated MSIx handlers.
3071 */
3072static irqreturn_t qib_7322intr(int irq, void *data)
3073{
3074        struct qib_devdata *dd = data;
3075        irqreturn_t ret;
3076        u64 istat;
3077        u64 ctxtrbits;
3078        u64 rmask;
3079        unsigned i;
3080        u32 npkts;
3081
3082        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
3083                /*
3084                 * This return value is not great, but we do not want the
3085                 * interrupt core code to remove our interrupt handler
3086                 * because we don't appear to be handling an interrupt
3087                 * during a chip reset.
3088                 */
3089                ret = IRQ_HANDLED;
3090                goto bail;
3091        }
3092
3093        istat = qib_read_kreg64(dd, kr_intstatus);
3094
3095        if (unlikely(istat == ~0ULL)) {
3096                qib_bad_intrstatus(dd);
3097                qib_dev_err(dd, "Interrupt status all f's, skipping\n");
3098                /* don't know if it was our interrupt or not */
3099                ret = IRQ_NONE;
3100                goto bail;
3101        }
3102
3103        istat &= dd->cspec->main_int_mask;
3104        if (unlikely(!istat)) {
3105                /* already handled, or shared and not us */
3106                ret = IRQ_NONE;
3107                goto bail;
3108        }
3109
3110        this_cpu_inc(*dd->int_counter);
3111
3112        /* handle "errors" of various kinds first, device ahead of port */
3113        if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
3114                              QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
3115                              INT_MASK_P(Err, 1))))
3116                unlikely_7322_intr(dd, istat);
3117
3118        /*
3119         * Clear the interrupt bits we found set, relatively early, so we
3120         * "know" know the chip will have seen this by the time we process
3121         * the queue, and will re-interrupt if necessary.  The processor
3122         * itself won't take the interrupt again until we return.
3123         */
3124        qib_write_kreg(dd, kr_intclear, istat);
3125
3126        /*
3127         * Handle kernel receive queues before checking for pio buffers
3128         * available since receives can overflow; piobuf waiters can afford
3129         * a few extra cycles, since they were waiting anyway.
3130         */
3131        ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
3132        if (ctxtrbits) {
3133                rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
3134                        (1ULL << QIB_I_RCVURG_LSB);
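                /*
                 * rmask starts as the RcvAvail|RcvUrg bit pair for
                 * context 0; shifting it left once per iteration walks
                 * both bits across the kernel contexts in step.
                 */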
3135                for (i = 0; i < dd->first_user_ctxt; i++) {
3136                        if (ctxtrbits & rmask) {
3137                                ctxtrbits &= ~rmask;
3138                                if (dd->rcd[i])
3139                                        qib_kreceive(dd->rcd[i], NULL, &npkts);
3140                        }
3141                        rmask <<= 1;
3142                }
3143                if (ctxtrbits) {
3144                        ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
3145                                (ctxtrbits >> QIB_I_RCVURG_LSB);
3146                        qib_handle_urcv(dd, ctxtrbits);
3147                }
3148        }
3149
3150        if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
3151                sdma_7322_intr(dd, istat);
3152
3153        if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
3154                qib_ib_piobufavail(dd);
3155
3156        ret = IRQ_HANDLED;
3157bail:
3158        return ret;
3159}
3160
3161/*
3162 * Dedicated receive packet available interrupt handler.
3163 */
3164static irqreturn_t qib_7322pintr(int irq, void *data)
3165{
3166        struct qib_ctxtdata *rcd = data;
3167        struct qib_devdata *dd = rcd->dd;
3168        u32 npkts;
3169
3170        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3171                /*
3172                 * This return value is not great, but we do not want the
3173                 * interrupt core code to remove our interrupt handler
3174                 * because we don't appear to be handling an interrupt
3175                 * during a chip reset.
3176                 */
3177                return IRQ_HANDLED;
3178
3179        this_cpu_inc(*dd->int_counter);
3180
3181        /* Clear the interrupt bit we expect to be set. */
3182        qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
3183                       (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
3184
3185        qib_kreceive(rcd, NULL, &npkts);
3186
3187        return IRQ_HANDLED;
3188}
3189
3190/*
3191 * Dedicated Send buffer available interrupt handler.
3192 */
3193static irqreturn_t qib_7322bufavail(int irq, void *data)
3194{
3195        struct qib_devdata *dd = data;
3196
3197        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3198                /*
3199                 * This return value is not great, but we do not want the
3200                 * interrupt core code to remove our interrupt handler
3201                 * because we don't appear to be handling an interrupt
3202                 * during a chip reset.
3203                 */
3204                return IRQ_HANDLED;
3205
3206        this_cpu_inc(*dd->int_counter);
3207
3208        /* Clear the interrupt bit we expect to be set. */
3209        qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);
3210
3211        /* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
3212        if (dd->flags & QIB_INITTED)
3213                qib_ib_piobufavail(dd);
3214        else
3215                qib_wantpiobuf_7322_intr(dd, 0);
3216
3217        return IRQ_HANDLED;
3218}
3219
3220/*
3221 * Dedicated Send DMA interrupt handler.
3222 */
3223static irqreturn_t sdma_intr(int irq, void *data)
3224{
3225        struct qib_pportdata *ppd = data;
3226        struct qib_devdata *dd = ppd->dd;
3227
3228        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3229                /*
3230                 * This return value is not great, but we do not want the
3231                 * interrupt core code to remove our interrupt handler
3232                 * because we don't appear to be handling an interrupt
3233                 * during a chip reset.
3234                 */
3235                return IRQ_HANDLED;
3236
3237        this_cpu_inc(*dd->int_counter);
3238
3239        /* Clear the interrupt bit we expect to be set. */
3240        qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3241                       INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
3242        qib_sdma_intr(ppd);
3243
3244        return IRQ_HANDLED;
3245}
3246
3247/*
3248 * Dedicated Send DMA idle interrupt handler.
3249 */
3250static irqreturn_t sdma_idle_intr(int irq, void *data)
3251{
3252        struct qib_pportdata *ppd = data;
3253        struct qib_devdata *dd = ppd->dd;
3254
3255        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3256                /*
3257                 * This return value is not great, but we do not want the
3258                 * interrupt core code to remove our interrupt handler
3259                 * because we don't appear to be handling an interrupt
3260                 * during a chip reset.
3261                 */
3262                return IRQ_HANDLED;
3263
3264        this_cpu_inc(*dd->int_counter);
3265
3266        /* Clear the interrupt bit we expect to be set. */
3267        qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3268                       INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
3269        qib_sdma_intr(ppd);
3270
3271        return IRQ_HANDLED;
3272}
3273
3274/*
3275 * Dedicated Send DMA progress interrupt handler.
3276 */
3277static irqreturn_t sdma_progress_intr(int irq, void *data)
3278{
3279        struct qib_pportdata *ppd = data;
3280        struct qib_devdata *dd = ppd->dd;
3281
3282        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3283                /*
3284                 * This return value is not great, but we do not want the
3285                 * interrupt core code to remove our interrupt handler
3286                 * because we don't appear to be handling an interrupt
3287                 * during a chip reset.
3288                 */
3289                return IRQ_HANDLED;
3290
3291        this_cpu_inc(*dd->int_counter);
3292
3293        /* Clear the interrupt bit we expect to be set. */
3294        qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3295                       INT_MASK_P(SDmaProgress, 1) :
3296                       INT_MASK_P(SDmaProgress, 0));
3297        qib_sdma_intr(ppd);
3298
3299        return IRQ_HANDLED;
3300}
3301
3302/*
3303 * Dedicated Send DMA cleanup interrupt handler.
3304 */
3305static irqreturn_t sdma_cleanup_intr(int irq, void *data)
3306{
3307        struct qib_pportdata *ppd = data;
3308        struct qib_devdata *dd = ppd->dd;
3309
3310        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3311                /*
3312                 * This return value is not great, but we do not want the
3313                 * interrupt core code to remove our interrupt handler
3314                 * because we don't appear to be handling an interrupt
3315                 * during a chip reset.
3316                 */
3317                return IRQ_HANDLED;
3318
3319        this_cpu_inc(*dd->int_counter);
3320
3321        /* Clear the interrupt bit we expect to be set. */
3322        qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3323                       INT_MASK_PM(SDmaCleanupDone, 1) :
3324                       INT_MASK_PM(SDmaCleanupDone, 0));
3325        qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
3326
3327        return IRQ_HANDLED;
3328}
3329
3330#ifdef CONFIG_INFINIBAND_QIB_DCA
3331
3332static void reset_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
3333{
3334        if (!m->dca)
3335                return;
3336        qib_devinfo(dd->pcidev,
3337                "Disabling notifier on HCA %d irq %d\n",
3338                dd->unit,
3339                m->msix.vector);
3340        irq_set_affinity_notifier(
3341                m->msix.vector,
3342                NULL);
3343        m->notifier = NULL;
3344}
3345
3346static void setup_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
3347{
3348        struct qib_irq_notify *n;
3349
3350        if (!m->dca)
3351                return;
3352        n = kzalloc(sizeof(*n), GFP_KERNEL);
3353        if (n) {
3354                int ret;
3355
3356                m->notifier = n;
3357                n->notify.irq = m->msix.vector;
3358                n->notify.notify = qib_irq_notifier_notify;
3359                n->notify.release = qib_irq_notifier_release;
3360                n->arg = m->arg;
3361                n->rcv = m->rcv;
3362                qib_devinfo(dd->pcidev,
3363                        "set notifier irq %d rcv %d notify %p\n",
3364                        n->notify.irq, n->rcv, &n->notify);
3365                ret = irq_set_affinity_notifier(
3366                                n->notify.irq,
3367                                &n->notify);
3368                if (ret) {
3369                        m->notifier = NULL;
3370                        kfree(n);
3371                }
3372        }
3373}
3374
3375#endif
3376
3377/*
3378 * Set up our chip-specific interrupt handler.
3379 * The interrupt type has already been setup, so
3380 * we just need to do the registration and error checking.
3381 * If we are using MSIx interrupts, we may fall back to
3382 * INTx later, if the interrupt handler doesn't get called
3383 * within 1/2 second (see verify_interrupt()).
3384 */
3385static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
3386{
3387        int ret, i, msixnum;
3388        u64 redirect[6];
3389        u64 mask;
3390        const struct cpumask *local_mask;
3391        int firstcpu, secondcpu = 0, currrcvcpu = 0;
3392
3393        if (!dd->num_pports)
3394                return;
3395
3396        if (clearpend) {
3397                /*
3398                 * if not switching interrupt types, be sure interrupts are
3399                 * disabled, and then clear anything pending at this point,
3400                 * because we are starting clean.
3401                 */
3402                qib_7322_set_intr_state(dd, 0);
3403
3404                /* clear the reset error, init error/hwerror mask */
3405                qib_7322_init_hwerrors(dd);
3406
3407                /* clear any interrupt bits that might be set */
3408                qib_write_kreg(dd, kr_intclear, ~0ULL);
3409
3410                /* make sure no pending MSIx intr, and clear diag reg */
3411                qib_write_kreg(dd, kr_intgranted, ~0ULL);
3412                qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
3413        }
3414
3415        if (!dd->cspec->num_msix_entries) {
3416                /* Try to get INTx interrupt */
3417try_intx:
3418                if (!dd->pcidev->irq) {
3419                        qib_dev_err(dd,
3420                                "irq is 0, BIOS error?  Interrupts won't work\n");
3421                        goto bail;
3422                }
3423                ret = request_irq(dd->pcidev->irq, qib_7322intr,
3424                                  IRQF_SHARED, QIB_DRV_NAME, dd);
3425                if (ret) {
3426                        qib_dev_err(dd,
3427                                "Couldn't setup INTx interrupt (irq=%d): %d\n",
3428                                dd->pcidev->irq, ret);
3429                        goto bail;
3430                }
3431                dd->cspec->irq = dd->pcidev->irq;
3432                dd->cspec->main_int_mask = ~0ULL;
3433                goto bail;
3434        }
3435
3436        /* Try to get MSIx interrupts */
3437        memset(redirect, 0, sizeof(redirect));
3438        mask = ~0ULL;
3439        msixnum = 0;
3440        local_mask = cpumask_of_pcibus(dd->pcidev->bus);
3441        firstcpu = cpumask_first(local_mask);
3442        if (firstcpu >= nr_cpu_ids ||
3443                        cpumask_weight(local_mask) == num_online_cpus()) {
3444                local_mask = topology_core_cpumask(0);
3445                firstcpu = cpumask_first(local_mask);
3446        }
3447        if (firstcpu < nr_cpu_ids) {
3448                secondcpu = cpumask_next(firstcpu, local_mask);
3449                if (secondcpu >= nr_cpu_ids)
3450                        secondcpu = firstcpu;
3451                currrcvcpu = secondcpu;
3452        }
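        /*
         * Affinity policy for the loop below: general and SDMA vectors
         * are pinned to firstcpu, while receive-context vectors are
         * spread round-robin across the remaining local cpus, starting
         * at secondcpu.
         */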
3453        for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
3454                irq_handler_t handler;
3455                void *arg;
3456                u64 val;
3457                int lsb, reg, sh;
3458#ifdef CONFIG_INFINIBAND_QIB_DCA
3459                int dca = 0;
3460#endif
3461
3462                dd->cspec->msix_entries[msixnum].
3463                        name[sizeof(dd->cspec->msix_entries[msixnum].name) - 1]
3464                        = '\0';
3465                if (i < ARRAY_SIZE(irq_table)) {
3466                        if (irq_table[i].port) {
3467                                /* skip if for a non-configured port */
3468                                if (irq_table[i].port > dd->num_pports)
3469                                        continue;
3470                                arg = dd->pport + irq_table[i].port - 1;
3471                        } else
3472                                arg = dd;
3473#ifdef CONFIG_INFINIBAND_QIB_DCA
3474                        dca = irq_table[i].dca;
3475#endif
3476                        lsb = irq_table[i].lsb;
3477                        handler = irq_table[i].handler;
3478                        snprintf(dd->cspec->msix_entries[msixnum].name,
3479                                sizeof(dd->cspec->msix_entries[msixnum].name)
3480                                 - 1,
3481                                QIB_DRV_NAME "%d%s", dd->unit,
3482                                irq_table[i].name);
3483                } else {
3484                        unsigned ctxt;
3485
3486                        ctxt = i - ARRAY_SIZE(irq_table);
3487                        /* per krcvq context receive interrupt */
3488                        arg = dd->rcd[ctxt];
3489                        if (!arg)
3490                                continue;
3491                        if (qib_krcvq01_no_msi && ctxt < 2)
3492                                continue;
3493#ifdef CONFIG_INFINIBAND_QIB_DCA
3494                        dca = 1;
3495#endif
3496                        lsb = QIB_I_RCVAVAIL_LSB + ctxt;
3497                        handler = qib_7322pintr;
3498                        snprintf(dd->cspec->msix_entries[msixnum].name,
3499                                sizeof(dd->cspec->msix_entries[msixnum].name)
3500                                 - 1,
3501                                QIB_DRV_NAME "%d (kctx)", dd->unit);
3502                }
3503                ret = request_irq(
3504                        dd->cspec->msix_entries[msixnum].msix.vector,
3505                        handler, 0, dd->cspec->msix_entries[msixnum].name,
3506                        arg);
3507                if (ret) {
3508                        /*
3509                         * Shouldn't happen since the enable said we could
3510                         * have as many as we are trying to setup here.
3511                         */
3512                        qib_dev_err(dd,
3513                                "Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
3514                                msixnum,
3515                                dd->cspec->msix_entries[msixnum].msix.vector,
3516                                ret);
3517                        qib_7322_nomsix(dd);
3518                        goto try_intx;
3519                }
3520                dd->cspec->msix_entries[msixnum].arg = arg;
3521#ifdef CONFIG_INFINIBAND_QIB_DCA
3522                dd->cspec->msix_entries[msixnum].dca = dca;
3523                dd->cspec->msix_entries[msixnum].rcv =
3524                        handler == qib_7322pintr;
3525#endif
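                /*
                 * Record in the redirect shadow that interrupt source
                 * 'lsb' is steered to this MSIx vector; the packed
                 * IntRedirect registers are written out after the loop.
                 */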
3526                if (lsb >= 0) {
3527                        reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
3528                        sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
3529                                SYM_LSB(IntRedirect0, vec1);
3530                        mask &= ~(1ULL << lsb);
3531                        redirect[reg] |= ((u64) msixnum) << sh;
3532                }
3533                val = qib_read_kreg64(dd, 2 * msixnum + 1 +
3534                        (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3535                if (firstcpu < nr_cpu_ids &&
3536                        zalloc_cpumask_var(
3537                                &dd->cspec->msix_entries[msixnum].mask,
3538                                GFP_KERNEL)) {
3539                        if (handler == qib_7322pintr) {
3540                                cpumask_set_cpu(currrcvcpu,
3541                                        dd->cspec->msix_entries[msixnum].mask);
3542                                currrcvcpu = cpumask_next(currrcvcpu,
3543                                        local_mask);
3544                                if (currrcvcpu >= nr_cpu_ids)
3545                                        currrcvcpu = secondcpu;
3546                        } else {
3547                                cpumask_set_cpu(firstcpu,
3548                                        dd->cspec->msix_entries[msixnum].mask);
3549                        }
3550                        irq_set_affinity_hint(
3551                                dd->cspec->msix_entries[msixnum].msix.vector,
3552                                dd->cspec->msix_entries[msixnum].mask);
3553                }
3554                msixnum++;
3555        }
3556        /* Initialize the vector mapping */
3557        for (i = 0; i < ARRAY_SIZE(redirect); i++)
3558                qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
3559        dd->cspec->main_int_mask = mask;
3560        tasklet_init(&dd->error_tasklet, qib_error_tasklet,
3561                (unsigned long)dd);
3562bail:;
3563}
3564
3565/**
3566 * qib_7322_boardname - fill in the board name and note features
3567 * @dd: the qlogic_ib device
3568 *
3569 * info will be based on the board revision register
3570 */
3571static unsigned qib_7322_boardname(struct qib_devdata *dd)
3572{
3573        /* Will need enumeration of board-types here */
3574        char *n;
3575        u32 boardid, namelen;
3576        unsigned features = DUAL_PORT_CAP;
3577
3578        boardid = SYM_FIELD(dd->revision, Revision, BoardID);
3579
3580        switch (boardid) {
3581        case 0:
3582                n = "InfiniPath_QLE7342_Emulation";
3583                break;
3584        case 1:
3585                n = "InfiniPath_QLE7340";
3586                dd->flags |= QIB_HAS_QSFP;
3587                features = PORT_SPD_CAP;
3588                break;
3589        case 2:
3590                n = "InfiniPath_QLE7342";
3591                dd->flags |= QIB_HAS_QSFP;
3592                break;
3593        case 3:
3594                n = "InfiniPath_QMI7342";
3595                break;
3596        case 4:
3597                n = "InfiniPath_Unsupported7342";
3598                qib_dev_err(dd, "Unsupported version of QMH7342\n");
3599                features = 0;
3600                break;
3601        case BOARD_QMH7342:
3602                n = "InfiniPath_QMH7342";
3603                features = 0x24;
3604                break;
3605        case BOARD_QME7342:
3606                n = "InfiniPath_QME7342";
3607                break;
3608        case 8:
3609                n = "InfiniPath_QME7362";
3610                dd->flags |= QIB_HAS_QSFP;
3611                break;
3612        case BOARD_QMH7360:
3613                n = "Intel IB QDR 1P FLR-QSFP Adptr";
3614                dd->flags |= QIB_HAS_QSFP;
3615                break;
3616        case 15:
3617                n = "InfiniPath_QLE7342_TEST";
3618                dd->flags |= QIB_HAS_QSFP;
3619                break;
3620        default:
3621                n = "InfiniPath_QLE73xy_UNKNOWN";
3622                qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
3623                break;
3624        }
3625        dd->board_atten = 1; /* index into txdds_Xdr */
3626
3627        namelen = strlen(n) + 1;
3628        dd->boardname = kmalloc(namelen, GFP_KERNEL);
3629        if (dd->boardname)
3630                snprintf(dd->boardname, namelen, "%s", n);
3631
3632        snprintf(dd->boardversion, sizeof(dd->boardversion),
3633                 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
3634                 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
3635                 (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
3636                 dd->majrev, dd->minrev,
3637                 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
3638
3639        if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
3640                qib_devinfo(dd->pcidev,
3641                        "IB%u: Forced to single port mode by module parameter\n",
3642                        dd->unit);
3643                features &= PORT_SPD_CAP;
3644        }
3645
3646        return features;
3647}
3648
3649/*
3650 * This routine sleeps, so it can only be called from user context, not
3651 * from interrupt context.
3652 */
3653static int qib_do_7322_reset(struct qib_devdata *dd)
3654{
3655        u64 val;
3656        u64 *msix_vecsave = NULL;
3657        int i, msix_entries, ret = 1;
3658        u16 cmdval;
3659        u8 int_line, clinesz;
3660        unsigned long flags;
3661
3662        /* Use dev_err so it shows up in logs, etc. */
3663        qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
3664
3665        qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
3666
3667        msix_entries = dd->cspec->num_msix_entries;
3668
3669        /* no interrupts till re-initted */
3670        qib_7322_set_intr_state(dd, 0);
3671
3672        if (msix_entries) {
3673                qib_7322_nomsix(dd);
3674                /* can be up to 512 bytes, too big for stack */
3675                msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
3676                        sizeof(u64), GFP_KERNEL);
3677        }
3678
3679        /*
3680         * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
3681         * info that is set up by the BIOS, so we have to save and restore
3682         * it ourselves.   There is some risk something could change it,
3683         * after we save it, but since we have disabled the MSIx, it
3684         * shouldn't be touched...
3685         */
3686        for (i = 0; i < msix_entries; i++) {
3687                u64 vecaddr, vecdata;
3688
3689                vecaddr = qib_read_kreg64(dd, 2 * i +
3690                                  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3691                vecdata = qib_read_kreg64(dd, 1 + 2 * i +
3692                                  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3693                if (msix_vecsave) {
3694                        msix_vecsave[2 * i] = vecaddr;
3695                        /* save it without the masked bit set */
3696                        msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
3697                }
3698        }
3699
3700        dd->pport->cpspec->ibdeltainprog = 0;
3701        dd->pport->cpspec->ibsymdelta = 0;
3702        dd->pport->cpspec->iblnkerrdelta = 0;
3703        dd->pport->cpspec->ibmalfdelta = 0;
3704        /* so we check interrupts work again */
3705        dd->z_int_counter = qib_int_counter(dd);
3706
3707        /*
3708         * Keep chip from being accessed until we are ready.  Use
3709         * writeq() directly, to allow the write even though QIB_PRESENT
3710         * isn't set.
3711         */
3712        dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
3713        dd->flags |= QIB_DOING_RESET;
3714        val = dd->control | QLOGIC_IB_C_RESET;
3715        writeq(val, &dd->kregbase[kr_control]);
3716
3717        for (i = 1; i <= 5; i++) {
3718                /*
3719                 * Allow MBIST, etc. to complete; longer on each retry.
3720                 * We sometimes get machine checks from bus timeout if no
3721                 * response, so for now, make it *really* long.
3722                 */
3723                msleep(1000 + (1 + i) * 3000);
3724
3725                qib_pcie_reenable(dd, cmdval, int_line, clinesz);
3726
3727                /*
3728                 * Use readq directly, so we don't need to mark it as PRESENT
3729                 * until we get a successful indication that all is well.
3730                 */
3731                val = readq(&dd->kregbase[kr_revision]);
3732                if (val == dd->revision)
3733                        break;
3734                if (i == 5) {
3735                        qib_dev_err(dd,
3736                                "Failed to initialize after reset, unusable\n");
3737                        ret = 0;
3738                        goto  bail;
3739                }
3740        }
3741
3742        dd->flags |= QIB_PRESENT; /* it's back */
3743
3744        if (msix_entries) {
3745                /* restore the MSIx vector address and data if saved above */
3746                for (i = 0; i < msix_entries; i++) {
3747                        dd->cspec->msix_entries[i].msix.entry = i;
3748                        if (!msix_vecsave || !msix_vecsave[2 * i])
3749                                continue;
3750                        qib_write_kreg(dd, 2 * i +
3751                                (QIB_7322_MsixTable_OFFS / sizeof(u64)),
3752                                msix_vecsave[2 * i]);
3753                        qib_write_kreg(dd, 1 + 2 * i +
3754                                (QIB_7322_MsixTable_OFFS / sizeof(u64)),
3755                                msix_vecsave[1 + 2 * i]);
3756                }
3757        }
3758
3759        /* initialize the remaining registers.  */
3760        for (i = 0; i < dd->num_pports; ++i)
3761                write_7322_init_portregs(&dd->pport[i]);
3762        write_7322_initregs(dd);
3763
3764        if (qib_pcie_params(dd, dd->lbus_width,
3765                            &dd->cspec->num_msix_entries,
3766                            dd->cspec->msix_entries))
3767                qib_dev_err(dd,
3768                        "Reset failed to setup PCIe or interrupts; continuing anyway\n");
3769
3770        qib_setup_7322_interrupt(dd, 1);
3771
3772        for (i = 0; i < dd->num_pports; ++i) {
3773                struct qib_pportdata *ppd = &dd->pport[i];
3774
3775                spin_lock_irqsave(&ppd->lflags_lock, flags);
3776                ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
3777                ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3778                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3779        }
3780
3781bail:
3782        dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
3783        kfree(msix_vecsave);
3784        return ret;
3785}
3786
3787/**
3788 * qib_7322_put_tid - write a TID to the chip
3789 * @dd: the qlogic_ib device
3790 * @tidptr: pointer to the expected TID (in chip) to update
3791 * @type: RCVHQ_RCV_TYPE_EAGER or RCVHQ_RCV_TYPE_EXPECTED
3792 * @pa: physical address of in memory buffer; tidinvalid if freeing
3793 */
3794static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
3795                             u32 type, unsigned long pa)
3796{
3797        if (!(dd->flags & QIB_PRESENT))
3798                return;
3799        if (pa != dd->tidinvalid) {
3800                u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
3801
3802                /* paranoia checks */
3803                if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
3804                        qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
3805                                    pa);
3806                        return;
3807                }
3808                if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
3809                        qib_dev_err(dd,
3810                                "Physical page address 0x%lx larger than supported\n",
3811                                pa);
3812                        return;
3813                }
3814
3815                if (type == RCVHQ_RCV_TYPE_EAGER)
3816                        chippa |= dd->tidtemplate;
3817                else /* for now, always full 4KB page */
3818                        chippa |= IBA7322_TID_SZ_4K;
3819                pa = chippa;
3820        }
3821        writeq(pa, tidptr);
3822        mmiowb();
3823}
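/*
 * Worked example with a hypothetical address: pa = 0x12345800 is 2KB
 * aligned, so chippa = pa >> IBA7322_TID_PA_SHIFT = 0x2468b; the size
 * bits (tidtemplate or IBA7322_TID_SZ_4K) are then OR'd in before the
 * result is written to the TID entry.
 */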
3824
3825/**
3826 * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
3827 * @dd: the qlogic_ib device
3828 * @rcd: the ctxt data
3829 *
3830 * clear all TID entries for a ctxt, expected and eager.
3831 * Used from qib_close().
3832 */
3833static void qib_7322_clear_tids(struct qib_devdata *dd,
3834                                struct qib_ctxtdata *rcd)
3835{
3836        u64 __iomem *tidbase;
3837        unsigned long tidinv;
3838        u32 ctxt;
3839        int i;
3840
3841        if (!dd->kregbase || !rcd)
3842                return;
3843
3844        ctxt = rcd->ctxt;
3845
3846        tidinv = dd->tidinvalid;
3847        tidbase = (u64 __iomem *)
3848                ((char __iomem *) dd->kregbase +
3849                 dd->rcvtidbase +
3850                 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
3851
3852        for (i = 0; i < dd->rcvtidcnt; i++)
3853                qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
3854                                 tidinv);
3855
3856        tidbase = (u64 __iomem *)
3857                ((char __iomem *) dd->kregbase +
3858                 dd->rcvegrbase +
3859                 rcd->rcvegr_tid_base * sizeof(*tidbase));
3860
3861        for (i = 0; i < rcd->rcvegrcnt; i++)
3862                qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
3863                                 tidinv);
3864}
3865
3866/**
3867 * qib_7322_tidtemplate - setup constants for TID updates
3868 * @dd: the qlogic_ib device
3869 *
3870 * We set up stuff that we use a lot, to avoid calculating each time
3871 */
3872static void qib_7322_tidtemplate(struct qib_devdata *dd)
3873{
3874        /*
3875         * For now, we always allocate 4KB buffers (at init) so we can
3876         * receive max size packets.  We may want a module parameter to
3877         * specify 2KB or 4KB and/or make it per port instead of per device
3878         * for those who want to reduce memory footprint.  Note that the
3879         * rcvhdrentsize size must be large enough to hold the largest
3880         * IB header (currently 96 bytes) that we expect to handle (plus of
3881         * course the 2 dwords of RHF).
3882         */
3883        if (dd->rcvegrbufsize == 2048)
3884                dd->tidtemplate = IBA7322_TID_SZ_2K;
3885        else if (dd->rcvegrbufsize == 4096)
3886                dd->tidtemplate = IBA7322_TID_SZ_4K;
3887        dd->tidinvalid = 0;
3888}
3889
3890/**
3891 * qib_7322_get_base_info - set chip-specific flags for user code
3892 * @rcd: the qlogic_ib ctxt
3893 * @kinfo: qib_base_info pointer
3894 *
3895 * We set the PCIE flag because the lower bandwidth on PCIe vs
3896 * HyperTransport can affect some user packet algorithms.
3897 */
3899static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
3900                                  struct qib_base_info *kinfo)
3901{
3902        kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
3903                QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
3904                QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
3905        if (rcd->dd->cspec->r1)
3906                kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
3907        if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
3908                kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
3909
3910        return 0;
3911}
3912
3913static struct qib_message_header *
3914qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
3915{
3916        u32 offset = qib_hdrget_offset(rhf_addr);
3917
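        /*
         * Back up from the RHF to the start of the hdrq entry
         * (rhf_offset dwords in), then advance by the offset the RHF
         * reports for this packet's header.
         */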
3918        return (struct qib_message_header *)
3919                (rhf_addr - dd->rhf_offset + offset);
3920}
3921
3922/*
3923 * Configure number of contexts.
3924 */
3925static void qib_7322_config_ctxts(struct qib_devdata *dd)
3926{
3927        unsigned long flags;
3928        u32 nchipctxts;
3929
3930        nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
3931        dd->cspec->numctxts = nchipctxts;
3932        if (qib_n_krcv_queues > 1 && dd->num_pports) {
3933                dd->first_user_ctxt = NUM_IB_PORTS +
3934                        (qib_n_krcv_queues - 1) * dd->num_pports;
3935                if (dd->first_user_ctxt > nchipctxts)
3936                        dd->first_user_ctxt = nchipctxts;
3937                dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
3938        } else {
3939                dd->first_user_ctxt = NUM_IB_PORTS;
3940                dd->n_krcv_queues = 1;
3941        }
3942
3943        if (!qib_cfgctxts) {
3944                int nctxts = dd->first_user_ctxt + num_online_cpus();
3945
3946                if (nctxts <= 6)
3947                        dd->ctxtcnt = 6;
3948                else if (nctxts <= 10)
3949                        dd->ctxtcnt = 10;
3950                else if (nctxts <= nchipctxts)
3951                        dd->ctxtcnt = nchipctxts;
3952        } else if (qib_cfgctxts < dd->num_pports)
3953                dd->ctxtcnt = dd->num_pports;
3954        else if (qib_cfgctxts <= nchipctxts)
3955                dd->ctxtcnt = qib_cfgctxts;
3956        if (!dd->ctxtcnt) /* none of the above, set to max */
3957                dd->ctxtcnt = nchipctxts;
3958
3959        /*
3960         * Chip can be configured for 6, 10, or 18 ctxts, and choice
3961         * affects number of eager TIDs per ctxt (1K, 2K, 4K).
3962         * Lock to be paranoid about later motion, etc.
3963         */
3964        spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
3965        if (dd->ctxtcnt > 10)
3966                dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
3967        else if (dd->ctxtcnt > 6)
3968                dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
3969        /* else configure for default 6 receive ctxts */
3970
3971        /* The XRC opcode is 5. */
3972        dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
3973
3974        /*
3975         * RcvCtrl *must* be written here so that the
3976         * chip understands how to change rcvegrcnt below.
3977         */
3978        qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
3979        spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
3980
3981        /* kr_rcvegrcnt changes based on the number of contexts enabled */
3982        dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
3983        if (qib_rcvhdrcnt)
3984                dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
3985        else
3986                dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt,
3987                                    dd->num_pports > 1 ? 1024U : 2048U);
3988}
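
/*
 * Worked example (illustrative, assuming an 18-context chip with two
 * ports, qib_n_krcv_queues == 2, 8 online CPUs and qib_cfgctxts == 0):
 *
 *	first_user_ctxt = 2 + (2 - 1) * 2 = 4 kernel contexts
 *	nctxts = 4 + 8 = 12 > 10, so ctxtcnt = nchipctxts = 18
 *	ctxtcnt > 10, so ContextCfg is written as 2 (18-context mode)
 *
 * The 6/10/18 choice in turn shrinks the eager TIDs per context, which
 * is why kr_rcvegrcnt is re-read after RcvCtrl is written.
 */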
3989
3990static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
3991{
3992
3993        int lsb, ret = 0;
3994        u64 maskr; /* right-justified mask */
3995
3996        switch (which) {
3997
3998        case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
3999                ret = ppd->link_width_enabled;
4000                goto done;
4001
4002        case QIB_IB_CFG_LWID: /* Get currently active Link-width */
4003                ret = ppd->link_width_active;
4004                goto done;
4005
4006        case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
4007                ret = ppd->link_speed_enabled;
4008                goto done;
4009
4010        case QIB_IB_CFG_SPD: /* Get current Link spd */
4011                ret = ppd->link_speed_active;
4012                goto done;
4013
4014        case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
4015                lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4016                maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4017                break;
4018
4019        case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
4020                lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4021                maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4022                break;
4023
4024        case QIB_IB_CFG_LINKLATENCY:
4025                ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
4026                        SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
4027                goto done;
4028
4029        case QIB_IB_CFG_OP_VLS:
4030                ret = ppd->vls_operational;
4031                goto done;
4032
4033        case QIB_IB_CFG_VL_HIGH_CAP:
4034                ret = 16;
4035                goto done;
4036
4037        case QIB_IB_CFG_VL_LOW_CAP:
4038                ret = 16;
4039                goto done;
4040
4041        case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
4042                ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4043                                OverrunThreshold);
4044                goto done;
4045
4046        case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
4047                ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4048                                PhyerrThreshold);
4049                goto done;
4050
4051        case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
4052                /* will only take effect when the link state changes */
4053                ret = (ppd->cpspec->ibcctrl_a &
4054                       SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
4055                        IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
4056                goto done;
4057
4058        case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
4059                lsb = IBA7322_IBC_HRTBT_LSB;
4060                maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
4061                break;
4062
4063        case QIB_IB_CFG_PMA_TICKS:
4064                /*
4065                 * 0x00 = 10x link transfer rate, or 4 ns for 2.5 Gb/s.
4066                 * Since the clock is always 250 MHz, the value is 3, 1 or 0.
4067                 */
4068                if (ppd->link_speed_active == QIB_IB_QDR)
4069                        ret = 3;
4070                else if (ppd->link_speed_active == QIB_IB_DDR)
4071                        ret = 1;
4072                else
4073                        ret = 0;
4074                goto done;
4075
4076        default:
4077                ret = -EINVAL;
4078                goto done;
4079        }
4080        ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
4081done:
4082        return ret;
4083}
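
/*
 * Example (illustrative): qib_7322_get_ib_cfg(ppd, QIB_IB_CFG_LWID)
 * simply returns the cached ppd->link_width_active, while a shadowed
 * field such as QIB_IB_CFG_HRTBT falls through to the common exit:
 *
 *	ret = (ppd->cpspec->ibcctrl_b >> IBA7322_IBC_HRTBT_LSB) &
 *		IBA7322_IBC_HRTBT_RMASK;
 */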
4084
4085/*
4086 * Below again cribbed liberally from older version. Do not lean
4087 * heavily on it.
4088 */
4089#define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
4090#define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
4091        | (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
4092
4093static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
4094{
4095        struct qib_devdata *dd = ppd->dd;
4096        u64 maskr; /* right-justified mask */
4097        int lsb, ret = 0;
4098        u16 lcmd, licmd;
4099        unsigned long flags;
4100
4101        switch (which) {
4102        case QIB_IB_CFG_LIDLMC:
4103                /*
4104                 * Set LID and LMC. Combined to avoid possible hazard.
4105                 * Caller puts LMC in 16 MSbits, DLID in 16 LSbits of val.
4106                 */
4107                lsb = IBA7322_IBC_DLIDLMC_SHIFT;
4108                maskr = IBA7322_IBC_DLIDLMC_MASK;
4109                /*
4110                 * For header-checking, the SLID in the packet will
4111                 * be masked with SendIBSLMCMask, and compared
4112                 * with SendIBSLIDAssignMask. Make sure we do not
4113                 * set any bits not covered by the mask, or we get
4114                 * false-positives.
4115                 */
4116                qib_write_kreg_port(ppd, krp_sendslid,
4117                                    val & (val >> 16) & SendIBSLIDAssignMask);
4118                qib_write_kreg_port(ppd, krp_sendslidmask,
4119                                    (val >> 16) & SendIBSLMCMask);
4120                break;
4121
4122        case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
4123                ppd->link_width_enabled = val;
4124                /* convert IB value to chip register value */
4125                if (val == IB_WIDTH_1X)
4126                        val = 0;
4127                else if (val == IB_WIDTH_4X)
4128                        val = 1;
4129                else
4130                        val = 3;
4131                maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
4132                lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
4133                break;
4134
4135        case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
4136                /*
4137                 * As with width, only write the actual register if the
4138                 * link is currently down, otherwise takes effect on next
4139                 * link change.  Since setting is being explicitly requested
4140                 * (via MAD or sysfs), clear autoneg failure status if speed
4141                 * autoneg is enabled.
4142                 */
4143                ppd->link_speed_enabled = val;
4144                val <<= IBA7322_IBC_SPEED_LSB;
4145                maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
4146                        IBA7322_IBC_MAX_SPEED_MASK;
4147                if (val & (val - 1)) {
4148                        /* Multiple speeds enabled */
4149                        val |= IBA7322_IBC_IBTA_1_2_MASK |
4150                                IBA7322_IBC_MAX_SPEED_MASK;
4151                        spin_lock_irqsave(&ppd->lflags_lock, flags);
4152                        ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
4153                        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
4154                } else if (val & IBA7322_IBC_SPEED_QDR)
4155                        val |= IBA7322_IBC_IBTA_1_2_MASK;
4156                /* IBTA 1.2 mode + min/max + speed bits are contiguous */
4157                lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
4158                break;
4159
4160        case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
4161                lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4162                maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4163                break;
4164
4165        case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
4166                lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4167                maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4168                break;
4169
4170        case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
4171                maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4172                                  OverrunThreshold);
4173                if (maskr != val) {
4174                        ppd->cpspec->ibcctrl_a &=
4175                                ~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
4176                        ppd->cpspec->ibcctrl_a |= (u64) val <<
4177                                SYM_LSB(IBCCtrlA_0, OverrunThreshold);
4178                        qib_write_kreg_port(ppd, krp_ibcctrl_a,
4179                                            ppd->cpspec->ibcctrl_a);
4180                        qib_write_kreg(dd, kr_scratch, 0ULL);
4181                }
4182                goto bail;
4183
4184        case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
4185                maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4186                                  PhyerrThreshold);
4187                if (maskr != val) {
4188                        ppd->cpspec->ibcctrl_a &=
4189                                ~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
4190                        ppd->cpspec->ibcctrl_a |= (u64) val <<
4191                                SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
4192                        qib_write_kreg_port(ppd, krp_ibcctrl_a,
4193                                            ppd->cpspec->ibcctrl_a);
4194                        qib_write_kreg(dd, kr_scratch, 0ULL);
4195                }
4196                goto bail;
4197
4198        case QIB_IB_CFG_PKEYS: /* update pkeys */
4199                maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
4200                        ((u64) ppd->pkeys[2] << 32) |
4201                        ((u64) ppd->pkeys[3] << 48);
4202                qib_write_kreg_port(ppd, krp_partitionkey, maskr);
4203                goto bail;
4204
4205        case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
4206                /* will only take effect when the link state changes */
4207                if (val == IB_LINKINITCMD_POLL)
4208                        ppd->cpspec->ibcctrl_a &=
4209                                ~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
4210                else /* SLEEP */
4211                        ppd->cpspec->ibcctrl_a |=
4212                                SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
4213                qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
4214                qib_write_kreg(dd, kr_scratch, 0ULL);
4215                goto bail;
4216
4217        case QIB_IB_CFG_MTU: /* update the MTU in IBC */
4218                /*
4219                 * Update our housekeeping variables, and set IBC max
4220                 * size, same as init code; max IBC is max we allow in
4221                 * buffer, less the qword pbc, plus 1 for ICRC, in dwords.
4222                 * Set even if it's unchanged; print debug message only
4223                 * on changes.
4224                 */
4225                val = (ppd->ibmaxlen >> 2) + 1;
4226                ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
4227                ppd->cpspec->ibcctrl_a |= (u64)val <<
4228                        SYM_LSB(IBCCtrlA_0, MaxPktLen);
4229                qib_write_kreg_port(ppd, krp_ibcctrl_a,
4230                                    ppd->cpspec->ibcctrl_a);
4231                qib_write_kreg(dd, kr_scratch, 0ULL);
4232                goto bail;
4233
4234        case QIB_IB_CFG_LSTATE: /* set the IB link state */
4235                switch (val & 0xffff0000) {
4236                case IB_LINKCMD_DOWN:
4237                        lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
4238                        ppd->cpspec->ibmalfusesnap = 1;
4239                        ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
4240                                crp_errlink);
4241                        if (!ppd->cpspec->ibdeltainprog &&
4242                            qib_compat_ddr_negotiate) {
4243                                ppd->cpspec->ibdeltainprog = 1;
4244                                ppd->cpspec->ibsymsnap =
4245                                        read_7322_creg32_port(ppd,
4246                                                              crp_ibsymbolerr);
4247                                ppd->cpspec->iblnkerrsnap =
4248                                        read_7322_creg32_port(ppd,
4249                                                      crp_iblinkerrrecov);
4250                        }
4251                        break;
4252
4253                case IB_LINKCMD_ARMED:
4254                        lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
4255                        if (ppd->cpspec->ibmalfusesnap) {
4256                                ppd->cpspec->ibmalfusesnap = 0;
4257                                ppd->cpspec->ibmalfdelta +=
4258                                        read_7322_creg32_port(ppd,
4259                                                              crp_errlink) -
4260                                        ppd->cpspec->ibmalfsnap;
4261                        }
4262                        break;
4263
4264                case IB_LINKCMD_ACTIVE:
4265                        lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
4266                        break;
4267
4268                default:
4269                        ret = -EINVAL;
4270                        qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
4271                        goto bail;
4272                }
4273                switch (val & 0xffff) {
4274                case IB_LINKINITCMD_NOP:
4275                        licmd = 0;
4276                        break;
4277
4278                case IB_LINKINITCMD_POLL:
4279                        licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
4280                        break;
4281
4282                case IB_LINKINITCMD_SLEEP:
4283                        licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
4284                        break;
4285
4286                case IB_LINKINITCMD_DISABLE:
4287                        licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
4288                        ppd->cpspec->chase_end = 0;
4289                        /*
4290                         * stop state chase counter and timer, if running.
4291                         * wait for pending timer, but don't clear .data (ppd)!
4292                         */
4293                        if (ppd->cpspec->chase_timer.expires) {
4294                                del_timer_sync(&ppd->cpspec->chase_timer);
4295                                ppd->cpspec->chase_timer.expires = 0;
4296                        }
4297                        break;
4298
4299                default:
4300                        ret = -EINVAL;
4301                        qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
4302                                    val & 0xffff);
4303                        goto bail;
4304                }
4305                qib_set_ib_7322_lstate(ppd, lcmd, licmd);
4306                goto bail;
4307
4308        case QIB_IB_CFG_OP_VLS:
4309                if (ppd->vls_operational != val) {
4310                        ppd->vls_operational = val;
4311                        set_vls(ppd);
4312                }
4313                goto bail;
4314
4315        case QIB_IB_CFG_VL_HIGH_LIMIT:
4316                qib_write_kreg_port(ppd, krp_highprio_limit, val);
4317                goto bail;
4318
4319        case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
4320                if (val > 3) {
4321                        ret = -EINVAL;
4322                        goto bail;
4323                }
4324                lsb = IBA7322_IBC_HRTBT_LSB;
4325                maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
4326                break;
4327
4328        case QIB_IB_CFG_PORT:
4329                /* val is the port number of the switch we are connected to. */
4330                if (ppd->dd->cspec->r1) {
4331                        cancel_delayed_work(&ppd->cpspec->ipg_work);
4332                        ppd->cpspec->ipg_tries = 0;
4333                }
4334                goto bail;
4335
4336        default:
4337                ret = -EINVAL;
4338                goto bail;
4339        }
4340        ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
4341        ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
4342        qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
4343        qib_write_kreg(dd, kr_scratch, 0);
4344bail:
4345        return ret;
4346}
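
/*
 * Usage sketch (illustrative; assumes the caller packs val as the
 * LIDLMC comment above describes, with the LMC mask in the upper 16
 * bits and the LID in the lower 16):
 *
 *	u16 lid = 0x10, lmc = 2;
 *	u32 val = ((~((1U << lmc) - 1) & 0xffffU) << 16) | lid;
 *	qib_7322_set_ib_cfg(ppd, QIB_IB_CFG_LIDLMC, val);
 *
 * val & (val >> 16) then masks the LMC bits out of the SLID used for
 * send-side header checking.
 */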
4347
4348static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
4349{
4350        int ret = 0;
4351        u64 val, ctrlb;
4352
4353        /* only IBC loopback, may add serdes and xgxs loopbacks later */
4354        if (!strncmp(what, "ibc", 3)) {
4355                ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
4356                                                       Loopback);
4357                val = 0; /* disable heart beat, so link will come up */
4358                qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
4359                         ppd->dd->unit, ppd->port);
4360        } else if (!strncmp(what, "off", 3)) {
4361                ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
4362                                                        Loopback);
4363                /* enable heart beat again */
4364                val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
4365                qib_devinfo(ppd->dd->pcidev,
4366                        "Disabling IB%u:%u IBC loopback (normal)\n",
4367                        ppd->dd->unit, ppd->port);
4368        } else
4369                ret = -EINVAL;
4370        if (!ret) {
4371                qib_write_kreg_port(ppd, krp_ibcctrl_a,
4372                                    ppd->cpspec->ibcctrl_a);
4373                ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
4374                                             << IBA7322_IBC_HRTBT_LSB);
4375                ppd->cpspec->ibcctrl_b = ctrlb | val;
4376                qib_write_kreg_port(ppd, krp_ibcctrl_b,
4377                                    ppd->cpspec->ibcctrl_b);
4378                qib_write_kreg(ppd->dd, kr_scratch, 0);
4379        }
4380        return ret;
4381}
4382
4383static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4384                           struct ib_vl_weight_elem *vl)
4385{
4386        unsigned i;
4387
4388        for (i = 0; i < 16; i++, regno++, vl++) {
4389                u32 val = qib_read_kreg_port(ppd, regno);
4390
4391                vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
4392                        SYM_RMASK(LowPriority0_0, VirtualLane);
4393                vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
4394                        SYM_RMASK(LowPriority0_0, Weight);
4395        }
4396}
4397
4398static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4399                           struct ib_vl_weight_elem *vl)
4400{
4401        unsigned i;
4402
4403        for (i = 0; i < 16; i++, regno++, vl++) {
4404                u64 val;
4405
4406                val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
4407                        SYM_LSB(LowPriority0_0, VirtualLane)) |
4408                      ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
4409                        SYM_LSB(LowPriority0_0, Weight));
4410                qib_write_kreg_port(ppd, regno, val);
4411        }
4412        if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
4413                struct qib_devdata *dd = ppd->dd;
4414                unsigned long flags;
4415
4416                spin_lock_irqsave(&dd->sendctrl_lock, flags);
4417                ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
4418                qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4419                qib_write_kreg(dd, kr_scratch, 0);
4420                spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4421        }
4422}
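
/*
 * Sketch (illustrative): each of the 16 per-priority registers holds
 * one {VL, weight} arbitration slot, so programming VL1 with weight 64
 * into a slot amounts to
 *
 *	val = (1ULL << SYM_LSB(LowPriority0_0, VirtualLane)) |
 *	      (64ULL << SYM_LSB(LowPriority0_0, Weight));
 *
 * with the arbiter itself enabled on first use via IBVLArbiterEn.
 */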
4423
4424static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
4425{
4426        switch (which) {
4427        case QIB_IB_TBL_VL_HIGH_ARB:
4428                get_vl_weights(ppd, krp_highprio_0, t);
4429                break;
4430
4431        case QIB_IB_TBL_VL_LOW_ARB:
4432                get_vl_weights(ppd, krp_lowprio_0, t);
4433                break;
4434
4435        default:
4436                return -EINVAL;
4437        }
4438        return 0;
4439}
4440
4441static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
4442{
4443        switch (which) {
4444        case QIB_IB_TBL_VL_HIGH_ARB:
4445                set_vl_weights(ppd, krp_highprio_0, t);
4446                break;
4447
4448        case QIB_IB_TBL_VL_LOW_ARB:
4449                set_vl_weights(ppd, krp_lowprio_0, t);
4450                break;
4451
4452        default:
4453                return -EINVAL;
4454        }
4455        return 0;
4456}
4457
4458static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
4459                                    u32 updegr, u32 egrhd, u32 npkts)
4460{
4461        /*
4462         * Need to write timeout register before updating rcvhdrhead to ensure
4463         * that the timer is enabled on reception of a packet.
4464         */
4465        if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
4466                adjust_rcv_timeout(rcd, npkts);
4467        if (updegr)
4468                qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
4469        mmiowb();
4470        qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4471        qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4472        mmiowb();
4473}
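
/*
 * Note (illustrative): callers pack a packet count into the bits at
 * and above IBA7322_HDRHEAD_PKTINT_SHIFT of hd, so a nonzero shifted
 * value means the receive timeout should be re-tuned before the head
 * update makes the freed entries visible to the chip.
 */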
4474
4475static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
4476{
4477        u32 head, tail;
4478
4479        head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
4480        if (rcd->rcvhdrtail_kvaddr)
4481                tail = qib_get_rcvhdrtail(rcd);
4482        else
4483                tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
4484        return head == tail;
4485}
4486
4487#define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
4488        QIB_RCVCTRL_CTXT_DIS | \
4489        QIB_RCVCTRL_TIDFLOW_ENB | \
4490        QIB_RCVCTRL_TIDFLOW_DIS | \
4491        QIB_RCVCTRL_TAILUPD_ENB | \
4492        QIB_RCVCTRL_TAILUPD_DIS | \
4493        QIB_RCVCTRL_INTRAVAIL_ENB | \
4494        QIB_RCVCTRL_INTRAVAIL_DIS | \
4495        QIB_RCVCTRL_BP_ENB | \
4496        QIB_RCVCTRL_BP_DIS)
4497
4498#define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
4499        QIB_RCVCTRL_CTXT_DIS | \
4500        QIB_RCVCTRL_PKEY_DIS | \
4501        QIB_RCVCTRL_PKEY_ENB)
4502
4503/*
4504 * Modify the RCVCTRL register in a chip-specific way. This
4505 * is a function because bit positions and (future) register
4506 * locations are chip-specific, but the needed operations are
4507 * generic. <op> is a bit-mask because we often want to
4508 * do multiple modifications.
4509 */
4510static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
4511                             int ctxt)
4512{
4513        struct qib_devdata *dd = ppd->dd;
4514        struct qib_ctxtdata *rcd;
4515        u64 mask, val;
4516        unsigned long flags;
4517
4518        spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
4519
4520        if (op & QIB_RCVCTRL_TIDFLOW_ENB)
4521                dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
4522        if (op & QIB_RCVCTRL_TIDFLOW_DIS)
4523                dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
4524        if (op & QIB_RCVCTRL_TAILUPD_ENB)
4525                dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4526        if (op & QIB_RCVCTRL_TAILUPD_DIS)
4527                dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
4528        if (op & QIB_RCVCTRL_PKEY_ENB)
4529                ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4530        if (op & QIB_RCVCTRL_PKEY_DIS)
4531                ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4532        if (ctxt < 0) {
4533                mask = (1ULL << dd->ctxtcnt) - 1;
4534                rcd = NULL;
4535        } else {
4536                mask = (1ULL << ctxt);
4537                rcd = dd->rcd[ctxt];
4538        }
4539        if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
4540                ppd->p_rcvctrl |=
4541                        (mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4542                if (!(dd->flags & QIB_NODMA_RTAIL)) {
4543                        op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
4544                        dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4545                }
4546                /* Write these registers before the context is enabled. */
4547                qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
4548                                    rcd->rcvhdrqtailaddr_phys);
4549                qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
4550                                    rcd->rcvhdrq_phys);
4551                rcd->seq_cnt = 1;
4552        }
4553        if (op & QIB_RCVCTRL_CTXT_DIS)
4554                ppd->p_rcvctrl &=
4555                        ~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4556        if (op & QIB_RCVCTRL_BP_ENB)
4557                dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
4558        if (op & QIB_RCVCTRL_BP_DIS)
4559                dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
4560        if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
4561                dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
4562        if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
4563                dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
4564        /*
4565         * Decide which registers to write depending on the ops enabled.
4566         * Special case is "flush" (no bits set at all)
4567         * which needs to write both.
4568         */
4569        if (op == 0 || (op & RCVCTRL_COMMON_MODS))
4570                qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
4571        if (op == 0 || (op & RCVCTRL_PORT_MODS))
4572                qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
4573        if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
4574                /*
4575                 * Init the context registers also; if we were
4576                 * disabled, tail and head should both be zero
4577                 * already from the enable, but since we don't
4578                 * know, we have to do it explicitly.
4579                 */
4580                val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
4581                qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
4582
4583                /* be sure enabling write seen; hd/tl should be 0 */
4584                (void) qib_read_kreg32(dd, kr_scratch);
4585                val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
4586                dd->rcd[ctxt]->head = val;
4587                /* If kctxt, interrupt on next receive. */
4588                if (ctxt < dd->first_user_ctxt)
4589                        val |= dd->rhdrhead_intr_off;
4590                qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4591        } else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
4592                dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
4593                /* arm rcv interrupt */
4594                val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
4595                qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4596        }
4597        if (op & QIB_RCVCTRL_CTXT_DIS) {
4598                unsigned f;
4599
4600                /* Now that the context is disabled, clear these registers. */
4601                if (ctxt >= 0) {
4602                        qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
4603                        qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
4604                        for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4605                                qib_write_ureg(dd, ur_rcvflowtable + f,
4606                                               TIDFLOW_ERRBITS, ctxt);
4607                } else {
4608                        unsigned i;
4609
4610                        for (i = 0; i < dd->cfgctxts; i++) {
4611                                qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
4612                                                    i, 0);
4613                                qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
4614                                for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4615                                        qib_write_ureg(dd, ur_rcvflowtable + f,
4616                                                       TIDFLOW_ERRBITS, i);
4617                        }
4618                }
4619        }
4620        spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
4621}
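
/*
 * Usage sketch (illustrative): enabling a kernel context together with
 * receive-available interrupts would look like
 *
 *	rcvctrl_7322_mod(ppd, QIB_RCVCTRL_CTXT_ENB |
 *			 QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt);
 *
 * while a "flush" call with op == 0 rewrites both the common and the
 * per-port register from their shadows without changing any bits.
 */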
4622
4623/*
4624 * Modify the SENDCTRL register in a chip-specific way. This
4625 * is a function because there are multiple such registers with
4626 * slightly different layouts.
4627 * The chip doesn't allow back-to-back sendctrl writes, so write
4628 * the scratch register after writing sendctrl.
4629 *
4630 * Which register is written depends on the operation.
4631 * Most operate on the common register, while
4632 * SEND_ENB and SEND_DIS operate on the per-port ones.
4633 * SEND_ENB is included in common because it can change SPCL_TRIG
4634 */
4635#define SENDCTRL_COMMON_MODS (\
4636        QIB_SENDCTRL_CLEAR | \
4637        QIB_SENDCTRL_AVAIL_DIS | \
4638        QIB_SENDCTRL_AVAIL_ENB | \
4639        QIB_SENDCTRL_AVAIL_BLIP | \
4640        QIB_SENDCTRL_DISARM | \
4641        QIB_SENDCTRL_DISARM_ALL | \
4642        QIB_SENDCTRL_SEND_ENB)
4643
4644#define SENDCTRL_PORT_MODS (\
4645        QIB_SENDCTRL_CLEAR | \
4646        QIB_SENDCTRL_SEND_ENB | \
4647        QIB_SENDCTRL_SEND_DIS | \
4648        QIB_SENDCTRL_FLUSH)
4649
4650static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
4651{
4652        struct qib_devdata *dd = ppd->dd;
4653        u64 tmp_dd_sendctrl;
4654        unsigned long flags;
4655
4656        spin_lock_irqsave(&dd->sendctrl_lock, flags);
4657
4658        /* First the dd ones that are "sticky", saved in shadow */
4659        if (op & QIB_SENDCTRL_CLEAR)
4660                dd->sendctrl = 0;
4661        if (op & QIB_SENDCTRL_AVAIL_DIS)
4662                dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4663        else if (op & QIB_SENDCTRL_AVAIL_ENB) {
4664                dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
4665                if (dd->flags & QIB_USE_SPCL_TRIG)
4666                        dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
4667        }
4668
4669        /* Then the ppd ones that are "sticky", saved in shadow */
4670        if (op & QIB_SENDCTRL_SEND_DIS)
4671                ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
4672        else if (op & QIB_SENDCTRL_SEND_ENB)
4673                ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
4674
4675        if (op & QIB_SENDCTRL_DISARM_ALL) {
4676                u32 i, last;
4677
4678                tmp_dd_sendctrl = dd->sendctrl;
4679                last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
4680                /*
4681                 * Disarm any buffers that are not yet launched,
4682                 * disabling updates until done.
4683                 */
4684                tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4685                for (i = 0; i < last; i++) {
4686                        qib_write_kreg(dd, kr_sendctrl,
4687                                       tmp_dd_sendctrl |
4688                                       SYM_MASK(SendCtrl, Disarm) | i);
4689                        qib_write_kreg(dd, kr_scratch, 0);
4690                }
4691        }
4692
4693        if (op & QIB_SENDCTRL_FLUSH) {
4694                u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
4695
4696                /*
4697                 * Now drain all the fifos.  The Abort bit should never be
4698                 * needed, so for now, at least, we don't use it.
4699                 */
4700                tmp_ppd_sendctrl |=
4701                        SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
4702                        SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
4703                        SYM_MASK(SendCtrl_0, TxeBypassIbc);
4704                qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
4705                qib_write_kreg(dd, kr_scratch, 0);
4706        }
4707
4708        tmp_dd_sendctrl = dd->sendctrl;
4709
4710        if (op & QIB_SENDCTRL_DISARM)
4711                tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
4712                        ((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
4713                         SYM_LSB(SendCtrl, DisarmSendBuf));
4714        if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
4715            (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
4716                tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4717
4718        if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
4719                qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
4720                qib_write_kreg(dd, kr_scratch, 0);
4721        }
4722
4723        if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
4724                qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4725                qib_write_kreg(dd, kr_scratch, 0);
4726        }
4727
4728        if (op & QIB_SENDCTRL_AVAIL_BLIP) {
4729                qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
4730                qib_write_kreg(dd, kr_scratch, 0);
4731        }
4732
4733        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4734
4735        if (op & QIB_SENDCTRL_FLUSH) {
4736                u32 v;
4737                /*
4738                 * ensure writes have hit chip, then do a few
4739                 * more reads, to allow DMA of pioavail registers
4740                 * to occur, so in-memory copy is in sync with
4741                 * the chip.  Not always safe to sleep.
4742                 */
4743                v = qib_read_kreg32(dd, kr_scratch);
4744                qib_write_kreg(dd, kr_scratch, v);
4745                v = qib_read_kreg32(dd, kr_scratch);
4746                qib_write_kreg(dd, kr_scratch, v);
4747                qib_read_kreg32(dd, kr_scratch);
4748        }
4749}
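
/*
 * Usage sketch (illustrative; QIB_SENDCTRL_DISARM_BUF is assumed to
 * encode the buffer number into the op mask, per qib.h): disarming one
 * buffer and forcing a buffer-available update might be
 *
 *	sendctrl_7322_mod(ppd, QIB_SENDCTRL_DISARM_BUF(bufnum) |
 *			  QIB_SENDCTRL_AVAIL_BLIP);
 */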
4750
4751#define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
4752#define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
4753#define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */
4754
4755/**
4756 * qib_portcntr_7322 - read a per-port chip counter
4757 * @ppd: the qlogic_ib pport
4758 * @reg: the counter to read (not a chip offset)
4759 */
4760static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
4761{
4762        struct qib_devdata *dd = ppd->dd;
4763        u64 ret = 0ULL;
4764        u16 creg;
4765        /* 0xffff for unimplemented or synthesized counters */
4766        static const u32 xlator[] = {
4767                [QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
4768                [QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
4769                [QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
4770                [QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
4771                [QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
4772                [QIBPORTCNTR_SENDSTALL] = crp_sendstall,
4773                [QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
4774                [QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
4775                [QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
4776                [QIBPORTCNTR_RCVEBP] = crp_rcvebp,
4777                [QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
4778                [QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
4779                [QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed  for 7322 */
4780                [QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
4781                [QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
4782                [QIBPORTCNTR_ERRICRC] = crp_erricrc,
4783                [QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
4784                [QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
4785                [QIBPORTCNTR_BADFORMAT] = crp_badformat,
4786                [QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
4787                [QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
4788                [QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
4789                [QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
4790                [QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
4791                [QIBPORTCNTR_ERRLINK] = crp_errlink,
4792                [QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
4793                [QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
4794                [QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
4795                [QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
4796                [QIBPORTCNTR_ERRPKEY] = crp_errpkey,
4797                /*
4798                 * the next 3 aren't really counters, but were implemented
4799                 * as counters in older chips, so they are still accessed
4800                 * as counters by this code.
4801                 */
4802                [QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
4803                [QIBPORTCNTR_PSSTART] = krp_psstart,
4804                [QIBPORTCNTR_PSSTAT] = krp_psstat,
4805                /* pseudo-counter, summed for all ports */
4806                [QIBPORTCNTR_KHDROVFL] = 0xffff,
4807        };
4808
4809        if (reg >= ARRAY_SIZE(xlator)) {
4810                qib_devinfo(ppd->dd->pcidev,
4811                         "Unimplemented portcounter %u\n", reg);
4812                goto done;
4813        }
4814        creg = xlator[reg] & _PORT_CNTR_IDXMASK;
4815
4816        /* handle non-counters and special cases first */
4817        if (reg == QIBPORTCNTR_KHDROVFL) {
4818                int i;
4819
4820                /* sum over all kernel contexts (skip if mini_init) */
4821                for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
4822                        struct qib_ctxtdata *rcd = dd->rcd[i];
4823
4824                        if (!rcd || rcd->ppd != ppd)
4825                                continue;
4826                        ret += read_7322_creg32(dd, cr_base_egrovfl + i);
4827                }
4828                goto done;
4829        } else if (reg == QIBPORTCNTR_RXDROPPKT) {
4830                /*
4831                 * Used as part of the synthesis of port_rcv_errors
4832                 * in the verbs code for IBTA counters.  Not needed for 7322,
4833                 * because all the errors are already counted by other cntrs.
4834                 */
4835                goto done;
4836        } else if (reg == QIBPORTCNTR_PSINTERVAL ||
4837                   reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
4838                /* were counters in older chips, now per-port kernel regs */
4839                ret = qib_read_kreg_port(ppd, creg);
4840                goto done;
4841        }
4842
4843        /*
4844         * Only fast increment counters are 64 bits; use 32 bit reads to
4845         * avoid two independent reads when on Opteron.
4846         */
4847        if (xlator[reg] & _PORT_64BIT_FLAG)
4848                ret = read_7322_creg_port(ppd, creg);
4849        else
4850                ret = read_7322_creg32_port(ppd, creg);
4851        if (creg == crp_ibsymbolerr) {
4852                if (ppd->cpspec->ibdeltainprog)
4853                        ret -= ret - ppd->cpspec->ibsymsnap;
4854                ret -= ppd->cpspec->ibsymdelta;
4855        } else if (creg == crp_iblinkerrrecov) {
4856                if (ppd->cpspec->ibdeltainprog)
4857                        ret -= ret - ppd->cpspec->iblnkerrsnap;
4858                ret -= ppd->cpspec->iblnkerrdelta;
4859        } else if (creg == crp_errlink)
4860                ret -= ppd->cpspec->ibmalfdelta;
4861        else if (creg == crp_iblinkdown)
4862                ret += ppd->cpspec->iblnkdowndelta;
4863done:
4864        return ret;
4865}
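
/*
 * Example (illustrative): qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND)
 * maps through xlator[] to crp_wordsend with _PORT_64BIT_FLAG set and
 * takes the 64-bit read path, whereas QIBPORTCNTR_KHDROVFL never
 * reaches the register read at all: it is synthesized by summing the
 * per-context eager-overflow counters for this port.
 */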
4866
4867/*
4868 * Device counter names (not port-specific), one line per stat,
4869 * single string.  Used by utilities like ipathstats to print the stats
4870 * in a way which works for different versions of drivers, without changing
4871 * the utility.  Names need to be 12 chars or less (w/o newline), for proper
4872 * display by utility.
4873 * Non-error counters are first.
4874 * Start of "error" counters is indicated by a leading "E " on the first
4875 * "error" counter, and doesn't count in label length.
4876 * The EgrOvfl list needs to be last so we truncate them at the configured
4877 * context count for the device.
4878 * cntr7322indices contains the corresponding register indices.
4879 */
4880static const char cntr7322names[] =
4881        "Interrupts\n"
4882        "HostBusStall\n"
4883        "E RxTIDFull\n"
4884        "RxTIDInvalid\n"
4885        "RxTIDFloDrop\n" /* 7322 only */
4886        "Ctxt0EgrOvfl\n"
4887        "Ctxt1EgrOvfl\n"
4888        "Ctxt2EgrOvfl\n"
4889        "Ctxt3EgrOvfl\n"
4890        "Ctxt4EgrOvfl\n"
4891        "Ctxt5EgrOvfl\n"
4892        "Ctxt6EgrOvfl\n"
4893        "Ctxt7EgrOvfl\n"
4894        "Ctxt8EgrOvfl\n"
4895        "Ctxt9EgrOvfl\n"
4896        "Ctx10EgrOvfl\n"
4897        "Ctx11EgrOvfl\n"
4898        "Ctx12EgrOvfl\n"
4899        "Ctx13EgrOvfl\n"
4900        "Ctx14EgrOvfl\n"
4901        "Ctx15EgrOvfl\n"
4902        "Ctx16EgrOvfl\n"
4903        "Ctx17EgrOvfl\n"
4904        ;
4905
4906static const u32 cntr7322indices[] = {
4907        cr_lbint | _PORT_64BIT_FLAG,
4908        cr_lbstall | _PORT_64BIT_FLAG,
4909        cr_tidfull,
4910        cr_tidinvalid,
4911        cr_rxtidflowdrop,
4912        cr_base_egrovfl + 0,
4913        cr_base_egrovfl + 1,
4914        cr_base_egrovfl + 2,
4915        cr_base_egrovfl + 3,
4916        cr_base_egrovfl + 4,
4917        cr_base_egrovfl + 5,
4918        cr_base_egrovfl + 6,
4919        cr_base_egrovfl + 7,
4920        cr_base_egrovfl + 8,
4921        cr_base_egrovfl + 9,
4922        cr_base_egrovfl + 10,
4923        cr_base_egrovfl + 11,
4924        cr_base_egrovfl + 12,
4925        cr_base_egrovfl + 13,
4926        cr_base_egrovfl + 14,
4927        cr_base_egrovfl + 15,
4928        cr_base_egrovfl + 16,
4929        cr_base_egrovfl + 17,
4930};
4931
4932/*
4933 * same as cntr7322names and cntr7322indices, but for port-specific counters.
4934 * portcntr7322indices is somewhat complicated by some registers needing
4935 * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
4936 */
4937static const char portcntr7322names[] =
4938        "TxPkt\n"
4939        "TxFlowPkt\n"
4940        "TxWords\n"
4941        "RxPkt\n"
4942        "RxFlowPkt\n"
4943        "RxWords\n"
4944        "TxFlowStall\n"
4945        "TxDmaDesc\n"  /* 7220 and 7322-only */
4946        "E RxDlidFltr\n"  /* 7220 and 7322-only */
4947        "IBStatusChng\n"
4948        "IBLinkDown\n"
4949        "IBLnkRecov\n"
4950        "IBRxLinkErr\n"
4951        "IBSymbolErr\n"
4952        "RxLLIErr\n"
4953        "RxBadFormat\n"
4954        "RxBadLen\n"
4955        "RxBufOvrfl\n"
4956        "RxEBP\n"
4957        "RxFlowCtlErr\n"
4958        "RxICRCerr\n"
4959        "RxLPCRCerr\n"
4960        "RxVCRCerr\n"
4961        "RxInvalLen\n"
4962        "RxInvalPKey\n"
4963        "RxPktDropped\n"
4964        "TxBadLength\n"
4965        "TxDropped\n"
4966        "TxInvalLen\n"
4967        "TxUnderrun\n"
4968        "TxUnsupVL\n"
4969        "RxLclPhyErr\n" /* 7220 and 7322-only from here down */
4970        "RxVL15Drop\n"
4971        "RxVlErr\n"
4972        "XcessBufOvfl\n"
4973        "RxQPBadCtxt\n" /* 7322-only from here down */
4974        "TXBadHeader\n"
4975        ;
4976
4977static const u32 portcntr7322indices[] = {
4978        QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
4979        crp_pktsendflow,
4980        QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
4981        QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
4982        crp_pktrcvflowctrl,
4983        QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
4984        QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
4985        crp_txsdmadesc | _PORT_64BIT_FLAG,
4986        crp_rxdlidfltr,
4987        crp_ibstatuschange,
4988        QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
4989        QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
4990        QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
4991        QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
4992        QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
4993        QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
4994        QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
4995        QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
4996        QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
4997        crp_rcvflowctrlviol,
4998        QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
4999        QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
5000        QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
5001        QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
5002        QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
5003        QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
5004        crp_txminmaxlenerr,
5005        crp_txdroppedpkt,
5006        crp_txlenerr,
5007        crp_txunderrun,
5008        crp_txunsupvl,
5009        QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
5010        QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
5011        QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
5012        QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
5013        crp_rxqpinvalidctxt,
5014        crp_txhdrerr,
5015};
5016
5017/* do all the setup to make the counter reads efficient later */
5018static void init_7322_cntrnames(struct qib_devdata *dd)
5019{
5020        int i, j = 0;
5021        char *s;
5022
5023        for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
5024             i++) {
5025                /* we always have at least one counter before the egrovfl */
5026                if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
5027                        j = 1;
5028                s = strchr(s + 1, '\n');
5029                if (s && j)
5030                        j++;
5031        }
5032        dd->cspec->ncntrs = i;
5033        if (!s)
5034                /* full list; size is without terminating null */
5035                dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
5036        else
5037                dd->cspec->cntrnamelen = 1 + s - cntr7322names;
5038        dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
5039                * sizeof(u64), GFP_KERNEL);
5040
5041        for (i = 0, s = (char *)portcntr7322names; s; i++)
5042                s = strchr(s + 1, '\n');
5043        dd->cspec->nportcntrs = i - 1;
5044        dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
5045        for (i = 0; i < dd->num_pports; ++i) {
5046                dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
5047                        * sizeof(u64), GFP_KERNEL);
5048        }
5049}
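
/*
 * Worked example (illustrative): with dd->cfgctxts == 6 the scan stops
 * after "Ctxt5EgrOvfl", so ncntrs ends up 5 + 6 = 11 (five device
 * counters plus six EgrOvfl entries) and cntrnamelen is trimmed to end
 * just past that name; cntr7322indices[] stays parallel to the
 * surviving prefix of cntr7322names.
 */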
5050
5051static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
5052                              u64 **cntrp)
5053{
5054        u32 ret;
5055
5056        if (namep) {
5057                ret = dd->cspec->cntrnamelen;
5058                if (pos >= ret)
5059                        ret = 0; /* final read after getting everything */
5060                else
5061                        *namep = (char *) cntr7322names;
5062        } else {
5063                u64 *cntr = dd->cspec->cntrs;
5064                int i;
5065
5066                ret = dd->cspec->ncntrs * sizeof(u64);
5067                if (!cntr || pos >= ret) {
5068                        /* everything read, or couldn't get memory */
5069                        ret = 0;
5070                        goto done;
5071                }
5072                *cntrp = cntr;
5073                for (i = 0; i < dd->cspec->ncntrs; i++)
5074                        if (cntr7322indices[i] & _PORT_64BIT_FLAG)
5075                                *cntr++ = read_7322_creg(dd,
5076                                                         cntr7322indices[i] &
5077                                                         _PORT_CNTR_IDXMASK);
5078                        else
5079                                *cntr++ = read_7322_creg32(dd,
5080                                                           cntr7322indices[i]);
5081        }
5082done:
5083        return ret;
5084}
5085
5086static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
5087                                  char **namep, u64 **cntrp)
5088{
5089        u32 ret;
5090
5091        if (namep) {
5092                ret = dd->cspec->portcntrnamelen;
5093                if (pos >= ret)
5094                        ret = 0; /* final read after getting everything */
5095                else
5096                        *namep = (char *)portcntr7322names;
5097        } else {
5098                struct qib_pportdata *ppd = &dd->pport[port];
5099                u64 *cntr = ppd->cpspec->portcntrs;
5100                int i;
5101
5102                ret = dd->cspec->nportcntrs * sizeof(u64);
5103                if (!cntr || pos >= ret) {
5104                        /* everything read, or couldn't get memory */
5105                        ret = 0;
5106                        goto done;
5107                }
5108                *cntrp = cntr;
5109                for (i = 0; i < dd->cspec->nportcntrs; i++) {
5110                        if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
5111                                *cntr++ = qib_portcntr_7322(ppd,
5112                                        portcntr7322indices[i] &
5113                                        _PORT_CNTR_IDXMASK);
5114                        else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
5115                                *cntr++ = read_7322_creg_port(ppd,
5116                                           portcntr7322indices[i] &
5117                                            _PORT_CNTR_IDXMASK);
5118                        else
5119                                *cntr++ = read_7322_creg32_port(ppd,
5120                                           portcntr7322indices[i]);
5121                }
5122        }
5123done:
5124        return ret;
5125}
5126
5127/**
5128 * qib_get_7322_faststats - get word counters from chip before they overflow
5129 * @opaque: contains a pointer to the qlogic_ib device qib_devdata
5130 *
5131 * VESTIGIAL: the IBA7322 has no "small fast counters", so the only
5132 * real purpose of this function is to maintain the notion of
5133 * "active time", which in turn is only logged into the eeprom,
5134 * which we don't yet have for 7322-based boards.
5135 *
5136 * called from add_timer
5137 */
5138static void qib_get_7322_faststats(unsigned long opaque)
5139{
5140        struct qib_devdata *dd = (struct qib_devdata *) opaque;
5141        struct qib_pportdata *ppd;
5142        unsigned long flags;
5143        u64 traffic_wds;
5144        int pidx;
5145
5146        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5147                ppd = dd->pport + pidx;
5148
5149                /*
5150                 * If the port isn't enabled or isn't operational, or if
5151                 * diags are running (which can cause memory diags to fail),
5152                 * skip this port this time.
5153                 */
5154                if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
5155                    || dd->diag_client)
5156                        continue;
5157
5158                /*
5159                 * Maintain an activity timer, based on traffic
5160                 * exceeding a threshold, so we need to check the word-counts
5161                 * even if they are 64-bit.
5162                 */
5163                traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
5164                        qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
5165                spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
5166                traffic_wds -= ppd->dd->traffic_wds;
5167                ppd->dd->traffic_wds += traffic_wds;
5168                spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
5169                if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
5170                                                QIB_IB_QDR) &&
5171                    (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
5172                                    QIBL_LINKACTIVE)) &&
5173                    ppd->cpspec->qdr_dfe_time &&
5174                    time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) {
5175                        ppd->cpspec->qdr_dfe_on = 0;
5176
5177                        qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
5178                                            ppd->dd->cspec->r1 ?
5179                                            QDR_STATIC_ADAPT_INIT_R1 :
5180                                            QDR_STATIC_ADAPT_INIT);
5181                        force_h1(ppd);
5182                }
5183        }
5184        mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
5185}
5186
5187/*
5188 * If we were using MSIx, try to fall back to INTx.
5189 */
5190static int qib_7322_intr_fallback(struct qib_devdata *dd)
5191{
5192        if (!dd->cspec->num_msix_entries)
5193                return 0; /* already using INTx */
5194
5195        qib_devinfo(dd->pcidev,
5196                "MSIx interrupt not detected, trying INTx interrupts\n");
5197        qib_7322_nomsix(dd);
5198        qib_enable_intx(dd->pcidev);
5199        qib_setup_7322_interrupt(dd, 0);
5200        return 1;
5201}
5202
5203/*
5204 * Reset the XGXS (between serdes and IBC).  Slightly less intrusive
5205 * than resetting the IBC or external link state, and useful in some
5206 * cases to cause some retraining.  To do this right, we reset IBC
5207 * as well, then return to previous state (which may be still in reset)
5208 * NOTE: some callers of this "know" this writes the current value
5209 * of cpspec->ibcctrl_a as part of its operation, so if that changes,
5210 * check all callers.
5211 */
5212static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
5213{
5214        u64 val;
5215        struct qib_devdata *dd = ppd->dd;
5216        const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
5217                SYM_MASK(IBPCSConfig_0, xcv_treset) |
5218                SYM_MASK(IBPCSConfig_0, tx_rx_reset);
5219
5220        val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
5221        qib_write_kreg(dd, kr_hwerrmask,
5222                       dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
5223        qib_write_kreg_port(ppd, krp_ibcctrl_a,
5224                            ppd->cpspec->ibcctrl_a &
5225                            ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
5226
5227        qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
5228        qib_read_kreg32(dd, kr_scratch);
5229        qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
5230        qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
5231        qib_write_kreg(dd, kr_scratch, 0ULL);
5232        qib_write_kreg(dd, kr_hwerrclear,
5233                       SYM_MASK(HwErrClear, statusValidNoEopClear));
5234        qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
5235}
5236
5237/*
5238 * This code for non-IBTA-compliant IB speed negotiation is only known to
5239 * work for the SDR to DDR transition, and only between an HCA and a switch
5240 * with recent firmware.  It is based on observed heuristics, rather than
5241 * actual knowledge of the non-compliant speed negotiation.
5242 * It has a number of hard-coded fields, since the hope is to rewrite this
5243 * when a spec is available on how the negotiation is intended to work.
5244 */
5245static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
5246                                 u32 dcnt, u32 *data)
5247{
5248        int i;
5249        u64 pbc;
5250        u32 __iomem *piobuf;
5251        u32 pnum, control, len;
5252        struct qib_devdata *dd = ppd->dd;
5253
5254        i = 0;
5255        len = 7 + dcnt + 1; /* 7 dword header, dcnt dwords data, 1 dword icrc */
5256        control = qib_7322_setpbc_control(ppd, len, 0, 15);
5257        pbc = ((u64) control << 32) | len;
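            /*
             * PBC layout: control word in the upper 32 bits, packet
             * length (in dwords, including the ICRC) in the lower 32.
             */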
5258        while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
5259                if (i++ > 15)
5260                        return;
5261                udelay(2);
5262        }
5263        /* disable header check on this packet, since it can't be valid */
5264        dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
5265        writeq(pbc, piobuf);
5266        qib_flush_wc();
5267        qib_pio_copy(piobuf + 2, hdr, 7);
5268        qib_pio_copy(piobuf + 9, data, dcnt);
5269        if (dd->flags & QIB_USE_SPCL_TRIG) {
5270                u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
5271
5272                qib_flush_wc();
5273                __raw_writel(0xaebecede, piobuf + spcl_off);
5274        }
5275        qib_flush_wc();
5276        qib_sendbuf_done(dd, pnum);
5277        /* and re-enable hdr check */
5278        dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
5279}
5280
5281/*
5282 * _start packet gets sent twice at start, _done gets sent twice at end
5283 */
5284static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
5285{
5286        struct qib_devdata *dd = ppd->dd;
5287        static u32 swapped;
5288        u32 dw, i, hcnt, dcnt, *data;
5289        static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
5290        static u32 madpayload_start[0x40] = {
5291                0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5292                0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5293                0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
5294                };
5295        static u32 madpayload_done[0x40] = {
5296                0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5297                0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5298                0x40000001, 0x1388, 0x15e, /* rest 0's */
5299                };
5300
5301        dcnt = ARRAY_SIZE(madpayload_start);
5302        hcnt = ARRAY_SIZE(hdr);
5303        if (!swapped) {
5304                /* for maintainability, do it at runtime */
5305                for (i = 0; i < hcnt; i++) {
5306                        dw = (__force u32) cpu_to_be32(hdr[i]);
5307                        hdr[i] = dw;
5308                }
5309                for (i = 0; i < dcnt; i++) {
5310                        dw = (__force u32) cpu_to_be32(madpayload_start[i]);
5311                        madpayload_start[i] = dw;
5312                        dw = (__force u32) cpu_to_be32(madpayload_done[i]);
5313                        madpayload_done[i] = dw;
5314                }
5315                swapped = 1;
5316        }
5317
5318        data = which ? madpayload_done : madpayload_start;
5319
5320        autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5321        qib_read_kreg64(dd, kr_scratch);
5322        udelay(2);
5323        autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5324        qib_read_kreg64(dd, kr_scratch);
5325        udelay(2);
5326}
5327
5328/*
5329 * Do the absolute minimum to cause an IB speed change, and make it
5330 * ready, but don't actually trigger the change.  The caller will
5331 * do that when ready (if the link is in the Polling training state,
5332 * it will happen immediately, otherwise when the link next goes down).
5333 *
5334 * This routine should only be used as part of the DDR autonegotiation
5335 * code for devices that are not compliant with IB 1.2 (or code that
5336 * fixes things up for same).
5337 *
5338 * When the link has gone down and autoneg is enabled, or autoneg has
5339 * failed and we give up until next time, we set both speeds, and then
5340 * we want IBTA enabled as well as "use max enabled speed".
5341 */
5342static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
5343{
5344        u64 newctrlb;
5345
5346        newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
5347                                    IBA7322_IBC_IBTA_1_2_MASK |
5348                                    IBA7322_IBC_MAX_SPEED_MASK);
5349
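            /*
             * speed is a bitmask of the QIB_IB_SDR/DDR/QDR bits;
             * "speed & (speed - 1)" is non-zero only when more than one
             * bit is set, i.e. when multiple speeds are enabled at once.
             */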
5350        if (speed & (speed - 1)) /* multiple speeds */
5351                newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
5352                                    IBA7322_IBC_IBTA_1_2_MASK |
5353                                    IBA7322_IBC_MAX_SPEED_MASK;
5354        else
5355                newctrlb |= speed == QIB_IB_QDR ?
5356                        IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
5357                        ((speed == QIB_IB_DDR ?
5358                          IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));
5359
5360        if (newctrlb == ppd->cpspec->ibcctrl_b)
5361                return;
5362
5363        ppd->cpspec->ibcctrl_b = newctrlb;
5364        qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
5365        qib_write_kreg(ppd->dd, kr_scratch, 0);
5366}
5367
5368/*
5369 * This routine is only used when we are not talking to another
5370 * IB 1.2-compliant device that we think can do DDR.
5371 * (This includes all existing switch chips as of Oct 2007.)
5372 * 1.2-compliant devices go directly to DDR prior to reaching INIT.
5373 */
5374static void try_7322_autoneg(struct qib_pportdata *ppd)
5375{
5376        unsigned long flags;
5377
5378        spin_lock_irqsave(&ppd->lflags_lock, flags);
5379        ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
5380        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5381        qib_autoneg_7322_send(ppd, 0);
5382        set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5383        qib_7322_mini_pcs_reset(ppd);
5384        /* 2 msec is minimum length of a poll cycle */
5385        queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
5386                           msecs_to_jiffies(2));
5387}
5388
5389/*
5390 * Handle the empirically determined mechanism for auto-negotiation
5391 * of DDR speed with switches.
5392 */
5393static void autoneg_7322_work(struct work_struct *work)
5394{
5395        struct qib_pportdata *ppd;
5396        struct qib_devdata *dd;
5397        u64 startms;
5398        u32 i;
5399        unsigned long flags;
5400
5401        ppd = container_of(work, struct qib_chippport_specific,
5402                            autoneg_work.work)->ppd;
5403        dd = ppd->dd;
5404
5405        startms = jiffies_to_msecs(jiffies);
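            /*
             * The waits below are expected to time out; they end early
             * only if QIBL_IB_AUTONEG_INPROG is cleared elsewhere (the
             * link came back up at the negotiated speed), in which case
             * there is nothing more to do here.
             */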
5406
5407        /*
5408         * Busy-wait for this first part; it should take at most a
5409         * few hundred usec, since we scheduled ourselves for 2 msec.
5410         */
5411        for (i = 0; i < 25; i++) {
5412                if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
5413                     == IB_7322_LT_STATE_POLLQUIET) {
5414                        qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
5415                        break;
5416                }
5417                udelay(100);
5418        }
5419
5420        if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
5421                goto done; /* we got there early or told to stop */
5422
5423        /* we expect this to timeout */
5424        if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5425                               !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5426                               msecs_to_jiffies(90)))
5427                goto done;
5428        qib_7322_mini_pcs_reset(ppd);
5429
5430        /* we expect this to timeout */
5431        if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5432                               !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5433                               msecs_to_jiffies(1700)))
5434                goto done;
5435        qib_7322_mini_pcs_reset(ppd);
5436
5437        set_7322_ibspeed_fast(ppd, QIB_IB_SDR);
5438
5439        /*
5440         * Wait up to 250 msec for link to train and get to INIT at DDR;
5441         * this should terminate early.
5442         */
5443        wait_event_timeout(ppd->cpspec->autoneg_wait,
5444                !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5445                msecs_to_jiffies(250));
5446done:
5447        if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
5448                spin_lock_irqsave(&ppd->lflags_lock, flags);
5449                ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
5450                if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
5451                        ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
5452                        ppd->cpspec->autoneg_tries = 0;
5453                }
5454                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5455                set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5456        }
5457}
5458
5459/*
5460 * This routine is used to request that the IPG (inter-packet gap) be
5461 * set in the QLogic switch.  Only called if r1.
5462 */
5463static void try_7322_ipg(struct qib_pportdata *ppd)
5464{
5465        struct qib_ibport *ibp = &ppd->ibport_data;
5466        struct ib_mad_send_buf *send_buf;
5467        struct ib_mad_agent *agent;
5468        struct ib_smp *smp;
5469        unsigned delay;
5470        int ret;
5471
5472        agent = ibp->rvp.send_agent;
5473        if (!agent)
5474                goto retry;
5475
5476        send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
5477                                      IB_MGMT_MAD_DATA, GFP_ATOMIC,
5478                                      IB_MGMT_BASE_VERSION);
5479        if (IS_ERR(send_buf))
5480                goto retry;
5481
5482        if (!ibp->smi_ah) {
5483                struct ib_ah *ah;
5484
5485                ah = qib_create_qp0_ah(ibp, be16_to_cpu(IB_LID_PERMISSIVE));
5486                if (IS_ERR(ah))
5487                        ret = PTR_ERR(ah);
5488                else {
5489                        send_buf->ah = ah;
5490                        ibp->smi_ah = ibah_to_rvtah(ah);
5491                        ret = 0;
5492                }
5493        } else {
5494                send_buf->ah = &ibp->smi_ah->ibah;
5495                ret = 0;
5496        }
5497
5498        smp = send_buf->mad;
5499        smp->base_version = IB_MGMT_BASE_VERSION;
5500        smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
5501        smp->class_version = 1;
5502        smp->method = IB_MGMT_METHOD_SEND;
5503        smp->hop_cnt = 1;
5504        smp->attr_id = QIB_VENDOR_IPG;
5505        smp->attr_mod = 0;
5506
5507        if (!ret)
5508                ret = ib_post_send_mad(send_buf, NULL);
5509        if (ret)
5510                ib_free_send_mad(send_buf);
5511retry:
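            /* back off exponentially: 2, 4, 8, ... msec between attempts */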
5512        delay = 2 << ppd->cpspec->ipg_tries;
5513        queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
5514                           msecs_to_jiffies(delay));
5515}
5516
5517/*
5518 * Timeout handler for setting IPG.
5519 * Only called if r1.
5520 */
5521static void ipg_7322_work(struct work_struct *work)
5522{
5523        struct qib_pportdata *ppd;
5524
5525        ppd = container_of(work, struct qib_chippport_specific,
5526                           ipg_work.work)->ppd;
5527        if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
5528            && ++ppd->cpspec->ipg_tries <= 10)
5529                try_7322_ipg(ppd);
5530}
5531
5532static u32 qib_7322_iblink_state(u64 ibcs)
5533{
5534        u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);
5535
5536        switch (state) {
5537        case IB_7322_L_STATE_INIT:
5538                state = IB_PORT_INIT;
5539                break;
5540        case IB_7322_L_STATE_ARM:
5541                state = IB_PORT_ARMED;
5542                break;
5543        case IB_7322_L_STATE_ACTIVE:
5544                /* fall through */
5545        case IB_7322_L_STATE_ACT_DEFER:
5546                state = IB_PORT_ACTIVE;
5547                break;
5548        default: /* fall through */
5549        case IB_7322_L_STATE_DOWN:
5550                state = IB_PORT_DOWN;
5551                break;
5552        }
5553        return state;
5554}
5555
5556/* returns the IBTA physical port state, rather than the IBC link training state */
5557static u8 qib_7322_phys_portstate(u64 ibcs)
5558{
5559        u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
5560        return qib_7322_physportstate[state];
5561}
5562
5563static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
5564{
5565        int ret = 0, symadj = 0;
5566        unsigned long flags;
5567        int mult;
5568
5569        spin_lock_irqsave(&ppd->lflags_lock, flags);
5570        ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
5571        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5572
5573        /* Update our picture of width and speed from chip */
5574        if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
5575                ppd->link_speed_active = QIB_IB_QDR;
5576                mult = 4;
5577        } else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
5578                ppd->link_speed_active = QIB_IB_DDR;
5579                mult = 2;
5580        } else {
5581                ppd->link_speed_active = QIB_IB_SDR;
5582                mult = 1;
5583        }
5584        if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
5585                ppd->link_width_active = IB_WIDTH_4X;
5586                mult *= 4;
5587        } else
5588                ppd->link_width_active = IB_WIDTH_1X;
5589        ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
5590
5591        if (!ibup) {
5592                u64 clr;
5593
5594                /* Link went down. */
5595                /* do IPG MAD again after linkdown, even if last time failed */
5596                ppd->cpspec->ipg_tries = 0;
5597                clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
5598                        (SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
5599                         SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
5600                if (clr)
5601                        qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
5602                if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5603                                     QIBL_IB_AUTONEG_INPROG)))
5604                        set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5605                if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5606                        struct qib_qsfp_data *qd =
5607                                &ppd->cpspec->qsfp_data;
5608                        /* unlock the Tx settings, speed may change */
5609                        qib_write_kreg_port(ppd, krp_tx_deemph_override,
5610                                SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
5611                                reset_tx_deemphasis_override));
5612                        qib_cancel_sends(ppd);
5613                        /* on link down, ensure sane pcs state */
5614                        qib_7322_mini_pcs_reset(ppd);
5615                        /* schedule the qsfp refresh, which should
5616                         * turn the link off */
5617                        if (ppd->dd->flags & QIB_HAS_QSFP) {
5618                                qd->t_insert = jiffies;
5619                                queue_work(ib_wq, &qd->work);
5620                        }
5621                        spin_lock_irqsave(&ppd->sdma_lock, flags);
5622                        if (__qib_sdma_running(ppd))
5623                                __qib_sdma_process_event(ppd,
5624                                        qib_sdma_event_e70_go_idle);
5625                        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
5626                }
5627                clr = read_7322_creg32_port(ppd, crp_iblinkdown);
5628                if (clr == ppd->cpspec->iblnkdownsnap)
5629                        ppd->cpspec->iblnkdowndelta++;
5630        } else {
5631                if (qib_compat_ddr_negotiate &&
5632                    !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5633                                     QIBL_IB_AUTONEG_INPROG)) &&
5634                    ppd->link_speed_active == QIB_IB_SDR &&
5635                    (ppd->link_speed_enabled & QIB_IB_DDR)
5636                    && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
5637                        /* we are SDR, and auto-negotiation enabled */
5638                        ++ppd->cpspec->autoneg_tries;
5639                        if (!ppd->cpspec->ibdeltainprog) {
5640                                ppd->cpspec->ibdeltainprog = 1;
5641                                ppd->cpspec->ibsymdelta +=
5642                                        read_7322_creg32_port(ppd,
5643                                                crp_ibsymbolerr) -
5644                                                ppd->cpspec->ibsymsnap;
5645                                ppd->cpspec->iblnkerrdelta +=
5646                                        read_7322_creg32_port(ppd,
5647                                                crp_iblinkerrrecov) -
5648                                                ppd->cpspec->iblnkerrsnap;
5649                        }
5650                        try_7322_autoneg(ppd);
5651                        ret = 1; /* no other IB status change processing */
5652                } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5653                           ppd->link_speed_active == QIB_IB_SDR) {
5654                        qib_autoneg_7322_send(ppd, 1);
5655                        set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5656                        qib_7322_mini_pcs_reset(ppd);
5657                        udelay(2);
5658                        ret = 1; /* no other IB status change processing */
5659                } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5660                           (ppd->link_speed_active & QIB_IB_DDR)) {
5661                        spin_lock_irqsave(&ppd->lflags_lock, flags);
5662                        ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
5663                                         QIBL_IB_AUTONEG_FAILED);
5664                        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5665                        ppd->cpspec->autoneg_tries = 0;
5666                        /* re-enable SDR, for next link down */
5667                        set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5668                        wake_up(&ppd->cpspec->autoneg_wait);
5669                        symadj = 1;
5670                } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
5671                        /*
5672                         * Clear autoneg failure flag, and do setup
5673                         * so we'll try next time link goes down and
5674                         * back to INIT (possibly connected to a
5675                         * different device).
5676                         */
5677                        spin_lock_irqsave(&ppd->lflags_lock, flags);
5678                        ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
5679                        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5680                        ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
5681                        symadj = 1;
5682                }
5683                if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5684                        symadj = 1;
5685                        if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
5686                                try_7322_ipg(ppd);
5687                        if (!ppd->cpspec->recovery_init)
5688                                setup_7322_link_recovery(ppd, 0);
5689                        ppd->cpspec->qdr_dfe_time = jiffies +
5690                                msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
5691                }
5692                ppd->cpspec->ibmalfusesnap = 0;
5693                ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
5694                        crp_errlink);
5695        }
5696        if (symadj) {
5697                ppd->cpspec->iblnkdownsnap =
5698                        read_7322_creg32_port(ppd, crp_iblinkdown);
5699                if (ppd->cpspec->ibdeltainprog) {
5700                        ppd->cpspec->ibdeltainprog = 0;
5701                        ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
5702                                crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
5703                        ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
5704                                crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
5705                }
5706        } else if (!ibup && qib_compat_ddr_negotiate &&
5707                   !ppd->cpspec->ibdeltainprog &&
5708                        !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5709                ppd->cpspec->ibdeltainprog = 1;
5710                ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
5711                        crp_ibsymbolerr);
5712                ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
5713                        crp_iblinkerrrecov);
5714        }
5715
5716        if (!ret)
5717                qib_setup_7322_setextled(ppd, ibup);
5718        return ret;
5719}
5720
5721/*
5722 * Does a read/modify/write to the appropriate registers to
5723 * set output and direction bits selected by mask.
5724 * These are in their canonical positions (e.g. lsb of
5725 * dir will end up in D48 of extctrl on existing chips).
5726 * Returns contents of GP Inputs.
5727 */
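    /*
     * For example, gpio_7322_mod(dd, 0, 0, 0) changes no output or
     * direction bits and simply returns the current GPIO inputs;
     * qib_7322_eeprom_wen() below uses exactly that to sample the
     * write-enable state.
     */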
5728static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
5729{
5730        u64 read_val, new_out;
5731        unsigned long flags;
5732
5733        if (mask) {
5734                /* some bits being written, lock access to GPIO */
5735                dir &= mask;
5736                out &= mask;
5737                spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
5738                dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
5739                dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
5740                new_out = (dd->cspec->gpio_out & ~mask) | out;
5741
5742                qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
5743                qib_write_kreg(dd, kr_gpio_out, new_out);
5744                dd->cspec->gpio_out = new_out;
5745                spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
5746        }
5747        /*
5748         * It is unlikely that a read at this time would get valid
5749         * data on a pin whose direction line was set in the same
5750         * call to this function. We include the read here because
5751         * that allows us to potentially combine a change on one pin with
5752         * a read on another, and because the old code did something like
5753         * this.
5754         */
5755        read_val = qib_read_kreg64(dd, kr_extstatus);
5756        return SYM_FIELD(read_val, EXTStatus, GPIOIn);
5757}
5758
5759/* Enable writes to config EEPROM, if possible. Returns previous state */
5760static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
5761{
5762        int prev_wen;
5763        u32 mask;
5764
5765        mask = 1 << QIB_EEPROM_WEN_NUM;
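            /*
             * Write-enable is active-low: read the current inputs (a
             * zero mask writes nothing), invert, and pick out the WEN
             * bit.
             */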
5766        prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
5767        gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
5768
5769        return prev_wen & 1;
5770}
5771
5772/*
5773 * Read fundamental info we need to use the chip.  These are
5774 * the registers that describe chip capabilities, and are
5775 * saved in shadow registers.
5776 */
5777static void get_7322_chip_params(struct qib_devdata *dd)
5778{
5779        u64 val;
5780        u32 piobufs;
5781        int mtu;
5782
5783        dd->palign = qib_read_kreg32(dd, kr_pagealign);
5784
5785        dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
5786
5787        dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
5788        dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
5789        dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
5790        dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
5791        dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
5792
5793        val = qib_read_kreg64(dd, kr_sendpiobufcnt);
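            /* low 32 bits hold the 2K-buffer count, high 32 bits the 4K count */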
5794        dd->piobcnt2k = val & ~0U;
5795        dd->piobcnt4k = val >> 32;
5796        val = qib_read_kreg64(dd, kr_sendpiosize);
5797        dd->piosize2k = val & ~0U;
5798        dd->piosize4k = val >> 32;
5799
5800        mtu = ib_mtu_enum_to_int(qib_ibmtu);
5801        if (mtu == -1)
5802                mtu = QIB_DEFAULT_MTU;
5803        dd->pport[0].ibmtu = (u32)mtu;
5804        dd->pport[1].ibmtu = (u32)mtu;
5805
5806        /* these may be adjusted in init_chip_wc_pat() */
5807        dd->pio2kbase = (u32 __iomem *)
5808                ((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
5809        dd->pio4kbase = (u32 __iomem *)
5810                ((char __iomem *) dd->kregbase +
5811                 (dd->piobufbase >> 32));
5812        /*
5813         * 4K buffers take 2 pages; we use roundup just to be
5814         * paranoid; we calculate it once here, rather than on
5815         * every buffer allocation.
5816         */
5817        dd->align4k = ALIGN(dd->piosize4k, dd->palign);
5818
5819        piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;
5820
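            /*
             * Each 64-bit pioavail register covers 32 buffers (2 bits
             * per buffer), so round up to a whole number of registers.
             */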
5821        dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
5822                (sizeof(u64) * BITS_PER_BYTE / 2);
5823}
5824
5825/*
5826 * The chip base addresses in cspec and cpspec have to be set
5827 * after possible init_chip_wc_pat(), rather than in
5828 * get_7322_chip_params(), so split out as separate function
5829 */
5830static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
5831{
5832        u32 cregbase;
5833
5834        cregbase = qib_read_kreg32(dd, kr_counterregbase);
5835
5836        dd->cspec->cregbase = (u64 __iomem *)(cregbase +
5837                (char __iomem *)dd->kregbase);
5838
5839        dd->egrtidbase = (u64 __iomem *)
5840                ((char __iomem *) dd->kregbase + dd->rcvegrbase);
5841
5842        /* port registers are defined as relative to base of chip */
5843        dd->pport[0].cpspec->kpregbase =
5844                (u64 __iomem *)((char __iomem *)dd->kregbase);
5845        dd->pport[1].cpspec->kpregbase =
5846                (u64 __iomem *)(dd->palign +
5847                (char __iomem *)dd->kregbase);
5848        dd->pport[0].cpspec->cpregbase =
5849                (u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
5850                kr_counterregbase) + (char __iomem *)dd->kregbase);
5851        dd->pport[1].cpspec->cpregbase =
5852                (u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
5853                kr_counterregbase) + (char __iomem *)dd->kregbase);
5854}
5855
5856/*
5857 * This is a fairly special-purpose observer, so we only support
5858 * the port-specific parts of SendCtrl
5859 */
5860
5861#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) |           \
5862                           SYM_MASK(SendCtrl_0, SDmaEnable) |           \
5863                           SYM_MASK(SendCtrl_0, SDmaIntEnable) |        \
5864                           SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
5865                           SYM_MASK(SendCtrl_0, SDmaHalt) |             \
5866                           SYM_MASK(SendCtrl_0, IBVLArbiterEn) |        \
5867                           SYM_MASK(SendCtrl_0, ForceCreditUpToDate))
5868
5869static int sendctrl_hook(struct qib_devdata *dd,
5870                         const struct diag_observer *op, u32 offs,
5871                         u64 *data, u64 mask, int only_32)
5872{
5873        unsigned long flags;
5874        unsigned idx;
5875        unsigned pidx;
5876        struct qib_pportdata *ppd = NULL;
5877        u64 local_data, all_bits;
5878
5879        /*
5880         * The fixed correspondence between Physical ports and pports is
5881         * severed. We need to hunt for the ppd that corresponds
5882         * to the offset we got. And we have to do that without admitting
5883         * we know the stride, apparently.
5884         */
5885        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5886                u64 __iomem *psptr;
5887                u32 psoffs;
5888
5889                ppd = dd->pport + pidx;
5890                if (!ppd->cpspec->kpregbase)
5891                        continue;
5892
5893                psptr = ppd->cpspec->kpregbase + krp_sendctrl;
5894                psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
5895                if (psoffs == offs)
5896                        break;
5897        }
5898
5899        /* If pport is not being managed by driver, just avoid shadows. */
5900        if (pidx >= dd->num_pports)
5901                ppd = NULL;
5902
5903        /* In any case, "idx" is flat index in kreg space */
5904        idx = offs / sizeof(u64);
5905
5906        all_bits = ~0ULL;
5907        if (only_32)
5908                all_bits >>= 32;
5909
5910        spin_lock_irqsave(&dd->sendctrl_lock, flags);
5911        if (!ppd || (mask & all_bits) != all_bits) {
5912                /*
5913                 * At least some mask bits are zero, so we need
5914                 * to read. The judgement call is whether from
5915                 * reg or shadow. First-cut: read reg, and complain
5916                 * if any bits which should be shadowed are different
5917                 * from their shadowed value.
5918                 */
5919                if (only_32)
5920                        local_data = (u64)qib_read_kreg32(dd, idx);
5921                else
5922                        local_data = qib_read_kreg64(dd, idx);
5923                *data = (local_data & ~mask) | (*data & mask);
5924        }
5925        if (mask) {
5926                /*
5927                 * At least some mask bits are one, so we need
5928                 * to write, but only shadow some bits.
5929                 */
5930                u64 sval, tval; /* Shadowed, transient */
5931
5932                /*
5933                 * New shadow val is bits we don't want to touch,
5934                 * ORed with bits we do, that are intended for shadow.
5935                 */
5936                if (ppd) {
5937                        sval = ppd->p_sendctrl & ~mask;
5938                        sval |= *data & SENDCTRL_SHADOWED & mask;
5939                        ppd->p_sendctrl = sval;
5940                } else
5941                        sval = *data & SENDCTRL_SHADOWED & mask;
5942                tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
5943                qib_write_kreg(dd, idx, tval);
5944                qib_write_kreg(dd, kr_scratch, 0ULL);
5945        }
5946        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
5947        return only_32 ? 4 : 8;
5948}
5949
5950static const struct diag_observer sendctrl_0_observer = {
5951        sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
5952        KREG_IDX(SendCtrl_0) * sizeof(u64)
5953};
5954
5955static const struct diag_observer sendctrl_1_observer = {
5956        sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
5957        KREG_IDX(SendCtrl_1) * sizeof(u64)
5958};
5959
5960static ushort sdma_fetch_prio = 8;
5961module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
5962MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");
5963
5964/* Besides logging QSFP events, we set appropriate TxDDS values */
5965static void init_txdds_table(struct qib_pportdata *ppd, int override);
5966
5967static void qsfp_7322_event(struct work_struct *work)
5968{
5969        struct qib_qsfp_data *qd;
5970        struct qib_pportdata *ppd;
5971        unsigned long pwrup;
5972        unsigned long flags;
5973        int ret;
5974        u32 le2;
5975
5976        qd = container_of(work, struct qib_qsfp_data, work);
5977        ppd = qd->ppd;
5978        pwrup = qd->t_insert +
5979                msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC);
5980
5981        /* Delay for 20 msecs to allow ModPrs resistor to set up */
5982        mdelay(QSFP_MODPRS_LAG_MSEC);
5983
5984        if (!qib_qsfp_mod_present(ppd)) {
5985                ppd->cpspec->qsfp_data.modpresent = 0;
5986                /* Set the physical link to disabled */
5987                qib_set_ib_7322_lstate(ppd, 0,
5988                                       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
5989                spin_lock_irqsave(&ppd->lflags_lock, flags);
5990                ppd->lflags &= ~QIBL_LINKV;
5991                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5992        } else {
5993                /*
5994                 * Some QSFPs not only do not respond until the full power-up
5995                 * time, but may behave badly if we try sooner.  So hold off
5996                 * responding to the insertion until then.
5997                 */
5998                while (1) {
5999                        if (time_is_before_jiffies(pwrup))
6000                                break;
6001                        msleep(20);
6002                }
6003
6004                ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
6005
6006                /*
6007                 * Need to change LE2 back to defaults if we couldn't
6008                 * read the cable type (to handle cable swaps), so do this
6009                 * even on failure to read cable information.  We don't
6010                 * get here for QME, so IS_QME check not needed here.
6011                 */
6012                if (!ret && !ppd->dd->cspec->r1) {
6013                        if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
6014                                le2 = LE2_QME;
6015                        else if (qd->cache.atten[1] >= qib_long_atten &&
6016                                 QSFP_IS_CU(qd->cache.tech))
6017                                le2 = LE2_5m;
6018                        else
6019                                le2 = LE2_DEFAULT;
6020                } else
6021                        le2 = LE2_DEFAULT;
6022                ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
6023                /*
6024                 * We always change parameters, since we can choose
6025                 * values for cables without eeproms, and the cable may have
6026                 * changed from a cable with full or partial eeprom content
6027                 * to one with partial or no content.
6028                 */
6029                init_txdds_table(ppd, 0);
6030                /* The physical link is being re-enabled only when the
6031                 * previous state was DISABLED and the VALID bit is not
6032                 * set. This should only happen when the cable has been
6033                 * physically pulled. */
6034                if (!ppd->cpspec->qsfp_data.modpresent &&
6035                    (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) {
6036                        ppd->cpspec->qsfp_data.modpresent = 1;
6037                        qib_set_ib_7322_lstate(ppd, 0,
6038                                QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
6039                        spin_lock_irqsave(&ppd->lflags_lock, flags);
6040                        ppd->lflags |= QIBL_LINKV;
6041                        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
6042                }
6043        }
6044}
6045
6046/*
6047 * There is little we can do but complain to the user if QSFP
6048 * initialization fails.
6049 */
6050static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
6051{
6052        unsigned long flags;
6053        struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
6054        struct qib_devdata *dd = ppd->dd;
6055        u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;
6056
6057        mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
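            /* the second port's ModPrsN pin is in the upper GPIO bits */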
6058        qd->ppd = ppd;
6059        qib_qsfp_init(qd, qsfp_7322_event);
6060        spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
6061        dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
6062        dd->cspec->gpio_mask |= mod_prs_bit;
6063        qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
6064        qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
6065        spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
6066}
6067
6068/*
6069 * Called at device initialization time, and also if the txselect
6070 * module parameter is changed.  This is used for cables that don't
6071 * have valid QSFP EEPROMs (not present, or attenuation is zero).
6072 * We initialize to the default, then if there is a specific
6073 * unit,port match, we use that (and set it immediately, for the
6074 * current speed, if the link is at INIT or better).
6075 * String format is "default# unit#,port#=# ... u,p=#", separators must
6076 * be a SPACE character.  A newline terminates.  The u,p=# tuples may
6077 * optionally have "u,p=#,#", where the final # is the H1 value.
6078 * The last specific match is used (actually, all are used, but last
6079 * one is the one that winds up set); if none at all, fall back on default.
6080 */
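    /*
     * Example (with made-up table indices): "1 0,1=5 0,2=6,8" selects
     * entry 1 as the default, entry 5 for unit 0 port 1, and entry 6
     * with an H1 value of 8 for unit 0 port 2.
     */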
6081static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
6082{
6083        char *nxt, *str;
6084        u32 pidx, unit, port, deflt, h1;
6085        unsigned long val;
6086        int any = 0, seth1;
6087        int txdds_size;
6088
6089        str = txselect_list;
6090
6091        /* default number is validated in setup_txselect() */
6092        deflt = simple_strtoul(str, &nxt, 0);
6093        for (pidx = 0; pidx < dd->num_pports; ++pidx)
6094                dd->pport[pidx].cpspec->no_eep = deflt;
6095
6096        txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
6097        if (IS_QME(dd) || IS_QMH(dd))
6098                txdds_size += TXDDS_MFG_SZ;
6099
6100        while (*nxt && nxt[1]) {
6101                str = ++nxt;
6102                unit = simple_strtoul(str, &nxt, 0);
6103                if (nxt == str || !*nxt || *nxt != ',') {
6104                        while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6105                                ;
6106                        continue;
6107                }
6108                str = ++nxt;
6109                port = simple_strtoul(str, &nxt, 0);
6110                if (nxt == str || *nxt != '=') {
6111                        while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6112                                ;
6113                        continue;
6114                }
6115                str = ++nxt;
6116                val = simple_strtoul(str, &nxt, 0);
6117                if (nxt == str) {
6118                        while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6119                                ;
6120                        continue;
6121                }
6122                if (val >= txdds_size)
6123                        continue;
6124                seth1 = 0;
6125                h1 = 0; /* gcc thinks it might be used uninitialized */
6126                if (*nxt == ',' && nxt[1]) {
6127                        str = ++nxt;
6128                        h1 = (u32)simple_strtoul(str, &nxt, 0);
6129                        if (nxt == str)
6130                                while (*nxt && *nxt++ != ' ') /* skip */
6131                                        ;
6132                        else
6133                                seth1 = 1;
6134                }
6135                for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
6136                     ++pidx) {
6137                        struct qib_pportdata *ppd = &dd->pport[pidx];
6138
6139                        if (ppd->port != port || !ppd->link_speed_supported)
6140                                continue;
6141                        ppd->cpspec->no_eep = val;
6142                        if (seth1)
6143                                ppd->cpspec->h1_val = h1;
6144                        /* now change the IBC and serdes, overriding generic */
6145                        init_txdds_table(ppd, 1);
6146                        /* Re-enable the physical state machine on mezz boards
6147                         * now that the correct settings have been set.
6148                         * QSFP boards are handled by the QSFP event handler */
6149                        if (IS_QMH(dd) || IS_QME(dd))
6150                                qib_set_ib_7322_lstate(ppd, 0,
6151                                            QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
6152                        any++;
6153                }
6154                if (*nxt == '\n')
6155                        break; /* done */
6156        }
6157        if (change && !any) {
6158                /* no specific setting, use the default.
6159                 * Change the IBC and serdes, but since it's
6160                 * general, don't override specific settings.
6161                 */
6162                for (pidx = 0; pidx < dd->num_pports; ++pidx)
6163                        if (dd->pport[pidx].link_speed_supported)
6164                                init_txdds_table(&dd->pport[pidx], 0);
6165        }
6166}
6167
6168/* handle the txselect parameter changing */
6169static int setup_txselect(const char *str, struct kernel_param *kp)
6170{
6171        struct qib_devdata *dd;
6172        unsigned long val;
6173        char *n;
6174
6175        if (strlen(str) >= MAX_ATTEN_LEN) {
6176                pr_info("txselect_values string too long\n");
6177                return -ENOSPC;
6178        }
6179        val = simple_strtoul(str, &n, 0);
6180        if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
6181                                TXDDS_MFG_SZ)) {
6182                pr_info("txselect_values must start with a number < %d\n",
6183                        TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
6184                return -EINVAL;
6185        }
6186        strcpy(txselect_list, str);
6187
6188        list_for_each_entry(dd, &qib_dev_list, list)
6189                if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
6190                        set_no_qsfp_atten(dd, 1);
6191        return 0;
6192}
6193
6194/*
6195 * Write the final few registers that depend on some of the
6196 * init setup.  Done late in init, just before bringing up
6197 * the serdes.
6198 */
6199static int qib_late_7322_initreg(struct qib_devdata *dd)
6200{
6201        int ret = 0, n;
6202        u64 val;
6203
6204        qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
6205        qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
6206        qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
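            /*
             * Write the PIO-available DMA address, then read it back to
             * verify the chip really accepted it.
             */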
6207        qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
6208        val = qib_read_kreg64(dd, kr_sendpioavailaddr);
6209        if (val != dd->pioavailregs_phys) {
6210                qib_dev_err(dd,
6211                        "Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
6212                        (unsigned long) dd->pioavailregs_phys,
6213                        (unsigned long long) val);
6214                ret = -EINVAL;
6215        }
6216
6217        n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
6218        qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
6219        /* driver sends get pkey, lid, etc., so check those too, to catch bugs */
6220        qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);
6221
6222        qib_register_observer(dd, &sendctrl_0_observer);
6223        qib_register_observer(dd, &sendctrl_1_observer);
6224
6225        dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
6226        qib_write_kreg(dd, kr_control, dd->control);
6227        /*
6228         * Set SendDmaFetchPriority and init Tx params, including
6229         * QSFP handler on boards that have QSFP.
6230         * First set our default attenuation entry for cables that
6231         * don't have valid attenuation.
6232         */
6233        set_no_qsfp_atten(dd, 0);
6234        for (n = 0; n < dd->num_pports; ++n) {
6235                struct qib_pportdata *ppd = dd->pport + n;
6236
6237                qib_write_kreg_port(ppd, krp_senddmaprioritythld,
6238                                    sdma_fetch_prio & 0xf);
6239                /* Initialize qsfp if present on board. */
6240                if (dd->flags & QIB_HAS_QSFP)
6241                        qib_init_7322_qsfp(ppd);
6242        }
6243        dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
6244        qib_write_kreg(dd, kr_control, dd->control);
6245
6246        return ret;
6247}
6248
6249/* per IB port errors.  */
6250#define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
6251        MASK_ACROSS(8, 15))
6252#define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
6253#define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
6254        MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
6255        MASK_ACROSS(0, 11))
6256
6257/*
6258 * Write the initialization per-port registers that need to be done at
6259 * driver load and after reset completes (i.e., that aren't done as part
6260 * of other init procedures called from qib_init.c).
6261 * Some of these should be redundant on reset, but play safe.
6262 */
6263static void write_7322_init_portregs(struct qib_pportdata *ppd)
6264{
6265        u64 val;
6266        int i;
6267
6268        if (!ppd->link_speed_supported) {
6269                /* no buffer credits for this port */
6270                for (i = 1; i < 8; i++)
6271                        qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
6272                qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
6273                qib_write_kreg(ppd->dd, kr_scratch, 0);
6274                return;
6275        }
6276
6277        /*
6278         * Set the number of supported virtual lanes in IBC,
6279         * for flow control packet handling on unsupported VLs
6280         */
6281        val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
6282        val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
6283        val |= (u64)(ppd->vls_supported - 1) <<
6284                SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
6285        qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
6286
6287        qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);
6288
6289        /* enable tx header checking */
6290        qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
6291                            IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
6292                            IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);
6293
6294        qib_write_kreg_port(ppd, krp_ncmodectrl,
6295                SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));
6296
6297        /*
6298         * Unconditionally clear the bufmask bits.  If SDMA is
6299         * enabled, we'll set them appropriately later.
6300         */
6301        qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
6302        qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
6303        qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
6304        if (ppd->dd->cspec->r1)
6305                ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
6306}
6307
6308/*
6309 * Write the initialization per-device registers that need to be done at
6310 * driver load and after reset completes (i.e., that aren't done as part
6311 * of other init procedures called from qib_init.c).  Also write per-port
6312 * registers that are affected by overall device config, such as QP mapping.
6313 * Some of these should be redundant on reset, but play safe.
6314 */
6315static void write_7322_initregs(struct qib_devdata *dd)
6316{
6317        struct qib_pportdata *ppd;
6318        int i, pidx;
6319        u64 val;
6320
6321        /* Set Multicast QPs received by port 2 to map to context one. */
6322        qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);
6323
6324        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
6325                unsigned n, regno;
6326                unsigned long flags;
6327
6328                if (dd->n_krcv_queues < 2 ||
6329                        !dd->pport[pidx].link_speed_supported)
6330                        continue;
6331
6332                ppd = &dd->pport[pidx];
6333
6334                /* be paranoid against later code motion, etc. */
6335                spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
6336                ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
6337                spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
6338
6339                /* Initialize QP to context mapping */
6340                regno = krp_rcvqpmaptable;
6341                val = 0;
6342                if (dd->num_pports > 1)
6343                        n = dd->first_user_ctxt / dd->num_pports;
6344                else
6345                        n = dd->first_user_ctxt - 1;
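                    /*
                     * Each 64-bit map register holds six 5-bit context
                     * numbers; a full register is written out every six
                     * entries.
                     */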
6346                for (i = 0; i < 32; ) {
6347                        unsigned ctxt;
6348
6349                        if (dd->num_pports > 1)
6350                                ctxt = (i % n) * dd->num_pports + pidx;
6351                        else if (i % n)
6352                                ctxt = (i % n) + 1;
6353                        else
6354                                ctxt = ppd->hw_pidx;
6355                        val |= ctxt << (5 * (i % 6));
6356                        i++;
6357                        if (i % 6 == 0) {
6358                                qib_write_kreg_port(ppd, regno, val);
6359                                val = 0;
6360                                regno++;
6361                        }
6362                }
6363                qib_write_kreg_port(ppd, regno, val);
6364        }
6365
6366        /*
6367         * Setup up interrupt mitigation for kernel contexts, but
6368         * Set up interrupt mitigation for kernel contexts, but
6369         * not user contexts (user contexts use interrupts when
6370         * stalled waiting for any packet, so we want those interrupts
6371         */
6372        for (i = 0; i < dd->first_user_ctxt; i++) {
6373                dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
6374                qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
6375        }
6376
6377        /*
6378         * Initialize the (disabled) rcvflow tables.  Application code
6379         * will set up each flow as it uses the flow.
6380         * Doesn't clear any of the error bits that might be set.
6381         */
6382        val = TIDFLOW_ERRBITS; /* these are W1C */
6383        for (i = 0; i < dd->cfgctxts; i++) {
6384                int flow;
6385
6386                for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
6387                        qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
6388        }
6389
6390        /*
6391         * Dual cards init to dual-port recovery, single-port cards to
6392         * the one port.  Dual-port cards may later adjust to 1 port,
6393         * and then back to dual port if both ports are connected.
6394         */
6395        if (dd->num_pports)
6396                setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
6397}
6398
6399static int qib_init_7322_variables(struct qib_devdata *dd)
6400{
6401        struct qib_pportdata *ppd;
6402        unsigned features, pidx, sbufcnt;
6403        int ret, mtu;
6404        u32 sbufs, updthresh;
6405        resource_size_t vl15off;
6406
6407        /* pport structs are contiguous, allocated after devdata */
6408        ppd = (struct qib_pportdata *)(dd + 1);
6409        dd->pport = ppd;
6410        ppd[0].dd = dd;
6411        ppd[1].dd = dd;
6412
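            /*
             * The chip-specific struct and the two per-port
             * chip-specific structs are carved from the same contiguous
             * allocation, immediately following the two pport structs.
             */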
6413        dd->cspec = (struct qib_chip_specific *)(ppd + 2);
6414
6415        ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
6416        ppd[1].cpspec = &ppd[0].cpspec[1];
6417        ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
6418        ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */
6419
6420        spin_lock_init(&dd->cspec->rcvmod_lock);
6421        spin_lock_init(&dd->cspec->gpio_lock);
6422
6423        /* we haven't yet set QIB_PRESENT, so use read directly */
6424        dd->revision = readq(&dd->kregbase[kr_revision]);
6425
6426        if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
6427                qib_dev_err(dd,
6428                        "Revision register read failure, giving up initialization\n");
6429                ret = -ENODEV;
6430                goto bail;
6431        }
6432        dd->flags |= QIB_PRESENT;  /* now register routines work */
6433
6434        dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
6435        dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
6436        dd->cspec->r1 = dd->minrev == 1;
6437
6438        get_7322_chip_params(dd);
6439        features = qib_7322_boardname(dd);
6440
6441        /* now that piobcnt2k and 4k set, we can allocate these */
6442        sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
6443                NUM_VL15_BUFS + BITS_PER_LONG - 1;
6444        sbufcnt /= BITS_PER_LONG;
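            /* i.e. BITS_TO_LONGS(): one bit per send buffer, rounded up */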
6445        dd->cspec->sendchkenable = kmalloc(sbufcnt *
6446                sizeof(*dd->cspec->sendchkenable), GFP_KERNEL);
6447        dd->cspec->sendgrhchk = kmalloc(sbufcnt *
6448                sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL);
6449        dd->cspec->sendibchk = kmalloc(sbufcnt *
6450                sizeof(*dd->cspec->sendibchk), GFP_KERNEL);
6451        if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
6452                !dd->cspec->sendibchk) {
6453                ret = -ENOMEM;
6454                goto bail;
6455        }
6456
6457        ppd = dd->pport;
6458
6459        /*
6460         * GPIO bits for TWSI data and clock,
6461         * used for serial EEPROM.
6462         */
6463        dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
6464        dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
6465        dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
6466
6467        dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
6468                QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
6469                QIB_HAS_THRESH_UPDATE |
6470                (sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
6471        dd->flags |= qib_special_trigger ?
6472                QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
6473
6474        /*
6475         * Set up initial values.  These may change when PAT is enabled, but
6476         * we need these to do initial chip register accesses.
6477         */
6478        qib_7322_set_baseaddrs(dd);
6479
6480        mtu = ib_mtu_enum_to_int(qib_ibmtu);
6481        if (mtu == -1)
6482                mtu = QIB_DEFAULT_MTU;
6483
6484        dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
6485        /* all hwerrors become interrupts, unless special purposed */
6486        dd->cspec->hwerrmask = ~0ULL;
6487        /*  link_recovery setup causes these errors, so ignore them,
6488         *  other than clearing them when they occur */
6489        dd->cspec->hwerrmask &=
6490                ~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
6491                  SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
6492                  HWE_MASK(LATriggered));
6493
6494        for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
6495                struct qib_chippport_specific *cp = ppd->cpspec;
6496
6497                ppd->link_speed_supported = features & PORT_SPD_CAP;
6498                features >>=  PORT_SPD_CAP_SHIFT;
6499                if (!ppd->link_speed_supported) {
6500                        /* single port mode (7340, or configured) */
6501                        dd->skip_kctxt_mask |= 1 << pidx;
6502                        if (pidx == 0) {
6503                                /* Make sure port is disabled. */
6504                                qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6505                                qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6506                                ppd[0] = ppd[1];
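                                    /* struct copy: the second hw port's
                                     * pportdata now occupies index 0, and
                                     * is what the next loop pass sets up */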
6507                                dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6508                                                  IBSerdesPClkNotDetectMask_0)
6509                                                  | SYM_MASK(HwErrMask,
6510                                                  SDmaMemReadErrMask_0));
6511                                dd->cspec->int_enable_mask &= ~(
6512                                     SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
6513                                     SYM_MASK(IntMask, SDmaIdleIntMask_0) |
6514                                     SYM_MASK(IntMask, SDmaProgressIntMask_0) |
6515                                     SYM_MASK(IntMask, SDmaIntMask_0) |
6516                                     SYM_MASK(IntMask, ErrIntMask_0) |
6517                                     SYM_MASK(IntMask, SendDoneIntMask_0));
6518                        } else {
6519                                /* Make sure port is disabled. */
6520                                qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6521                                qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6522                                dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6523                                                  IBSerdesPClkNotDetectMask_1)
6524                                                  | SYM_MASK(HwErrMask,
6525                                                  SDmaMemReadErrMask_1));
6526                                dd->cspec->int_enable_mask &= ~(
6527                                     SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
6528                                     SYM_MASK(IntMask, SDmaIdleIntMask_1) |
6529                                     SYM_MASK(IntMask, SDmaProgressIntMask_1) |
6530                                     SYM_MASK(IntMask, SDmaIntMask_1) |
6531                                     SYM_MASK(IntMask, ErrIntMask_1) |
6532                                     SYM_MASK(IntMask, SendDoneIntMask_1));
6533                        }
6534                        continue;
6535                }
6536
6537                dd->num_pports++;
6538                ret = qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
6539                if (ret) {
6540                        dd->num_pports--;
6541                        goto bail;
6542                }
6543
6544                ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
6545                ppd->link_width_enabled = IB_WIDTH_4X;
6546                ppd->link_speed_enabled = ppd->link_speed_supported;
6547                /*
6548                 * Set the initial values to a reasonable default; they are
6549                 * set for real when the link comes up.
6550                 */
6551                ppd->link_width_active = IB_WIDTH_4X;
6552                ppd->link_speed_active = QIB_IB_SDR;
6553                ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
6554                switch (qib_num_cfg_vls) {
6555                case 1:
6556                        ppd->vls_supported = IB_VL_VL0;
6557                        break;
6558                case 2:
6559                        ppd->vls_supported = IB_VL_VL0_1;
6560                        break;
6561                default:
6562                        qib_devinfo(dd->pcidev,
6563                                    "Invalid num_vls %u, using 4 VLs\n",
6564                                    qib_num_cfg_vls);
6565                        qib_num_cfg_vls = 4;
6566                        /* fall through */
6567                case 4:
6568                        ppd->vls_supported = IB_VL_VL0_3;
6569                        break;
6570                case 8:
6571                        if (mtu <= 2048)
6572                                ppd->vls_supported = IB_VL_VL0_7;
6573                        else {
6574                                qib_devinfo(dd->pcidev,
6575                                            "Invalid num_vls %u for MTU %d, using 4 VLs\n",
6576                                            qib_num_cfg_vls, mtu);
6577                                ppd->vls_supported = IB_VL_VL0_3;
6578                                qib_num_cfg_vls = 4;
6579                        }
6580                        break;
6581                }
6582                ppd->vls_operational = ppd->vls_supported;
6583
6584                init_waitqueue_head(&cp->autoneg_wait);
6585                INIT_DELAYED_WORK(&cp->autoneg_work,
6586                                  autoneg_7322_work);
6587                if (ppd->dd->cspec->r1)
6588                        INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);
6589
6590                /*
6591                 * For Mez and similar cards, no qsfp info, so do
6592                 * the "cable info" setup here.  Can be overridden
6593                 * in adapter-specific routines.
6594                 */
6595                if (!(dd->flags & QIB_HAS_QSFP)) {
6596                        if (!IS_QMH(dd) && !IS_QME(dd))
6597                                qib_devinfo(dd->pcidev,
6598                                        "IB%u:%u: Unknown mezzanine card type\n",
6599                                        dd->unit, ppd->port);
6600                        cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
6601                        /*
6602                         * Choose the center value as the default tx serdes
6603                         * setting until changed through the module parameter.
6604                         */
6605                        ppd->cpspec->no_eep = IS_QMH(dd) ?
6606                                TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
6607                } else
6608                        cp->h1_val = H1_FORCE_VAL;
6609
6610                /* Avoid writes to chip for mini_init */
6611                if (!qib_mini_init)
6612                        write_7322_init_portregs(ppd);
6613
6614                init_timer(&cp->chase_timer);
6615                cp->chase_timer.function = reenable_chase;
6616                cp->chase_timer.data = (unsigned long)ppd;
6617
6618                ppd++;
6619        }
6620
6621        dd->rcvhdrentsize = qib_rcvhdrentsize ?
6622                qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
6623        dd->rcvhdrsize = qib_rcvhdrsize ?
6624                qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
6625        dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
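            /* sizeof(u64) / sizeof(u32) == 2, so rhf_offset is
             * rcvhdrentsize - 2: the RHF sits in the last two 32-bit
             * words of each rcvhdrq entry */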
6626
6627        /* we always allocate at least 2048 bytes for eager buffers */
6628        dd->rcvegrbufsize = max(mtu, 2048);
6629        BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
6630        dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
6631
6632        qib_7322_tidtemplate(dd);
6633
6634        /*
6635         * We can request a receive interrupt for 1 or
6636         * more packets from current offset.
6637         */
6638        dd->rhdrhead_intr_off =
6639                (u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
6640
6641        /* setup the stats timer; the add_timer is done at end of init */
6642        init_timer(&dd->stats_timer);
6643        dd->stats_timer.function = qib_get_7322_faststats;
6644        dd->stats_timer.data = (unsigned long) dd;
6645
6646        dd->ureg_align = 0x10000;  /* 64KB alignment */
6647
6648        dd->piosize2kmax_dwords = dd->piosize2k >> 2;
6649
6650        qib_7322_config_ctxts(dd);
6651        qib_set_ctxtcnt(dd);
6652
6653        /*
6654         * We do not set WC on the VL15 buffers to avoid
6655         * a rare problem with unaligned writes from
6656         * interrupt-flushed store buffers, so we need
6657         * to map those separately here.  We can't solve
6658         * this for the rarely used mtrr case.
6659         */
6660        ret = init_chip_wc_pat(dd, 0);
6661        if (ret)
6662                goto bail;
6663
6664        /* vl15 buffers start just after the 4k buffers */
6665        vl15off = dd->physaddr + (dd->piobufbase >> 32) +
6666                  dd->piobcnt4k * dd->align4k;
6667        dd->piovl15base = ioremap_nocache(vl15off,
6668                                          NUM_VL15_BUFS * dd->align4k);
6669        if (!dd->piovl15base) {
6670                ret = -ENOMEM;
6671                goto bail;
6672        }
6673
6674        qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
6675
6676        ret = 0;
6677        if (qib_mini_init)
6678                goto bail;
6679        if (!dd->num_pports) {
6680                qib_dev_err(dd, "No ports enabled, giving up initialization\n");
6681                goto bail; /* not an error; ret stays 0 so the cause can still be examined */
6682        }
6683
6684        write_7322_initregs(dd);
6685        ret = qib_create_ctxts(dd);
6686        init_7322_cntrnames(dd);
6687
6688        updthresh = 8U; /* update threshold */
6689
6690        /* Use all the 4KB buffers for kernel SDMA, or none if SDMA is
6691         * disabled.  When SDMA is enabled, reserve for other kernel use
6692         * (sending SMI, MAD, and ACKs) the update threshold amount or 3
6693         * buffers, whichever is greater; when SDMA is disabled, the
6694         * kernel gets all the 4k buffers.
6695         * If the reserve were less than the update threshold, we could
6696         * wait a long time for an update.  Coded this way because we
6697         * sometimes change the update threshold for various reasons,
6698         * and we want this to remain robust.
6699         */
6700        if (dd->flags & QIB_HAS_SEND_DMA) {
6701                dd->cspec->sdmabufcnt = dd->piobcnt4k;
6702                sbufs = updthresh > 3 ? updthresh : 3;
6703        } else {
6704                dd->cspec->sdmabufcnt = 0;
6705                sbufs = dd->piobcnt4k;
6706        }
6707        dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
6708                dd->cspec->sdmabufcnt;
6709        dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
6710        dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
6711        dd->last_pio = dd->cspec->lastbuf_for_pio;
6712        dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
6713                dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
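            /*
             * Worked example (hypothetical counts): with 2048 PIO buffers
             * total and sdmabufcnt == 32, buffers 2016..2047 go to SDMA,
             * lastbuf_for_pio ends at 2015 inclusive, the top 'sbufs' of
             * the remainder stay with the kernel, and the lastctxt_piobuf
             * below those are divided among the user contexts.
             */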
6714
6715        /*
6716         * If we have 16 user contexts, we will have 7 sbufs
6717         * per context, so reduce the update threshold to match.  We
6718         * want to update before we actually run out, at low pbufs/ctxt
6719         * so give ourselves some margin.
6720         */
6721        if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
6722                updthresh = dd->pbufsctxt - 2;
6723        dd->cspec->updthresh_dflt = updthresh;
6724        dd->cspec->updthresh = updthresh;
6725
6726        /* before full enable, no interrupts, no locking needed */
6727        dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
6728                             << SYM_LSB(SendCtrl, AvailUpdThld)) |
6729                        SYM_MASK(SendCtrl, SendBufAvailPad64Byte);
6730
6731        dd->psxmitwait_supported = 1;
6732        dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
6733bail:
6734        if (!dd->ctxtcnt)
6735                dd->ctxtcnt = 1; /* for other initialization code */
6736
6737        return ret;
6738}
6739
6740static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
6741                                        u32 *pbufnum)
6742{
6743        u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
6744        struct qib_devdata *dd = ppd->dd;
6745
6746        /* last is same for 2k and 4k, because we use 4k if all 2k busy */
6747        if (pbc & PBC_7322_VL15_SEND) {
6748                first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
6749                last = first;
6750        } else {
6751                if ((plen + 1) > dd->piosize2kmax_dwords)
6752                        first = dd->piobcnt2k;
6753                else
6754                        first = 0;
6755                last = dd->cspec->lastbuf_for_pio;
6756        }
6757        return qib_getsendbuf_range(dd, pbufnum, first, last);
6758}
6759
6760static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
6761                                     u32 start)
6762{
6763        qib_write_kreg_port(ppd, krp_psinterval, intv);
6764        qib_write_kreg_port(ppd, krp_psstart, start);
6765}
6766
6767/*
6768 * Must be called with sdma_lock held, or before init finished.
6769 */
6770static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
6771{
6772        qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
6773}
6774
6775/*
6776 * sdma_lock should be acquired before calling this routine
6777 */
6778static void dump_sdma_7322_state(struct qib_pportdata *ppd)
6779{
6780        u64 reg, reg1, reg2;
6781
6782        reg = qib_read_kreg_port(ppd, krp_senddmastatus);
6783        qib_dev_porterr(ppd->dd, ppd->port,
6784                "SDMA senddmastatus: 0x%016llx\n", reg);
6785
6786        reg = qib_read_kreg_port(ppd, krp_sendctrl);
6787        qib_dev_porterr(ppd->dd, ppd->port,
6788                "SDMA sendctrl: 0x%016llx\n", reg);
6789
6790        reg = qib_read_kreg_port(ppd, krp_senddmabase);
6791        qib_dev_porterr(ppd->dd, ppd->port,
6792                "SDMA senddmabase: 0x%016llx\n", reg);
6793
6794        reg = qib_read_kreg_port(ppd, krp_senddmabufmask0);
6795        reg1 = qib_read_kreg_port(ppd, krp_senddmabufmask1);
6796        reg2 = qib_read_kreg_port(ppd, krp_senddmabufmask2);
6797        qib_dev_porterr(ppd->dd, ppd->port,
6798                "SDMA senddmabufmask 0:%llx  1:%llx  2:%llx\n",
6799                 reg, reg1, reg2);
6800
6801        /* get bufuse bits, clear them, and print them again if non-zero */
6802        reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
6803        qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg);
6804        reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
6805        qib_write_kreg_port(ppd, krp_senddmabuf_use1, reg1);
6806        reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
6807        qib_write_kreg_port(ppd, krp_senddmabuf_use2, reg2);
6808        /* 0 and 1 should always be zero, so print as short form */
6809        qib_dev_porterr(ppd->dd, ppd->port,
6810                 "SDMA current senddmabuf_use 0:%llx  1:%llx  2:%llx\n",
6811                 reg, reg1, reg2);
6812        reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
6813        reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
6814        reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
6815        /* 0 and 1 should always be zero, so print as short form */
6816        qib_dev_porterr(ppd->dd, ppd->port,
6817                 "SDMA cleared senddmabuf_use 0:%llx  1:%llx  2:%llx\n",
6818                 reg, reg1, reg2);
6819
6820        reg = qib_read_kreg_port(ppd, krp_senddmatail);
6821        qib_dev_porterr(ppd->dd, ppd->port,
6822                "SDMA senddmatail: 0x%016llx\n", reg);
6823
6824        reg = qib_read_kreg_port(ppd, krp_senddmahead);
6825        qib_dev_porterr(ppd->dd, ppd->port,
6826                "SDMA senddmahead: 0x%016llx\n", reg);
6827
6828        reg = qib_read_kreg_port(ppd, krp_senddmaheadaddr);
6829        qib_dev_porterr(ppd->dd, ppd->port,
6830                "SDMA senddmaheadaddr: 0x%016llx\n", reg);
6831
6832        reg = qib_read_kreg_port(ppd, krp_senddmalengen);
6833        qib_dev_porterr(ppd->dd, ppd->port,
6834                "SDMA senddmalengen: 0x%016llx\n", reg);
6835
6836        reg = qib_read_kreg_port(ppd, krp_senddmadesccnt);
6837        qib_dev_porterr(ppd->dd, ppd->port,
6838                "SDMA senddmadesccnt: 0x%016llx\n", reg);
6839
6840        reg = qib_read_kreg_port(ppd, krp_senddmaidlecnt);
6841        qib_dev_porterr(ppd->dd, ppd->port,
6842                "SDMA senddmaidlecnt: 0x%016llx\n", reg);
6843
6844        reg = qib_read_kreg_port(ppd, krp_senddmaprioritythld);
6845        qib_dev_porterr(ppd->dd, ppd->port,
6846                "SDMA senddmaprioritythld: 0x%016llx\n", reg);
6847
6848        reg = qib_read_kreg_port(ppd, krp_senddmareloadcnt);
6849        qib_dev_porterr(ppd->dd, ppd->port,
6850                "SDMA senddmareloadcnt: 0x%016llx\n", reg);
6851
6852        dump_sdma_state(ppd);
6853}
6854
6855static struct sdma_set_state_action sdma_7322_action_table[] = {
6856        [qib_sdma_state_s00_hw_down] = {
6857                .go_s99_running_tofalse = 1,
6858                .op_enable = 0,
6859                .op_intenable = 0,
6860                .op_halt = 0,
6861                .op_drain = 0,
6862        },
6863        [qib_sdma_state_s10_hw_start_up_wait] = {
6864                .op_enable = 0,
6865                .op_intenable = 1,
6866                .op_halt = 1,
6867                .op_drain = 0,
6868        },
6869        [qib_sdma_state_s20_idle] = {
6870                .op_enable = 1,
6871                .op_intenable = 1,
6872                .op_halt = 1,
6873                .op_drain = 0,
6874        },
6875        [qib_sdma_state_s30_sw_clean_up_wait] = {
6876                .op_enable = 0,
6877                .op_intenable = 1,
6878                .op_halt = 1,
6879                .op_drain = 0,
6880        },
6881        [qib_sdma_state_s40_hw_clean_up_wait] = {
6882                .op_enable = 1,
6883                .op_intenable = 1,
6884                .op_halt = 1,
6885                .op_drain = 0,
6886        },
6887        [qib_sdma_state_s50_hw_halt_wait] = {
6888                .op_enable = 1,
6889                .op_intenable = 1,
6890                .op_halt = 1,
6891                .op_drain = 1,
6892        },
6893        [qib_sdma_state_s99_running] = {
6894                .op_enable = 1,
6895                .op_intenable = 1,
6896                .op_halt = 0,
6897                .op_drain = 0,
6898                .go_s99_running_totrue = 1,
6899        },
6900};
6901
6902static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
6903{
6904        ppd->sdma_state.set_state_action = sdma_7322_action_table;
6905}
6906
6907static int init_sdma_7322_regs(struct qib_pportdata *ppd)
6908{
6909        struct qib_devdata *dd = ppd->dd;
6910        unsigned lastbuf, erstbuf;
6911        u64 senddmabufmask[3] = { 0 };
6912        int n, ret = 0;
6913
6914        qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
6915        qib_sdma_7322_setlengen(ppd);
6916        qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
6917        qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
6918        qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
6919        qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
6920
6921        if (dd->num_pports)
6922                n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
6923        else
6924                n = dd->cspec->sdmabufcnt; /* failsafe for init */
6925        erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
6926                ((dd->num_pports == 1 || ppd->port == 2) ? n :
6927                dd->cspec->sdmabufcnt);
6928        lastbuf = erstbuf + n;
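            /*
             * SDMA buffers occupy the top of the PIO buffer space: a
             * single port (or port 2) takes the n buffers ending at the
             * top, while port 1 of a two-port config starts sdmabufcnt
             * below the top.
             */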
6929
6930        ppd->sdma_state.first_sendbuf = erstbuf;
6931        ppd->sdma_state.last_sendbuf = lastbuf;
6932        for (; erstbuf < lastbuf; ++erstbuf) {
6933                unsigned word = erstbuf / BITS_PER_LONG;
6934                unsigned bit = erstbuf & (BITS_PER_LONG - 1);
6935
6936                BUG_ON(word >= 3);
6937                senddmabufmask[word] |= 1ULL << bit;
6938        }
6939        qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
6940        qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
6941        qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
6942        return ret;
6943}
6944
6945/* sdma_lock must be held */
6946static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
6947{
6948        struct qib_devdata *dd = ppd->dd;
6949        int sane;
6950        int use_dmahead;
6951        u16 swhead;
6952        u16 swtail;
6953        u16 cnt;
6954        u16 hwhead;
6955
6956        use_dmahead = __qib_sdma_running(ppd) &&
6957                (dd->flags & QIB_HAS_SDMA_TIMEOUT);
6958retry:
6959        hwhead = use_dmahead ?
6960                (u16) le64_to_cpu(*ppd->sdma_head_dma) :
6961                (u16) qib_read_kreg_port(ppd, krp_senddmahead);
6962
6963        swhead = ppd->sdma_descq_head;
6964        swtail = ppd->sdma_descq_tail;
6965        cnt = ppd->sdma_descq_cnt;
6966
6967        if (swhead < swtail)
6968                /* not wrapped */
6969                sane = (hwhead >= swhead) && (hwhead <= swtail);
6970        else if (swhead > swtail)
6971                /* wrapped around */
6972                sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
6973                        (hwhead <= swtail);
6974        else
6975                /* empty */
6976                sane = (hwhead == swhead);
6977
6978        if (unlikely(!sane)) {
6979                if (use_dmahead) {
6980                        /* try one more time, directly from the register */
6981                        use_dmahead = 0;
6982                        goto retry;
6983                }
6984                /* proceed as if no progress */
6985                hwhead = swhead;
6986        }
6987
6988        return hwhead;
6989}
6990
6991static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
6992{
6993        u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
6994
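            /*
             * Busy if a scoreboard drain or halt is still in progress,
             * if the internal halt has not yet taken effect, or if the
             * scoreboard still holds entries.
             */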
6995        return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
6996               (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
6997               !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
6998               !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
6999}
7000
7001/*
7002 * Compute the amount of delay before sending the next packet if the
7003 * port's send rate differs from the static rate set for the QP.
7004 * The delay affects the next packet and the amount of the delay is
7005 * based on the length of this packet.
7006 */
7007static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
7008                                   u8 srate, u8 vl)
7009{
7010        u8 snd_mult = ppd->delay_mult;
7011        u8 rcv_mult = ib_rate_to_delay[srate];
7012        u32 ret;
7013
7014        ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
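            /* delay only if the QP's static rate is slower than the port's
             * current rate (rcv_mult > snd_mult); scale by roughly half
             * the packet length in dwords */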
7015
7016        /* Indicate VL15, else set the VL in the control word */
7017        if (vl == 15)
7018                ret |= PBC_7322_VL15_SEND_CTRL;
7019        else
7020                ret |= vl << PBC_VL_NUM_LSB;
7021        ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
7022
7023        return ret;
7024}
7025
7026/*
7027 * Enable the per-port VL15 send buffers for use.
7028 * They follow the rest of the buffers, without a config parameter.
7029 * This was in initregs, but initregs runs before the buffer
7030 * shadow is set up, and this step has to run after the
7031 * shadow is set up.
7032 */
7033static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
7034{
7035        unsigned vl15bufs;
7036
7037        vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
7038        qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
7039                               TXCHK_CHG_TYPE_KERN, NULL);
7040}
7041
7042static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
7043{
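            /*
             * Kernel contexts (ctxt < NUM_IB_PORTS) split KCTXT0_EGRCNT
             * eager buffers between them; user contexts each get
             * cspec->rcvegrcnt, with TID bases laid out consecutively
             * after the kernel contexts'.
             */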
7044        if (rcd->ctxt < NUM_IB_PORTS) {
7045                if (rcd->dd->num_pports > 1) {
7046                        rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
7047                        rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
7048                } else {
7049                        rcd->rcvegrcnt = KCTXT0_EGRCNT;
7050                        rcd->rcvegr_tid_base = 0;
7051                }
7052        } else {
7053                rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
7054                rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
7055                        (rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
7056        }
7057}
7058
7059#define QTXSLEEPS 5000
7060static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
7061                                  u32 len, u32 which, struct qib_ctxtdata *rcd)
7062{
7063        int i;
7064        const int last = start + len - 1;
7065        const int lastr = last / BITS_PER_LONG;
7066        u32 sleeps = 0;
7067        int wait = rcd != NULL;
7068        unsigned long flags;
7069
7070        while (wait) {
7071                unsigned long shadow = 0;
7072                int cstart, previ = -1;
7073
7074                /*
7075                 * when flipping from kernel to user, we can't change
7076                 * the checking type if the buffer is allocated to the
7077                 * driver.  It's OK in the other direction, because it's
7078                 * from close, and we have just disarm'ed all the
7079                 * buffers.  All the kernel to kernel changes are also
7080                 * OK.
7081                 */
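                    /*
                     * Each buffer has two bits in the pioavail shadow,
                     * hence "2 * cstart" when locating its busy bit.
                     */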
7082                for (cstart = start; cstart <= last; cstart++) {
7083                        i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
7084                                / BITS_PER_LONG;
7085                        if (i != previ) {
7086                                shadow = (unsigned long)
7087                                        le64_to_cpu(dd->pioavailregs_dma[i]);
7088                                previ = i;
7089                        }
7090                        if (test_bit(((2 * cstart) +
7091                                      QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
7092                                     % BITS_PER_LONG, &shadow))
7093                                break;
7094                }
7095
7096                if (cstart > last)
7097                        break;
7098
7099                if (sleeps == QTXSLEEPS)
7100                        break;
7101                /* make sure we see an updated copy next time around */
7102                sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7103                sleeps++;
7104                msleep(20);
7105        }
7106
7107        switch (which) {
7108        case TXCHK_CHG_TYPE_DIS1:
7109                /*
7110                 * disable checking on a range; used by diags; just
7111                 * one buffer, but still written generically
7112                 */
7113                for (i = start; i <= last; i++)
7114                        clear_bit(i, dd->cspec->sendchkenable);
7115                break;
7116
7117        case TXCHK_CHG_TYPE_ENAB1:
7118                /*
7119                 * (re)enable checking on a range; used by diags; just
7120                 * one buffer, but still written generically; read
7121                 * scratch to be sure buffer actually triggered, not
7122                 * just flushed from processor.
7123                 */
7124                qib_read_kreg32(dd, kr_scratch);
7125                for (i = start; i <= last; i++)
7126                        set_bit(i, dd->cspec->sendchkenable);
7127                break;
7128
7129        case TXCHK_CHG_TYPE_KERN:
7130                /* usable by kernel */
7131                for (i = start; i <= last; i++) {
7132                        set_bit(i, dd->cspec->sendibchk);
7133                        clear_bit(i, dd->cspec->sendgrhchk);
7134                }
7135                spin_lock_irqsave(&dd->uctxt_lock, flags);
7136                /* see if we need to raise avail update threshold */
7137                for (i = dd->first_user_ctxt;
7138                     dd->cspec->updthresh != dd->cspec->updthresh_dflt
7139                     && i < dd->cfgctxts; i++)
7140                        if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
7141                           ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
7142                           < dd->cspec->updthresh_dflt)
7143                                break;
7144                spin_unlock_irqrestore(&dd->uctxt_lock, flags);
7145                if (i == dd->cfgctxts) {
7146                        spin_lock_irqsave(&dd->sendctrl_lock, flags);
7147                        dd->cspec->updthresh = dd->cspec->updthresh_dflt;
7148                        dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
7149                        dd->sendctrl |= (dd->cspec->updthresh &
7150                                         SYM_RMASK(SendCtrl, AvailUpdThld)) <<
7151                                           SYM_LSB(SendCtrl, AvailUpdThld);
7152                        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7153                        sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7154                }
7155                break;
7156
7157        case TXCHK_CHG_TYPE_USER:
7158                /* for user process */
7159                for (i = start; i <= last; i++) {
7160                        clear_bit(i, dd->cspec->sendibchk);
7161                        set_bit(i, dd->cspec->sendgrhchk);
7162                }
7163                spin_lock_irqsave(&dd->sendctrl_lock, flags);
7164                if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
7165                        / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
7166                        dd->cspec->updthresh = (rcd->piocnt /
7167                                                rcd->subctxt_cnt) - 1;
7168                        dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
7169                        dd->sendctrl |= (dd->cspec->updthresh &
7170                                        SYM_RMASK(SendCtrl, AvailUpdThld))
7171                                        << SYM_LSB(SendCtrl, AvailUpdThld);
7172                        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7173                        sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7174                } else
7175                        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7176                break;
7177
7178        default:
7179                break;
7180        }
7181
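            /*
             * Push the updated shadow masks to the chip: the diag
             * enable/disable cases (which >= 2) changed sendchkenable;
             * the kernel/user cases changed the GRH-check and IB-packet
             * masks.
             */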
7182        for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
7183                qib_write_kreg(dd, kr_sendcheckmask + i,
7184                               dd->cspec->sendchkenable[i]);
7185
7186        for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
7187                qib_write_kreg(dd, kr_sendgrhcheckmask + i,
7188                               dd->cspec->sendgrhchk[i]);
7189                qib_write_kreg(dd, kr_sendibpktmask + i,
7190                               dd->cspec->sendibchk[i]);
7191        }
7192
7193        /*
7194         * Be sure whatever we did was seen by the chip and acted upon,
7195         * before we return.  Mostly important for which >= 2.
7196         */
7197        qib_read_kreg32(dd, kr_scratch);
7198}
7199
7200
7201/* useful for trigger analyzers, etc. */
7202static void writescratch(struct qib_devdata *dd, u32 val)
7203{
7204        qib_write_kreg(dd, kr_scratch, val);
7205}
7206
7207/* Dummy for now, use chip regs soon */
7208static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
7209{
7210        return -ENXIO;
7211}
7212
7213/**
7214 * qib_init_iba7322_funcs - set up the chip-specific function pointers
7215 * @pdev: the pci_dev for the qlogic_ib device
7216 * @ent: pci_device_id struct for this dev
7217 *
7218 * Also allocates, inits, and returns the devdata struct for this
7219 * device instance
7220 *
7221 * This is global, and is called directly at init to set up the
7222 * chip-specific function pointers for later use.
7223 */
7224struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
7225                                           const struct pci_device_id *ent)
7226{
7227        struct qib_devdata *dd;
7228        int ret, i;
7229        u32 tabsize, actual_cnt = 0;
7230
7231        dd = qib_alloc_devdata(pdev,
7232                NUM_IB_PORTS * sizeof(struct qib_pportdata) +
7233                sizeof(struct qib_chip_specific) +
7234                NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
7235        if (IS_ERR(dd))
7236                goto bail;
7237
7238        dd->f_bringup_serdes    = qib_7322_bringup_serdes;
7239        dd->f_cleanup           = qib_setup_7322_cleanup;
7240        dd->f_clear_tids        = qib_7322_clear_tids;
7241        dd->f_free_irq          = qib_7322_free_irq;
7242        dd->f_get_base_info     = qib_7322_get_base_info;
7243        dd->f_get_msgheader     = qib_7322_get_msgheader;
7244        dd->f_getsendbuf        = qib_7322_getsendbuf;
7245        dd->f_gpio_mod          = gpio_7322_mod;
7246        dd->f_eeprom_wen        = qib_7322_eeprom_wen;
7247        dd->f_hdrqempty         = qib_7322_hdrqempty;
7248        dd->f_ib_updown         = qib_7322_ib_updown;
7249        dd->f_init_ctxt         = qib_7322_init_ctxt;
7250        dd->f_initvl15_bufs     = qib_7322_initvl15_bufs;
7251        dd->f_intr_fallback     = qib_7322_intr_fallback;
7252        dd->f_late_initreg      = qib_late_7322_initreg;
7253        dd->f_setpbc_control    = qib_7322_setpbc_control;
7254        dd->f_portcntr          = qib_portcntr_7322;
7255        dd->f_put_tid           = qib_7322_put_tid;
7256        dd->f_quiet_serdes      = qib_7322_mini_quiet_serdes;
7257        dd->f_rcvctrl           = rcvctrl_7322_mod;
7258        dd->f_read_cntrs        = qib_read_7322cntrs;
7259        dd->f_read_portcntrs    = qib_read_7322portcntrs;
7260        dd->f_reset             = qib_do_7322_reset;
7261        dd->f_init_sdma_regs    = init_sdma_7322_regs;
7262        dd->f_sdma_busy         = qib_sdma_7322_busy;
7263        dd->f_sdma_gethead      = qib_sdma_7322_gethead;
7264        dd->f_sdma_sendctrl     = qib_7322_sdma_sendctrl;
7265        dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
7266        dd->f_sdma_update_tail  = qib_sdma_update_7322_tail;
7267        dd->f_sendctrl          = sendctrl_7322_mod;
7268        dd->f_set_armlaunch     = qib_set_7322_armlaunch;
7269        dd->f_set_cntr_sample   = qib_set_cntr_7322_sample;
7270        dd->f_iblink_state      = qib_7322_iblink_state;
7271        dd->f_ibphys_portstate  = qib_7322_phys_portstate;
7272        dd->f_get_ib_cfg        = qib_7322_get_ib_cfg;
7273        dd->f_set_ib_cfg        = qib_7322_set_ib_cfg;
7274        dd->f_set_ib_loopback   = qib_7322_set_loopback;
7275        dd->f_get_ib_table      = qib_7322_get_ib_table;
7276        dd->f_set_ib_table      = qib_7322_set_ib_table;
7277        dd->f_set_intr_state    = qib_7322_set_intr_state;
7278        dd->f_setextled         = qib_setup_7322_setextled;
7279        dd->f_txchk_change      = qib_7322_txchk_change;
7280        dd->f_update_usrhead    = qib_update_7322_usrhead;
7281        dd->f_wantpiobuf_intr   = qib_wantpiobuf_7322_intr;
7282        dd->f_xgxs_reset        = qib_7322_mini_pcs_reset;
7283        dd->f_sdma_hw_clean_up  = qib_7322_sdma_hw_clean_up;
7284        dd->f_sdma_hw_start_up  = qib_7322_sdma_hw_start_up;
7285        dd->f_sdma_init_early   = qib_7322_sdma_init_early;
7286        dd->f_writescratch      = writescratch;
7287        dd->f_tempsense_rd      = qib_7322_tempsense_rd;
7288#ifdef CONFIG_INFINIBAND_QIB_DCA
7289        dd->f_notify_dca        = qib_7322_notify_dca;
7290#endif
7291        /*
7292         * Do remaining PCIe setup and save PCIe values in dd.
7293         * Any error printing is already done by the init code.
7294         * On return, we have the chip mapped, but chip registers
7295         * are not set up until start of qib_init_7322_variables.
7296         */
7297        ret = qib_pcie_ddinit(dd, pdev, ent);
7298        if (ret < 0)
7299                goto bail_free;
7300
7301        /* initialize chip-specific variables */
7302        ret = qib_init_7322_variables(dd);
7303        if (ret)
7304                goto bail_cleanup;
7305
7306        if (qib_mini_init || !dd->num_pports)
7307                goto bail;
7308
7309        /*
7310         * Determine number of vectors we want; depends on port count
7311         * and number of configured kernel receive queues actually used.
7312         * Should also depend on whether sdma is enabled or not, but
7313         * that's such a rare testing case it's not worth worrying about.
7314         */
7315        tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
7316        for (i = 0; i < tabsize; i++)
7317                if ((i < ARRAY_SIZE(irq_table) &&
7318                     irq_table[i].port <= dd->num_pports) ||
7319                    (i >= ARRAY_SIZE(irq_table) &&
7320                     dd->rcd[i - ARRAY_SIZE(irq_table)]))
7321                        actual_cnt++;
7322        /* the per-port kernel contexts (ctxts < 2) don't use MSI-X here */
7323        if (qib_krcvq01_no_msi)
7324                actual_cnt -= dd->num_pports;
7325
7326        tabsize = actual_cnt;
7327        dd->cspec->msix_entries = kzalloc(tabsize *
7328                        sizeof(struct qib_msix_entry), GFP_KERNEL);
7329        if (!dd->cspec->msix_entries)
7330                tabsize = 0;
7331
7332        for (i = 0; i < tabsize; i++)
7333                dd->cspec->msix_entries[i].msix.entry = i;
7334
7335        if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
7336                qib_dev_err(dd,
7337                        "Failed to setup PCIe or interrupts; continuing anyway\n");
7338        /* may be less than we wanted, if not enough available */
7339        dd->cspec->num_msix_entries = tabsize;
7340
7341        /* setup interrupt handler */
7342        qib_setup_7322_interrupt(dd, 1);
7343
7344        /* clear diagctrl register, in case diags were running and crashed */
7345        qib_write_kreg(dd, kr_hwdiagctrl, 0);
7346#ifdef CONFIG_INFINIBAND_QIB_DCA
7347        if (!dca_add_requester(&pdev->dev)) {
7348                qib_devinfo(dd->pcidev, "DCA enabled\n");
7349                dd->flags |= QIB_DCA_ENABLED;
7350                qib_setup_dca(dd);
7351        }
7352#endif
7353        goto bail;
7354
7355bail_cleanup:
7356        qib_pcie_ddcleanup(dd);
7357bail_free:
7358        qib_free_devdata(dd);
7359        dd = ERR_PTR(ret);
7360bail:
7361        return dd;
7362}
7363
7364/*
7365 * Set the table entry at the specified index from the table specified.
7366 * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
7367 * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
7368 * 'ridx' below addresses the correct entry, while its 4 LSBs select the
7369 * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
7370 */
7371#define DDS_ENT_AMP_LSB 14
7372#define DDS_ENT_MAIN_LSB 9
7373#define DDS_ENT_POST_LSB 5
7374#define DDS_ENT_PRE_XTRA_LSB 3
7375#define DDS_ENT_PRE_LSB 0
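    /*
     * Field layout implied by the LSB values above: pre[2:0],
     * pre_xtra[4:3], post[8:5], main[13:9], amp[14..].
     */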
7376
7377/*
7378 * Set one entry in the TxDDS table for spec'd port
7379 * ridx picks one of the entries, while tp points
7380 * to the appropriate table entry.
7381 */
7382static void set_txdds(struct qib_pportdata *ppd, int ridx,
7383                      const struct txdds_ent *tp)
7384{
7385        struct qib_devdata *dd = ppd->dd;
7386        u32 pack_ent;
7387        int regidx;
7388
7389        /* Get correct offset in chip-space, and in source table */
7390        regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
7391        /*
7392         * We do not use qib_write_kreg_port() because it was intended
7393         * only for registers in the lower "port specific" pages.
7394 * So do the index calculation by hand.
7395         */
7396        if (ppd->hw_pidx)
7397                regidx += (dd->palign / sizeof(u64));
7398
7399        pack_ent = tp->amp << DDS_ENT_AMP_LSB;
7400        pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
7401        pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
7402        pack_ent |= tp->post << DDS_ENT_POST_LSB;
7403        qib_write_kreg(dd, regidx, pack_ent);
7404        /* Prevent back-to-back writes by hitting scratch */
7405        qib_write_kreg(ppd->dd, kr_scratch, 0);
7406}
7407
7408static const struct vendor_txdds_ent vendor_txdds[] = {
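            /*
             * Each entry: vendor OUI, part number (NULL matches any part
             * from that vendor), then the SDR, DDR, and QDR TxDDS values.
             */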
7409        { /* Amphenol 1m 30awg NoEq */
7410                { 0x41, 0x50, 0x48 }, "584470002       ",
7411                { 10,  0,  0,  5 }, { 10,  0,  0,  9 }, {  7,  1,  0, 13 },
7412        },
7413        { /* Amphenol 3m 28awg NoEq */
7414                { 0x41, 0x50, 0x48 }, "584470004       ",
7415                {  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  7, 15 },
7416        },
7417        { /* Finisar 3m OM2 Optical */
7418                { 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
7419                {  0,  0,  0,  3 }, {  0,  0,  0,  4 }, {  0,  0,  0, 13 },
7420        },
7421        { /* Finisar 30m OM2 Optical */
7422                { 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
7423                {  0,  0,  0,  1 }, {  0,  0,  0,  5 }, {  0,  0,  0, 11 },
7424        },
7425        { /* Finisar Default OM2 Optical */
7426                { 0x00, 0x90, 0x65 }, NULL,
7427                {  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  0,  0, 12 },
7428        },
7429        { /* Gore 1m 30awg NoEq */
7430                { 0x00, 0x21, 0x77 }, "QSN3300-1       ",
7431                {  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  0, 15 },
7432        },
7433        { /* Gore 2m 30awg NoEq */
7434                { 0x00, 0x21, 0x77 }, "QSN3300-2       ",
7435                {  0,  0,  0,  8 }, {  0,  0,  0, 10 }, {  0,  1,  7, 15 },
7436        },
7437        { /* Gore 1m 28awg NoEq */
7438                { 0x00, 0x21, 0x77 }, "QSN3800-1       ",
7439                {  0,  0,  0,  6 }, {  0,  0,  0,  8 }, {  0,  1,  0, 15 },
7440        },
7441        { /* Gore 3m 28awg NoEq */
7442                { 0x00, 0x21, 0x77 }, "QSN3800-3       ",
7443                {  0,  0,  0,  9 }, {  0,  0,  0, 13 }, {  0,  1,  7, 15 },
7444        },
7445        { /* Gore 5m 24awg Eq */
7446                { 0x00, 0x21, 0x77 }, "QSN7000-5       ",
7447                {  0,  0,  0,  7 }, {  0,  0,  0,  9 }, {  0,  1,  3, 15 },
7448        },
7449        { /* Gore 7m 24awg Eq */
7450                { 0x00, 0x21, 0x77 }, "QSN7000-7       ",
7451                {  0,  0,  0,  9 }, {  0,  0,  0, 11 }, {  0,  2,  6, 15 },
7452        },
7453        { /* Gore 5m 26awg Eq */
7454                { 0x00, 0x21, 0x77 }, "QSN7600-5       ",
7455                {  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  9, 13 },
7456        },
7457        { /* Gore 7m 26awg Eq */
7458                { 0x00, 0x21, 0x77 }, "QSN7600-7       ",
7459                {  0,  0,  0,  8 }, {  0,  0,  0, 11 }, { 10,  1,  8, 15 },
7460        },
7461        { /* Intersil 12m 24awg Active */
7462                { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
7463                {  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  3,  0,  9 },
7464        },
7465        { /* Intersil 10m 28awg Active */
7466                { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
7467                {  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  2,  0,  2 },
7468        },
7469        { /* Intersil 7m 30awg Active */
7470                { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
7471                {  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  1,  0,  3 },
7472        },
7473        { /* Intersil 5m 32awg Active */
7474                { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
7475                {  0,  0,  0,  6 }, {  0,  0,  0,  6 }, {  0,  2,  0,  8 },
7476        },
7477        { /* Intersil Default Active */
7478                { 0x00, 0x30, 0xB4 }, NULL,
7479                {  0,  0,  0,  6 }, {  0,  0,  0,  5 }, {  0,  2,  0,  5 },
7480        },
7481        { /* Luxtera 20m Active Optical */
7482                { 0x00, 0x25, 0x63 }, NULL,
7483                {  0,  0,  0,  5 }, {  0,  0,  0,  8 }, {  0,  2,  0,  12 },
7484        },
7485        { /* Molex 1M Cu loopback */
7486                { 0x00, 0x09, 0x3A }, "74763-0025      ",
7487                {  2,  2,  6, 15 }, {  2,  2,  6, 15 }, {  2,  2,  6, 15 },
7488        },
7489        { /* Molex 2m 28awg NoEq */
7490                { 0x00, 0x09, 0x3A }, "74757-2201      ",
7491                {  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  1, 15 },
7492        },
7493};
7494
7495static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
7496        /* amp, pre, main, post */
7497        {  2, 2, 15,  6 },      /* Loopback */
7498        {  0, 0,  0,  1 },      /*  2 dB */
7499        {  0, 0,  0,  2 },      /*  3 dB */
7500        {  0, 0,  0,  3 },      /*  4 dB */
7501        {  0, 0,  0,  4 },      /*  5 dB */
7502        {  0, 0,  0,  5 },      /*  6 dB */
7503        {  0, 0,  0,  6 },      /*  7 dB */
7504        {  0, 0,  0,  7 },      /*  8 dB */
7505        {  0, 0,  0,  8 },      /*  9 dB */
7506        {  0, 0,  0,  9 },      /* 10 dB */
7507        {  0, 0,  0, 10 },      /* 11 dB */
7508        {  0, 0,  0, 11 },      /* 12 dB */
7509        {  0, 0,  0, 12 },      /* 13 dB */
7510        {  0, 0,  0, 13 },      /* 14 dB */
7511        {  0, 0,  0, 14 },      /* 15 dB */
7512        {  0, 0,  0, 15 },      /* 16 dB */
7513};
7514
7515static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
7516        /* amp, pre, main, post */
7517        {  2, 2, 15,  6 },      /* Loopback */
7518        {  0, 0,  0,  8 },      /*  2 dB */
7519        {  0, 0,  0,  8 },      /*  3 dB */
7520        {  0, 0,  0,  9 },      /*  4 dB */
7521        {  0, 0,  0,  9 },      /*  5 dB */
7522        {  0, 0,  0, 10 },      /*  6 dB */
7523        {  0, 0,  0, 10 },      /*  7 dB */
7524        {  0, 0,  0, 11 },      /*  8 dB */
7525        {  0, 0,  0, 11 },      /*  9 dB */
7526        {  0, 0,  0, 12 },      /* 10 dB */
7527        {  0, 0,  0, 12 },      /* 11 dB */
7528        {  0, 0,  0, 13 },      /* 12 dB */
7529        {  0, 0,  0, 13 },      /* 13 dB */
7530        {  0, 0,  0, 14 },      /* 14 dB */
7531        {  0, 0,  0, 14 },      /* 15 dB */
7532        {  0, 0,  0, 15 },      /* 16 dB */
7533};
7534
7535static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
7536        /* amp, pre, main, post */
7537        {  2, 2, 15,  6 },      /* Loopback */
7538        {  0, 1,  0,  7 },      /*  2 dB (also QMH7342) */
7539        {  0, 1,  0,  9 },      /*  3 dB (also QMH7342) */
7540        {  0, 1,  0, 11 },      /*  4 dB */
7541        {  0, 1,  0, 13 },      /*  5 dB */
7542        {  0, 1,  0, 15 },      /*  6 dB */
7543        {  0, 1,  3, 15 },      /*  7 dB */
7544        {  0, 1,  7, 15 },      /*  8 dB */
7545        {  0, 1,  7, 15 },      /*  9 dB */
7546        {  0, 1,  8, 15 },      /* 10 dB */
7547        {  0, 1,  9, 15 },      /* 11 dB */
7548        {  0, 1, 10, 15 },      /* 12 dB */
7549        {  0, 2,  6, 15 },      /* 13 dB */
7550        {  0, 2,  7, 15 },      /* 14 dB */
7551        {  0, 2,  8, 15 },      /* 15 dB */
7552        {  0, 2,  9, 15 },      /* 16 dB */
7553};
7554
7555/*
7556 * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
7557 * These are mostly used for mez cards going through connectors
7558 * and backplane traces, but can be used to add other "unusual"
7559 * table values as well.
7560 */
7561static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
7562        /* amp, pre, main, post */
7563        {  0, 0, 0,  1 },       /* QMH7342 backplane settings */
7564        {  0, 0, 0,  1 },       /* QMH7342 backplane settings */
7565        {  0, 0, 0,  2 },       /* QMH7342 backplane settings */
7566        {  0, 0, 0,  2 },       /* QMH7342 backplane settings */
7567        {  0, 0, 0,  3 },       /* QMH7342 backplane settings */
7568        {  0, 0, 0,  4 },       /* QMH7342 backplane settings */
7569        {  0, 1, 4, 15 },       /* QME7342 backplane settings 1.0 */
7570        {  0, 1, 3, 15 },       /* QME7342 backplane settings 1.0 */
7571        {  0, 1, 0, 12 },       /* QME7342 backplane settings 1.0 */
7572        {  0, 1, 0, 11 },       /* QME7342 backplane settings 1.0 */
7573        {  0, 1, 0,  9 },       /* QME7342 backplane settings 1.0 */
7574        {  0, 1, 0, 14 },       /* QME7342 backplane settings 1.0 */
7575        {  0, 1, 2, 15 },       /* QME7342 backplane settings 1.0 */
7576        {  0, 1, 0, 11 },       /* QME7342 backplane settings 1.1 */
7577        {  0, 1, 0,  7 },       /* QME7342 backplane settings 1.1 */
7578        {  0, 1, 0,  9 },       /* QME7342 backplane settings 1.1 */
7579        {  0, 1, 0,  6 },       /* QME7342 backplane settings 1.1 */
7580        {  0, 1, 0,  8 },       /* QME7342 backplane settings 1.1 */
7581};
7582
7583static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
7584        /* amp, pre, main, post */
7585        {  0, 0, 0,  7 },       /* QMH7342 backplane settings */
7586        {  0, 0, 0,  7 },       /* QMH7342 backplane settings */
7587        {  0, 0, 0,  8 },       /* QMH7342 backplane settings */
7588        {  0, 0, 0,  8 },       /* QMH7342 backplane settings */
7589        {  0, 0, 0,  9 },       /* QMH7342 backplane settings */
7590        {  0, 0, 0, 10 },       /* QMH7342 backplane settings */
7591        {  0, 1, 4, 15 },       /* QME7342 backplane settings 1.0 */
7592        {  0, 1, 3, 15 },       /* QME7342 backplane settings 1.0 */
7593        {  0, 1, 0, 12 },       /* QME7342 backplane settings 1.0 */
7594        {  0, 1, 0, 11 },       /* QME7342 backplane settings 1.0 */
7595        {  0, 1, 0,  9 },       /* QME7342 backplane settings 1.0 */
7596        {  0, 1, 0, 14 },       /* QME7342 backplane settings 1.0 */
7597        {  0, 1, 2, 15 },       /* QME7342 backplane settings 1.0 */
7598        {  0, 1, 0, 11 },       /* QME7342 backplane settings 1.1 */
7599        {  0, 1, 0,  7 },       /* QME7342 backplane settings 1.1 */
7600        {  0, 1, 0,  9 },       /* QME7342 backplane settings 1.1 */
7601        {  0, 1, 0,  6 },       /* QME7342 backplane settings 1.1 */
7602        {  0, 1, 0,  8 },       /* QME7342 backplane settings 1.1 */
7603};
7604
7605static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
7606        /* amp, pre, main, post */
7607        {  0, 1,  0,  4 },      /* QMH7342 backplane settings */
7608        {  0, 1,  0,  5 },      /* QMH7342 backplane settings */
7609        {  0, 1,  0,  6 },      /* QMH7342 backplane settings */
7610        {  0, 1,  0,  8 },      /* QMH7342 backplane settings */
7611        {  0, 1,  0, 10 },      /* QMH7342 backplane settings */
7612        {  0, 1,  0, 12 },      /* QMH7342 backplane settings */
7613        {  0, 1,  4, 15 },      /* QME7342 backplane settings 1.0 */
7614        {  0, 1,  3, 15 },      /* QME7342 backplane settings 1.0 */
7615        {  0, 1,  0, 12 },      /* QME7342 backplane settings 1.0 */
7616        {  0, 1,  0, 11 },      /* QME7342 backplane settings 1.0 */
7617        {  0, 1,  0,  9 },      /* QME7342 backplane settings 1.0 */
7618        {  0, 1,  0, 14 },      /* QME7342 backplane settings 1.0 */
7619        {  0, 1,  2, 15 },      /* QME7342 backplane settings 1.0 */
7620        {  0, 1,  0, 11 },      /* QME7342 backplane settings 1.1 */
7621        {  0, 1,  0,  7 },      /* QME7342 backplane settings 1.1 */
7622        {  0, 1,  0,  9 },      /* QME7342 backplane settings 1.1 */
7623        {  0, 1,  0,  6 },      /* QME7342 backplane settings 1.1 */
7624        {  0, 1,  0,  8 },      /* QME7342 backplane settings 1.1 */
7625};
7626
7627static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
7628        /* amp, pre, main, post */
7629        { 0, 0, 0, 0 },         /* QME7342 mfg settings */
7630        { 0, 0, 0, 6 },         /* QME7342 P2 mfg settings */
7631};
7632
7633static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
7634                                               unsigned atten)
7635{
7636        /*
7637         * The attenuation table starts at 2dB for entry 1,
7638         * with entry 0 being the loopback entry.
7639         */
7640        if (atten <= 2)
7641                atten = 1;
7642        else if (atten > TXDDS_TABLE_SZ)
7643                atten = TXDDS_TABLE_SZ - 1;
7644        else
7645                atten--;
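            /* net effect: an attenuation of N dB selects entry N - 1,
             * the "N dB" row in the tables above */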
7646        return txdds + atten;
7647}
7648
7649/*
7650 * if override is set, the module parameter txselect has a value
7651 * for this specific port, so use it, rather than our normal mechanism.
7652 */
7653static void find_best_ent(struct qib_pportdata *ppd,
7654                          const struct txdds_ent **sdr_dds,
7655                          const struct txdds_ent **ddr_dds,
7656                          const struct txdds_ent **qdr_dds, int override)
7657{
7658        struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
7659        int idx;
7660
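            /*
             * txselect/no_eep index ranges (range-checked below):
             * 0..TXDDS_TABLE_SZ-1 index the main attenuation tables, the
             * next TXDDS_EXTRA_SZ entries index the "extra" (backplane)
             * tables, and the final TXDDS_MFG_SZ entries (QME/QMH only)
             * index the mfg test table.
             */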
7661        /* Search table of known cables */
7662        for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
7663                const struct vendor_txdds_ent *v = vendor_txdds + idx;
7664
7665                if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
7666                    (!v->partnum ||
7667                     !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
7668                        *sdr_dds = &v->sdr;
7669                        *ddr_dds = &v->ddr;
7670                        *qdr_dds = &v->qdr;
7671                        return;
7672                }
7673        }
7674
7675        /* Active cables don't have attenuation so we only set SERDES
7676         * settings to account for the attenuation of the board traces. */
7677        if (!override && QSFP_IS_ACTIVE(qd->tech)) {
7678                *sdr_dds = txdds_sdr + ppd->dd->board_atten;
7679                *ddr_dds = txdds_ddr + ppd->dd->board_atten;
7680                *qdr_dds = txdds_qdr + ppd->dd->board_atten;
7681                return;
7682        }
7683
7684        if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
7685                                                      qd->atten[1])) {
7686                *sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
7687                *ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
7688                *qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
7689                return;
7690        } else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
7691                /*
7692                 * If we have no (or incomplete) data from the cable
7693                 * EEPROM, or no QSFP, or override is set, use the
7694                 * module parameter value to index into the attenuation
7695                 * table.
7696                 */
7697                idx = ppd->cpspec->no_eep;
7698                *sdr_dds = &txdds_sdr[idx];
7699                *ddr_dds = &txdds_ddr[idx];
7700                *qdr_dds = &txdds_qdr[idx];
7701        } else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
7702                /* similar to above, but index into the "extra" table. */
7703                idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
7704                *sdr_dds = &txdds_extra_sdr[idx];
7705                *ddr_dds = &txdds_extra_ddr[idx];
7706                *qdr_dds = &txdds_extra_qdr[idx];
7707        } else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
7708                   ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
7709                                          TXDDS_MFG_SZ)) {
7710                idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
7711                pr_info("IB%u:%u use idx %u into txdds_mfg\n",
7712                        ppd->dd->unit, ppd->port, idx);
7713                *sdr_dds = &txdds_extra_mfg[idx];
7714                *ddr_dds = &txdds_extra_mfg[idx];
7715                *qdr_dds = &txdds_extra_mfg[idx];
7716        } else {
7717                /* this shouldn't happen, it's range checked */
7718                *sdr_dds = txdds_sdr + qib_long_atten;
7719                *ddr_dds = txdds_ddr + qib_long_atten;
7720                *qdr_dds = txdds_qdr + qib_long_atten;
7721        }
7722}
7723
7724static void init_txdds_table(struct qib_pportdata *ppd, int override)
7725{
7726        const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
7727        struct txdds_ent *dds;
7728        int idx;
7729        int single_ent = 0;
7730
7731        find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
7732
7733        /* for mez cards or override, use the selected value for all entries */
7734        if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
7735                single_ent = 1;
7736
7737        /* Fill in the first entry with the best entry found. */
7738        set_txdds(ppd, 0, sdr_dds);
7739        set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
7740        set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
7741        if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
7742                QIBL_LINKACTIVE)) {
7743                dds = (struct txdds_ent *)(ppd->link_speed_active ==
7744                                           QIB_IB_QDR ?  qdr_dds :
7745                                           (ppd->link_speed_active ==
7746                                            QIB_IB_DDR ? ddr_dds : sdr_dds));
7747                write_tx_serdes_param(ppd, dds);
7748        }
7749
7750        /* Fill in the remaining entries with the default table values. */
7751        for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
7752                set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
7753                set_txdds(ppd, idx + TXDDS_TABLE_SZ,
7754                          single_ent ? ddr_dds : txdds_ddr + idx);
7755                set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
7756                          single_ent ? qdr_dds : txdds_qdr + idx);
7757        }
7758}
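/*
 * The serdes DDS table that results (as implied by the indices above) is
 * three consecutive groups of TXDDS_TABLE_SZ entries, one per speed:
 *
 *      entry 0                  .. TXDDS_TABLE_SZ - 1       SDR
 *      entry TXDDS_TABLE_SZ     .. 2 * TXDDS_TABLE_SZ - 1   DDR
 *      entry 2 * TXDDS_TABLE_SZ .. 3 * TXDDS_TABLE_SZ - 1   QDR
 */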
7759
7760#define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
7761#define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
7762#define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
7763#define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
7764#define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
7765#define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
7766#define AHB_TRANS_TRIES 10
7767
7768/*
7769 * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan3,
7770 * 5=subsystem, which is why most calls pass "chan + (chan >> 1)"
7771 * (mapping 0->0, 1->1, 2->3, 3->4) for the channel argument.
7772 */
7773static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
7774                    u32 data, u32 mask)
7775{
7776        u32 rd_data, wr_data, sz_mask;
7777        u64 trans, acc, prev_acc;
7778        u32 ret = 0xBAD0BAD;
7779        int tries;
7780
7781        prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
7782        /* From this point on, make sure we restore prev_acc on exit */
7783        acc = (quad << 1) | 1;
7784        qib_write_kreg(dd, KR_AHB_ACC, acc);
7785
7786        for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7787                trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7788                if (trans & AHB_TRANS_RDY)
7789                        break;
7790        }
7791        if (tries >= AHB_TRANS_TRIES) {
7792                qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
7793                goto bail;
7794        }
7795
7796        /* If mask is not all 1s, we need to read, but different SerDes
7797         * entities have different sizes
7798         */
7799        sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
7800        wr_data = data & mask & sz_mask;
7801        if ((~mask & sz_mask) != 0) {
7802                trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7803                qib_write_kreg(dd, KR_AHB_TRANS, trans);
7804
7805                for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7806                        trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7807                        if (trans & AHB_TRANS_RDY)
7808                                break;
7809                }
7810                if (tries >= AHB_TRANS_TRIES) {
7811                        qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
7812                                    AHB_TRANS_TRIES);
7813                        goto bail;
7814                }
7815                /* Re-read in case the host split the read and data came first */
7816                trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7817                rd_data = (u32)(trans >> AHB_DATA_LSB);
7818                wr_data |= (rd_data & ~mask & sz_mask);
7819        }
7820
7821        /* If mask is not zero, we need to write. */
7822        if (mask & sz_mask) {
7823                trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7824                trans |= ((u64)wr_data << AHB_DATA_LSB);
7825                trans |= AHB_WR;
7826                qib_write_kreg(dd, KR_AHB_TRANS, trans);
7827
7828                for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7829                        trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7830                        if (trans & AHB_TRANS_RDY)
7831                                break;
7832                }
7833                if (tries >= AHB_TRANS_TRIES) {
7834                        qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
7835                                    AHB_TRANS_TRIES);
7836                        goto bail;
7837                }
7838        }
7839        ret = wr_data;
7840bail:
7841        qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
7842        return ret;
7843}
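/*
 * Usage note (an observation on the code above, not a separate API): a
 * zero mask skips the write phase but still performs the read, so
 * ahb_mod() doubles as a pure read, as several callers below rely on:
 *
 *      u32 cur = ahb_mod(dd, IBSD(ppd->hw_pidx),
 *                        chan + (chan >> 1), addr, 0, 0);
 *
 * A mask covering the entity's full width skips the read-modify step and
 * performs a plain write.
 */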
7844
7845static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
7846                             unsigned mask)
7847{
7848        struct qib_devdata *dd = ppd->dd;
7849        int chan;
7851
7852        for (chan = 0; chan < SERDES_CHANS; ++chan) {
7853                ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7854                        data, mask);
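                /* Read back the same address; result unused, presumably a flush. */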
7855                ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7856                        0, 0);
7857        }
7858}
7859
7860static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
7861{
7862        u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
7863        u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
7864
7865        if (enable && !state) {
7866                pr_info("IB%u:%u Turning LOS on\n",
7867                        ppd->dd->unit, ppd->port);
7868                data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7869        } else if (!enable && state) {
7870                pr_info("IB%u:%u Turning LOS off\n",
7871                        ppd->dd->unit, ppd->port);
7872                data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7873        }
7874        qib_write_kreg_port(ppd, krp_serdesctrl, data);
7875}
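/*
 * Both serdes init paths below call serdes_7322_los_enable(ppd, 1) to
 * turn LOS on during initial bring-up.
 */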
7876
7877static int serdes_7322_init(struct qib_pportdata *ppd)
7878{
7879        int ret = 0;
7880
7881        if (ppd->dd->cspec->r1)
7882                ret = serdes_7322_init_old(ppd);
7883        else
7884                ret = serdes_7322_init_new(ppd);
7885        return ret;
7886}
7887
7888static int serdes_7322_init_old(struct qib_pportdata *ppd)
7889{
7890        u32 le_val;
7891
7892        /*
7893         * Initialize the Tx DDS tables.  Also done every QSFP event,
7894         * for adapters with QSFP
7895         */
7896        init_txdds_table(ppd, 0);
7897
7898        /* ensure no tx overrides from earlier driver loads */
7899        qib_write_kreg_port(ppd, krp_tx_deemph_override,
7900                SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7901                reset_tx_deemphasis_override));
7902
7903        /* Patch some SerDes defaults to "Better for IB" */
7904        /* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
7905        ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
7906
7907        /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
7908        ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
7909        /* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
7910        ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));
7911
7912        /* May be overridden in qsfp_7322_event */
7913        le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
7914        ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
7915
7916        /* enable LE1 adaptation for all but QME, where it is disabled */
7917        le_val = IS_QME(ppd->dd) ? 0 : 1;
7918        ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));
7919
7920        /* Clear cmode-override, may be set from older driver */
7921        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7922
7923        /* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
7924        ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));
7925
7926        /* setup LoS params; these are subsystem, so chan == 5 */
7927        /* LoS filter threshold_count on, ch 0-3, set to 8 */
7928        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7929        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7930        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7931        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7932
7933        /* LoS filter threshold_count off, ch 0-3, set to 4 */
7934        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
7935        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
7936        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
7937        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
7938
7939        /* LoS filter select enabled */
7940        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
7941
7942        /* LoS target data:  SDR=4, DDR=2, QDR=1 */
7943        ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
7944        ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7945        ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7946
7947        serdes_7322_los_enable(ppd, 1);
7948
7949        /* rxbistena; set to 0 to avoid side effects when switched later */
7950        ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
7951
7952        /* Configure 4 DFE taps, and only they adapt */
7953        ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));
7954
7955        /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
7956        le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
7957        ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
7958
7959        /*
7960         * Set receive adaptation mode.  SDR and DDR adaptation are
7961         * always on, and QDR is initially enabled; later disabled.
7962         */
7963        qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
7964        qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
7965        qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
7966                            ppd->dd->cspec->r1 ?
7967                            QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
7968        ppd->cpspec->qdr_dfe_on = 1;
7969
7970        /* FLoop LOS gate: PPM filter enabled */
7971        ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7972
7973        /* rx offset center enabled */
7974        ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);
7975
7976        if (!ppd->dd->cspec->r1) {
7977                ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
7978                ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
7979        }
7980
7981        /* Set the frequency loop bandwidth to 15 */
7982        ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));
7983
7984        return 0;
7985}
7986
7987static int serdes_7322_init_new(struct qib_pportdata *ppd)
7988{
7989        unsigned long tend;
7990        u32 le_val, rxcaldone;
7991        int chan, chan_done = (1 << SERDES_CHANS) - 1;
7992
7993        /* Clear cmode-override, may be set from older driver */
7994        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7995
7996        /* ensure no tx overrides from earlier driver loads */
7997        qib_write_kreg_port(ppd, krp_tx_deemph_override,
7998                SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7999                reset_tx_deemphasis_override));
8000
8001        /* START OF LSI SUGGESTED SERDES BRINGUP */
8002        /* Reset - Calibration Setup */
8003        /*       Stop DFE adaptation */
8004        ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
8005        /*       Disable LE1 */
8006        ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
8007        /*       Disable autoadapt for LE1 */
8008        ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
8009        /*       Disable LE2 */
8010        ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
8011        /*       Disable VGA */
8012        ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
8013        /*       Disable AFE Offset Cancel */
8014        ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
8015        /*       Disable Timing Loop */
8016        ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
8017        /*       Disable Frequency Loop */
8018        ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
8019        /*       Disable Baseline Wander Correction */
8020        ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
8021        /*       Disable RX Calibration */
8022        ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
8023        /*       Disable RX Offset Calibration */
8024        ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
8025        /*       Select BB CDR */
8026        ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
8027        /*       CDR Step Size */
8028        ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
8029        /*       Enable phase Calibration */
8030        ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
8031        /*       DFE Bandwidth [2:14-12] */
8032        ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
8033        /*       DFE Config (4 taps only) */
8034        ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
8035        /*       Gain Loop Bandwidth */
8036        if (!ppd->dd->cspec->r1) {
8037                ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
8038                ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
8039        } else {
8040                ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
8041        }
8042        /*       Baseline Wander Correction Gain [13:4-0] (leave as default) */
8043        /*       Baseline Wander Correction Gain [3:7-5] (leave as default) */
8044        /*       Data Rate Select [5:7-6] (leave as default) */
8045        /*       RX Parallel Word Width [3:10-8] (leave as default) */
8046
8047        /* RX RESET */
8048        /*       Single- or Multi-channel reset */
8049        /*       RX Analog reset */
8050        /*       RX Digital reset */
8051        ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
8052        msleep(20);
8053        /*       RX Analog reset */
8054        ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
8055        msleep(20);
8056        /*       RX Digital reset */
8057        ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
8058        msleep(20);
8059
8060        /* setup LoS params; these are subsystem, so chan == 5 */
8061        /* LoS filter threshold_count on, ch 0-3, set to 8 */
8062        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
8063        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
8064        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
8065        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
8066
8067        /* LoS filter threshold_count off, ch 0-3, set to 4 */
8068        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
8069        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
8070        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
8071        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
8072
8073        /* LoS filter select enabled */
8074        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
8075
8076        /* LoS target data:  SDR=4, DDR=2, QDR=1 */
8077        ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
8078        ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
8079        ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
8080
8081        /* Turn LOS on during initial SERDES init */
8082        serdes_7322_los_enable(ppd, 1);
8083        /* FLoop LOS gate: PPM filter enabled */
8084        ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
8085
8086        /* RX LATCH CALIBRATION */
8087        /*       Enable Eyefinder Phase Calibration latch */
8088        ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
8089        /*       Enable RX Offset Calibration latch */
8090        ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
8091        msleep(20);
8092        /*       Start Calibration */
8093        ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
8094        tend = jiffies + msecs_to_jiffies(500);
8095        while (chan_done && !time_is_before_jiffies(tend)) {
8096                msleep(20);
8097                for (chan = 0; chan < SERDES_CHANS; ++chan) {
8098                        rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
8099                                            (chan + (chan >> 1)),
8100                                            25, 0, 0);
8101                        if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
8102                            (~chan_done & (1 << chan)) == 0)
8103                                chan_done &= ~(1 << chan);
8104                }
8105        }
8106        if (chan_done) {
8107                pr_info("Serdes %d calibration not done after .5 sec: 0x%x\n",
8108                         IBSD(ppd->hw_pidx), chan_done);
8109        } else {
8110                for (chan = 0; chan < SERDES_CHANS; ++chan) {
8111                        rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
8112                                            (chan + (chan >> 1)),
8113                                            25, 0, 0);
8114                        if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
8115                                pr_info("Serdes %d chan %d calibration failed\n",
8116                                        IBSD(ppd->hw_pidx), chan);
8117                }
8118        }
8119
8120        /*       Turn off Calibration */
8121        ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
8122        msleep(20);
8123
8124        /* BRING RX UP */
8125        /*       Set LE2 value (May be overridden in qsfp_7322_event) */
8126        le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
8127        ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
8128        /*       Set LE2 Loop bandwidth */
8129        ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
8130        /*       Enable LE2 */
8131        ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
8132        msleep(20);
8133        /*       Enable H0 only */
8134        ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
8135        /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
8136        le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
8137        ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
8138        /*       Enable VGA */
8139        ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
8140        msleep(20);
8141        /*       Set Frequency Loop Bandwidth */
8142        ibsd_wr_allchans(ppd, 2, (15 << 5), BMASK(8, 5));
8143        /*       Enable Frequency Loop */
8144        ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
8145        /*       Set Timing Loop Bandwidth */
8146        ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
8147        /*       Enable Timing Loop */
8148        ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
8149        msleep(50);
8150        /*       Enable DFE
8151         *       Set receive adaptation mode.  SDR and DDR adaptation are
8152         *       always on, and QDR is initially enabled; later disabled.
8153         */
8154        qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
8155        qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
8156        qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
8157                            ppd->dd->cspec->r1 ?
8158                            QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
8159        ppd->cpspec->qdr_dfe_on = 1;
8160        /*       Disable LE1  */
8161        ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
8162        /*       Disable auto adapt for LE1 */
8163        ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
8164        msleep(20);
8165        /*       Enable AFE Offset Cancel */
8166        ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
8167        /*       Enable Baseline Wander Correction */
8168        ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
8169        /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
8170        ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
8171        /* VGA output common mode */
8172        ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
8173
8174        /*
8175         * Initialize the Tx DDS tables.  Also done every QSFP event,
8176         * for adapters with QSFP
8177         */
8178        init_txdds_table(ppd, 0);
8179
8180        return 0;
8181}
8182
8183/* Start of QMH serdes parameter adjustment routines */
8184
8185static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
8186{
8187        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8188                9, code << 9, 0x3f << 9);
8189}
8190
8191static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
8192        int enable, u32 tapenable)
8193{
8194        if (enable)
8195                ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8196                        1, 3 << 10, 0x1f << 10);
8197        else
8198                ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8199                        1, 0, 0x1f << 10);
8200}
8201
8202/* Set clock to 1, 0, 1, 0 */
8203static void clock_man(struct qib_pportdata *ppd, int chan)
8204{
8205        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8206                4, 0x4000, 0x4000);
8207        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8208                4, 0, 0x4000);
8209        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8210                4, 0x4000, 0x4000);
8211        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8212                4, 0, 0x4000);
8213}
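/*
 * The three helpers above are used together in force_h1() below: enter
 * manual H1 mode, load the code, clock it in, then leave manual mode.
 */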
8214
8215/*
8216 * Write the current Tx serdes pre, post, main and amp settings into the
8217 * serdes.  The caller must pass settings appropriate for the current
8218 * link speed, or not care whether they match it.
8219 */
8220static void write_tx_serdes_param(struct qib_pportdata *ppd,
8221                                  struct txdds_ent *txdds)
8222{
8223        u64 deemph;
8224
8225        deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
8226        /* field names for amp, main, post, pre, respectively */
8227        deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
8228                    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
8229                    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
8230                    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));
8231
8232        deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8233                           tx_override_deemphasis_select);
8234        deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8235                    txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8236                                       txampcntl_d2a);
8237        deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8238                     txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8239                                   txc0_ena);
8240        deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8241                     txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8242                                    txcp1_ena);
8243        deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8244                     txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8245                                    txcn1_ena);
8246        qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
8247}
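/*
 * For example (values hypothetical), a txdds_ent of { .amp = 12,
 * .main = 15, .post = 11, .pre = 4 } lands in the txampcntl_d2a,
 * txc0_ena, txcp1_ena and txcn1_ena fields respectively, with
 * tx_override_deemphasis_select set so the overrides take effect.
 */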
8248
8249/*
8250 * Set the parameters for mez cards on link bounce, so they are
8251 * always exactly what was requested.  Similar logic to init_txdds
8252 * but does just the serdes.
8253 */
8254static void adj_tx_serdes(struct qib_pportdata *ppd)
8255{
8256        const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
8257        struct txdds_ent *dds;
8258
8259        find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
8260        dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
8261                qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
8262                                ddr_dds : sdr_dds));
8263        write_tx_serdes_param(ppd, dds);
8264}
8265
8266/* set QDR forced value for H1, if needed */
8267static void force_h1(struct qib_pportdata *ppd)
8268{
8269        int chan;
8270
8271        ppd->cpspec->qdr_reforce = 0;
8272        if (!ppd->dd->cspec->r1)
8273                return;
8274
8275        for (chan = 0; chan < SERDES_CHANS; chan++) {
8276                set_man_mode_h1(ppd, chan, 1, 0);
8277                set_man_code(ppd, chan, ppd->cpspec->h1_val);
8278                clock_man(ppd, chan);
8279                set_man_mode_h1(ppd, chan, 0, 0);
8280        }
8281}
8282
8283#define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
8284#define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
8285
8286#define R_OPCODE_LSB 3
8287#define R_OP_NOP 0
8288#define R_OP_SHIFT 2
8289#define R_OP_UPDATE 3
8290#define R_TDI_LSB 2
8291#define R_TDO_LSB 1
8292#define R_RDY 1
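/*
 * Register layout implied by the macros above: bit 0 is the ready flag,
 * bit 1 is TDO (data out), bit 2 is TDI (data in), and bits 4:3 hold
 * the opcode (NOP, SHIFT or UPDATE).
 */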
8293
8294static int qib_r_grab(struct qib_devdata *dd)
8295{
8296        u64 val = SJA_EN;
8297
8298        qib_write_kreg(dd, kr_r_access, val);
8299        qib_read_kreg32(dd, kr_scratch);
8300        return 0;
8301}
8302
8303/* qib_r_wait_for_rdy() not only waits for the ready bit, it
8304 * returns the current state of R_TDO
8305 */
8306static int qib_r_wait_for_rdy(struct qib_devdata *dd)
8307{
8308        u64 val;
8309        int timeout;
8310
8311        for (timeout = 0; timeout < 100; ++timeout) {
8312                val = qib_read_kreg32(dd, kr_r_access);
8313                if (val & R_RDY)
8314                        return (val >> R_TDO_LSB) & 1;
8315        }
8316        return -1;
8317}
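/*
 * qib_r_shift() depends on this: the TDO state returned here is what it
 * captures into its outp buffer on the following iteration.
 */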
8318
8319static int qib_r_shift(struct qib_devdata *dd, int bisten,
8320                       int len, u8 *inp, u8 *outp)
8321{
8322        u64 valbase, val;
8323        int ret, pos;
8324
8325        valbase = SJA_EN | (bisten << BISTEN_LSB) |
8326                (R_OP_SHIFT << R_OPCODE_LSB);
8327        ret = qib_r_wait_for_rdy(dd);
8328        if (ret < 0)
8329                goto bail;
8330        for (pos = 0; pos < len; ++pos) {
8331                val = valbase;
8332                if (outp) {
8333                        outp[pos >> 3] &= ~(1 << (pos & 7));
8334                        outp[pos >> 3] |= (ret << (pos & 7));
8335                }
8336                if (inp) {
8337                        int tdi = inp[pos >> 3] >> (pos & 7);
8338
8339                        val |= ((tdi & 1) << R_TDI_LSB);
8340                }
8341                qib_write_kreg(dd, kr_r_access, val);
8342                qib_read_kreg32(dd, kr_scratch);
8343                ret = qib_r_wait_for_rdy(dd);
8344                if (ret < 0)
8345                        break;
8346        }
8347        /* Restore to NOP between operations. */
8348        val = SJA_EN | (bisten << BISTEN_LSB);
8349        qib_write_kreg(dd, kr_r_access, val);
8350        qib_read_kreg32(dd, kr_scratch);
8351        ret = qib_r_wait_for_rdy(dd);
8352
8353        if (ret >= 0)
8354                ret = pos;
8355bail:
8356        return ret;
8357}
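/*
 * Typical sequence, mirroring setup_7322_link_recovery() below: grab the
 * interface, shift a pattern into the selected chain, then latch it with
 * an update:
 *
 *      if (qib_r_grab(dd) < 0 ||
 *          qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
 *          qib_r_update(dd, BISTEN_AT) < 0)
 *              qib_dev_err(dd, "Failed IB link recovery setup\n");
 */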
8358
8359static int qib_r_update(struct qib_devdata *dd, int bisten)
8360{
8361        u64 val;
8362        int ret;
8363
8364        val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
8365        ret = qib_r_wait_for_rdy(dd);
8366        if (ret >= 0) {
8367                qib_write_kreg(dd, kr_r_access, val);
8368                qib_read_kreg32(dd, kr_scratch);
8369        }
8370        return ret;
8371}
8372
8373#define BISTEN_PORT_SEL 15
8374#define LEN_PORT_SEL 625
8375#define BISTEN_AT 17
8376#define LEN_AT 156
8377#define BISTEN_ETM 16
8378#define LEN_ETM 632
8379
8380#define BIT2BYTE(x) DIV_ROUND_UP(x, BITS_PER_BYTE)
8381
8382/* these are common for all IB port use cases. */
8383static u8 reset_at[BIT2BYTE(LEN_AT)] = {
8384        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8385        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
8386};
8387static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
8388        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8389        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8390        0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
8391        0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
8392        0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
8393        0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
8394        0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8395        0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
8396};
8397static u8 at[BIT2BYTE(LEN_AT)] = {
8398        0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
8399        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
8400};
8401
8402/* used for IB1 or IB2, only one in use */
8403static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
8404        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8405        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8406        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8407        0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
8408        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8409        0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
8410        0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
8411        0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
8412};
8413
8414/* used when both IB1 and IB2 are in use */
8415static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
8416        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8417        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
8418        0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8419        0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
8420        0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
8421        0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
8422        0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
8423        0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
8424};
8425
8426/* used when only IB1 is in use */
8427static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
8428        0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8429        0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8430        0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8431        0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8432        0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8433        0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8434        0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8435        0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8436};
8437
8438/* used when only IB2 is in use */
8439static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
8440        0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
8441        0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
8442        0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8443        0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8444        0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
8445        0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8446        0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8447        0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
8448};
8449
8450/* used when both IB1 and IB2 are in use */
8451static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
8452        0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8453        0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8454        0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8455        0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8456        0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8457        0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
8458        0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8459        0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8460};
8461
8462/*
8463 * Do setup to properly handle IB link recovery; if "both" is set, we
8464 * are initializing to cover both ports; otherwise we are initializing
8465 * to cover a single port card, or the port has reached INIT and we may
8466 * need to switch coverage types.
8467 */
8468static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
8469{
8470        u8 *portsel, *etm;
8471        struct qib_devdata *dd = ppd->dd;
8472
8473        if (!ppd->dd->cspec->r1)
8474                return;
8475        if (!both) {
8476                dd->cspec->recovery_ports_initted++;
8477                ppd->cpspec->recovery_init = 1;
8478        }
8479        if (!both && dd->cspec->recovery_ports_initted == 1) {
8480                portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
8481                etm = atetm_1port;
8482        } else {
8483                portsel = portsel_2port;
8484                etm = atetm_2port;
8485        }
8486
8487        if (qib_r_grab(dd) < 0 ||
8488                qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
8489                qib_r_update(dd, BISTEN_ETM) < 0 ||
8490                qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
8491                qib_r_update(dd, BISTEN_AT) < 0 ||
8492                qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
8493                            portsel, NULL) < 0 ||
8494                qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
8495                qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
8496                qib_r_update(dd, BISTEN_AT) < 0 ||
8497                qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
8498                qib_r_update(dd, BISTEN_ETM) < 0)
8499                qib_dev_err(dd, "Failed IB link recovery setup\n");
8500}
8501
8502static void check_7322_rxe_status(struct qib_pportdata *ppd)
8503{
8504        struct qib_devdata *dd = ppd->dd;
8505        u64 fmask;
8506
8507        if (dd->cspec->recovery_ports_initted != 1)
8508                return; /* rest doesn't apply to dualport */
8509        qib_write_kreg(dd, kr_control, dd->control |
8510                       SYM_MASK(Control, FreezeMode));
8511        (void)qib_read_kreg64(dd, kr_scratch);
8512        udelay(3); /* ibcreset asserted 400ns, be sure that's over */
8513        fmask = qib_read_kreg64(dd, kr_act_fmask);
8514        if (!fmask) {
8515                /*
8516                 * Require a power cycle before we'll work again; make
8517                 * sure we get no more interrupts, and leave freeze
8518                 * enabled.
8519                 */
8520                ppd->dd->cspec->stay_in_freeze = 1;
8521                qib_7322_set_intr_state(ppd->dd, 0);
8522                qib_write_kreg(dd, kr_fmask, 0ULL);
8523                qib_dev_err(dd, "HCA unusable until powercycled\n");
8524                return; /* eventually reset */
8525        }
8526
8527        qib_write_kreg(ppd->dd, kr_hwerrclear,
8528            SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));
8529
8530        /* don't do the full clear_freeze(), not needed for this */
8531        qib_write_kreg(dd, kr_control, dd->control);
8532        qib_read_kreg32(dd, kr_scratch);
8533        /* take IBC out of reset */
8534        if (ppd->link_speed_supported) {
8535                ppd->cpspec->ibcctrl_a &=
8536                        ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
8537                qib_write_kreg_port(ppd, krp_ibcctrl_a,
8538                                    ppd->cpspec->ibcctrl_a);
8539                qib_read_kreg32(dd, kr_scratch);
8540                if (ppd->lflags & QIBL_IB_LINK_DISABLED)
8541                        qib_set_ib_7322_lstate(ppd, 0,
8542                                QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
8543        }
8544}
8545