linux/drivers/infiniband/hw/qib/qib_iba7322.c
/*
 * Copyright (c) 2012 Intel Corporation.  All rights reserved.
 * Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * This file contains all of the code that is specific to the
 * InfiniPath 7322 chip
 */

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#ifdef CONFIG_INFINIBAND_QIB_DCA
#include <linux/dca.h>
#endif

#include "qib.h"
#include "qib_7322_regs.h"
#include "qib_qsfp.h"

#include "qib_mad.h"
#include "qib_verbs.h"

#undef pr_fmt
#define pr_fmt(fmt) QIB_DRV_NAME " " fmt

static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
static irqreturn_t qib_7322intr(int irq, void *data);
static irqreturn_t qib_7322bufavail(int irq, void *data);
static irqreturn_t sdma_intr(int irq, void *data);
static irqreturn_t sdma_idle_intr(int irq, void *data);
static irqreturn_t sdma_progress_intr(int irq, void *data);
static irqreturn_t sdma_cleanup_intr(int irq, void *data);
static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
                                  struct qib_ctxtdata *rcd);
static u8 qib_7322_phys_portstate(u64);
static u32 qib_7322_iblink_state(u64);
static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
                                   u16 linitcmd);
static void force_h1(struct qib_pportdata *);
static void adj_tx_serdes(struct qib_pportdata *);
static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
static void qib_7322_mini_pcs_reset(struct qib_pportdata *);

static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
static void serdes_7322_los_enable(struct qib_pportdata *, int);
static int serdes_7322_init_old(struct qib_pportdata *);
static int serdes_7322_init_new(struct qib_pportdata *);
static void dump_sdma_7322_state(struct qib_pportdata *);

#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
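
/*
 * Editor's illustration (not upstream code): BMASK(msb, lsb) builds a
 * contiguous mask covering bits lsb..msb inclusive, e.g. BMASK(7, 4) is
 * ((1 << 4) - 1) << 4 == 0xf0.  The disabled block below is a usage
 * sketch only.
 */
#if 0
static void bmask_example(void)
{
        u32 high_nibble = BMASK(7, 4); /* 0xf0 */
        u32 bit0 = BMASK(0, 0);        /* 0x1 */
}
#endif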

/* LE2 serdes values for different cases */
#define LE2_DEFAULT 5
#define LE2_5m 4
#define LE2_QME 0

/* Below is special-purpose, so only really works for the IB SerDes blocks. */
#define IBSD(hw_pidx) (hw_pidx + 2)

/* these are variables for documentation and experimentation purposes */
static const unsigned rcv_int_timeout = 375;
static const unsigned rcv_int_count = 16;
static const unsigned sdma_idle_cnt = 64;

/* Time to stop altering Rx Equalization parameters, after link up. */
#define RXEQ_DISABLE_MSECS 2500

/*
 * Number of VLs we are configured to use (to allow for more
 * credits per vl, etc.)
 */
ushort qib_num_cfg_vls = 2;
module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");

static ushort qib_chase = 1;
module_param_named(chase, qib_chase, ushort, S_IRUGO);
MODULE_PARM_DESC(chase, "Enable state chase handling");

static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
MODULE_PARM_DESC(long_attenuation, \
                 "attenuation cutoff (dB) for long copper cable setup");

static ushort qib_singleport;
module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");

static ushort qib_krcvq01_no_msi;
module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");

/*
 * Receive header queue sizes
 */
static unsigned qib_rcvhdrcnt;
module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "receive header count");

static unsigned qib_rcvhdrsize;
module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");

static unsigned qib_rcvhdrentsize;
module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");

#define MAX_ATTEN_LEN 64 /* plenty for any real system */
/* for read back, default index is ~5m copper cable */
static char txselect_list[MAX_ATTEN_LEN] = "10";
static struct kparam_string kp_txselect = {
        .string = txselect_list,
        .maxlen = MAX_ATTEN_LEN
};
static int  setup_txselect(const char *, struct kernel_param *);
module_param_call(txselect, setup_txselect, param_get_string,
                  &kp_txselect, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(txselect, \
                 "Tx serdes indices (for no QSFP or invalid QSFP data)");
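
/*
 * Editor's usage note (hedged, not from the upstream sources): txselect
 * is normally given at module load time, e.g. "modprobe ib_qib
 * txselect=10", to pick a tx serdes settings index when a port has no
 * QSFP data or the data is invalid; the exact string syntax is parsed
 * by setup_txselect(), defined later in this file.
 */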

#define BOARD_QME7342 5
#define BOARD_QMH7342 6
#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
                    BOARD_QMH7342)
#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
                    BOARD_QME7342)

#define KREG_IDX(regname)     (QIB_7322_##regname##_OFFS / sizeof(u64))

#define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
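
/*
 * Editor's illustration: the chip register space is mapped as an array
 * of u64, so KREG_IDX()/KREG_IBPORT_IDX() turn the byte offsets from
 * the generated qib_7322_regs.h into array indices.  For example, a
 * register at byte offset 0x120 becomes index 0x120 / sizeof(u64) ==
 * 0x24, suitable for the qib_read_kreg64(dd, ...) accessor below.
 */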

#define MASK_ACROSS(lsb, msb) \
        (((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))

#define SYM_RMASK(regname, fldname) ((u64)              \
        QIB_7322_##regname##_##fldname##_RMASK)

#define SYM_MASK(regname, fldname) ((u64)               \
        QIB_7322_##regname##_##fldname##_RMASK <<       \
         QIB_7322_##regname##_##fldname##_LSB)

#define SYM_FIELD(value, regname, fldname) ((u64)       \
        (((value) >> SYM_LSB(regname, fldname)) &       \
         SYM_RMASK(regname, fldname)))

/* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
#define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
        (((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))

#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
#define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
#define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
#define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
/* Below because most, but not all, fields of IntMask have that full suffix */
#define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)

#define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)
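
/*
 * Editor's illustration (hypothetical field): for a 2-bit field with
 * _LSB == 4 and _RMASK == 0x3,
 *
 *      SYM_MASK(reg, fld)     == 0x3ULL << 4 == 0x30 (in-place mask)
 *      SYM_FIELD(v, reg, fld) == ((v) >> 4) & 0x3    (extracted value)
 *
 * so SYM_MASK selects the field where it sits, while SYM_FIELD returns
 * it right-justified.
 */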

/*
 * the size bits give us 2^N, in KB units.  0 marks as invalid,
 * and 7 is reserved.  We currently use only 2KB and 4KB
 */
#define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
#define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
#define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
#define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
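
/*
 * Editor's sketch (assumed shape, not upstream code): a TID entry pairs
 * a size code with the buffer's physical address, stored without its
 * low IBA7322_TID_PA_SHIFT bits, roughly ("pa" hypothetical):
 */
#if 0
        u64 tidentry = IBA7322_TID_SZ_2K | (pa >> IBA7322_TID_PA_SHIFT);
#endif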

#define SendIBSLIDAssignMask \
        QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
#define SendIBSLMCMask \
        QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK

#define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
#define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
#define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
#define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
#define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
#define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)

#define _QIB_GPIO_SDA_NUM 1
#define _QIB_GPIO_SCL_NUM 0
#define QIB_EEPROM_WEN_NUM 14
#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */

/* HW counter clock is at 4nsec */
#define QIB_7322_PSXMITWAIT_CHECK_RATE 4000

/* full speed IB port 1 only */
#define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
#define PORT_SPD_CAP_SHIFT 3

/* full speed featuremask, both ports */
#define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))

/*
 * This file contains almost all the chip-specific register information and
 * access functions for the QLogic InfiniPath 7322 PCI-Express chip.
 */

/* Use defines to tie machine-generated names to lower-case names */
#define kr_contextcnt KREG_IDX(ContextCnt)
#define kr_control KREG_IDX(Control)
#define kr_counterregbase KREG_IDX(CntrRegBase)
#define kr_errclear KREG_IDX(ErrClear)
#define kr_errmask KREG_IDX(ErrMask)
#define kr_errstatus KREG_IDX(ErrStatus)
#define kr_extctrl KREG_IDX(EXTCtrl)
#define kr_extstatus KREG_IDX(EXTStatus)
#define kr_gpio_clear KREG_IDX(GPIOClear)
#define kr_gpio_mask KREG_IDX(GPIOMask)
#define kr_gpio_out KREG_IDX(GPIOOut)
#define kr_gpio_status KREG_IDX(GPIOStatus)
#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
#define kr_debugportval KREG_IDX(DebugPortValueReg)
#define kr_fmask KREG_IDX(feature_mask)
#define kr_act_fmask KREG_IDX(active_feature_mask)
#define kr_hwerrclear KREG_IDX(HwErrClear)
#define kr_hwerrmask KREG_IDX(HwErrMask)
#define kr_hwerrstatus KREG_IDX(HwErrStatus)
#define kr_intclear KREG_IDX(IntClear)
#define kr_intmask KREG_IDX(IntMask)
#define kr_intredirect KREG_IDX(IntRedirect0)
#define kr_intstatus KREG_IDX(IntStatus)
#define kr_pagealign KREG_IDX(PageAlign)
#define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
#define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
#define kr_revision KREG_IDX(Revision)
#define kr_scratch KREG_IDX(Scratch)
#define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
#define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
#define kr_sendctrl KREG_IDX(SendCtrl)
#define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
#define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
#define kr_sendpiobufbase KREG_IDX(SendBufBase)
#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
#define kr_sendpiosize KREG_IDX(SendBufSize)
#define kr_sendregbase KREG_IDX(SendRegBase)
#define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
#define kr_userregbase KREG_IDX(UserRegBase)
#define kr_intgranted KREG_IDX(Int_Granted)
#define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
#define kr_intblocked KREG_IDX(IntBlocked)
#define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)

/*
 * per-port kernel registers.  Access only with qib_read_kreg_port()
 * or qib_write_kreg_port()
 */
#define krp_errclear KREG_IBPORT_IDX(ErrClear)
#define krp_errmask KREG_IBPORT_IDX(ErrMask)
#define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
#define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
#define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
#define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
#define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
#define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
#define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
#define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
#define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
#define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
#define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
#define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
#define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
#define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
#define krp_psinterval KREG_IBPORT_IDX(PSInterval)
#define krp_psstart KREG_IBPORT_IDX(PSStart)
#define krp_psstat KREG_IBPORT_IDX(PSStat)
#define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
#define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
#define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
#define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
#define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
#define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
#define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
#define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
#define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
#define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
#define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
#define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
#define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
#define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
#define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
#define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
#define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
#define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
#define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
#define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
#define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
#define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
#define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
#define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
#define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
#define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
#define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
#define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
#define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
#define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
#define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)

/*
 * Per-context kernel registers.  Access only with qib_read_kreg_ctxt()
 * or qib_write_kreg_ctxt()
 */
#define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
#define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)

/*
 * TID Flow table, per context.  Reduces
 * number of hdrq updates to one per flow (or on errors).
 * context 0 and 1 share same memory, but have distinct
 * addresses.  Since for now, we never use expected sends
 * on kernel contexts, we don't worry about that (we initialize
 * those entries for ctxt 0/1 on driver load twice, for example).
 */
#define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
#define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))

/* these are the error bits in the tid flows, and are W1C */
#define TIDFLOW_ERRBITS  ( \
        (SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
        SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
        (SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
        SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
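
/*
 * Editor's sketch: because these bits are W1C (write-1-to-clear),
 * clearing a flow's error state means writing the error bits back to
 * the flow-table entry, not writing zero; something like ("fidx"
 * hypothetical):
 */
#if 0
        qib_write_ureg(dd, ur_rcvflowtable + fidx, TIDFLOW_ERRBITS, ctxt);
#endif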

/* Most (not all) Counters are per-IBport.
 * Requires LBIntCnt is at offset 0 in the group
 */
#define CREG_IDX(regname) \
((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
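
/*
 * Editor's illustration: CREG_IDX() rebases a counter's byte offset
 * against LBIntCnt, the first counter in the group, so the result
 * indexes cspec->cregbase directly, e.g.:
 */
#if 0
        u64 pkts_sent = read_7322_creg_port(ppd, crp_pktsend);
#endif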

#define crp_badformat CREG_IDX(RxVersionErrCnt)
#define crp_err_rlen CREG_IDX(RxLenErrCnt)
#define crp_erricrc CREG_IDX(RxICRCErrCnt)
#define crp_errlink CREG_IDX(RxLinkMalformCnt)
#define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
#define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
#define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
#define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
#define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
#define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
#define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
#define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
#define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
#define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
#define crp_pktrcv CREG_IDX(RxDataPktCnt)
#define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
#define crp_pktsend CREG_IDX(TxDataPktCnt)
#define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
#define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
#define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
#define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
#define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
#define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
#define crp_rcvebp CREG_IDX(RxEBPCnt)
#define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
#define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
#define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
#define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
#define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
#define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
#define crp_rxvlerr CREG_IDX(RxVlErrCnt)
#define crp_sendstall CREG_IDX(TxFlowStallCnt)
#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
#define crp_txlenerr CREG_IDX(TxLenErrCnt)
#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
#define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
#define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
#define crp_wordrcv CREG_IDX(RxDwordCnt)
#define crp_wordsend CREG_IDX(TxDwordCnt)
#define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)

/* these are the (few) counters that are not port-specific */
#define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
                        QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
#define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
#define cr_lbint CREG_DEVIDX(LBIntCnt)
#define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
#define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
#define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
#define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
#define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)

/* no chip register for # of IB ports supported, so define */
#define NUM_IB_PORTS 2

/* 1 VL15 buffer per hardware IB port, no register for this, so define */
#define NUM_VL15_BUFS NUM_IB_PORTS

/*
 * context 0 and 1 are special, and there is no chip register that
 * defines this value, so we have to define it here.
 * These are all allocated to either 0 or 1 for single port
 * hardware configuration, otherwise each gets half
 */
#define KCTXT0_EGRCNT 2048

/* values for vl and port fields in PBC, 7322-specific */
#define PBC_PORT_SEL_LSB 26
#define PBC_PORT_SEL_RMASK 1
#define PBC_VL_NUM_LSB 27
#define PBC_VL_NUM_RMASK 7
#define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
#define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
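
/*
 * Editor's sketch (illustrative only, "vl" and "port" hypothetical
 * locals): composing the 7322-specific PBC fields might look like:
 */
#if 0
        u64 pbc = PBC_7322_VL15_SEND |
                  ((u64)(vl & PBC_VL_NUM_RMASK) << PBC_VL_NUM_LSB) |
                  ((u64)(port & PBC_PORT_SEL_RMASK) << PBC_PORT_SEL_LSB);
#endif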

static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
        [IB_RATE_2_5_GBPS] = 16,
        [IB_RATE_5_GBPS] = 8,
        [IB_RATE_10_GBPS] = 4,
        [IB_RATE_20_GBPS] = 2,
        [IB_RATE_30_GBPS] = 2,
        [IB_RATE_40_GBPS] = 1
};
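
/*
 * Editor's note: the table above holds inter-packet delay multipliers
 * relative to the fastest rate (40Gb == 1); e.g.
 * ib_rate_to_delay[IB_RATE_10_GBPS] is 4 because 10Gb moves data at a
 * quarter of the 40Gb rate.
 */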

#define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
#define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)

/* link training states, from IBC */
#define IB_7322_LT_STATE_DISABLED        0x00
#define IB_7322_LT_STATE_LINKUP          0x01
#define IB_7322_LT_STATE_POLLACTIVE      0x02
#define IB_7322_LT_STATE_POLLQUIET       0x03
#define IB_7322_LT_STATE_SLEEPDELAY      0x04
#define IB_7322_LT_STATE_SLEEPQUIET      0x05
#define IB_7322_LT_STATE_CFGDEBOUNCE     0x08
#define IB_7322_LT_STATE_CFGRCVFCFG      0x09
#define IB_7322_LT_STATE_CFGWAITRMT      0x0a
#define IB_7322_LT_STATE_CFGIDLE         0x0b
#define IB_7322_LT_STATE_RECOVERRETRAIN  0x0c
#define IB_7322_LT_STATE_TXREVLANES      0x0d
#define IB_7322_LT_STATE_RECOVERWAITRMT  0x0e
#define IB_7322_LT_STATE_RECOVERIDLE     0x0f
#define IB_7322_LT_STATE_CFGENH          0x10
#define IB_7322_LT_STATE_CFGTEST         0x11
#define IB_7322_LT_STATE_CFGWAITRMTTEST  0x12
#define IB_7322_LT_STATE_CFGWAITENH      0x13

/* link state machine states from IBC */
#define IB_7322_L_STATE_DOWN             0x0
#define IB_7322_L_STATE_INIT             0x1
#define IB_7322_L_STATE_ARM              0x2
#define IB_7322_L_STATE_ACTIVE           0x3
#define IB_7322_L_STATE_ACT_DEFER        0x4

static const u8 qib_7322_physportstate[0x20] = {
        [IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
        [IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
        [IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
        [IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
        [IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
        [IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
        [IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7322_LT_STATE_CFGRCVFCFG] =
                IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7322_LT_STATE_CFGWAITRMT] =
                IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
        [IB_7322_LT_STATE_RECOVERRETRAIN] =
                IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
        [IB_7322_LT_STATE_RECOVERWAITRMT] =
                IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
        [IB_7322_LT_STATE_RECOVERIDLE] =
                IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
        [IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
        [IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7322_LT_STATE_CFGWAITRMTTEST] =
                IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7322_LT_STATE_CFGWAITENH] =
                IB_PHYSPORTSTATE_CFG_WAIT_ENH,
        [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
};
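
/*
 * Editor's sketch: qib_7322_phys_portstate() (prototyped near the top
 * of this file) reduces the IBC link training state to the IBTA
 * physical port state through the table above, roughly as below (field
 * name assumed from the generated register header):
 */
#if 0
static u8 qib_7322_phys_portstate_sketch(u64 ibcstat)
{
        u8 state = (u8)SYM_FIELD(ibcstat, IBCStatusA_0, LinkTrainingState);

        return qib_7322_physportstate[state];
}
#endif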

#ifdef CONFIG_INFINIBAND_QIB_DCA
struct qib_irq_notify {
        int rcv;
        void *arg;
        struct irq_affinity_notify notify;
};
#endif

struct qib_chip_specific {
        u64 __iomem *cregbase;
        u64 *cntrs;
        spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
        spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
        u64 main_int_mask;      /* clear bits which have dedicated handlers */
        u64 int_enable_mask;  /* for per port interrupts in single port mode */
        u64 errormask;
        u64 hwerrmask;
        u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
        u64 gpio_mask; /* shadow the gpio mask register */
        u64 extctrl; /* shadow the gpio output enable, etc... */
        u32 ncntrs;
        u32 nportcntrs;
        u32 cntrnamelen;
        u32 portcntrnamelen;
        u32 numctxts;
        u32 rcvegrcnt;
        u32 updthresh; /* current AvailUpdThld */
        u32 updthresh_dflt; /* default AvailUpdThld */
        u32 r1;
        int irq;
        u32 num_msix_entries;
        u32 sdmabufcnt;
        u32 lastbuf_for_pio;
        u32 stay_in_freeze;
        u32 recovery_ports_initted;
#ifdef CONFIG_INFINIBAND_QIB_DCA
        u32 dca_ctrl;
        int rhdr_cpu[18];
        int sdma_cpu[2];
        u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */
#endif
        struct qib_msix_entry *msix_entries;
        unsigned long *sendchkenable;
        unsigned long *sendgrhchk;
        unsigned long *sendibchk;
        u32 rcvavail_timeout[18];
        char emsgbuf[128]; /* for device error interrupt msg buffer */
};

/* Table of Tx emphasis entries, in "human readable" form */
struct txdds_ent {
        u8 amp;
        u8 pre;
        u8 main;
        u8 post;
};

struct vendor_txdds_ent {
        u8 oui[QSFP_VOUI_LEN];
        u8 *partnum;
        struct txdds_ent sdr;
        struct txdds_ent ddr;
        struct txdds_ent qdr;
};

static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);

#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
#define TXDDS_EXTRA_SZ 18 /* number of extra tx settings entries */
#define TXDDS_MFG_SZ 2    /* number of mfg tx settings entries */
#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */

#define H1_FORCE_VAL 8
#define H1_FORCE_QME 1 /*  may be overridden via setup_txselect() */
#define H1_FORCE_QMH 7 /*  may be overridden via setup_txselect() */

/* The static and dynamic registers are paired, and the pairs indexed by spd */
#define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
        + ((spd) * 2))
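
/*
 * Editor's illustration: spd 0/1/2 (SDR/DDR/QDR) select successive
 * static/dynamic register pairs, so krp_static_adapt_dis(2) is the QDR
 * static adaptation-disable register; e.g. (sketch only):
 */
#if 0
        qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
                            QDR_STATIC_ADAPT_INIT);
#endif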

#define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
#define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
#define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */

struct qib_chippport_specific {
        u64 __iomem *kpregbase;
        u64 __iomem *cpregbase;
        u64 *portcntrs;
        struct qib_pportdata *ppd;
        wait_queue_head_t autoneg_wait;
        struct delayed_work autoneg_work;
        struct delayed_work ipg_work;
        struct timer_list chase_timer;
        /*
         * these 5 fields are used to establish deltas for IB symbol
         * errors and linkrecovery errors.  They can be reported on
         * some chips during link negotiation prior to INIT, and with
         * DDR when faking DDR negotiations with non-IBTA switches.
         * The chip counters are adjusted at driver unload if there is
         * a non-zero delta.
         */
        u64 ibdeltainprog;
        u64 ibsymdelta;
        u64 ibsymsnap;
        u64 iblnkerrdelta;
        u64 iblnkerrsnap;
        u64 iblnkdownsnap;
        u64 iblnkdowndelta;
        u64 ibmalfdelta;
        u64 ibmalfsnap;
        u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
        u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
        unsigned long qdr_dfe_time;
        unsigned long chase_end;
        u32 autoneg_tries;
        u32 recovery_init;
        u32 qdr_dfe_on;
        u32 qdr_reforce;
        /*
         * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
         * entry zero is unused, to simplify indexing
         */
        u8 h1_val;
        u8 no_eep;  /* txselect table index to use if no qsfp info */
        u8 ipg_tries;
        u8 ibmalfusesnap;
        struct qib_qsfp_data qsfp_data;
        char epmsgbuf[192]; /* for port error interrupt msg buffer */
        char sdmamsgbuf[192]; /* for per-port sdma error messages */
};

static struct {
        const char *name;
        irq_handler_t handler;
        int lsb;
        int port; /* 0 if not port-specific, else port # */
        int dca;
} irq_table[] = {
        { "", qib_7322intr, -1, 0, 0 },
        { " (buf avail)", qib_7322bufavail,
                SYM_LSB(IntStatus, SendBufAvail), 0, 0 },
        { " (sdma 0)", sdma_intr,
                SYM_LSB(IntStatus, SDmaInt_0), 1, 1 },
        { " (sdma 1)", sdma_intr,
                SYM_LSB(IntStatus, SDmaInt_1), 2, 1 },
        { " (sdmaI 0)", sdma_idle_intr,
                SYM_LSB(IntStatus, SDmaIdleInt_0), 1, 1 },
        { " (sdmaI 1)", sdma_idle_intr,
                SYM_LSB(IntStatus, SDmaIdleInt_1), 2, 1 },
        { " (sdmaP 0)", sdma_progress_intr,
                SYM_LSB(IntStatus, SDmaProgressInt_0), 1, 1 },
        { " (sdmaP 1)", sdma_progress_intr,
                SYM_LSB(IntStatus, SDmaProgressInt_1), 2, 1 },
        { " (sdmaC 0)", sdma_cleanup_intr,
                SYM_LSB(IntStatus, SDmaCleanupDone_0), 1, 0 },
        { " (sdmaC 1)", sdma_cleanup_intr,
                SYM_LSB(IntStatus, SDmaCleanupDone_1), 2, 0 },
};

#ifdef CONFIG_INFINIBAND_QIB_DCA

static const struct dca_reg_map {
        int     shadow_inx;
        int     lsb;
        u64     mask;
        u16     regno;
} dca_rcvhdr_reg_map[] = {
        { 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH),
           ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH) , KREG_IDX(DCACtrlB) },
        { 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH),
           ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH) , KREG_IDX(DCACtrlB) },
        { 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH),
           ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH) , KREG_IDX(DCACtrlB) },
        { 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH),
           ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH) , KREG_IDX(DCACtrlB) },
        { 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH),
           ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH) , KREG_IDX(DCACtrlC) },
        { 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH),
           ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH) , KREG_IDX(DCACtrlC) },
        { 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH),
           ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH) , KREG_IDX(DCACtrlC) },
        { 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH),
           ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH) , KREG_IDX(DCACtrlC) },
        { 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH),
           ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH) , KREG_IDX(DCACtrlD) },
        { 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH),
           ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH) , KREG_IDX(DCACtrlD) },
        { 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH),
           ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH) , KREG_IDX(DCACtrlD) },
        { 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH),
           ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH) , KREG_IDX(DCACtrlD) },
        { 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH),
           ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH) , KREG_IDX(DCACtrlE) },
        { 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH),
           ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH) , KREG_IDX(DCACtrlE) },
        { 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH),
           ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH) , KREG_IDX(DCACtrlE) },
        { 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH),
           ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH) , KREG_IDX(DCACtrlE) },
        { 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH),
           ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH) , KREG_IDX(DCACtrlF) },
        { 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH),
           ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH) , KREG_IDX(DCACtrlF) },
};
#endif

/* ibcctrl bits */
#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
/* cycle through TS1/TS2 till OK */
#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
/* wait for TS1, then go on */
#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16

#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1           /* move to 0x11 */
#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2          /* move to 0x21 */
#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */

#define BLOB_7322_IBCHG 0x101

static inline void qib_write_kreg(const struct qib_devdata *dd,
                                  const u32 regno, u64 value);
static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
static void write_7322_initregs(struct qib_devdata *);
static void write_7322_init_portregs(struct qib_pportdata *);
static void setup_7322_link_recovery(struct qib_pportdata *, u32);
static void check_7322_rxe_status(struct qib_pportdata *);
static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
#ifdef CONFIG_INFINIBAND_QIB_DCA
static void qib_setup_dca(struct qib_devdata *dd);
static void setup_dca_notifier(struct qib_devdata *dd,
                               struct qib_msix_entry *m);
static void reset_dca_notifier(struct qib_devdata *dd,
                               struct qib_msix_entry *m);
#endif

/**
 * qib_read_ureg32 - read 32-bit virtualized per-context register
 * @dd: device
 * @regno: register number
 * @ctxt: context number
 *
 * Return the contents of a register that is virtualized to be per context.
 * Returns 0 on error (the device being absent or unmapped is not
 * distinguishable from a register that really reads 0 at runtime; we
 * may add a separate error variable at some point).
 */
static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
                                  enum qib_ureg regno, int ctxt)
{
        if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
                return 0;
        return readl(regno + (u64 __iomem *)(
                (dd->ureg_align * ctxt) + (dd->userbase ?
                 (char __iomem *)dd->userbase :
                 (char __iomem *)dd->kregbase + dd->uregbase)));
}
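
/*
 * Editor's usage sketch (register name taken from enum qib_ureg in
 * qib.h, assumed): polling a context's receive header queue tail, e.g.
 */
#if 0
        u32 tail = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
#endif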

/**
 * qib_read_ureg - read virtualized per-context register
 * @dd: device
 * @regno: register number
 * @ctxt: context number
 *
 * Return the contents of a register that is virtualized to be per context.
 * Returns 0 on error (not distinguishable from a register that really
 * reads 0 at runtime; we may add a separate error variable at some point).
 */
static inline u64 qib_read_ureg(const struct qib_devdata *dd,
                                enum qib_ureg regno, int ctxt)
{
        if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
                return 0;
        return readq(regno + (u64 __iomem *)(
                (dd->ureg_align * ctxt) + (dd->userbase ?
                 (char __iomem *)dd->userbase :
                 (char __iomem *)dd->kregbase + dd->uregbase)));
}

/**
 * qib_write_ureg - write virtualized per-context register
 * @dd: device
 * @regno: register number
 * @value: value
 * @ctxt: context
 *
 * Write the contents of a register that is virtualized to be per context.
 */
static inline void qib_write_ureg(const struct qib_devdata *dd,
                                  enum qib_ureg regno, u64 value, int ctxt)
{
        u64 __iomem *ubase;

        if (dd->userbase)
                ubase = (u64 __iomem *)
                        ((char __iomem *) dd->userbase +
                         dd->ureg_align * ctxt);
        else
                ubase = (u64 __iomem *)
                        (dd->uregbase +
                         (char __iomem *) dd->kregbase +
                         dd->ureg_align * ctxt);

        if (dd->kregbase && (dd->flags & QIB_PRESENT))
                writeq(value, &ubase[regno]);
}

static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
                                  const u32 regno)
{
        if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
                return -1;
        return readl((u32 __iomem *) &dd->kregbase[regno]);
}

static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
                                  const u32 regno)
{
        if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
                return -1;
        return readq(&dd->kregbase[regno]);
}

static inline void qib_write_kreg(const struct qib_devdata *dd,
                                  const u32 regno, u64 value)
{
        if (dd->kregbase && (dd->flags & QIB_PRESENT))
                writeq(value, &dd->kregbase[regno]);
}

/*
 * not many sanity checks for the port-specific kernel register routines,
 * since they are only used when it's known to be safe.
 */
static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
                                     const u16 regno)
{
        if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
                return 0ULL;
        return readq(&ppd->cpspec->kpregbase[regno]);
}

static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
                                       const u16 regno, u64 value)
{
        if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
            (ppd->dd->flags & QIB_PRESENT))
                writeq(value, &ppd->cpspec->kpregbase[regno]);
}

/**
 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
 * @dd: the qlogic_ib device
 * @regno: the register number to write
 * @ctxt: the context containing the register
 * @value: the value to write
 */
static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
                                       const u16 regno, unsigned ctxt,
                                       u64 value)
{
        qib_write_kreg(dd, regno + ctxt, value);
}
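
/*
 * Editor's usage sketch: per-context registers sit in consecutive u64
 * slots, one per context, so programming context N's receive header
 * address is just an indexed kreg write (rcd fields assumed):
 */
#if 0
        qib_write_kreg_ctxt(dd, krc_rcvhdraddr, rcd->ctxt,
                            rcd->rcvhdrq_phys);
#endif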

static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
{
        if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
                return 0;
        return readq(&dd->cspec->cregbase[regno]);
}

static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
{
        if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
                return 0;
        return readl(&dd->cspec->cregbase[regno]);
}

static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
                                        u16 regno, u64 value)
{
        if (ppd->cpspec && ppd->cpspec->cpregbase &&
            (ppd->dd->flags & QIB_PRESENT))
                writeq(value, &ppd->cpspec->cpregbase[regno]);
}

static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
                                      u16 regno)
{
        if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
            !(ppd->dd->flags & QIB_PRESENT))
                return 0;
        return readq(&ppd->cpspec->cpregbase[regno]);
}

static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
                                        u16 regno)
{
        if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
            !(ppd->dd->flags & QIB_PRESENT))
                return 0;
        return readl(&ppd->cpspec->cpregbase[regno]);
}

/* bits in Control register */
#define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
#define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)

/* bits in general interrupt regs */
#define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
#define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
#define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
#define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
#define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
#define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
#define QIB_I_C_ERROR INT_MASK(Err)

#define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
#define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
#define QIB_I_GPIO INT_MASK(AssertGPIO)
#define QIB_I_P_SDMAINT(pidx) \
        (INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
         INT_MASK_P(SDmaProgress, pidx) | \
         INT_MASK_PM(SDmaCleanupDone, pidx))
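
/*
 * Editor's sketch: the per-port SDMA interrupt causes can be tested as
 * one group, e.g. in an interrupt handler ("istat" hypothetical):
 */
#if 0
        if (istat & QIB_I_P_SDMAINT(pidx))
                ; /* dispatch to that port's SDMA handlers */
#endif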

/* Interrupt bits that are "per port" */
#define QIB_I_P_BITSEXTANT(pidx) \
        (INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
        INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
        INT_MASK_P(SDmaProgress, pidx) | \
        INT_MASK_PM(SDmaCleanupDone, pidx))

/* Interrupt bits that are common to a device */
/* currently unused: QIB_I_SPIOSENT */
#define QIB_I_C_BITSEXTANT \
        (QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
        QIB_I_SPIOSENT | \
        QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)

#define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
        QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))

/*
 * Error bits that are "per port".
 */
#define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
#define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
#define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
#define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
#define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
#define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
#define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
#define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
#define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
#define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
#define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
#define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
#define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
#define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
#define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
#define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
#define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
#define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
#define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
#define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
#define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
#define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
#define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
#define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
#define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
#define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
#define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
#define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)

#define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
#define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
#define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
#define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
#define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
#define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
#define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
#define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
#define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
#define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
#define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)

/* Error bits that are common to a device */
#define QIB_E_RESET ERR_MASK(ResetNegated)
#define QIB_E_HARDWARE ERR_MASK(HardwareErr)
#define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)

/*
 * Per chip (rather than per-port) errors.  Most either do
 * nothing but trigger a print (because they self-recover, or
 * always occur in tandem with other errors that handle the
 * issue), or indicate errors with no recovery, for which
 * we still want to know that they happened.
 */
#define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
#define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
#define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
#define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
#define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
#define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
#define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
#define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)

/* SDMA chip errors (not per port)
 * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
 * the SDMAHALT error immediately, so we just print the dup error via the
 * E_AUTO mechanism.  This is true of most of the per-port fatal errors
 * as well, but since this is port-independent, by definition, it's
 * handled a bit differently.  SDMA_VL15 and SDMA_WRONG_PORT are per
 * packet send errors, and so are handled in the same manner as other
 * per-packet errors.
 */
#define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
#define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
#define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)

/*
 * Below is functionally equivalent to the legacy QLOGIC_IB_E_PKTERRS;
 * it is used to print "common" packet errors.
 */
#define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
        QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
        QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
        QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
        QIB_E_P_REBP)

/* Error bits that are packet-related (Receive, per-port) */
#define QIB_E_P_RPKTERRS (\
        QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
        QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
        QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
        QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
        QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
        QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)

/*
 * Error bits that are Send-related (per port)
 * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
 * All of these potentially need to have a buffer disarmed
 */
#define QIB_E_P_SPKTERRS (\
        QIB_E_P_SUNEXP_PKTNUM |\
        QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
        QIB_E_P_SMAXPKTLEN |\
        QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
        QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
        QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)

#define QIB_E_SPKTERRS ( \
                QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
                ERR_MASK_N(SendUnsupportedVLErr) |                      \
                QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)

#define QIB_E_P_SDMAERRS ( \
        QIB_E_P_SDMAHALT | \
        QIB_E_P_SDMADESCADDRMISALIGN | \
        QIB_E_P_SDMAUNEXPDATA | \
        QIB_E_P_SDMAMISSINGDW | \
        QIB_E_P_SDMADWEN | \
        QIB_E_P_SDMARPYTAG | \
        QIB_E_P_SDMA1STDESC | \
        QIB_E_P_SDMABASE | \
        QIB_E_P_SDMATAILOUTOFBOUND | \
        QIB_E_P_SDMAOUTOFBOUND | \
        QIB_E_P_SDMAGENMISMATCH)

/*
 * This sets some bits more than once, but makes it more obvious which
 * bits are not handled under other categories, and the repeat definition
 * is not a problem.
 */
#define QIB_E_P_BITSEXTANT ( \
        QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
        QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
        QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
        QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
        )

/*
 * These are errors that can occur when the link
 * changes state while a packet is being sent or received.  This doesn't
 * cover things like EBP or VCRC that can be the result of a send
 * having the link change state, so we receive a "known bad" packet.
 * All of these are "per port", so renamed:
 */
#define QIB_E_P_LINK_PKTERRS (\
        QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
        QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
        QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
        QIB_E_P_RUNEXPCHAR)

/*
 * This sets some bits more than once, but makes it more obvious which
 * bits are not handled under other categories (such as QIB_E_SPKTERRS),
 * and the repeat definition is not a problem.
 */
#define QIB_E_C_BITSEXTANT (\
        QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
        QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
        QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)
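
/*
 * Editor's sketch: a device-level error handler typically masks the
 * ErrStatus read down to the bits defined above and then clears what it
 * saw (W1C via ErrClear), roughly:
 */
#if 0
        u64 errs = qib_read_kreg64(dd, kr_errstatus) & QIB_E_C_BITSEXTANT;

        qib_write_kreg(dd, kr_errclear, errs);
#endif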

/* Likewise Neuter E_SPKT_ERRS_IGNORE */
#define E_SPKT_ERRS_IGNORE 0

#define QIB_EXTS_MEMBIST_DISABLED \
        SYM_MASK(EXTStatus, MemBISTDisabled)
#define QIB_EXTS_MEMBIST_ENDTEST \
        SYM_MASK(EXTStatus, MemBISTEndTest)

#define QIB_E_SPIOARMLAUNCH \
        ERR_MASK(SendArmLaunchErr)

#define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
#define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)

/*
 * IBTA_1_2 is set when multiple speeds are enabled (normal),
 * and also if forced QDR (only QDR enabled).  It's enabled for the
 * forced QDR case so that scrambling will be enabled by the TS3
 * exchange, when supported by both sides of the link.
 */
#define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
#define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
#define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
#define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
#define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
#define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
        SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
#define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)

#define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
#define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)

#define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
#define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
#define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))

#define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
#define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
#define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
        SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
#define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
        SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
#define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)

#define IBA7322_REDIRECT_VEC_PER_REG 12

#define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
#define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
#define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
#define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
#define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)

#define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */

#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
        .msg = #fldname , .sz = sizeof(#fldname) }
#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
        fldname##Mask##_##port), .msg = #fldname , .sz = sizeof(#fldname) }
static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
        HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
        HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
        HWE_AUTO(PCIESerdesPClkNotDetect),
        HWE_AUTO(PowerOnBISTFailed),
        HWE_AUTO(TempsenseTholdReached),
        HWE_AUTO(MemoryErr),
        HWE_AUTO(PCIeBusParityErr),
        HWE_AUTO(PcieCplTimeout),
        HWE_AUTO(PciePoisonedTLP),
        HWE_AUTO_P(SDmaMemReadErr, 1),
        HWE_AUTO_P(SDmaMemReadErr, 0),
        HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
        HWE_AUTO_P(IBCBusToSPCParityErr, 1),
        HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
        HWE_AUTO(statusValidNoEop),
        HWE_AUTO(LATriggered),
        { .mask = 0, .sz = 0 }
};
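
/*
 * Editor's sketch: the .mask/.msg/.sz tables in this file are scanned
 * by the error-reporting helpers; a minimal walk over one of them looks
 * like ("hwerrs" hypothetical):
 */
#if 0
        const struct qib_hwerror_msgs *m;

        for (m = qib_7322_hwerror_msgs; m->sz; m++)
                if (hwerrs & m->mask)
                        pr_err("%s\n", m->msg);
#endif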
1214
1215#define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
1216        .msg = #fldname, .sz = sizeof(#fldname) }
1217#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
1218        .msg = #fldname, .sz = sizeof(#fldname) }
1219static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
1220        E_AUTO(RcvEgrFullErr),
1221        E_AUTO(RcvHdrFullErr),
1222        E_AUTO(ResetNegated),
1223        E_AUTO(HardwareErr),
1224        E_AUTO(InvalidAddrErr),
1225        E_AUTO(SDmaVL15Err),
1226        E_AUTO(SBufVL15MisUseErr),
1227        E_AUTO(InvalidEEPCmd),
1228        E_AUTO(RcvContextShareErr),
1229        E_AUTO(SendVLMismatchErr),
1230        E_AUTO(SendArmLaunchErr),
1231        E_AUTO(SendSpecialTriggerErr),
1232        E_AUTO(SDmaWrongPortErr),
1233        E_AUTO(SDmaBufMaskDuplicateErr),
1234        { .mask = 0, .sz = 0 }
1235};
1236
1237static const struct  qib_hwerror_msgs qib_7322p_error_msgs[] = {
1238        E_P_AUTO(IBStatusChanged),
1239        E_P_AUTO(SHeadersErr),
1240        E_P_AUTO(VL15BufMisuseErr),
1241        /*
1242         * SDmaHaltErr is not really an error, make it clearer;
1243         */
1244        {.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
1245                .sz = 11},
1246        E_P_AUTO(SDmaDescAddrMisalignErr),
1247        E_P_AUTO(SDmaUnexpDataErr),
1248        E_P_AUTO(SDmaMissingDwErr),
1249        E_P_AUTO(SDmaDwEnErr),
1250        E_P_AUTO(SDmaRpyTagErr),
1251        E_P_AUTO(SDma1stDescErr),
1252        E_P_AUTO(SDmaBaseErr),
1253        E_P_AUTO(SDmaTailOutOfBoundErr),
1254        E_P_AUTO(SDmaOutOfBoundErr),
1255        E_P_AUTO(SDmaGenMismatchErr),
1256        E_P_AUTO(SendBufMisuseErr),
1257        E_P_AUTO(SendUnsupportedVLErr),
1258        E_P_AUTO(SendUnexpectedPktNumErr),
1259        E_P_AUTO(SendDroppedDataPktErr),
1260        E_P_AUTO(SendDroppedSmpPktErr),
1261        E_P_AUTO(SendPktLenErr),
1262        E_P_AUTO(SendUnderRunErr),
1263        E_P_AUTO(SendMaxPktLenErr),
1264        E_P_AUTO(SendMinPktLenErr),
1265        E_P_AUTO(RcvIBLostLinkErr),
1266        E_P_AUTO(RcvHdrErr),
1267        E_P_AUTO(RcvHdrLenErr),
1268        E_P_AUTO(RcvBadTidErr),
1269        E_P_AUTO(RcvBadVersionErr),
1270        E_P_AUTO(RcvIBFlowErr),
1271        E_P_AUTO(RcvEBPErr),
1272        E_P_AUTO(RcvUnsupportedVLErr),
1273        E_P_AUTO(RcvUnexpectedCharErr),
1274        E_P_AUTO(RcvShortPktLenErr),
1275        E_P_AUTO(RcvLongPktLenErr),
1276        E_P_AUTO(RcvMaxPktLenErr),
1277        E_P_AUTO(RcvMinPktLenErr),
1278        E_P_AUTO(RcvICRCErr),
1279        E_P_AUTO(RcvVCRCErr),
1280        E_P_AUTO(RcvFormatErr),
1281        { .mask = 0, .sz = 0 }
1282};
1283
1284/*
1285 * Below generates "auto-message" for interrupts not specific to any port or
1286 * context
1287 */
1288#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
1289        .msg = #fldname, .sz = sizeof(#fldname) }
1290/* Below generates "auto-message" for interrupts specific to a port */
1291#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
1292        SYM_LSB(IntMask, fldname##Mask##_0), \
1293        SYM_LSB(IntMask, fldname##Mask##_1)), \
1294        .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
1295/* For some reason, the SerDesTrimDone bits are reversed */
1296#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
1297        SYM_LSB(IntMask, fldname##Mask##_1), \
1298        SYM_LSB(IntMask, fldname##Mask##_0)), \
1299        .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
1300/*
1301 * Below generates "auto-message" for interrupts specific to a context,
1302 * with ctxt-number appended
1303 */
1304#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
1305        SYM_LSB(IntMask, fldname##0IntMask), \
1306        SYM_LSB(IntMask, fldname##17IntMask)), \
1307        .msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
1308
1309static const struct  qib_hwerror_msgs qib_7322_intr_msgs[] = {
1310        INTR_AUTO_P(SDmaInt),
1311        INTR_AUTO_P(SDmaProgressInt),
1312        INTR_AUTO_P(SDmaIdleInt),
1313        INTR_AUTO_P(SDmaCleanupDone),
1314        INTR_AUTO_C(RcvUrg),
1315        INTR_AUTO_P(ErrInt),
1316        INTR_AUTO(ErrInt),      /* non-port-specific errs */
1317        INTR_AUTO(AssertGPIOInt),
1318        INTR_AUTO_P(SendDoneInt),
1319        INTR_AUTO(SendBufAvailInt),
1320        INTR_AUTO_C(RcvAvail),
1321        { .mask = 0, .sz = 0 }
1322};
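
/*
 * Hedged expansion sketch: INTR_AUTO_C(RcvUrg) above becomes roughly
 * the entry below, assuming MASK_ACROSS(first, last) sets every bit
 * from the first LSB through the last:
 */
#if 0
        { .mask = MASK_ACROSS(SYM_LSB(IntMask, RcvUrg0IntMask),
                              SYM_LSB(IntMask, RcvUrg17IntMask)),
          .msg = "RcvUrg_C", .sz = sizeof("RcvUrg_C") },
#endif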
1323
1324#define TXSYMPTOM_AUTO_P(fldname) \
1325        { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
1326        .msg = #fldname, .sz = sizeof(#fldname) }
1327static const struct  qib_hwerror_msgs hdrchk_msgs[] = {
1328        TXSYMPTOM_AUTO_P(NonKeyPacket),
1329        TXSYMPTOM_AUTO_P(GRHFail),
1330        TXSYMPTOM_AUTO_P(PkeyFail),
1331        TXSYMPTOM_AUTO_P(QPFail),
1332        TXSYMPTOM_AUTO_P(SLIDFail),
1333        TXSYMPTOM_AUTO_P(RawIPV6),
1334        TXSYMPTOM_AUTO_P(PacketTooSmall),
1335        { .mask = 0, .sz = 0 }
1336};
1337
1338#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
1339
1340/*
1341 * Called when we might have an error that is specific to a particular
1342 * PIO buffer, and may need to cancel that buffer so it can be re-used;
1343 * this path does not need to force an update of pioavail.
1344 */
1345static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
1346{
1347        struct qib_devdata *dd = ppd->dd;
1348        u32 i;
1349        int any;
1350        u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
1351        u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
1352        unsigned long sbuf[4];
1353
1354        /*
1355         * It's possible that sendbuffererror could have bits set; we
1356         * might have already done this as a result of hardware error handling.
1357         */
1358        any = 0;
1359        for (i = 0; i < regcnt; ++i) {
1360                sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
1361                if (sbuf[i]) {
1362                        any = 1;
1363                        qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
1364                }
1365        }
1366
1367        if (any)
1368                qib_disarm_piobufs_set(dd, sbuf, piobcnt);
1369}
1370
1371/* No txe_recover yet, if ever */
1372
1373/* No decode_errors yet */
1374static void err_decode(char *msg, size_t len, u64 errs,
1375                       const struct qib_hwerror_msgs *msp)
1376{
1377        u64 these, lmask;
1378        int took, multi, n = 0;
1379
1380        while (errs && msp && msp->mask) {
1381                multi = (msp->mask & (msp->mask - 1));
1382                while (errs & msp->mask) {
1383                        these = (errs & msp->mask);
1384                        lmask = (these & (these - 1)) ^ these;
1385                        if (len) {
1386                                if (n++) {
1387                                        /* separate the strings */
1388                                        *msg++ = ',';
1389                                        len--;
1390                                }
1391                                BUG_ON(!msp->sz);
1392                                /* msp->sz counts the nul */
1393                                took = min_t(size_t, msp->sz - (size_t)1, len);
1394                                memcpy(msg,  msp->msg, took);
1395                                len -= took;
1396                                msg += took;
1397                                if (len)
1398                                        *msg = '\0';
1399                        }
1400                        errs &= ~lmask;
1401                        if (len && multi) {
1402                                /* More than one bit in this mask */
1403                                int idx = -1;
1404
1405                                while (lmask & msp->mask) {
1406                                        ++idx;
1407                                        lmask >>= 1;
1408                                }
1409                                took = scnprintf(msg, len, "_%d", idx);
1410                                len -= took;
1411                                msg += took;
1412                        }
1413                }
1414                ++msp;
1415        }
1416        /* If some bits are left, show in hex. */
1417        if (len && errs)
1418                snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
1419                        (unsigned long long) errs);
1420}
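
/*
 * Hedged usage sketch for err_decode(): with both receive-"full" bits
 * pending, the buffer is filled in table order, comma-separated
 * (assuming ERR_MASK() builds ErrStatus bits as used elsewhere here):
 */
#if 0
        char buf[64];

        err_decode(buf, sizeof(buf),
                   ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr),
                   qib_7322error_msgs);
        /* buf now holds "RcvEgrFullErr,RcvHdrFullErr" */
#endif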
1421
1422/* only called if r1 set */
1423static void flush_fifo(struct qib_pportdata *ppd)
1424{
1425        struct qib_devdata *dd = ppd->dd;
1426        u32 __iomem *piobuf;
1427        u32 bufn;
1428        u32 *hdr;
1429        u64 pbc;
1430        const unsigned hdrwords = 7;
1431        static struct qib_ib_header ibhdr = {
1432                .lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
1433                .lrh[1] = IB_LID_PERMISSIVE,
1434                .lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
1435                .lrh[3] = IB_LID_PERMISSIVE,
1436                .u.oth.bth[0] = cpu_to_be32(
1437                        (IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
1438                .u.oth.bth[1] = cpu_to_be32(0),
1439                .u.oth.bth[2] = cpu_to_be32(0),
1440                .u.oth.u.ud.deth[0] = cpu_to_be32(0),
1441                .u.oth.u.ud.deth[1] = cpu_to_be32(0),
1442        };
1443
1444        /*
1445         * Send a dummy VL15 packet to flush the launch FIFO.
1446         * This will not actually be sent since the TxeBypassIbc bit is set.
1447         */
1448        pbc = PBC_7322_VL15_SEND |
1449                (((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
1450                (hdrwords + SIZE_OF_CRC);
1451        piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
1452        if (!piobuf)
1453                return;
1454        writeq(pbc, piobuf);
1455        hdr = (u32 *) &ibhdr;
1456        if (dd->flags & QIB_PIO_FLUSH_WC) {
1457                qib_flush_wc();
1458                qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
1459                qib_flush_wc();
1460                __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
1461                qib_flush_wc();
1462        } else
1463                qib_pio_copy(piobuf + 2, hdr, hdrwords);
1464        qib_sendbuf_done(dd, bufn);
1465}
1466
1467/*
1468 * This is called with interrupts disabled and sdma_lock held.
1469 */
1470static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
1471{
1472        struct qib_devdata *dd = ppd->dd;
1473        u64 set_sendctrl = 0;
1474        u64 clr_sendctrl = 0;
1475
1476        if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
1477                set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1478        else
1479                clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1480
1481        if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
1482                set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1483        else
1484                clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1485
1486        if (op & QIB_SDMA_SENDCTRL_OP_HALT)
1487                set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1488        else
1489                clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1490
1491        if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
1492                set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1493                                SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1494                                SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1495        else
1496                clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1497                                SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1498                                SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1499
1500        spin_lock(&dd->sendctrl_lock);
1501
1502        /* If we are draining everything, block sends first */
1503        if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1504                ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
1505                qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1506                qib_write_kreg(dd, kr_scratch, 0);
1507        }
1508
1509        ppd->p_sendctrl |= set_sendctrl;
1510        ppd->p_sendctrl &= ~clr_sendctrl;
1511
1512        if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
1513                qib_write_kreg_port(ppd, krp_sendctrl,
1514                                    ppd->p_sendctrl |
1515                                    SYM_MASK(SendCtrl_0, SDmaCleanup));
1516        else
1517                qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1518        qib_write_kreg(dd, kr_scratch, 0);
1519
1520        if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1521                ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
1522                qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1523                qib_write_kreg(dd, kr_scratch, 0);
1524        }
1525
1526        spin_unlock(&dd->sendctrl_lock);
1527
1528        if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
1529                flush_fifo(ppd);
1530}
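
/*
 * Hedged usage sketch: a caller already holding sdma_lock with
 * interrupts disabled could halt and drain the engine like this:
 */
#if 0
        qib_7322_sdma_sendctrl(ppd, QIB_SDMA_SENDCTRL_OP_HALT |
                                    QIB_SDMA_SENDCTRL_OP_DRAIN);
#endif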
1531
1532static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
1533{
1534        __qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
1535}
1536
1537static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
1538{
1539        /*
1540         * Set SendDmaLenGen and clear, then set,
1541         * the MSB of the generation count to enable generation checking
1542         * and load the internal generation counter.
1543         */
1544        qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
1545        qib_write_kreg_port(ppd, krp_senddmalengen,
1546                            ppd->sdma_descq_cnt |
1547                            (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
1548}
1549
1550/*
1551 * Must be called with sdma_lock held, or before init finished.
1552 */
1553static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
1554{
1555        /* Commit writes to memory and advance the tail on the chip */
1556        wmb();
1557        ppd->sdma_descq_tail = tail;
1558        qib_write_kreg_port(ppd, krp_senddmatail, tail);
1559}
1560
1561/*
1562 * This is called with interrupts disabled and sdma_lock held.
1563 */
1564static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
1565{
1566        /*
1567         * Drain all FIFOs.
1568         * The hardware doesn't require this but we do it so that verbs
1569         * and user applications don't wait for link active to send stale
1570         * data.
1571         */
1572        sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
1573
1574        qib_sdma_7322_setlengen(ppd);
1575        qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
1576        ppd->sdma_head_dma[0] = 0;
1577        qib_7322_sdma_sendctrl(ppd,
1578                ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
1579}
1580
1581#define DISABLES_SDMA ( \
1582        QIB_E_P_SDMAHALT | \
1583        QIB_E_P_SDMADESCADDRMISALIGN | \
1584        QIB_E_P_SDMAMISSINGDW | \
1585        QIB_E_P_SDMADWEN | \
1586        QIB_E_P_SDMARPYTAG | \
1587        QIB_E_P_SDMA1STDESC | \
1588        QIB_E_P_SDMABASE | \
1589        QIB_E_P_SDMATAILOUTOFBOUND | \
1590        QIB_E_P_SDMAOUTOFBOUND | \
1591        QIB_E_P_SDMAGENMISMATCH)
1592
1593static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
1594{
1595        unsigned long flags;
1596        struct qib_devdata *dd = ppd->dd;
1597
1598        errs &= QIB_E_P_SDMAERRS;
1599        err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf),
1600                   errs, qib_7322p_error_msgs);
1601
1602        if (errs & QIB_E_P_SDMAUNEXPDATA)
1603                qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
1604                            ppd->port);
1605
1606        spin_lock_irqsave(&ppd->sdma_lock, flags);
1607
1608        if (errs != QIB_E_P_SDMAHALT) {
1609                /* SDMA errors have QIB_E_P_SDMAHALT and another bit set */
1610                qib_dev_porterr(dd, ppd->port,
1611                        "SDMA %s 0x%016llx %s\n",
1612                        qib_sdma_state_names[ppd->sdma_state.current_state],
1613                        errs, ppd->cpspec->sdmamsgbuf);
1614                dump_sdma_7322_state(ppd);
1615        }
1616
1617        switch (ppd->sdma_state.current_state) {
1618        case qib_sdma_state_s00_hw_down:
1619                break;
1620
1621        case qib_sdma_state_s10_hw_start_up_wait:
1622                if (errs & QIB_E_P_SDMAHALT)
1623                        __qib_sdma_process_event(ppd,
1624                                qib_sdma_event_e20_hw_started);
1625                break;
1626
1627        case qib_sdma_state_s20_idle:
1628                break;
1629
1630        case qib_sdma_state_s30_sw_clean_up_wait:
1631                break;
1632
1633        case qib_sdma_state_s40_hw_clean_up_wait:
1634                if (errs & QIB_E_P_SDMAHALT)
1635                        __qib_sdma_process_event(ppd,
1636                                qib_sdma_event_e50_hw_cleaned);
1637                break;
1638
1639        case qib_sdma_state_s50_hw_halt_wait:
1640                if (errs & QIB_E_P_SDMAHALT)
1641                        __qib_sdma_process_event(ppd,
1642                                qib_sdma_event_e60_hw_halted);
1643                break;
1644
1645        case qib_sdma_state_s99_running:
1646                __qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
1647                __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
1648                break;
1649        }
1650
1651        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1652}
1653
1654/*
1655 * handle per-device errors (not per-port errors)
1656 */
1657static noinline void handle_7322_errors(struct qib_devdata *dd)
1658{
1659        char *msg;
1660        u64 iserr = 0;
1661        u64 errs;
1662        u64 mask;
1663        int log_idx;
1664
1665        qib_stats.sps_errints++;
1666        errs = qib_read_kreg64(dd, kr_errstatus);
1667        if (!errs) {
1668                qib_devinfo(dd->pcidev,
1669                        "device error interrupt, but no error bits set!\n");
1670                goto done;
1671        }
1672
1673        /* don't report errors that are masked */
1674        errs &= dd->cspec->errormask;
1675        msg = dd->cspec->emsgbuf;
1676
1677        /* do these first, they are most important */
1678        if (errs & QIB_E_HARDWARE) {
1679                *msg = '\0';
1680                qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
1681        } else
1682                for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
1683                        if (errs & dd->eep_st_masks[log_idx].errs_to_log)
1684                                qib_inc_eeprom_err(dd, log_idx, 1);
1685
1686        if (errs & QIB_E_SPKTERRS) {
1687                qib_disarm_7322_senderrbufs(dd->pport);
1688                qib_stats.sps_txerrs++;
1689        } else if (errs & QIB_E_INVALIDADDR)
1690                qib_stats.sps_txerrs++;
1691        else if (errs & QIB_E_ARMLAUNCH) {
1692                qib_stats.sps_txerrs++;
1693                qib_disarm_7322_senderrbufs(dd->pport);
1694        }
1695        qib_write_kreg(dd, kr_errclear, errs);
1696
1697        /*
1698         * The ones we mask off are handled specially below
1699         * or above.  Also mask SDMADISABLED by default as it
1700         * is too chatty.
1701         */
1702        mask = QIB_E_HARDWARE;
1703        *msg = '\0';
1704
1705        err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask,
1706                   qib_7322error_msgs);
1707
1708        /*
1709         * Getting reset is a tragedy for all ports. Mark the device
1710         * _and_ the ports as "offline" in a way meaningful to each.
1711         */
1712        if (errs & QIB_E_RESET) {
1713                int pidx;
1714
1715                qib_dev_err(dd,
1716                        "Got reset, requires re-init (unload and reload driver)\n");
1717                dd->flags &= ~QIB_INITTED;  /* needs re-init */
1718                /* mark as having had error */
1719                *dd->devstatusp |= QIB_STATUS_HWERROR;
1720                for (pidx = 0; pidx < dd->num_pports; ++pidx)
1721                        if (dd->pport[pidx].link_speed_supported)
1722                                *dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
1723        }
1724
1725        if (*msg && iserr)
1726                qib_dev_err(dd, "%s error\n", msg);
1727
1728        /*
1729         * If there were hdrq or egrfull errors, wake up any processes
1730         * waiting in poll.  We used to try to check which contexts had
1731         * the overflow, but given the cost of that and the chip reads
1732         * to support it, it's better to just wake everybody up if we
1733         * get an overflow; waiters can poll again if it's not them.
1734         */
1735        if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
1736                qib_handle_urcv(dd, ~0U);
1737                if (errs & ERR_MASK(RcvEgrFullErr))
1738                        qib_stats.sps_buffull++;
1739                else
1740                        qib_stats.sps_hdrfull++;
1741        }
1742
1743done:
1744        return;
1745}
1746
1747static void qib_error_tasklet(unsigned long data)
1748{
1749        struct qib_devdata *dd = (struct qib_devdata *)data;
1750
1751        handle_7322_errors(dd);
1752        qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1753}
1754
1755static void reenable_chase(unsigned long opaque)
1756{
1757        struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
1758
1759        ppd->cpspec->chase_timer.expires = 0;
1760        qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1761                QLOGIC_IB_IBCC_LINKINITCMD_POLL);
1762}
1763
1764static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow,
1765                u8 ibclt)
1766{
1767        ppd->cpspec->chase_end = 0;
1768
1769        if (!qib_chase)
1770                return;
1771
1772        qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1773                QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1774        ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
1775        add_timer(&ppd->cpspec->chase_timer);
1776}
1777
1778static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
1779{
1780        u8 ibclt;
1781        unsigned long tnow;
1782
1783        ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
1784
1785        /*
1786         * Detect and handle the state chase issue, where we can
1787         * get stuck if we are unlucky on timing on both sides of
1788         * the link.   If we are, we disable, set a timer, and
1789         * then re-enable.
1790         */
1791        switch (ibclt) {
1792        case IB_7322_LT_STATE_CFGRCVFCFG:
1793        case IB_7322_LT_STATE_CFGWAITRMT:
1794        case IB_7322_LT_STATE_TXREVLANES:
1795        case IB_7322_LT_STATE_CFGENH:
1796                tnow = jiffies;
1797                if (ppd->cpspec->chase_end &&
1798                     time_after(tnow, ppd->cpspec->chase_end))
1799                        disable_chase(ppd, tnow, ibclt);
1800                else if (!ppd->cpspec->chase_end)
1801                        ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
1802                break;
1803        default:
1804                ppd->cpspec->chase_end = 0;
1805                break;
1806        }
1807
1808        if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
1809              ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
1810             ibclt == IB_7322_LT_STATE_LINKUP) &&
1811            (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
1812                force_h1(ppd);
1813                ppd->cpspec->qdr_reforce = 1;
1814                if (!ppd->dd->cspec->r1)
1815                        serdes_7322_los_enable(ppd, 0);
1816        } else if (ppd->cpspec->qdr_reforce &&
1817                (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
1818                 (ibclt == IB_7322_LT_STATE_CFGENH ||
1819                ibclt == IB_7322_LT_STATE_CFGIDLE ||
1820                ibclt == IB_7322_LT_STATE_LINKUP))
1821                force_h1(ppd);
1822
1823        if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
1824            ppd->link_speed_enabled == QIB_IB_QDR &&
1825            (ibclt == IB_7322_LT_STATE_CFGTEST ||
1826             ibclt == IB_7322_LT_STATE_CFGENH ||
1827             (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
1828              ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
1829                adj_tx_serdes(ppd);
1830
1831        if (ibclt != IB_7322_LT_STATE_LINKUP) {
1832                u8 ltstate = qib_7322_phys_portstate(ibcst);
1833                u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
1834                                          LinkTrainingState);
1835                if (!ppd->dd->cspec->r1 &&
1836                    pibclt == IB_7322_LT_STATE_LINKUP &&
1837                    ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1838                    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1839                    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1840                    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1841                        /* If the link went down (but not into recovery),
1842                         * turn LOS back on */
1843                        serdes_7322_los_enable(ppd, 1);
1844                if (!ppd->cpspec->qdr_dfe_on &&
1845                    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
1846                        ppd->cpspec->qdr_dfe_on = 1;
1847                        ppd->cpspec->qdr_dfe_time = 0;
1848                        /* On link down, reenable QDR adaptation */
1849                        qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
1850                                            ppd->dd->cspec->r1 ?
1851                                            QDR_STATIC_ADAPT_DOWN_R1 :
1852                                            QDR_STATIC_ADAPT_DOWN);
1853                        pr_info(
1854                                "IB%u:%u re-enabled QDR adaptation ibclt %x\n",
1855                                ppd->dd->unit, ppd->port, ibclt);
1856                }
1857        }
1858}
1859
1860static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
1861
1862/*
1863 * This is per-pport error handling.
1864 * It will likely get its own MSIx interrupt (one for each port,
1865 * although just a single handler).
1866 */
1867static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1868{
1869        char *msg;
1870        u64 ignore_this_time = 0, iserr = 0, errs, fmask;
1871        struct qib_devdata *dd = ppd->dd;
1872
1873        /* do this as soon as possible */
1874        fmask = qib_read_kreg64(dd, kr_act_fmask);
1875        if (!fmask)
1876                check_7322_rxe_status(ppd);
1877
1878        errs = qib_read_kreg_port(ppd, krp_errstatus);
1879        if (!errs)
1880                qib_devinfo(dd->pcidev,
1881                         "Port%d error interrupt, but no error bits set!\n",
1882                         ppd->port);
1883        if (!fmask)
1884                errs &= ~QIB_E_P_IBSTATUSCHANGED;
1885        if (!errs)
1886                goto done;
1887
1888        msg = ppd->cpspec->epmsgbuf;
1889        *msg = '\0';
1890
1891        if (errs & ~QIB_E_P_BITSEXTANT) {
1892                err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
1893                           errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
1894                if (!*msg)
1895                        snprintf(msg, sizeof ppd->cpspec->epmsgbuf,
1896                                 "no others");
1897                qib_dev_porterr(dd, ppd->port,
1898                        "error interrupt with unknown errors 0x%016Lx set (and %s)\n",
1899                        (errs & ~QIB_E_P_BITSEXTANT), msg);
1900                *msg = '\0';
1901        }
1902
1903        if (errs & QIB_E_P_SHDR) {
1904                u64 symptom;
1905
1906                /* determine cause, then write to clear */
1907                symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
1908                qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
1909                err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom,
1910                           hdrchk_msgs);
1911                *msg = '\0';
1912                /* senderrbuf cleared in SPKTERRS below */
1913        }
1914
1915        if (errs & QIB_E_P_SPKTERRS) {
1916                if ((errs & QIB_E_P_LINK_PKTERRS) &&
1917                    !(ppd->lflags & QIBL_LINKACTIVE)) {
1918                        /*
1919                         * This can happen when trying to bring the link
1920                         * up, but the IB link changes state at the "wrong"
1921                         * time. The IB logic then complains that the packet
1922                         * isn't valid.  We don't want to confuse people, so
1923                         * we just don't print them, except at debug
1924                         */
1925                        err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
1926                                   (errs & QIB_E_P_LINK_PKTERRS),
1927                                   qib_7322p_error_msgs);
1928                        *msg = '\0';
1929                        ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1930                }
1931                qib_disarm_7322_senderrbufs(ppd);
1932        } else if ((errs & QIB_E_P_LINK_PKTERRS) &&
1933                   !(ppd->lflags & QIBL_LINKACTIVE)) {
1934                /*
1935                 * This can happen when SMA is trying to bring the link
1936                 * up, but the IB link changes state at the "wrong" time.
1937                 * The IB logic then complains that the packet isn't
1938                 * valid.  We don't want to confuse people, so we just
1939                 * don't print them, except at debug
1940                 */
1941                err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs,
1942                           qib_7322p_error_msgs);
1943                ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1944                *msg = '\0';
1945        }
1946
1947        qib_write_kreg_port(ppd, krp_errclear, errs);
1948
1949        errs &= ~ignore_this_time;
1950        if (!errs)
1951                goto done;
1952
1953        if (errs & QIB_E_P_RPKTERRS)
1954                qib_stats.sps_rcverrs++;
1955        if (errs & QIB_E_P_SPKTERRS)
1956                qib_stats.sps_txerrs++;
1957
1958        iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
1959
1960        if (errs & QIB_E_P_SDMAERRS)
1961                sdma_7322_p_errors(ppd, errs);
1962
1963        if (errs & QIB_E_P_IBSTATUSCHANGED) {
1964                u64 ibcs;
1965                u8 ltstate;
1966
1967                ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
1968                ltstate = qib_7322_phys_portstate(ibcs);
1969
1970                if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
1971                        handle_serdes_issues(ppd, ibcs);
1972                if (!(ppd->cpspec->ibcctrl_a &
1973                      SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
1974                        /*
1975                         * We got our interrupt, so init code should be
1976                         * happy and not try alternatives. Now squelch
1977                         * other "chatter" from link-negotiation (pre Init)
1978                         */
1979                        ppd->cpspec->ibcctrl_a |=
1980                                SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
1981                        qib_write_kreg_port(ppd, krp_ibcctrl_a,
1982                                            ppd->cpspec->ibcctrl_a);
1983                }
1984
1985                /* Update our picture of width and speed from chip */
1986                ppd->link_width_active =
1987                        (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
1988                            IB_WIDTH_4X : IB_WIDTH_1X;
1989                ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
1990                        LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
1991                          SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
1992                                   QIB_IB_DDR : QIB_IB_SDR;
1993
1994                if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
1995                    IB_PHYSPORTSTATE_DISABLED)
1996                        qib_set_ib_7322_lstate(ppd, 0,
1997                               QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1998                else
1999                        /*
2000                         * Since going into a recovery state causes the link
2001                         * state to go down and since recovery is transitory,
2002                         * it is better if we "miss" ever seeing the link
2003                         * training state go into recovery (i.e., ignore this
2004                         * transition for link state special handling purposes)
2005                         * without updating lastibcstat.
2006                         */
2007                        if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
2008                            ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
2009                            ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
2010                            ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
2011                                qib_handle_e_ibstatuschanged(ppd, ibcs);
2012        }
2013        if (*msg && iserr)
2014                qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
2015
2016        if (ppd->state_wanted & ppd->lflags)
2017                wake_up_interruptible(&ppd->state_wait);
2018done:
2019        return;
2020}
2021
2022/* enable/disable chip from delivering interrupts */
2023static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
2024{
2025        if (enable) {
2026                if (dd->flags & QIB_BADINTR)
2027                        return;
2028                qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
2029                /* cause any pending enabled interrupts to be re-delivered */
2030                qib_write_kreg(dd, kr_intclear, 0ULL);
2031                if (dd->cspec->num_msix_entries) {
2032                        /* and same for MSIx */
2033                        u64 val = qib_read_kreg64(dd, kr_intgranted);
2034                        if (val)
2035                                qib_write_kreg(dd, kr_intgranted, val);
2036                }
2037        } else
2038                qib_write_kreg(dd, kr_intmask, 0ULL);
2039}
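
/*
 * Hedged usage sketch: qib_7322_clear_freeze() below uses this helper
 * to quiesce delivery while reprogramming masks, then re-arm:
 */
#if 0
        qib_7322_set_intr_state(dd, 0); /* block interrupt delivery */
        /* ... rewrite errmask/errclear registers ... */
        qib_7322_set_intr_state(dd, 1); /* re-arm; pending intrs redeliver */
#endif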
2040
2041/*
2042 * Try to cleanup as much as possible for anything that might have gone
2043 * wrong while in freeze mode, such as pio buffers being written by user
2044 * processes (causing armlaunch), send errors due to going into freeze mode,
2045 * etc., and try to avoid causing extra interrupts while doing so.
2046 * Forcibly update the in-memory pioavail register copies after cleanup
2047 * because the chip won't do it while in freeze mode (the register values
2048 * themselves are kept correct).
2049 * Make sure that we don't lose any important interrupts by using the chip
2050 * feature that says that writing 0 to a bit in *clear that is set in
2051 * *status will cause an interrupt to be generated again (if allowed by
2052 * the *mask value).
2053 * This is in chip-specific code because of all of the register accesses,
2054 * even though the details are similar on most chips.
2055 */
2056static void qib_7322_clear_freeze(struct qib_devdata *dd)
2057{
2058        int pidx;
2059
2060        /* disable error interrupts, to avoid confusion */
2061        qib_write_kreg(dd, kr_errmask, 0ULL);
2062
2063        for (pidx = 0; pidx < dd->num_pports; ++pidx)
2064                if (dd->pport[pidx].link_speed_supported)
2065                        qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2066                                            0ULL);
2067
2068        /* also disable interrupts; errormask is sometimes overwritten */
2069        qib_7322_set_intr_state(dd, 0);
2070
2071        /* clear the freeze, and be sure chip saw it */
2072        qib_write_kreg(dd, kr_control, dd->control);
2073        qib_read_kreg32(dd, kr_scratch);
2074
2075        /*
2076         * Force new interrupt if any hwerr, error or interrupt bits are
2077         * still set, and clear "safe" send packet errors related to freeze
2078         * and cancelling sends.  Re-enable error interrupts before possible
2079         * force of re-interrupt on pending interrupts.
2080         */
2081        qib_write_kreg(dd, kr_hwerrclear, 0ULL);
2082        qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
2083        qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2084        /* We need to purge per-port errs and reset mask, too */
2085        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
2086                if (!dd->pport[pidx].link_speed_supported)
2087                        continue;
2088                qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0ULL);
2089                qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0ULL);
2090        }
2091        qib_7322_set_intr_state(dd, 1);
2092}
2093
2094/* no error handling to speak of */
2095/**
2096 * qib_7322_handle_hwerrors - display hardware errors.
2097 * @dd: the qlogic_ib device
2098 * @msg: the output buffer
2099 * @msgl: the size of the output buffer
2100 *
2101 * Most hardware errors are catastrophic, but for right now,
2102 * we'll print them and continue.  We reuse the same message
2103 * buffer as qib_handle_errors() to avoid excessive stack
2104 * usage.
2105 */
2106static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
2107                                     size_t msgl)
2108{
2109        u64 hwerrs;
2110        u32 ctrl;
2111        int isfatal = 0;
2112
2113        hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
2114        if (!hwerrs)
2115                goto bail;
2116        if (hwerrs == ~0ULL) {
2117                qib_dev_err(dd,
2118                        "Read of hardware error status failed (all bits set); ignoring\n");
2119                goto bail;
2120        }
2121        qib_stats.sps_hwerrs++;
2122
2123        /* Always clear the error status register, except BIST fail */
2124        qib_write_kreg(dd, kr_hwerrclear, hwerrs &
2125                       ~HWE_MASK(PowerOnBISTFailed));
2126
2127        hwerrs &= dd->cspec->hwerrmask;
2128
2129        /* no EEPROM logging, yet */
2130
2131        if (hwerrs)
2132                qib_devinfo(dd->pcidev,
2133                        "Hardware error: hwerr=0x%llx (cleared)\n",
2134                        (unsigned long long) hwerrs);
2135
2136        ctrl = qib_read_kreg32(dd, kr_control);
2137        if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
2138                /*
2139                 * No recovery yet...
2140                 */
2141                if ((hwerrs & ~HWE_MASK(LATriggered)) ||
2142                    dd->cspec->stay_in_freeze) {
2143                        /*
2144                         * If any bits we aren't ignoring are set, only
2145                         * make the complaint once, in case it's stuck
2146                         * or recurring and we get here multiple times.
2147                         * Force the link down, so the switch knows, and
2148                         * the LEDs are turned off.
2149                         */
2150                        if (dd->flags & QIB_INITTED)
2151                                isfatal = 1;
2152                } else
2153                        qib_7322_clear_freeze(dd);
2154        }
2155
2156        if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
2157                isfatal = 1;
2158                strlcpy(msg,
2159                        "[Memory BIST test failed, InfiniPath hardware unusable]",
2160                        msgl);
2161                /* ignore from now on, so disable until driver reloaded */
2162                dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
2163                qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2164        }
2165
2166        err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);
2167
2168        /* Ignore esoteric PLL failures et al. */
2169
2170        qib_dev_err(dd, "%s hardware error\n", msg);
2171
2172        if (hwerrs &
2173                   (SYM_MASK(HwErrMask, SDmaMemReadErrMask_0) |
2174                    SYM_MASK(HwErrMask, SDmaMemReadErrMask_1))) {
2175                int pidx = 0;
2176                int err;
2177                unsigned long flags;
2178                struct qib_pportdata *ppd = dd->pport;
2179                for (; pidx < dd->num_pports; ++pidx, ppd++) {
2180                        err = 0;
2181                        if (pidx == 0 && (hwerrs &
2182                                SYM_MASK(HwErrMask, SDmaMemReadErrMask_0)))
2183                                err++;
2184                        if (pidx == 1 && (hwerrs &
2185                                SYM_MASK(HwErrMask, SDmaMemReadErrMask_1)))
2186                                err++;
2187                        if (err) {
2188                                spin_lock_irqsave(&ppd->sdma_lock, flags);
2189                                dump_sdma_7322_state(ppd);
2190                                spin_unlock_irqrestore(&ppd->sdma_lock, flags);
2191                        }
2192                }
2193        }
2194
2195        if (isfatal && !dd->diag_client) {
2196                qib_dev_err(dd,
2197                        "Fatal Hardware Error, no longer usable, SN %.16s\n",
2198                        dd->serial);
2199                /*
2200                 * for /sys status file and user programs to print; if no
2201                 * trailing brace is copied, we'll know it was truncated.
2202                 */
2203                if (dd->freezemsg)
2204                        snprintf(dd->freezemsg, dd->freezelen,
2205                                 "{%s}", msg);
2206                qib_disable_after_error(dd);
2207        }
2208bail:;
2209}
2210
2211/**
2212 * qib_7322_init_hwerrors - enable hardware errors
2213 * @dd: the qlogic_ib device
2214 *
2215 * Now that we have finished initializing everything that might reasonably
2216 * cause a hardware error, and have cleared those error bits as they occur,
2217 * we can enable hardware errors in the mask (potentially enabling
2218 * freeze mode), and enable hardware errors as errors (along with
2219 * everything else) in errormask.
2220 */
2221static void qib_7322_init_hwerrors(struct qib_devdata *dd)
2222{
2223        int pidx;
2224        u64 extsval;
2225
2226        extsval = qib_read_kreg64(dd, kr_extstatus);
2227        if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
2228                         QIB_EXTS_MEMBIST_ENDTEST)))
2229                qib_dev_err(dd, "MemBIST did not complete!\n");
2230
2231        /* never clear BIST failure, so reported on each driver load */
2232        qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
2233        qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2234
2235        /* clear all */
2236        qib_write_kreg(dd, kr_errclear, ~0ULL);
2237        /* enable errors that are masked, at least this first time. */
2238        qib_write_kreg(dd, kr_errmask, ~0ULL);
2239        dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
2240        for (pidx = 0; pidx < dd->num_pports; ++pidx)
2241                if (dd->pport[pidx].link_speed_supported)
2242                        qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2243                                            ~0ULL);
2244}
2245
2246/*
2247 * Disable and enable the armlaunch error.  Used for PIO bandwidth testing
2248 * on chips that are count-based, rather than trigger-based.  There is no
2249 * reference counting, but that's also fine, given the intended use.
2250 * Only chip-specific because it's all register accesses
2251 */
2252static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
2253{
2254        if (enable) {
2255                qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
2256                dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
2257        } else
2258                dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
2259        qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2260}
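
/*
 * Hedged usage sketch for the helper above, e.g. bracketing a PIO
 * bandwidth test that would otherwise trip armlaunch errors:
 */
#if 0
        qib_set_7322_armlaunch(dd, 0);  /* mask armlaunch reporting */
        /* ... run PIO bandwidth test ... */
        qib_set_7322_armlaunch(dd, 1);  /* clear and re-enable */
#endif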
2261
2262/*
2263 * Formerly took parameter <which> in pre-shifted,
2264 * pre-merged form with LinkCmd and LinkInitCmd
2265 * together, and assuming zero was a NOP.
2266 */
2267static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
2268                                   u16 linitcmd)
2269{
2270        u64 mod_wd;
2271        struct qib_devdata *dd = ppd->dd;
2272        unsigned long flags;
2273
2274        if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
2275                /*
2276                 * If we are told to disable, note that so link-recovery
2277                 * code does not attempt to bring us back up.
2278                 * Also reset everything that we can, so we start
2279                 * completely clean when re-enabled (before we
2280                 * actually issue the disable to the IBC)
2281                 */
2282                qib_7322_mini_pcs_reset(ppd);
2283                spin_lock_irqsave(&ppd->lflags_lock, flags);
2284                ppd->lflags |= QIBL_IB_LINK_DISABLED;
2285                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2286        } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
2287                /*
2288                 * Any other linkinitcmd will lead to LINKDOWN and then
2289                 * to INIT (if all is well), so clear flag to let
2290                 * link-recovery code attempt to bring us back up.
2291                 */
2292                spin_lock_irqsave(&ppd->lflags_lock, flags);
2293                ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
2294                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2295                /*
2296                 * Clear status change interrupt reduction so the
2297                 * new state is seen.
2298                 */
2299                ppd->cpspec->ibcctrl_a &=
2300                        ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
2301        }
2302
2303        mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
2304                (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2305
2306        qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
2307                            mod_wd);
2308        /* write to chip to prevent back-to-back writes of ibc reg */
2309        qib_write_kreg(dd, kr_scratch, 0);
2310
2311}
2312
2313/*
2314 * The total RCV buffer memory is 64KB, used for both ports, and is
2315 * in units of 64 bytes (same as IB flow control credit unit).
2316 * The consumedVL units in the same registers are in 32-byte units!
2317 * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
2318 * and we can therefore allocate just 9 IB credits for 2 VL15 packets
2319 * in krp_rxcreditvl15, rather than 10.
2320 */
2321#define RCV_BUF_UNITSZ 64
2322#define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
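
/*
 * Worked numbers (a sketch; assumes a dual-port card): 64KB / 64B gives
 * 1024 total units, so NUM_RCV_BUF_UNITS() yields 512 per port.  One
 * 288-byte VL15 packet costs 288 / 64 = 4.5 credits, so two packets
 * round up to the 9 credits reserved in set_vls() below.
 */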
2323
2324static void set_vls(struct qib_pportdata *ppd)
2325{
2326        int i, numvls, totcred, cred_vl, vl0extra;
2327        struct qib_devdata *dd = ppd->dd;
2328        u64 val;
2329
2330        numvls = qib_num_vls(ppd->vls_operational);
2331
2332        /*
2333         * Set up per-VL credits. Below is a kluge based on these assumptions:
2334         * 1) port is disabled at the time early_init is called.
2335         * 2) give VL15 9 credits, for two max-plausible (288-byte) packets.
2336         * 3) Give VL0-N the rest, with any rounding excess used for VL0
2337         */
2338        /* 2 VL15 packets @ 288 bytes each (including IB headers) */
2339        totcred = NUM_RCV_BUF_UNITS(dd);
2340        cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
2341        totcred -= cred_vl;
2342        qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
2343        cred_vl = totcred / numvls;
2344        vl0extra = totcred - cred_vl * numvls;
2345        qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
2346        for (i = 1; i < numvls; i++)
2347                qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
2348        for (; i < 8; i++) /* no buffer space for other VLs */
2349                qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
2350
2351        /* Notify IBC that credits need to be recalculated */
2352        val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
2353        val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2354        qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2355        qib_write_kreg(dd, kr_scratch, 0ULL);
2356        val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2357        qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2358
2359        for (i = 0; i < numvls; i++)
2360                val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
2361        val = qib_read_kreg_port(ppd, krp_rxcreditvl15);
2362
2363        /* Change the number of operational VLs */
2364        ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
2365                                ~SYM_MASK(IBCCtrlA_0, NumVLane)) |
2366                ((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
2367        qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2368        qib_write_kreg(dd, kr_scratch, 0ULL);
2369}
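
/*
 * Worked example for the credit split above (hedged; dual-port card
 * with 4 operational VLs): totcred = 512 - 9 = 503 units, so
 * cred_vl = 503 / 4 = 125 with vl0extra = 3; VL0 gets 128 credits
 * and VL1-VL3 get 125 each.
 */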
2370
2371/*
2372 * The code that deals with actual SerDes is in serdes_7322_init().
2373 * Compared to the code for iba7220, it is minimal.
2374 */
2375static int serdes_7322_init(struct qib_pportdata *ppd);
2376
2377/**
2378 * qib_7322_bringup_serdes - bring up the serdes
2379 * @ppd: physical port on the qlogic_ib device
2380 */
2381static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2382{
2383        struct qib_devdata *dd = ppd->dd;
2384        u64 val, guid, ibc;
2385        unsigned long flags;
2386        int ret = 0;
2387
2388        /*
2389         * SerDes model not in Pd, but still need to
2390         * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
2391         * eventually.
2392         */
2393        /* Put IBC in reset, sends disabled (should be in reset already) */
2394        ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2395        qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2396        qib_write_kreg(dd, kr_scratch, 0ULL);
2397
2398        /* ensure previous Tx parameters are not still forced */
2399        qib_write_kreg_port(ppd, krp_tx_deemph_override,
2400                SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
2401                reset_tx_deemphasis_override));
2402
2403        if (qib_compat_ddr_negotiate) {
2404                ppd->cpspec->ibdeltainprog = 1;
2405                ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
2406                                                crp_ibsymbolerr);
2407                ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
2408                                                crp_iblinkerrrecov);
2409        }
2410
2411        /* flowcontrolwatermark is in units of KBytes */
2412        ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
2413        /*
2414         * Flow control is sent this often, even if no changes in
2415         * buffer space occur.  Units are 128ns for this chip.
2416         * Set to 3usec.
2417         */
2418        ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
2419        /* max error tolerance */
2420        ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
2421        /* IB credit flow control. */
2422        ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
2423        /*
2424         * set initial max size pkt IBC will send, including ICRC; it's the
2425         * PIO buffer size in dwords, less 1; also see qib_set_mtu()
2426         */
2427        ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
2428                SYM_LSB(IBCCtrlA_0, MaxPktLen);
2429        ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
2430
2431        /*
2432         * Reset the PCS interface to the serdes (and also ibc, which is still
2433         * in reset from above).  Writes new value of ibcctrl_a as last step.
2434         */
2435        qib_7322_mini_pcs_reset(ppd);
2436
2437        if (!ppd->cpspec->ibcctrl_b) {
2438                unsigned lse = ppd->link_speed_enabled;
2439
2440                /*
2441                 * Not on re-init after reset, establish shadow
2442                 * and force initial config.
2443                 */
2444                ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
2445                                                             krp_ibcctrl_b);
2446                ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
2447                                IBA7322_IBC_SPEED_DDR |
2448                                IBA7322_IBC_SPEED_SDR |
2449                                IBA7322_IBC_WIDTH_AUTONEG |
2450                                SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
2451                if (lse & (lse - 1)) /* Multiple speeds enabled */
2452                        ppd->cpspec->ibcctrl_b |=
2453                                (lse << IBA7322_IBC_SPEED_LSB) |
2454                                IBA7322_IBC_IBTA_1_2_MASK |
2455                                IBA7322_IBC_MAX_SPEED_MASK;
2456                else
2457                        ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
2458                                IBA7322_IBC_SPEED_QDR |
2459                                 IBA7322_IBC_IBTA_1_2_MASK :
2460                                (lse == QIB_IB_DDR) ?
2461                                        IBA7322_IBC_SPEED_DDR :
2462                                        IBA7322_IBC_SPEED_SDR;
2463                if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
2464                    (IB_WIDTH_1X | IB_WIDTH_4X))
2465                        ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
2466                else
2467                        ppd->cpspec->ibcctrl_b |=
2468                                ppd->link_width_enabled == IB_WIDTH_4X ?
2469                                IBA7322_IBC_WIDTH_4X_ONLY :
2470                                IBA7322_IBC_WIDTH_1X_ONLY;
2471
2472                /* always enable these on driver reload, not sticky */
2473                ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
2474                        IBA7322_IBC_HRTBT_MASK);
2475        }
2476        qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
2477
2478        /* setup so we have more time at CFGTEST to change H1 */
2479        val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
2480        val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
2481        val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
2482        qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
2483
2484        serdes_7322_init(ppd);
2485
2486        guid = be64_to_cpu(ppd->guid);
2487        if (!guid) {
2488                if (dd->base_guid)
2489                        guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
2490                ppd->guid = cpu_to_be64(guid);
2491        }
2492
2493        qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
2494        /* write to chip to prevent back-to-back writes of ibc reg */
2495        qib_write_kreg(dd, kr_scratch, 0);
2496
2497        /* Enable port */
2498        ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
2499        set_vls(ppd);
2500
2501        /* initially come up DISABLED, without sending anything. */
2502        val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
2503                                        QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2504        qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
2505        qib_write_kreg(dd, kr_scratch, 0ULL);
2506        /* clear the linkinit cmds */
2507        ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
2508
2509        /* be paranoid against later code motion, etc. */
2510        spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2511        ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
2512        qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
2513        spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2514
2515        /* Also enable IBSTATUSCHG interrupt.  */
2516        val = qib_read_kreg_port(ppd, krp_errmask);
2517        qib_write_kreg_port(ppd, krp_errmask,
2518                val | ERR_MASK_N(IBStatusChanged));
2519
2520        /* Always zero until we start messing with SerDes for real */
2521        return ret;
2522}
2523
2524/**
2525 * qib_7322_mini_quiet_serdes - set serdes to txidle
2526 * @ppd: physical port on the qlogic_ib device
2527 * Called when driver is being unloaded
2528 */
2529static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
2530{
2531        u64 val;
2532        unsigned long flags;
2533
2534        qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
2535
2536        spin_lock_irqsave(&ppd->lflags_lock, flags);
2537        ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
2538        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2539        wake_up(&ppd->cpspec->autoneg_wait);
2540        cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
2541        if (ppd->dd->cspec->r1)
2542                cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
2543
2544        ppd->cpspec->chase_end = 0;
2545        if (ppd->cpspec->chase_timer.data) /* if initted */
2546                del_timer_sync(&ppd->cpspec->chase_timer);
2547
2548        /*
2549         * Despite the name, actually disables IBC as well. Do it when
2550         * we are as sure as possible that no more packets can be
2551         * received, following the down and the PCS reset.
2552         * The actual disabling happens in qib_7322_mini_pcs_reset(),
2553         * along with the PCS being reset.
2554         */
2555        ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2556        qib_7322_mini_pcs_reset(ppd);
2557
2558        /*
2559         * Update the adjusted counters so the adjustment persists
2560         * across driver reload.
2561         */
2562        if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
2563            ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
2564                struct qib_devdata *dd = ppd->dd;
2565                u64 diagc;
2566
2567                /* enable counter writes */
2568                diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
2569                qib_write_kreg(dd, kr_hwdiagctrl,
2570                               diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
2571
2572                if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
2573                        val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
2574                        if (ppd->cpspec->ibdeltainprog)
2575                                val = ppd->cpspec->ibsymsnap;
2576                        val -= ppd->cpspec->ibsymdelta;
2577                        write_7322_creg_port(ppd, crp_ibsymbolerr, val);
2578                }
2579                if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
2580                        val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
2581                        if (ppd->cpspec->ibdeltainprog)
2582                                val = ppd->cpspec->iblnkerrsnap;
2583                        val -= ppd->cpspec->iblnkerrdelta;
2584                        write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
2585                }
2586                if (ppd->cpspec->iblnkdowndelta) {
2587                        val = read_7322_creg32_port(ppd, crp_iblinkdown);
2588                        val += ppd->cpspec->iblnkdowndelta;
2589                        write_7322_creg_port(ppd, crp_iblinkdown, val);
2590                }
2591                /*
2592                 * No need to save ibmalfdelta since IB perfcounters
2593                 * are cleared on driver reload.
2594                 */
2595
2596                /* and disable counter writes */
2597                qib_write_kreg(dd, kr_hwdiagctrl, diagc);
2598        }
2599}
2600
2601/**
2602 * qib_setup_7322_setextled - set the state of the two external LEDs
2603 * @ppd: physical port on the qlogic_ib device
2604 * @on: whether the link is up or not
2605 *
2606 * When @on is true, the exact combination of LEDs lit is
2607 * determined by looking at the ibcstatus.
2608 *
2609 * These LEDs indicate the physical and logical state of the IB link.
2610 * For this chip (at least with recommended board pinouts), LED1
2611 * is Yellow (logical state) and LED2 is Green (physical state).
2612 *
2613 * Note:  We try to match the Mellanox HCA LED behavior as best
2614 * we can.  Green indicates physical link state is OK (something is
2615 * plugged in, and we can train).
2616 * Amber indicates the link is logically up (ACTIVE).
2617 * Mellanox further blinks the amber LED to indicate data packet
2618 * activity, but we have no hardware support for that, so it would
2619 * require waking up every 10-20 msecs and checking the counters
2620 * on the chip, and then turning the LED off if appropriate.  That's
2621 * visible overhead, so not something we will do.
2622 */
2623static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
2624{
2625        struct qib_devdata *dd = ppd->dd;
2626        u64 extctl, ledblink = 0, val;
2627        unsigned long flags;
2628        int yel, grn;
2629
2630        /*
2631         * The diags use the LED to indicate diag info, so we leave
2632         * the external LED alone when the diags are running.
2633         */
2634        if (dd->diag_client)
2635                return;
2636
2637        /* Allow override of LED display, e.g. for locating a system in a rack */
2638        if (ppd->led_override) {
2639                grn = (ppd->led_override & QIB_LED_PHYS);
2640                yel = (ppd->led_override & QIB_LED_LOG);
2641        } else if (on) {
2642                val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
2643                grn = qib_7322_phys_portstate(val) ==
2644                        IB_PHYSPORTSTATE_LINKUP;
2645                yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
2646        } else {
2647                grn = 0;
2648                yel = 0;
2649        }
2650
2651        spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2652        extctl = dd->cspec->extctrl & (ppd->port == 1 ?
2653                ~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
2654        if (grn) {
2655                extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
2656                /*
2657                 * Counts are in chip clock (4ns) periods.
2658                 * This is 66.6 ms on (about 1/15 sec),
2659                 * 187.5 ms (3/16 sec) off, with packets rcvd.
2660                 */
2661                ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
2662                        ((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
2663        }
2664        if (yel)
2665                extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
2666        dd->cspec->extctrl = extctl;
2667        qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
2668        spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2669
2670        if (ledblink) /* blink the LED on packet receive */
2671                qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
2672}
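
    /*
     * A minimal sketch (hypothetical helper, not part of the driver)
     * of the unit conversion behind the LEDBLINK counts above: the
     * microsecond on/off times (66600 and 187500) become 4 ns
     * chip-clock periods, e.g. 66600 us * 1000 / 4 = 16,650,000
     * periods for the 66.6 ms on-time.
     */
    static inline u64 qib_led_usec_to_4ns_periods(u64 usec)
    {
            /* one chip clock period is 4 ns: usec -> ns -> periods */
            return usec * 1000 / 4;
    }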
2673
2674#ifdef CONFIG_INFINIBAND_QIB_DCA
2675
2676static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event)
2677{
2678        switch (event) {
2679        case DCA_PROVIDER_ADD:
2680                if (dd->flags & QIB_DCA_ENABLED)
2681                        break;
2682                if (!dca_add_requester(&dd->pcidev->dev)) {
2683                        qib_devinfo(dd->pcidev, "DCA enabled\n");
2684                        dd->flags |= QIB_DCA_ENABLED;
2685                        qib_setup_dca(dd);
2686                }
2687                break;
2688        case DCA_PROVIDER_REMOVE:
2689                if (dd->flags & QIB_DCA_ENABLED) {
2690                        dca_remove_requester(&dd->pcidev->dev);
2691                        dd->flags &= ~QIB_DCA_ENABLED;
2692                        dd->cspec->dca_ctrl = 0;
2693                        qib_write_kreg(dd, KREG_IDX(DCACtrlA),
2694                                dd->cspec->dca_ctrl);
2695                }
2696                break;
2697        }
2698        return 0;
2699}
2700
2701static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd, int cpu)
2702{
2703        struct qib_devdata *dd = rcd->dd;
2704        struct qib_chip_specific *cspec = dd->cspec;
2705
2706        if (!(dd->flags & QIB_DCA_ENABLED))
2707                return;
2708        if (cspec->rhdr_cpu[rcd->ctxt] != cpu) {
2709                const struct dca_reg_map *rmp;
2710
2711                cspec->rhdr_cpu[rcd->ctxt] = cpu;
2712                rmp = &dca_rcvhdr_reg_map[rcd->ctxt];
2713                cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask;
2714                cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |=
2715                        (u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb;
2716                qib_devinfo(dd->pcidev,
2717                        "Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu,
2718                        (long long) cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2719                qib_write_kreg(dd, rmp->regno,
2720                               cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2721                cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable);
2722                qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2723        }
2724}
2725
2726static void qib_update_sdma_dca(struct qib_pportdata *ppd, int cpu)
2727{
2728        struct qib_devdata *dd = ppd->dd;
2729        struct qib_chip_specific *cspec = dd->cspec;
2730        unsigned pidx = ppd->port - 1;
2731
2732        if (!(dd->flags & QIB_DCA_ENABLED))
2733                return;
2734        if (cspec->sdma_cpu[pidx] != cpu) {
2735                cspec->sdma_cpu[pidx] = cpu;
2736                cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ?
2737                        SYM_MASK(DCACtrlF, SendDma1DCAOPH) :
2738                        SYM_MASK(DCACtrlF, SendDma0DCAOPH));
2739                cspec->dca_rcvhdr_ctrl[4] |=
2740                        (u64) dca3_get_tag(&dd->pcidev->dev, cpu) <<
2741                                (ppd->hw_pidx ?
2742                                        SYM_LSB(DCACtrlF, SendDma1DCAOPH) :
2743                                        SYM_LSB(DCACtrlF, SendDma0DCAOPH));
2744                qib_devinfo(dd->pcidev,
2745                        "sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu,
2746                        (long long) cspec->dca_rcvhdr_ctrl[4]);
2747                qib_write_kreg(dd, KREG_IDX(DCACtrlF),
2748                               cspec->dca_rcvhdr_ctrl[4]);
2749                cspec->dca_ctrl |= ppd->hw_pidx ?
2750                        SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) :
2751                        SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable);
2752                qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2753        }
2754}
2755
2756static void qib_setup_dca(struct qib_devdata *dd)
2757{
2758        struct qib_chip_specific *cspec = dd->cspec;
2759        int i;
2760
2761        for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++)
2762                cspec->rhdr_cpu[i] = -1;
2763        for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2764                cspec->sdma_cpu[i] = -1;
2765        cspec->dca_rcvhdr_ctrl[0] =
2766                (1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) |
2767                (1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) |
2768                (1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) |
2769                (1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt));
2770        cspec->dca_rcvhdr_ctrl[1] =
2771                (1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) |
2772                (1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) |
2773                (1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) |
2774                (1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt));
2775        cspec->dca_rcvhdr_ctrl[2] =
2776                (1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) |
2777                (1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) |
2778                (1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) |
2779                (1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt));
2780        cspec->dca_rcvhdr_ctrl[3] =
2781                (1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) |
2782                (1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) |
2783                (1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) |
2784                (1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt));
2785        cspec->dca_rcvhdr_ctrl[4] =
2786                (1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) |
2787                (1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt));
2788        for (i = 0; i < ARRAY_SIZE(cspec->dca_rcvhdr_ctrl); i++)
2789                qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
2790                               cspec->dca_rcvhdr_ctrl[i]);
2791        for (i = 0; i < cspec->num_msix_entries; i++)
2792                setup_dca_notifier(dd, &cspec->msix_entries[i]);
2793}
2794
2795static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
2796                             const cpumask_t *mask)
2797{
2798        struct qib_irq_notify *n =
2799                container_of(notify, struct qib_irq_notify, notify);
2800        int cpu = cpumask_first(mask);
2801
2802        if (n->rcv) {
2803                struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2804                qib_update_rhdrq_dca(rcd, cpu);
2805        } else {
2806                struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2807                qib_update_sdma_dca(ppd, cpu);
2808        }
2809}
2810
2811static void qib_irq_notifier_release(struct kref *ref)
2812{
2813        struct qib_irq_notify *n =
2814                container_of(ref, struct qib_irq_notify, notify.kref);
2815        struct qib_devdata *dd;
2816
2817        if (n->rcv) {
2818                struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2819                dd = rcd->dd;
2820        } else {
2821                struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2822                dd = ppd->dd;
2823        }
2824        qib_devinfo(dd->pcidev,
2825                "release on HCA notify 0x%p n 0x%p\n", ref, n);
2826        kfree(n);
2827}
2828#endif
2829
2830/*
2831 * Disable MSIx interrupt if enabled, call generic MSIx code
2832 * to cleanup, and clear pending MSIx interrupts.
2833 * Used for fallback to INTx, after reset, and when MSIx setup fails.
2834 */
2835static void qib_7322_nomsix(struct qib_devdata *dd)
2836{
2837        u64 intgranted;
2838        int n;
2839
2840        dd->cspec->main_int_mask = ~0ULL;
2841        n = dd->cspec->num_msix_entries;
2842        if (n) {
2843                int i;
2844
2845                dd->cspec->num_msix_entries = 0;
2846                for (i = 0; i < n; i++) {
2847#ifdef CONFIG_INFINIBAND_QIB_DCA
2848                        reset_dca_notifier(dd, &dd->cspec->msix_entries[i]);
2849#endif
2850                        irq_set_affinity_hint(
2851                          dd->cspec->msix_entries[i].msix.vector, NULL);
2852                        free_cpumask_var(dd->cspec->msix_entries[i].mask);
2853                        free_irq(dd->cspec->msix_entries[i].msix.vector,
2854                           dd->cspec->msix_entries[i].arg);
2855                }
2856                qib_nomsix(dd);
2857        }
2858        /* make sure no MSIx interrupts are left pending */
2859        intgranted = qib_read_kreg64(dd, kr_intgranted);
2860        if (intgranted)
2861                qib_write_kreg(dd, kr_intgranted, intgranted);
2862}
2863
2864static void qib_7322_free_irq(struct qib_devdata *dd)
2865{
2866        if (dd->cspec->irq) {
2867                free_irq(dd->cspec->irq, dd);
2868                dd->cspec->irq = 0;
2869        }
2870        qib_7322_nomsix(dd);
2871}
2872
2873static void qib_setup_7322_cleanup(struct qib_devdata *dd)
2874{
2875        int i;
2876
2877#ifdef CONFIG_INFINIBAND_QIB_DCA
2878        if (dd->flags & QIB_DCA_ENABLED) {
2879                dca_remove_requester(&dd->pcidev->dev);
2880                dd->flags &= ~QIB_DCA_ENABLED;
2881                dd->cspec->dca_ctrl = 0;
2882                qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl);
2883        }
2884#endif
2885
2886        qib_7322_free_irq(dd);
2887        kfree(dd->cspec->cntrs);
2888        kfree(dd->cspec->sendchkenable);
2889        kfree(dd->cspec->sendgrhchk);
2890        kfree(dd->cspec->sendibchk);
2891        kfree(dd->cspec->msix_entries);
2892        for (i = 0; i < dd->num_pports; i++) {
2893                unsigned long flags;
2894                u32 mask = QSFP_GPIO_MOD_PRS_N |
2895                        (QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);
2896
2897                kfree(dd->pport[i].cpspec->portcntrs);
2898                if (dd->flags & QIB_HAS_QSFP) {
2899                        spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2900                        dd->cspec->gpio_mask &= ~mask;
2901                        qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2902                        spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2903                        qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data);
2904                }
2905                if (dd->pport[i].ibport_data.smi_ah)
2906                        ib_destroy_ah(&dd->pport[i].ibport_data.smi_ah->ibah);
2907        }
2908}
2909
2910/* handle SDMA interrupts */
2911static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
2912{
2913        struct qib_pportdata *ppd0 = &dd->pport[0];
2914        struct qib_pportdata *ppd1 = &dd->pport[1];
2915        u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
2916                INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
2917        u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
2918                INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));
2919
2920        if (intr0)
2921                qib_sdma_intr(ppd0);
2922        if (intr1)
2923                qib_sdma_intr(ppd1);
2924
2925        if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
2926                qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
2927        if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
2928                qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
2929}
2930
2931/*
2932 * Set or clear the Send buffer available interrupt enable bit.
2933 */
2934static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
2935{
2936        unsigned long flags;
2937
2938        spin_lock_irqsave(&dd->sendctrl_lock, flags);
2939        if (needint)
2940                dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
2941        else
2942                dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
2943        qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
2944        qib_write_kreg(dd, kr_scratch, 0ULL);
2945        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2946}
2947
2948/*
2949 * Somehow got an interrupt with reserved bits set in interrupt status.
2950 * Print a message so we know it happened, then clear them.
2951 * keep mainline interrupt handler cache-friendly
2952 */
2953static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
2954{
2955        u64 kills;
2956
2957        /* report which reserved bits fired, then mask them off */
2958        kills = istat & ~QIB_I_BITSEXTANT;
2959        qib_dev_err(dd,
2960                "Clearing reserved interrupt(s) 0x%016llx\n",
2961                (unsigned long long) kills);
2962        qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
2963}
2964
2965/* keep mainline interrupt handler cache-friendly */
2966static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
2967{
2968        u32 gpiostatus;
2969        int handled = 0;
2970        int pidx;
2971
2972        /*
2973         * Boards for this chip currently don't use GPIO interrupts,
2974         * so clear by writing GPIOstatus to GPIOclear, and complain
2975         * to developer.  To avoid endless repeats, clear
2976         * the bits in the mask, since there is some kind of
2977         * programming error or chip problem.
2978         */
2979        gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
2980        /*
2981         * In theory, writing GPIOstatus to GPIOclear could
2982         * have a bad side-effect on some diagnostic that wanted
2983         * to poll for a status-change, but the various shadows
2984         * make that problematic at best. Diags will just suppress
2985         * all GPIO interrupts during such tests.
2986         */
2987        qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
2988        /*
2989         * Check for QSFP MOD_PRS changes
2990         * only works for single port if IB1 != pidx1
2991         */
2992        for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
2993             ++pidx) {
2994                struct qib_pportdata *ppd;
2995                struct qib_qsfp_data *qd;
2996                u32 mask;
2997                if (!dd->pport[pidx].link_speed_supported)
2998                        continue;
2999                mask = QSFP_GPIO_MOD_PRS_N;
3000                ppd = dd->pport + pidx;
3001                mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
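                    /*
                     * MOD_PRS_N is active low: a seated module pulls
                     * the pin to 0, so the QSFP work below is queued
                     * only when the pin reads low.
                     */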
3002                if (gpiostatus & dd->cspec->gpio_mask & mask) {
3003                        u64 pins;
3004                        qd = &ppd->cpspec->qsfp_data;
3005                        gpiostatus &= ~mask;
3006                        pins = qib_read_kreg64(dd, kr_extstatus);
3007                        pins >>= SYM_LSB(EXTStatus, GPIOIn);
3008                        if (!(pins & mask)) {
3009                                ++handled;
3010                                qd->t_insert = jiffies;
3011                                queue_work(ib_wq, &qd->work);
3012                        }
3013                }
3014        }
3015
3016        if (gpiostatus && !handled) {
3017                const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
3018                u32 gpio_irq = mask & gpiostatus;
3019
3020                /*
3021                 * Clear any troublemakers, and update chip from shadow
3022                 */
3023                dd->cspec->gpio_mask &= ~gpio_irq;
3024                qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
3025        }
3026}
3027
3028/*
3029 * Handle errors and unusual events first, separate function
3030 * to improve cache hits for fast path interrupt handling.
3031 */
3032static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
3033{
3034        if (istat & ~QIB_I_BITSEXTANT)
3035                unknown_7322_ibits(dd, istat);
3036        if (istat & QIB_I_GPIO)
3037                unknown_7322_gpio_intr(dd);
3038        if (istat & QIB_I_C_ERROR) {
3039                qib_write_kreg(dd, kr_errmask, 0ULL);
3040                tasklet_schedule(&dd->error_tasklet);
3041        }
3042        if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
3043                handle_7322_p_errors(dd->rcd[0]->ppd);
3044        if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
3045                handle_7322_p_errors(dd->rcd[1]->ppd);
3046}
3047
3048/*
3049 * Dynamically adjust the rcv int timeout for a context based on incoming
3050 * packet rate.
3051 */
3052static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
3053{
3054        struct qib_devdata *dd = rcd->dd;
3055        u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
3056
3057        /*
3058         * Dynamically adjust idle timeout on chip
3059         * based on number of packets processed.
3060         */
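            /*
             * For example, with the timeout at 8 ticks: a quiet
             * interrupt (npkts < rcv_int_count) halves it to 4 (never
             * below 2), while a busy one (npkts >= rcv_int_count)
             * doubles it to 16, capped at rcv_int_timeout.
             */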
3061        if (npkts < rcv_int_count && timeout > 2)
3062                timeout >>= 1;
3063        else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
3064                timeout = min(timeout << 1, rcv_int_timeout);
3065        else
3066                return;
3067
3068        dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
3069        qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
3070}
3071
3072/*
3073 * This is the main interrupt handler.
3074 * It will normally only be used for low frequency interrupts but may
3075 * have to handle all interrupts if INTx is enabled or fewer than normal
3076 * MSIx interrupts were allocated.
3077 * This routine should ignore the interrupt bits for any of the
3078 * dedicated MSIx handlers.
3079 */
3080static irqreturn_t qib_7322intr(int irq, void *data)
3081{
3082        struct qib_devdata *dd = data;
3083        irqreturn_t ret;
3084        u64 istat;
3085        u64 ctxtrbits;
3086        u64 rmask;
3087        unsigned i;
3088        u32 npkts;
3089
3090        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
3091                /*
3092                 * This return value is not great, but we do not want the
3093                 * interrupt core code to remove our interrupt handler
3094                 * because we don't appear to be handling an interrupt
3095                 * during a chip reset.
3096                 */
3097                ret = IRQ_HANDLED;
3098                goto bail;
3099        }
3100
3101        istat = qib_read_kreg64(dd, kr_intstatus);
3102
3103        if (unlikely(istat == ~0ULL)) {
3104                qib_bad_intrstatus(dd);
3105                qib_dev_err(dd, "Interrupt status all f's, skipping\n");
3106                /* don't know if it was our interrupt or not */
3107                ret = IRQ_NONE;
3108                goto bail;
3109        }
3110
3111        istat &= dd->cspec->main_int_mask;
3112        if (unlikely(!istat)) {
3113                /* already handled, or shared and not us */
3114                ret = IRQ_NONE;
3115                goto bail;
3116        }
3117
3118        this_cpu_inc(*dd->int_counter);
3119
3120        /* handle "errors" of various kinds first, device ahead of port */
3121        if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
3122                              QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
3123                              INT_MASK_P(Err, 1))))
3124                unlikely_7322_intr(dd, istat);
3125
3126        /*
3127         * Clear the interrupt bits we found set, relatively early, so we
3128         * "know" the chip will have seen this by the time we process
3129         * the queue, and will re-interrupt if necessary.  The processor
3130         * itself won't take the interrupt again until we return.
3131         */
3132        qib_write_kreg(dd, kr_intclear, istat);
3133
3134        /*
3135         * Handle kernel receive queues before checking for pio buffers
3136         * available since receives can overflow; piobuf waiters can afford
3137         * a few extra cycles, since they were waiting anyway.
3138         */
3139        ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
3140        if (ctxtrbits) {
3141                rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
3142                        (1ULL << QIB_I_RCVURG_LSB);
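                    /*
                     * rmask pairs the RcvAvail and RcvUrg bits for
                     * ctxt 0; shifting it left once per iteration
                     * walks both bit ranges in lockstep, one kernel
                     * ctxt at a time.
                     */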
3143                for (i = 0; i < dd->first_user_ctxt; i++) {
3144                        if (ctxtrbits & rmask) {
3145                                ctxtrbits &= ~rmask;
3146                                if (dd->rcd[i])
3147                                        qib_kreceive(dd->rcd[i], NULL, &npkts);
3148                        }
3149                        rmask <<= 1;
3150                }
3151                if (ctxtrbits) {
3152                        ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
3153                                (ctxtrbits >> QIB_I_RCVURG_LSB);
3154                        qib_handle_urcv(dd, ctxtrbits);
3155                }
3156        }
3157
3158        if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
3159                sdma_7322_intr(dd, istat);
3160
3161        if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
3162                qib_ib_piobufavail(dd);
3163
3164        ret = IRQ_HANDLED;
3165bail:
3166        return ret;
3167}
3168
3169/*
3170 * Dedicated receive packet available interrupt handler.
3171 */
3172static irqreturn_t qib_7322pintr(int irq, void *data)
3173{
3174        struct qib_ctxtdata *rcd = data;
3175        struct qib_devdata *dd = rcd->dd;
3176        u32 npkts;
3177
3178        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3179                /*
3180                 * This return value is not great, but we do not want the
3181                 * interrupt core code to remove our interrupt handler
3182                 * because we don't appear to be handling an interrupt
3183                 * during a chip reset.
3184                 */
3185                return IRQ_HANDLED;
3186
3187        this_cpu_inc(*dd->int_counter);
3188
3189        /* Clear the interrupt bit we expect to be set. */
3190        qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
3191                       (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
3192
3193        qib_kreceive(rcd, NULL, &npkts);
3194
3195        return IRQ_HANDLED;
3196}
3197
3198/*
3199 * Dedicated Send buffer available interrupt handler.
3200 */
3201static irqreturn_t qib_7322bufavail(int irq, void *data)
3202{
3203        struct qib_devdata *dd = data;
3204
3205        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3206                /*
3207                 * This return value is not great, but we do not want the
3208                 * interrupt core code to remove our interrupt handler
3209                 * because we don't appear to be handling an interrupt
3210                 * during a chip reset.
3211                 */
3212                return IRQ_HANDLED;
3213
3214        this_cpu_inc(*dd->int_counter);
3215
3216        /* Clear the interrupt bit we expect to be set. */
3217        qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);
3218
3219        /* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
3220        if (dd->flags & QIB_INITTED)
3221                qib_ib_piobufavail(dd);
3222        else
3223                qib_wantpiobuf_7322_intr(dd, 0);
3224
3225        return IRQ_HANDLED;
3226}
3227
3228/*
3229 * Dedicated Send DMA interrupt handler.
3230 */
3231static irqreturn_t sdma_intr(int irq, void *data)
3232{
3233        struct qib_pportdata *ppd = data;
3234        struct qib_devdata *dd = ppd->dd;
3235
3236        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3237                /*
3238                 * This return value is not great, but we do not want the
3239                 * interrupt core code to remove our interrupt handler
3240                 * because we don't appear to be handling an interrupt
3241                 * during a chip reset.
3242                 */
3243                return IRQ_HANDLED;
3244
3245        this_cpu_inc(*dd->int_counter);
3246
3247        /* Clear the interrupt bit we expect to be set. */
3248        qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3249                       INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
3250        qib_sdma_intr(ppd);
3251
3252        return IRQ_HANDLED;
3253}
3254
3255/*
3256 * Dedicated Send DMA idle interrupt handler.
3257 */
3258static irqreturn_t sdma_idle_intr(int irq, void *data)
3259{
3260        struct qib_pportdata *ppd = data;
3261        struct qib_devdata *dd = ppd->dd;
3262
3263        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3264                /*
3265                 * This return value is not great, but we do not want the
3266                 * interrupt core code to remove our interrupt handler
3267                 * because we don't appear to be handling an interrupt
3268                 * during a chip reset.
3269                 */
3270                return IRQ_HANDLED;
3271
3272        this_cpu_inc(*dd->int_counter);
3273
3274        /* Clear the interrupt bit we expect to be set. */
3275        qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3276                       INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
3277        qib_sdma_intr(ppd);
3278
3279        return IRQ_HANDLED;
3280}
3281
3282/*
3283 * Dedicated Send DMA progress interrupt handler.
3284 */
3285static irqreturn_t sdma_progress_intr(int irq, void *data)
3286{
3287        struct qib_pportdata *ppd = data;
3288        struct qib_devdata *dd = ppd->dd;
3289
3290        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3291                /*
3292                 * This return value is not great, but we do not want the
3293                 * interrupt core code to remove our interrupt handler
3294                 * because we don't appear to be handling an interrupt
3295                 * during a chip reset.
3296                 */
3297                return IRQ_HANDLED;
3298
3299        this_cpu_inc(*dd->int_counter);
3300
3301        /* Clear the interrupt bit we expect to be set. */
3302        qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3303                       INT_MASK_P(SDmaProgress, 1) :
3304                       INT_MASK_P(SDmaProgress, 0));
3305        qib_sdma_intr(ppd);
3306
3307        return IRQ_HANDLED;
3308}
3309
3310/*
3311 * Dedicated Send DMA cleanup interrupt handler.
3312 */
3313static irqreturn_t sdma_cleanup_intr(int irq, void *data)
3314{
3315        struct qib_pportdata *ppd = data;
3316        struct qib_devdata *dd = ppd->dd;
3317
3318        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3319                /*
3320                 * This return value is not great, but we do not want the
3321                 * interrupt core code to remove our interrupt handler
3322                 * because we don't appear to be handling an interrupt
3323                 * during a chip reset.
3324                 */
3325                return IRQ_HANDLED;
3326
3327        this_cpu_inc(*dd->int_counter);
3328
3329        /* Clear the interrupt bit we expect to be set. */
3330        qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3331                       INT_MASK_PM(SDmaCleanupDone, 1) :
3332                       INT_MASK_PM(SDmaCleanupDone, 0));
3333        qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
3334
3335        return IRQ_HANDLED;
3336}
3337
3338#ifdef CONFIG_INFINIBAND_QIB_DCA
3339
3340static void reset_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
3341{
3342        if (!m->dca)
3343                return;
3344        qib_devinfo(dd->pcidev,
3345                "Disabling notifier on HCA %d irq %d\n",
3346                dd->unit,
3347                m->msix.vector);
3348        irq_set_affinity_notifier(
3349                m->msix.vector,
3350                NULL);
3351        m->notifier = NULL;
3352}
3353
3354static void setup_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
3355{
3356        struct qib_irq_notify *n;
3357
3358        if (!m->dca)
3359                return;
3360        n = kzalloc(sizeof(*n), GFP_KERNEL);
3361        if (n) {
3362                int ret;
3363
3364                m->notifier = n;
3365                n->notify.irq = m->msix.vector;
3366                n->notify.notify = qib_irq_notifier_notify;
3367                n->notify.release = qib_irq_notifier_release;
3368                n->arg = m->arg;
3369                n->rcv = m->rcv;
3370                qib_devinfo(dd->pcidev,
3371                        "set notifier irq %d rcv %d notify %p\n",
3372                        n->notify.irq, n->rcv, &n->notify);
3373                ret = irq_set_affinity_notifier(
3374                                n->notify.irq,
3375                                &n->notify);
3376                if (ret) {
3377                        m->notifier = NULL;
3378                        kfree(n);
3379                }
3380        }
3381}
3382
3383#endif
3384
3385/*
3386 * Set up our chip-specific interrupt handler.
3387 * The interrupt type has already been set up, so
3388 * we just need to do the registration and error checking.
3389 * If we are using MSIx interrupts, we may fall back to
3390 * INTx later, if the interrupt handler doesn't get called
3391 * within 1/2 second (see verify_interrupt()).
3392 */
3393static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
3394{
3395        int ret, i, msixnum;
3396        u64 redirect[6];
3397        u64 mask;
3398        const struct cpumask *local_mask;
3399        int firstcpu, secondcpu = 0, currrcvcpu = 0;
3400
3401        if (!dd->num_pports)
3402                return;
3403
3404        if (clearpend) {
3405                /*
3406                 * if not switching interrupt types, be sure interrupts are
3407                 * disabled, and then clear anything pending at this point,
3408                 * because we are starting clean.
3409                 */
3410                qib_7322_set_intr_state(dd, 0);
3411
3412                /* clear the reset error, init error/hwerror mask */
3413                qib_7322_init_hwerrors(dd);
3414
3415                /* clear any interrupt bits that might be set */
3416                qib_write_kreg(dd, kr_intclear, ~0ULL);
3417
3418                /* make sure no pending MSIx intr, and clear diag reg */
3419                qib_write_kreg(dd, kr_intgranted, ~0ULL);
3420                qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
3421        }
3422
3423        if (!dd->cspec->num_msix_entries) {
3424                /* Try to get INTx interrupt */
3425try_intx:
3426                if (!dd->pcidev->irq) {
3427                        qib_dev_err(dd,
3428                                "irq is 0, BIOS error?  Interrupts won't work\n");
3429                        goto bail;
3430                }
3431                ret = request_irq(dd->pcidev->irq, qib_7322intr,
3432                                  IRQF_SHARED, QIB_DRV_NAME, dd);
3433                if (ret) {
3434                        qib_dev_err(dd,
3435                                "Couldn't setup INTx interrupt (irq=%d): %d\n",
3436                                dd->pcidev->irq, ret);
3437                        goto bail;
3438                }
3439                dd->cspec->irq = dd->pcidev->irq;
3440                dd->cspec->main_int_mask = ~0ULL;
3441                goto bail;
3442        }
3443
3444        /* Try to get MSIx interrupts */
3445        memset(redirect, 0, sizeof redirect);
3446        mask = ~0ULL;
3447        msixnum = 0;
3448        local_mask = cpumask_of_pcibus(dd->pcidev->bus);
3449        firstcpu = cpumask_first(local_mask);
3450        if (firstcpu >= nr_cpu_ids ||
3451                        cpumask_weight(local_mask) == num_online_cpus()) {
3452                local_mask = topology_core_cpumask(0);
3453                firstcpu = cpumask_first(local_mask);
3454        }
3455        if (firstcpu < nr_cpu_ids) {
3456                secondcpu = cpumask_next(firstcpu, local_mask);
3457                if (secondcpu >= nr_cpu_ids)
3458                        secondcpu = firstcpu;
3459                currrcvcpu = secondcpu;
3460        }
3461        for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
3462                irq_handler_t handler;
3463                void *arg;
3464                u64 val;
3465                int lsb, reg, sh;
3466#ifdef CONFIG_INFINIBAND_QIB_DCA
3467                int dca = 0;
3468#endif
3469
3470                dd->cspec->msix_entries[msixnum].
3471                        name[sizeof(dd->cspec->msix_entries[msixnum].name) - 1]
3472                        = '\0';
3473                if (i < ARRAY_SIZE(irq_table)) {
3474                        if (irq_table[i].port) {
3475                                /* skip if for a non-configured port */
3476                                if (irq_table[i].port > dd->num_pports)
3477                                        continue;
3478                                arg = dd->pport + irq_table[i].port - 1;
3479                        } else
3480                                arg = dd;
3481#ifdef CONFIG_INFINIBAND_QIB_DCA
3482                        dca = irq_table[i].dca;
3483#endif
3484                        lsb = irq_table[i].lsb;
3485                        handler = irq_table[i].handler;
3486                        snprintf(dd->cspec->msix_entries[msixnum].name,
3487                                sizeof(dd->cspec->msix_entries[msixnum].name)
3488                                 - 1,
3489                                QIB_DRV_NAME "%d%s", dd->unit,
3490                                irq_table[i].name);
3491                } else {
3492                        unsigned ctxt;
3493
3494                        ctxt = i - ARRAY_SIZE(irq_table);
3495                        /* per krcvq context receive interrupt */
3496                        arg = dd->rcd[ctxt];
3497                        if (!arg)
3498                                continue;
3499                        if (qib_krcvq01_no_msi && ctxt < 2)
3500                                continue;
3501#ifdef CONFIG_INFINIBAND_QIB_DCA
3502                        dca = 1;
3503#endif
3504                        lsb = QIB_I_RCVAVAIL_LSB + ctxt;
3505                        handler = qib_7322pintr;
3506                        snprintf(dd->cspec->msix_entries[msixnum].name,
3507                                sizeof(dd->cspec->msix_entries[msixnum].name)
3508                                 - 1,
3509                                QIB_DRV_NAME "%d (kctx)", dd->unit);
3510                }
3511                ret = request_irq(
3512                        dd->cspec->msix_entries[msixnum].msix.vector,
3513                        handler, 0, dd->cspec->msix_entries[msixnum].name,
3514                        arg);
3515                if (ret) {
3516                        /*
3517                         * Shouldn't happen since the enable said we could
3518                         * have as many as we are trying to set up here.
3519                         */
3520                        qib_dev_err(dd,
3521                                "Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
3522                                msixnum,
3523                                dd->cspec->msix_entries[msixnum].msix.vector,
3524                                ret);
3525                        qib_7322_nomsix(dd);
3526                        goto try_intx;
3527                }
3528                dd->cspec->msix_entries[msixnum].arg = arg;
3529#ifdef CONFIG_INFINIBAND_QIB_DCA
3530                dd->cspec->msix_entries[msixnum].dca = dca;
3531                dd->cspec->msix_entries[msixnum].rcv =
3532                        handler == qib_7322pintr;
3533#endif
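                    /*
                     * Steer this interrupt source to the vector just
                     * set up: each IntRedirect register packs
                     * IBA7322_REDIRECT_VEC_PER_REG vector fields, so
                     * reg picks the register and sh the field for
                     * source bit lsb.
                     */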
3534                if (lsb >= 0) {
3535                        reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
3536                        sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
3537                                SYM_LSB(IntRedirect0, vec1);
3538                        mask &= ~(1ULL << lsb);
3539                        redirect[reg] |= ((u64) msixnum) << sh;
3540                }
3541                val = qib_read_kreg64(dd, 2 * msixnum + 1 +
3542                        (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3543                if (firstcpu < nr_cpu_ids &&
3544                        zalloc_cpumask_var(
3545                                &dd->cspec->msix_entries[msixnum].mask,
3546                                GFP_KERNEL)) {
3547                        if (handler == qib_7322pintr) {
3548                                cpumask_set_cpu(currrcvcpu,
3549                                        dd->cspec->msix_entries[msixnum].mask);
3550                                currrcvcpu = cpumask_next(currrcvcpu,
3551                                        local_mask);
3552                                if (currrcvcpu >= nr_cpu_ids)
3553                                        currrcvcpu = secondcpu;
3554                        } else {
3555                                cpumask_set_cpu(firstcpu,
3556                                        dd->cspec->msix_entries[msixnum].mask);
3557                        }
3558                        irq_set_affinity_hint(
3559                                dd->cspec->msix_entries[msixnum].msix.vector,
3560                                dd->cspec->msix_entries[msixnum].mask);
3561                }
3562                msixnum++;
3563        }
3564        /* Initialize the vector mapping */
3565        for (i = 0; i < ARRAY_SIZE(redirect); i++)
3566                qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
3567        dd->cspec->main_int_mask = mask;
3568        tasklet_init(&dd->error_tasklet, qib_error_tasklet,
3569                (unsigned long)dd);
3570bail:;
3571}
3572
3573/**
3574 * qib_7322_boardname - fill in the board name and note features
3575 * @dd: the qlogic_ib device
3576 *
3577 * info will be based on the board revision register
3578 */
3579static unsigned qib_7322_boardname(struct qib_devdata *dd)
3580{
3581        /* Will need enumeration of board-types here */
3582        char *n;
3583        u32 boardid, namelen;
3584        unsigned features = DUAL_PORT_CAP;
3585
3586        boardid = SYM_FIELD(dd->revision, Revision, BoardID);
3587
3588        switch (boardid) {
3589        case 0:
3590                n = "InfiniPath_QLE7342_Emulation";
3591                break;
3592        case 1:
3593                n = "InfiniPath_QLE7340";
3594                dd->flags |= QIB_HAS_QSFP;
3595                features = PORT_SPD_CAP;
3596                break;
3597        case 2:
3598                n = "InfiniPath_QLE7342";
3599                dd->flags |= QIB_HAS_QSFP;
3600                break;
3601        case 3:
3602                n = "InfiniPath_QMI7342";
3603                break;
3604        case 4:
3605                n = "InfiniPath_Unsupported7342";
3606                qib_dev_err(dd, "Unsupported version of QMH7342\n");
3607                features = 0;
3608                break;
3609        case BOARD_QMH7342:
3610                n = "InfiniPath_QMH7342";
3611                features = 0x24;
3612                break;
3613        case BOARD_QME7342:
3614                n = "InfiniPath_QME7342";
3615                break;
3616        case 8:
3617                n = "InfiniPath_QME7362";
3618                dd->flags |= QIB_HAS_QSFP;
3619                break;
3620        case 15:
3621                n = "InfiniPath_QLE7342_TEST";
3622                dd->flags |= QIB_HAS_QSFP;
3623                break;
3624        default:
3625                n = "InfiniPath_QLE73xy_UNKNOWN";
3626                qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
3627                break;
3628        }
3629        dd->board_atten = 1; /* index into txdds_Xdr */
3630
3631        namelen = strlen(n) + 1;
3632        dd->boardname = kmalloc(namelen, GFP_KERNEL);
3633        if (!dd->boardname)
3634                qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
3635        else
3636                snprintf(dd->boardname, namelen, "%s", n);
3637
3638        snprintf(dd->boardversion, sizeof(dd->boardversion),
3639                 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
3640                 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
3641                 (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
3642                 dd->majrev, dd->minrev,
3643                 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
3644
3645        if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
3646                qib_devinfo(dd->pcidev,
3647                        "IB%u: Forced to single port mode by module parameter\n",
3648                        dd->unit);
3649                features &= PORT_SPD_CAP;
3650        }
3651
3652        return features;
3653}
3654
3655/*
3656 * This routine sleeps, so it can only be called from user context, not
3657 * from interrupt context.
3658 */
3659static int qib_do_7322_reset(struct qib_devdata *dd)
3660{
3661        u64 val;
3662        u64 *msix_vecsave;
3663        int i, msix_entries, ret = 1;
3664        u16 cmdval;
3665        u8 int_line, clinesz;
3666        unsigned long flags;
3667
3668        /* Use dev_err so it shows up in logs, etc. */
3669        qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
3670
3671        qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
3672
3673        msix_entries = dd->cspec->num_msix_entries;
3674
3675        /* no interrupts till re-initted */
3676        qib_7322_set_intr_state(dd, 0);
3677
3678        if (msix_entries) {
3679                qib_7322_nomsix(dd);
3680                /* can be up to 512 bytes, too big for stack */
3681                msix_vecsave = kmalloc(2 * msix_entries *
3682                        sizeof(u64), GFP_KERNEL);
3683                if (!msix_vecsave)
3684                        qib_dev_err(dd, "No mem to save MSIx data\n");
3685        } else
3686                msix_vecsave = NULL;
3687
3688        /*
3689         * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
3690         * info that is set up by the BIOS, so we have to save and restore
3691         * it ourselves.   There is some risk something could change it,
3692         * after we save it, but since we have disabled the MSIx, it
3693         * shouldn't be touched...
3694         */
3695        for (i = 0; i < msix_entries; i++) {
3696                u64 vecaddr, vecdata;
3697                vecaddr = qib_read_kreg64(dd, 2 * i +
3698                                  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3699                vecdata = qib_read_kreg64(dd, 1 + 2 * i +
3700                                  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3701                if (msix_vecsave) {
3702                        msix_vecsave[2 * i] = vecaddr;
3703                        /* save it without the masked bit set */
3704                        msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
3705                }
3706        }
3707
3708        dd->pport->cpspec->ibdeltainprog = 0;
3709        dd->pport->cpspec->ibsymdelta = 0;
3710        dd->pport->cpspec->iblnkerrdelta = 0;
3711        dd->pport->cpspec->ibmalfdelta = 0;
3712        /* so we check interrupts work again */
3713        dd->z_int_counter = qib_int_counter(dd);
3714
3715        /*
3716         * Keep chip from being accessed until we are ready.  Use
3717         * writeq() directly, to allow the write even though QIB_PRESENT
3718         * isn't set.
3719         */
3720        dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
3721        dd->flags |= QIB_DOING_RESET;
3722        val = dd->control | QLOGIC_IB_C_RESET;
3723        writeq(val, &dd->kregbase[kr_control]);
3724
3725        for (i = 1; i <= 5; i++) {
3726                /*
3727                 * Allow MBIST, etc. to complete; longer on each retry.
3728                 * We sometimes get machine checks from bus timeout if no
3729                 * response, so for now, make it *really* long.
3730                 */
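                    /* i.e. 7 s on the first pass, growing to 19 s on the fifth */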
3731                msleep(1000 + (1 + i) * 3000);
3732
3733                qib_pcie_reenable(dd, cmdval, int_line, clinesz);
3734
3735                /*
3736                 * Use readq directly, so we don't need to mark it as PRESENT
3737                 * until we get a successful indication that all is well.
3738                 */
3739                val = readq(&dd->kregbase[kr_revision]);
3740                if (val == dd->revision)
3741                        break;
3742                if (i == 5) {
3743                        qib_dev_err(dd,
3744                                "Failed to initialize after reset, unusable\n");
3745                        ret = 0;
3746                        goto  bail;
3747                }
3748        }
3749
3750        dd->flags |= QIB_PRESENT; /* it's back */
3751
3752        if (msix_entries) {
3753                /* restore the MSIx vector address and data if saved above */
3754                for (i = 0; i < msix_entries; i++) {
3755                        dd->cspec->msix_entries[i].msix.entry = i;
3756                        if (!msix_vecsave || !msix_vecsave[2 * i])
3757                                continue;
3758                        qib_write_kreg(dd, 2 * i +
3759                                (QIB_7322_MsixTable_OFFS / sizeof(u64)),
3760                                msix_vecsave[2 * i]);
3761                        qib_write_kreg(dd, 1 + 2 * i +
3762                                (QIB_7322_MsixTable_OFFS / sizeof(u64)),
3763                                msix_vecsave[1 + 2 * i]);
3764                }
3765        }
3766
3767        /* initialize the remaining registers.  */
3768        for (i = 0; i < dd->num_pports; ++i)
3769                write_7322_init_portregs(&dd->pport[i]);
3770        write_7322_initregs(dd);
3771
3772        if (qib_pcie_params(dd, dd->lbus_width,
3773                            &dd->cspec->num_msix_entries,
3774                            dd->cspec->msix_entries))
3775                qib_dev_err(dd,
3776                        "Reset failed to setup PCIe or interrupts; continuing anyway\n");
3777
3778        qib_setup_7322_interrupt(dd, 1);
3779
3780        for (i = 0; i < dd->num_pports; ++i) {
3781                struct qib_pportdata *ppd = &dd->pport[i];
3782
3783                spin_lock_irqsave(&ppd->lflags_lock, flags);
3784                ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
3785                ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3786                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3787        }
3788
3789bail:
3790        dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
3791        kfree(msix_vecsave);
3792        return ret;
3793}
3794
3795/**
3796 * qib_7322_put_tid - write a TID to the chip
3797 * @dd: the qlogic_ib device
3798 * @tidptr: pointer to the expected TID (in chip) to update
3799 * @type: RCVHQ_RCV_TYPE_EAGER for eager, RCVHQ_RCV_TYPE_EXPECTED for expected
3800 * @pa: physical address of in memory buffer; tidinvalid if freeing
3801 */
3802static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
3803                             u32 type, unsigned long pa)
3804{
3805        if (!(dd->flags & QIB_PRESENT))
3806                return;
3807        if (pa != dd->tidinvalid) {
3808                u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
3809
3810                /* paranoia checks */
3811                if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
3812                        qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
3813                                    pa);
3814                        return;
3815                }
3816                if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
3817                        qib_dev_err(dd,
3818                                "Physical page address 0x%lx larger than supported\n",
3819                                pa);
3820                        return;
3821                }
3822
3823                if (type == RCVHQ_RCV_TYPE_EAGER)
3824                        chippa |= dd->tidtemplate;
3825                else /* for now, always full 4KB page */
3826                        chippa |= IBA7322_TID_SZ_4K;
3827                pa = chippa;
3828        }
3829        writeq(pa, tidptr);
3830        mmiowb();
3831}
3832
3833/**
3834 * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
3835 * @dd: the qlogic_ib device
3836 * @rcd: the ctxt data
3837 *
3838 * clear all TID entries for a ctxt, expected and eager.
3839 * Used from qib_close().
3840 */
3841static void qib_7322_clear_tids(struct qib_devdata *dd,
3842                                struct qib_ctxtdata *rcd)
3843{
3844        u64 __iomem *tidbase;
3845        unsigned long tidinv;
3846        u32 ctxt;
3847        int i;
3848
3849        if (!dd->kregbase || !rcd)
3850                return;
3851
3852        ctxt = rcd->ctxt;
3853
3854        tidinv = dd->tidinvalid;
3855        tidbase = (u64 __iomem *)
3856                ((char __iomem *) dd->kregbase +
3857                 dd->rcvtidbase +
3858                 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
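            /*
             * Expected-TID entries are one array of rcvtidcnt 8-byte
             * words per ctxt, starting rcvtidbase bytes into chip
             * space; ctxt N's array thus begins N * rcvtidcnt entries
             * in.
             */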
3859
3860        for (i = 0; i < dd->rcvtidcnt; i++)
3861                qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
3862                                 tidinv);
3863
3864        tidbase = (u64 __iomem *)
3865                ((char __iomem *) dd->kregbase +
3866                 dd->rcvegrbase +
3867                 rcd->rcvegr_tid_base * sizeof(*tidbase));
3868
3869        for (i = 0; i < rcd->rcvegrcnt; i++)
3870                qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
3871                                 tidinv);
3872}
3873
3874/**
3875 * qib_7322_tidtemplate - setup constants for TID updates
3876 * @dd: the qlogic_ib device
3877 *
3878 * We set up values we use a lot, to avoid recalculating them each time
3879 */
3880static void qib_7322_tidtemplate(struct qib_devdata *dd)
3881{
3882        /*
3883         * For now, we always allocate 4KB buffers (at init) so we can
3884         * receive max size packets.  We may want a module parameter to
3885         * specify 2KB or 4KB and/or make it per port instead of per device
3886         * for those who want to reduce memory footprint.  Note that the
3887         * rcvhdrentsize size must be large enough to hold the largest
3888         * IB header (currently 96 bytes) that we expect to handle (plus of
3889         * course the 2 dwords of RHF).
3890         */
3891        if (dd->rcvegrbufsize == 2048)
3892                dd->tidtemplate = IBA7322_TID_SZ_2K;
3893        else if (dd->rcvegrbufsize == 4096)
3894                dd->tidtemplate = IBA7322_TID_SZ_4K;
3895        dd->tidinvalid = 0;
3896}
3897
3898/**
3899 * qib_7322_get_base_info - set chip-specific flags for user code
3900 * @rcd: the qlogic_ib ctxt
3901 * @kinfo: qib_base_info pointer
3902 *
3903 * We set the PCIE flag because the lower bandwidth on PCIe vs
3904 * HyperTransport can affect some user packet algorithms.
3905 */
3907static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
3908                                  struct qib_base_info *kinfo)
3909{
3910        kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
3911                QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
3912                QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
3913        if (rcd->dd->cspec->r1)
3914                kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
3915        if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
3916                kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
3917
3918        return 0;
3919}
3920
3921static struct qib_message_header *
3922qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
3923{
3924        u32 offset = qib_hdrget_offset(rhf_addr);
3925
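            /*
             * rhf_addr points at the RHF within the rcvhdrq entry;
             * backing up by dd->rhf_offset gives the entry start, and the
             * RHF's offset field (in dwords) then locates the header.
             */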
3926        return (struct qib_message_header *)
3927                (rhf_addr - dd->rhf_offset + offset);
3928}
3929
3930/*
3931 * Configure number of contexts.
3932 */
3933static void qib_7322_config_ctxts(struct qib_devdata *dd)
3934{
3935        unsigned long flags;
3936        u32 nchipctxts;
3937
3938        nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
3939        dd->cspec->numctxts = nchipctxts;
3940        if (qib_n_krcv_queues > 1 && dd->num_pports) {
3941                dd->first_user_ctxt = NUM_IB_PORTS +
3942                        (qib_n_krcv_queues - 1) * dd->num_pports;
3943                if (dd->first_user_ctxt > nchipctxts)
3944                        dd->first_user_ctxt = nchipctxts;
3945                dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
3946        } else {
3947                dd->first_user_ctxt = NUM_IB_PORTS;
3948                dd->n_krcv_queues = 1;
3949        }
3950
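            /*
             * Worked example for first_user_ctxt above (illustrative;
             * NUM_IB_PORTS is 2): with both ports in use and
             * qib_n_krcv_queues == 2, first_user_ctxt is
             * 2 + (2 - 1) * 2 = 4, so contexts 0-3 are kernel receive
             * contexts and user contexts start at 4.
             */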
3951        if (!qib_cfgctxts) {
3952                int nctxts = dd->first_user_ctxt + num_online_cpus();
3953
3954                if (nctxts <= 6)
3955                        dd->ctxtcnt = 6;
3956                else if (nctxts <= 10)
3957                        dd->ctxtcnt = 10;
3958                else if (nctxts <= nchipctxts)
3959                        dd->ctxtcnt = nchipctxts;
3960        } else if (qib_cfgctxts < dd->num_pports)
3961                dd->ctxtcnt = dd->num_pports;
3962        else if (qib_cfgctxts <= nchipctxts)
3963                dd->ctxtcnt = qib_cfgctxts;
3964        if (!dd->ctxtcnt) /* none of the above, set to max */
3965                dd->ctxtcnt = nchipctxts;
3966
3967        /*
3968         * Chip can be configured for 6, 10, or 18 ctxts, and choice
3969         * affects number of eager TIDs per ctxt (1K, 2K, 4K).
3970         * Lock to be paranoid about later motion, etc.
3971         */
3972        spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
3973        if (dd->ctxtcnt > 10)
3974                dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
3975        else if (dd->ctxtcnt > 6)
3976                dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
3977        /* else configure for default 6 receive ctxts */
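            /*
             * i.e. ContextCfg encodes the context count: 0 -> 6 contexts,
             * 1 -> 10, 2 -> 18, with a matching change in the per-context
             * eager TID count (1K, 2K or 4K).
             */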
3978
3979        /* The XRC opcode is 5. */
3980        dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
3981
3982        /*
3983         * RcvCtrl *must* be written here so that the
3984         * chip understands how to change rcvegrcnt below.
3985         */
3986        qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
3987        spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
3988
3989        /* kr_rcvegrcnt changes based on the number of contexts enabled */
3990        dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
3991        if (qib_rcvhdrcnt)
3992                dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
3993        else
3994                dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt,
3995                                    dd->num_pports > 1 ? 1024U : 2048U);
3996}
3997
3998static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
3999{
4000
4001        int lsb, ret = 0;
4002        u64 maskr; /* right-justified mask */
4003
4004        switch (which) {
4005
4006        case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
4007                ret = ppd->link_width_enabled;
4008                goto done;
4009
4010        case QIB_IB_CFG_LWID: /* Get currently active Link-width */
4011                ret = ppd->link_width_active;
4012                goto done;
4013
4014        case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
4015                ret = ppd->link_speed_enabled;
4016                goto done;
4017
4018        case QIB_IB_CFG_SPD: /* Get current Link spd */
4019                ret = ppd->link_speed_active;
4020                goto done;
4021
4022        case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
4023                lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4024                maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4025                break;
4026
4027        case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
4028                lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4029                maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4030                break;
4031
4032        case QIB_IB_CFG_LINKLATENCY:
4033                ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
4034                        SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
4035                goto done;
4036
4037        case QIB_IB_CFG_OP_VLS:
4038                ret = ppd->vls_operational;
4039                goto done;
4040
4041        case QIB_IB_CFG_VL_HIGH_CAP:
4042                ret = 16;
4043                goto done;
4044
4045        case QIB_IB_CFG_VL_LOW_CAP:
4046                ret = 16;
4047                goto done;
4048
4049        case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
4050                ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4051                                OverrunThreshold);
4052                goto done;
4053
4054        case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
4055                ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4056                                PhyerrThreshold);
4057                goto done;
4058
4059        case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
4060                /* will only take effect when the link state changes */
4061                ret = (ppd->cpspec->ibcctrl_a &
4062                       SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
4063                        IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
4064                goto done;
4065
4066        case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
4067                lsb = IBA7322_IBC_HRTBT_LSB;
4068                maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
4069                break;
4070
4071        case QIB_IB_CFG_PMA_TICKS:
4072                /*
4073                 * 0x00 = 10x link transfer rate, or 4 nsec for 2.5 Gbps.
4074                 * Since the clock is always 250MHz, the value is 3, 1 or 0.
4075                 */
4076                if (ppd->link_speed_active == QIB_IB_QDR)
4077                        ret = 3;
4078                else if (ppd->link_speed_active == QIB_IB_DDR)
4079                        ret = 1;
4080                else
4081                        ret = 0;
4082                goto done;
4083
4084        default:
4085                ret = -EINVAL;
4086                goto done;
4087        }
4088        ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
4089done:
4090        return ret;
4091}
4092
4093/*
4094 * Below again cribbed liberally from older version. Do not lean
4095 * heavily on it.
4096 */
4097#define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
4098#define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
4099        | (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
4100
4101static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
4102{
4103        struct qib_devdata *dd = ppd->dd;
4104        u64 maskr; /* right-justified mask */
4105        int lsb, ret = 0;
4106        u16 lcmd, licmd;
4107        unsigned long flags;
4108
4109        switch (which) {
4110        case QIB_IB_CFG_LIDLMC:
4111                /*
4112                 * Set LID and LMC. Combined to avoid possible hazard
4113                 * caller puts LMC in 16MSbits, DLID in 16LSbits of val
4114                 */
4115                lsb = IBA7322_IBC_DLIDLMC_SHIFT;
4116                maskr = IBA7322_IBC_DLIDLMC_MASK;
4117                /*
4118                 * For header-checking, the SLID in the packet will
4119                 * be masked with SendIBSLMCMask, and compared
4120                 * with SendIBSLIDAssignMask. Make sure we do not
4121                 * set any bits not covered by the mask, or we get
4122                 * false-positives.
4123                 */
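                    /*
                     * e.g. if the low half of val is LID 0x1237 and the
                     * high half is the LMC mask 0xfffc, val & (val >> 16)
                     * programs 0x1234, so LID bits outside the mask can
                     * never cause a false SLID match.
                     */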
4124                qib_write_kreg_port(ppd, krp_sendslid,
4125                                    val & (val >> 16) & SendIBSLIDAssignMask);
4126                qib_write_kreg_port(ppd, krp_sendslidmask,
4127                                    (val >> 16) & SendIBSLMCMask);
4128                break;
4129
4130        case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
4131                ppd->link_width_enabled = val;
4132                /* convert IB value to chip register value */
4133                if (val == IB_WIDTH_1X)
4134                        val = 0;
4135                else if (val == IB_WIDTH_4X)
4136                        val = 1;
4137                else
4138                        val = 3;
4139                maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
4140                lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
4141                break;
4142
4143        case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
4144                /*
4145                 * As with width, only write the actual register if the
4146                 * link is currently down, otherwise takes effect on next
4147                 * link change.  Since setting is being explicitly requested
4148                 * (via MAD or sysfs), clear autoneg failure status if speed
4149                 * autoneg is enabled.
4150                 */
4151                ppd->link_speed_enabled = val;
4152                val <<= IBA7322_IBC_SPEED_LSB;
4153                maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
4154                        IBA7322_IBC_MAX_SPEED_MASK;
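                    /*
                     * val & (val - 1) is nonzero exactly when val has more
                     * than one bit set, i.e. multiple speeds are enabled.
                     */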
4155                if (val & (val - 1)) {
4156                        /* Multiple speeds enabled */
4157                        val |= IBA7322_IBC_IBTA_1_2_MASK |
4158                                IBA7322_IBC_MAX_SPEED_MASK;
4159                        spin_lock_irqsave(&ppd->lflags_lock, flags);
4160                        ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
4161                        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
4162                } else if (val & IBA7322_IBC_SPEED_QDR)
4163                        val |= IBA7322_IBC_IBTA_1_2_MASK;
4164                /* IBTA 1.2 mode + min/max + speed bits are contiguous */
4165                lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
4166                break;
4167
4168        case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
4169                lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4170                maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4171                break;
4172
4173        case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
4174                lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4175                maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4176                break;
4177
4178        case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
4179                maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4180                                  OverrunThreshold);
4181                if (maskr != val) {
4182                        ppd->cpspec->ibcctrl_a &=
4183                                ~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
4184                        ppd->cpspec->ibcctrl_a |= (u64) val <<
4185                                SYM_LSB(IBCCtrlA_0, OverrunThreshold);
4186                        qib_write_kreg_port(ppd, krp_ibcctrl_a,
4187                                            ppd->cpspec->ibcctrl_a);
4188                        qib_write_kreg(dd, kr_scratch, 0ULL);
4189                }
4190                goto bail;
4191
4192        case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
4193                maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4194                                  PhyerrThreshold);
4195                if (maskr != val) {
4196                        ppd->cpspec->ibcctrl_a &=
4197                                ~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
4198                        ppd->cpspec->ibcctrl_a |= (u64) val <<
4199                                SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
4200                        qib_write_kreg_port(ppd, krp_ibcctrl_a,
4201                                            ppd->cpspec->ibcctrl_a);
4202                        qib_write_kreg(dd, kr_scratch, 0ULL);
4203                }
4204                goto bail;
4205
4206        case QIB_IB_CFG_PKEYS: /* update pkeys */
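                    /*
                     * Pack all four 16-bit pkey table entries into a
                     * single 64-bit register write.
                     */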
4207                maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
4208                        ((u64) ppd->pkeys[2] << 32) |
4209                        ((u64) ppd->pkeys[3] << 48);
4210                qib_write_kreg_port(ppd, krp_partitionkey, maskr);
4211                goto bail;
4212
4213        case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
4214                /* will only take effect when the link state changes */
4215                if (val == IB_LINKINITCMD_POLL)
4216                        ppd->cpspec->ibcctrl_a &=
4217                                ~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
4218                else /* SLEEP */
4219                        ppd->cpspec->ibcctrl_a |=
4220                                SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
4221                qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
4222                qib_write_kreg(dd, kr_scratch, 0ULL);
4223                goto bail;
4224
4225        case QIB_IB_CFG_MTU: /* update the MTU in IBC */
4226                /*
4227                 * Update our housekeeping variables, and set IBC max
4228                 * size, same as init code; max IBC is max we allow in
4229                 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
4230                 * Set even if it's unchanged, print debug message only
4231                 * on changes.
4232                 */
4233                val = (ppd->ibmaxlen >> 2) + 1;
4234                ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
4235                ppd->cpspec->ibcctrl_a |= (u64)val <<
4236                        SYM_LSB(IBCCtrlA_0, MaxPktLen);
4237                qib_write_kreg_port(ppd, krp_ibcctrl_a,
4238                                    ppd->cpspec->ibcctrl_a);
4239                qib_write_kreg(dd, kr_scratch, 0ULL);
4240                goto bail;
4241
4242        case QIB_IB_CFG_LSTATE: /* set the IB link state */
4243                switch (val & 0xffff0000) {
4244                case IB_LINKCMD_DOWN:
4245                        lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
4246                        ppd->cpspec->ibmalfusesnap = 1;
4247                        ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
4248                                crp_errlink);
4249                        if (!ppd->cpspec->ibdeltainprog &&
4250                            qib_compat_ddr_negotiate) {
4251                                ppd->cpspec->ibdeltainprog = 1;
4252                                ppd->cpspec->ibsymsnap =
4253                                        read_7322_creg32_port(ppd,
4254                                                              crp_ibsymbolerr);
4255                                ppd->cpspec->iblnkerrsnap =
4256                                        read_7322_creg32_port(ppd,
4257                                                      crp_iblinkerrrecov);
4258                        }
4259                        break;
4260
4261                case IB_LINKCMD_ARMED:
4262                        lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
4263                        if (ppd->cpspec->ibmalfusesnap) {
4264                                ppd->cpspec->ibmalfusesnap = 0;
4265                                ppd->cpspec->ibmalfdelta +=
4266                                        read_7322_creg32_port(ppd,
4267                                                              crp_errlink) -
4268                                        ppd->cpspec->ibmalfsnap;
4269                        }
4270                        break;
4271
4272                case IB_LINKCMD_ACTIVE:
4273                        lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
4274                        break;
4275
4276                default:
4277                        ret = -EINVAL;
4278                        qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
4279                        goto bail;
4280                }
4281                switch (val & 0xffff) {
4282                case IB_LINKINITCMD_NOP:
4283                        licmd = 0;
4284                        break;
4285
4286                case IB_LINKINITCMD_POLL:
4287                        licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
4288                        break;
4289
4290                case IB_LINKINITCMD_SLEEP:
4291                        licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
4292                        break;
4293
4294                case IB_LINKINITCMD_DISABLE:
4295                        licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
4296                        ppd->cpspec->chase_end = 0;
4297                        /*
4298                         * stop state chase counter and timer, if running.
4299                         * wait for pending timer, but don't clear .data (ppd)!
4300                         */
4301                        if (ppd->cpspec->chase_timer.expires) {
4302                                del_timer_sync(&ppd->cpspec->chase_timer);
4303                                ppd->cpspec->chase_timer.expires = 0;
4304                        }
4305                        break;
4306
4307                default:
4308                        ret = -EINVAL;
4309                        qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
4310                                    val & 0xffff);
4311                        goto bail;
4312                }
4313                qib_set_ib_7322_lstate(ppd, lcmd, licmd);
4314                goto bail;
4315
4316        case QIB_IB_CFG_OP_VLS:
4317                if (ppd->vls_operational != val) {
4318                        ppd->vls_operational = val;
4319                        set_vls(ppd);
4320                }
4321                goto bail;
4322
4323        case QIB_IB_CFG_VL_HIGH_LIMIT:
4324                qib_write_kreg_port(ppd, krp_highprio_limit, val);
4325                goto bail;
4326
4327        case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
4328                if (val > 3) {
4329                        ret = -EINVAL;
4330                        goto bail;
4331                }
4332                lsb = IBA7322_IBC_HRTBT_LSB;
4333                maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
4334                break;
4335
4336        case QIB_IB_CFG_PORT:
4337                /* val is the port number of the switch we are connected to. */
4338                if (ppd->dd->cspec->r1) {
4339                        cancel_delayed_work(&ppd->cpspec->ipg_work);
4340                        ppd->cpspec->ipg_tries = 0;
4341                }
4342                goto bail;
4343
4344        default:
4345                ret = -EINVAL;
4346                goto bail;
4347        }
4348        ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
4349        ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
4350        qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
4351        qib_write_kreg(dd, kr_scratch, 0);
4352bail:
4353        return ret;
4354}
4355
4356static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
4357{
4358        int ret = 0;
4359        u64 val, ctrlb;
4360
4361        /* only IBC loopback, may add serdes and xgxs loopbacks later */
4362        if (!strncmp(what, "ibc", 3)) {
4363                ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
4364                                                       Loopback);
4365                val = 0; /* disable heart beat, so link will come up */
4366                qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
4367                         ppd->dd->unit, ppd->port);
4368        } else if (!strncmp(what, "off", 3)) {
4369                ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
4370                                                        Loopback);
4371                /* enable heart beat again */
4372                val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
4373                qib_devinfo(ppd->dd->pcidev,
4374                        "Disabling IB%u:%u IBC loopback (normal)\n",
4375                        ppd->dd->unit, ppd->port);
4376        } else
4377                ret = -EINVAL;
4378        if (!ret) {
4379                qib_write_kreg_port(ppd, krp_ibcctrl_a,
4380                                    ppd->cpspec->ibcctrl_a);
4381                ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
4382                                             << IBA7322_IBC_HRTBT_LSB);
4383                ppd->cpspec->ibcctrl_b = ctrlb | val;
4384                qib_write_kreg_port(ppd, krp_ibcctrl_b,
4385                                    ppd->cpspec->ibcctrl_b);
4386                qib_write_kreg(ppd->dd, kr_scratch, 0);
4387        }
4388        return ret;
4389}
4390
4391static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4392                           struct ib_vl_weight_elem *vl)
4393{
4394        unsigned i;
4395
4396        for (i = 0; i < 16; i++, regno++, vl++) {
4397                u32 val = qib_read_kreg_port(ppd, regno);
4398
4399                vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
4400                        SYM_RMASK(LowPriority0_0, VirtualLane);
4401                vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
4402                        SYM_RMASK(LowPriority0_0, Weight);
4403        }
4404}
4405
4406static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4407                           struct ib_vl_weight_elem *vl)
4408{
4409        unsigned i;
4410
4411        for (i = 0; i < 16; i++, regno++, vl++) {
4412                u64 val;
4413
4414                val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
4415                        SYM_LSB(LowPriority0_0, VirtualLane)) |
4416                      ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
4417                        SYM_LSB(LowPriority0_0, Weight));
4418                qib_write_kreg_port(ppd, regno, val);
4419        }
4420        if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
4421                struct qib_devdata *dd = ppd->dd;
4422                unsigned long flags;
4423
4424                spin_lock_irqsave(&dd->sendctrl_lock, flags);
4425                ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
4426                qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4427                qib_write_kreg(dd, kr_scratch, 0);
4428                spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4429        }
4430}
4431
4432static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
4433{
4434        switch (which) {
4435        case QIB_IB_TBL_VL_HIGH_ARB:
4436                get_vl_weights(ppd, krp_highprio_0, t);
4437                break;
4438
4439        case QIB_IB_TBL_VL_LOW_ARB:
4440                get_vl_weights(ppd, krp_lowprio_0, t);
4441                break;
4442
4443        default:
4444                return -EINVAL;
4445        }
4446        return 0;
4447}
4448
4449static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
4450{
4451        switch (which) {
4452        case QIB_IB_TBL_VL_HIGH_ARB:
4453                set_vl_weights(ppd, krp_highprio_0, t);
4454                break;
4455
4456        case QIB_IB_TBL_VL_LOW_ARB:
4457                set_vl_weights(ppd, krp_lowprio_0, t);
4458                break;
4459
4460        default:
4461                return -EINVAL;
4462        }
4463        return 0;
4464}
4465
4466static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
4467                                    u32 updegr, u32 egrhd, u32 npkts)
4468{
4469        /*
4470         * Need to write timeout register before updating rcvhdrhead to ensure
4471         * that the timer is enabled on reception of a packet.
4472         */
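            /*
             * The upper bits of hd (above PKTINT_SHIFT) carry the packet
             * interrupt count, so retune the timeout for npkts before
             * writing the head registers below.
             */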
4473        if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
4474                adjust_rcv_timeout(rcd, npkts);
4475        if (updegr)
4476                qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
4477        mmiowb();
4478        qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4479        qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4480        mmiowb();
4481}
4482
4483static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
4484{
4485        u32 head, tail;
4486
4487        head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
4488        if (rcd->rcvhdrtail_kvaddr)
4489                tail = qib_get_rcvhdrtail(rcd);
4490        else
4491                tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
4492        return head == tail;
4493}
4494
4495#define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
4496        QIB_RCVCTRL_CTXT_DIS | \
4497        QIB_RCVCTRL_TIDFLOW_ENB | \
4498        QIB_RCVCTRL_TIDFLOW_DIS | \
4499        QIB_RCVCTRL_TAILUPD_ENB | \
4500        QIB_RCVCTRL_TAILUPD_DIS | \
4501        QIB_RCVCTRL_INTRAVAIL_ENB | \
4502        QIB_RCVCTRL_INTRAVAIL_DIS | \
4503        QIB_RCVCTRL_BP_ENB | \
4504        QIB_RCVCTRL_BP_DIS)
4505
4506#define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
4507        QIB_RCVCTRL_CTXT_DIS | \
4508        QIB_RCVCTRL_PKEY_DIS | \
4509        QIB_RCVCTRL_PKEY_ENB)
4510
4511/*
4512 * Modify the RCVCTRL register in a chip-specific way. This
4513 * is a function because bit positions and (future) register
4514 * locations are chip-specific, but the needed operations are
4515 * generic. <op> is a bit-mask because we often want to
4516 * do multiple modifications.
4517 */
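    /*
     * Illustrative call (not from this file): enabling a context and its
     * interrupt-on-packets-available behavior would be
     *   rcvctrl_7322_mod(ppd, QIB_RCVCTRL_CTXT_ENB |
     *                    QIB_RCVCTRL_INTRAVAIL_ENB, ctxt);
     * which writes both the common and the per-port registers below.
     */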
4518static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
4519                             int ctxt)
4520{
4521        struct qib_devdata *dd = ppd->dd;
4522        struct qib_ctxtdata *rcd;
4523        u64 mask, val;
4524        unsigned long flags;
4525
4526        spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
4527
4528        if (op & QIB_RCVCTRL_TIDFLOW_ENB)
4529                dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
4530        if (op & QIB_RCVCTRL_TIDFLOW_DIS)
4531                dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
4532        if (op & QIB_RCVCTRL_TAILUPD_ENB)
4533                dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4534        if (op & QIB_RCVCTRL_TAILUPD_DIS)
4535                dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
4536        if (op & QIB_RCVCTRL_PKEY_ENB)
4537                ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4538        if (op & QIB_RCVCTRL_PKEY_DIS)
4539                ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4540        if (ctxt < 0) {
4541                mask = (1ULL << dd->ctxtcnt) - 1;
4542                rcd = NULL;
4543        } else {
4544                mask = (1ULL << ctxt);
4545                rcd = dd->rcd[ctxt];
4546        }
4547        if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
4548                ppd->p_rcvctrl |=
4549                        (mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4550                if (!(dd->flags & QIB_NODMA_RTAIL)) {
4551                        op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
4552                        dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4553                }
4554                /* Write these registers before the context is enabled. */
4555                qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
4556                                    rcd->rcvhdrqtailaddr_phys);
4557                qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
4558                                    rcd->rcvhdrq_phys);
4559                rcd->seq_cnt = 1;
4560        }
4561        if (op & QIB_RCVCTRL_CTXT_DIS)
4562                ppd->p_rcvctrl &=
4563                        ~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4564        if (op & QIB_RCVCTRL_BP_ENB)
4565                dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
4566        if (op & QIB_RCVCTRL_BP_DIS)
4567                dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
4568        if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
4569                dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
4570        if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
4571                dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
4572        /*
4573         * Decide which registers to write depending on the ops enabled.
4574         * Special case is "flush" (no bits set at all)
4575         * which needs to write both.
4576         */
4577        if (op == 0 || (op & RCVCTRL_COMMON_MODS))
4578                qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
4579        if (op == 0 || (op & RCVCTRL_PORT_MODS))
4580                qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
4581        if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
4582                /*
4583                 * Init the context registers also; if we were
4584                 * disabled, tail and head should both be zero
4585                 * already from the enable, but since we don't
4586                 * know, we have to do it explicitly.
4587                 */
4588                val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
4589                qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
4590
4591                /* be sure enabling write seen; hd/tl should be 0 */
4592                (void) qib_read_kreg32(dd, kr_scratch);
4593                val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
4594                dd->rcd[ctxt]->head = val;
4595                /* If kctxt, interrupt on next receive. */
4596                if (ctxt < dd->first_user_ctxt)
4597                        val |= dd->rhdrhead_intr_off;
4598                qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4599        } else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
4600                dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
4601                /* arm rcv interrupt */
4602                val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
4603                qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4604        }
4605        if (op & QIB_RCVCTRL_CTXT_DIS) {
4606                unsigned f;
4607
4608                /* Now that the context is disabled, clear these registers. */
4609                if (ctxt >= 0) {
4610                        qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
4611                        qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
4612                        for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4613                                qib_write_ureg(dd, ur_rcvflowtable + f,
4614                                               TIDFLOW_ERRBITS, ctxt);
4615                } else {
4616                        unsigned i;
4617
4618                        for (i = 0; i < dd->cfgctxts; i++) {
4619                                qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
4620                                                    i, 0);
4621                                qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
4622                                for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4623                                        qib_write_ureg(dd, ur_rcvflowtable + f,
4624                                                       TIDFLOW_ERRBITS, i);
4625                        }
4626                }
4627        }
4628        spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
4629}
4630
4631/*
4632 * Modify the SENDCTRL register in a chip-specific way. This
4633 * is a function because there are multiple such registers with
4634 * slightly different layouts.
4635 * The chip doesn't allow back-to-back sendctrl writes, so write
4636 * the scratch register after writing sendctrl.
4637 *
4638 * Which register is written depends on the operation.
4639 * Most operate on the common register, while
4640 * SEND_ENB and SEND_DIS operate on the per-port ones.
4641 * SEND_ENB is included in common because it can change SPCL_TRIG
4642 */
4643#define SENDCTRL_COMMON_MODS (\
4644        QIB_SENDCTRL_CLEAR | \
4645        QIB_SENDCTRL_AVAIL_DIS | \
4646        QIB_SENDCTRL_AVAIL_ENB | \
4647        QIB_SENDCTRL_AVAIL_BLIP | \
4648        QIB_SENDCTRL_DISARM | \
4649        QIB_SENDCTRL_DISARM_ALL | \
4650        QIB_SENDCTRL_SEND_ENB)
4651
4652#define SENDCTRL_PORT_MODS (\
4653        QIB_SENDCTRL_CLEAR | \
4654        QIB_SENDCTRL_SEND_ENB | \
4655        QIB_SENDCTRL_SEND_DIS | \
4656        QIB_SENDCTRL_FLUSH)
4657
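    /*
     * Illustrative call (not from this file): disabling a port's sends
     * and flushing its FIFOs would be
     *   sendctrl_7322_mod(ppd, QIB_SENDCTRL_SEND_DIS | QIB_SENDCTRL_FLUSH);
     * which exercises only the per-port register path below.
     */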
4658static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
4659{
4660        struct qib_devdata *dd = ppd->dd;
4661        u64 tmp_dd_sendctrl;
4662        unsigned long flags;
4663
4664        spin_lock_irqsave(&dd->sendctrl_lock, flags);
4665
4666        /* First the dd ones that are "sticky", saved in shadow */
4667        if (op & QIB_SENDCTRL_CLEAR)
4668                dd->sendctrl = 0;
4669        if (op & QIB_SENDCTRL_AVAIL_DIS)
4670                dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4671        else if (op & QIB_SENDCTRL_AVAIL_ENB) {
4672                dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
4673                if (dd->flags & QIB_USE_SPCL_TRIG)
4674                        dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
4675        }
4676
4677        /* Then the ppd ones that are "sticky", saved in shadow */
4678        if (op & QIB_SENDCTRL_SEND_DIS)
4679                ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
4680        else if (op & QIB_SENDCTRL_SEND_ENB)
4681                ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
4682
4683        if (op & QIB_SENDCTRL_DISARM_ALL) {
4684                u32 i, last;
4685
4686                tmp_dd_sendctrl = dd->sendctrl;
4687                last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
4688                /*
4689                 * Disarm any buffers that are not yet launched,
4690                 * disabling updates until done.
4691                 */
4692                tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4693                for (i = 0; i < last; i++) {
4694                        qib_write_kreg(dd, kr_sendctrl,
4695                                       tmp_dd_sendctrl |
4696                                       SYM_MASK(SendCtrl, Disarm) | i);
4697                        qib_write_kreg(dd, kr_scratch, 0);
4698                }
4699        }
4700
4701        if (op & QIB_SENDCTRL_FLUSH) {
4702                u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
4703
4704                /*
4705                 * Now drain all the fifos.  The Abort bit should never be
4706                 * needed, so for now, at least, we don't use it.
4707                 */
4708                tmp_ppd_sendctrl |=
4709                        SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
4710                        SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
4711                        SYM_MASK(SendCtrl_0, TxeBypassIbc);
4712                qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
4713                qib_write_kreg(dd, kr_scratch, 0);
4714        }
4715
4716        tmp_dd_sendctrl = dd->sendctrl;
4717
4718        if (op & QIB_SENDCTRL_DISARM)
4719                tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
4720                        ((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
4721                         SYM_LSB(SendCtrl, DisarmSendBuf));
4722        if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
4723            (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
4724                tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4725
4726        if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
4727                qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
4728                qib_write_kreg(dd, kr_scratch, 0);
4729        }
4730
4731        if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
4732                qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4733                qib_write_kreg(dd, kr_scratch, 0);
4734        }
4735
4736        if (op & QIB_SENDCTRL_AVAIL_BLIP) {
4737                qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
4738                qib_write_kreg(dd, kr_scratch, 0);
4739        }
4740
4741        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4742
4743        if (op & QIB_SENDCTRL_FLUSH) {
4744                u32 v;
4745                /*
4746                 * ensure writes have hit chip, then do a few
4747                 * more reads, to allow DMA of pioavail registers
4748                 * to occur, so in-memory copy is in sync with
4749                 * the chip.  Not always safe to sleep.
4750                 */
4751                v = qib_read_kreg32(dd, kr_scratch);
4752                qib_write_kreg(dd, kr_scratch, v);
4753                v = qib_read_kreg32(dd, kr_scratch);
4754                qib_write_kreg(dd, kr_scratch, v);
4755                qib_read_kreg32(dd, kr_scratch);
4756        }
4757}
4758
4759#define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
4760#define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
4761#define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */
4762
4763/**
4764 * qib_portcntr_7322 - read a per-port chip counter
4765 * @ppd: the qlogic_ib pport
4766 * @reg: the counter to read (not a chip offset)
4767 */
4768static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
4769{
4770        struct qib_devdata *dd = ppd->dd;
4771        u64 ret = 0ULL;
4772        u16 creg;
4773        /* 0xffff for unimplemented or synthesized counters */
4774        static const u32 xlator[] = {
4775                [QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
4776                [QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
4777                [QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
4778                [QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
4779                [QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
4780                [QIBPORTCNTR_SENDSTALL] = crp_sendstall,
4781                [QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
4782                [QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
4783                [QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
4784                [QIBPORTCNTR_RCVEBP] = crp_rcvebp,
4785                [QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
4786                [QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
4787                [QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed  for 7322 */
4788                [QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
4789                [QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
4790                [QIBPORTCNTR_ERRICRC] = crp_erricrc,
4791                [QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
4792                [QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
4793                [QIBPORTCNTR_BADFORMAT] = crp_badformat,
4794                [QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
4795                [QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
4796                [QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
4797                [QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
4798                [QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
4799                [QIBPORTCNTR_ERRLINK] = crp_errlink,
4800                [QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
4801                [QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
4802                [QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
4803                [QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
4804                [QIBPORTCNTR_ERRPKEY] = crp_errpkey,
4805                /*
4806                 * the next 3 aren't really counters, but were implemented
4807                 * as counters in older chips, so still get accessed as
4808                 * though they were counters from this code.
4809                 */
4810                [QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
4811                [QIBPORTCNTR_PSSTART] = krp_psstart,
4812                [QIBPORTCNTR_PSSTAT] = krp_psstat,
4813                /* pseudo-counter, summed for all ports */
4814                [QIBPORTCNTR_KHDROVFL] = 0xffff,
4815        };
4816
4817        if (reg >= ARRAY_SIZE(xlator)) {
4818                qib_devinfo(ppd->dd->pcidev,
4819                         "Unimplemented portcounter %u\n", reg);
4820                goto done;
4821        }
4822        creg = xlator[reg] & _PORT_CNTR_IDXMASK;
4823
4824        /* handle non-counters and special cases first */
4825        if (reg == QIBPORTCNTR_KHDROVFL) {
4826                int i;
4827
4828                /* sum over all kernel contexts (skip if mini_init) */
4829                for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
4830                        struct qib_ctxtdata *rcd = dd->rcd[i];
4831
4832                        if (!rcd || rcd->ppd != ppd)
4833                                continue;
4834                        ret += read_7322_creg32(dd, cr_base_egrovfl + i);
4835                }
4836                goto done;
4837        } else if (reg == QIBPORTCNTR_RXDROPPKT) {
4838                /*
4839                 * Used as part of the synthesis of port_rcv_errors
4840                 * in the verbs code for IBTA counters.  Not needed for 7322,
4841                 * because all the errors are already counted by other cntrs.
4842                 */
4843                goto done;
4844        } else if (reg == QIBPORTCNTR_PSINTERVAL ||
4845                   reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
4846                /* were counters in older chips, now per-port kernel regs */
4847                ret = qib_read_kreg_port(ppd, creg);
4848                goto done;
4849        }
4850
4851        /*
4852         * Only fast increment counters are 64 bits; use 32 bit reads to
4853         * avoid two independent reads when on Opteron.
4854         */
4855        if (xlator[reg] & _PORT_64BIT_FLAG)
4856                ret = read_7322_creg_port(ppd, creg);
4857        else
4858                ret = read_7322_creg32_port(ppd, creg);
4859        if (creg == crp_ibsymbolerr) {
4860                if (ppd->cpspec->ibdeltainprog)
4861                        ret -= ret - ppd->cpspec->ibsymsnap;
4862                ret -= ppd->cpspec->ibsymdelta;
4863        } else if (creg == crp_iblinkerrrecov) {
4864                if (ppd->cpspec->ibdeltainprog)
4865                        ret -= ret - ppd->cpspec->iblnkerrsnap;
4866                ret -= ppd->cpspec->iblnkerrdelta;
4867        } else if (creg == crp_errlink)
4868                ret -= ppd->cpspec->ibmalfdelta;
4869        else if (creg == crp_iblinkdown)
4870                ret += ppd->cpspec->iblnkdowndelta;
4871done:
4872        return ret;
4873}
4874
4875/*
4876 * Device counter names (not port-specific), one line per stat,
4877 * single string.  Used by utilities like ipathstats to print the stats
4878 * in a way which works for different versions of drivers, without changing
4879 * the utility.  Names need to be 12 chars or less (w/o newline), for proper
4880 * display by utility.
4881 * Non-error counters are first.
4882 * Start of "error" counters is indicated by a leading "E " on the first
4883 * "error" counter, and doesn't count in label length.
4884 * The EgrOvfl list needs to be last so we truncate them at the configured
4885 * context count for the device.
4886 * cntr7322indices contains the corresponding register indices.
4887 */
4888static const char cntr7322names[] =
4889        "Interrupts\n"
4890        "HostBusStall\n"
4891        "E RxTIDFull\n"
4892        "RxTIDInvalid\n"
4893        "RxTIDFloDrop\n" /* 7322 only */
4894        "Ctxt0EgrOvfl\n"
4895        "Ctxt1EgrOvfl\n"
4896        "Ctxt2EgrOvfl\n"
4897        "Ctxt3EgrOvfl\n"
4898        "Ctxt4EgrOvfl\n"
4899        "Ctxt5EgrOvfl\n"
4900        "Ctxt6EgrOvfl\n"
4901        "Ctxt7EgrOvfl\n"
4902        "Ctxt8EgrOvfl\n"
4903        "Ctxt9EgrOvfl\n"
4904        "Ctx10EgrOvfl\n"
4905        "Ctx11EgrOvfl\n"
4906        "Ctx12EgrOvfl\n"
4907        "Ctx13EgrOvfl\n"
4908        "Ctx14EgrOvfl\n"
4909        "Ctx15EgrOvfl\n"
4910        "Ctx16EgrOvfl\n"
4911        "Ctx17EgrOvfl\n"
4912        ;
4913
4914static const u32 cntr7322indices[] = {
4915        cr_lbint | _PORT_64BIT_FLAG,
4916        cr_lbstall | _PORT_64BIT_FLAG,
4917        cr_tidfull,
4918        cr_tidinvalid,
4919        cr_rxtidflowdrop,
4920        cr_base_egrovfl + 0,
4921        cr_base_egrovfl + 1,
4922        cr_base_egrovfl + 2,
4923        cr_base_egrovfl + 3,
4924        cr_base_egrovfl + 4,
4925        cr_base_egrovfl + 5,
4926        cr_base_egrovfl + 6,
4927        cr_base_egrovfl + 7,
4928        cr_base_egrovfl + 8,
4929        cr_base_egrovfl + 9,
4930        cr_base_egrovfl + 10,
4931        cr_base_egrovfl + 11,
4932        cr_base_egrovfl + 12,
4933        cr_base_egrovfl + 13,
4934        cr_base_egrovfl + 14,
4935        cr_base_egrovfl + 15,
4936        cr_base_egrovfl + 16,
4937        cr_base_egrovfl + 17,
4938};
4939
4940/*
4941 * same as cntr7322names and cntr7322indices, but for port-specific counters.
4942 * portcntr7322indices is somewhat complicated by some registers needing
4943 * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
4944 */
4945static const char portcntr7322names[] =
4946        "TxPkt\n"
4947        "TxFlowPkt\n"
4948        "TxWords\n"
4949        "RxPkt\n"
4950        "RxFlowPkt\n"
4951        "RxWords\n"
4952        "TxFlowStall\n"
4953        "TxDmaDesc\n"  /* 7220 and 7322-only */
4954        "E RxDlidFltr\n"  /* 7220 and 7322-only */
4955        "IBStatusChng\n"
4956        "IBLinkDown\n"
4957        "IBLnkRecov\n"
4958        "IBRxLinkErr\n"
4959        "IBSymbolErr\n"
4960        "RxLLIErr\n"
4961        "RxBadFormat\n"
4962        "RxBadLen\n"
4963        "RxBufOvrfl\n"
4964        "RxEBP\n"
4965        "RxFlowCtlErr\n"
4966        "RxICRCerr\n"
4967        "RxLPCRCerr\n"
4968        "RxVCRCerr\n"
4969        "RxInvalLen\n"
4970        "RxInvalPKey\n"
4971        "RxPktDropped\n"
4972        "TxBadLength\n"
4973        "TxDropped\n"
4974        "TxInvalLen\n"
4975        "TxUnderrun\n"
4976        "TxUnsupVL\n"
4977        "RxLclPhyErr\n" /* 7220 and 7322-only from here down */
4978        "RxVL15Drop\n"
4979        "RxVlErr\n"
4980        "XcessBufOvfl\n"
4981        "RxQPBadCtxt\n" /* 7322-only from here down */
4982        "TXBadHeader\n"
4983        ;
4984
4985static const u32 portcntr7322indices[] = {
4986        QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
4987        crp_pktsendflow,
4988        QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
4989        QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
4990        crp_pktrcvflowctrl,
4991        QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
4992        QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
4993        crp_txsdmadesc | _PORT_64BIT_FLAG,
4994        crp_rxdlidfltr,
4995        crp_ibstatuschange,
4996        QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
4997        QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
4998        QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
4999        QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
5000        QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
5001        QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
5002        QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
5003        QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
5004        QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
5005        crp_rcvflowctrlviol,
5006        QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
5007        QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
5008        QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
5009        QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
5010        QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
5011        QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
5012        crp_txminmaxlenerr,
5013        crp_txdroppedpkt,
5014        crp_txlenerr,
5015        crp_txunderrun,
5016        crp_txunsupvl,
5017        QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
5018        QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
5019        QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
5020        QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
5021        crp_rxqpinvalidctxt,
5022        crp_txhdrerr,
5023};
5024
5025/* do all the setup to make the counter reads efficient later */
5026static void init_7322_cntrnames(struct qib_devdata *dd)
5027{
5028        int i, j = 0;
5029        char *s;
5030
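            /*
             * j stays 0 until the first per-context "EgrOvfl" name, then
             * counts those names so the exported list is truncated at the
             * configured context count.
             */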
5031        for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
5032             i++) {
5033                /* we always have at least one counter before the egrovfl */
5034                if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
5035                        j = 1;
5036                s = strchr(s + 1, '\n');
5037                if (s && j)
5038                        j++;
5039        }
5040        dd->cspec->ncntrs = i;
5041        if (!s)
5042                /* full list; size is without terminating null */
5043                dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
5044        else
5045                dd->cspec->cntrnamelen = 1 + s - cntr7322names;
5046        dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
5047                * sizeof(u64), GFP_KERNEL);
5048        if (!dd->cspec->cntrs)
5049                qib_dev_err(dd, "Failed allocation for counters\n");
5050
5051        for (i = 0, s = (char *)portcntr7322names; s; i++)
5052                s = strchr(s + 1, '\n');
5053        dd->cspec->nportcntrs = i - 1;
5054        dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
5055        for (i = 0; i < dd->num_pports; ++i) {
5056                dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
5057                        * sizeof(u64), GFP_KERNEL);
5058                if (!dd->pport[i].cpspec->portcntrs)
5059                        qib_dev_err(dd,
5060                                "Failed allocation for portcounters\n");
5061        }
5062}
5063
5064static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
5065                              u64 **cntrp)
5066{
5067        u32 ret;
5068
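            /*
             * Two-phase read protocol: the caller first passes namep to
             * fetch the name string, then cntrp to fetch the values; a
             * return of 0 means everything up to "pos" has been consumed.
             */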
5069        if (namep) {
5070                ret = dd->cspec->cntrnamelen;
5071                if (pos >= ret)
5072                        ret = 0; /* final read after getting everything */
5073                else
5074                        *namep = (char *) cntr7322names;
5075        } else {
5076                u64 *cntr = dd->cspec->cntrs;
5077                int i;
5078
5079                ret = dd->cspec->ncntrs * sizeof(u64);
5080                if (!cntr || pos >= ret) {
5081                        /* everything read, or couldn't get memory */
5082                        ret = 0;
5083                        goto done;
5084                }
5085                *cntrp = cntr;
5086                for (i = 0; i < dd->cspec->ncntrs; i++)
5087                        if (cntr7322indices[i] & _PORT_64BIT_FLAG)
5088                                *cntr++ = read_7322_creg(dd,
5089                                                         cntr7322indices[i] &
5090                                                         _PORT_CNTR_IDXMASK);
5091                        else
5092                                *cntr++ = read_7322_creg32(dd,
5093                                                           cntr7322indices[i]);
5094        }
5095done:
5096        return ret;
5097}
5098
5099static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
5100                                  char **namep, u64 **cntrp)
5101{
5102        u32 ret;
5103
5104        if (namep) {
5105                ret = dd->cspec->portcntrnamelen;
5106                if (pos >= ret)
5107                        ret = 0; /* final read after getting everything */
5108                else
5109                        *namep = (char *)portcntr7322names;
5110        } else {
5111                struct qib_pportdata *ppd = &dd->pport[port];
5112                u64 *cntr = ppd->cpspec->portcntrs;
5113                int i;
5114
5115                ret = dd->cspec->nportcntrs * sizeof(u64);
5116                if (!cntr || pos >= ret) {
5117                        /* everything read, or couldn't get memory */
5118                        ret = 0;
5119                        goto done;
5120                }
5121                *cntrp = cntr;
5122                for (i = 0; i < dd->cspec->nportcntrs; i++) {
5123                        if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
5124                                *cntr++ = qib_portcntr_7322(ppd,
5125                                        portcntr7322indices[i] &
5126                                        _PORT_CNTR_IDXMASK);
5127                        else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
5128                                *cntr++ = read_7322_creg_port(ppd,
5129                                           portcntr7322indices[i] &
5130                                            _PORT_CNTR_IDXMASK);
5131                        else
5132                                *cntr++ = read_7322_creg32_port(ppd,
5133                                           portcntr7322indices[i]);
5134                }
5135        }
5136done:
5137        return ret;
5138}
5139
5140/**
5141 * qib_get_7322_faststats - get word counters from chip before they overflow
5142 * @opaque: contains a pointer to the qlogic_ib device qib_devdata
5143 *
5144 * VESTIGIAL: the IBA7322 has no "small fast counters", so the only
5145 * real purpose of this function is to maintain the notion of
5146 * "active time", which in turn is only logged into the eeprom,
5147 * which we don't have, yet, for 7322-based boards.
5148 *
5149 * called from add_timer
5150 */
5151static void qib_get_7322_faststats(unsigned long opaque)
5152{
5153        struct qib_devdata *dd = (struct qib_devdata *) opaque;
5154        struct qib_pportdata *ppd;
5155        unsigned long flags;
5156        u64 traffic_wds;
5157        int pidx;
5158
5159        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5160                ppd = dd->pport + pidx;
5161
5162                /*
5163                 * If the port isn't enabled or isn't operational, or if
5164                 * diags are running (which can cause memory diags to fail),
5165                 * skip this port this time.
5166                 */
5167                if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
5168                    || dd->diag_client)
5169                        continue;
5170
5171                /*
5172                 * Maintain an activity timer, based on traffic
5173                 * exceeding a threshold, so we need to check the word-counts
5174                 * even if they are 64-bit.
5175                 */
5176                traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
5177                        qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
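                /*
                 * Convert the running word count into a delta since the
                 * last sample; if enough words moved in this interval,
                 * credit another ACTIVITY_TIMER tick of active time.
                 */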
5178                spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
5179                traffic_wds -= ppd->dd->traffic_wds;
5180                ppd->dd->traffic_wds += traffic_wds;
5181                if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
5182                        atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
5183                spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
5184                if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
5185                                                QIB_IB_QDR) &&
5186                    (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
5187                                    QIBL_LINKACTIVE)) &&
5188                    ppd->cpspec->qdr_dfe_time &&
5189                    time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) {
5190                        ppd->cpspec->qdr_dfe_on = 0;
5191
5192                        qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
5193                                            ppd->dd->cspec->r1 ?
5194                                            QDR_STATIC_ADAPT_INIT_R1 :
5195                                            QDR_STATIC_ADAPT_INIT);
5196                        force_h1(ppd);
5197                }
5198        }
5199        mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
5200}
5201
5202/*
5203 * If we were using MSIx, try to fall back to INTx.
5204 */
5205static int qib_7322_intr_fallback(struct qib_devdata *dd)
5206{
5207        if (!dd->cspec->num_msix_entries)
5208                return 0; /* already using INTx */
5209
5210        qib_devinfo(dd->pcidev,
5211                "MSIx interrupt not detected, trying INTx interrupts\n");
5212        qib_7322_nomsix(dd);
5213        qib_enable_intx(dd->pcidev);
5214        qib_setup_7322_interrupt(dd, 0);
5215        return 1;
5216}
5217
5218/*
5219 * Reset the XGXS (between serdes and IBC).  Slightly less intrusive
5220 * than resetting the IBC or external link state, and useful in some
5221 * cases to cause some retraining.  To do this right, we reset IBC
5222 * as well, then return to the previous state (which may still be in reset).
5223 * NOTE: some callers of this "know" this writes the current value
5224 * of cpspec->ibcctrl_a as part of its operation, so if that changes,
5225 * check all callers.
5226 */
5227static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
5228{
5229        u64 val;
5230        struct qib_devdata *dd = ppd->dd;
5231        const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
5232                SYM_MASK(IBPCSConfig_0, xcv_treset) |
5233                SYM_MASK(IBPCSConfig_0, tx_rx_reset);
5234
5235        val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
5236        qib_write_kreg(dd, kr_hwerrmask,
5237                       dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
5238        qib_write_kreg_port(ppd, krp_ibcctrl_a,
5239                            ppd->cpspec->ibcctrl_a &
5240                            ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
5241
5242        qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
5243        qib_read_kreg32(dd, kr_scratch);
5244        qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
5245        qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
5246        qib_write_kreg(dd, kr_scratch, 0ULL);
5247        qib_write_kreg(dd, kr_hwerrclear,
5248                       SYM_MASK(HwErrClear, statusValidNoEopClear));
5249        qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
5250}
5251
5252/*
5253 * This code for non-IBTA-compliant IB speed negotiation is only known to
5254 * work for the SDR to DDR transition, and only between an HCA and a switch
5255 * with recent firmware.  It is based on observed heuristics, rather than
5256 * actual knowledge of the non-compliant speed negotiation.
5257 * It has a number of hard-coded fields, since the hope is to rewrite this
5258 * when a spec is available on how the negotiation is intended to work.
5259 */
5260static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
5261                                 u32 dcnt, u32 *data)
5262{
5263        int i;
5264        u64 pbc;
5265        u32 __iomem *piobuf;
5266        u32 pnum, control, len;
5267        struct qib_devdata *dd = ppd->dd;
5268
5269        i = 0;
5270        len = 7 + dcnt + 1; /* 7-dword header, dcnt dwords of data, 1 dword icrc */
5271        control = qib_7322_setpbc_control(ppd, len, 0, 15);
5272        pbc = ((u64) control << 32) | len;
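        /*
         * Sketch of the PBC word built here: the low 32 bits carry the
         * length in dwords, the high 32 bits the control word from
         * qib_7322_setpbc_control() above.  E.g. dcnt == 0x40 gives
         * len == 7 + 0x40 + 1 == 0x48 dwords in the low word.
         */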
5273        while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
5274                if (i++ > 15)
5275                        return;
5276                udelay(2);
5277        }
5278        /* disable header check on this packet, since it can't be valid */
5279        dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
5280        writeq(pbc, piobuf);
5281        qib_flush_wc();
5282        qib_pio_copy(piobuf + 2, hdr, 7);
5283        qib_pio_copy(piobuf + 9, data, dcnt);
5284        if (dd->flags & QIB_USE_SPCL_TRIG) {
5285                u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
5286
5287                qib_flush_wc();
5288                __raw_writel(0xaebecede, piobuf + spcl_off);
5289        }
5290        qib_flush_wc();
5291        qib_sendbuf_done(dd, pnum);
5292        /* and re-enable hdr check */
5293        dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
5294}
5295
5296/*
5297 * _start packet gets sent twice at start, _done gets sent twice at end
5298 */
5299static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
5300{
5301        struct qib_devdata *dd = ppd->dd;
5302        static u32 swapped;
5303        u32 dw, i, hcnt, dcnt, *data;
5304        static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
5305        static u32 madpayload_start[0x40] = {
5306                0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5307                0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5308                0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
5309                };
5310        static u32 madpayload_done[0x40] = {
5311                0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5312                0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5313                0x40000001, 0x1388, 0x15e, /* rest 0's */
5314                };
5315
5316        dcnt = ARRAY_SIZE(madpayload_start);
5317        hcnt = ARRAY_SIZE(hdr);
5318        if (!swapped) {
5319                /* for maintainability, do it at runtime */
5320                for (i = 0; i < hcnt; i++) {
5321                        dw = (__force u32) cpu_to_be32(hdr[i]);
5322                        hdr[i] = dw;
5323                }
5324                for (i = 0; i < dcnt; i++) {
5325                        dw = (__force u32) cpu_to_be32(madpayload_start[i]);
5326                        madpayload_start[i] = dw;
5327                        dw = (__force u32) cpu_to_be32(madpayload_done[i]);
5328                        madpayload_done[i] = dw;
5329                }
5330                swapped = 1;
5331        }
5332
5333        data = which ? madpayload_done : madpayload_start;
5334
5335        autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5336        qib_read_kreg64(dd, kr_scratch);
5337        udelay(2);
5338        autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5339        qib_read_kreg64(dd, kr_scratch);
5340        udelay(2);
5341}
5342
5343/*
5344 * Do the absolute minimum to cause an IB speed change, and make it
5345 * ready, but don't actually trigger the change.  The caller will
5346 * do that when ready (if link is in Polling training state, it will
5347 * happen immediately, otherwise when link next goes down).
5348 *
5349 * This routine should only be used as part of the DDR autonegotiation
5350 * code for devices that are not compliant with IB 1.2 (or code that
5351 * fixes things up for same).
5352 *
5353 * When the link has gone down and autoneg is enabled, or autoneg has
5354 * failed and we give up until next time, we set both speeds, and
5355 * then we want IBTA enabled as well as "use max enabled speed".
5356 */
5357static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
5358{
5359        u64 newctrlb;
5360        newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
5361                                    IBA7322_IBC_IBTA_1_2_MASK |
5362                                    IBA7322_IBC_MAX_SPEED_MASK);
5363
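        /*
         * speed is a bitmask of QIB_IB_SDR/DDR/QDR; (speed & (speed - 1))
         * is nonzero exactly when more than one speed bit is set (e.g.
         * SDR|DDR), in which case we enable all of them plus IBTA 1.2
         * negotiation, rather than forcing a single fixed speed.
         */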
5364        if (speed & (speed - 1)) /* multiple speeds */
5365                newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
5366                                    IBA7322_IBC_IBTA_1_2_MASK |
5367                                    IBA7322_IBC_MAX_SPEED_MASK;
5368        else
5369                newctrlb |= speed == QIB_IB_QDR ?
5370                        IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
5371                        ((speed == QIB_IB_DDR ?
5372                          IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));
5373
5374        if (newctrlb == ppd->cpspec->ibcctrl_b)
5375                return;
5376
5377        ppd->cpspec->ibcctrl_b = newctrlb;
5378        qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
5379        qib_write_kreg(ppd->dd, kr_scratch, 0);
5380}
5381
5382/*
5383 * This routine is only used when we are not talking to another
5384 * IB 1.2-compliant device that we think can do DDR.
5385 * (This includes all existing switch chips as of Oct 2007.)
5386 * 1.2-compliant devices go directly to DDR prior to reaching INIT.
5387 */
5388static void try_7322_autoneg(struct qib_pportdata *ppd)
5389{
5390        unsigned long flags;
5391
5392        spin_lock_irqsave(&ppd->lflags_lock, flags);
5393        ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
5394        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5395        qib_autoneg_7322_send(ppd, 0);
5396        set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5397        qib_7322_mini_pcs_reset(ppd);
5398        /* 2 msec is minimum length of a poll cycle */
5399        queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
5400                           msecs_to_jiffies(2));
5401}
5402
5403/*
5404 * Handle the empirically determined mechanism for auto-negotiation
5405 * of DDR speed with switches.
5406 */
5407static void autoneg_7322_work(struct work_struct *work)
5408{
5409        struct qib_pportdata *ppd;
5410        struct qib_devdata *dd;
5411        u64 startms;
5412        u32 i;
5413        unsigned long flags;
5414
5415        ppd = container_of(work, struct qib_chippport_specific,
5416                            autoneg_work.work)->ppd;
5417        dd = ppd->dd;
5418
5419        startms = jiffies_to_msecs(jiffies);
5420
5421        /*
5422         * Busy wait for this first part; it should be at most a
5423         * few hundred usec, since we scheduled ourselves for 2 msec.
5424         */
5425        for (i = 0; i < 25; i++) {
5426                if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
5427                     == IB_7322_LT_STATE_POLLQUIET) {
5428                        qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
5429                        break;
5430                }
5431                udelay(100);
5432        }
5433
5434        if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
5435                goto done; /* we got there early or told to stop */
5436
5437        /* we expect this to time out */
5438        if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5439                               !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5440                               msecs_to_jiffies(90)))
5441                goto done;
5442        qib_7322_mini_pcs_reset(ppd);
5443
5444        /* we expect this to time out */
5445        if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5446                               !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5447                               msecs_to_jiffies(1700)))
5448                goto done;
5449        qib_7322_mini_pcs_reset(ppd);
5450
5451        set_7322_ibspeed_fast(ppd, QIB_IB_SDR);
5452
5453        /*
5454         * Wait up to 250 msec for link to train and get to INIT at DDR;
5455         * this should terminate early.
5456         */
5457        wait_event_timeout(ppd->cpspec->autoneg_wait,
5458                !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5459                msecs_to_jiffies(250));
5460done:
5461        if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
5462                spin_lock_irqsave(&ppd->lflags_lock, flags);
5463                ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
5464                if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
5465                        ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
5466                        ppd->cpspec->autoneg_tries = 0;
5467                }
5468                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5469                set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5470        }
5471}
5472
5473/*
5474 * This routine is used to request the IPG setting in the QLogic switch.
5475 * Only called if r1.
5476 */
5477static void try_7322_ipg(struct qib_pportdata *ppd)
5478{
5479        struct qib_ibport *ibp = &ppd->ibport_data;
5480        struct ib_mad_send_buf *send_buf;
5481        struct ib_mad_agent *agent;
5482        struct ib_smp *smp;
5483        unsigned delay;
5484        int ret;
5485
5486        agent = ibp->send_agent;
5487        if (!agent)
5488                goto retry;
5489
5490        send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
5491                                      IB_MGMT_MAD_DATA, GFP_ATOMIC);
5492        if (IS_ERR(send_buf))
5493                goto retry;
5494
5495        if (!ibp->smi_ah) {
5496                struct ib_ah *ah;
5497
5498                ah = qib_create_qp0_ah(ibp, be16_to_cpu(IB_LID_PERMISSIVE));
5499                if (IS_ERR(ah))
5500                        ret = PTR_ERR(ah);
5501                else {
5502                        send_buf->ah = ah;
5503                        ibp->smi_ah = to_iah(ah);
5504                        ret = 0;
5505                }
5506        } else {
5507                send_buf->ah = &ibp->smi_ah->ibah;
5508                ret = 0;
5509        }
5510
5511        smp = send_buf->mad;
5512        smp->base_version = IB_MGMT_BASE_VERSION;
5513        smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
5514        smp->class_version = 1;
5515        smp->method = IB_MGMT_METHOD_SEND;
5516        smp->hop_cnt = 1;
5517        smp->attr_id = QIB_VENDOR_IPG;
5518        smp->attr_mod = 0;
5519
5520        if (!ret)
5521                ret = ib_post_send_mad(send_buf, NULL);
5522        if (ret)
5523                ib_free_send_mad(send_buf);
5524retry:
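        /* exponential backoff: 2, 4, 8, ... msec between successive tries */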
5525        delay = 2 << ppd->cpspec->ipg_tries;
5526        queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
5527                           msecs_to_jiffies(delay));
5528}
5529
5530/*
5531 * Timeout handler for setting IPG.
5532 * Only called if r1.
5533 */
5534static void ipg_7322_work(struct work_struct *work)
5535{
5536        struct qib_pportdata *ppd;
5537
5538        ppd = container_of(work, struct qib_chippport_specific,
5539                           ipg_work.work)->ppd;
5540        if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
5541            && ++ppd->cpspec->ipg_tries <= 10)
5542                try_7322_ipg(ppd);
5543}
5544
5545static u32 qib_7322_iblink_state(u64 ibcs)
5546{
5547        u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);
5548
5549        switch (state) {
5550        case IB_7322_L_STATE_INIT:
5551                state = IB_PORT_INIT;
5552                break;
5553        case IB_7322_L_STATE_ARM:
5554                state = IB_PORT_ARMED;
5555                break;
5556        case IB_7322_L_STATE_ACTIVE:
5557                /* fall through */
5558        case IB_7322_L_STATE_ACT_DEFER:
5559                state = IB_PORT_ACTIVE;
5560                break;
5561        default: /* fall through */
5562        case IB_7322_L_STATE_DOWN:
5563                state = IB_PORT_DOWN;
5564                break;
5565        }
5566        return state;
5567}
5568
5569/* returns the IBTA port state, rather than the IBC link training state */
5570static u8 qib_7322_phys_portstate(u64 ibcs)
5571{
5572        u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
5573        return qib_7322_physportstate[state];
5574}
5575
5576static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
5577{
5578        int ret = 0, symadj = 0;
5579        unsigned long flags;
5580        int mult;
5581
5582        spin_lock_irqsave(&ppd->lflags_lock, flags);
5583        ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
5584        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5585
5586        /* Update our picture of width and speed from chip */
5587        if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
5588                ppd->link_speed_active = QIB_IB_QDR;
5589                mult = 4;
5590        } else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
5591                ppd->link_speed_active = QIB_IB_DDR;
5592                mult = 2;
5593        } else {
5594                ppd->link_speed_active = QIB_IB_SDR;
5595                mult = 1;
5596        }
5597        if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
5598                ppd->link_width_active = IB_WIDTH_4X;
5599                mult *= 4;
5600        } else
5601                ppd->link_width_active = IB_WIDTH_1X;
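        /*
         * mult combines the speed (1/2/4 for SDR/DDR/QDR) and width
         * (1 or 4) multipliers over the SDR 1X base rate, e.g. QDR over
         * 4X gives mult == 16; ib_rate_to_delay[] then gives the
         * corresponding inter-packet delay, smaller for faster links.
         */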
5602        ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
5603
5604        if (!ibup) {
5605                u64 clr;
5606
5607                /* Link went down. */
5608                /* do IPG MAD again after linkdown, even if last time failed */
5609                ppd->cpspec->ipg_tries = 0;
5610                clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
5611                        (SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
5612                         SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
5613                if (clr)
5614                        qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
5615                if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5616                                     QIBL_IB_AUTONEG_INPROG)))
5617                        set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5618                if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5619                        struct qib_qsfp_data *qd =
5620                                &ppd->cpspec->qsfp_data;
5621                        /* unlock the Tx settings, speed may change */
5622                        qib_write_kreg_port(ppd, krp_tx_deemph_override,
5623                                SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
5624                                reset_tx_deemphasis_override));
5625                        qib_cancel_sends(ppd);
5626                        /* on link down, ensure sane pcs state */
5627                        qib_7322_mini_pcs_reset(ppd);
5628                        /* schedule the qsfp refresh, which should turn
5629                         * the link off */
5630                        if (ppd->dd->flags & QIB_HAS_QSFP) {
5631                                qd->t_insert = jiffies;
5632                                queue_work(ib_wq, &qd->work);
5633                        }
5634                        spin_lock_irqsave(&ppd->sdma_lock, flags);
5635                        if (__qib_sdma_running(ppd))
5636                                __qib_sdma_process_event(ppd,
5637                                        qib_sdma_event_e70_go_idle);
5638                        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
5639                }
5640                clr = read_7322_creg32_port(ppd, crp_iblinkdown);
5641                if (clr == ppd->cpspec->iblnkdownsnap)
5642                        ppd->cpspec->iblnkdowndelta++;
5643        } else {
5644                if (qib_compat_ddr_negotiate &&
5645                    !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5646                                     QIBL_IB_AUTONEG_INPROG)) &&
5647                    ppd->link_speed_active == QIB_IB_SDR &&
5648                    (ppd->link_speed_enabled & QIB_IB_DDR)
5649                    && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
5650                        /* we are SDR, and auto-negotiation enabled */
5651                        ++ppd->cpspec->autoneg_tries;
5652                        if (!ppd->cpspec->ibdeltainprog) {
5653                                ppd->cpspec->ibdeltainprog = 1;
5654                                ppd->cpspec->ibsymdelta +=
5655                                        read_7322_creg32_port(ppd,
5656                                                crp_ibsymbolerr) -
5657                                                ppd->cpspec->ibsymsnap;
5658                                ppd->cpspec->iblnkerrdelta +=
5659                                        read_7322_creg32_port(ppd,
5660                                                crp_iblinkerrrecov) -
5661                                                ppd->cpspec->iblnkerrsnap;
5662                        }
5663                        try_7322_autoneg(ppd);
5664                        ret = 1; /* no other IB status change processing */
5665                } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5666                           ppd->link_speed_active == QIB_IB_SDR) {
5667                        qib_autoneg_7322_send(ppd, 1);
5668                        set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5669                        qib_7322_mini_pcs_reset(ppd);
5670                        udelay(2);
5671                        ret = 1; /* no other IB status change processing */
5672                } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5673                           (ppd->link_speed_active & QIB_IB_DDR)) {
5674                        spin_lock_irqsave(&ppd->lflags_lock, flags);
5675                        ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
5676                                         QIBL_IB_AUTONEG_FAILED);
5677                        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5678                        ppd->cpspec->autoneg_tries = 0;
5679                        /* re-enable SDR, for next link down */
5680                        set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5681                        wake_up(&ppd->cpspec->autoneg_wait);
5682                        symadj = 1;
5683                } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
5684                        /*
5685                         * Clear autoneg failure flag, and do setup
5686                         * so we'll try next time link goes down and
5687                         * back to INIT (possibly connected to a
5688                         * different device).
5689                         */
5690                        spin_lock_irqsave(&ppd->lflags_lock, flags);
5691                        ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
5692                        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5693                        ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
5694                        symadj = 1;
5695                }
5696                if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5697                        symadj = 1;
5698                        if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
5699                                try_7322_ipg(ppd);
5700                        if (!ppd->cpspec->recovery_init)
5701                                setup_7322_link_recovery(ppd, 0);
5702                        ppd->cpspec->qdr_dfe_time = jiffies +
5703                                msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
5704                }
5705                ppd->cpspec->ibmalfusesnap = 0;
5706                ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
5707                        crp_errlink);
5708        }
5709        if (symadj) {
5710                ppd->cpspec->iblnkdownsnap =
5711                        read_7322_creg32_port(ppd, crp_iblinkdown);
5712                if (ppd->cpspec->ibdeltainprog) {
5713                        ppd->cpspec->ibdeltainprog = 0;
5714                        ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
5715                                crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
5716                        ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
5717                                crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
5718                }
5719        } else if (!ibup && qib_compat_ddr_negotiate &&
5720                   !ppd->cpspec->ibdeltainprog &&
5721                        !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5722                ppd->cpspec->ibdeltainprog = 1;
5723                ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
5724                        crp_ibsymbolerr);
5725                ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
5726                        crp_iblinkerrrecov);
5727        }
5728
5729        if (!ret)
5730                qib_setup_7322_setextled(ppd, ibup);
5731        return ret;
5732}
5733
5734/*
5735 * Does read/modify/write to appropriate registers to
5736 * set output and direction bits selected by mask.
5737 * These are in their canonical positions (e.g. lsb of
5738 * dir will end up in D48 of extctrl on existing chips).
5739 * Returns contents of GP Inputs.
5740 */
5741static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
5742{
5743        u64 read_val, new_out;
5744        unsigned long flags;
5745
5746        if (mask) {
5747                /* some bits being written, lock access to GPIO */
5748                dir &= mask;
5749                out &= mask;
5750                spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
5751                dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
5752                dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
5753                new_out = (dd->cspec->gpio_out & ~mask) | out;
5754
5755                qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
5756                qib_write_kreg(dd, kr_gpio_out, new_out);
5757                dd->cspec->gpio_out = new_out;
5758                spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
5759        }
5760        /*
5761         * It is unlikely that a read at this time would get valid
5762         * data on a pin whose direction line was set in the same
5763         * call to this function. We include the read here because
5764         * that allows us to potentially combine a change on one pin with
5765         * a read on another, and because the old code did something like
5766         * this.
5767         */
5768        read_val = qib_read_kreg64(dd, kr_extstatus);
5769        return SYM_FIELD(read_val, EXTStatus, GPIOIn);
5770}
5771
5772/* Enable writes to config EEPROM, if possible. Returns previous state */
5773static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
5774{
5775        int prev_wen;
5776        u32 mask;
5777
5778        mask = 1 << QIB_EEPROM_WEN_NUM;
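        /*
         * The WEN pin is active-low (we drive 0 below to enable writes),
         * so complementing the raw GPIO read yields the previous state
         * in the logical enabled-is-1 sense.
         */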
5779        prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
5780        gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
5781
5782        return prev_wen & 1;
5783}
5784
5785/*
5786 * Read fundamental info we need to use the chip.  These are
5787 * the registers that describe chip capabilities, and are
5788 * saved in shadow registers.
5789 */
5790static void get_7322_chip_params(struct qib_devdata *dd)
5791{
5792        u64 val;
5793        u32 piobufs;
5794        int mtu;
5795
5796        dd->palign = qib_read_kreg32(dd, kr_pagealign);
5797
5798        dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
5799
5800        dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
5801        dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
5802        dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
5803        dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
5804        dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
5805
5806        val = qib_read_kreg64(dd, kr_sendpiobufcnt);
5807        dd->piobcnt2k = val & ~0U;
5808        dd->piobcnt4k = val >> 32;
5809        val = qib_read_kreg64(dd, kr_sendpiosize);
5810        dd->piosize2k = val & ~0U;
5811        dd->piosize4k = val >> 32;
5812
5813        mtu = ib_mtu_enum_to_int(qib_ibmtu);
5814        if (mtu == -1)
5815                mtu = QIB_DEFAULT_MTU;
5816        dd->pport[0].ibmtu = (u32)mtu;
5817        dd->pport[1].ibmtu = (u32)mtu;
5818
5819        /* these may be adjusted in init_chip_wc_pat() */
5820        dd->pio2kbase = (u32 __iomem *)
5821                ((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
5822        dd->pio4kbase = (u32 __iomem *)
5823                ((char __iomem *) dd->kregbase +
5824                 (dd->piobufbase >> 32));
5825        /*
5826         * 4K buffers take 2 pages; we use roundup just to be
5827         * paranoid; we calculate it once here, rather than on
5828         * every buf allocate
5829         */
5830        dd->align4k = ALIGN(dd->piosize4k, dd->palign);
5831
5832        piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;
5833
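        /*
         * Each 64-bit pioavail register carries 2 bits of state per
         * buffer, so one register covers 32 buffers; the ALIGN() below
         * just rounds the buffer count up to a multiple of 32.
         */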
5834        dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
5835                (sizeof(u64) * BITS_PER_BYTE / 2);
5836}
5837
5838/*
5839 * The chip base addresses in cspec and cpspec have to be set
5840 * after possible init_chip_wc_pat(), rather than in
5841 * get_7322_chip_params(), so split out as separate function
5842 */
5843static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
5844{
5845        u32 cregbase;
5846        cregbase = qib_read_kreg32(dd, kr_counterregbase);
5847
5848        dd->cspec->cregbase = (u64 __iomem *)(cregbase +
5849                (char __iomem *)dd->kregbase);
5850
5851        dd->egrtidbase = (u64 __iomem *)
5852                ((char __iomem *) dd->kregbase + dd->rcvegrbase);
5853
5854        /* port registers are defined as relative to base of chip */
5855        dd->pport[0].cpspec->kpregbase =
5856                (u64 __iomem *)((char __iomem *)dd->kregbase);
5857        dd->pport[1].cpspec->kpregbase =
5858                (u64 __iomem *)(dd->palign +
5859                (char __iomem *)dd->kregbase);
5860        dd->pport[0].cpspec->cpregbase =
5861                (u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
5862                kr_counterregbase) + (char __iomem *)dd->kregbase);
5863        dd->pport[1].cpspec->cpregbase =
5864                (u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
5865                kr_counterregbase) + (char __iomem *)dd->kregbase);
5866}
5867
5868/*
5869 * This is a fairly special-purpose observer, so we only support
5870 * the port-specific parts of SendCtrl
5871 */
5872
5873#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) |           \
5874                           SYM_MASK(SendCtrl_0, SDmaEnable) |           \
5875                           SYM_MASK(SendCtrl_0, SDmaIntEnable) |        \
5876                           SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
5877                           SYM_MASK(SendCtrl_0, SDmaHalt) |             \
5878                           SYM_MASK(SendCtrl_0, IBVLArbiterEn) |        \
5879                           SYM_MASK(SendCtrl_0, ForceCreditUpToDate))
5880
5881static int sendctrl_hook(struct qib_devdata *dd,
5882                         const struct diag_observer *op, u32 offs,
5883                         u64 *data, u64 mask, int only_32)
5884{
5885        unsigned long flags;
5886        unsigned idx;
5887        unsigned pidx;
5888        struct qib_pportdata *ppd = NULL;
5889        u64 local_data, all_bits;
5890
5891        /*
5892         * The fixed correspondence between Physical ports and pports is
5893         * severed. We need to hunt for the ppd that corresponds
5894         * to the offset we got. And we have to do that without admitting
5895         * we know the stride, apparently.
5896         */
5897        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5898                u64 __iomem *psptr;
5899                u32 psoffs;
5900
5901                ppd = dd->pport + pidx;
5902                if (!ppd->cpspec->kpregbase)
5903                        continue;
5904
5905                psptr = ppd->cpspec->kpregbase + krp_sendctrl;
5906                psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
5907                if (psoffs == offs)
5908                        break;
5909        }
5910
5911        /* If pport is not being managed by driver, just avoid shadows. */
5912        if (pidx >= dd->num_pports)
5913                ppd = NULL;
5914
5915        /* In any case, "idx" is a flat index in kreg space */
5916        idx = offs / sizeof(u64);
5917
5918        all_bits = ~0ULL;
5919        if (only_32)
5920                all_bits >>= 32;
5921
5922        spin_lock_irqsave(&dd->sendctrl_lock, flags);
5923        if (!ppd || (mask & all_bits) != all_bits) {
5924                /*
5925                 * At least some mask bits are zero, so we need
5926                 * to read. The judgement call is whether from
5927                 * reg or shadow. First-cut: read reg, and complain
5928                 * if any bits which should be shadowed are different
5929                 * from their shadowed value.
5930                 */
5931                if (only_32)
5932                        local_data = (u64)qib_read_kreg32(dd, idx);
5933                else
5934                        local_data = qib_read_kreg64(dd, idx);
5935                *data = (local_data & ~mask) | (*data & mask);
5936        }
5937        if (mask) {
5938                /*
5939                 * At least some mask bits are one, so we need
5940                 * to write, but only shadow some bits.
5941                 */
5942                u64 sval, tval; /* Shadowed, transient */
5943
5944                /*
5945                 * New shadow val is bits we don't want to touch,
5946                 * ORed with bits we do, that are intended for shadow.
5947                 */
5948                if (ppd) {
5949                        sval = ppd->p_sendctrl & ~mask;
5950                        sval |= *data & SENDCTRL_SHADOWED & mask;
5951                        ppd->p_sendctrl = sval;
5952                } else
5953                        sval = *data & SENDCTRL_SHADOWED & mask;
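                /* tval is what actually gets written: the shadowed bits
                 * kept in sval plus the caller's non-shadowed bits */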
5954                tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
5955                qib_write_kreg(dd, idx, tval);
5956                qib_write_kreg(dd, kr_scratch, 0ULL);
5957        }
5958        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
5959        return only_32 ? 4 : 8;
5960}
5961
5962static const struct diag_observer sendctrl_0_observer = {
5963        sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
5964        KREG_IDX(SendCtrl_0) * sizeof(u64)
5965};
5966
5967static const struct diag_observer sendctrl_1_observer = {
5968        sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
5969        KREG_IDX(SendCtrl_1) * sizeof(u64)
5970};
5971
5972static ushort sdma_fetch_prio = 8;
5973module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
5974MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");
5975
5976/* Besides logging QSFP events, we set appropriate TxDDS values */
5977static void init_txdds_table(struct qib_pportdata *ppd, int override);
5978
5979static void qsfp_7322_event(struct work_struct *work)
5980{
5981        struct qib_qsfp_data *qd;
5982        struct qib_pportdata *ppd;
5983        unsigned long pwrup;
5984        unsigned long flags;
5985        int ret;
5986        u32 le2;
5987
5988        qd = container_of(work, struct qib_qsfp_data, work);
5989        ppd = qd->ppd;
5990        pwrup = qd->t_insert +
5991                msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC);
5992
5993        /* Delay for 20 msecs to allow ModPrs resistor to setup */
5994        mdelay(QSFP_MODPRS_LAG_MSEC);
5995
5996        if (!qib_qsfp_mod_present(ppd)) {
5997                ppd->cpspec->qsfp_data.modpresent = 0;
5998                /* Set the physical link to disabled */
5999                qib_set_ib_7322_lstate(ppd, 0,
6000                                       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
6001                spin_lock_irqsave(&ppd->lflags_lock, flags);
6002                ppd->lflags &= ~QIBL_LINKV;
6003                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
6004        } else {
6005                /*
6006                 * Some QSFPs not only do not respond until the full power-up
6007                 * time, but may behave badly if we try. So hold off responding
6008                 * to insertion.
6009                 */
6010                while (!time_is_before_jiffies(pwrup))
6011                        msleep(20);
6015
6016                ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
6017
6018                /*
6019                 * Need to change LE2 back to defaults if we couldn't
6020                 * read the cable type (to handle cable swaps), so do this
6021                 * even on failure to read cable information.  We don't
6022                 * get here for QME, so IS_QME check not needed here.
6023                 */
6024                if (!ret && !ppd->dd->cspec->r1) {
6025                        if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
6026                                le2 = LE2_QME;
6027                        else if (qd->cache.atten[1] >= qib_long_atten &&
6028                                 QSFP_IS_CU(qd->cache.tech))
6029                                le2 = LE2_5m;
6030                        else
6031                                le2 = LE2_DEFAULT;
6032                } else
6033                        le2 = LE2_DEFAULT;
6034                ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
6035                /*
6036                 * We always change parameters, since we can choose
6037                 * values for cables without eeproms, and the cable may have
6038                 * changed from a cable with full or partial eeprom content
6039                 * to one with partial or no content.
6040                 */
6041                init_txdds_table(ppd, 0);
6042                /* The physical link is re-enabled only when the previous
6043                 * state was DISABLED and the VALID bit is not set. This
6044                 * should only happen when the cable has been physically
6045                 * pulled. */
6046                if (!ppd->cpspec->qsfp_data.modpresent &&
6047                    (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) {
6048                        ppd->cpspec->qsfp_data.modpresent = 1;
6049                        qib_set_ib_7322_lstate(ppd, 0,
6050                                QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
6051                        spin_lock_irqsave(&ppd->lflags_lock, flags);
6052                        ppd->lflags |= QIBL_LINKV;
6053                        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
6054                }
6055        }
6056}
6057
6058/*
6059 * There is little we can do but complain to the user if QSFP
6060 * initialization fails.
6061 */
6062static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
6063{
6064        unsigned long flags;
6065        struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
6066        struct qib_devdata *dd = ppd->dd;
6067        u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;
6068
6069        mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
6070        qd->ppd = ppd;
6071        qib_qsfp_init(qd, qsfp_7322_event);
6072        spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
6073        dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
6074        dd->cspec->gpio_mask |= mod_prs_bit;
6075        qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
6076        qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
6077        spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
6078}
6079
6080/*
6081 * Called at device initialization time, and also if the txselect
6082 * module parameter is changed.  This is used for cables that don't
6083 * have valid QSFP EEPROMs (not present, or attenuation is zero).
6084 * We initialize to the default, then if there is a specific
6085 * unit,port match, we use that (and set it immediately, for the
6086 * current speed, if the link is at INIT or better).
6087 * String format is "default# unit#,port#=# ... u,p=#"; separators must
6088 * be a SPACE character.  A newline terminates.  The u,p=# tuples may
6089 * optionally have "u,p=#,#", where the final # is the H1 value.
6090 * The last specific match is used (actually, all are used, but the last
6091 * one is the one that winds up set); if none match, fall back on default.
6092 */
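/*
 * For example (hypothetical values), a txselect string of
 *      "2 1,1=5 1,2=6,10"
 * would set the default table index to 2, use index 5 for unit 1 port 1,
 * and use index 6 with an H1 value of 10 for unit 1 port 2.
 */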
6093static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
6094{
6095        char *nxt, *str;
6096        u32 pidx, unit, port, deflt, h1;
6097        unsigned long val;
6098        int any = 0, seth1;
6099        int txdds_size;
6100
6101        str = txselect_list;
6102
6103        /* default number is validated in setup_txselect() */
6104        deflt = simple_strtoul(str, &nxt, 0);
6105        for (pidx = 0; pidx < dd->num_pports; ++pidx)
6106                dd->pport[pidx].cpspec->no_eep = deflt;
6107
6108        txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
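        /* mezz boards (QME/QMH) can also select the manufacturing-test entries */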
6109        if (IS_QME(dd) || IS_QMH(dd))
6110                txdds_size += TXDDS_MFG_SZ;
6111
6112        while (*nxt && nxt[1]) {
6113                str = ++nxt;
6114                unit = simple_strtoul(str, &nxt, 0);
6115                if (nxt == str || !*nxt || *nxt != ',') {
6116                        while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6117                                ;
6118                        continue;
6119                }
6120                str = ++nxt;
6121                port = simple_strtoul(str, &nxt, 0);
6122                if (nxt == str || *nxt != '=') {
6123                        while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6124                                ;
6125                        continue;
6126                }
6127                str = ++nxt;
6128                val = simple_strtoul(str, &nxt, 0);
6129                if (nxt == str) {
6130                        while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6131                                ;
6132                        continue;
6133                }
6134                if (val >= txdds_size)
6135                        continue;
6136                seth1 = 0;
6137                h1 = 0; /* gcc thinks it might be used uninitialized */
6138                if (*nxt == ',' && nxt[1]) {
6139                        str = ++nxt;
6140                        h1 = (u32)simple_strtoul(str, &nxt, 0);
6141                        if (nxt == str)
6142                                while (*nxt && *nxt++ != ' ') /* skip */
6143                                        ;
6144                        else
6145                                seth1 = 1;
6146                }
6147                for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
6148                     ++pidx) {
6149                        struct qib_pportdata *ppd = &dd->pport[pidx];
6150
6151                        if (ppd->port != port || !ppd->link_speed_supported)
6152                                continue;
6153                        ppd->cpspec->no_eep = val;
6154                        if (seth1)
6155                                ppd->cpspec->h1_val = h1;
6156                        /* now change the IBC and serdes, overriding generic */
6157                        init_txdds_table(ppd, 1);
6158                        /* Re-enable the physical state machine on mezz boards
6159                         * now that the correct settings have been set.
6160                         * QSFP boards are handled by the QSFP event handler */
6161                        if (IS_QMH(dd) || IS_QME(dd))
6162                                qib_set_ib_7322_lstate(ppd, 0,
6163                                            QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
6164                        any++;
6165                }
6166                if (*nxt == '\n')
6167                        break; /* done */
6168        }
6169        if (change && !any) {
6170                /* no specific setting, use the default.
6171                 * Change the IBC and serdes, but since it's
6172                 * general, don't override specific settings.
6173                 */
6174                for (pidx = 0; pidx < dd->num_pports; ++pidx)
6175                        if (dd->pport[pidx].link_speed_supported)
6176                                init_txdds_table(&dd->pport[pidx], 0);
6177        }
6178}
6179
6180/* handle the txselect parameter changing */
6181static int setup_txselect(const char *str, struct kernel_param *kp)
6182{
6183        struct qib_devdata *dd;
6184        unsigned long val;
6185        char *n;
6186        if (strlen(str) >= MAX_ATTEN_LEN) {
6187                pr_info("txselect_values string too long\n");
6188                return -ENOSPC;
6189        }
6190        val = simple_strtoul(str, &n, 0);
6191        if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
6192                                TXDDS_MFG_SZ)) {
6193                pr_info("txselect_values must start with a number < %d\n",
6194                        TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
6195                return -EINVAL;
6196        }
6197        strcpy(txselect_list, str);
6198
6199        list_for_each_entry(dd, &qib_dev_list, list)
6200                if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
6201                        set_no_qsfp_atten(dd, 1);
6202        return 0;
6203}
6204
6205/*
6206 * Write the final few registers that depend on some of the
6207 * init setup.  Done late in init, just before bringing up
6208 * the serdes.
6209 */
6210static int qib_late_7322_initreg(struct qib_devdata *dd)
6211{
6212        int ret = 0, n;
6213        u64 val;
6214
6215        qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
6216        qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
6217        qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
6218        qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
6219        val = qib_read_kreg64(dd, kr_sendpioavailaddr);
6220        if (val != dd->pioavailregs_phys) {
6221                qib_dev_err(dd,
6222                        "Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
6223                        (unsigned long) dd->pioavailregs_phys,
6224                        (unsigned long long) val);
6225                ret = -EINVAL;
6226        }
6227
6228        n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
6229        qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
6230        /* the driver's own sends also get pkey, lid, etc. checking, to catch bugs */
6231        qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);
6232
6233        qib_register_observer(dd, &sendctrl_0_observer);
6234        qib_register_observer(dd, &sendctrl_1_observer);
6235
6236        dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
6237        qib_write_kreg(dd, kr_control, dd->control);
6238        /*
6239         * Set SendDmaFetchPriority and init Tx params, including
6240         * QSFP handler on boards that have QSFP.
6241         * First set our default attenuation entry for cables that
6242         * don't have valid attenuation.
6243         */
6244        set_no_qsfp_atten(dd, 0);
6245        for (n = 0; n < dd->num_pports; ++n) {
6246                struct qib_pportdata *ppd = dd->pport + n;
6247
6248                qib_write_kreg_port(ppd, krp_senddmaprioritythld,
6249                                    sdma_fetch_prio & 0xf);
6250                /* Initialize qsfp if present on board. */
6251                if (dd->flags & QIB_HAS_QSFP)
6252                        qib_init_7322_qsfp(ppd);
6253        }
6254        dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
6255        qib_write_kreg(dd, kr_control, dd->control);
6256
6257        return ret;
6258}
6259
6260/* per IB port errors */
6261#define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
6262        MASK_ACROSS(8, 15))
6263#define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
6264#define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
6265        MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
6266        MASK_ACROSS(0, 11))
6267
6268/*
6269 * Write the initialization per-port registers that need to be done at
6270 * driver load and after reset completes (i.e., that aren't done as part
6271 * of other init procedures called from qib_init.c).
6272 * Some of these should be redundant on reset, but play safe.
6273 */
6274static void write_7322_init_portregs(struct qib_pportdata *ppd)
6275{
6276        u64 val;
6277        int i;
6278
6279        if (!ppd->link_speed_supported) {
6280                /* no buffer credits for this port */
6281                for (i = 1; i < 8; i++)
6282                        qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
6283                qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
6284                qib_write_kreg(ppd->dd, kr_scratch, 0);
6285                return;
6286        }
6287
6288        /*
6289         * Set the number of supported virtual lanes in IBC,
6290         * for flow control packet handling on unsupported VLs
6291         */
6292        val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
6293        val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
6294        val |= (u64)(ppd->vls_supported - 1) <<
6295                SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
6296        qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
6297
6298        qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);
6299
6300        /* enable tx header checking */
6301        qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
6302                            IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
6303                            IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);
6304
6305        qib_write_kreg_port(ppd, krp_ncmodectrl,
6306                SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));
6307
6308        /*
6309         * Unconditionally clear the bufmask bits.  If SDMA is
6310         * enabled, we'll set them appropriately later.
6311         */
6312        qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
6313        qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
6314        qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
6315        if (ppd->dd->cspec->r1)
6316                ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
6317}
6318
6319/*
6320 * Write the initialization per-device registers that need to be done at
6321 * driver load and after reset completes (i.e., that aren't done as part
6322 * of other init procedures called from qib_init.c).  Also write per-port
6323 * registers that are affected by overall device config, such as QP mapping
6324 * Some of these should be redundant on reset, but play safe.
6325 */
6326static void write_7322_initregs(struct qib_devdata *dd)
6327{
6328        struct qib_pportdata *ppd;
6329        int i, pidx;
6330        u64 val;
6331
6332        /* Set Multicast QPs received by port 2 to map to context one. */
6333        qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);
6334
6335        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
6336                unsigned n, regno;
6337                unsigned long flags;
6338
6339                if (dd->n_krcv_queues < 2 ||
6340                        !dd->pport[pidx].link_speed_supported)
6341                        continue;
6342
6343                ppd = &dd->pport[pidx];
6344
6345                /* be paranoid against later code motion, etc. */
6346                spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
6347                ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
6348                spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
6349
6350                /* Initialize QP to context mapping */
6351                regno = krp_rcvqpmaptable;
6352                val = 0;
6353                if (dd->num_pports > 1)
6354                        n = dd->first_user_ctxt / dd->num_pports;
6355                else
6356                        n = dd->first_user_ctxt - 1;
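                /*
                 * Each map register packs six 5-bit context numbers, so
                 * accumulate into val and flush on every 6th entry; the
                 * final partial register is written after the loop.
                 */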
6357                for (i = 0; i < 32; ) {
6358                        unsigned ctxt;
6359
6360                        if (dd->num_pports > 1)
6361                                ctxt = (i % n) * dd->num_pports + pidx;
6362                        else if (i % n)
6363                                ctxt = (i % n) + 1;
6364                        else
6365                                ctxt = ppd->hw_pidx;
6366                        val |= ctxt << (5 * (i % 6));
6367                        i++;
6368                        if (i % 6 == 0) {
6369                                qib_write_kreg_port(ppd, regno, val);
6370                                val = 0;
6371                                regno++;
6372                        }
6373                }
6374                qib_write_kreg_port(ppd, regno, val);
6375        }
6376
6377        /*
6378         * Set up interrupt mitigation for kernel contexts, but
6379         * not user contexts (user contexts use interrupts when
6380         * stalled waiting for any packet, so want those interrupts
6381         * right away).
6382         */
6383        for (i = 0; i < dd->first_user_ctxt; i++) {
6384                dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
6385                qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
6386        }
6387
6388        /*
6389         * Initialize the (disabled) rcvflow tables.  Application code
6390         * will set up each flow as it uses the flow.
6391         * Doesn't clear any of the error bits that might be set.
6392         */
6393        val = TIDFLOW_ERRBITS; /* these are W1C */
6394        for (i = 0; i < dd->cfgctxts; i++) {
6395                int flow;
6396                for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
6397                        qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
6398        }
6399
6400        /*
6401         * Dual cards init to dual port recovery, single port cards to
6402         * the one port.  Dual port cards may later adjust to 1 port,
6403         * and then back to dual port if both ports are connected.
6404         */
6405        if (dd->num_pports)
6406                setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
6407}
6408
6409static int qib_init_7322_variables(struct qib_devdata *dd)
6410{
6411        struct qib_pportdata *ppd;
6412        unsigned features, pidx, sbufcnt;
6413        int ret, mtu;
6414        u32 sbufs, updthresh;
6415
6416        /* pport structs are contiguous, allocated after devdata */
6417        ppd = (struct qib_pportdata *)(dd + 1);
6418        dd->pport = ppd;
6419        ppd[0].dd = dd;
6420        ppd[1].dd = dd;
6421
6422        dd->cspec = (struct qib_chip_specific *)(ppd + 2);
6423
6424        ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
6425        ppd[1].cpspec = &ppd[0].cpspec[1];
6426        ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
6427        ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */
6428
6429        spin_lock_init(&dd->cspec->rcvmod_lock);
6430        spin_lock_init(&dd->cspec->gpio_lock);
6431
6432        /* we haven't yet set QIB_PRESENT, so use read directly */
6433        dd->revision = readq(&dd->kregbase[kr_revision]);
6434
6435        if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
6436                qib_dev_err(dd,
6437                        "Revision register read failure, giving up initialization\n");
6438                ret = -ENODEV;
6439                goto bail;
6440        }
6441        dd->flags |= QIB_PRESENT;  /* now register routines work */
6442
6443        dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
6444        dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
6445        dd->cspec->r1 = dd->minrev == 1;
6446
6447        get_7322_chip_params(dd);
6448        features = qib_7322_boardname(dd);
6449
6450        /* now that piobcnt2k and 4k are set, we can allocate these */
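        /* the count is rounded up to whole longs, since these arrays
         * are used as per-buffer bitmaps */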
6451        sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
6452                NUM_VL15_BUFS + BITS_PER_LONG - 1;
6453        sbufcnt /= BITS_PER_LONG;
6454        dd->cspec->sendchkenable = kmalloc(sbufcnt *
6455                sizeof(*dd->cspec->sendchkenable), GFP_KERNEL);
6456        dd->cspec->sendgrhchk = kmalloc(sbufcnt *
6457                sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL);
6458        dd->cspec->sendibchk = kmalloc(sbufcnt *
6459                sizeof(*dd->cspec->sendibchk), GFP_KERNEL);
6460        if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
6461                !dd->cspec->sendibchk) {
6462                qib_dev_err(dd, "Failed allocation for hdrchk bitmaps\n");
6463                ret = -ENOMEM;
6464                goto bail;
6465        }
6466
6467        ppd = dd->pport;
6468
6469        /*
6470         * GPIO bits for TWSI data and clock,
6471         * used for serial EEPROM.
6472         */
6473        dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
6474        dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
6475        dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
6476
6477        dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
6478                QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
6479                QIB_HAS_THRESH_UPDATE |
6480                (sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
6481        dd->flags |= qib_special_trigger ?
6482                QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
6483
6484        /*
6485         * Setup initial values.  These may change when PAT is enabled, but
6486         * we need these to do initial chip register accesses.
6487         */
6488        qib_7322_set_baseaddrs(dd);
6489
6490        mtu = ib_mtu_enum_to_int(qib_ibmtu);
6491        if (mtu == -1)
6492                mtu = QIB_DEFAULT_MTU;
6493
6494        dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
6495        /* all hwerrors become interrupts, unless special purposed */
6496        dd->cspec->hwerrmask = ~0ULL;
6497        /* link_recovery setup causes these errors, so ignore them,
6498         * other than clearing them when they occur */
6499        dd->cspec->hwerrmask &=
6500                ~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
6501                  SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
6502                  HWE_MASK(LATriggered));
6503
6504        for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
6505                struct qib_chippport_specific *cp = ppd->cpspec;
6506                ppd->link_speed_supported = features & PORT_SPD_CAP;
6507                features >>=  PORT_SPD_CAP_SHIFT;
6508                if (!ppd->link_speed_supported) {
6509                        /* single port mode (7340, or configured) */
6510                        dd->skip_kctxt_mask |= 1 << pidx;
6511                        if (pidx == 0) {
6512                                /* Make sure port is disabled. */
6513                                qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6514                                qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6515                                ppd[0] = ppd[1];
6516                                dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6517                                                  IBSerdesPClkNotDetectMask_0)
6518                                                  | SYM_MASK(HwErrMask,
6519                                                  SDmaMemReadErrMask_0));
6520                                dd->cspec->int_enable_mask &= ~(
6521                                     SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
6522                                     SYM_MASK(IntMask, SDmaIdleIntMask_0) |
6523                                     SYM_MASK(IntMask, SDmaProgressIntMask_0) |
6524                                     SYM_MASK(IntMask, SDmaIntMask_0) |
6525                                     SYM_MASK(IntMask, ErrIntMask_0) |
6526                                     SYM_MASK(IntMask, SendDoneIntMask_0));
6527                        } else {
6528                                /* Make sure port is disabled. */
6529                                qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6530                                qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6531                                dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6532                                                  IBSerdesPClkNotDetectMask_1)
6533                                                  | SYM_MASK(HwErrMask,
6534                                                  SDmaMemReadErrMask_1));
6535                                dd->cspec->int_enable_mask &= ~(
6536                                     SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
6537                                     SYM_MASK(IntMask, SDmaIdleIntMask_1) |
6538                                     SYM_MASK(IntMask, SDmaProgressIntMask_1) |
6539                                     SYM_MASK(IntMask, SDmaIntMask_1) |
6540                                     SYM_MASK(IntMask, ErrIntMask_1) |
6541                                     SYM_MASK(IntMask, SendDoneIntMask_1));
6542                        }
6543                        continue;
6544                }
6545
6546                dd->num_pports++;
6547                ret = qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
6548                if (ret) {
6549                        dd->num_pports--;
6550                        goto bail;
6551                }
6552
6553                ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
6554                ppd->link_width_enabled = IB_WIDTH_4X;
6555                ppd->link_speed_enabled = ppd->link_speed_supported;
6556                /*
6557                 * Set the initial values to a reasonable default; they will
6558                 * be set for real when the link comes up.
6559                 */
6560                ppd->link_width_active = IB_WIDTH_4X;
6561                ppd->link_speed_active = QIB_IB_SDR;
6562                ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
6563                switch (qib_num_cfg_vls) {
6564                case 1:
6565                        ppd->vls_supported = IB_VL_VL0;
6566                        break;
6567                case 2:
6568                        ppd->vls_supported = IB_VL_VL0_1;
6569                        break;
6570                default:
6571                        qib_devinfo(dd->pcidev,
6572                                    "Invalid num_vls %u, using 4 VLs\n",
6573                                    qib_num_cfg_vls);
6574                        qib_num_cfg_vls = 4;
6575                        /* fall through */
6576                case 4:
6577                        ppd->vls_supported = IB_VL_VL0_3;
6578                        break;
6579                case 8:
6580                        if (mtu <= 2048)
6581                                ppd->vls_supported = IB_VL_VL0_7;
6582                        else {
6583                                qib_devinfo(dd->pcidev,
6584                                            "Invalid num_vls %u for MTU %d, using 4 VLs\n",
6586                                            qib_num_cfg_vls, mtu);
6587                                ppd->vls_supported = IB_VL_VL0_3;
6588                                qib_num_cfg_vls = 4;
6589                        }
6590                        break;
6591                }
6592                ppd->vls_operational = ppd->vls_supported;
6593
6594                init_waitqueue_head(&cp->autoneg_wait);
6595                INIT_DELAYED_WORK(&cp->autoneg_work,
6596                                  autoneg_7322_work);
6597                if (ppd->dd->cspec->r1)
6598                        INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);
6599
6600                /*
6601                 * For Mez and similar cards, there is no QSFP info, so do
6602                 * the "cable info" setup here.  It can be overridden
6603                 * in adapter-specific routines.
6604                 */
6605                if (!(dd->flags & QIB_HAS_QSFP)) {
6606                        if (!IS_QMH(dd) && !IS_QME(dd))
6607                                qib_devinfo(dd->pcidev,
6608                                        "IB%u:%u: Unknown mezzanine card type\n",
6609                                        dd->unit, ppd->port);
6610                        cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
6611                        /*
6612                         * Choose center value as default tx serdes setting
6613                         * until changed through module parameter.
6614                         */
6615                        ppd->cpspec->no_eep = IS_QMH(dd) ?
6616                                TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
6617                } else
6618                        cp->h1_val = H1_FORCE_VAL;
6619
6620                /* Avoid writes to chip for mini_init */
6621                if (!qib_mini_init)
6622                        write_7322_init_portregs(ppd);
6623
6624                init_timer(&cp->chase_timer);
6625                cp->chase_timer.function = reenable_chase;
6626                cp->chase_timer.data = (unsigned long)ppd;
6627
6628                ppd++;
6629        }
6630
6631        dd->rcvhdrentsize = qib_rcvhdrentsize ?
6632                qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
6633        dd->rcvhdrsize = qib_rcvhdrsize ?
6634                qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
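            /* the 64-bit RHF occupies the last two 32-bit words of each entry */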
6635        dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
6636
6637        /* we always allocate at least 2048 bytes for eager buffers */
6638        dd->rcvegrbufsize = max(mtu, 2048);
6639        BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
6640        dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
6641
6642        qib_7322_tidtemplate(dd);
6643
6644        /*
6645         * We can request a receive interrupt for 1 or
6646         * more packets from current offset.
6647         */
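            /* the resulting value is OR'ed into each rcvhdrhead write */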
6648        dd->rhdrhead_intr_off =
6649                (u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
6650
6651        /* setup the stats timer; the add_timer is done at end of init */
6652        init_timer(&dd->stats_timer);
6653        dd->stats_timer.function = qib_get_7322_faststats;
6654        dd->stats_timer.data = (unsigned long) dd;
6655
6656        dd->ureg_align = 0x10000;  /* 64KB alignment */
6657
6658        dd->piosize2kmax_dwords = dd->piosize2k >> 2;
6659
6660        qib_7322_config_ctxts(dd);
6661        qib_set_ctxtcnt(dd);
6662
6663        if (qib_wc_pat) {
6664                resource_size_t vl15off;
6665                /*
6666                 * We do not set WC on the VL15 buffers to avoid
6667                 * a rare problem with unaligned writes from
6668                 * interrupt-flushed store buffers, so we need
6669                 * to map those separately here.  We can't solve
6670                 * this for the rarely used mtrr case.
6671                 */
6672                ret = init_chip_wc_pat(dd, 0);
6673                if (ret)
6674                        goto bail;
6675
6676                /* vl15 buffers start just after the 4k buffers */
6677                vl15off = dd->physaddr + (dd->piobufbase >> 32) +
6678                        dd->piobcnt4k * dd->align4k;
6679                dd->piovl15base = ioremap_nocache(vl15off,
6680                                                  NUM_VL15_BUFS * dd->align4k);
6681                if (!dd->piovl15base) {
6682                        ret = -ENOMEM;
6683                        goto bail;
6684                }
6685        }
6686        qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
6687
6688        ret = 0;
6689        if (qib_mini_init)
6690                goto bail;
6691        if (!dd->num_pports) {
6692                qib_dev_err(dd, "No ports enabled, giving up initialization\n");
6693                goto bail; /* no error, so the cause can still be examined */
6694        }
6695
6696        write_7322_initregs(dd);
6697        ret = qib_create_ctxts(dd);
6698        init_7322_cntrnames(dd);
6699
6700        updthresh = 8U; /* update threshold */
6701
6702        /* Use all the 4KB buffers for kernel SDMA (none if SDMA is
6703         * disabled).  Reserve the update-threshold amount, or 3,
6704         * whichever is greater, for other kernel use, such as sending
6705         * SMI, MAD, and ACK packets; if SDMA is disabled, all the 4k
6706         * bufs go to the kernel instead.
6707         * If the reserve were less than the update threshold, we could
6708         * wait a long time for an update.  Coded this way because we
6709         * sometimes change the update threshold for various reasons,
6710         * and we want this to remain robust.
6711         */
6712        if (dd->flags & QIB_HAS_SEND_DMA) {
6713                dd->cspec->sdmabufcnt = dd->piobcnt4k;
6714                sbufs = updthresh > 3 ? updthresh : 3;
6715        } else {
6716                dd->cspec->sdmabufcnt = 0;
6717                sbufs = dd->piobcnt4k;
6718        }
6719        dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
6720                dd->cspec->sdmabufcnt;
6721        dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
6722        dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
6723        dd->last_pio = dd->cspec->lastbuf_for_pio;
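            /* split the remaining PIO buffers evenly among the user contexts */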
6724        dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
6725                dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
6726
6727        /*
6728         * If we have 16 user contexts, we will have 7 sbufs
6729         * per context, so reduce the update threshold to match.  We
6730         * want to update before we actually run out, at low pbufs/ctxt
6731         * so give ourselves some margin.
6732         */
6733        if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
6734                updthresh = dd->pbufsctxt - 2;
6735        dd->cspec->updthresh_dflt = updthresh;
6736        dd->cspec->updthresh = updthresh;
6737
6738        /* before full enable, no interrupts, no locking needed */
6739        dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
6740                             << SYM_LSB(SendCtrl, AvailUpdThld)) |
6741                        SYM_MASK(SendCtrl, SendBufAvailPad64Byte);
6742
6743        dd->psxmitwait_supported = 1;
6744        dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
6745bail:
6746        if (!dd->ctxtcnt)
6747                dd->ctxtcnt = 1; /* for other initialization code */
6748
6749        return ret;
6750}
6751
6752static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
6753                                        u32 *pbufnum)
6754{
6755        u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
6756        struct qib_devdata *dd = ppd->dd;
6757
6758        /* last is same for 2k and 4k, because we use 4k if all 2k busy */
6759        if (pbc & PBC_7322_VL15_SEND) {
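                    /* the per-port VL15 buffers sit just past the 2k and 4k buffers */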
6760                first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
6761                last = first;
6762        } else {
6763                if ((plen + 1) > dd->piosize2kmax_dwords)
6764                        first = dd->piobcnt2k;
6765                else
6766                        first = 0;
6767                last = dd->cspec->lastbuf_for_pio;
6768        }
6769        return qib_getsendbuf_range(dd, pbufnum, first, last);
6770}
6771
6772static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
6773                                     u32 start)
6774{
6775        qib_write_kreg_port(ppd, krp_psinterval, intv);
6776        qib_write_kreg_port(ppd, krp_psstart, start);
6777}
6778
6779/*
6780 * Must be called with sdma_lock held, or before init finished.
6781 */
6782static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
6783{
6784        qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
6785}
6786
6787/*
6788 * sdma_lock should be acquired before calling this routine
6789 */
6790static void dump_sdma_7322_state(struct qib_pportdata *ppd)
6791{
6792        u64 reg, reg1, reg2;
6793
6794        reg = qib_read_kreg_port(ppd, krp_senddmastatus);
6795        qib_dev_porterr(ppd->dd, ppd->port,
6796                "SDMA senddmastatus: 0x%016llx\n", reg);
6797
6798        reg = qib_read_kreg_port(ppd, krp_sendctrl);
6799        qib_dev_porterr(ppd->dd, ppd->port,
6800                "SDMA sendctrl: 0x%016llx\n", reg);
6801
6802        reg = qib_read_kreg_port(ppd, krp_senddmabase);
6803        qib_dev_porterr(ppd->dd, ppd->port,
6804                "SDMA senddmabase: 0x%016llx\n", reg);
6805
6806        reg = qib_read_kreg_port(ppd, krp_senddmabufmask0);
6807        reg1 = qib_read_kreg_port(ppd, krp_senddmabufmask1);
6808        reg2 = qib_read_kreg_port(ppd, krp_senddmabufmask2);
6809        qib_dev_porterr(ppd->dd, ppd->port,
6810                "SDMA senddmabufmask 0:%llx  1:%llx  2:%llx\n",
6811                 reg, reg1, reg2);
6812
6813        /* get bufuse bits, clear them, and print them again if non-zero */
6814        reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
6815        qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg);
6816        reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
6817        qib_write_kreg_port(ppd, krp_senddmabuf_use1, reg1);
6818        reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
6819        qib_write_kreg_port(ppd, krp_senddmabuf_use2, reg2);
6820        /* 0 and 1 should always be zero, so print as short form */
6821        qib_dev_porterr(ppd->dd, ppd->port,
6822                 "SDMA current senddmabuf_use 0:%llx  1:%llx  2:%llx\n",
6823                 reg, reg1, reg2);
6824        reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
6825        reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
6826        reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
6827        /* 0 and 1 should always be zero, so print as short form */
6828        qib_dev_porterr(ppd->dd, ppd->port,
6829                 "SDMA cleared senddmabuf_use 0:%llx  1:%llx  2:%llx\n",
6830                 reg, reg1, reg2);
6831
6832        reg = qib_read_kreg_port(ppd, krp_senddmatail);
6833        qib_dev_porterr(ppd->dd, ppd->port,
6834                "SDMA senddmatail: 0x%016llx\n", reg);
6835
6836        reg = qib_read_kreg_port(ppd, krp_senddmahead);
6837        qib_dev_porterr(ppd->dd, ppd->port,
6838                "SDMA senddmahead: 0x%016llx\n", reg);
6839
6840        reg = qib_read_kreg_port(ppd, krp_senddmaheadaddr);
6841        qib_dev_porterr(ppd->dd, ppd->port,
6842                "SDMA senddmaheadaddr: 0x%016llx\n", reg);
6843
6844        reg = qib_read_kreg_port(ppd, krp_senddmalengen);
6845        qib_dev_porterr(ppd->dd, ppd->port,
6846                "SDMA senddmalengen: 0x%016llx\n", reg);
6847
6848        reg = qib_read_kreg_port(ppd, krp_senddmadesccnt);
6849        qib_dev_porterr(ppd->dd, ppd->port,
6850                "SDMA senddmadesccnt: 0x%016llx\n", reg);
6851
6852        reg = qib_read_kreg_port(ppd, krp_senddmaidlecnt);
6853        qib_dev_porterr(ppd->dd, ppd->port,
6854                "SDMA senddmaidlecnt: 0x%016llx\n", reg);
6855
6856        reg = qib_read_kreg_port(ppd, krp_senddmaprioritythld);
6857        qib_dev_porterr(ppd->dd, ppd->port,
6858                "SDMA senddmaprioritythld: 0x%016llx\n", reg);
6859
6860        reg = qib_read_kreg_port(ppd, krp_senddmareloadcnt);
6861        qib_dev_porterr(ppd->dd, ppd->port,
6862                "SDMA senddmareloadcnt: 0x%016llx\n", reg);
6863
6864        dump_sdma_state(ppd);
6865}
6866
6867static struct sdma_set_state_action sdma_7322_action_table[] = {
6868        [qib_sdma_state_s00_hw_down] = {
6869                .go_s99_running_tofalse = 1,
6870                .op_enable = 0,
6871                .op_intenable = 0,
6872                .op_halt = 0,
6873                .op_drain = 0,
6874        },
6875        [qib_sdma_state_s10_hw_start_up_wait] = {
6876                .op_enable = 0,
6877                .op_intenable = 1,
6878                .op_halt = 1,
6879                .op_drain = 0,
6880        },
6881        [qib_sdma_state_s20_idle] = {
6882                .op_enable = 1,
6883                .op_intenable = 1,
6884                .op_halt = 1,
6885                .op_drain = 0,
6886        },
6887        [qib_sdma_state_s30_sw_clean_up_wait] = {
6888                .op_enable = 0,
6889                .op_intenable = 1,
6890                .op_halt = 1,
6891                .op_drain = 0,
6892        },
6893        [qib_sdma_state_s40_hw_clean_up_wait] = {
6894                .op_enable = 1,
6895                .op_intenable = 1,
6896                .op_halt = 1,
6897                .op_drain = 0,
6898        },
6899        [qib_sdma_state_s50_hw_halt_wait] = {
6900                .op_enable = 1,
6901                .op_intenable = 1,
6902                .op_halt = 1,
6903                .op_drain = 1,
6904        },
6905        [qib_sdma_state_s99_running] = {
6906                .op_enable = 1,
6907                .op_intenable = 1,
6908                .op_halt = 0,
6909                .op_drain = 0,
6910                .go_s99_running_totrue = 1,
6911        },
6912};
6913
6914static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
6915{
6916        ppd->sdma_state.set_state_action = sdma_7322_action_table;
6917}
6918
6919static int init_sdma_7322_regs(struct qib_pportdata *ppd)
6920{
6921        struct qib_devdata *dd = ppd->dd;
6922        unsigned lastbuf, erstbuf;
6923        u64 senddmabufmask[3] = { 0 };
6924        int n, ret = 0;
6925
6926        qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
6927        qib_sdma_7322_setlengen(ppd);
6928        qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
6929        qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
6930        qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
6931        qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
6932
6933        if (dd->num_pports)
6934                n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
6935        else
6936                n = dd->cspec->sdmabufcnt; /* failsafe for init */
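            /*
             * Carve this port's buffers from the top of PIO space; on a
             * dual-port chip, port 1 gets the lower half of the SDMA
             * buffer range and port 2 the upper half.
             */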
6937        erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
6938                ((dd->num_pports == 1 || ppd->port == 2) ? n :
6939                dd->cspec->sdmabufcnt);
6940        lastbuf = erstbuf + n;
6941
6942        ppd->sdma_state.first_sendbuf = erstbuf;
6943        ppd->sdma_state.last_sendbuf = lastbuf;
6944        for (; erstbuf < lastbuf; ++erstbuf) {
6945                unsigned word = erstbuf / BITS_PER_LONG;
6946                unsigned bit = erstbuf & (BITS_PER_LONG - 1);
6947
6948                BUG_ON(word >= 3);
6949                senddmabufmask[word] |= 1ULL << bit;
6950        }
6951        qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
6952        qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
6953        qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
6954        return ret;
6955}
6956
6957/* sdma_lock must be held */
6958static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
6959{
6960        struct qib_devdata *dd = ppd->dd;
6961        int sane;
6962        int use_dmahead;
6963        u16 swhead;
6964        u16 swtail;
6965        u16 cnt;
6966        u16 hwhead;
6967
6968        use_dmahead = __qib_sdma_running(ppd) &&
6969                (dd->flags & QIB_HAS_SDMA_TIMEOUT);
6970retry:
6971        hwhead = use_dmahead ?
6972                (u16) le64_to_cpu(*ppd->sdma_head_dma) :
6973                (u16) qib_read_kreg_port(ppd, krp_senddmahead);
6974
6975        swhead = ppd->sdma_descq_head;
6976        swtail = ppd->sdma_descq_tail;
6977        cnt = ppd->sdma_descq_cnt;
6978
6979        if (swhead < swtail)
6980                /* not wrapped */
6981                sane = (hwhead >= swhead) && (hwhead <= swtail);
6982        else if (swhead > swtail)
6983                /* wrapped around */
6984                sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
6985                        (hwhead <= swtail);
6986        else
6987                /* empty */
6988                sane = (hwhead == swhead);
6989
6990        if (unlikely(!sane)) {
6991                if (use_dmahead) {
6992                        /* try one more time, directly from the register */
6993                        use_dmahead = 0;
6994                        goto retry;
6995                }
6996                /* proceed as if no progress */
6997                hwhead = swhead;
6998        }
6999
7000        return hwhead;
7001}
7002
7003static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
7004{
7005        u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
7006
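            /*
             * Busy while a drain or halt is still in progress, or until
             * the engine has actually halted with an empty scoreboard.
             */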
7007        return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
7008               (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
7009               !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
7010               !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
7011}
7012
7013/*
7014 * Compute the amount of delay before sending the next packet if the
7015 * port's send rate differs from the static rate set for the QP.
7016 * The delay affects the next packet and the amount of the delay is
7017 * based on the length of this packet.
7018 */
7019static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
7020                                   u8 srate, u8 vl)
7021{
7022        u8 snd_mult = ppd->delay_mult;
7023        u8 rcv_mult = ib_rate_to_delay[srate];
7024        u32 ret;
7025
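            /*
             * A larger multiplier means a slower rate.  Delay only when
             * the QP's static rate is slower than the port's rate; the
             * delay is half the packet length in dwords times the port's
             * multiplier.
             */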
7026        ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
7027
7028        /* Indicate VL15, else set the VL in the control word */
7029        if (vl == 15)
7030                ret |= PBC_7322_VL15_SEND_CTRL;
7031        else
7032                ret |= vl << PBC_VL_NUM_LSB;
7033        ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
7034
7035        return ret;
7036}
7037
7038/*
7039 * Enable the per-port VL15 send buffers for use.
7040 * They follow the rest of the buffers, without a config parameter.
7041 * This was in initregs, but initregs runs before the pioavail
7042 * shadow is set up, and this has to be done after the shadow is
7043 * set up.
7044 */
7045static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
7046{
7047        unsigned vl15bufs;
7048
7049        vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
7050        qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
7051                               TXCHK_CHG_TYPE_KERN, NULL);
7052}
7053
7054static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
7055{
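            /*
             * Contexts below NUM_IB_PORTS are the per-port kernel
             * contexts; they share the KCTXT0_EGRCNT eager entries.
             * User contexts start their eager TIDs after those.
             */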
7056        if (rcd->ctxt < NUM_IB_PORTS) {
7057                if (rcd->dd->num_pports > 1) {
7058                        rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
7059                        rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
7060                } else {
7061                        rcd->rcvegrcnt = KCTXT0_EGRCNT;
7062                        rcd->rcvegr_tid_base = 0;
7063                }
7064        } else {
7065                rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
7066                rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
7067                        (rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
7068        }
7069}
7070
7071#define QTXSLEEPS 5000
7072static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
7073                                  u32 len, u32 which, struct qib_ctxtdata *rcd)
7074{
7075        int i;
7076        const int last = start + len - 1;
7077        const int lastr = last / BITS_PER_LONG;
7078        u32 sleeps = 0;
7079        int wait = rcd != NULL;
7080        unsigned long flags;
7081
7082        while (wait) {
7083                unsigned long shadow;
7084                int cstart, previ = -1;
7085
7086                /*
7087                 * When flipping from kernel to user, we can't change
7088                 * the checking type if the buffer is allocated to the
7089                 * driver.  It's OK in the other direction, because that
7090                 * happens at close, and we have just disarmed all the
7091                 * buffers.  All kernel-to-kernel changes are also
7092                 * OK.
7093                 */
7094                for (cstart = start; cstart <= last; cstart++) {
7095                        i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
7096                                / BITS_PER_LONG;
7097                        if (i != previ) {
7098                                shadow = (unsigned long)
7099                                        le64_to_cpu(dd->pioavailregs_dma[i]);
7100                                previ = i;
7101                        }
7102                        if (test_bit(((2 * cstart) +
7103                                      QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
7104                                     % BITS_PER_LONG, &shadow))
7105                                break;
7106                }
7107
7108                if (cstart > last)
7109                        break;
7110
7111                if (sleeps == QTXSLEEPS)
7112                        break;
7113                /* make sure we see an updated copy next time around */
7114                sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7115                sleeps++;
7116                msleep(20);
7117        }
7118
7119        switch (which) {
7120        case TXCHK_CHG_TYPE_DIS1:
7121                /*
7122                 * disable checking on a range; used by diags; just
7123                 * one buffer, but still written generically
7124                 */
7125                for (i = start; i <= last; i++)
7126                        clear_bit(i, dd->cspec->sendchkenable);
7127                break;
7128
7129        case TXCHK_CHG_TYPE_ENAB1:
7130                /*
7131                 * (re)enable checking on a range; used by diags; just
7132                 * one buffer, but still written generically; read
7133                 * scratch to be sure buffer actually triggered, not
7134                 * just flushed from processor.
7135                 */
7136                qib_read_kreg32(dd, kr_scratch);
7137                for (i = start; i <= last; i++)
7138                        set_bit(i, dd->cspec->sendchkenable);
7139                break;
7140
7141        case TXCHK_CHG_TYPE_KERN:
7142                /* usable by kernel */
7143                for (i = start; i <= last; i++) {
7144                        set_bit(i, dd->cspec->sendibchk);
7145                        clear_bit(i, dd->cspec->sendgrhchk);
7146                }
7147                spin_lock_irqsave(&dd->uctxt_lock, flags);
7148                /* see if we need to raise avail update threshold */
7149                for (i = dd->first_user_ctxt;
7150                     dd->cspec->updthresh != dd->cspec->updthresh_dflt
7151                     && i < dd->cfgctxts; i++)
7152                        if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
7153                           ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
7154                           < dd->cspec->updthresh_dflt)
7155                                break;
7156                spin_unlock_irqrestore(&dd->uctxt_lock, flags);
7157                if (i == dd->cfgctxts) {
7158                        spin_lock_irqsave(&dd->sendctrl_lock, flags);
7159                        dd->cspec->updthresh = dd->cspec->updthresh_dflt;
7160                        dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
7161                        dd->sendctrl |= (dd->cspec->updthresh &
7162                                         SYM_RMASK(SendCtrl, AvailUpdThld)) <<
7163                                           SYM_LSB(SendCtrl, AvailUpdThld);
7164                        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7165                        sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7166                }
7167                break;
7168
7169        case TXCHK_CHG_TYPE_USER:
7170                /* for user process */
7171                for (i = start; i <= last; i++) {
7172                        clear_bit(i, dd->cspec->sendibchk);
7173                        set_bit(i, dd->cspec->sendgrhchk);
7174                }
7175                spin_lock_irqsave(&dd->sendctrl_lock, flags);
7176                if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
7177                        / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
7178                        dd->cspec->updthresh = (rcd->piocnt /
7179                                                rcd->subctxt_cnt) - 1;
7180                        dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
7181                        dd->sendctrl |= (dd->cspec->updthresh &
7182                                        SYM_RMASK(SendCtrl, AvailUpdThld))
7183                                        << SYM_LSB(SendCtrl, AvailUpdThld);
7184                        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7185                        sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7186                } else
7187                        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7188                break;
7189
7190        default:
7191                break;
7192        }
7193
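            /* push only the check-mask registers affected by this change type */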
7194        for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
7195                qib_write_kreg(dd, kr_sendcheckmask + i,
7196                               dd->cspec->sendchkenable[i]);
7197
7198        for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
7199                qib_write_kreg(dd, kr_sendgrhcheckmask + i,
7200                               dd->cspec->sendgrhchk[i]);
7201                qib_write_kreg(dd, kr_sendibpktmask + i,
7202                               dd->cspec->sendibchk[i]);
7203        }
7204
7205        /*
7206         * Be sure whatever we did was seen by the chip and acted upon,
7207         * before we return.  This matters mostly for which >= 2.
7208         */
7209        qib_read_kreg32(dd, kr_scratch);
7210}
7211
7212
7213/* useful for trigger analyzers, etc. */
7214static void writescratch(struct qib_devdata *dd, u32 val)
7215{
7216        qib_write_kreg(dd, kr_scratch, val);
7217}
7218
7219/* Dummy for now, use chip regs soon */
7220static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
7221{
7222        return -ENXIO;
7223}
7224
7225/**
7226 * qib_init_iba7322_funcs - set up the chip-specific function pointers
7227 * @pdev: the pci_dev for qlogic_ib device
7228 * @ent: pci_device_id struct for this dev
7229 *
7230 * Also allocates, inits, and returns the devdata struct for this
7231 * device instance
7232 *
7233 * This is global, and is called directly at init to set up the
7234 * chip-specific function pointers for later use.
7235 */
7236struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
7237                                           const struct pci_device_id *ent)
7238{
7239        struct qib_devdata *dd;
7240        int ret, i;
7241        u32 tabsize, actual_cnt = 0;
7242
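            /*
             * The extra space requested here is carved up at the start
             * of qib_init_7322_variables(): two pportdata, one
             * chip_specific, and two chippport_specific structs.
             */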
7243        dd = qib_alloc_devdata(pdev,
7244                NUM_IB_PORTS * sizeof(struct qib_pportdata) +
7245                sizeof(struct qib_chip_specific) +
7246                NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
7247        if (IS_ERR(dd))
7248                goto bail;
7249
7250        dd->f_bringup_serdes    = qib_7322_bringup_serdes;
7251        dd->f_cleanup           = qib_setup_7322_cleanup;
7252        dd->f_clear_tids        = qib_7322_clear_tids;
7253        dd->f_free_irq          = qib_7322_free_irq;
7254        dd->f_get_base_info     = qib_7322_get_base_info;
7255        dd->f_get_msgheader     = qib_7322_get_msgheader;
7256        dd->f_getsendbuf        = qib_7322_getsendbuf;
7257        dd->f_gpio_mod          = gpio_7322_mod;
7258        dd->f_eeprom_wen        = qib_7322_eeprom_wen;
7259        dd->f_hdrqempty         = qib_7322_hdrqempty;
7260        dd->f_ib_updown         = qib_7322_ib_updown;
7261        dd->f_init_ctxt         = qib_7322_init_ctxt;
7262        dd->f_initvl15_bufs     = qib_7322_initvl15_bufs;
7263        dd->f_intr_fallback     = qib_7322_intr_fallback;
7264        dd->f_late_initreg      = qib_late_7322_initreg;
7265        dd->f_setpbc_control    = qib_7322_setpbc_control;
7266        dd->f_portcntr          = qib_portcntr_7322;
7267        dd->f_put_tid           = qib_7322_put_tid;
7268        dd->f_quiet_serdes      = qib_7322_mini_quiet_serdes;
7269        dd->f_rcvctrl           = rcvctrl_7322_mod;
7270        dd->f_read_cntrs        = qib_read_7322cntrs;
7271        dd->f_read_portcntrs    = qib_read_7322portcntrs;
7272        dd->f_reset             = qib_do_7322_reset;
7273        dd->f_init_sdma_regs    = init_sdma_7322_regs;
7274        dd->f_sdma_busy         = qib_sdma_7322_busy;
7275        dd->f_sdma_gethead      = qib_sdma_7322_gethead;
7276        dd->f_sdma_sendctrl     = qib_7322_sdma_sendctrl;
7277        dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
7278        dd->f_sdma_update_tail  = qib_sdma_update_7322_tail;
7279        dd->f_sendctrl          = sendctrl_7322_mod;
7280        dd->f_set_armlaunch     = qib_set_7322_armlaunch;
7281        dd->f_set_cntr_sample   = qib_set_cntr_7322_sample;
7282        dd->f_iblink_state      = qib_7322_iblink_state;
7283        dd->f_ibphys_portstate  = qib_7322_phys_portstate;
7284        dd->f_get_ib_cfg        = qib_7322_get_ib_cfg;
7285        dd->f_set_ib_cfg        = qib_7322_set_ib_cfg;
7286        dd->f_set_ib_loopback   = qib_7322_set_loopback;
7287        dd->f_get_ib_table      = qib_7322_get_ib_table;
7288        dd->f_set_ib_table      = qib_7322_set_ib_table;
7289        dd->f_set_intr_state    = qib_7322_set_intr_state;
7290        dd->f_setextled         = qib_setup_7322_setextled;
7291        dd->f_txchk_change      = qib_7322_txchk_change;
7292        dd->f_update_usrhead    = qib_update_7322_usrhead;
7293        dd->f_wantpiobuf_intr   = qib_wantpiobuf_7322_intr;
7294        dd->f_xgxs_reset        = qib_7322_mini_pcs_reset;
7295        dd->f_sdma_hw_clean_up  = qib_7322_sdma_hw_clean_up;
7296        dd->f_sdma_hw_start_up  = qib_7322_sdma_hw_start_up;
7297        dd->f_sdma_init_early   = qib_7322_sdma_init_early;
7298        dd->f_writescratch      = writescratch;
7299        dd->f_tempsense_rd      = qib_7322_tempsense_rd;
7300#ifdef CONFIG_INFINIBAND_QIB_DCA
7301        dd->f_notify_dca        = qib_7322_notify_dca;
7302#endif
7303        /*
7304         * Do remaining PCIe setup and save PCIe values in dd.
7305         * Any error printing is already done by the init code.
7306         * On return, we have the chip mapped, but chip registers
7307         * are not set up until start of qib_init_7322_variables.
7308         */
7309        ret = qib_pcie_ddinit(dd, pdev, ent);
7310        if (ret < 0)
7311                goto bail_free;
7312
7313        /* initialize chip-specific variables */
7314        ret = qib_init_7322_variables(dd);
7315        if (ret)
7316                goto bail_cleanup;
7317
7318        if (qib_mini_init || !dd->num_pports)
7319                goto bail;
7320
7321        /*
7322         * Determine number of vectors we want; depends on port count
7323         * and number of configured kernel receive queues actually used.
7324         * Should also depend on whether sdma is enabled or not, but
7325         * that's such a rare testing case it's not worth worrying about.
7326         */
7327        tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
7328        for (i = 0; i < tabsize; i++)
7329                if ((i < ARRAY_SIZE(irq_table) &&
7330                     irq_table[i].port <= dd->num_pports) ||
7331                    (i >= ARRAY_SIZE(irq_table) &&
7332                     dd->rcd[i - ARRAY_SIZE(irq_table)]))
7333                        actual_cnt++;
7334        /* contexts 0 and 1 (one per port) won't use MSIx in this mode */
7335        if (qib_krcvq01_no_msi)
7336                actual_cnt -= dd->num_pports;
7337
7338        tabsize = actual_cnt;
7339        dd->cspec->msix_entries = kzalloc(tabsize *
7340                        sizeof(struct qib_msix_entry), GFP_KERNEL);
7341        if (!dd->cspec->msix_entries) {
7342                qib_dev_err(dd, "No memory for MSIx table\n");
7343                tabsize = 0;
7344        }
7345        for (i = 0; i < tabsize; i++)
7346                dd->cspec->msix_entries[i].msix.entry = i;
7347
7348        if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
7349                qib_dev_err(dd,
7350                        "Failed to setup PCIe or interrupts; continuing anyway\n");
7351        /* may be less than we wanted, if not enough available */
7352        dd->cspec->num_msix_entries = tabsize;
7353
7354        /* setup interrupt handler */
7355        qib_setup_7322_interrupt(dd, 1);
7356
7357        /* clear diagctrl register, in case diags were running and crashed */
7358        qib_write_kreg(dd, kr_hwdiagctrl, 0);
7359#ifdef CONFIG_INFINIBAND_QIB_DCA
7360        if (!dca_add_requester(&pdev->dev)) {
7361                qib_devinfo(dd->pcidev, "DCA enabled\n");
7362                dd->flags |= QIB_DCA_ENABLED;
7363                qib_setup_dca(dd);
7364        }
7365#endif
7366        goto bail;
7367
7368bail_cleanup:
7369        qib_pcie_ddcleanup(dd);
7370bail_free:
7371        qib_free_devdata(dd);
7372        dd = ERR_PTR(ret);
7373bail:
7374        return dd;
7375}
7376
7377/*
7378 * Set the table entry at the specified index from the table specified.
7379 * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
7380 * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
7381 * 'idx' below addresses the correct entry, while its 4 LSBs select the
7382 * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
7383 */
7384#define DDS_ENT_AMP_LSB 14
7385#define DDS_ENT_MAIN_LSB 9
7386#define DDS_ENT_POST_LSB 5
7387#define DDS_ENT_PRE_XTRA_LSB 3
7388#define DDS_ENT_PRE_LSB 0
7389
7390/*
7391 * Set one entry in the TxDDS table for spec'd port
7392 * ridx picks one of the entries, while tp points
7393 * to the appropriate table entry.
7394 */
7395static void set_txdds(struct qib_pportdata *ppd, int ridx,
7396                      const struct txdds_ent *tp)
7397{
7398        struct qib_devdata *dd = ppd->dd;
7399        u32 pack_ent;
7400        int regidx;
7401
7402        /* Get correct offset in chip-space, and in source table */
7403        regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
7404        /*
7405         * We do not use qib_write_kreg_port() because it was intended
7406         * only for registers in the lower "port specific" pages.
7407 * So do the index calculation by hand.
7408         */
7409        if (ppd->hw_pidx)
7410                regidx += (dd->palign / sizeof(u64));
7411
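            /* pack the amp/main/pre/post fields and write the table entry */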
7412        pack_ent = tp->amp << DDS_ENT_AMP_LSB;
7413        pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
7414        pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
7415        pack_ent |= tp->post << DDS_ENT_POST_LSB;
7416        qib_write_kreg(dd, regidx, pack_ent);
7417        /* Prevent back-to-back writes by hitting scratch */
7418        qib_write_kreg(ppd->dd, kr_scratch, 0);
7419}
7420
7421static const struct vendor_txdds_ent vendor_txdds[] = {
7422        { /* Amphenol 1m 30awg NoEq */
7423                { 0x41, 0x50, 0x48 }, "584470002       ",
7424                { 10,  0,  0,  5 }, { 10,  0,  0,  9 }, {  7,  1,  0, 13 },
7425        },
7426        { /* Amphenol 3m 28awg NoEq */
7427                { 0x41, 0x50, 0x48 }, "584470004       ",
7428                {  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  7, 15 },
7429        },
7430        { /* Finisar 3m OM2 Optical */
7431                { 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
7432                {  0,  0,  0,  3 }, {  0,  0,  0,  4 }, {  0,  0,  0, 13 },
7433        },
7434        { /* Finisar 30m OM2 Optical */
7435                { 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
7436                {  0,  0,  0,  1 }, {  0,  0,  0,  5 }, {  0,  0,  0, 11 },
7437        },
7438        { /* Finisar Default OM2 Optical */
7439                { 0x00, 0x90, 0x65 }, NULL,
7440                {  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  0,  0, 12 },
7441        },
7442        { /* Gore 1m 30awg NoEq */
7443                { 0x00, 0x21, 0x77 }, "QSN3300-1       ",
7444                {  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  0, 15 },
7445        },
7446        { /* Gore 2m 30awg NoEq */
7447                { 0x00, 0x21, 0x77 }, "QSN3300-2       ",
7448                {  0,  0,  0,  8 }, {  0,  0,  0, 10 }, {  0,  1,  7, 15 },
7449        },
7450        { /* Gore 1m 28awg NoEq */
7451                { 0x00, 0x21, 0x77 }, "QSN3800-1       ",
7452                {  0,  0,  0,  6 }, {  0,  0,  0,  8 }, {  0,  1,  0, 15 },
7453        },
7454        { /* Gore 3m 28awg NoEq */
7455                { 0x00, 0x21, 0x77 }, "QSN3800-3       ",
7456                {  0,  0,  0,  9 }, {  0,  0,  0, 13 }, {  0,  1,  7, 15 },
7457        },
7458        { /* Gore 5m 24awg Eq */
7459                { 0x00, 0x21, 0x77 }, "QSN7000-5       ",
7460                {  0,  0,  0,  7 }, {  0,  0,  0,  9 }, {  0,  1,  3, 15 },
7461        },
7462        { /* Gore 7m 24awg Eq */
7463                { 0x00, 0x21, 0x77 }, "QSN7000-7       ",
7464                {  0,  0,  0,  9 }, {  0,  0,  0, 11 }, {  0,  2,  6, 15 },
7465        },
7466        { /* Gore 5m 26awg Eq */
7467                { 0x00, 0x21, 0x77 }, "QSN7600-5       ",
7468                {  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  9, 13 },
7469        },
7470        { /* Gore 7m 26awg Eq */
7471                { 0x00, 0x21, 0x77 }, "QSN7600-7       ",
7472                {  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  10,  1,  8, 15 },
7473        },
7474        { /* Intersil 12m 24awg Active */
7475                { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
7476                {  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  3,  0,  9 },
7477        },
7478        { /* Intersil 10m 28awg Active */
7479                { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
7480                {  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  2,  0,  2 },
7481        },
7482        { /* Intersil 7m 30awg Active */
7483                { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
7484                {  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  1,  0,  3 },
7485        },
7486        { /* Intersil 5m 32awg Active */
7487                { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
7488                {  0,  0,  0,  6 }, {  0,  0,  0,  6 }, {  0,  2,  0,  8 },
7489        },
7490        { /* Intersil Default Active */
7491                { 0x00, 0x30, 0xB4 }, NULL,
7492                {  0,  0,  0,  6 }, {  0,  0,  0,  5 }, {  0,  2,  0,  5 },
7493        },
7494        { /* Luxtera 20m Active Optical */
7495                { 0x00, 0x25, 0x63 }, NULL,
7496                {  0,  0,  0,  5 }, {  0,  0,  0,  8 }, {  0,  2,  0,  12 },
7497        },
7498        { /* Molex 1M Cu loopback */
7499                { 0x00, 0x09, 0x3A }, "74763-0025      ",
7500                {  2,  2,  6, 15 }, {  2,  2,  6, 15 }, {  2,  2,  6, 15 },
7501        },
7502        { /* Molex 2m 28awg NoEq */
7503                { 0x00, 0x09, 0x3A }, "74757-2201      ",
7504                {  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  1, 15 },
7505        },
7506};
7507
7508static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
7509        /* amp, pre, main, post */
7510        {  2, 2, 15,  6 },      /* Loopback */
7511        {  0, 0,  0,  1 },      /*  2 dB */
7512        {  0, 0,  0,  2 },      /*  3 dB */
7513        {  0, 0,  0,  3 },      /*  4 dB */
7514        {  0, 0,  0,  4 },      /*  5 dB */
7515        {  0, 0,  0,  5 },      /*  6 dB */
7516        {  0, 0,  0,  6 },      /*  7 dB */
7517        {  0, 0,  0,  7 },      /*  8 dB */
7518        {  0, 0,  0,  8 },      /*  9 dB */
7519        {  0, 0,  0,  9 },      /* 10 dB */
7520        {  0, 0,  0, 10 },      /* 11 dB */
7521        {  0, 0,  0, 11 },      /* 12 dB */
7522        {  0, 0,  0, 12 },      /* 13 dB */
7523        {  0, 0,  0, 13 },      /* 14 dB */
7524        {  0, 0,  0, 14 },      /* 15 dB */
7525        {  0, 0,  0, 15 },      /* 16 dB */
7526};
7527
7528static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
7529        /* amp, pre, main, post */
7530        {  2, 2, 15,  6 },      /* Loopback */
7531        {  0, 0,  0,  8 },      /*  2 dB */
7532        {  0, 0,  0,  8 },      /*  3 dB */
7533        {  0, 0,  0,  9 },      /*  4 dB */
7534        {  0, 0,  0,  9 },      /*  5 dB */
7535        {  0, 0,  0, 10 },      /*  6 dB */
7536        {  0, 0,  0, 10 },      /*  7 dB */
7537        {  0, 0,  0, 11 },      /*  8 dB */
7538        {  0, 0,  0, 11 },      /*  9 dB */
7539        {  0, 0,  0, 12 },      /* 10 dB */
7540        {  0, 0,  0, 12 },      /* 11 dB */
7541        {  0, 0,  0, 13 },      /* 12 dB */
7542        {  0, 0,  0, 13 },      /* 13 dB */
7543        {  0, 0,  0, 14 },      /* 14 dB */
7544        {  0, 0,  0, 14 },      /* 15 dB */
7545        {  0, 0,  0, 15 },      /* 16 dB */
7546};
7547
7548static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
7549        /* amp, pre, main, post */
7550        {  2, 2, 15,  6 },      /* Loopback */
7551        {  0, 1,  0,  7 },      /*  2 dB (also QMH7342) */
7552        {  0, 1,  0,  9 },      /*  3 dB (also QMH7342) */
7553        {  0, 1,  0, 11 },      /*  4 dB */
7554        {  0, 1,  0, 13 },      /*  5 dB */
7555        {  0, 1,  0, 15 },      /*  6 dB */
7556        {  0, 1,  3, 15 },      /*  7 dB */
7557        {  0, 1,  7, 15 },      /*  8 dB */
7558        {  0, 1,  7, 15 },      /*  9 dB */
7559        {  0, 1,  8, 15 },      /* 10 dB */
7560        {  0, 1,  9, 15 },      /* 11 dB */
7561        {  0, 1, 10, 15 },      /* 12 dB */
7562        {  0, 2,  6, 15 },      /* 13 dB */
7563        {  0, 2,  7, 15 },      /* 14 dB */
7564        {  0, 2,  8, 15 },      /* 15 dB */
7565        {  0, 2,  9, 15 },      /* 16 dB */
7566};
7567
7568/*
7569 * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
7570 * These are mostly used for mez cards going through connectors
7571 * and backplane traces, but can be used to add other "unusual"
7572 * table values as well.
7573 */
7574static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
7575        /* amp, pre, main, post */
7576        {  0, 0, 0,  1 },       /* QMH7342 backplane settings */
7577        {  0, 0, 0,  1 },       /* QMH7342 backplane settings */
7578        {  0, 0, 0,  2 },       /* QMH7342 backplane settings */
7579        {  0, 0, 0,  2 },       /* QMH7342 backplane settings */
7580        {  0, 0, 0,  3 },       /* QMH7342 backplane settings */
7581        {  0, 0, 0,  4 },       /* QMH7342 backplane settings */
7582        {  0, 1, 4, 15 },       /* QME7342 backplane settings 1.0 */
7583        {  0, 1, 3, 15 },       /* QME7342 backplane settings 1.0 */
7584        {  0, 1, 0, 12 },       /* QME7342 backplane settings 1.0 */
7585        {  0, 1, 0, 11 },       /* QME7342 backplane settings 1.0 */
7586        {  0, 1, 0,  9 },       /* QME7342 backplane settings 1.0 */
7587        {  0, 1, 0, 14 },       /* QME7342 backplane settings 1.0 */
7588        {  0, 1, 2, 15 },       /* QME7342 backplane settings 1.0 */
7589        {  0, 1, 0, 11 },       /* QME7342 backplane settings 1.1 */
7590        {  0, 1, 0,  7 },       /* QME7342 backplane settings 1.1 */
7591        {  0, 1, 0,  9 },       /* QME7342 backplane settings 1.1 */
7592        {  0, 1, 0,  6 },       /* QME7342 backplane settings 1.1 */
7593        {  0, 1, 0,  8 },       /* QME7342 backplane settings 1.1 */
7594};
7595
7596static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
7597        /* amp, pre, main, post */
7598        {  0, 0, 0,  7 },       /* QMH7342 backplane settings */
7599        {  0, 0, 0,  7 },       /* QMH7342 backplane settings */
7600        {  0, 0, 0,  8 },       /* QMH7342 backplane settings */
7601        {  0, 0, 0,  8 },       /* QMH7342 backplane settings */
7602        {  0, 0, 0,  9 },       /* QMH7342 backplane settings */
7603        {  0, 0, 0, 10 },       /* QMH7342 backplane settings */
7604        {  0, 1, 4, 15 },       /* QME7342 backplane settings 1.0 */
7605        {  0, 1, 3, 15 },       /* QME7342 backplane settings 1.0 */
7606        {  0, 1, 0, 12 },       /* QME7342 backplane settings 1.0 */
7607        {  0, 1, 0, 11 },       /* QME7342 backplane settings 1.0 */
7608        {  0, 1, 0,  9 },       /* QME7342 backplane settings 1.0 */
7609        {  0, 1, 0, 14 },       /* QME7342 backplane settings 1.0 */
7610        {  0, 1, 2, 15 },       /* QME7342 backplane settings 1.0 */
7611        {  0, 1, 0, 11 },       /* QME7342 backplane settings 1.1 */
7612        {  0, 1, 0,  7 },       /* QME7342 backplane settings 1.1 */
7613        {  0, 1, 0,  9 },       /* QME7342 backplane settings 1.1 */
7614        {  0, 1, 0,  6 },       /* QME7342 backplane settings 1.1 */
7615        {  0, 1, 0,  8 },       /* QME7342 backplane settings 1.1 */
7616};
7617
7618static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
7619        /* amp, pre, main, post */
7620        {  0, 1,  0,  4 },      /* QMH7342 backplane settings */
7621        {  0, 1,  0,  5 },      /* QMH7342 backplane settings */
7622        {  0, 1,  0,  6 },      /* QMH7342 backplane settings */
7623        {  0, 1,  0,  8 },      /* QMH7342 backplane settings */
7624        {  0, 1,  0, 10 },      /* QMH7342 backplane settings */
7625        {  0, 1,  0, 12 },      /* QMH7342 backplane settings */
7626        {  0, 1,  4, 15 },      /* QME7342 backplane settings 1.0 */
7627        {  0, 1,  3, 15 },      /* QME7342 backplane settings 1.0 */
7628        {  0, 1,  0, 12 },      /* QME7342 backplane settings 1.0 */
7629        {  0, 1,  0, 11 },      /* QME7342 backplane settings 1.0 */
7630        {  0, 1,  0,  9 },      /* QME7342 backplane settings 1.0 */
7631        {  0, 1,  0, 14 },      /* QME7342 backplane settings 1.0 */
7632        {  0, 1,  2, 15 },      /* QME7342 backplane settings 1.0 */
7633        {  0, 1,  0, 11 },      /* QME7342 backplane settings 1.1 */
7634        {  0, 1,  0,  7 },      /* QME7342 backplane settings 1.1 */
7635        {  0, 1,  0,  9 },      /* QME7342 backplane settings 1.1 */
7636        {  0, 1,  0,  6 },      /* QME7342 backplane settings 1.1 */
7637        {  0, 1,  0,  8 },      /* QME7342 backplane settings 1.1 */
7638};
7639
7640static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
7641        /* amp, pre, main, post */
7642        { 0, 0, 0, 0 },         /* QME7342 mfg settings */
7643        { 0, 0, 0, 6 },         /* QME7342 P2 mfg settings */
7644};
7645
7646static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
7647                                               unsigned atten)
7648{
7649        /*
7650         * The attenuation table starts at 2dB for entry 1,
7651         * with entry 0 being the loopback entry.
7652         */
7653        if (atten <= 2)
7654                atten = 1;
7655        else if (atten > TXDDS_TABLE_SZ)
7656                atten = TXDDS_TABLE_SZ - 1;
7657        else
7658                atten--;
7659        return txdds + atten;
7660}
7661
7662/*
7663 * If override is set, the module parameter txselect has a value
7664 * for this specific port, so use it, rather than our normal mechanism.
7665 */
7666static void find_best_ent(struct qib_pportdata *ppd,
7667                          const struct txdds_ent **sdr_dds,
7668                          const struct txdds_ent **ddr_dds,
7669                          const struct txdds_ent **qdr_dds, int override)
7670{
7671        struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
7672        int idx;
7673
7674        /* Search table of known cables */
7675        for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
7676                const struct vendor_txdds_ent *v = vendor_txdds + idx;
7677
7678                if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
7679                    (!v->partnum ||
7680                     !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
7681                        *sdr_dds = &v->sdr;
7682                        *ddr_dds = &v->ddr;
7683                        *qdr_dds = &v->qdr;
7684                        return;
7685                }
7686        }
7687
7688        /* Active cables don't have attenuation, so we only set SERDES
7689         * settings to account for the attenuation of the board traces. */
7690        if (!override && QSFP_IS_ACTIVE(qd->tech)) {
7691                *sdr_dds = txdds_sdr + ppd->dd->board_atten;
7692                *ddr_dds = txdds_ddr + ppd->dd->board_atten;
7693                *qdr_dds = txdds_qdr + ppd->dd->board_atten;
7694                return;
7695        }
7696
7697        if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
7698                                                      qd->atten[1])) {
7699                *sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
7700                *ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
7701                *qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
7702                return;
7703        } else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
7704                /*
7705                 * If we have no (or incomplete) data from the cable
7706                 * EEPROM, or no QSFP, or override is set, use the
7707                 * module parameter value to index into the attenuation
7708                 * table.
7709                 */
7710                idx = ppd->cpspec->no_eep;
7711                *sdr_dds = &txdds_sdr[idx];
7712                *ddr_dds = &txdds_ddr[idx];
7713                *qdr_dds = &txdds_qdr[idx];
7714        } else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
7715                /* similar to above, but index into the "extra" table. */
7716                idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
7717                *sdr_dds = &txdds_extra_sdr[idx];
7718                *ddr_dds = &txdds_extra_ddr[idx];
7719                *qdr_dds = &txdds_extra_qdr[idx];
7720        } else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
7721                   ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
7722                                          TXDDS_MFG_SZ)) {
7723                idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
7724                pr_info("IB%u:%u use idx %u into txdds_mfg\n",
7725                        ppd->dd->unit, ppd->port, idx);
7726                *sdr_dds = &txdds_extra_mfg[idx];
7727                *ddr_dds = &txdds_extra_mfg[idx];
7728                *qdr_dds = &txdds_extra_mfg[idx];
7729        } else {
7730                /* this shouldn't happen, it's range checked */
7731                *sdr_dds = txdds_sdr + qib_long_atten;
7732                *ddr_dds = txdds_ddr + qib_long_atten;
7733                *qdr_dds = txdds_qdr + qib_long_atten;
7734        }
7735}
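
/*
 * Sketch of how cpspec->no_eep (the cached txselect module parameter
 * value) is decoded by the chain above, with n = no_eep:
 *
 *   n < TXDDS_TABLE_SZ                  -> txdds_{sdr,ddr,qdr}[n]
 *   n < TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ -> txdds_extra_*[n - TXDDS_TABLE_SZ]
 *   n < ... + TXDDS_MFG_SZ (QME/QMH)    -> txdds_extra_mfg[n - TXDDS_TABLE_SZ
 *                                                          - TXDDS_EXTRA_SZ]
 */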
7736
7737static void init_txdds_table(struct qib_pportdata *ppd, int override)
7738{
7739        const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
7740        struct txdds_ent *dds;
7741        int idx;
7742        int single_ent = 0;
7743
7744        find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
7745
7746        /* for mez cards or override, use the selected value for all entries */
7747        if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
7748                single_ent = 1;
7749
7750        /* Fill in the first entry of each speed block with the best found. */
7751        set_txdds(ppd, 0, sdr_dds);
7752        set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
7753        set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
7754        if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
7755                QIBL_LINKACTIVE)) {
7756                dds = (struct txdds_ent *)(ppd->link_speed_active ==
7757                                           QIB_IB_QDR ?  qdr_dds :
7758                                           (ppd->link_speed_active ==
7759                                            QIB_IB_DDR ? ddr_dds : sdr_dds));
7760                write_tx_serdes_param(ppd, dds);
7761        }
7762
7763        /* Fill in the remaining entries with the default table values. */
7764        for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
7765                set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
7766                set_txdds(ppd, idx + TXDDS_TABLE_SZ,
7767                          single_ent ? ddr_dds : txdds_ddr + idx);
7768                set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
7769                          single_ent ? qdr_dds : txdds_qdr + idx);
7770        }
7771}
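
/*
 * Resulting layout of the per-port DDS table written above, one block
 * of TXDDS_TABLE_SZ entries per speed:
 *
 *   [0, TXDDS_TABLE_SZ)                      SDR
 *   [TXDDS_TABLE_SZ, 2 * TXDDS_TABLE_SZ)     DDR
 *   [2 * TXDDS_TABLE_SZ, 3 * TXDDS_TABLE_SZ) QDR
 */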
7772
7773#define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
7774#define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
7775#define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
7776#define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
7777#define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
7778#define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
7779#define AHB_TRANS_TRIES 10
7780
7781/*
7782 * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan3,
7783 * 5=subsystem, which is why most calls pass "chan + (chan >> 1)"
7784 * as the channel argument.
7785 */
7786static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
7787                    u32 data, u32 mask)
7788{
7789        u32 rd_data, wr_data, sz_mask;
7790        u64 trans, acc, prev_acc;
7791        u32 ret = 0xBAD0BAD;
7792        int tries;
7793
7794        prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
7795        /* From this point on, make sure we restore prev_acc on exit */
7796        acc = (quad << 1) | 1;
7797        qib_write_kreg(dd, KR_AHB_ACC, acc);
7798
7799        for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7800                trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7801                if (trans & AHB_TRANS_RDY)
7802                        break;
7803        }
7804        if (tries >= AHB_TRANS_TRIES) {
7805                qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
7806                goto bail;
7807        }
7808
7809        /* If mask is not all 1s, we need to read, but different SerDes
7810         * entities have different sizes
7811         */
7812        sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
7813        wr_data = data & mask & sz_mask;
7814        if ((~mask & sz_mask) != 0) {
7815                trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7816                qib_write_kreg(dd, KR_AHB_TRANS, trans);
7817
7818                for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7819                        trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7820                        if (trans & AHB_TRANS_RDY)
7821                                break;
7822                }
7823                if (tries >= AHB_TRANS_TRIES) {
7824                        qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
7825                                    AHB_TRANS_TRIES);
7826                        goto bail;
7827                }
7828                /* Re-read in case the host split the 64-bit read and got data first */
7829                trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7830                rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
7831                wr_data |= (rd_data & ~mask & sz_mask);
7832        }
7833
7834        /* If mask is not zero, we need to write. */
7835        if (mask & sz_mask) {
7836                trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7837                trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
7838                trans |= AHB_WR;
7839                qib_write_kreg(dd, KR_AHB_TRANS, trans);
7840
7841                for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7842                        trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7843                        if (trans & AHB_TRANS_RDY)
7844                                break;
7845                }
7846                if (tries >= AHB_TRANS_TRIES) {
7847                        qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
7848                                    AHB_TRANS_TRIES);
7849                        goto bail;
7850                }
7851        }
7852        ret = wr_data;
7853bail:
7854        qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
7855        return ret;
7856}
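
/*
 * Channel mapping used by the ahb_mod() callers below: logical SerDes
 * channels 0..3 become AHB channel codes 0, 1, 3, 4 (2 is the pll,
 * 5 the subsystem), which is exactly what "chan + (chan >> 1)" yields:
 *
 *   chan:               0  1  2  3
 *   chan + (chan >> 1)  0  1  3  4
 *
 * A minimal usage sketch (illustrative only, not called anywhere):
 * read-modify-write bits [14:11] of subsystem address 5 of the IB
 * SerDes for hardware port 0, leaving all other bits untouched.
 */
static inline u32 ahb_mod_example(struct qib_devdata *dd)
{
	/* quad, chan 5 (subsystem), addr 5, data, mask; returns value written */
	return ahb_mod(dd, IBSD(0), 5, 5, 8 << 11, BMASK(14, 11));
}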
7857
7858static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
7859                             unsigned mask)
7860{
7861        struct qib_devdata *dd = ppd->dd;
7862        int chan;
7863        u32 rbc;
7864
7865        for (chan = 0; chan < SERDES_CHANS; ++chan) {
7866                ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7867                        data, mask);
7868                rbc = ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7869                              addr, 0, 0);
7870        }
7871}
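
/*
 * Usage note: the mask selects which bits of "data" are written, so
 * e.g. ibsd_wr_allchans(ppd, 13, 1 << 6, 1 << 6) sets only bit 6 of
 * SerDes address 13 on all four channels.  The trailing read-back
 * (rbc, otherwise unused) re-reads the address after the write.
 */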
7872
7873static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
7874{
7875        u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
7876        u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
7877
7878        if (enable && !state) {
7879                pr_info("IB%u:%u Turning LOS on\n",
7880                        ppd->dd->unit, ppd->port);
7881                data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7882        } else if (!enable && state) {
7883                pr_info("IB%u:%u Turning LOS off\n",
7884                        ppd->dd->unit, ppd->port);
7885                data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7886        }
7887        qib_write_kreg_port(ppd, krp_serdesctrl, data);
7888}
7889
7890static int serdes_7322_init(struct qib_pportdata *ppd)
7891{
7892        int ret = 0;
7893        if (ppd->dd->cspec->r1)
7894                ret = serdes_7322_init_old(ppd);
7895        else
7896                ret = serdes_7322_init_new(ppd);
7897        return ret;
7898}
7899
7900static int serdes_7322_init_old(struct qib_pportdata *ppd)
7901{
7902        u32 le_val;
7903
7904        /*
7905         * Initialize the Tx DDS tables.  Also done every QSFP event,
7906         * for adapters with QSFP
7907         */
7908        init_txdds_table(ppd, 0);
7909
7910        /* ensure no tx overrides from earlier driver loads */
7911        qib_write_kreg_port(ppd, krp_tx_deemph_override,
7912                SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7913                reset_tx_deemphasis_override));
7914
7915        /* Patch some SerDes defaults to "Better for IB" */
7916        /* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
7917        ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
7918
7919        /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
7920        ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
7921        /* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
7922        ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));
7923
7924        /* May be overridden in qsfp_7322_event */
7925        le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
7926        ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
7927
7928        /* enable LE1 adaptation for all but QME, which is disabled */
7929        le_val = IS_QME(ppd->dd) ? 0 : 1;
7930        ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));
7931
7932        /* Clear cmode-override, may be set from older driver */
7933        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7934
7935        /* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
7936        ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));
7937
7938        /* setup LoS params; these are subsystem, so chan == 5 */
7939        /* LoS filter threshold_count on, ch 0-3, set to 8 */
7940        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7941        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7942        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7943        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7944
7945        /* LoS filter threshold_count off, ch 0-3, set to 4 */
7946        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
7947        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
7948        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
7949        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
7950
7951        /* LoS filter select enabled */
7952        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
7953
7954        /* LoS target data:  SDR=4, DDR=2, QDR=1 */
7955        ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
7956        ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7957        ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7958
7959        serdes_7322_los_enable(ppd, 1);
7960
7961        /* rxbistena: set to 0 to avoid side effects if it switches later */
7962        ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
7963
7964        /* Configure 4 DFE taps, and only they adapt */
7965        ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));
7966
7967        /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
7968        le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
7969        ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
7970
7971        /*
7972         * Set receive adaptation mode.  SDR and DDR adaptation are
7973         * always on, and QDR is initially enabled; later disabled.
7974         */
7975        qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
7976        qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
7977        qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
7978                            ppd->dd->cspec->r1 ?
7979                            QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
7980        ppd->cpspec->qdr_dfe_on = 1;
7981
7982        /* FLoop LOS gate: PPM filter enabled */
7983        ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7984
7985        /* rx offset center enabled */
7986        ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);
7987
7988        if (!ppd->dd->cspec->r1) {
7989                ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
7990                ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
7991        }
7992
7993        /* Set the frequency loop bandwidth to 15 */
7994        ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));
7995
7996        return 0;
7997}
7998
7999static int serdes_7322_init_new(struct qib_pportdata *ppd)
8000{
8001        unsigned long tend;
8002        u32 le_val, rxcaldone;
8003        int chan, chan_done = (1 << SERDES_CHANS) - 1;
8004
8005        /* Clear cmode-override, may be set from older driver */
8006        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
8007
8008        /* ensure no tx overrides from earlier driver loads */
8009        qib_write_kreg_port(ppd, krp_tx_deemph_override,
8010                SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8011                reset_tx_deemphasis_override));
8012
8013        /* START OF LSI SUGGESTED SERDES BRINGUP */
8014        /* Reset - Calibration Setup */
8015        /*       Stop DFE adaptation */
8016        ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
8017        /*       Disable LE1 */
8018        ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
8019        /*       Disable autoadapt for LE1 */
8020        ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
8021        /*       Disable LE2 */
8022        ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
8023        /*       Disable VGA */
8024        ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
8025        /*       Disable AFE Offset Cancel */
8026        ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
8027        /*       Disable Timing Loop */
8028        ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
8029        /*       Disable Frequency Loop */
8030        ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
8031        /*       Disable Baseline Wander Correction */
8032        ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
8033        /*       Disable RX Calibration */
8034        ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
8035        /*       Disable RX Offset Calibration */
8036        ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
8037        /*       Select BB CDR */
8038        ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
8039        /*       CDR Step Size */
8040        ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
8041        /*       Enable phase Calibration */
8042        ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
8043        /*       DFE Bandwidth [2:14-12] */
8044        ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
8045        /*       DFE Config (4 taps only) */
8046        ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
8047        /*       Gain Loop Bandwidth */
8048        if (!ppd->dd->cspec->r1) {
8049                ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
8050                ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
8051        } else {
8052                ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
8053        }
8054        /*       Baseline Wander Correction Gain [13:4-0] (leave as default) */
8055        /*       Baseline Wander Correction Gain [3:7-5] (leave as default) */
8056        /*       Data Rate Select [5:7-6] (leave as default) */
8057        /*       RX Parallel Word Width [3:10-8] (leave as default) */
8058
8059        /* RX RESET */
8060        /*       Single- or Multi-channel reset */
8061        /*       RX Analog reset */
8062        /*       RX Digital reset */
8063        ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
8064        msleep(20);
8065        /*       RX Analog reset */
8066        ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
8067        msleep(20);
8068        /*       RX Digital reset */
8069        ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
8070        msleep(20);
8071
8072        /* setup LoS params; these are subsystem, so chan == 5 */
8073        /* LoS filter threshold_count on, ch 0-3, set to 8 */
8074        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
8075        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
8076        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
8077        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
8078
8079        /* LoS filter threshold_count off, ch 0-3, set to 4 */
8080        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
8081        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
8082        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
8083        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
8084
8085        /* LoS filter select enabled */
8086        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
8087
8088        /* LoS target data:  SDR=4, DDR=2, QDR=1 */
8089        ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
8090        ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
8091        ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
8092
8093        /* Turn LOS on during initial SERDES init */
8094        serdes_7322_los_enable(ppd, 1);
8095        /* FLoop LOS gate: PPM filter enabled */
8096        ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
8097
8098        /* RX LATCH CALIBRATION */
8099        /*       Enable Eyefinder Phase Calibration latch */
8100        ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
8101        /*       Enable RX Offset Calibration latch */
8102        ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
8103        msleep(20);
8104        /*       Start Calibration */
8105        ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
8106        tend = jiffies + msecs_to_jiffies(500);
8107        while (chan_done && !time_is_before_jiffies(tend)) {
8108                msleep(20);
8109                for (chan = 0; chan < SERDES_CHANS; ++chan) {
8110                        rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
8111                                            (chan + (chan >> 1)),
8112                                            25, 0, 0);
8113                        if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
8114                            (~chan_done & (1 << chan)) == 0)
8115                                chan_done &= ~(1 << chan);
8116                }
8117        }
8118        if (chan_done) {
8119                pr_info("Serdes %d calibration not done after .5 sec: 0x%x\n",
8120                         IBSD(ppd->hw_pidx), chan_done);
8121        } else {
8122                for (chan = 0; chan < SERDES_CHANS; ++chan) {
8123                        rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
8124                                            (chan + (chan >> 1)),
8125                                            25, 0, 0);
8126                        if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
8127                                pr_info("Serdes %d chan %d calibration failed\n",
8128                                        IBSD(ppd->hw_pidx), chan);
8129                }
8130        }
8131
8132        /*       Turn off Calibration */
8133        ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
8134        msleep(20);
8135
8136        /* BRING RX UP */
8137        /*       Set LE2 value (May be overridden in qsfp_7322_event) */
8138        le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
8139        ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
8140        /*       Set LE2 Loop bandwidth */
8141        ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
8142        /*       Enable LE2 */
8143        ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
8144        msleep(20);
8145        /*       Enable H0 only */
8146        ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
8147        /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
8148        le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
8149        ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
8150        /*       Enable VGA */
8151        ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
8152        msleep(20);
8153        /*       Set Frequency Loop Bandwidth */
8154        ibsd_wr_allchans(ppd, 2, (15 << 5), BMASK(8, 5));
8155        /*       Enable Frequency Loop */
8156        ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
8157        /*       Set Timing Loop Bandwidth */
8158        ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
8159        /*       Enable Timing Loop */
8160        ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
8161        msleep(50);
8162        /*       Enable DFE
8163         *       Set receive adaptation mode.  SDR and DDR adaptation are
8164         *       always on, and QDR is initially enabled; later disabled.
8165         */
8166        qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
8167        qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
8168        qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
8169                            ppd->dd->cspec->r1 ?
8170                            QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
8171        ppd->cpspec->qdr_dfe_on = 1;
8172        /*       Disable LE1  */
8173        ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
8174        /*       Disable auto adapt for LE1 */
8175        ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
8176        msleep(20);
8177        /*       Enable AFE Offset Cancel */
8178        ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
8179        /*       Enable Baseline Wander Correction */
8180        ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
8181        /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
8182        ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
8183        /* VGA output common mode */
8184        ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
8185
8186        /*
8187         * Initialize the Tx DDS tables.  Also done every QSFP event,
8188         * for adapters with QSFP
8189         */
8190        init_txdds_table(ppd, 0);
8191
8192        return 0;
8193}
8194
8195/* start of QMH serdes parameter adjustment */
8196
8197static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
8198{
8199        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8200                9, code << 9, 0x3f << 9);
8201}
8202
8203static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
8204        int enable, u32 tapenable)
8205{
8206        if (enable)
8207                ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8208                        1, 3 << 10, 0x1f << 10);
8209        else
8210                ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8211                        1, 0, 0x1f << 10);
8212}
8213
8214/* Set clock to 1, 0, 1, 0 */
8215static void clock_man(struct qib_pportdata *ppd, int chan)
8216{
8217        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8218                4, 0x4000, 0x4000);
8219        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8220                4, 0, 0x4000);
8221        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8222                4, 0x4000, 0x4000);
8223        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8224                4, 0, 0x4000);
8225}
8226
8227/*
8228 * Write the current Tx serdes pre, post, main and amp settings into the serdes.
8229 * The caller must pass the settings appropriate for the current speed,
8230 * or not care if they are correct for the current speed.
8231 */
8232static void write_tx_serdes_param(struct qib_pportdata *ppd,
8233                                  struct txdds_ent *txdds)
8234{
8235        u64 deemph;
8236
8237        deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
8238        /* field names for amp, main, post, pre, respectively */
8239        deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
8240                    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
8241                    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
8242                    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));
8243
8244        deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8245                           tx_override_deemphasis_select);
8246        deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8247                    txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8248                                       txampcntl_d2a);
8249        deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8250                     txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8251                                   txc0_ena);
8252        deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8253                     txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8254                                    txcp1_ena);
8255        deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8256                     txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8257                                    txcn1_ena);
8258        qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
8259}
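
/*
 * Each of the four field inserts above follows the same idiom: mask
 * the source value to the field's width (SYM_RMASK), then shift it to
 * the field's position (SYM_LSB).  As a generic sketch (the helper
 * name is hypothetical, not part of this driver):
 */
static inline u64 dds_field_example(u64 val, u64 rmask, int lsb)
{
	/* keep only the field-width bits of val, then position them */
	return (val & rmask) << lsb;
}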
8260
8261/*
8262 * Set the parameters for mez cards on link bounce, so they are
8263 * always exactly what was requested.  Similar logic to init_txdds
8264 * but does just the serdes.
8265 */
8266static void adj_tx_serdes(struct qib_pportdata *ppd)
8267{
8268        const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
8269        struct txdds_ent *dds;
8270
8271        find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
8272        dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
8273                qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
8274                                ddr_dds : sdr_dds));
8275        write_tx_serdes_param(ppd, dds);
8276}
8277
8278/* set QDR forced value for H1, if needed */
8279static void force_h1(struct qib_pportdata *ppd)
8280{
8281        int chan;
8282
8283        ppd->cpspec->qdr_reforce = 0;
8284        if (!ppd->dd->cspec->r1)
8285                return;
8286
8287        for (chan = 0; chan < SERDES_CHANS; chan++) {
8288                set_man_mode_h1(ppd, chan, 1, 0);
8289                set_man_code(ppd, chan, ppd->cpspec->h1_val);
8290                clock_man(ppd, chan);
8291                set_man_mode_h1(ppd, chan, 0, 0);
8292        }
8293}
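
/*
 * The per-channel sequence above: enter manual H1 mode, load the
 * stored h1_val code, pulse the manual clock (1, 0, 1, 0) to latch
 * it, then leave manual mode.
 */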
8294
8295#define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
8296#define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
8297
8298#define R_OPCODE_LSB 3
8299#define R_OP_NOP 0
8300#define R_OP_SHIFT 2
8301#define R_OP_UPDATE 3
8302#define R_TDI_LSB 2
8303#define R_TDO_LSB 1
8304#define R_RDY 1
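
/*
 * Layout of kr_r_access implied by the constants above (an inference
 * from the shifts, not from chip documentation): bit 0 = ready,
 * bit 1 = TDO, bit 2 = TDI, bits [4:3] = opcode (0 NOP, 2 SHIFT,
 * 3 UPDATE), plus the SJA_EN enable and the bist_en chain select.
 */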
8305
8306static int qib_r_grab(struct qib_devdata *dd)
8307{
8308        u64 val;
8309        val = SJA_EN;
8310        qib_write_kreg(dd, kr_r_access, val);
8311        qib_read_kreg32(dd, kr_scratch);
8312        return 0;
8313}
8314
8315/* qib_r_wait_for_rdy() not only waits for the ready bit, it also
8316 * returns the current state of R_TDO
8317 */
8318static int qib_r_wait_for_rdy(struct qib_devdata *dd)
8319{
8320        u64 val;
8321        int timeout;
8322        for (timeout = 0; timeout < 100; ++timeout) {
8323                val = qib_read_kreg32(dd, kr_r_access);
8324                if (val & R_RDY)
8325                        return (val >> R_TDO_LSB) & 1;
8326        }
8327        return -1;
8328}
8329
8330static int qib_r_shift(struct qib_devdata *dd, int bisten,
8331                       int len, u8 *inp, u8 *outp)
8332{
8333        u64 valbase, val;
8334        int ret, pos;
8335
8336        valbase = SJA_EN | (bisten << BISTEN_LSB) |
8337                (R_OP_SHIFT << R_OPCODE_LSB);
8338        ret = qib_r_wait_for_rdy(dd);
8339        if (ret < 0)
8340                goto bail;
8341        for (pos = 0; pos < len; ++pos) {
8342                val = valbase;
8343                if (outp) {
8344                        outp[pos >> 3] &= ~(1 << (pos & 7));
8345                        outp[pos >> 3] |= (ret << (pos & 7));
8346                }
8347                if (inp) {
8348                        int tdi = inp[pos >> 3] >> (pos & 7);
8349                        val |= ((tdi & 1) << R_TDI_LSB);
8350                }
8351                qib_write_kreg(dd, kr_r_access, val);
8352                qib_read_kreg32(dd, kr_scratch);
8353                ret = qib_r_wait_for_rdy(dd);
8354                if (ret < 0)
8355                        break;
8356        }
8357        /* Restore to NOP between operations. */
8358        val = SJA_EN | (bisten << BISTEN_LSB);
8359        qib_write_kreg(dd, kr_r_access, val);
8360        qib_read_kreg32(dd, kr_scratch);
8361        ret = qib_r_wait_for_rdy(dd);
8362
8363        if (ret >= 0)
8364                ret = pos;
8365bail:
8366        return ret;
8367}
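
/*
 * Bit order in the inp/outp buffers above: scan-chain bit N lives at
 * byte N >> 3, bit N & 7 (LSB-first within each byte).  For example,
 * chain bit 10 is bit 2 of buf[1]:
 *
 *   tdi = (buf[10 >> 3] >> (10 & 7)) & 1;
 *
 * Note also that outp lags by one position: the TDO value stored for
 * position "pos" was sampled by the wait that preceded that shift.
 */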
8368
8369static int qib_r_update(struct qib_devdata *dd, int bisten)
8370{
8371        u64 val;
8372        int ret;
8373
8374        val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
8375        ret = qib_r_wait_for_rdy(dd);
8376        if (ret >= 0) {
8377                qib_write_kreg(dd, kr_r_access, val);
8378                qib_read_kreg32(dd, kr_scratch);
8379        }
8380        return ret;
8381}
8382
8383#define BISTEN_PORT_SEL 15
8384#define LEN_PORT_SEL 625
8385#define BISTEN_AT 17
8386#define LEN_AT 156
8387#define BISTEN_ETM 16
8388#define LEN_ETM 632
8389
8390#define BIT2BYTE(x) (((x) +  BITS_PER_BYTE - 1) / BITS_PER_BYTE)
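
/*
 * e.g. BIT2BYTE(LEN_PORT_SEL) = (625 + 7) / 8 = 79 bytes, and
 * BIT2BYTE(LEN_AT) = (156 + 7) / 8 = 20 bytes, matching the array
 * sizes below.
 */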
8391
8392/* these are common for all IB port use cases. */
8393static u8 reset_at[BIT2BYTE(LEN_AT)] = {
8394        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8395        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
8396};
8397static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
8398        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8399        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8400        0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
8401        0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
8402        0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
8403        0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
8404        0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8405        0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
8406};
8407static u8 at[BIT2BYTE(LEN_AT)] = {
8408        0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
8409        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
8410};
8411
8412/* used for IB1 or IB2, only one in use */
8413static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
8414        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8415        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8416        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8417        0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
8418        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8419        0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
8420        0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
8421        0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
8422};
8423
8424/* used when both IB1 and IB2 are in use */
8425static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
8426        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8427        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
8428        0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8429        0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
8430        0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
8431        0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
8432        0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
8433        0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
8434};
8435
8436/* used when only IB1 is in use */
8437static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
8438        0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8439        0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8440        0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8441        0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8442        0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8443        0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8444        0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8445        0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8446};
8447
8448/* used when only IB2 is in use */
8449static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
8450        0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
8451        0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
8452        0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8453        0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8454        0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
8455        0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8456        0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8457        0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
8458};
8459
8460/* used when both IB1 and IB2 are in use */
8461static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
8462        0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8463        0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8464        0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8465        0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8466        0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8467        0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
8468        0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8469        0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8470};
8471
8472/*
8473 * Do setup to properly handle IB link recovery; if both is set, we
8474 * are initializing to cover both ports; otherwise we are initializing
8475 * to cover a single port card, or the port has reached INIT and we may
8476 * need to switch coverage types.
8477 */
8478static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
8479{
8480        u8 *portsel, *etm;
8481        struct qib_devdata *dd = ppd->dd;
8482
8483        if (!ppd->dd->cspec->r1)
8484                return;
8485        if (!both) {
8486                dd->cspec->recovery_ports_initted++;
8487                ppd->cpspec->recovery_init = 1;
8488        }
8489        if (!both && dd->cspec->recovery_ports_initted == 1) {
8490                portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
8491                etm = atetm_1port;
8492        } else {
8493                portsel = portsel_2port;
8494                etm = atetm_2port;
8495        }
8496
8497        if (qib_r_grab(dd) < 0 ||
8498                qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
8499                qib_r_update(dd, BISTEN_ETM) < 0 ||
8500                qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
8501                qib_r_update(dd, BISTEN_AT) < 0 ||
8502                qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
8503                            portsel, NULL) < 0 ||
8504                qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
8505                qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
8506                qib_r_update(dd, BISTEN_AT) < 0 ||
8507                qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
8508                qib_r_update(dd, BISTEN_ETM) < 0)
8509                qib_dev_err(dd, "Failed IB link recovery setup\n");
8510}
8511
8512static void check_7322_rxe_status(struct qib_pportdata *ppd)
8513{
8514        struct qib_devdata *dd = ppd->dd;
8515        u64 fmask;
8516
8517        if (dd->cspec->recovery_ports_initted != 1)
8518                return; /* rest doesn't apply to dualport */
8519        qib_write_kreg(dd, kr_control, dd->control |
8520                       SYM_MASK(Control, FreezeMode));
8521        (void)qib_read_kreg64(dd, kr_scratch);
8522        udelay(3); /* ibcreset asserted 400ns, be sure that's over */
8523        fmask = qib_read_kreg64(dd, kr_act_fmask);
8524        if (!fmask) {
8525                /*
8526                 * Require a power cycle before we'll work again; make
8527                 * sure we get no more interrupts, and don't turn off
8528                 * freeze.
8529                 */
8530                ppd->dd->cspec->stay_in_freeze = 1;
8531                qib_7322_set_intr_state(ppd->dd, 0);
8532                qib_write_kreg(dd, kr_fmask, 0ULL);
8533                qib_dev_err(dd, "HCA unusable until powercycled\n");
8534                return; /* eventually reset */
8535        }
8536
8537        qib_write_kreg(ppd->dd, kr_hwerrclear,
8538            SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));
8539
8540        /* don't do the full clear_freeze(), not needed for this */
8541        qib_write_kreg(dd, kr_control, dd->control);
8542        qib_read_kreg32(dd, kr_scratch);
8543        /* take IBC out of reset */
8544        if (ppd->link_speed_supported) {
8545                ppd->cpspec->ibcctrl_a &=
8546                        ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
8547                qib_write_kreg_port(ppd, krp_ibcctrl_a,
8548                                    ppd->cpspec->ibcctrl_a);
8549                qib_read_kreg32(dd, kr_scratch);
8550                if (ppd->lflags & QIBL_IB_LINK_DISABLED)
8551                        qib_set_ib_7322_lstate(ppd, 0,
8552                                QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
8553        }
8554}
8555