linux/drivers/infiniband/hw/qib/qib_iba7322.c
   1/*
   2 * Copyright (c) 2012 Intel Corporation.  All rights reserved.
   3 * Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved.
   4 *
   5 * This software is available to you under a choice of one of two
   6 * licenses.  You may choose to be licensed under the terms of the GNU
   7 * General Public License (GPL) Version 2, available from the file
   8 * COPYING in the main directory of this source tree, or the
   9 * OpenIB.org BSD license below:
  10 *
  11 *     Redistribution and use in source and binary forms, with or
  12 *     without modification, are permitted provided that the following
  13 *     conditions are met:
  14 *
  15 *      - Redistributions of source code must retain the above
  16 *        copyright notice, this list of conditions and the following
  17 *        disclaimer.
  18 *
  19 *      - Redistributions in binary form must reproduce the above
  20 *        copyright notice, this list of conditions and the following
  21 *        disclaimer in the documentation and/or other materials
  22 *        provided with the distribution.
  23 *
  24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  31 * SOFTWARE.
  32 */
  33
  34/*
  35 * This file contains all of the code that is specific to the
  36 * InfiniPath 7322 chip
  37 */
  38
  39#include <linux/interrupt.h>
  40#include <linux/pci.h>
  41#include <linux/delay.h>
  42#include <linux/io.h>
  43#include <linux/jiffies.h>
  44#include <linux/module.h>
  45#include <rdma/ib_verbs.h>
  46#include <rdma/ib_smi.h>
  47
  48#include "qib.h"
  49#include "qib_7322_regs.h"
  50#include "qib_qsfp.h"
  51
  52#include "qib_mad.h"
  53#include "qib_verbs.h"
  54
  55#undef pr_fmt
  56#define pr_fmt(fmt) QIB_DRV_NAME " " fmt
  57
  58static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
  59static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
  60static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
  61static irqreturn_t qib_7322intr(int irq, void *data);
  62static irqreturn_t qib_7322bufavail(int irq, void *data);
  63static irqreturn_t sdma_intr(int irq, void *data);
  64static irqreturn_t sdma_idle_intr(int irq, void *data);
  65static irqreturn_t sdma_progress_intr(int irq, void *data);
  66static irqreturn_t sdma_cleanup_intr(int irq, void *data);
  67static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
  68                                  struct qib_ctxtdata *rcd);
  69static u8 qib_7322_phys_portstate(u64);
  70static u32 qib_7322_iblink_state(u64);
  71static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
  72                                   u16 linitcmd);
  73static void force_h1(struct qib_pportdata *);
  74static void adj_tx_serdes(struct qib_pportdata *);
  75static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
  76static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
  77
  78static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
  79static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
  80static void serdes_7322_los_enable(struct qib_pportdata *, int);
  81static int serdes_7322_init_old(struct qib_pportdata *);
  82static int serdes_7322_init_new(struct qib_pportdata *);
  83
  84#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
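
/*
 * Editor's note (illustrative, not part of the original source):
 * BMASK(7, 4) expands to (((1 << 4) - 1) << 4) == 0xf0, i.e. a mask
 * covering bits 7..4 inclusive.
 */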
  85
  86/* LE2 serdes values for different cases */
  87#define LE2_DEFAULT 5
  88#define LE2_5m 4
  89#define LE2_QME 0
  90
  91/* Below is special-purpose, so only really works for the IB SerDes blocks. */
  92#define IBSD(hw_pidx) (hw_pidx + 2)
  93
  94/* these are variables for documentation and experimentation purposes */
  95static const unsigned rcv_int_timeout = 375;
  96static const unsigned rcv_int_count = 16;
  97static const unsigned sdma_idle_cnt = 64;
  98
  99/* Time to stop altering Rx Equalization parameters, after link up. */
 100#define RXEQ_DISABLE_MSECS 2500
 101
 102/*
 103 * Number of VLs we are configured to use (to allow for more
 104 * credits per vl, etc.)
 105 */
 106ushort qib_num_cfg_vls = 2;
 107module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
 108MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
 109
 110static ushort qib_chase = 1;
 111module_param_named(chase, qib_chase, ushort, S_IRUGO);
 112MODULE_PARM_DESC(chase, "Enable state chase handling");
 113
 114static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
 115module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
 116MODULE_PARM_DESC(long_attenuation, \
 117                 "attenuation cutoff (dB) for long copper cable setup");
 118
 119static ushort qib_singleport;
 120module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
 121MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
 122
 123static ushort qib_krcvq01_no_msi;
 124module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
 125MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");
 126
 127/*
 128 * Receive header queue sizes
 129 */
 130static unsigned qib_rcvhdrcnt;
 131module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
 132MODULE_PARM_DESC(rcvhdrcnt, "receive header count");
 133
 134static unsigned qib_rcvhdrsize;
 135module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
 136MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");
 137
 138static unsigned qib_rcvhdrentsize;
 139module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
 140MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");
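
/*
 * Editor's note (illustrative usage, not part of the original source):
 * the parameters above are set at module load time, e.g.
 *
 *     modprobe ib_qib num_vls=4 singleport=1 rcvhdrcnt=4096
 *
 * (values here are hypothetical).  With S_IRUGO they are readable but
 * not writable through sysfs once the module is loaded.
 */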
 141
 142#define MAX_ATTEN_LEN 64 /* plenty for any real system */
 143/* for read back, default index is ~5m copper cable */
 144static char txselect_list[MAX_ATTEN_LEN] = "10";
 145static struct kparam_string kp_txselect = {
 146        .string = txselect_list,
 147        .maxlen = MAX_ATTEN_LEN
 148};
 149static int  setup_txselect(const char *, struct kernel_param *);
 150module_param_call(txselect, setup_txselect, param_get_string,
 151                  &kp_txselect, S_IWUSR | S_IRUGO);
 152MODULE_PARM_DESC(txselect, \
 153                 "Tx serdes indices (for no QSFP or invalid QSFP data)");
 154
 155#define BOARD_QME7342 5
 156#define BOARD_QMH7342 6
 157#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
 158                    BOARD_QMH7342)
 159#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
 160                    BOARD_QME7342)
 161
 162#define KREG_IDX(regname)     (QIB_7322_##regname##_OFFS / sizeof(u64))
 163
 164#define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
 165
 166#define MASK_ACROSS(lsb, msb) \
 167        (((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
 168
 169#define SYM_RMASK(regname, fldname) ((u64)              \
 170        QIB_7322_##regname##_##fldname##_RMASK)
 171
 172#define SYM_MASK(regname, fldname) ((u64)               \
 173        QIB_7322_##regname##_##fldname##_RMASK <<       \
 174         QIB_7322_##regname##_##fldname##_LSB)
 175
 176#define SYM_FIELD(value, regname, fldname) ((u64)       \
 177        (((value) >> SYM_LSB(regname, fldname)) &       \
 178         SYM_RMASK(regname, fldname)))
 179
 180/* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
 181#define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
 182        (((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))
 183
 184#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
 185#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
 186#define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
 187#define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
 188#define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
 189/* Below because most, but not all, fields of IntMask have that full suffix */
 190#define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)
 191
 192
 193#define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)
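
/*
 * Editor's sketch (illustrative, not part of the original source): the
 * SYM_* helpers turn the machine-generated QIB_7322_<reg>_<field>_{LSB,
 * RMASK} constants into the usual mask/extract/insert idioms.  For a
 * hypothetical field at LSB 4 with right-justified mask 0xf:
 *
 *     SYM_MASK(reg, fld)            == 0xf0
 *     SYM_FIELD(val, reg, fld)      == (val >> 4) & 0xf
 *     val = (val & ~SYM_MASK(reg, fld)) |
 *           ((u64)newfld << SYM_LSB(reg, fld));   /+ read-modify-write +/
 */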
 194
 195/*
 196 * the size bits give us 2^N, in KB units.  0 marks as invalid,
 197 * and 7 is reserved.  We currently use only 2KB and 4KB
 198 */
 199#define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
 200#define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
 201#define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
 202#define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
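
/*
 * Editor's sketch (illustrative, not part of the original source): a TID
 * entry combines the buffer's physical address, with the low 11 bits
 * dropped, and a size code, roughly:
 *
 *     u64 ent = (pa >> IBA7322_TID_PA_SHIFT) | IBA7322_TID_SZ_4K;
 *
 * which requires the buffer to be at least 2KB aligned.
 */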
 203
 204#define SendIBSLIDAssignMask \
 205        QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
 206#define SendIBSLMCMask \
 207        QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK
 208
 209#define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
 210#define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
 211#define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
 212#define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
 213#define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
 214#define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)
 215
 216#define _QIB_GPIO_SDA_NUM 1
 217#define _QIB_GPIO_SCL_NUM 0
 218#define QIB_EEPROM_WEN_NUM 14
 219#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */
 220
 221/* HW counter clock is at 4nsec */
 222#define QIB_7322_PSXMITWAIT_CHECK_RATE 4000
 223
 224/* full speed IB port 1 only */
 225#define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
 226#define PORT_SPD_CAP_SHIFT 3
 227
 228/* full speed featuremask, both ports */
 229#define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))
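
/*
 * Editor's note (illustrative): with QIB_IB_SDR/DDR/QDR as bits 0..2,
 * PORT_SPD_CAP is 0x7 and DUAL_PORT_CAP is 0x7 | (0x7 << 3) == 0x3f,
 * i.e. one 3-bit speed-cap field per port.
 */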
 230
 231/*
 232 * This file contains almost all the chip-specific register information and
  233 * access functions for the QLogic InfiniPath 7322 PCI-Express chip.
 234 */
 235
 236/* Use defines to tie machine-generated names to lower-case names */
 237#define kr_contextcnt KREG_IDX(ContextCnt)
 238#define kr_control KREG_IDX(Control)
 239#define kr_counterregbase KREG_IDX(CntrRegBase)
 240#define kr_errclear KREG_IDX(ErrClear)
 241#define kr_errmask KREG_IDX(ErrMask)
 242#define kr_errstatus KREG_IDX(ErrStatus)
 243#define kr_extctrl KREG_IDX(EXTCtrl)
 244#define kr_extstatus KREG_IDX(EXTStatus)
 245#define kr_gpio_clear KREG_IDX(GPIOClear)
 246#define kr_gpio_mask KREG_IDX(GPIOMask)
 247#define kr_gpio_out KREG_IDX(GPIOOut)
 248#define kr_gpio_status KREG_IDX(GPIOStatus)
 249#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
 250#define kr_debugportval KREG_IDX(DebugPortValueReg)
 251#define kr_fmask KREG_IDX(feature_mask)
 252#define kr_act_fmask KREG_IDX(active_feature_mask)
 253#define kr_hwerrclear KREG_IDX(HwErrClear)
 254#define kr_hwerrmask KREG_IDX(HwErrMask)
 255#define kr_hwerrstatus KREG_IDX(HwErrStatus)
 256#define kr_intclear KREG_IDX(IntClear)
 257#define kr_intmask KREG_IDX(IntMask)
 258#define kr_intredirect KREG_IDX(IntRedirect0)
 259#define kr_intstatus KREG_IDX(IntStatus)
 260#define kr_pagealign KREG_IDX(PageAlign)
 261#define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
 262#define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
 263#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
 264#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
 265#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
 266#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
 267#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
 268#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
 269#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
 270#define kr_revision KREG_IDX(Revision)
 271#define kr_scratch KREG_IDX(Scratch)
 272#define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
 273#define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
 274#define kr_sendctrl KREG_IDX(SendCtrl)
 275#define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
 276#define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
 277#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
 278#define kr_sendpiobufbase KREG_IDX(SendBufBase)
 279#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
 280#define kr_sendpiosize KREG_IDX(SendBufSize)
 281#define kr_sendregbase KREG_IDX(SendRegBase)
 282#define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
 283#define kr_userregbase KREG_IDX(UserRegBase)
 284#define kr_intgranted KREG_IDX(Int_Granted)
 285#define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
 286#define kr_intblocked KREG_IDX(IntBlocked)
 287#define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)
 288
 289/*
 290 * per-port kernel registers.  Access only with qib_read_kreg_port()
 291 * or qib_write_kreg_port()
 292 */
 293#define krp_errclear KREG_IBPORT_IDX(ErrClear)
 294#define krp_errmask KREG_IBPORT_IDX(ErrMask)
 295#define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
 296#define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
 297#define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
 298#define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
 299#define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
 300#define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
 301#define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
 302#define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
 303#define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
 304#define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
 305#define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
 306#define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
 307#define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
 308#define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
 309#define krp_psinterval KREG_IBPORT_IDX(PSInterval)
 310#define krp_psstart KREG_IBPORT_IDX(PSStart)
 311#define krp_psstat KREG_IBPORT_IDX(PSStat)
 312#define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
 313#define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
 314#define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
 315#define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
 316#define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
 317#define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
 318#define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
 319#define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
 320#define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
 321#define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
 322#define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
 323#define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
 324#define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
 325#define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
 326#define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
 327#define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
 328#define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
 329#define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
 330#define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
 331#define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
 332#define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
 333#define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
 334#define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
 335#define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
 336#define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
 337#define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
 338#define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
 339#define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
 340#define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
 341#define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
 342#define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
 343
 344/*
 345 * Per-context kernel registers.  Access only with qib_read_kreg_ctxt()
 346 * or qib_write_kreg_ctxt()
 347 */
 348#define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
 349#define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
 350
 351/*
 352 * TID Flow table, per context.  Reduces
 353 * number of hdrq updates to one per flow (or on errors).
 354 * context 0 and 1 share same memory, but have distinct
 355 * addresses.  Since for now, we never use expected sends
 356 * on kernel contexts, we don't worry about that (we initialize
 357 * those entries for ctxt 0/1 on driver load twice, for example).
 358 */
 359#define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
 360#define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))
 361
 362/* these are the error bits in the tid flows, and are W1C */
 363#define TIDFLOW_ERRBITS  ( \
 364        (SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
 365        SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
 366        (SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
 367        SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
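
/*
 * Editor's note (illustrative, not from the original source): W1C means
 * write-1-to-clear, so a flow's error state can be cleared by writing
 * the error bits back, along the lines of:
 *
 *     qib_write_ureg(dd, ur_rcvflowtable + flow, TIDFLOW_ERRBITS, ctxt);
 */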
 368
 369/* Most (not all) Counters are per-IBport.
 370 * Requires LBIntCnt is at offset 0 in the group
 371 */
 372#define CREG_IDX(regname) \
 373((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
 374
 375#define crp_badformat CREG_IDX(RxVersionErrCnt)
 376#define crp_err_rlen CREG_IDX(RxLenErrCnt)
 377#define crp_erricrc CREG_IDX(RxICRCErrCnt)
 378#define crp_errlink CREG_IDX(RxLinkMalformCnt)
 379#define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
 380#define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
 381#define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
 382#define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
 383#define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
 384#define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
 385#define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
 386#define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
 387#define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
 388#define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
 389#define crp_pktrcv CREG_IDX(RxDataPktCnt)
 390#define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
 391#define crp_pktsend CREG_IDX(TxDataPktCnt)
 392#define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
 393#define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
 394#define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
 395#define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
 396#define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
 397#define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
 398#define crp_rcvebp CREG_IDX(RxEBPCnt)
 399#define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
 400#define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
 401#define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
 402#define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
 403#define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
 404#define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
 405#define crp_rxvlerr CREG_IDX(RxVlErrCnt)
 406#define crp_sendstall CREG_IDX(TxFlowStallCnt)
 407#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
 408#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
 409#define crp_txlenerr CREG_IDX(TxLenErrCnt)
 410#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
 411#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
 412#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
 413#define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
 414#define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
 415#define crp_wordrcv CREG_IDX(RxDwordCnt)
 416#define crp_wordsend CREG_IDX(TxDwordCnt)
 417#define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)
 418
 419/* these are the (few) counters that are not port-specific */
 420#define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
 421                        QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
 422#define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
 423#define cr_lbint CREG_DEVIDX(LBIntCnt)
 424#define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
 425#define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
 426#define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
 427#define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
 428#define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)
 429
 430/* no chip register for # of IB ports supported, so define */
 431#define NUM_IB_PORTS 2
 432
 433/* 1 VL15 buffer per hardware IB port, no register for this, so define */
 434#define NUM_VL15_BUFS NUM_IB_PORTS
 435
 436/*
 437 * context 0 and 1 are special, and there is no chip register that
 438 * defines this value, so we have to define it here.
 439 * These are all allocated to either 0 or 1 for single port
 440 * hardware configuration, otherwise each gets half
 441 */
 442#define KCTXT0_EGRCNT 2048
 443
 444/* values for vl and port fields in PBC, 7322-specific */
 445#define PBC_PORT_SEL_LSB 26
 446#define PBC_PORT_SEL_RMASK 1
 447#define PBC_VL_NUM_LSB 27
 448#define PBC_VL_NUM_RMASK 7
 449#define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
 450#define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
 451
 452static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
 453        [IB_RATE_2_5_GBPS] = 16,
 454        [IB_RATE_5_GBPS] = 8,
 455        [IB_RATE_10_GBPS] = 4,
 456        [IB_RATE_20_GBPS] = 2,
 457        [IB_RATE_30_GBPS] = 2,
 458        [IB_RATE_40_GBPS] = 1
 459};
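
/*
 * Editor's note: the delay factors above are effectively 40 Gbps divided
 * by the link rate, rounded up (16 at 2.5 Gbps ... 1 at 40 Gbps; 30 Gbps
 * rounds up to 2), used elsewhere in the driver as a static-rate delay
 * multiple for slower links.
 */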
 460
 461#define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
 462#define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)
 463
 464/* link training states, from IBC */
 465#define IB_7322_LT_STATE_DISABLED        0x00
 466#define IB_7322_LT_STATE_LINKUP          0x01
 467#define IB_7322_LT_STATE_POLLACTIVE      0x02
 468#define IB_7322_LT_STATE_POLLQUIET       0x03
 469#define IB_7322_LT_STATE_SLEEPDELAY      0x04
 470#define IB_7322_LT_STATE_SLEEPQUIET      0x05
 471#define IB_7322_LT_STATE_CFGDEBOUNCE     0x08
 472#define IB_7322_LT_STATE_CFGRCVFCFG      0x09
 473#define IB_7322_LT_STATE_CFGWAITRMT      0x0a
 474#define IB_7322_LT_STATE_CFGIDLE         0x0b
 475#define IB_7322_LT_STATE_RECOVERRETRAIN  0x0c
 476#define IB_7322_LT_STATE_TXREVLANES      0x0d
 477#define IB_7322_LT_STATE_RECOVERWAITRMT  0x0e
 478#define IB_7322_LT_STATE_RECOVERIDLE     0x0f
 479#define IB_7322_LT_STATE_CFGENH          0x10
 480#define IB_7322_LT_STATE_CFGTEST         0x11
 481#define IB_7322_LT_STATE_CFGWAITRMTTEST  0x12
 482#define IB_7322_LT_STATE_CFGWAITENH      0x13
 483
 484/* link state machine states from IBC */
 485#define IB_7322_L_STATE_DOWN             0x0
 486#define IB_7322_L_STATE_INIT             0x1
 487#define IB_7322_L_STATE_ARM              0x2
 488#define IB_7322_L_STATE_ACTIVE           0x3
 489#define IB_7322_L_STATE_ACT_DEFER        0x4
 490
 491static const u8 qib_7322_physportstate[0x20] = {
 492        [IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
 493        [IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
 494        [IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
 495        [IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
 496        [IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
 497        [IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
 498        [IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
 499        [IB_7322_LT_STATE_CFGRCVFCFG] =
 500                IB_PHYSPORTSTATE_CFG_TRAIN,
 501        [IB_7322_LT_STATE_CFGWAITRMT] =
 502                IB_PHYSPORTSTATE_CFG_TRAIN,
 503        [IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
 504        [IB_7322_LT_STATE_RECOVERRETRAIN] =
 505                IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
 506        [IB_7322_LT_STATE_RECOVERWAITRMT] =
 507                IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
 508        [IB_7322_LT_STATE_RECOVERIDLE] =
 509                IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
 510        [IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
 511        [IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
 512        [IB_7322_LT_STATE_CFGWAITRMTTEST] =
 513                IB_PHYSPORTSTATE_CFG_TRAIN,
 514        [IB_7322_LT_STATE_CFGWAITENH] =
 515                IB_PHYSPORTSTATE_CFG_WAIT_ENH,
 516        [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
 517        [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
 518        [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
 519        [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
 520};
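
/*
 * Editor's sketch (illustrative, approximate): qib_7322_phys_portstate()
 * maps the IBC link training state from IBCStatusA through this table,
 * roughly:
 *
 *     static u8 qib_7322_phys_portstate(u64 ibcs)
 *     {
 *             u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0,
 *                                      LinkTrainingState);
 *             return qib_7322_physportstate[state];
 *     }
 */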
 521
 522struct qib_chip_specific {
 523        u64 __iomem *cregbase;
 524        u64 *cntrs;
 525        spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
 526        spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
 527        u64 main_int_mask;      /* clear bits which have dedicated handlers */
 528        u64 int_enable_mask;  /* for per port interrupts in single port mode */
 529        u64 errormask;
 530        u64 hwerrmask;
 531        u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
 532        u64 gpio_mask; /* shadow the gpio mask register */
 533        u64 extctrl; /* shadow the gpio output enable, etc... */
 534        u32 ncntrs;
 535        u32 nportcntrs;
 536        u32 cntrnamelen;
 537        u32 portcntrnamelen;
 538        u32 numctxts;
 539        u32 rcvegrcnt;
 540        u32 updthresh; /* current AvailUpdThld */
 541        u32 updthresh_dflt; /* default AvailUpdThld */
 542        u32 r1;
 543        int irq;
 544        u32 num_msix_entries;
 545        u32 sdmabufcnt;
 546        u32 lastbuf_for_pio;
 547        u32 stay_in_freeze;
 548        u32 recovery_ports_initted;
 549        struct qib_msix_entry *msix_entries;
 550        unsigned long *sendchkenable;
 551        unsigned long *sendgrhchk;
 552        unsigned long *sendibchk;
 553        u32 rcvavail_timeout[18];
 554        char emsgbuf[128]; /* for device error interrupt msg buffer */
 555};
 556
  557/* Table of Tx Emphasis entries, in "human readable" form. */
 558struct txdds_ent {
 559        u8 amp;
 560        u8 pre;
 561        u8 main;
 562        u8 post;
 563};
 564
 565struct vendor_txdds_ent {
 566        u8 oui[QSFP_VOUI_LEN];
 567        u8 *partnum;
 568        struct txdds_ent sdr;
 569        struct txdds_ent ddr;
 570        struct txdds_ent qdr;
 571};
 572
 573static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
 574
 575#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
 576#define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */
 577#define TXDDS_MFG_SZ 2    /* number of mfg tx settings entries */
 578#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
 579
 580#define H1_FORCE_VAL 8
 581#define H1_FORCE_QME 1 /*  may be overridden via setup_txselect() */
 582#define H1_FORCE_QMH 7 /*  may be overridden via setup_txselect() */
 583
 584/* The static and dynamic registers are paired, and the pairs indexed by spd */
 585#define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
 586        + ((spd) * 2))
 587
 588#define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
 589#define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
 590#define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
 591#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
 592#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */
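
/*
 * Editor's note (illustrative, approximate): serdes bring-up code later
 * in this file writes these patterns along the lines of
 *
 *     qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
 *                         ppd->dd->cspec->r1 ?
 *                         QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
 *
 * i.e. spd index 2 (QDR) of the paired static/dynamic registers.
 */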
 593
 594struct qib_chippport_specific {
 595        u64 __iomem *kpregbase;
 596        u64 __iomem *cpregbase;
 597        u64 *portcntrs;
 598        struct qib_pportdata *ppd;
 599        wait_queue_head_t autoneg_wait;
 600        struct delayed_work autoneg_work;
 601        struct delayed_work ipg_work;
 602        struct timer_list chase_timer;
 603        /*
 604         * these 5 fields are used to establish deltas for IB symbol
 605         * errors and linkrecovery errors.  They can be reported on
 606         * some chips during link negotiation prior to INIT, and with
 607         * DDR when faking DDR negotiations with non-IBTA switches.
 608         * The chip counters are adjusted at driver unload if there is
 609         * a non-zero delta.
 610         */
 611        u64 ibdeltainprog;
 612        u64 ibsymdelta;
 613        u64 ibsymsnap;
 614        u64 iblnkerrdelta;
 615        u64 iblnkerrsnap;
 616        u64 iblnkdownsnap;
 617        u64 iblnkdowndelta;
 618        u64 ibmalfdelta;
 619        u64 ibmalfsnap;
 620        u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
 621        u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
 622        unsigned long qdr_dfe_time;
 623        unsigned long chase_end;
 624        u32 autoneg_tries;
 625        u32 recovery_init;
 626        u32 qdr_dfe_on;
 627        u32 qdr_reforce;
 628        /*
 629         * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
 630         * entry zero is unused, to simplify indexing
 631         */
 632        u8 h1_val;
 633        u8 no_eep;  /* txselect table index to use if no qsfp info */
 634        u8 ipg_tries;
 635        u8 ibmalfusesnap;
 636        struct qib_qsfp_data qsfp_data;
 637        char epmsgbuf[192]; /* for port error interrupt msg buffer */
 638};
 639
 640static struct {
 641        const char *name;
 642        irq_handler_t handler;
 643        int lsb;
 644        int port; /* 0 if not port-specific, else port # */
 645} irq_table[] = {
 646        { "", qib_7322intr, -1, 0 },
 647        { " (buf avail)", qib_7322bufavail,
 648                SYM_LSB(IntStatus, SendBufAvail), 0 },
 649        { " (sdma 0)", sdma_intr,
 650                SYM_LSB(IntStatus, SDmaInt_0), 1 },
 651        { " (sdma 1)", sdma_intr,
 652                SYM_LSB(IntStatus, SDmaInt_1), 2 },
 653        { " (sdmaI 0)", sdma_idle_intr,
 654                SYM_LSB(IntStatus, SDmaIdleInt_0), 1 },
 655        { " (sdmaI 1)", sdma_idle_intr,
 656                SYM_LSB(IntStatus, SDmaIdleInt_1), 2 },
 657        { " (sdmaP 0)", sdma_progress_intr,
 658                SYM_LSB(IntStatus, SDmaProgressInt_0), 1 },
 659        { " (sdmaP 1)", sdma_progress_intr,
 660                SYM_LSB(IntStatus, SDmaProgressInt_1), 2 },
 661        { " (sdmaC 0)", sdma_cleanup_intr,
 662                SYM_LSB(IntStatus, SDmaCleanupDone_0), 1 },
 663        { " (sdmaC 1)", sdma_cleanup_intr,
 664                SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 },
 665};
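
/*
 * Editor's sketch (illustrative, approximate): the MSI-X setup code walks
 * this table, naming each vector from the entry's suffix and passing the
 * per-port data when .port is nonzero, roughly:
 *
 *     void *arg = irq_table[i].port ?
 *             (void *)(dd->pport + irq_table[i].port - 1) : (void *)dd;
 *     snprintf(name, sizeof(name), QIB_DRV_NAME "%d%s",
 *              dd->unit, irq_table[i].name);
 *     request_irq(vector, irq_table[i].handler, 0, name, arg);
 *
 * where "vector" stands for the MSI-X vector assigned to entry i.
 */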
 666
 667/* ibcctrl bits */
 668#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
 669/* cycle through TS1/TS2 till OK */
 670#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
 671/* wait for TS1, then go on */
 672#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
 673#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
 674
 675#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1           /* move to 0x11 */
 676#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2          /* move to 0x21 */
 677#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
 678
 679#define BLOB_7322_IBCHG 0x101
 680
 681static inline void qib_write_kreg(const struct qib_devdata *dd,
 682                                  const u32 regno, u64 value);
 683static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
 684static void write_7322_initregs(struct qib_devdata *);
 685static void write_7322_init_portregs(struct qib_pportdata *);
 686static void setup_7322_link_recovery(struct qib_pportdata *, u32);
 687static void check_7322_rxe_status(struct qib_pportdata *);
 688static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
 689
 690/**
 691 * qib_read_ureg32 - read 32-bit virtualized per-context register
 692 * @dd: device
 693 * @regno: register number
 694 * @ctxt: context number
 695 *
 696 * Return the contents of a register that is virtualized to be per context.
  697 * Returns 0 on errors (not distinguishable from valid contents at
 698 * runtime; we may add a separate error variable at some point).
 699 */
 700static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
 701                                  enum qib_ureg regno, int ctxt)
 702{
 703        if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
 704                return 0;
 705        return readl(regno + (u64 __iomem *)(
 706                (dd->ureg_align * ctxt) + (dd->userbase ?
 707                 (char __iomem *)dd->userbase :
 708                 (char __iomem *)dd->kregbase + dd->uregbase)));
 709}
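
/*
 * Editor's note (illustrative usage): e.g. reading a context's receive
 * header queue head, with ur_rcvhdrhead from enum qib_ureg in qib.h:
 *
 *     u32 head = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt);
 */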
 710
 711/**
 712 * qib_read_ureg - read virtualized per-context register
 713 * @dd: device
 714 * @regno: register number
 715 * @ctxt: context number
 716 *
 717 * Return the contents of a register that is virtualized to be per context.
  718 * Returns 0 on errors (not distinguishable from valid contents at
 719 * runtime; we may add a separate error variable at some point).
 720 */
 721static inline u64 qib_read_ureg(const struct qib_devdata *dd,
 722                                enum qib_ureg regno, int ctxt)
 723{
 725        if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
 726                return 0;
 727        return readq(regno + (u64 __iomem *)(
 728                (dd->ureg_align * ctxt) + (dd->userbase ?
 729                 (char __iomem *)dd->userbase :
 730                 (char __iomem *)dd->kregbase + dd->uregbase)));
 731}
 732
 733/**
 734 * qib_write_ureg - write virtualized per-context register
 735 * @dd: device
 736 * @regno: register number
 737 * @value: value
 738 * @ctxt: context
 739 *
 740 * Write the contents of a register that is virtualized to be per context.
 741 */
 742static inline void qib_write_ureg(const struct qib_devdata *dd,
 743                                  enum qib_ureg regno, u64 value, int ctxt)
 744{
 745        u64 __iomem *ubase;
 746        if (dd->userbase)
 747                ubase = (u64 __iomem *)
 748                        ((char __iomem *) dd->userbase +
 749                         dd->ureg_align * ctxt);
 750        else
 751                ubase = (u64 __iomem *)
 752                        (dd->uregbase +
 753                         (char __iomem *) dd->kregbase +
 754                         dd->ureg_align * ctxt);
 755
 756        if (dd->kregbase && (dd->flags & QIB_PRESENT))
 757                writeq(value, &ubase[regno]);
 758}
 759
 760static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
 761                                  const u32 regno)
 762{
 763        if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
 764                return -1;
 765        return readl((u32 __iomem *) &dd->kregbase[regno]);
 766}
 767
 768static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
 769                                  const u32 regno)
 770{
 771        if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
 772                return -1;
 773        return readq(&dd->kregbase[regno]);
 774}
 775
 776static inline void qib_write_kreg(const struct qib_devdata *dd,
 777                                  const u32 regno, u64 value)
 778{
 779        if (dd->kregbase && (dd->flags & QIB_PRESENT))
 780                writeq(value, &dd->kregbase[regno]);
 781}
 782
 783/*
 784 * not many sanity checks for the port-specific kernel register routines,
 785 * since they are only used when it's known to be safe.
  786 */
 787static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
 788                                     const u16 regno)
 789{
 790        if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
 791                return 0ULL;
 792        return readq(&ppd->cpspec->kpregbase[regno]);
 793}
 794
 795static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
 796                                       const u16 regno, u64 value)
 797{
 798        if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
 799            (ppd->dd->flags & QIB_PRESENT))
 800                writeq(value, &ppd->cpspec->kpregbase[regno]);
 801}
 802
 803/**
 804 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
 805 * @dd: the qlogic_ib device
 806 * @regno: the register number to write
 807 * @ctxt: the context containing the register
 808 * @value: the value to write
 809 */
 810static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
 811                                       const u16 regno, unsigned ctxt,
 812                                       u64 value)
 813{
 814        qib_write_kreg(dd, regno + ctxt, value);
 815}
 816
 817static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
 818{
 819        if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
 820                return 0;
 821        return readq(&dd->cspec->cregbase[regno]);
 824}
 825
 826static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
 827{
 828        if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
 829                return 0;
 830        return readl(&dd->cspec->cregbase[regno]);
 833}
 834
 835static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
 836                                        u16 regno, u64 value)
 837{
 838        if (ppd->cpspec && ppd->cpspec->cpregbase &&
 839            (ppd->dd->flags & QIB_PRESENT))
 840                writeq(value, &ppd->cpspec->cpregbase[regno]);
 841}
 842
 843static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
 844                                      u16 regno)
 845{
 846        if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
 847            !(ppd->dd->flags & QIB_PRESENT))
 848                return 0;
 849        return readq(&ppd->cpspec->cpregbase[regno]);
 850}
 851
 852static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
 853                                        u16 regno)
 854{
 855        if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
 856            !(ppd->dd->flags & QIB_PRESENT))
 857                return 0;
 858        return readl(&ppd->cpspec->cpregbase[regno]);
 859}
 860
 861/* bits in Control register */
 862#define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
 863#define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)
 864
 865/* bits in general interrupt regs */
 866#define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
 867#define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
 868#define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
 869#define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
 870#define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
 871#define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
 872#define QIB_I_C_ERROR INT_MASK(Err)
 873
 874#define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
 875#define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
 876#define QIB_I_GPIO INT_MASK(AssertGPIO)
 877#define QIB_I_P_SDMAINT(pidx) \
 878        (INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
 879         INT_MASK_P(SDmaProgress, pidx) | \
 880         INT_MASK_PM(SDmaCleanupDone, pidx))
 881
 882/* Interrupt bits that are "per port" */
 883#define QIB_I_P_BITSEXTANT(pidx) \
 884        (INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
 885        INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
 886        INT_MASK_P(SDmaProgress, pidx) | \
 887        INT_MASK_PM(SDmaCleanupDone, pidx))
 888
 889/* Interrupt bits that are common to a device */
 890/* currently unused: QIB_I_SPIOSENT */
 891#define QIB_I_C_BITSEXTANT \
 892        (QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
 893        QIB_I_SPIOSENT | \
 894        QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)
 895
 896#define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
 897        QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))
 898
 899/*
 900 * Error bits that are "per port".
 901 */
 902#define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
 903#define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
 904#define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
 905#define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
 906#define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
 907#define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
 908#define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
 909#define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
 910#define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
 911#define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
 912#define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
 913#define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
 914#define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
 915#define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
 916#define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
 917#define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
 918#define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
 919#define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
 920#define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
 921#define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
 922#define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
 923#define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
 924#define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
 925#define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
 926#define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
 927#define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
 928#define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
 929#define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
 930
 931#define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
 932#define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
 933#define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
 934#define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
 935#define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
 936#define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
 937#define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
 938#define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
 939#define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
 940#define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
 941#define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)
 942
 943/* Error bits that are common to a device */
 944#define QIB_E_RESET ERR_MASK(ResetNegated)
 945#define QIB_E_HARDWARE ERR_MASK(HardwareErr)
 946#define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)
 947
 948
 949/*
 950 * Per chip (rather than per-port) errors.  Most either do
 951 * nothing but trigger a print (because they self-recover, or
 952 * always occur in tandem with other errors that handle the
  953 * issue), or indicate errors with no recovery, which we still
  954 * want to know happened.
 955 */
 956#define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
 957#define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
 958#define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
 959#define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
 960#define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
 961#define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
 962#define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
 963#define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)
 964
 965/* SDMA chip errors (not per port)
 966 * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
 967 * the SDMAHALT error immediately, so we just print the dup error via the
 968 * E_AUTO mechanism.  This is true of most of the per-port fatal errors
 969 * as well, but since this is port-independent, by definition, it's
 970 * handled a bit differently.  SDMA_VL15 and SDMA_WRONG_PORT are per
 971 * packet send errors, and so are handled in the same manner as other
 972 * per-packet errors.
 973 */
 974#define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
 975#define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
 976#define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)
 977
 978/*
  979 * Below is functionally equivalent to the legacy QLOGIC_IB_E_PKTERRS;
  980 * it is used to print "common" packet errors.
 981 */
 982#define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
 983        QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
 984        QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
 985        QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
 986        QIB_E_P_REBP)
 987
  988/* Error bits that are packet-related (Receive, per-port) */
 989#define QIB_E_P_RPKTERRS (\
 990        QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
 991        QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
 992        QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
 993        QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
 994        QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
 995        QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)
 996
 997/*
 998 * Error bits that are Send-related (per port)
 999 * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
1000 * All of these potentially need to have a buffer disarmed
1001 */
1002#define QIB_E_P_SPKTERRS (\
1003        QIB_E_P_SUNEXP_PKTNUM |\
1004        QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1005        QIB_E_P_SMAXPKTLEN |\
1006        QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
1007        QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
1008        QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)
1009
1010#define QIB_E_SPKTERRS ( \
1011                QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
1012                ERR_MASK_N(SendUnsupportedVLErr) |                      \
1013                QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)
1014
1015#define QIB_E_P_SDMAERRS ( \
1016        QIB_E_P_SDMAHALT | \
1017        QIB_E_P_SDMADESCADDRMISALIGN | \
1018        QIB_E_P_SDMAUNEXPDATA | \
1019        QIB_E_P_SDMAMISSINGDW | \
1020        QIB_E_P_SDMADWEN | \
1021        QIB_E_P_SDMARPYTAG | \
1022        QIB_E_P_SDMA1STDESC | \
1023        QIB_E_P_SDMABASE | \
1024        QIB_E_P_SDMATAILOUTOFBOUND | \
1025        QIB_E_P_SDMAOUTOFBOUND | \
1026        QIB_E_P_SDMAGENMISMATCH)
1027
1028/*
1029 * This sets some bits more than once, but makes it more obvious which
1030 * bits are not handled under other categories, and the repeat definition
1031 * is not a problem.
1032 */
1033#define QIB_E_P_BITSEXTANT ( \
1034        QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
1035        QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
1036        QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
1037        QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
1038        )
1039
1040/*
1041 * These are errors that can occur when the link
1042 * changes state while a packet is being sent or received.  This doesn't
 1043 * cover things like EBP or VCRC that can result from the link changing
 1044 * state during a send, so we receive a "known bad" packet.
1045 * All of these are "per port", so renamed:
1046 */
1047#define QIB_E_P_LINK_PKTERRS (\
1048        QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1049        QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
1050        QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
1051        QIB_E_P_RUNEXPCHAR)
1052
1053/*
1054 * This sets some bits more than once, but makes it more obvious which
1055 * bits are not handled under other categories (such as QIB_E_SPKTERRS),
1056 * and the repeat definition is not a problem.
1057 */
1058#define QIB_E_C_BITSEXTANT (\
1059        QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
1060        QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
1061        QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)
1062
1063/* Likewise Neuter E_SPKT_ERRS_IGNORE */
1064#define E_SPKT_ERRS_IGNORE 0
1065
1066#define QIB_EXTS_MEMBIST_DISABLED \
1067        SYM_MASK(EXTStatus, MemBISTDisabled)
1068#define QIB_EXTS_MEMBIST_ENDTEST \
1069        SYM_MASK(EXTStatus, MemBISTEndTest)
1070
1071#define QIB_E_SPIOARMLAUNCH \
1072        ERR_MASK(SendArmLaunchErr)
1073
1074#define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
1075#define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)
1076
1077/*
1078 * IBTA_1_2 is set when multiple speeds are enabled (normal),
1079 * and also if forced QDR (only QDR enabled).  It's enabled for the
1080 * forced QDR case so that scrambling will be enabled by the TS3
1081 * exchange, when supported by both sides of the link.
1082 */
1083#define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
1084#define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
1085#define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
1086#define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
1087#define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
1088#define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
1089        SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
1090#define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)
1091
1092#define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
1093#define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)
1094
1095#define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
1096#define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1097#define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1098
1099#define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1100#define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1101#define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
1102        SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
1103#define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
1104        SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
1105#define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)
1106
1107#define IBA7322_REDIRECT_VEC_PER_REG 12
1108
1109#define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
1110#define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
1111#define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
1112#define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
1113#define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)
1114
1115#define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */
1116
1117#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
1118        .msg = #fldname , .sz = sizeof(#fldname) }
1119#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
1120        fldname##Mask##_##port), .msg = #fldname , .sz = sizeof(#fldname) }
1121static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
1122        HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
1123        HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
1124        HWE_AUTO(PCIESerdesPClkNotDetect),
1125        HWE_AUTO(PowerOnBISTFailed),
1126        HWE_AUTO(TempsenseTholdReached),
1127        HWE_AUTO(MemoryErr),
1128        HWE_AUTO(PCIeBusParityErr),
1129        HWE_AUTO(PcieCplTimeout),
1130        HWE_AUTO(PciePoisonedTLP),
1131        HWE_AUTO_P(SDmaMemReadErr, 1),
1132        HWE_AUTO_P(SDmaMemReadErr, 0),
1133        HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
1134        HWE_AUTO_P(IBCBusToSPCParityErr, 1),
1135        HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
1136        HWE_AUTO(statusValidNoEop),
1137        HWE_AUTO(LATriggered),
1138        { .mask = 0, .sz = 0 }
1139};
1140
1141#define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
1142        .msg = #fldname, .sz = sizeof(#fldname) }
1143#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
1144        .msg = #fldname, .sz = sizeof(#fldname) }
1145static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
1146        E_AUTO(RcvEgrFullErr),
1147        E_AUTO(RcvHdrFullErr),
1148        E_AUTO(ResetNegated),
1149        E_AUTO(HardwareErr),
1150        E_AUTO(InvalidAddrErr),
1151        E_AUTO(SDmaVL15Err),
1152        E_AUTO(SBufVL15MisUseErr),
1153        E_AUTO(InvalidEEPCmd),
1154        E_AUTO(RcvContextShareErr),
1155        E_AUTO(SendVLMismatchErr),
1156        E_AUTO(SendArmLaunchErr),
1157        E_AUTO(SendSpecialTriggerErr),
1158        E_AUTO(SDmaWrongPortErr),
1159        E_AUTO(SDmaBufMaskDuplicateErr),
1160        { .mask = 0, .sz = 0 }
1161};
1162
1163static const struct  qib_hwerror_msgs qib_7322p_error_msgs[] = {
1164        E_P_AUTO(IBStatusChanged),
1165        E_P_AUTO(SHeadersErr),
1166        E_P_AUTO(VL15BufMisuseErr),
1167        /*
 1168         * SDmaHaltErr is not really an error; make that clearer.
1169         */
1170        {.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
1171                .sz = 11},
1172        E_P_AUTO(SDmaDescAddrMisalignErr),
1173        E_P_AUTO(SDmaUnexpDataErr),
1174        E_P_AUTO(SDmaMissingDwErr),
1175        E_P_AUTO(SDmaDwEnErr),
1176        E_P_AUTO(SDmaRpyTagErr),
1177        E_P_AUTO(SDma1stDescErr),
1178        E_P_AUTO(SDmaBaseErr),
1179        E_P_AUTO(SDmaTailOutOfBoundErr),
1180        E_P_AUTO(SDmaOutOfBoundErr),
1181        E_P_AUTO(SDmaGenMismatchErr),
1182        E_P_AUTO(SendBufMisuseErr),
1183        E_P_AUTO(SendUnsupportedVLErr),
1184        E_P_AUTO(SendUnexpectedPktNumErr),
1185        E_P_AUTO(SendDroppedDataPktErr),
1186        E_P_AUTO(SendDroppedSmpPktErr),
1187        E_P_AUTO(SendPktLenErr),
1188        E_P_AUTO(SendUnderRunErr),
1189        E_P_AUTO(SendMaxPktLenErr),
1190        E_P_AUTO(SendMinPktLenErr),
1191        E_P_AUTO(RcvIBLostLinkErr),
1192        E_P_AUTO(RcvHdrErr),
1193        E_P_AUTO(RcvHdrLenErr),
1194        E_P_AUTO(RcvBadTidErr),
1195        E_P_AUTO(RcvBadVersionErr),
1196        E_P_AUTO(RcvIBFlowErr),
1197        E_P_AUTO(RcvEBPErr),
1198        E_P_AUTO(RcvUnsupportedVLErr),
1199        E_P_AUTO(RcvUnexpectedCharErr),
1200        E_P_AUTO(RcvShortPktLenErr),
1201        E_P_AUTO(RcvLongPktLenErr),
1202        E_P_AUTO(RcvMaxPktLenErr),
1203        E_P_AUTO(RcvMinPktLenErr),
1204        E_P_AUTO(RcvICRCErr),
1205        E_P_AUTO(RcvVCRCErr),
1206        E_P_AUTO(RcvFormatErr),
1207        { .mask = 0, .sz = 0 }
1208};
1209
1210/*
1211 * Below generates "auto-message" for interrupts not specific to any port or
1212 * context
1213 */
1214#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
1215        .msg = #fldname, .sz = sizeof(#fldname) }
1216/* Below generates "auto-message" for interrupts specific to a port */
1217#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
1218        SYM_LSB(IntMask, fldname##Mask##_0), \
1219        SYM_LSB(IntMask, fldname##Mask##_1)), \
1220        .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
1221/* For some reason, the SerDesTrimDone bits are reversed */
1222#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
1223        SYM_LSB(IntMask, fldname##Mask##_1), \
1224        SYM_LSB(IntMask, fldname##Mask##_0)), \
1225        .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
1226/*
1227 * Below generates "auto-message" for interrupts specific to a context,
1228 * with ctxt-number appended
1229 */
1230#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
1231        SYM_LSB(IntMask, fldname##0IntMask), \
1232        SYM_LSB(IntMask, fldname##17IntMask)), \
1233        .msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
1234
1235static const struct  qib_hwerror_msgs qib_7322_intr_msgs[] = {
1236        INTR_AUTO_P(SDmaInt),
1237        INTR_AUTO_P(SDmaProgressInt),
1238        INTR_AUTO_P(SDmaIdleInt),
1239        INTR_AUTO_P(SDmaCleanupDone),
1240        INTR_AUTO_C(RcvUrg),
1241        INTR_AUTO_P(ErrInt),
1242        INTR_AUTO(ErrInt),      /* non-port-specific errs */
1243        INTR_AUTO(AssertGPIOInt),
1244        INTR_AUTO_P(SendDoneInt),
1245        INTR_AUTO(SendBufAvailInt),
1246        INTR_AUTO_C(RcvAvail),
1247        { .mask = 0, .sz = 0 }
1248};
1249
1250#define TXSYMPTOM_AUTO_P(fldname) \
1251        { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
1252        .msg = #fldname, .sz = sizeof(#fldname) }
1253static const struct  qib_hwerror_msgs hdrchk_msgs[] = {
1254        TXSYMPTOM_AUTO_P(NonKeyPacket),
1255        TXSYMPTOM_AUTO_P(GRHFail),
1256        TXSYMPTOM_AUTO_P(PkeyFail),
1257        TXSYMPTOM_AUTO_P(QPFail),
1258        TXSYMPTOM_AUTO_P(SLIDFail),
1259        TXSYMPTOM_AUTO_P(RawIPV6),
1260        TXSYMPTOM_AUTO_P(PacketTooSmall),
1261        { .mask = 0, .sz = 0 }
1262};
1263
1264#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
1265
1266/*
1267 * Called when we might have an error that is specific to a particular
 1268 * PIO buffer, and may need to cancel that buffer so it can be re-used;
 1269 * we don't need to force the update of pioavail.
1270 */
1271static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
1272{
1273        struct qib_devdata *dd = ppd->dd;
1274        u32 i;
1275        int any;
1276        u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
1277        u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
1278        unsigned long sbuf[4];
1279
1280        /*
1281         * It's possible that sendbuffererror could have bits set; might
1282         * have already done this as a result of hardware error handling.
1283         */
1284        any = 0;
1285        for (i = 0; i < regcnt; ++i) {
1286                sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
1287                if (sbuf[i]) {
1288                        any = 1;
1289                        qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
1290                }
1291        }
1292
1293        if (any)
1294                qib_disarm_piobufs_set(dd, sbuf, piobcnt);
1295}
1296
1297/* No txe_recover yet, if ever */
1298
1299/* No decode_errors yet */
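    /*
     * Decode the bits set in "errs" into "msg" (at most "len" bytes,
     * NUL-terminated) as a comma-separated list of names from the table
     * "msp".  Multi-bit masks get a "_%d" suffix giving the bit's offset
     * within the mask (e.g. the port or context number); any bits the
     * table does not describe are appended in hex as "MORE:%llX".
     */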
1300static void err_decode(char *msg, size_t len, u64 errs,
1301                       const struct qib_hwerror_msgs *msp)
1302{
1303        u64 these, lmask;
1304        int took, multi, n = 0;
1305
1306        while (errs && msp && msp->mask) {
1307                multi = (msp->mask & (msp->mask - 1));
1308                while (errs & msp->mask) {
1309                        these = (errs & msp->mask);
1310                        lmask = (these & (these - 1)) ^ these;
1311                        if (len) {
1312                                if (n++) {
1313                                        /* separate the strings */
1314                                        *msg++ = ',';
1315                                        len--;
1316                                }
1317                                BUG_ON(!msp->sz);
1318                                /* msp->sz counts the nul */
1319                                took = min_t(size_t, msp->sz - (size_t)1, len);
1320                                memcpy(msg,  msp->msg, took);
1321                                len -= took;
1322                                msg += took;
1323                                if (len)
1324                                        *msg = '\0';
1325                        }
1326                        errs &= ~lmask;
1327                        if (len && multi) {
1328                                /* More than one bit in this mask */
1329                                int idx = -1;
1330
1331                                while (lmask & msp->mask) {
1332                                        ++idx;
1333                                        lmask >>= 1;
1334                                }
1335                                took = scnprintf(msg, len, "_%d", idx);
1336                                len -= took;
1337                                msg += took;
1338                        }
1339                }
1340                ++msp;
1341        }
1342        /* If some bits are left, show in hex. */
1343        if (len && errs)
1344                snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
1345                        (unsigned long long) errs);
1346}
1347
1348/* only called if r1 set */
1349static void flush_fifo(struct qib_pportdata *ppd)
1350{
1351        struct qib_devdata *dd = ppd->dd;
1352        u32 __iomem *piobuf;
1353        u32 bufn;
1354        u32 *hdr;
1355        u64 pbc;
1356        const unsigned hdrwords = 7;
1357        static struct qib_ib_header ibhdr = {
1358                .lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
1359                .lrh[1] = IB_LID_PERMISSIVE,
1360                .lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
1361                .lrh[3] = IB_LID_PERMISSIVE,
1362                .u.oth.bth[0] = cpu_to_be32(
1363                        (IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
1364                .u.oth.bth[1] = cpu_to_be32(0),
1365                .u.oth.bth[2] = cpu_to_be32(0),
1366                .u.oth.u.ud.deth[0] = cpu_to_be32(0),
1367                .u.oth.u.ud.deth[1] = cpu_to_be32(0),
1368        };
1369
1370        /*
1371         * Send a dummy VL15 packet to flush the launch FIFO.
1372         * This will not actually be sent since the TxeBypassIbc bit is set.
1373         */
1374        pbc = PBC_7322_VL15_SEND |
1375                (((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
1376                (hdrwords + SIZE_OF_CRC);
1377        piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
1378        if (!piobuf)
1379                return;
1380        writeq(pbc, piobuf);
1381        hdr = (u32 *) &ibhdr;
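            /*
             * For write-combining mappings, copy all but the last header
             * word, flush, then write the final word separately so the
             * packet contents are fully ordered before it can launch.
             */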
1382        if (dd->flags & QIB_PIO_FLUSH_WC) {
1383                qib_flush_wc();
1384                qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
1385                qib_flush_wc();
1386                __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
1387                qib_flush_wc();
1388        } else
1389                qib_pio_copy(piobuf + 2, hdr, hdrwords);
1390        qib_sendbuf_done(dd, bufn);
1391}
1392
1393/*
1394 * This is called with interrupts disabled and sdma_lock held.
1395 */
1396static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
1397{
1398        struct qib_devdata *dd = ppd->dd;
1399        u64 set_sendctrl = 0;
1400        u64 clr_sendctrl = 0;
1401
1402        if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
1403                set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1404        else
1405                clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1406
1407        if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
1408                set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1409        else
1410                clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1411
1412        if (op & QIB_SDMA_SENDCTRL_OP_HALT)
1413                set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1414        else
1415                clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1416
1417        if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
1418                set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1419                                SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1420                                SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1421        else
1422                clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1423                                SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1424                                SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1425
1426        spin_lock(&dd->sendctrl_lock);
1427
1428        /* If we are draining everything, block sends first */
1429        if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1430                ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
1431                qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1432                qib_write_kreg(dd, kr_scratch, 0);
1433        }
1434
1435        ppd->p_sendctrl |= set_sendctrl;
1436        ppd->p_sendctrl &= ~clr_sendctrl;
1437
1438        if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
1439                qib_write_kreg_port(ppd, krp_sendctrl,
1440                                    ppd->p_sendctrl |
1441                                    SYM_MASK(SendCtrl_0, SDmaCleanup));
1442        else
1443                qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
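            /* the kr_scratch write flushes the sendctrl update to the chip
             * (same idiom as the other scratch writes in this file) */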
1444        qib_write_kreg(dd, kr_scratch, 0);
1445
1446        if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1447                ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
1448                qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1449                qib_write_kreg(dd, kr_scratch, 0);
1450        }
1451
1452        spin_unlock(&dd->sendctrl_lock);
1453
1454        if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
1455                flush_fifo(ppd);
1456}
1457
1458static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
1459{
1460        __qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
1461}
1462
1463static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
1464{
1465        /*
1466         * Set SendDmaLenGen, writing it first with the generation-count
1467         * MSB clear and then with it set, to enable generation checking
1468         * and load the internal generation counter.
1469         */
1470        qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
1471        qib_write_kreg_port(ppd, krp_senddmalengen,
1472                            ppd->sdma_descq_cnt |
1473                            (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
1474}
1475
1476/*
1477 * Must be called with sdma_lock held, or before init finished.
1478 */
1479static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
1480{
1481        /* Commit writes to memory and advance the tail on the chip */
1482        wmb();
1483        ppd->sdma_descq_tail = tail;
1484        qib_write_kreg_port(ppd, krp_senddmatail, tail);
1485}
1486
1487/*
1488 * This is called with interrupts disabled and sdma_lock held.
1489 */
1490static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
1491{
1492        /*
1493         * Drain all FIFOs.
1494         * The hardware doesn't require this but we do it so that verbs
1495         * and user applications don't wait for link active to send stale
1496         * data.
1497         */
1498        sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
1499
1500        qib_sdma_7322_setlengen(ppd);
1501        qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
1502        ppd->sdma_head_dma[0] = 0;
1503        qib_7322_sdma_sendctrl(ppd,
1504                ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
1505}
1506
1507#define DISABLES_SDMA ( \
1508        QIB_E_P_SDMAHALT | \
1509        QIB_E_P_SDMADESCADDRMISALIGN | \
1510        QIB_E_P_SDMAMISSINGDW | \
1511        QIB_E_P_SDMADWEN | \
1512        QIB_E_P_SDMARPYTAG | \
1513        QIB_E_P_SDMA1STDESC | \
1514        QIB_E_P_SDMABASE | \
1515        QIB_E_P_SDMATAILOUTOFBOUND | \
1516        QIB_E_P_SDMAOUTOFBOUND | \
1517        QIB_E_P_SDMAGENMISMATCH)
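    /*
     * Any of these per-port error bits disables (halts) the SDMA engine;
     * the halt is then routed through the state machine in
     * sdma_7322_p_errors() below.
     */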
1518
1519static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
1520{
1521        unsigned long flags;
1522        struct qib_devdata *dd = ppd->dd;
1523
1524        errs &= QIB_E_P_SDMAERRS;
1525
1526        if (errs & QIB_E_P_SDMAUNEXPDATA)
1527                qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
1528                            ppd->port);
1529
1530        spin_lock_irqsave(&ppd->sdma_lock, flags);
1531
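            /*
             * A halt indication (QIB_E_P_SDMAHALT) means different things
             * depending on where the state machine is: hardware-start-up
             * wait treats it as "started", clean-up wait as "cleaned",
             * halt wait as "halted", and the running state as an error
             * halt.
             */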
1532        switch (ppd->sdma_state.current_state) {
1533        case qib_sdma_state_s00_hw_down:
1534                break;
1535
1536        case qib_sdma_state_s10_hw_start_up_wait:
1537                if (errs & QIB_E_P_SDMAHALT)
1538                        __qib_sdma_process_event(ppd,
1539                                qib_sdma_event_e20_hw_started);
1540                break;
1541
1542        case qib_sdma_state_s20_idle:
1543                break;
1544
1545        case qib_sdma_state_s30_sw_clean_up_wait:
1546                break;
1547
1548        case qib_sdma_state_s40_hw_clean_up_wait:
1549                if (errs & QIB_E_P_SDMAHALT)
1550                        __qib_sdma_process_event(ppd,
1551                                qib_sdma_event_e50_hw_cleaned);
1552                break;
1553
1554        case qib_sdma_state_s50_hw_halt_wait:
1555                if (errs & QIB_E_P_SDMAHALT)
1556                        __qib_sdma_process_event(ppd,
1557                                qib_sdma_event_e60_hw_halted);
1558                break;
1559
1560        case qib_sdma_state_s99_running:
1561                __qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
1562                __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
1563                break;
1564        }
1565
1566        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1567}
1568
1569/*
1570 * handle per-device errors (not per-port errors)
1571 */
1572static noinline void handle_7322_errors(struct qib_devdata *dd)
1573{
1574        char *msg;
1575        u64 iserr = 0;
1576        u64 errs;
1577        u64 mask;
1578        int log_idx;
1579
1580        qib_stats.sps_errints++;
1581        errs = qib_read_kreg64(dd, kr_errstatus);
1582        if (!errs) {
1583                qib_devinfo(dd->pcidev,
1584                        "device error interrupt, but no error bits set!\n");
1585                goto done;
1586        }
1587
1588        /* don't report errors that are masked */
1589        errs &= dd->cspec->errormask;
1590        msg = dd->cspec->emsgbuf;
1591
1592        /* do these first, they are most important */
1593        if (errs & QIB_E_HARDWARE) {
1594                *msg = '\0';
1595                qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
1596        } else
1597                for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
1598                        if (errs & dd->eep_st_masks[log_idx].errs_to_log)
1599                                qib_inc_eeprom_err(dd, log_idx, 1);
1600
1601        if (errs & QIB_E_SPKTERRS) {
1602                qib_disarm_7322_senderrbufs(dd->pport);
1603                qib_stats.sps_txerrs++;
1604        } else if (errs & QIB_E_INVALIDADDR)
1605                qib_stats.sps_txerrs++;
1606        else if (errs & QIB_E_ARMLAUNCH) {
1607                qib_stats.sps_txerrs++;
1608                qib_disarm_7322_senderrbufs(dd->pport);
1609        }
1610        qib_write_kreg(dd, kr_errclear, errs);
1611
1612        /*
1613         * The ones we mask off are handled specially below
1614         * or above.  Also mask SDMADISABLED by default as it
1615         * is too chatty.
1616         */
1617        mask = QIB_E_HARDWARE;
1618        *msg = '\0';
1619
1620        err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask,
1621                   qib_7322error_msgs);
1622
1623        /*
1624         * Getting reset is a tragedy for all ports. Mark the device
1625 * _and_ the ports as "offline" in a way meaningful to each.
1626         */
1627        if (errs & QIB_E_RESET) {
1628                int pidx;
1629
1630                qib_dev_err(dd,
1631                        "Got reset, requires re-init (unload and reload driver)\n");
1632                dd->flags &= ~QIB_INITTED;  /* needs re-init */
1633                /* mark as having had error */
1634                *dd->devstatusp |= QIB_STATUS_HWERROR;
1635                for (pidx = 0; pidx < dd->num_pports; ++pidx)
1636                        if (dd->pport[pidx].link_speed_supported)
1637                                *dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
1638        }
1639
1640        if (*msg && iserr)
1641                qib_dev_err(dd, "%s error\n", msg);
1642
1643        /*
1644         * If there were hdrq or egrfull errors, wake up any processes
1645         * waiting in poll.  We used to try to check which contexts had
1646         * the overflow, but given the cost of that and the chip reads
1647         * to support it, it's better to just wake everybody up if we
1648         * get an overflow; waiters can poll again if it's not them.
1649         */
1650        if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
1651                qib_handle_urcv(dd, ~0U);
1652                if (errs & ERR_MASK(RcvEgrFullErr))
1653                        qib_stats.sps_buffull++;
1654                else
1655                        qib_stats.sps_hdrfull++;
1656        }
1657
1658done:
1659        return;
1660}
1661
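    /*
     * Tasklet (deferred) half of device error handling: decode and report
     * the errors, then restore the error interrupt mask so further error
     * interrupts can be delivered.
     */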
1662static void qib_error_tasklet(unsigned long data)
1663{
1664        struct qib_devdata *dd = (struct qib_devdata *)data;
1665
1666        handle_7322_errors(dd);
1667        qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1668}
1669
1670static void reenable_chase(unsigned long opaque)
1671{
1672        struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
1673
1674        ppd->cpspec->chase_timer.expires = 0;
1675        qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1676                QLOGIC_IB_IBCC_LINKINITCMD_POLL);
1677}
1678
1679static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow,
1680                u8 ibclt)
1681{
1682        ppd->cpspec->chase_end = 0;
1683
1684        if (!qib_chase)
1685                return;
1686
1687        qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1688                QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1689        ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
1690        add_timer(&ppd->cpspec->chase_timer);
1691}
1692
1693static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
1694{
1695        u8 ibclt;
1696        unsigned long tnow;
1697
1698        ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
1699
1700        /*
1701         * Detect and handle the state chase issue, where we can
1702         * get stuck if we are unlucky on timing on both sides of
1703         * the link.   If we are, we disable, set a timer, and
1704         * then re-enable.
1705         */
1706        switch (ibclt) {
1707        case IB_7322_LT_STATE_CFGRCVFCFG:
1708        case IB_7322_LT_STATE_CFGWAITRMT:
1709        case IB_7322_LT_STATE_TXREVLANES:
1710        case IB_7322_LT_STATE_CFGENH:
1711                tnow = jiffies;
1712                if (ppd->cpspec->chase_end &&
1713                     time_after(tnow, ppd->cpspec->chase_end))
1714                        disable_chase(ppd, tnow, ibclt);
1715                else if (!ppd->cpspec->chase_end)
1716                        ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
1717                break;
1718        default:
1719                ppd->cpspec->chase_end = 0;
1720                break;
1721        }
1722
1723        if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
1724              ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
1725             ibclt == IB_7322_LT_STATE_LINKUP) &&
1726            (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
1727                force_h1(ppd);
1728                ppd->cpspec->qdr_reforce = 1;
1729                if (!ppd->dd->cspec->r1)
1730                        serdes_7322_los_enable(ppd, 0);
1731        } else if (ppd->cpspec->qdr_reforce &&
1732                (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
1733                 (ibclt == IB_7322_LT_STATE_CFGENH ||
1734                ibclt == IB_7322_LT_STATE_CFGIDLE ||
1735                ibclt == IB_7322_LT_STATE_LINKUP))
1736                force_h1(ppd);
1737
1738        if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
1739            ppd->link_speed_enabled == QIB_IB_QDR &&
1740            (ibclt == IB_7322_LT_STATE_CFGTEST ||
1741             ibclt == IB_7322_LT_STATE_CFGENH ||
1742             (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
1743              ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
1744                adj_tx_serdes(ppd);
1745
1746        if (ibclt != IB_7322_LT_STATE_LINKUP) {
1747                u8 ltstate = qib_7322_phys_portstate(ibcst);
1748                u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
1749                                          LinkTrainingState);
1750                if (!ppd->dd->cspec->r1 &&
1751                    pibclt == IB_7322_LT_STATE_LINKUP &&
1752                    ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1753                    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1754                    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1755                    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1756                        /* If the link went down (but not into recovery),
1757                         * turn LOS back on */
1758                        serdes_7322_los_enable(ppd, 1);
1759                if (!ppd->cpspec->qdr_dfe_on &&
1760                    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
1761                        ppd->cpspec->qdr_dfe_on = 1;
1762                        ppd->cpspec->qdr_dfe_time = 0;
1763                        /* On link down, reenable QDR adaptation */
1764                        qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
1765                                            ppd->dd->cspec->r1 ?
1766                                            QDR_STATIC_ADAPT_DOWN_R1 :
1767                                            QDR_STATIC_ADAPT_DOWN);
1768                        pr_info(
1769                                "IB%u:%u re-enabled QDR adaptation ibclt %x\n",
1770                                ppd->dd->unit, ppd->port, ibclt);
1771                }
1772        }
1773}
1774
1775static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
1776
1777/*
1778 * This is per-pport error handling.
1779 * It will likely get its own MSIx interrupt (one for each port,
1780 * although just a single handler).
1781 */
1782static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1783{
1784        char *msg;
1785        u64 ignore_this_time = 0, iserr = 0, errs, fmask;
1786        struct qib_devdata *dd = ppd->dd;
1787
1788        /* do this as soon as possible */
1789        fmask = qib_read_kreg64(dd, kr_act_fmask);
1790        if (!fmask)
1791                check_7322_rxe_status(ppd);
1792
1793        errs = qib_read_kreg_port(ppd, krp_errstatus);
1794        if (!errs)
1795                qib_devinfo(dd->pcidev,
1796                         "Port%d error interrupt, but no error bits set!\n",
1797                         ppd->port);
1798        if (!fmask)
1799                errs &= ~QIB_E_P_IBSTATUSCHANGED;
1800        if (!errs)
1801                goto done;
1802
1803        msg = ppd->cpspec->epmsgbuf;
1804        *msg = '\0';
1805
1806        if (errs & ~QIB_E_P_BITSEXTANT) {
1807                err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
1808                           errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
1809                if (!*msg)
1810                        snprintf(msg, sizeof ppd->cpspec->epmsgbuf,
1811                                 "no others");
1812                qib_dev_porterr(dd, ppd->port,
1813                        "error interrupt with unknown errors 0x%016Lx set (and %s)\n",
1814                        (errs & ~QIB_E_P_BITSEXTANT), msg);
1815                *msg = '\0';
1816        }
1817
1818        if (errs & QIB_E_P_SHDR) {
1819                u64 symptom;
1820
1821                /* determine cause, then write to clear */
1822                symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
1823                qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
1824                err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom,
1825                           hdrchk_msgs);
1826                *msg = '\0';
1827                /* senderrbuf cleared in SPKTERRS below */
1828        }
1829
1830        if (errs & QIB_E_P_SPKTERRS) {
1831                if ((errs & QIB_E_P_LINK_PKTERRS) &&
1832                    !(ppd->lflags & QIBL_LINKACTIVE)) {
1833                        /*
1834                         * This can happen when trying to bring the link
1835                         * up, but the IB link changes state at the "wrong"
1836                         * time. The IB logic then complains that the packet
1837                         * isn't valid.  We don't want to confuse people, so
1838                         * we just don't print them, except at debug
1839                         */
1840                        err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
1841                                   (errs & QIB_E_P_LINK_PKTERRS),
1842                                   qib_7322p_error_msgs);
1843                        *msg = '\0';
1844                        ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1845                }
1846                qib_disarm_7322_senderrbufs(ppd);
1847        } else if ((errs & QIB_E_P_LINK_PKTERRS) &&
1848                   !(ppd->lflags & QIBL_LINKACTIVE)) {
1849                /*
1850                 * This can happen when SMA is trying to bring the link
1851                 * up, but the IB link changes state at the "wrong" time.
1852                 * The IB logic then complains that the packet isn't
1853                 * valid.  We don't want to confuse people, so we just
1854                 * don't print them, except at debug
1855                 */
1856                err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs,
1857                           qib_7322p_error_msgs);
1858                ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1859                *msg = '\0';
1860        }
1861
1862        qib_write_kreg_port(ppd, krp_errclear, errs);
1863
1864        errs &= ~ignore_this_time;
1865        if (!errs)
1866                goto done;
1867
1868        if (errs & QIB_E_P_RPKTERRS)
1869                qib_stats.sps_rcverrs++;
1870        if (errs & QIB_E_P_SPKTERRS)
1871                qib_stats.sps_txerrs++;
1872
1873        iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
1874
1875        if (errs & QIB_E_P_SDMAERRS)
1876                sdma_7322_p_errors(ppd, errs);
1877
1878        if (errs & QIB_E_P_IBSTATUSCHANGED) {
1879                u64 ibcs;
1880                u8 ltstate;
1881
1882                ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
1883                ltstate = qib_7322_phys_portstate(ibcs);
1884
1885                if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
1886                        handle_serdes_issues(ppd, ibcs);
1887                if (!(ppd->cpspec->ibcctrl_a &
1888                      SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
1889                        /*
1890                         * We got our interrupt, so init code should be
1891                         * happy and not try alternatives. Now squelch
1892                         * other "chatter" from link-negotiation (pre Init)
1893                         */
1894                        ppd->cpspec->ibcctrl_a |=
1895                                SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
1896                        qib_write_kreg_port(ppd, krp_ibcctrl_a,
1897                                            ppd->cpspec->ibcctrl_a);
1898                }
1899
1900                /* Update our picture of width and speed from chip */
1901                ppd->link_width_active =
1902                        (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
1903                            IB_WIDTH_4X : IB_WIDTH_1X;
1904                ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
1905                        LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
1906                          SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
1907                                   QIB_IB_DDR : QIB_IB_SDR;
1908
1909                if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
1910                    IB_PHYSPORTSTATE_DISABLED)
1911                        qib_set_ib_7322_lstate(ppd, 0,
1912                               QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1913                else
1914                        /*
1915                         * Since going into a recovery state causes the link
1916                         * state to go down and since recovery is transitory,
1917                         * it is better if we "miss" ever seeing the link
1918                         * training state go into recovery (i.e., ignore this
1919                         * transition for link state special handling purposes)
1920                         * without updating lastibcstat.
1921                         */
1922                        if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1923                            ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1924                            ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1925                            ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1926                                qib_handle_e_ibstatuschanged(ppd, ibcs);
1927        }
1928        if (*msg && iserr)
1929                qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
1930
1931        if (ppd->state_wanted & ppd->lflags)
1932                wake_up_interruptible(&ppd->state_wait);
1933done:
1934        return;
1935}
1936
1937/* enable/disable chip from delivering interrupts */
1938static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
1939{
1940        if (enable) {
1941                if (dd->flags & QIB_BADINTR)
1942                        return;
1943                qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
1944                /* cause any pending enabled interrupts to be re-delivered */
1945                qib_write_kreg(dd, kr_intclear, 0ULL);
1946                if (dd->cspec->num_msix_entries) {
1947                        /* and same for MSIx */
1948                        u64 val = qib_read_kreg64(dd, kr_intgranted);
1949                        if (val)
1950                                qib_write_kreg(dd, kr_intgranted, val);
1951                }
1952        } else
1953                qib_write_kreg(dd, kr_intmask, 0ULL);
1954}
1955
1956/*
1957 * Try to cleanup as much as possible for anything that might have gone
1958 * wrong while in freeze mode, such as pio buffers being written by user
1959 * processes (causing armlaunch), send errors due to going into freeze mode,
1960 * etc., and try to avoid causing extra interrupts while doing so.
1961 * Forcibly update the in-memory pioavail register copies after cleanup
1962 * because the chip won't do it while in freeze mode (the register values
1963 * themselves are kept correct).
1964 * Make sure that we don't lose any important interrupts by using the chip
1965 * feature that says that writing 0 to a bit in *clear that is set in
1966 * *status will cause an interrupt to be generated again (if allowed by
1967 * the *mask value).
1968 * This is in chip-specific code because of all of the register accesses,
1969 * even though the details are similar on most chips.
1970 */
1971static void qib_7322_clear_freeze(struct qib_devdata *dd)
1972{
1973        int pidx;
1974
1975        /* disable error interrupts, to avoid confusion */
1976        qib_write_kreg(dd, kr_errmask, 0ULL);
1977
1978        for (pidx = 0; pidx < dd->num_pports; ++pidx)
1979                if (dd->pport[pidx].link_speed_supported)
1980                        qib_write_kreg_port(dd->pport + pidx, krp_errmask,
1981                                            0ULL);
1982
1983        /* also disable interrupts; errormask is sometimes overwritten */
1984        qib_7322_set_intr_state(dd, 0);
1985
1986        /* clear the freeze, and be sure chip saw it */
1987        qib_write_kreg(dd, kr_control, dd->control);
1988        qib_read_kreg32(dd, kr_scratch);
1989
1990        /*
1991         * Force new interrupt if any hwerr, error or interrupt bits are
1992         * still set, and clear "safe" send packet errors related to freeze
1993         * and cancelling sends.  Re-enable error interrupts before possible
1994         * force of re-interrupt on pending interrupts.
1995         */
1996        qib_write_kreg(dd, kr_hwerrclear, 0ULL);
1997        qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
1998        qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1999        /* We need to purge per-port errs and reset mask, too */
2000        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
2001                if (!dd->pport[pidx].link_speed_supported)
2002                        continue;
2003                qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0ULL);
2004                qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0ULL);
2005        }
2006        qib_7322_set_intr_state(dd, 1);
2007}
2008
2009/* no error handling to speak of */
2010/**
2011 * qib_7322_handle_hwerrors - display hardware errors.
2012 * @dd: the qlogic_ib device
2013 * @msg: the output buffer
2014 * @msgl: the size of the output buffer
2015 *
2016 * Most hardware errors are catastrophic, but for right now,
2017 * we'll print them and continue.  We reuse the same message
2018 * buffer as qib_handle_errors() to avoid excessive stack
2019 * usage.
2020 */
2021static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
2022                                     size_t msgl)
2023{
2024        u64 hwerrs;
2025        u32 ctrl;
2026        int isfatal = 0;
2027
2028        hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
2029        if (!hwerrs)
2030                goto bail;
2031        if (hwerrs == ~0ULL) {
2032                qib_dev_err(dd,
2033                        "Read of hardware error status failed (all bits set); ignoring\n");
2034                goto bail;
2035        }
2036        qib_stats.sps_hwerrs++;
2037
2038        /* Always clear the error status register, except BIST fail */
2039        qib_write_kreg(dd, kr_hwerrclear, hwerrs &
2040                       ~HWE_MASK(PowerOnBISTFailed));
2041
2042        hwerrs &= dd->cspec->hwerrmask;
2043
2044        /* no EEPROM logging, yet */
2045
2046        if (hwerrs)
2047                qib_devinfo(dd->pcidev,
2048                        "Hardware error: hwerr=0x%llx (cleared)\n",
2049                        (unsigned long long) hwerrs);
2050
2051        ctrl = qib_read_kreg32(dd, kr_control);
2052        if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
2053                /*
2054                 * No recovery yet...
2055                 */
2056                if ((hwerrs & ~HWE_MASK(LATriggered)) ||
2057                    dd->cspec->stay_in_freeze) {
2058                        /*
2059                         * If any bits we aren't ignoring are set, only make
2060                         * the complaint once, in case it's stuck or recurring
2061                         * and we get here multiple times.
2062                         * Force the link down, so the switch knows, and the
2063                         * LEDs are turned off.
2064                         */
2065                        if (dd->flags & QIB_INITTED)
2066                                isfatal = 1;
2067                } else
2068                        qib_7322_clear_freeze(dd);
2069        }
2070
2071        if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
2072                isfatal = 1;
2073                strlcpy(msg,
2074                        "[Memory BIST test failed, InfiniPath hardware unusable]",
2075                        msgl);
2076                /* ignore from now on, so disable until driver reloaded */
2077                dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
2078                qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2079        }
2080
2081        err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);
2082
2083        /* Ignore esoteric PLL failures et al. */
2084
2085        qib_dev_err(dd, "%s hardware error\n", msg);
2086
2087        if (isfatal && !dd->diag_client) {
2088                qib_dev_err(dd,
2089                        "Fatal Hardware Error, no longer usable, SN %.16s\n",
2090                        dd->serial);
2091                /*
2092                 * for /sys status file and user programs to print; if no
2093                 * trailing brace is copied, we'll know it was truncated.
2094                 */
2095                if (dd->freezemsg)
2096                        snprintf(dd->freezemsg, dd->freezelen,
2097                                 "{%s}", msg);
2098                qib_disable_after_error(dd);
2099        }
2100bail:;
2101}
2102
2103/**
2104 * qib_7322_init_hwerrors - enable hardware errors
2105 * @dd: the qlogic_ib device
2106 *
2107 * now that we have finished initializing everything that might reasonably
2108 * cause a hardware error, and cleared those error bits as they occur,
2109 * we can enable hardware errors in the mask (potentially enabling
2110 * freeze mode), and enable hardware errors as errors (along with
2111 * everything else) in errormask
2112 */
2113static void qib_7322_init_hwerrors(struct qib_devdata *dd)
2114{
2115        int pidx;
2116        u64 extsval;
2117
2118        extsval = qib_read_kreg64(dd, kr_extstatus);
2119        if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
2120                         QIB_EXTS_MEMBIST_ENDTEST)))
2121                qib_dev_err(dd, "MemBIST did not complete!\n");
2122
2123        /* never clear BIST failure, so reported on each driver load */
2124        qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
2125        qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2126
2127        /* clear all */
2128        qib_write_kreg(dd, kr_errclear, ~0ULL);
2129        /* enable errors that are masked, at least this first time. */
2130        qib_write_kreg(dd, kr_errmask, ~0ULL);
2131        dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
2132        for (pidx = 0; pidx < dd->num_pports; ++pidx)
2133                if (dd->pport[pidx].link_speed_supported)
2134                        qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2135                                            ~0ULL);
2136}
2137
2138/*
2139 * Disable and enable the armlaunch error.  Used for PIO bandwidth testing
2140 * on chips that are count-based, rather than trigger-based.  There is no
2141 * reference counting, but that's also fine, given the intended use.
2142 * Only chip-specific because it's all register accesses
2143 */
2144static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
2145{
2146        if (enable) {
2147                qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
2148                dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
2149        } else
2150                dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
2151        qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2152}
2153
2154/*
2155 * Formerly took parameter <which> in pre-shifted,
2156 * pre-merged form with LinkCmd and LinkInitCmd
2157 * together, and assumed that zero was a NOP.
2158 */
2159static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
2160                                   u16 linitcmd)
2161{
2162        u64 mod_wd;
2163        struct qib_devdata *dd = ppd->dd;
2164        unsigned long flags;
2165
2166        if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
2167                /*
2168                 * If we are told to disable, note that so link-recovery
2169                 * code does not attempt to bring us back up.
2170                 * Also reset everything that we can, so we start
2171                 * completely clean when re-enabled (before we
2172                 * actually issue the disable to the IBC)
2173                 */
2174                qib_7322_mini_pcs_reset(ppd);
2175                spin_lock_irqsave(&ppd->lflags_lock, flags);
2176                ppd->lflags |= QIBL_IB_LINK_DISABLED;
2177                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2178        } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
2179                /*
2180                 * Any other linkinitcmd will lead to LINKDOWN and then
2181                 * to INIT (if all is well), so clear flag to let
2182                 * link-recovery code attempt to bring us back up.
2183                 */
2184                spin_lock_irqsave(&ppd->lflags_lock, flags);
2185                ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
2186                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2187                /*
2188                 * Clear status change interrupt reduction so the
2189                 * new state is seen.
2190                 */
2191                ppd->cpspec->ibcctrl_a &=
2192                        ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
2193        }
2194
2195        mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
2196                (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2197
2198        qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
2199                            mod_wd);
2200        /* write to chip to prevent back-to-back writes of ibc reg */
2201        qib_write_kreg(dd, kr_scratch, 0);
2202
2203}
2204
2205/*
2206 * The total RCV buffer memory is 64KB, used for both ports, and is
2207 * in units of 64 bytes (same as IB flow control credit unit).
2208 * The consumedVL unit in the same registers is 32 bytes!
2209 * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
2210 * and we can therefore allocate just 9 IB credits for 2 VL15 packets
2211 * in krp_rxcreditvl15, rather than 10.
2212 */
2213#define RCV_BUF_UNITSZ 64
2214#define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
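    /*
     * Worked example, assuming a two-port board: NUM_RCV_BUF_UNITS is
     * 65536 / (64 * 2) = 512 credit units per port.  set_vls() below
     * first carves out (2 * 288 + 63) / 64 = 9 units for VL15, then
     * splits the remaining 503 units across the operational data VLs,
     * with any rounding excess going to VL0.
     */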
2215
2216static void set_vls(struct qib_pportdata *ppd)
2217{
2218        int i, numvls, totcred, cred_vl, vl0extra;
2219        struct qib_devdata *dd = ppd->dd;
2220        u64 val;
2221
2222        numvls = qib_num_vls(ppd->vls_operational);
2223
2224        /*
2225         * Set up per-VL credits. Below is a kluge based on these assumptions:
2226         * 1) port is disabled at the time early_init is called.
2227         * 2) give VL15 9 credits, enough for two max-plausible packets.
2228         * 3) Give VL0-N the rest, with any rounding excess used for VL0
2229         */
2230        /* 2 VL15 packets @ 288 bytes each (including IB headers) */
2231        totcred = NUM_RCV_BUF_UNITS(dd);
2232        cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
2233        totcred -= cred_vl;
2234        qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
2235        cred_vl = totcred / numvls;
2236        vl0extra = totcred - cred_vl * numvls;
2237        qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
2238        for (i = 1; i < numvls; i++)
2239                qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
2240        for (; i < 8; i++) /* no buffer space for other VLs */
2241                qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
2242
2243        /* Notify IBC that credits need to be recalculated */
2244        val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
2245        val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2246        qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2247        qib_write_kreg(dd, kr_scratch, 0ULL);
2248        val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2249        qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2250
2251        for (i = 0; i < numvls; i++)
2252                val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
2253        val = qib_read_kreg_port(ppd, krp_rxcreditvl15);
2254
2255        /* Change the number of operational VLs */
2256        ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
2257                                ~SYM_MASK(IBCCtrlA_0, NumVLane)) |
2258                ((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
2259        qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2260        qib_write_kreg(dd, kr_scratch, 0ULL);
2261}
2262
2263/*
2264 * The code that deals with actual SerDes is in serdes_7322_init().
2265 * Compared to the code for iba7220, it is minimal.
2266 */
2267static int serdes_7322_init(struct qib_pportdata *ppd);
2268
2269/**
2270 * qib_7322_bringup_serdes - bring up the serdes
2271 * @ppd: physical port on the qlogic_ib device
2272 */
2273static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2274{
2275        struct qib_devdata *dd = ppd->dd;
2276        u64 val, guid, ibc;
2277        unsigned long flags;
2278        int ret = 0;
2279
2280        /*
2281         * SerDes model not in Pd, but still need to
2282         * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
2283         * eventually.
2284         */
2285        /* Put IBC in reset, sends disabled (should be in reset already) */
2286        ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2287        qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2288        qib_write_kreg(dd, kr_scratch, 0ULL);
2289
2290        if (qib_compat_ddr_negotiate) {
2291                ppd->cpspec->ibdeltainprog = 1;
2292                ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
2293                                                crp_ibsymbolerr);
2294                ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
2295                                                crp_iblinkerrrecov);
2296        }
2297
2298        /* flowcontrolwatermark is in units of KBytes */
2299        ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
2300        /*
2301         * Flow control is sent this often, even if no changes in
2302         * buffer space occur.  Units are 128ns for this chip.
2303         * Set to 3usec.
2304         */
2305        ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
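            /* 24 * 128ns = 3.072usec */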
2306        /* max error tolerance */
2307        ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
2308        /* IB credit flow control. */
2309        ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
2310        /*
2311         * set initial max size pkt IBC will send, including ICRC; it's the
2312         * PIO buffer size in dwords, less 1; also see qib_set_mtu()
2313         */
2314        ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
2315                SYM_LSB(IBCCtrlA_0, MaxPktLen);
2316        ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
2317
2318        /*
2319         * Reset the PCS interface to the serdes (and also ibc, which is still
2320         * in reset from above).  Writes new value of ibcctrl_a as last step.
2321         */
2322        qib_7322_mini_pcs_reset(ppd);
2323
2324        if (!ppd->cpspec->ibcctrl_b) {
2325                unsigned lse = ppd->link_speed_enabled;
2326
2327                /*
2328                 * This is not a re-init after reset: establish the
2329                 * shadow and force the initial config.
2330                 */
2331                ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
2332                                                             krp_ibcctrl_b);
2333                ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
2334                                IBA7322_IBC_SPEED_DDR |
2335                                IBA7322_IBC_SPEED_SDR |
2336                                IBA7322_IBC_WIDTH_AUTONEG |
2337                                SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
2338                if (lse & (lse - 1)) /* Multiple speeds enabled */
2339                        ppd->cpspec->ibcctrl_b |=
2340                                (lse << IBA7322_IBC_SPEED_LSB) |
2341                                IBA7322_IBC_IBTA_1_2_MASK |
2342                                IBA7322_IBC_MAX_SPEED_MASK;
2343                else
2344                        ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
2345                                IBA7322_IBC_SPEED_QDR |
2346                                 IBA7322_IBC_IBTA_1_2_MASK :
2347                                (lse == QIB_IB_DDR) ?
2348                                        IBA7322_IBC_SPEED_DDR :
2349                                        IBA7322_IBC_SPEED_SDR;
2350                if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
2351                    (IB_WIDTH_1X | IB_WIDTH_4X))
2352                        ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
2353                else
2354                        ppd->cpspec->ibcctrl_b |=
2355                                ppd->link_width_enabled == IB_WIDTH_4X ?
2356                                IBA7322_IBC_WIDTH_4X_ONLY :
2357                                IBA7322_IBC_WIDTH_1X_ONLY;
2358
2359                /* always enable these on driver reload, not sticky */
2360                ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
2361                        IBA7322_IBC_HRTBT_MASK);
2362        }
2363        qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
2364
2365        /* set up so we have more time at CFGTEST to change H1 */
2366        val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
2367        val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
2368        val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
2369        qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
2370
2371        serdes_7322_init(ppd);
2372
2373        guid = be64_to_cpu(ppd->guid);
2374        if (!guid) {
2375                if (dd->base_guid)
2376                        guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
2377                ppd->guid = cpu_to_be64(guid);
2378        }
2379
2380        qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
2381        /* write to chip to prevent back-to-back writes of ibc reg */
2382        qib_write_kreg(dd, kr_scratch, 0);
2383
2384        /* Enable port */
2385        ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
2386        set_vls(ppd);
2387
2388        /* initially come up DISABLED, without sending anything. */
2389        val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
2390                                        QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2391        qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
2392        qib_write_kreg(dd, kr_scratch, 0ULL);
2393        /* clear the linkinit cmds */
2394        ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
2395
2396        /* be paranoid against later code motion, etc. */
2397        spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2398        ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
2399        qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
2400        spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2401
2402        /* Also enable IBSTATUSCHG interrupt.  */
2403        val = qib_read_kreg_port(ppd, krp_errmask);
2404        qib_write_kreg_port(ppd, krp_errmask,
2405                val | ERR_MASK_N(IBStatusChanged));
2406
2407        /* Always zero until we start messing with SerDes for real */
2408        return ret;
2409}
2410
2411/**
2412 * qib_7322_mini_quiet_serdes - set serdes to txidle
2413 * @ppd: physical port on the qlogic_ib device
2414 * Called when driver is being unloaded
2415 */
2416static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
2417{
2418        u64 val;
2419        unsigned long flags;
2420
2421        qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
2422
2423        spin_lock_irqsave(&ppd->lflags_lock, flags);
2424        ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
2425        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2426        wake_up(&ppd->cpspec->autoneg_wait);
2427        cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
2428        if (ppd->dd->cspec->r1)
2429                cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
2430
2431        ppd->cpspec->chase_end = 0;
2432        if (ppd->cpspec->chase_timer.data) /* if initted */
2433                del_timer_sync(&ppd->cpspec->chase_timer);
2434
2435        /*
2436         * Despite the name, actually disables IBC as well. Do it when
2437         * we are as sure as possible that no more packets can be
2438         * received, following the down and the PCS reset.
2439         * The actual disabling happens in qib_7322_mini_pcs_reset(),
2440         * along with the PCS being reset.
2441         */
2442        ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2443        qib_7322_mini_pcs_reset(ppd);
2444
2445        /*
2446         * Update the adjusted counters so the adjustment persists
2447         * across driver reload.
2448         */
2449        if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
2450            ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
2451                struct qib_devdata *dd = ppd->dd;
2452                u64 diagc;
2453
2454                /* enable counter writes */
2455                diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
2456                qib_write_kreg(dd, kr_hwdiagctrl,
2457                               diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
2458
2459                if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
2460                        val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
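                            /* "val -= val - snap" below is just
                             * "val = snap": roll the counter back to the
                             * snapshot taken at bring-up */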
2461                        if (ppd->cpspec->ibdeltainprog)
2462                                val -= val - ppd->cpspec->ibsymsnap;
2463                        val -= ppd->cpspec->ibsymdelta;
2464                        write_7322_creg_port(ppd, crp_ibsymbolerr, val);
2465                }
2466                if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
2467                        val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
2468                        if (ppd->cpspec->ibdeltainprog)
2469                                val -= val - ppd->cpspec->iblnkerrsnap;
2470                        val -= ppd->cpspec->iblnkerrdelta;
2471                        write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
2472                }
2473                if (ppd->cpspec->iblnkdowndelta) {
2474                        val = read_7322_creg32_port(ppd, crp_iblinkdown);
2475                        val += ppd->cpspec->iblnkdowndelta;
2476                        write_7322_creg_port(ppd, crp_iblinkdown, val);
2477                }
2478                /*
2479                 * No need to save ibmalfdelta since IB perfcounters
2480                 * are cleared on driver reload.
2481                 */
2482
2483                /* and disable counter writes */
2484                qib_write_kreg(dd, kr_hwdiagctrl, diagc);
2485        }
2486}
2487
2488/**
2489 * qib_setup_7322_setextled - set the state of the two external LEDs
2490 * @ppd: physical port on the qlogic_ib device
2491 * @on: whether the link is up or not
2492 *
2493 * The exact combo of LEDs when @on is true is determined by looking
2494 * at the ibcstatus.
2495 *
2496 * These LEDs indicate the physical and logical state of the IB link.
2497 * For this chip (at least with recommended board pinouts), LED1
2498 * is Yellow (logical state) and LED2 is Green (physical state).
2499 *
2500 * Note:  We try to match the Mellanox HCA LED behavior as best
2501 * we can.  Green indicates physical link state is OK (something is
2502 * plugged in, and we can train).
2503 * Amber indicates the link is logically up (ACTIVE).
2504 * Mellanox further blinks the amber LED to indicate data packet
2505 * activity, but we have no hardware support for that, so it would
2506 * require waking up every 10-20 msecs and checking the counters
2507 * on the chip, and then turning the LED off if appropriate.  That's
2508 * visible overhead, so not something we will do.
2509 */
2510static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
2511{
2512        struct qib_devdata *dd = ppd->dd;
2513        u64 extctl, ledblink = 0, val;
2514        unsigned long flags;
2515        int yel, grn;
2516
2517        /*
2518         * The diags use the LED to indicate diag info, so we leave
2519         * the external LED alone when the diags are running.
2520         */
2521        if (dd->diag_client)
2522                return;
2523
2524        /* Allow override of LED display, e.g. for locating the system in a rack */
2525        if (ppd->led_override) {
2526                grn = (ppd->led_override & QIB_LED_PHYS);
2527                yel = (ppd->led_override & QIB_LED_LOG);
2528        } else if (on) {
2529                val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
2530                grn = qib_7322_phys_portstate(val) ==
2531                        IB_PHYSPORTSTATE_LINKUP;
2532                yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
2533        } else {
2534                grn = 0;
2535                yel = 0;
2536        }
2537
2538        spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2539        extctl = dd->cspec->extctrl & (ppd->port == 1 ?
2540                ~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
2541        if (grn) {
2542                extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
2543                /*
2544                 * Counts are in chip clock (4ns) periods.
2545                 * This is about 1/15 sec (66.6 ms) on,
2546                 * 3/16 sec (187.5 ms) off, with packets rcvd.
2547                 */
2548                ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
2549                        ((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
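                    /* i.e. 66,600,000ns / 4ns = 16,650,000 periods on and
                     * 187,500,000ns / 4ns = 46,875,000 periods off */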
2550        }
2551        if (yel)
2552                extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
2553        dd->cspec->extctrl = extctl;
2554        qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
2555        spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2556
2557        if (ledblink) /* blink the LED on packet receive */
2558                qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
2559}
2560
2561/*
2562 * Disable MSIx interrupt if enabled, call generic MSIx code
2563 * to cleanup, and clear pending MSIx interrupts.
2564 * Used for fallback to INTx, after reset, and when MSIx setup fails.
2565 */
2566static void qib_7322_nomsix(struct qib_devdata *dd)
2567{
2568        u64 intgranted;
2569        int n;
2570
2571        dd->cspec->main_int_mask = ~0ULL;
2572        n = dd->cspec->num_msix_entries;
2573        if (n) {
2574                int i;
2575
2576                dd->cspec->num_msix_entries = 0;
2577                for (i = 0; i < n; i++) {
2578                        irq_set_affinity_hint(
2579                          dd->cspec->msix_entries[i].msix.vector, NULL);
2580                        free_cpumask_var(dd->cspec->msix_entries[i].mask);
2581                        free_irq(dd->cspec->msix_entries[i].msix.vector,
2582                           dd->cspec->msix_entries[i].arg);
2583                }
2584                qib_nomsix(dd);
2585        }
2586        /* make sure no MSIx interrupts are left pending */
2587        intgranted = qib_read_kreg64(dd, kr_intgranted);
2588        if (intgranted)
2589                qib_write_kreg(dd, kr_intgranted, intgranted);
2590}
2591
2592static void qib_7322_free_irq(struct qib_devdata *dd)
2593{
2594        if (dd->cspec->irq) {
2595                free_irq(dd->cspec->irq, dd);
2596                dd->cspec->irq = 0;
2597        }
2598        qib_7322_nomsix(dd);
2599}
2600
2601static void qib_setup_7322_cleanup(struct qib_devdata *dd)
2602{
2603        int i;
2604
2605        qib_7322_free_irq(dd);
2606        kfree(dd->cspec->cntrs);
2607        kfree(dd->cspec->sendchkenable);
2608        kfree(dd->cspec->sendgrhchk);
2609        kfree(dd->cspec->sendibchk);
2610        kfree(dd->cspec->msix_entries);
2611        for (i = 0; i < dd->num_pports; i++) {
2612                unsigned long flags;
2613                u32 mask = QSFP_GPIO_MOD_PRS_N |
2614                        (QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);
2615
2616                kfree(dd->pport[i].cpspec->portcntrs);
2617                if (dd->flags & QIB_HAS_QSFP) {
2618                        spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2619                        dd->cspec->gpio_mask &= ~mask;
2620                        qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2621                        spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2622                        qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data);
2623                }
2624                if (dd->pport[i].ibport_data.smi_ah)
2625                        ib_destroy_ah(&dd->pport[i].ibport_data.smi_ah->ibah);
2626        }
2627}
2628
2629/* handle SDMA interrupts */
2630static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
2631{
2632        struct qib_pportdata *ppd0 = &dd->pport[0];
2633        struct qib_pportdata *ppd1 = &dd->pport[1];
2634        u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
2635                INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
2636        u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
2637                INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));
2638
2639        if (intr0)
2640                qib_sdma_intr(ppd0);
2641        if (intr1)
2642                qib_sdma_intr(ppd1);
2643
2644        if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
2645                qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
2646        if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
2647                qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
2648}
2649
2650/*
2651 * Set or clear the Send buffer available interrupt enable bit.
2652 */
2653static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
2654{
2655        unsigned long flags;
2656
2657        spin_lock_irqsave(&dd->sendctrl_lock, flags);
2658        if (needint)
2659                dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
2660        else
2661                dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
2662        qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
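            /*
             * The scratch write below follows the pattern used after
             * sendctrl updates throughout this file; it appears to flush
             * the change to the chip (an inference from usage, not from
             * documentation).
             */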
2663        qib_write_kreg(dd, kr_scratch, 0ULL);
2664        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2665}
2666
2667/*
2668 * Somehow got an interrupt with reserved bits set in interrupt status.
2669 * Print a message so we know it happened, then clear them.
2670 * Kept out of line to keep the mainline interrupt handler cache-friendly.
2671 */
2672static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
2673{
2674        u64 kills;
2676
2677        kills = istat & ~QIB_I_BITSEXTANT;
2678        qib_dev_err(dd,
2679                "Clearing reserved interrupt(s) 0x%016llx\n",
2680                (unsigned long long) kills);
2681        qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
2682}
2683
2684/* keep mainline interrupt handler cache-friendly */
2685static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
2686{
2687        u32 gpiostatus;
2688        int handled = 0;
2689        int pidx;
2690
2691        /*
2692         * Boards for this chip currently don't use GPIO interrupts,
2693         * so clear by writing GPIOstatus to GPIOclear, and complain
2694         * to the developer.  To avoid endless repeats, clear
2695         * the bits in the mask, since there is some kind of
2696         * programming error or chip problem.
2697         */
2698        gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
2699        /*
2700         * In theory, writing GPIOstatus to GPIOclear could
2701         * have a bad side-effect on some diagnostic that wanted
2702         * to poll for a status-change, but the various shadows
2703         * make that problematic at best. Diags will just suppress
2704         * all GPIO interrupts during such tests.
2705         */
2706        qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
2707        /*
2708         * Check for QSFP MOD_PRS changes
2709         * only works for single port if IB1 != pidx1
2710         */
2711        for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
2712             ++pidx) {
2713                struct qib_pportdata *ppd;
2714                struct qib_qsfp_data *qd;
2715                u32 mask;
2716                if (!dd->pport[pidx].link_speed_supported)
2717                        continue;
2718                mask = QSFP_GPIO_MOD_PRS_N;
2719                ppd = dd->pport + pidx;
2720                mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
2721                if (gpiostatus & dd->cspec->gpio_mask & mask) {
2722                        u64 pins;
2723                        qd = &ppd->cpspec->qsfp_data;
2724                        gpiostatus &= ~mask;
2725                        pins = qib_read_kreg64(dd, kr_extstatus);
2726                        pins >>= SYM_LSB(EXTStatus, GPIOIn);
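                            /*
                             * MOD_PRS_N is active-low, so a clear pin
                             * here suggests a QSFP module is now
                             * present; kick off the QSFP worker.
                             */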
2727                        if (!(pins & mask)) {
2728                                ++handled;
2729                                qd->t_insert = jiffies;
2730                                queue_work(ib_wq, &qd->work);
2731                        }
2732                }
2733        }
2734
2735        if (gpiostatus && !handled) {
2736                const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
2737                u32 gpio_irq = mask & gpiostatus;
2738
2739                /*
2740                 * Clear any troublemakers, and update chip from shadow
2741                 */
2742                dd->cspec->gpio_mask &= ~gpio_irq;
2743                qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2744        }
2745}
2746
2747/*
2748 * Handle errors and unusual events first, separate function
2749 * to improve cache hits for fast path interrupt handling.
2750 */
2751static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
2752{
2753        if (istat & ~QIB_I_BITSEXTANT)
2754                unknown_7322_ibits(dd, istat);
2755        if (istat & QIB_I_GPIO)
2756                unknown_7322_gpio_intr(dd);
2757        if (istat & QIB_I_C_ERROR) {
2758                qib_write_kreg(dd, kr_errmask, 0ULL);
2759                tasklet_schedule(&dd->error_tasklet);
2760        }
2761        if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
2762                handle_7322_p_errors(dd->rcd[0]->ppd);
2763        if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
2764                handle_7322_p_errors(dd->rcd[1]->ppd);
2765}
2766
2767/*
2768 * Dynamically adjust the rcv int timeout for a context based on incoming
2769 * packet rate.
2770 */
2771static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
2772{
2773        struct qib_devdata *dd = rcd->dd;
2774        u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
2775
2776        /*
2777         * Dynamically adjust idle timeout on chip
2778         * based on number of packets processed.
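             * For example: if fewer than rcv_int_count packets arrived,
             * a timeout of 8 is halved to 4 (never below 2); if at least
             * rcv_int_count arrived, 8 is doubled to 16, capped at
             * rcv_int_timeout.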
2779         */
2780        if (npkts < rcv_int_count && timeout > 2)
2781                timeout >>= 1;
2782        else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
2783                timeout = min(timeout << 1, rcv_int_timeout);
2784        else
2785                return;
2786
2787        dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
2788        qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
2789}
2790
2791/*
2792 * This is the main interrupt handler.
2793 * It will normally only be used for low frequency interrupts but may
2794 * have to handle all interrupts if INTx is enabled or fewer than normal
2795 * MSIx interrupts were allocated.
2796 * This routine should ignore the interrupt bits for any of the
2797 * dedicated MSIx handlers.
2798 */
2799static irqreturn_t qib_7322intr(int irq, void *data)
2800{
2801        struct qib_devdata *dd = data;
2802        irqreturn_t ret;
2803        u64 istat;
2804        u64 ctxtrbits;
2805        u64 rmask;
2806        unsigned i;
2807        u32 npkts;
2808
2809        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
2810                /*
2811                 * This return value is not great, but we do not want the
2812                 * interrupt core code to remove our interrupt handler
2813                 * because we don't appear to be handling an interrupt
2814                 * during a chip reset.
2815                 */
2816                ret = IRQ_HANDLED;
2817                goto bail;
2818        }
2819
2820        istat = qib_read_kreg64(dd, kr_intstatus);
2821
2822        if (unlikely(istat == ~0ULL)) {
2823                qib_bad_intrstatus(dd);
2824                qib_dev_err(dd, "Interrupt status all f's, skipping\n");
2825                /* don't know if it was our interrupt or not */
2826                ret = IRQ_NONE;
2827                goto bail;
2828        }
2829
2830        istat &= dd->cspec->main_int_mask;
2831        if (unlikely(!istat)) {
2832                /* already handled, or shared and not us */
2833                ret = IRQ_NONE;
2834                goto bail;
2835        }
2836
2837        qib_stats.sps_ints++;
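            /*
             * int_counter saturates at ~0U; verify_interrupt() watches
             * it to confirm interrupts are being delivered.
             */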
2838        if (dd->int_counter != (u32) -1)
2839                dd->int_counter++;
2840
2841        /* handle "errors" of various kinds first, device ahead of port */
2842        if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
2843                              QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
2844                              INT_MASK_P(Err, 1))))
2845                unlikely_7322_intr(dd, istat);
2846
2847        /*
2848         * Clear the interrupt bits we found set, relatively early, so we
2849         * "know" the chip will have seen this by the time we process
2850         * the queue, and will re-interrupt if necessary.  The processor
2851         * itself won't take the interrupt again until we return.
2852         */
2853        qib_write_kreg(dd, kr_intclear, istat);
2854
2855        /*
2856         * Handle kernel receive queues before checking for pio buffers
2857         * available since receives can overflow; piobuf waiters can afford
2858         * a few extra cycles, since they were waiting anyway.
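             * Each context owns one RcvAvail and one RcvUrg bit, starting
             * at QIB_I_RCVAVAIL_LSB and QIB_I_RCVURG_LSB; rmask pairs the
             * two bits for context 0 and is shifted up once per context
             * in the loop below.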
2859         */
2860        ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
2861        if (ctxtrbits) {
2862                rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
2863                        (1ULL << QIB_I_RCVURG_LSB);
2864                for (i = 0; i < dd->first_user_ctxt; i++) {
2865                        if (ctxtrbits & rmask) {
2866                                ctxtrbits &= ~rmask;
2867                                if (dd->rcd[i])
2868                                        qib_kreceive(dd->rcd[i], NULL, &npkts);
2869                        }
2870                        rmask <<= 1;
2871                }
2872                if (ctxtrbits) {
2873                        ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
2874                                (ctxtrbits >> QIB_I_RCVURG_LSB);
2875                        qib_handle_urcv(dd, ctxtrbits);
2876                }
2877        }
2878
2879        if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
2880                sdma_7322_intr(dd, istat);
2881
2882        if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
2883                qib_ib_piobufavail(dd);
2884
2885        ret = IRQ_HANDLED;
2886bail:
2887        return ret;
2888}
2889
2890/*
2891 * Dedicated receive packet available interrupt handler.
2892 */
2893static irqreturn_t qib_7322pintr(int irq, void *data)
2894{
2895        struct qib_ctxtdata *rcd = data;
2896        struct qib_devdata *dd = rcd->dd;
2897        u32 npkts;
2898
2899        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
2900                /*
2901                 * This return value is not great, but we do not want the
2902                 * interrupt core code to remove our interrupt handler
2903                 * because we don't appear to be handling an interrupt
2904                 * during a chip reset.
2905                 */
2906                return IRQ_HANDLED;
2907
2908        qib_stats.sps_ints++;
2909        if (dd->int_counter != (u32) -1)
2910                dd->int_counter++;
2911
2912        /* Clear the interrupt bit we expect to be set. */
2913        qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
2914                       (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
2915
2916        qib_kreceive(rcd, NULL, &npkts);
2917
2918        return IRQ_HANDLED;
2919}
2920
2921/*
2922 * Dedicated Send buffer available interrupt handler.
2923 */
2924static irqreturn_t qib_7322bufavail(int irq, void *data)
2925{
2926        struct qib_devdata *dd = data;
2927
2928        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
2929                /*
2930                 * This return value is not great, but we do not want the
2931                 * interrupt core code to remove our interrupt handler
2932                 * because we don't appear to be handling an interrupt
2933                 * during a chip reset.
2934                 */
2935                return IRQ_HANDLED;
2936
2937        qib_stats.sps_ints++;
2938        if (dd->int_counter != (u32) -1)
2939                dd->int_counter++;
2940
2941        /* Clear the interrupt bit we expect to be set. */
2942        qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);
2943
2944        /* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
2945        if (dd->flags & QIB_INITTED)
2946                qib_ib_piobufavail(dd);
2947        else
2948                qib_wantpiobuf_7322_intr(dd, 0);
2949
2950        return IRQ_HANDLED;
2951}
2952
2953/*
2954 * Dedicated Send DMA interrupt handler.
2955 */
2956static irqreturn_t sdma_intr(int irq, void *data)
2957{
2958        struct qib_pportdata *ppd = data;
2959        struct qib_devdata *dd = ppd->dd;
2960
2961        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
2962                /*
2963                 * This return value is not great, but we do not want the
2964                 * interrupt core code to remove our interrupt handler
2965                 * because we don't appear to be handling an interrupt
2966                 * during a chip reset.
2967                 */
2968                return IRQ_HANDLED;
2969
2970        qib_stats.sps_ints++;
2971        if (dd->int_counter != (u32) -1)
2972                dd->int_counter++;
2973
2974        /* Clear the interrupt bit we expect to be set. */
2975        qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
2976                       INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
2977        qib_sdma_intr(ppd);
2978
2979        return IRQ_HANDLED;
2980}
2981
2982/*
2983 * Dedicated Send DMA idle interrupt handler.
2984 */
2985static irqreturn_t sdma_idle_intr(int irq, void *data)
2986{
2987        struct qib_pportdata *ppd = data;
2988        struct qib_devdata *dd = ppd->dd;
2989
2990        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
2991                /*
2992                 * This return value is not great, but we do not want the
2993                 * interrupt core code to remove our interrupt handler
2994                 * because we don't appear to be handling an interrupt
2995                 * during a chip reset.
2996                 */
2997                return IRQ_HANDLED;
2998
2999        qib_stats.sps_ints++;
3000        if (dd->int_counter != (u32) -1)
3001                dd->int_counter++;
3002
3003        /* Clear the interrupt bit we expect to be set. */
3004        qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3005                       INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
3006        qib_sdma_intr(ppd);
3007
3008        return IRQ_HANDLED;
3009}
3010
3011/*
3012 * Dedicated Send DMA progress interrupt handler.
3013 */
3014static irqreturn_t sdma_progress_intr(int irq, void *data)
3015{
3016        struct qib_pportdata *ppd = data;
3017        struct qib_devdata *dd = ppd->dd;
3018
3019        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3020                /*
3021                 * This return value is not great, but we do not want the
3022                 * interrupt core code to remove our interrupt handler
3023                 * because we don't appear to be handling an interrupt
3024                 * during a chip reset.
3025                 */
3026                return IRQ_HANDLED;
3027
3028        qib_stats.sps_ints++;
3029        if (dd->int_counter != (u32) -1)
3030                dd->int_counter++;
3031
3032        /* Clear the interrupt bit we expect to be set. */
3033        qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3034                       INT_MASK_P(SDmaProgress, 1) :
3035                       INT_MASK_P(SDmaProgress, 0));
3036        qib_sdma_intr(ppd);
3037
3038        return IRQ_HANDLED;
3039}
3040
3041/*
3042 * Dedicated Send DMA cleanup interrupt handler.
3043 */
3044static irqreturn_t sdma_cleanup_intr(int irq, void *data)
3045{
3046        struct qib_pportdata *ppd = data;
3047        struct qib_devdata *dd = ppd->dd;
3048
3049        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3050                /*
3051                 * This return value is not great, but we do not want the
3052                 * interrupt core code to remove our interrupt handler
3053                 * because we don't appear to be handling an interrupt
3054                 * during a chip reset.
3055                 */
3056                return IRQ_HANDLED;
3057
3058        qib_stats.sps_ints++;
3059        if (dd->int_counter != (u32) -1)
3060                dd->int_counter++;
3061
3062        /* Clear the interrupt bit we expect to be set. */
3063        qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3064                       INT_MASK_PM(SDmaCleanupDone, 1) :
3065                       INT_MASK_PM(SDmaCleanupDone, 0));
3066        qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
3067
3068        return IRQ_HANDLED;
3069}
3070
3071/*
3072 * Set up our chip-specific interrupt handler.
3073 * The interrupt type has already been setup, so
3074 * we just need to do the registration and error checking.
3075 * If we are using MSIx interrupts, we may fall back to
3076 * INTx later, if the interrupt handler doesn't get called
3077 * within 1/2 second (see verify_interrupt()).
3078 */
3079static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
3080{
3081        int ret, i, msixnum;
3082        u64 redirect[6];
3083        u64 mask;
3084        const struct cpumask *local_mask;
3085        int firstcpu, secondcpu = 0, currrcvcpu = 0;
3086
3087        if (!dd->num_pports)
3088                return;
3089
3090        if (clearpend) {
3091                /*
3092                 * if not switching interrupt types, be sure interrupts are
3093                 * disabled, and then clear anything pending at this point,
3094                 * because we are starting clean.
3095                 */
3096                qib_7322_set_intr_state(dd, 0);
3097
3098                /* clear the reset error, init error/hwerror mask */
3099                qib_7322_init_hwerrors(dd);
3100
3101                /* clear any interrupt bits that might be set */
3102                qib_write_kreg(dd, kr_intclear, ~0ULL);
3103
3104                /* make sure no pending MSIx intr, and clear diag reg */
3105                qib_write_kreg(dd, kr_intgranted, ~0ULL);
3106                qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
3107        }
3108
3109        if (!dd->cspec->num_msix_entries) {
3110                /* Try to get INTx interrupt */
3111try_intx:
3112                if (!dd->pcidev->irq) {
3113                        qib_dev_err(dd,
3114                                "irq is 0, BIOS error?  Interrupts won't work\n");
3115                        goto bail;
3116                }
3117                ret = request_irq(dd->pcidev->irq, qib_7322intr,
3118                                  IRQF_SHARED, QIB_DRV_NAME, dd);
3119                if (ret) {
3120                        qib_dev_err(dd,
3121                                "Couldn't setup INTx interrupt (irq=%d): %d\n",
3122                                dd->pcidev->irq, ret);
3123                        goto bail;
3124                }
3125                dd->cspec->irq = dd->pcidev->irq;
3126                dd->cspec->main_int_mask = ~0ULL;
3127                goto bail;
3128        }
3129
3130        /* Try to get MSIx interrupts */
3131        memset(redirect, 0, sizeof(redirect));
3132        mask = ~0ULL;
3133        msixnum = 0;
3134        local_mask = cpumask_of_pcibus(dd->pcidev->bus);
3135        firstcpu = cpumask_first(local_mask);
3136        if (firstcpu >= nr_cpu_ids ||
3137                        cpumask_weight(local_mask) == num_online_cpus()) {
3138                local_mask = topology_core_cpumask(0);
3139                firstcpu = cpumask_first(local_mask);
3140        }
3141        if (firstcpu < nr_cpu_ids) {
3142                secondcpu = cpumask_next(firstcpu, local_mask);
3143                if (secondcpu >= nr_cpu_ids)
3144                        secondcpu = firstcpu;
3145                currrcvcpu = secondcpu;
3146        }
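            /*
             * Affinity policy, as coded below: firstcpu gets the
             * non-receive vectors, and receive-context vectors are
             * spread round-robin over the remaining CPUs local to the
             * HCA, starting at secondcpu.
             */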
3147        for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
3148                irq_handler_t handler;
3149                void *arg;
3150                u64 val;
3151                int lsb, reg, sh;
3152
3153                dd->cspec->msix_entries[msixnum].
3154                        name[sizeof(dd->cspec->msix_entries[msixnum].name) - 1]
3155                        = '\0';
3156                if (i < ARRAY_SIZE(irq_table)) {
3157                        if (irq_table[i].port) {
3158                                /* skip if for a non-configured port */
3159                                if (irq_table[i].port > dd->num_pports)
3160                                        continue;
3161                                arg = dd->pport + irq_table[i].port - 1;
3162                        } else
3163                                arg = dd;
3164                        lsb = irq_table[i].lsb;
3165                        handler = irq_table[i].handler;
3166                        snprintf(dd->cspec->msix_entries[msixnum].name,
3167                                sizeof(dd->cspec->msix_entries[msixnum].name)
3168                                 - 1,
3169                                QIB_DRV_NAME "%d%s", dd->unit,
3170                                irq_table[i].name);
3171                } else {
3172                        unsigned ctxt;
3173
3174                        ctxt = i - ARRAY_SIZE(irq_table);
3175                        /* per krcvq context receive interrupt */
3176                        arg = dd->rcd[ctxt];
3177                        if (!arg)
3178                                continue;
3179                        if (qib_krcvq01_no_msi && ctxt < 2)
3180                                continue;
3181                        lsb = QIB_I_RCVAVAIL_LSB + ctxt;
3182                        handler = qib_7322pintr;
3183                        snprintf(dd->cspec->msix_entries[msixnum].name,
3184                                sizeof(dd->cspec->msix_entries[msixnum].name)
3185                                 - 1,
3186                                QIB_DRV_NAME "%d (kctx)", dd->unit);
3187                }
3188                ret = request_irq(
3189                        dd->cspec->msix_entries[msixnum].msix.vector,
3190                        handler, 0, dd->cspec->msix_entries[msixnum].name,
3191                        arg);
3192                if (ret) {
3193                        /*
3194                         * Shouldn't happen since the enable said we could
3195                         * have as many as we are trying to setup here.
3196                         */
3197                        qib_dev_err(dd,
3198                                "Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
3199                                msixnum,
3200                                dd->cspec->msix_entries[msixnum].msix.vector,
3201                                ret);
3202                        qib_7322_nomsix(dd);
3203                        goto try_intx;
3204                }
3205                dd->cspec->msix_entries[msixnum].arg = arg;
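                    /*
                     * Each IntRedirect register packs
                     * IBA7322_REDIRECT_VEC_PER_REG fields of
                     * SYM_LSB(IntRedirect0, vec1) bits each; the math
                     * below steers interrupt bit "lsb" to MSIx vector
                     * "msixnum".
                     */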
3206                if (lsb >= 0) {
3207                        reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
3208                        sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
3209                                SYM_LSB(IntRedirect0, vec1);
3210                        mask &= ~(1ULL << lsb);
3211                        redirect[reg] |= ((u64) msixnum) << sh;
3212                }
3213                val = qib_read_kreg64(dd, 2 * msixnum + 1 +
3214                        (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3215                if (firstcpu < nr_cpu_ids &&
3216                        zalloc_cpumask_var(
3217                                &dd->cspec->msix_entries[msixnum].mask,
3218                                GFP_KERNEL)) {
3219                        if (handler == qib_7322pintr) {
3220                                cpumask_set_cpu(currrcvcpu,
3221                                        dd->cspec->msix_entries[msixnum].mask);
3222                                currrcvcpu = cpumask_next(currrcvcpu,
3223                                        local_mask);
3224                                if (currrcvcpu >= nr_cpu_ids)
3225                                        currrcvcpu = secondcpu;
3226                        } else {
3227                                cpumask_set_cpu(firstcpu,
3228                                        dd->cspec->msix_entries[msixnum].mask);
3229                        }
3230                        irq_set_affinity_hint(
3231                                dd->cspec->msix_entries[msixnum].msix.vector,
3232                                dd->cspec->msix_entries[msixnum].mask);
3233                }
3234                msixnum++;
3235        }
3236        /* Initialize the vector mapping */
3237        for (i = 0; i < ARRAY_SIZE(redirect); i++)
3238                qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
3239        dd->cspec->main_int_mask = mask;
3240        tasklet_init(&dd->error_tasklet, qib_error_tasklet,
3241                (unsigned long)dd);
3242bail:;
3243}
3244
3245/**
3246 * qib_7322_boardname - fill in the board name and note features
3247 * @dd: the qlogic_ib device
3248 *
3249 * info will be based on the board revision register
3250 */
3251static unsigned qib_7322_boardname(struct qib_devdata *dd)
3252{
3253        /* Will need enumeration of board-types here */
3254        char *n;
3255        u32 boardid, namelen;
3256        unsigned features = DUAL_PORT_CAP;
3257
3258        boardid = SYM_FIELD(dd->revision, Revision, BoardID);
3259
3260        switch (boardid) {
3261        case 0:
3262                n = "InfiniPath_QLE7342_Emulation";
3263                break;
3264        case 1:
3265                n = "InfiniPath_QLE7340";
3266                dd->flags |= QIB_HAS_QSFP;
3267                features = PORT_SPD_CAP;
3268                break;
3269        case 2:
3270                n = "InfiniPath_QLE7342";
3271                dd->flags |= QIB_HAS_QSFP;
3272                break;
3273        case 3:
3274                n = "InfiniPath_QMI7342";
3275                break;
3276        case 4:
3277                n = "InfiniPath_Unsupported7342";
3278                qib_dev_err(dd, "Unsupported version of QMH7342\n");
3279                features = 0;
3280                break;
3281        case BOARD_QMH7342:
3282                n = "InfiniPath_QMH7342";
3283                features = 0x24;
3284                break;
3285        case BOARD_QME7342:
3286                n = "InfiniPath_QME7342";
3287                break;
3288        case 8:
3289                n = "InfiniPath_QME7362";
3290                dd->flags |= QIB_HAS_QSFP;
3291                break;
3292        case 15:
3293                n = "InfiniPath_QLE7342_TEST";
3294                dd->flags |= QIB_HAS_QSFP;
3295                break;
3296        default:
3297                n = "InfiniPath_QLE73xy_UNKNOWN";
3298                qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
3299                break;
3300        }
3301        dd->board_atten = 1; /* index into txdds_Xdr */
3302
3303        namelen = strlen(n) + 1;
3304        dd->boardname = kmalloc(namelen, GFP_KERNEL);
3305        if (!dd->boardname)
3306                qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
3307        else
3308                snprintf(dd->boardname, namelen, "%s", n);
3309
3310        snprintf(dd->boardversion, sizeof(dd->boardversion),
3311                 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
3312                 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
3313                 (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
3314                 dd->majrev, dd->minrev,
3315                 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
3316
3317        if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
3318                qib_devinfo(dd->pcidev,
3319                        "IB%u: Forced to single port mode by module parameter\n",
3320                        dd->unit);
3321                features &= PORT_SPD_CAP;
3322        }
3323
3324        return features;
3325}
3326
3327/*
3328 * This routine sleeps, so it can only be called from user context, not
3329 * from interrupt context.
3330 */
3331static int qib_do_7322_reset(struct qib_devdata *dd)
3332{
3333        u64 val;
3334        u64 *msix_vecsave;
3335        int i, msix_entries, ret = 1;
3336        u16 cmdval;
3337        u8 int_line, clinesz;
3338        unsigned long flags;
3339
3340        /* Use dev_err so it shows up in logs, etc. */
3341        qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
3342
3343        qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
3344
3345        msix_entries = dd->cspec->num_msix_entries;
3346
3347        /* no interrupts till re-initted */
3348        qib_7322_set_intr_state(dd, 0);
3349
3350        if (msix_entries) {
3351                qib_7322_nomsix(dd);
3352                /* can be up to 512 bytes, too big for stack */
3353                msix_vecsave = kmalloc(2 * msix_entries *
3354                        sizeof(u64), GFP_KERNEL);
3355                if (!msix_vecsave)
3356                        qib_dev_err(dd, "No mem to save MSIx data\n");
3357        } else
3358                msix_vecsave = NULL;
3359
3360        /*
3361         * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
3362         * info that is set up by the BIOS, so we have to save and restore
3363         * it ourselves.   There is some risk something could change it,
3364         * after we save it, but since we have disabled the MSIx, it
3365         * shouldn't be touched...
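         * Per the standard MSI-X table layout, each entry reads back
         * here as two u64s: the message address, then message data in
         * the low dword and Vector Control in the high dword; bit 32
         * (0x100000000) is the per-vector mask bit, which is why it is
         * stripped before saving below.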
3366         */
3367        for (i = 0; i < msix_entries; i++) {
3368                u64 vecaddr, vecdata;
3369                vecaddr = qib_read_kreg64(dd, 2 * i +
3370                                  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3371                vecdata = qib_read_kreg64(dd, 1 + 2 * i +
3372                                  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3373                if (msix_vecsave) {
3374                        msix_vecsave[2 * i] = vecaddr;
3375                        /* save it without the masked bit set */
3376                        msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
3377                }
3378        }
3379
3380        dd->pport->cpspec->ibdeltainprog = 0;
3381        dd->pport->cpspec->ibsymdelta = 0;
3382        dd->pport->cpspec->iblnkerrdelta = 0;
3383        dd->pport->cpspec->ibmalfdelta = 0;
3384        dd->int_counter = 0; /* so we check interrupts work again */
3385
3386        /*
3387         * Keep chip from being accessed until we are ready.  Use
3388         * writeq() directly, to allow the write even though QIB_PRESENT
3389         * isn't set.
3390         */
3391        dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
3392        dd->flags |= QIB_DOING_RESET;
3393        val = dd->control | QLOGIC_IB_C_RESET;
3394        writeq(val, &dd->kregbase[kr_control]);
3395
3396        for (i = 1; i <= 5; i++) {
3397                /*
3398                 * Allow MBIST, etc. to complete; longer on each retry.
3399                 * We sometimes get machine checks from bus timeout if no
3400                 * response, so for now, make it *really* long.
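                 * With i running 1 through 5, the delay below works
                 * out to 7, 10, 13, 16, and 19 seconds per attempt.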
3401                 */
3402                msleep(1000 + (1 + i) * 3000);
3403
3404                qib_pcie_reenable(dd, cmdval, int_line, clinesz);
3405
3406                /*
3407                 * Use readq directly, so we don't need to mark it as PRESENT
3408                 * until we get a successful indication that all is well.
3409                 */
3410                val = readq(&dd->kregbase[kr_revision]);
3411                if (val == dd->revision)
3412                        break;
3413                if (i == 5) {
3414                        qib_dev_err(dd,
3415                                "Failed to initialize after reset, unusable\n");
3416                        ret = 0;
3417                        goto  bail;
3418                }
3419        }
3420
3421        dd->flags |= QIB_PRESENT; /* it's back */
3422
3423        if (msix_entries) {
3424                /* restore the MSIx vector address and data if saved above */
3425                for (i = 0; i < msix_entries; i++) {
3426                        dd->cspec->msix_entries[i].msix.entry = i;
3427                        if (!msix_vecsave || !msix_vecsave[2 * i])
3428                                continue;
3429                        qib_write_kreg(dd, 2 * i +
3430                                (QIB_7322_MsixTable_OFFS / sizeof(u64)),
3431                                msix_vecsave[2 * i]);
3432                        qib_write_kreg(dd, 1 + 2 * i +
3433                                (QIB_7322_MsixTable_OFFS / sizeof(u64)),
3434                                msix_vecsave[1 + 2 * i]);
3435                }
3436        }
3437
3438        /* initialize the remaining registers.  */
3439        for (i = 0; i < dd->num_pports; ++i)
3440                write_7322_init_portregs(&dd->pport[i]);
3441        write_7322_initregs(dd);
3442
3443        if (qib_pcie_params(dd, dd->lbus_width,
3444                            &dd->cspec->num_msix_entries,
3445                            dd->cspec->msix_entries))
3446                qib_dev_err(dd,
3447                        "Reset failed to setup PCIe or interrupts; continuing anyway\n");
3448
3449        qib_setup_7322_interrupt(dd, 1);
3450
3451        for (i = 0; i < dd->num_pports; ++i) {
3452                struct qib_pportdata *ppd = &dd->pport[i];
3453
3454                spin_lock_irqsave(&ppd->lflags_lock, flags);
3455                ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
3456                ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3457                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3458        }
3459
3460bail:
3461        dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
3462        kfree(msix_vecsave);
3463        return ret;
3464}
3465
3466/**
3467 * qib_7322_put_tid - write a TID to the chip
3468 * @dd: the qlogic_ib device
3469 * @tidptr: pointer to the expected TID (in chip) to update
3470 * @type: RCVHQ_RCV_TYPE_EAGER for eager, RCVHQ_RCV_TYPE_EXPECTED otherwise
3471 * @pa: physical address of in memory buffer; tidinvalid if freeing
3472 */
3473static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
3474                             u32 type, unsigned long pa)
3475{
3476        if (!(dd->flags & QIB_PRESENT))
3477                return;
3478        if (pa != dd->tidinvalid) {
3479                u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
3480
3481                /* paranoia checks */
3482                if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
3483                        qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
3484                                    pa);
3485                        return;
3486                }
3487                if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
3488                        qib_dev_err(dd,
3489                                "Physical page address 0x%lx larger than supported\n",
3490                                pa);
3491                        return;
3492                }
3493
3494                if (type == RCVHQ_RCV_TYPE_EAGER)
3495                        chippa |= dd->tidtemplate;
3496                else /* for now, always full 4KB page */
3497                        chippa |= IBA7322_TID_SZ_4K;
3498                pa = chippa;
3499        }
3500        writeq(pa, tidptr);
3501        mmiowb();
3502}
3503
3504/**
3505 * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
3506 * @dd: the qlogic_ib device
3507 * @rcd: the ctxt whose TIDs to clear
3508 *
3509 * clear all TID entries for a ctxt, expected and eager.
3510 * Used from qib_close().
3511 */
3512static void qib_7322_clear_tids(struct qib_devdata *dd,
3513                                struct qib_ctxtdata *rcd)
3514{
3515        u64 __iomem *tidbase;
3516        unsigned long tidinv;
3517        u32 ctxt;
3518        int i;
3519
3520        if (!dd->kregbase || !rcd)
3521                return;
3522
3523        ctxt = rcd->ctxt;
3524
3525        tidinv = dd->tidinvalid;
3526        tidbase = (u64 __iomem *)
3527                ((char __iomem *) dd->kregbase +
3528                 dd->rcvtidbase +
3529                 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
3530
3531        for (i = 0; i < dd->rcvtidcnt; i++)
3532                qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
3533                                 tidinv);
3534
3535        tidbase = (u64 __iomem *)
3536                ((char __iomem *) dd->kregbase +
3537                 dd->rcvegrbase +
3538                 rcd->rcvegr_tid_base * sizeof(*tidbase));
3539
3540        for (i = 0; i < rcd->rcvegrcnt; i++)
3541                qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
3542                                 tidinv);
3543}
3544
3545/**
3546 * qib_7322_tidtemplate - setup constants for TID updates
3547 * @dd: the qlogic_ib device
3548 *
3549 * We setup stuff that we use a lot, to avoid calculating each time
3550 */
3551static void qib_7322_tidtemplate(struct qib_devdata *dd)
3552{
3553        /*
3554         * For now, we always allocate 4KB buffers (at init) so we can
3555         * receive max size packets.  We may want a module parameter to
3556         * specify 2KB or 4KB and/or make it per port instead of per device
3557         * for those who want to reduce memory footprint.  Note that the
3558         * rcvhdrentsize size must be large enough to hold the largest
3559         * IB header (currently 96 bytes) that we expect to handle (plus of
3560         * course the 2 dwords of RHF).
3561         */
3562        if (dd->rcvegrbufsize == 2048)
3563                dd->tidtemplate = IBA7322_TID_SZ_2K;
3564        else if (dd->rcvegrbufsize == 4096)
3565                dd->tidtemplate = IBA7322_TID_SZ_4K;
3566        dd->tidinvalid = 0;
3567}
3568
3569/**
3570 * qib_7322_get_base_info - set chip-specific flags for user code
3571 * @rcd: the qlogic_ib ctxt
3572 * @kinfo: qib_base_info pointer
3573 *
3574 * We set the PCIE flag because the lower bandwidth on PCIe vs
3575 * HyperTransport can affect some user packet algorithms.
3576 */
3578static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
3579                                  struct qib_base_info *kinfo)
3580{
3581        kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
3582                QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
3583                QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
3584        if (rcd->dd->cspec->r1)
3585                kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
3586        if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
3587                kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
3588
3589        return 0;
3590}
3591
3592static struct qib_message_header *
3593qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
3594{
3595        u32 offset = qib_hdrget_offset(rhf_addr);
3596
3597        return (struct qib_message_header *)
3598                (rhf_addr - dd->rhf_offset + offset);
3599}
3600
3601/*
3602 * Configure number of contexts.
3603 */
3604static void qib_7322_config_ctxts(struct qib_devdata *dd)
3605{
3606        unsigned long flags;
3607        u32 nchipctxts;
3608
3609        nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
3610        dd->cspec->numctxts = nchipctxts;
3611        if (qib_n_krcv_queues > 1 && dd->num_pports) {
3612                dd->first_user_ctxt = NUM_IB_PORTS +
3613                        (qib_n_krcv_queues - 1) * dd->num_pports;
3614                if (dd->first_user_ctxt > nchipctxts)
3615                        dd->first_user_ctxt = nchipctxts;
3616                dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
3617        } else {
3618                dd->first_user_ctxt = NUM_IB_PORTS;
3619                dd->n_krcv_queues = 1;
3620        }
3621
3622        if (!qib_cfgctxts) {
3623                int nctxts = dd->first_user_ctxt + num_online_cpus();
3624
3625                if (nctxts <= 6)
3626                        dd->ctxtcnt = 6;
3627                else if (nctxts <= 10)
3628                        dd->ctxtcnt = 10;
3629                else if (nctxts <= nchipctxts)
3630                        dd->ctxtcnt = nchipctxts;
3631        } else if (qib_cfgctxts < dd->num_pports)
3632                dd->ctxtcnt = dd->num_pports;
3633        else if (qib_cfgctxts <= nchipctxts)
3634                dd->ctxtcnt = qib_cfgctxts;
3635        if (!dd->ctxtcnt) /* none of the above, set to max */
3636                dd->ctxtcnt = nchipctxts;
3637
3638        /*
3639         * Chip can be configured for 6, 10, or 18 ctxts, and choice
3640         * affects number of eager TIDs per ctxt (1K, 2K, 4K).
3641         * Lock to be paranoid about later motion, etc.
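         * ContextCfg encoding as used below: 0 = 6 ctxts (the default),
         * 1 = 10 ctxts, 2 = 18 ctxts.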
3642         */
3643        spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
3644        if (dd->ctxtcnt > 10)
3645                dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
3646        else if (dd->ctxtcnt > 6)
3647                dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
3648        /* else configure for default 6 receive ctxts */
3649
3650        /* The XRC opcode is 5. */
3651        dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
3652
3653        /*
3654         * RcvCtrl *must* be written here so that the
3655         * chip understands how to change rcvegrcnt below.
3656         */
3657        qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
3658        spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
3659
3660        /* kr_rcvegrcnt changes based on the number of contexts enabled */
3661        dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
3662        if (qib_rcvhdrcnt)
3663                dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
3664        else
3665                dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt,
3666                                    dd->num_pports > 1 ? 1024U : 2048U);
3667}
3668
3669static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
3670{
3672        int lsb, ret = 0;
3673        u64 maskr; /* right-justified mask */
3674
3675        switch (which) {
3676
3677        case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
3678                ret = ppd->link_width_enabled;
3679                goto done;
3680
3681        case QIB_IB_CFG_LWID: /* Get currently active Link-width */
3682                ret = ppd->link_width_active;
3683                goto done;
3684
3685        case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
3686                ret = ppd->link_speed_enabled;
3687                goto done;
3688
3689        case QIB_IB_CFG_SPD: /* Get current Link spd */
3690                ret = ppd->link_speed_active;
3691                goto done;
3692
3693        case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
3694                lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3695                maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3696                break;
3697
3698        case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
3699                lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3700                maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3701                break;
3702
3703        case QIB_IB_CFG_LINKLATENCY:
3704                ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
3705                        SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
3706                goto done;
3707
3708        case QIB_IB_CFG_OP_VLS:
3709                ret = ppd->vls_operational;
3710                goto done;
3711
3712        case QIB_IB_CFG_VL_HIGH_CAP:
3713                ret = 16;
3714                goto done;
3715
3716        case QIB_IB_CFG_VL_LOW_CAP:
3717                ret = 16;
3718                goto done;
3719
3720        case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
3721                ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3722                                OverrunThreshold);
3723                goto done;
3724
3725        case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
3726                ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3727                                PhyerrThreshold);
3728                goto done;
3729
3730        case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
3731                /* will only take effect when the link state changes */
3732                ret = (ppd->cpspec->ibcctrl_a &
3733                       SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
3734                        IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
3735                goto done;
3736
3737        case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
3738                lsb = IBA7322_IBC_HRTBT_LSB;
3739                maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
3740                break;
3741
3742        case QIB_IB_CFG_PMA_TICKS:
3743                /*
3744                 * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs
3745                 * Since the clock is always 250MHz, the value is 3, 1 or 0.
3746                 */
3747                if (ppd->link_speed_active == QIB_IB_QDR)
3748                        ret = 3;
3749                else if (ppd->link_speed_active == QIB_IB_DDR)
3750                        ret = 1;
3751                else
3752                        ret = 0;
3753                goto done;
3754
3755        default:
3756                ret = -EINVAL;
3757                goto done;
3758        }
3759        ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
3760done:
3761        return ret;
3762}
3763
3764/*
3765 * Below again cribbed liberally from older version. Do not lean
3766 * heavily on it.
3767 */
3768#define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
3769#define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
3770        | (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
3771
3772static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
3773{
3774        struct qib_devdata *dd = ppd->dd;
3775        u64 maskr; /* right-justified mask */
3776        int lsb, ret = 0;
3777        u16 lcmd, licmd;
3778        unsigned long flags;
3779
3780        switch (which) {
3781        case QIB_IB_CFG_LIDLMC:
3782                /*
3783                 * Set LID and LMC. Combined to avoid possible hazard
3784                 * caller puts LMC in 16MSbits, DLID in 16LSbits of val
3785                 */
3786                lsb = IBA7322_IBC_DLIDLMC_SHIFT;
3787                maskr = IBA7322_IBC_DLIDLMC_MASK;
3788                /*
3789                 * For header-checking, the SLID in the packet will
3790                 * be masked with SendIBSLMCMask, and compared
3791                 * with SendIBSLIDAssignMask. Make sure we do not
3792                 * set any bits not covered by the mask, or we get
3793                 * false-positives.
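                 * Example, assuming the upper 16 bits carry the
                 * LMC-derived mask ~((1 << lmc) - 1) rather than the raw
                 * LMC: LID 0x1237 with LMC 2 gives val 0xfffc1237, so
                 * (ignoring SendIBSLIDAssignMask) the base SLID written
                 * below is 0x1237 & 0xfffc = 0x1234.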
3794                 */
3795                qib_write_kreg_port(ppd, krp_sendslid,
3796                                    val & (val >> 16) & SendIBSLIDAssignMask);
3797                qib_write_kreg_port(ppd, krp_sendslidmask,
3798                                    (val >> 16) & SendIBSLMCMask);
3799                break;
3800
3801        case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
3802                ppd->link_width_enabled = val;
3803                /* convert IB value to chip register value */
3804                if (val == IB_WIDTH_1X)
3805                        val = 0;
3806                else if (val == IB_WIDTH_4X)
3807                        val = 1;
3808                else
3809                        val = 3;
3810                maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
3811                lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
3812                break;
3813
3814        case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
3815                /*
3816                 * As with width, only write the actual register if the
3817                 * link is currently down, otherwise takes effect on next
3818                 * link change.  Since setting is being explicitly requested
3819                 * (via MAD or sysfs), clear autoneg failure status if speed
3820                 * autoneg is enabled.
3821                 */
3822                ppd->link_speed_enabled = val;
3823                val <<= IBA7322_IBC_SPEED_LSB;
3824                maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
3825                        IBA7322_IBC_MAX_SPEED_MASK;
3826                if (val & (val - 1)) {
3827                        /* Multiple speeds enabled */
3828                        val |= IBA7322_IBC_IBTA_1_2_MASK |
3829                                IBA7322_IBC_MAX_SPEED_MASK;
3830                        spin_lock_irqsave(&ppd->lflags_lock, flags);
3831                        ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3832                        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3833                } else if (val & IBA7322_IBC_SPEED_QDR)
3834                        val |= IBA7322_IBC_IBTA_1_2_MASK;
3835                /* IBTA 1.2 mode + min/max + speed bits are contiguous */
3836                lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
3837                break;
3838
3839        case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
3840                lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3841                maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3842                break;
3843
3844        case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
3845                lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3846                maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3847                break;
3848
3849        case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
3850                maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3851                                  OverrunThreshold);
3852                if (maskr != val) {
3853                        ppd->cpspec->ibcctrl_a &=
3854                                ~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
3855                        ppd->cpspec->ibcctrl_a |= (u64) val <<
3856                                SYM_LSB(IBCCtrlA_0, OverrunThreshold);
3857                        qib_write_kreg_port(ppd, krp_ibcctrl_a,
3858                                            ppd->cpspec->ibcctrl_a);
3859                        qib_write_kreg(dd, kr_scratch, 0ULL);
3860                }
3861                goto bail;
3862
3863        case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
3864                maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3865                                  PhyerrThreshold);
3866                if (maskr != val) {
3867                        ppd->cpspec->ibcctrl_a &=
3868                                ~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
3869                        ppd->cpspec->ibcctrl_a |= (u64) val <<
3870                                SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
3871                        qib_write_kreg_port(ppd, krp_ibcctrl_a,
3872                                            ppd->cpspec->ibcctrl_a);
3873                        qib_write_kreg(dd, kr_scratch, 0ULL);
3874                }
3875                goto bail;
3876
3877        case QIB_IB_CFG_PKEYS: /* update pkeys */
3878                maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
3879                        ((u64) ppd->pkeys[2] << 32) |
3880                        ((u64) ppd->pkeys[3] << 48);
3881                qib_write_kreg_port(ppd, krp_partitionkey, maskr);
3882                goto bail;
3883
3884        case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
3885                /* will only take effect when the link state changes */
3886                if (val == IB_LINKINITCMD_POLL)
3887                        ppd->cpspec->ibcctrl_a &=
3888                                ~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
3889                else /* SLEEP */
3890                        ppd->cpspec->ibcctrl_a |=
3891                                SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
3892                qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
3893                qib_write_kreg(dd, kr_scratch, 0ULL);
3894                goto bail;
3895
3896        case QIB_IB_CFG_MTU: /* update the MTU in IBC */
3897                /*
3898                 * Update our housekeeping variables, and set IBC max
3899                 * size, same as init code; max IBC is max we allow in
3900                 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
3901                 * Set even if it's unchanged, print debug message only
3902                 * on changes.
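                 * For example, an ibmaxlen of 4608 bytes gives
                 * (4608 >> 2) + 1 = 1153 dwords.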
3903                 */
3904                val = (ppd->ibmaxlen >> 2) + 1;
3905                ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
3906                ppd->cpspec->ibcctrl_a |= (u64)val <<
3907                        SYM_LSB(IBCCtrlA_0, MaxPktLen);
3908                qib_write_kreg_port(ppd, krp_ibcctrl_a,
3909                                    ppd->cpspec->ibcctrl_a);
3910                qib_write_kreg(dd, kr_scratch, 0ULL);
3911                goto bail;
3912
3913        case QIB_IB_CFG_LSTATE: /* set the IB link state */
3914                switch (val & 0xffff0000) {
3915                case IB_LINKCMD_DOWN:
3916                        lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
3917                        ppd->cpspec->ibmalfusesnap = 1;
3918                        ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
3919                                crp_errlink);
3920                        if (!ppd->cpspec->ibdeltainprog &&
3921                            qib_compat_ddr_negotiate) {
3922                                ppd->cpspec->ibdeltainprog = 1;
3923                                ppd->cpspec->ibsymsnap =
3924                                        read_7322_creg32_port(ppd,
3925                                                              crp_ibsymbolerr);
3926                                ppd->cpspec->iblnkerrsnap =
3927                                        read_7322_creg32_port(ppd,
3928                                                      crp_iblinkerrrecov);
3929                        }
3930                        break;
3931
3932                case IB_LINKCMD_ARMED:
3933                        lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
3934                        if (ppd->cpspec->ibmalfusesnap) {
3935                                ppd->cpspec->ibmalfusesnap = 0;
3936                                ppd->cpspec->ibmalfdelta +=
3937                                        read_7322_creg32_port(ppd,
3938                                                              crp_errlink) -
3939                                        ppd->cpspec->ibmalfsnap;
3940                        }
3941                        break;
3942
3943                case IB_LINKCMD_ACTIVE:
3944                        lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
3945                        break;
3946
3947                default:
3948                        ret = -EINVAL;
3949                        qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
3950                        goto bail;
3951                }
3952                switch (val & 0xffff) {
3953                case IB_LINKINITCMD_NOP:
3954                        licmd = 0;
3955                        break;
3956
3957                case IB_LINKINITCMD_POLL:
3958                        licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
3959                        break;
3960
3961                case IB_LINKINITCMD_SLEEP:
3962                        licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
3963                        break;
3964
3965                case IB_LINKINITCMD_DISABLE:
3966                        licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
3967                        ppd->cpspec->chase_end = 0;
3968                        /*
3969                         * stop state chase counter and timer, if running.
3970                         * wait for a pending timer, but don't clear .data (ppd)!
3971                         */
3972                        if (ppd->cpspec->chase_timer.expires) {
3973                                del_timer_sync(&ppd->cpspec->chase_timer);
3974                                ppd->cpspec->chase_timer.expires = 0;
3975                        }
3976                        break;
3977
3978                default:
3979                        ret = -EINVAL;
3980                        qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
3981                                    val & 0xffff);
3982                        goto bail;
3983                }
3984                qib_set_ib_7322_lstate(ppd, lcmd, licmd);
3985                goto bail;
3986
3987        case QIB_IB_CFG_OP_VLS:
3988                if (ppd->vls_operational != val) {
3989                        ppd->vls_operational = val;
3990                        set_vls(ppd);
3991                }
3992                goto bail;
3993
3994        case QIB_IB_CFG_VL_HIGH_LIMIT:
3995                qib_write_kreg_port(ppd, krp_highprio_limit, val);
3996                goto bail;
3997
3998        case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
3999                if (val > 3) {
4000                        ret = -EINVAL;
4001                        goto bail;
4002                }
4003                lsb = IBA7322_IBC_HRTBT_LSB;
4004                maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
4005                break;
4006
4007        case QIB_IB_CFG_PORT:
4008                /* val is the port number of the switch we are connected to. */
4009                if (ppd->dd->cspec->r1) {
4010                        cancel_delayed_work(&ppd->cpspec->ipg_work);
4011                        ppd->cpspec->ipg_tries = 0;
4012                }
4013                goto bail;
4014
4015        default:
4016                ret = -EINVAL;
4017                goto bail;
4018        }
4019        ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
4020        ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
4021        qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
4022        qib_write_kreg(dd, kr_scratch, 0);
4023bail:
4024        return ret;
4025}
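
    /*
     * Worked example for the QIB_IB_CFG_PKEYS case above (illustrative
     * only): the four 16-bit partition keys are packed into one 64-bit
     * register image, pkeys[i] occupying bits [16*i+15:16*i].  With
     * pkeys[] = { 0xffff, 0x8001, 0, 0 }:
     *
     *	maskr = 0xffff | (0x8001ULL << 16);	/* 0x000000008001ffff */
     *	qib_write_kreg_port(ppd, krp_partitionkey, maskr);
     */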
4026
4027static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
4028{
4029        int ret = 0;
4030        u64 val, ctrlb;
4031
4032        /* only IBC loopback, may add serdes and xgxs loopbacks later */
4033        if (!strncmp(what, "ibc", 3)) {
4034                ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
4035                                                       Loopback);
4036                val = 0; /* disable heartbeat, so link will come up */
4037                qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
4038                         ppd->dd->unit, ppd->port);
4039        } else if (!strncmp(what, "off", 3)) {
4040                ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
4041                                                        Loopback);
4042                /* enable heartbeat again */
4043                val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
4044                qib_devinfo(ppd->dd->pcidev,
4045                        "Disabling IB%u:%u IBC loopback (normal)\n",
4046                        ppd->dd->unit, ppd->port);
4047        } else
4048                ret = -EINVAL;
4049        if (!ret) {
4050                qib_write_kreg_port(ppd, krp_ibcctrl_a,
4051                                    ppd->cpspec->ibcctrl_a);
4052                ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
4053                                             << IBA7322_IBC_HRTBT_LSB);
4054                ppd->cpspec->ibcctrl_b = ctrlb | val;
4055                qib_write_kreg_port(ppd, krp_ibcctrl_b,
4056                                    ppd->cpspec->ibcctrl_b);
4057                qib_write_kreg(ppd->dd, kr_scratch, 0);
4058        }
4059        return ret;
4060}
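
    /*
     * Usage sketch (illustrative only): loopback mode is selected by
     * keyword, e.g. from a diagnostic path:
     *
     *	qib_7322_set_loopback(ppd, "ibc");	/* loop at IBC, heartbeat off */
     *	qib_7322_set_loopback(ppd, "off");	/* normal operation again */
     *
     * Any other string returns -EINVAL without touching the registers.
     */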
4061
4062static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4063                           struct ib_vl_weight_elem *vl)
4064{
4065        unsigned i;
4066
4067        for (i = 0; i < 16; i++, regno++, vl++) {
4068                u32 val = qib_read_kreg_port(ppd, regno);
4069
4070                vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
4071                        SYM_RMASK(LowPriority0_0, VirtualLane);
4072                vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
4073                        SYM_RMASK(LowPriority0_0, Weight);
4074        }
4075}
4076
4077static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4078                           struct ib_vl_weight_elem *vl)
4079{
4080        unsigned i;
4081
4082        for (i = 0; i < 16; i++, regno++, vl++) {
4083                u64 val;
4084
4085                val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
4086                        SYM_LSB(LowPriority0_0, VirtualLane)) |
4087                      ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
4088                        SYM_LSB(LowPriority0_0, Weight));
4089                qib_write_kreg_port(ppd, regno, val);
4090        }
4091        if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
4092                struct qib_devdata *dd = ppd->dd;
4093                unsigned long flags;
4094
4095                spin_lock_irqsave(&dd->sendctrl_lock, flags);
4096                ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
4097                qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4098                qib_write_kreg(dd, kr_scratch, 0);
4099                spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4100        }
4101}
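
    /*
     * Usage sketch (illustrative only): the VL arbitration tables are
     * programmed 16 entries at a time, one register per VL/weight pair:
     *
     *	struct ib_vl_weight_elem tbl[16] = {
     *		[0] = { .vl = 0, .weight = 1 },	/* rest zero-weighted */
     *	};
     *	set_vl_weights(ppd, krp_lowprio_0, tbl);
     *
     * set_vl_weights() also turns on IBVLArbiterEn the first time the
     * table is written.
     */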
4102
4103static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
4104{
4105        switch (which) {
4106        case QIB_IB_TBL_VL_HIGH_ARB:
4107                get_vl_weights(ppd, krp_highprio_0, t);
4108                break;
4109
4110        case QIB_IB_TBL_VL_LOW_ARB:
4111                get_vl_weights(ppd, krp_lowprio_0, t);
4112                break;
4113
4114        default:
4115                return -EINVAL;
4116        }
4117        return 0;
4118}
4119
4120static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
4121{
4122        switch (which) {
4123        case QIB_IB_TBL_VL_HIGH_ARB:
4124                set_vl_weights(ppd, krp_highprio_0, t);
4125                break;
4126
4127        case QIB_IB_TBL_VL_LOW_ARB:
4128                set_vl_weights(ppd, krp_lowprio_0, t);
4129                break;
4130
4131        default:
4132                return -EINVAL;
4133        }
4134        return 0;
4135}
4136
4137static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
4138                                    u32 updegr, u32 egrhd, u32 npkts)
4139{
4140        /*
4141         * Need to write timeout register before updating rcvhdrhead to ensure
4142         * that the timer is enabled on reception of a packet.
4143         */
4144        if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
4145                adjust_rcv_timeout(rcd, npkts);
4146        if (updegr)
4147                qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
4148        mmiowb();
4149        qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4150        qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4151        mmiowb();
4152}
4153
4154static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
4155{
4156        u32 head, tail;
4157
4158        head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
4159        if (rcd->rcvhdrtail_kvaddr)
4160                tail = qib_get_rcvhdrtail(rcd);
4161        else
4162                tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
4163        return head == tail;
4164}
4165
4166#define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
4167        QIB_RCVCTRL_CTXT_DIS | \
4168        QIB_RCVCTRL_TIDFLOW_ENB | \
4169        QIB_RCVCTRL_TIDFLOW_DIS | \
4170        QIB_RCVCTRL_TAILUPD_ENB | \
4171        QIB_RCVCTRL_TAILUPD_DIS | \
4172        QIB_RCVCTRL_INTRAVAIL_ENB | \
4173        QIB_RCVCTRL_INTRAVAIL_DIS | \
4174        QIB_RCVCTRL_BP_ENB | \
4175        QIB_RCVCTRL_BP_DIS)
4176
4177#define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
4178        QIB_RCVCTRL_CTXT_DIS | \
4179        QIB_RCVCTRL_PKEY_DIS | \
4180        QIB_RCVCTRL_PKEY_ENB)
4181
4182/*
4183 * Modify the RCVCTRL register in a chip-specific way. This
4184 * is a function because bit positions and (future) register
4185 * locations are chip-specific, but the needed operations are
4186 * generic. <op> is a bit-mask because we often want to
4187 * do multiple modifications.
4188 */
4189static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
4190                             int ctxt)
4191{
4192        struct qib_devdata *dd = ppd->dd;
4193        struct qib_ctxtdata *rcd;
4194        u64 mask, val;
4195        unsigned long flags;
4196
4197        spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
4198
4199        if (op & QIB_RCVCTRL_TIDFLOW_ENB)
4200                dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
4201        if (op & QIB_RCVCTRL_TIDFLOW_DIS)
4202                dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
4203        if (op & QIB_RCVCTRL_TAILUPD_ENB)
4204                dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4205        if (op & QIB_RCVCTRL_TAILUPD_DIS)
4206                dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
4207        if (op & QIB_RCVCTRL_PKEY_ENB)
4208                ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4209        if (op & QIB_RCVCTRL_PKEY_DIS)
4210                ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4211        if (ctxt < 0) {
4212                mask = (1ULL << dd->ctxtcnt) - 1;
4213                rcd = NULL;
4214        } else {
4215                mask = (1ULL << ctxt);
4216                rcd = dd->rcd[ctxt];
4217        }
4218        if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
4219                ppd->p_rcvctrl |=
4220                        (mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4221                if (!(dd->flags & QIB_NODMA_RTAIL)) {
4222                        op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
4223                        dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4224                }
4225                /* Write these registers before the context is enabled. */
4226                qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
4227                                    rcd->rcvhdrqtailaddr_phys);
4228                qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
4229                                    rcd->rcvhdrq_phys);
4230                rcd->seq_cnt = 1;
4231        }
4232        if (op & QIB_RCVCTRL_CTXT_DIS)
4233                ppd->p_rcvctrl &=
4234                        ~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4235        if (op & QIB_RCVCTRL_BP_ENB)
4236                dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
4237        if (op & QIB_RCVCTRL_BP_DIS)
4238                dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
4239        if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
4240                dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
4241        if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
4242                dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
4243        /*
4244         * Decide which registers to write depending on the ops enabled.
4245         * Special case is "flush" (no bits set at all)
4246         * which needs to write both.
4247         */
4248        if (op == 0 || (op & RCVCTRL_COMMON_MODS))
4249                qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
4250        if (op == 0 || (op & RCVCTRL_PORT_MODS))
4251                qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
4252        if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
4253                /*
4254                 * Init the context registers also; if we were
4255                 * disabled, tail and head should both be zero
4256                 * already from the enable, but since we don't
4257                 * know, we have to do it explicitly.
4258                 */
4259                val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
4260                qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
4261
4262                /* be sure the enabling write is seen; hd/tl should be 0 */
4263                (void) qib_read_kreg32(dd, kr_scratch);
4264                val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
4265                dd->rcd[ctxt]->head = val;
4266                /* If kctxt, interrupt on next receive. */
4267                if (ctxt < dd->first_user_ctxt)
4268                        val |= dd->rhdrhead_intr_off;
4269                qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4270        } else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
4271                dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
4272                /* arm rcv interrupt */
4273                val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
4274                qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4275        }
4276        if (op & QIB_RCVCTRL_CTXT_DIS) {
4277                unsigned f;
4278
4279                /* Now that the context is disabled, clear these registers. */
4280                if (ctxt >= 0) {
4281                        qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
4282                        qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
4283                        for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4284                                qib_write_ureg(dd, ur_rcvflowtable + f,
4285                                               TIDFLOW_ERRBITS, ctxt);
4286                } else {
4287                        unsigned i;
4288
4289                        for (i = 0; i < dd->cfgctxts; i++) {
4290                                qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
4291                                                    i, 0);
4292                                qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
4293                                for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4294                                        qib_write_ureg(dd, ur_rcvflowtable + f,
4295                                                       TIDFLOW_ERRBITS, i);
4296                        }
4297                }
4298        }
4299        spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
4300}
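
    /*
     * Usage sketch (illustrative only): enable a kernel context together
     * with its "interrupt on packet available" bit, then later issue a
     * pure flush (op == 0 rewrites both the common and per-port
     * registers; ctxt < 0 means all contexts):
     *
     *	rcvctrl_7322_mod(ppd, QIB_RCVCTRL_CTXT_ENB |
     *			      QIB_RCVCTRL_INTRAVAIL_ENB, ctxt);
     *	...
     *	rcvctrl_7322_mod(ppd, 0, -1);
     */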
4301
4302/*
4303 * Modify the SENDCTRL register in a chip-specific way. This
4304 * is a function where there are multiple such registers with
4305 * slightly different layouts.
4306 * The chip doesn't allow back-to-back sendctrl writes, so write
4307 * the scratch register after writing sendctrl.
4308 *
4309 * Which register is written depends on the operation.
4310 * Most operate on the common register, while
4311 * SEND_ENB and SEND_DIS operate on the per-port ones.
4312 * SEND_ENB is included in common because it can change SPCL_TRIG.
4313 */
4314#define SENDCTRL_COMMON_MODS (\
4315        QIB_SENDCTRL_CLEAR | \
4316        QIB_SENDCTRL_AVAIL_DIS | \
4317        QIB_SENDCTRL_AVAIL_ENB | \
4318        QIB_SENDCTRL_AVAIL_BLIP | \
4319        QIB_SENDCTRL_DISARM | \
4320        QIB_SENDCTRL_DISARM_ALL | \
4321        QIB_SENDCTRL_SEND_ENB)
4322
4323#define SENDCTRL_PORT_MODS (\
4324        QIB_SENDCTRL_CLEAR | \
4325        QIB_SENDCTRL_SEND_ENB | \
4326        QIB_SENDCTRL_SEND_DIS | \
4327        QIB_SENDCTRL_FLUSH)
4328
4329static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
4330{
4331        struct qib_devdata *dd = ppd->dd;
4332        u64 tmp_dd_sendctrl;
4333        unsigned long flags;
4334
4335        spin_lock_irqsave(&dd->sendctrl_lock, flags);
4336
4337        /* First the dd ones that are "sticky", saved in shadow */
4338        if (op & QIB_SENDCTRL_CLEAR)
4339                dd->sendctrl = 0;
4340        if (op & QIB_SENDCTRL_AVAIL_DIS)
4341                dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4342        else if (op & QIB_SENDCTRL_AVAIL_ENB) {
4343                dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
4344                if (dd->flags & QIB_USE_SPCL_TRIG)
4345                        dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
4346        }
4347
4348        /* Then the ppd ones that are "sticky", saved in shadow */
4349        if (op & QIB_SENDCTRL_SEND_DIS)
4350                ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
4351        else if (op & QIB_SENDCTRL_SEND_ENB)
4352                ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
4353
4354        if (op & QIB_SENDCTRL_DISARM_ALL) {
4355                u32 i, last;
4356
4357                tmp_dd_sendctrl = dd->sendctrl;
4358                last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
4359                /*
4360                 * Disarm any buffers that are not yet launched,
4361                 * disabling updates until done.
4362                 */
4363                tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4364                for (i = 0; i < last; i++) {
4365                        qib_write_kreg(dd, kr_sendctrl,
4366                                       tmp_dd_sendctrl |
4367                                       SYM_MASK(SendCtrl, Disarm) | i);
4368                        qib_write_kreg(dd, kr_scratch, 0);
4369                }
4370        }
4371
4372        if (op & QIB_SENDCTRL_FLUSH) {
4373                u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
4374
4375                /*
4376                 * Now drain all the fifos.  The Abort bit should never be
4377                 * needed, so for now, at least, we don't use it.
4378                 */
4379                tmp_ppd_sendctrl |=
4380                        SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
4381                        SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
4382                        SYM_MASK(SendCtrl_0, TxeBypassIbc);
4383                qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
4384                qib_write_kreg(dd, kr_scratch, 0);
4385        }
4386
4387        tmp_dd_sendctrl = dd->sendctrl;
4388
4389        if (op & QIB_SENDCTRL_DISARM)
4390                tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
4391                        ((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
4392                         SYM_LSB(SendCtrl, DisarmSendBuf));
4393        if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
4394            (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
4395                tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4396
4397        if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
4398                qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
4399                qib_write_kreg(dd, kr_scratch, 0);
4400        }
4401
4402        if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
4403                qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4404                qib_write_kreg(dd, kr_scratch, 0);
4405        }
4406
4407        if (op & QIB_SENDCTRL_AVAIL_BLIP) {
4408                qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
4409                qib_write_kreg(dd, kr_scratch, 0);
4410        }
4411
4412        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4413
4414        if (op & QIB_SENDCTRL_FLUSH) {
4415                u32 v;
4416                /*
4417                 * ensure writes have hit the chip, then do a few
4418                 * more reads, to allow DMA of the pioavail registers
4419                 * to occur, so the in-memory copy is in sync with
4420                 * the chip.  Not always safe to sleep.
4421                 */
4422                v = qib_read_kreg32(dd, kr_scratch);
4423                qib_write_kreg(dd, kr_scratch, v);
4424                v = qib_read_kreg32(dd, kr_scratch);
4425                qib_write_kreg(dd, kr_scratch, v);
4426                qib_read_kreg32(dd, kr_scratch);
4427        }
4428}
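
    /*
     * Usage sketch (illustrative only): disarm a single PIO buffer.  The
     * buffer number travels in the low bits of <op>, masked by the
     * chip's DisarmSendBuf field:
     *
     *	sendctrl_7322_mod(ppd, QIB_SENDCTRL_DISARM |
     *		(bufnum & QIB_7322_SendCtrl_DisarmSendBuf_RMASK));
     *
     * QIB_SENDCTRL_DISARM_ALL instead walks every 2k, 4k and VL15 buffer
     * with buffer-available updates held off.
     */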
4429
4430#define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
4431#define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
4432#define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */
4433
4434/**
4435 * qib_portcntr_7322 - read a per-port chip counter
4436 * @ppd: the qlogic_ib pport
4437 * @reg: the counter to read (not a chip offset)
4438 */
4439static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
4440{
4441        struct qib_devdata *dd = ppd->dd;
4442        u64 ret = 0ULL;
4443        u16 creg;
4444        /* 0xffff for unimplemented or synthesized counters */
4445        static const u32 xlator[] = {
4446                [QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
4447                [QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
4448                [QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
4449                [QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
4450                [QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
4451                [QIBPORTCNTR_SENDSTALL] = crp_sendstall,
4452                [QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
4453                [QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
4454                [QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
4455                [QIBPORTCNTR_RCVEBP] = crp_rcvebp,
4456                [QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
4457                [QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
4458                [QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed for 7322 */
4459                [QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
4460                [QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
4461                [QIBPORTCNTR_ERRICRC] = crp_erricrc,
4462                [QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
4463                [QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
4464                [QIBPORTCNTR_BADFORMAT] = crp_badformat,
4465                [QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
4466                [QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
4467                [QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
4468                [QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
4469                [QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
4470                [QIBPORTCNTR_ERRLINK] = crp_errlink,
4471                [QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
4472                [QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
4473                [QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
4474                [QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
4475                [QIBPORTCNTR_ERRPKEY] = crp_errpkey,
4476                /*
4477                 * the next 3 aren't really counters, but were implemented
4478                 * as counters in older chips, so this code still accesses
4479                 * them as though they were counters.
4480                 */
4481                [QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
4482                [QIBPORTCNTR_PSSTART] = krp_psstart,
4483                [QIBPORTCNTR_PSSTAT] = krp_psstat,
4484                /* pseudo-counter, summed for all ports */
4485                [QIBPORTCNTR_KHDROVFL] = 0xffff,
4486        };
4487
4488        if (reg >= ARRAY_SIZE(xlator)) {
4489                qib_devinfo(ppd->dd->pcidev,
4490                         "Unimplemented portcounter %u\n", reg);
4491                goto done;
4492        }
4493        creg = xlator[reg] & _PORT_CNTR_IDXMASK;
4494
4495        /* handle non-counters and special cases first */
4496        if (reg == QIBPORTCNTR_KHDROVFL) {
4497                int i;
4498
4499                /* sum over all kernel contexts (skip if mini_init) */
4500                for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
4501                        struct qib_ctxtdata *rcd = dd->rcd[i];
4502
4503                        if (!rcd || rcd->ppd != ppd)
4504                                continue;
4505                        ret += read_7322_creg32(dd, cr_base_egrovfl + i);
4506                }
4507                goto done;
4508        } else if (reg == QIBPORTCNTR_RXDROPPKT) {
4509                /*
4510                 * Used as part of the synthesis of port_rcv_errors
4511                 * in the verbs code for IBTA counters.  Not needed for 7322,
4512                 * because all the errors are already counted by other cntrs.
4513                 */
4514                goto done;
4515        } else if (reg == QIBPORTCNTR_PSINTERVAL ||
4516                   reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
4517                /* were counters in older chips, now per-port kernel regs */
4518                ret = qib_read_kreg_port(ppd, creg);
4519                goto done;
4520        }
4521
4522        /*
4523         * Only fast increment counters are 64 bits; use 32 bit reads to
4524         * avoid two independent reads when on Opteron.
4525         */
4526        if (xlator[reg] & _PORT_64BIT_FLAG)
4527                ret = read_7322_creg_port(ppd, creg);
4528        else
4529                ret = read_7322_creg32_port(ppd, creg);
4530        if (creg == crp_ibsymbolerr) {
4531                if (ppd->cpspec->ibdeltainprog)
4532                        ret -= ret - ppd->cpspec->ibsymsnap;
4533                ret -= ppd->cpspec->ibsymdelta;
4534        } else if (creg == crp_iblinkerrrecov) {
4535                if (ppd->cpspec->ibdeltainprog)
4536                        ret -= ret - ppd->cpspec->iblnkerrsnap;
4537                ret -= ppd->cpspec->iblnkerrdelta;
4538        } else if (creg == crp_errlink)
4539                ret -= ppd->cpspec->ibmalfdelta;
4540        else if (creg == crp_iblinkdown)
4541                ret += ppd->cpspec->iblnkdowndelta;
4542done:
4543        return ret;
4544}
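
    /*
     * Sketch (illustrative only) of how the xlator[] flags above steer
     * each read:
     *
     *	u32 ent = xlator[reg];
     *	u16 creg = ent & _PORT_CNTR_IDXMASK;
     *	if (ent & _PORT_64BIT_FLAG)
     *		val = read_7322_creg_port(ppd, creg);	/* one 64-bit read */
     *	else
     *		val = read_7322_creg32_port(ppd, creg);	/* 32-bit read */
     *
     * The 0xffff entries are synthesized (KHDROVFL sums the per-context
     * egrovfl counters) or deliberately return 0 (RXDROPPKT).
     */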
4545
4546/*
4547 * Device counter names (not port-specific), one line per stat,
4548 * single string.  Used by utilities like ipathstats to print the stats
4549 * in a way which works for different versions of drivers, without changing
4550 * the utility.  Names need to be 12 chars or less (w/o newline), for proper
4551 * display by the utility.
4552 * Non-error counters are first.
4553 * The start of the "error" counters is indicated by a leading "E " on the
4554 * first "error" counter; the marker doesn't count in the label length.
4555 * The EgrOvfl list needs to be last so we truncate them at the configured
4556 * context count for the device.
4557 * cntr7322indices contains the corresponding register indices.
4558 */
4559static const char cntr7322names[] =
4560        "Interrupts\n"
4561        "HostBusStall\n"
4562        "E RxTIDFull\n"
4563        "RxTIDInvalid\n"
4564        "RxTIDFloDrop\n" /* 7322 only */
4565        "Ctxt0EgrOvfl\n"
4566        "Ctxt1EgrOvfl\n"
4567        "Ctxt2EgrOvfl\n"
4568        "Ctxt3EgrOvfl\n"
4569        "Ctxt4EgrOvfl\n"
4570        "Ctxt5EgrOvfl\n"
4571        "Ctxt6EgrOvfl\n"
4572        "Ctxt7EgrOvfl\n"
4573        "Ctxt8EgrOvfl\n"
4574        "Ctxt9EgrOvfl\n"
4575        "Ctx10EgrOvfl\n"
4576        "Ctx11EgrOvfl\n"
4577        "Ctx12EgrOvfl\n"
4578        "Ctx13EgrOvfl\n"
4579        "Ctx14EgrOvfl\n"
4580        "Ctx15EgrOvfl\n"
4581        "Ctx16EgrOvfl\n"
4582        "Ctx17EgrOvfl\n"
4583        ;
4584
4585static const u32 cntr7322indices[] = {
4586        cr_lbint | _PORT_64BIT_FLAG,
4587        cr_lbstall | _PORT_64BIT_FLAG,
4588        cr_tidfull,
4589        cr_tidinvalid,
4590        cr_rxtidflowdrop,
4591        cr_base_egrovfl + 0,
4592        cr_base_egrovfl + 1,
4593        cr_base_egrovfl + 2,
4594        cr_base_egrovfl + 3,
4595        cr_base_egrovfl + 4,
4596        cr_base_egrovfl + 5,
4597        cr_base_egrovfl + 6,
4598        cr_base_egrovfl + 7,
4599        cr_base_egrovfl + 8,
4600        cr_base_egrovfl + 9,
4601        cr_base_egrovfl + 10,
4602        cr_base_egrovfl + 11,
4603        cr_base_egrovfl + 12,
4604        cr_base_egrovfl + 13,
4605        cr_base_egrovfl + 14,
4606        cr_base_egrovfl + 15,
4607        cr_base_egrovfl + 16,
4608        cr_base_egrovfl + 17,
4609};
4610
4611/*
4612 * same as cntr7322names and cntr7322indices, but for port-specific counters.
4613 * portcntr7322indices is somewhat complicated by some registers needing
4614 * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
4615 */
4616static const char portcntr7322names[] =
4617        "TxPkt\n"
4618        "TxFlowPkt\n"
4619        "TxWords\n"
4620        "RxPkt\n"
4621        "RxFlowPkt\n"
4622        "RxWords\n"
4623        "TxFlowStall\n"
4624        "TxDmaDesc\n"  /* 7220 and 7322-only */
4625        "E RxDlidFltr\n"  /* 7220 and 7322-only */
4626        "IBStatusChng\n"
4627        "IBLinkDown\n"
4628        "IBLnkRecov\n"
4629        "IBRxLinkErr\n"
4630        "IBSymbolErr\n"
4631        "RxLLIErr\n"
4632        "RxBadFormat\n"
4633        "RxBadLen\n"
4634        "RxBufOvrfl\n"
4635        "RxEBP\n"
4636        "RxFlowCtlErr\n"
4637        "RxICRCerr\n"
4638        "RxLPCRCerr\n"
4639        "RxVCRCerr\n"
4640        "RxInvalLen\n"
4641        "RxInvalPKey\n"
4642        "RxPktDropped\n"
4643        "TxBadLength\n"
4644        "TxDropped\n"
4645        "TxInvalLen\n"
4646        "TxUnderrun\n"
4647        "TxUnsupVL\n"
4648        "RxLclPhyErr\n" /* 7220 and 7322-only from here down */
4649        "RxVL15Drop\n"
4650        "RxVlErr\n"
4651        "XcessBufOvfl\n"
4652        "RxQPBadCtxt\n" /* 7322-only from here down */
4653        "TXBadHeader\n"
4654        ;
4655
4656static const u32 portcntr7322indices[] = {
4657        QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
4658        crp_pktsendflow,
4659        QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
4660        QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
4661        crp_pktrcvflowctrl,
4662        QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
4663        QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
4664        crp_txsdmadesc | _PORT_64BIT_FLAG,
4665        crp_rxdlidfltr,
4666        crp_ibstatuschange,
4667        QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
4668        QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
4669        QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
4670        QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
4671        QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
4672        QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
4673        QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
4674        QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
4675        QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
4676        crp_rcvflowctrlviol,
4677        QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
4678        QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
4679        QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
4680        QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
4681        QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
4682        QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
4683        crp_txminmaxlenerr,
4684        crp_txdroppedpkt,
4685        crp_txlenerr,
4686        crp_txunderrun,
4687        crp_txunsupvl,
4688        QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
4689        QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
4690        QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
4691        QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
4692        crp_rxqpinvalidctxt,
4693        crp_txhdrerr,
4694};
4695
4696/* do all the setup to make the counter reads efficient later */
4697static void init_7322_cntrnames(struct qib_devdata *dd)
4698{
4699        int i, j = 0;
4700        char *s;
4701
4702        for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
4703             i++) {
4704                /* we always have at least one counter before the egrovfl */
4705                if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
4706                        j = 1;
4707                s = strchr(s + 1, '\n');
4708                if (s && j)
4709                        j++;
4710        }
4711        dd->cspec->ncntrs = i;
4712        if (!s)
4713                /* full list; size is without terminating null */
4714                dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
4715        else
4716                dd->cspec->cntrnamelen = 1 + s - cntr7322names;
4717        dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
4718                * sizeof(u64), GFP_KERNEL);
4719        if (!dd->cspec->cntrs)
4720                qib_dev_err(dd, "Failed allocation for counters\n");
4721
4722        for (i = 0, s = (char *)portcntr7322names; s; i++)
4723                s = strchr(s + 1, '\n');
4724        dd->cspec->nportcntrs = i - 1;
4725        dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
4726        for (i = 0; i < dd->num_pports; ++i) {
4727                dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
4728                        * sizeof(u64), GFP_KERNEL);
4729                if (!dd->pport[i].cpspec->portcntrs)
4730                        qib_dev_err(dd,
4731                                "Failed allocation for portcounters\n");
4732        }
4733}
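
    /*
     * Sketch (illustrative only): a consumer such as ipathstats pairs
     * the newline-separated name string with the u64 value array
     * positionally:
     *
     *	const char *s = names;
     *	for (i = 0; i < ncntrs; i++) {
     *		const char *e = strchr(s, '\n');
     *		/* label is [s, e); a leading "E " marks the first
     *		 * error counter and is not part of the label;
     *		 * the matching value is cntrs[i] */
     *		s = e + 1;
     *	}
     */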
4734
4735static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
4736                              u64 **cntrp)
4737{
4738        u32 ret;
4739
4740        if (namep) {
4741                ret = dd->cspec->cntrnamelen;
4742                if (pos >= ret)
4743                        ret = 0; /* final read after getting everything */
4744                else
4745                        *namep = (char *) cntr7322names;
4746        } else {
4747                u64 *cntr = dd->cspec->cntrs;
4748                int i;
4749
4750                ret = dd->cspec->ncntrs * sizeof(u64);
4751                if (!cntr || pos >= ret) {
4752                        /* everything read, or couldn't get memory */
4753                        ret = 0;
4754                        goto done;
4755                }
4756                *cntrp = cntr;
4757                for (i = 0; i < dd->cspec->ncntrs; i++)
4758                        if (cntr7322indices[i] & _PORT_64BIT_FLAG)
4759                                *cntr++ = read_7322_creg(dd,
4760                                                         cntr7322indices[i] &
4761                                                         _PORT_CNTR_IDXMASK);
4762                        else
4763                                *cntr++ = read_7322_creg32(dd,
4764                                                           cntr7322indices[i]);
4765        }
4766done:
4767        return ret;
4768}
4769
4770static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
4771                                  char **namep, u64 **cntrp)
4772{
4773        u32 ret;
4774
4775        if (namep) {
4776                ret = dd->cspec->portcntrnamelen;
4777                if (pos >= ret)
4778                        ret = 0; /* final read after getting everything */
4779                else
4780                        *namep = (char *)portcntr7322names;
4781        } else {
4782                struct qib_pportdata *ppd = &dd->pport[port];
4783                u64 *cntr = ppd->cpspec->portcntrs;
4784                int i;
4785
4786                ret = dd->cspec->nportcntrs * sizeof(u64);
4787                if (!cntr || pos >= ret) {
4788                        /* everything read, or couldn't get memory */
4789                        ret = 0;
4790                        goto done;
4791                }
4792                *cntrp = cntr;
4793                for (i = 0; i < dd->cspec->nportcntrs; i++) {
4794                        if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
4795                                *cntr++ = qib_portcntr_7322(ppd,
4796                                        portcntr7322indices[i] &
4797                                        _PORT_CNTR_IDXMASK);
4798                        else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
4799                                *cntr++ = read_7322_creg_port(ppd,
4800                                           portcntr7322indices[i] &
4801                                            _PORT_CNTR_IDXMASK);
4802                        else
4803                                *cntr++ = read_7322_creg32_port(ppd,
4804                                           portcntr7322indices[i]);
4805                }
4806        }
4807done:
4808        return ret;
4809}
4810
4811/**
4812 * qib_get_7322_faststats - get word counters from chip before they overflow
4813 * @opaque: contains a pointer to the qlogic_ib device qib_devdata
4814 *
4815 * Largely vestigial: the IBA7322 has no "small fast counters", so the
4816 * only real purpose of this function is to maintain the notion of
4817 * "active time", which in turn is only logged into the eeprom,
4818 * which we don't have, yet, for 7322-based boards.
4819 *
4820 * Called from add_timer.
4821 */
4822static void qib_get_7322_faststats(unsigned long opaque)
4823{
4824        struct qib_devdata *dd = (struct qib_devdata *) opaque;
4825        struct qib_pportdata *ppd;
4826        unsigned long flags;
4827        u64 traffic_wds;
4828        int pidx;
4829
4830        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
4831                ppd = dd->pport + pidx;
4832
4833                /*
4834                 * If the port isn't enabled or isn't operational, or
4835                 * diags are running (which can cause memory diags to
4836                 * fail), skip this port this time.
4837                 */
4838                if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
4839                    || dd->diag_client)
4840                        continue;
4841
4842                /*
4843                 * Maintain an activity timer, based on traffic
4844                 * exceeding a threshold, so we need to check the word-counts
4845                 * even if they are 64-bit.
4846                 */
4847                traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
4848                        qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
4849                spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
4850                traffic_wds -= ppd->dd->traffic_wds;
4851                ppd->dd->traffic_wds += traffic_wds;
4852                if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
4853                        atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
4854                spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
4855                if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
4856                                                QIB_IB_QDR) &&
4857                    (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
4858                                    QIBL_LINKACTIVE)) &&
4859                    ppd->cpspec->qdr_dfe_time &&
4860                    time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) {
4861                        ppd->cpspec->qdr_dfe_on = 0;
4862
4863                        qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
4864                                            ppd->dd->cspec->r1 ?
4865                                            QDR_STATIC_ADAPT_INIT_R1 :
4866                                            QDR_STATIC_ADAPT_INIT);
4867                        force_h1(ppd);
4868                }
4869        }
4870        mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
4871}
4872
4873/*
4874 * If we were using MSIx, try to fall back to INTx.
4875 */
4876static int qib_7322_intr_fallback(struct qib_devdata *dd)
4877{
4878        if (!dd->cspec->num_msix_entries)
4879                return 0; /* already using INTx */
4880
4881        qib_devinfo(dd->pcidev,
4882                "MSIx interrupt not detected, trying INTx interrupts\n");
4883        qib_7322_nomsix(dd);
4884        qib_enable_intx(dd->pcidev);
4885        qib_setup_7322_interrupt(dd, 0);
4886        return 1;
4887}
4888
4889/*
4890 * Reset the XGXS (between serdes and IBC).  Slightly less intrusive
4891 * than resetting the IBC or external link state, and useful in some
4892 * cases to cause some retraining.  To do this right, we reset IBC
4893 * as well, then return to the previous state (which may still be in reset).
4894 * NOTE: some callers of this "know" this writes the current value
4895 * of cpspec->ibcctrl_a as part of its operation, so if that changes,
4896 * check all callers.
4897 */
4898static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
4899{
4900        u64 val;
4901        struct qib_devdata *dd = ppd->dd;
4902        const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
4903                SYM_MASK(IBPCSConfig_0, xcv_treset) |
4904                SYM_MASK(IBPCSConfig_0, tx_rx_reset);
4905
4906        val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
4907        qib_write_kreg(dd, kr_hwerrmask,
4908                       dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
4909        qib_write_kreg_port(ppd, krp_ibcctrl_a,
4910                            ppd->cpspec->ibcctrl_a &
4911                            ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
4912
4913        qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
4914        qib_read_kreg32(dd, kr_scratch);
4915        qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
4916        qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
4917        qib_write_kreg(dd, kr_scratch, 0ULL);
4918        qib_write_kreg(dd, kr_hwerrclear,
4919                       SYM_MASK(HwErrClear, statusValidNoEopClear));
4920        qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
4921}
4922
4923/*
4924 * This code for non-IBTA-compliant IB speed negotiation is only known to
4925 * work for the SDR to DDR transition, and only between an HCA and a switch
4926 * with recent firmware.  It is based on observed heuristics, rather than
4927 * actual knowledge of the non-compliant speed negotiation.
4928 * It has a number of hard-coded fields, since the hope is to rewrite this
4929 * when a spec is available on how the negotiation is intended to work.
4930 */
4931static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
4932                                 u32 dcnt, u32 *data)
4933{
4934        int i;
4935        u64 pbc;
4936        u32 __iomem *piobuf;
4937        u32 pnum, control, len;
4938        struct qib_devdata *dd = ppd->dd;
4939
4940        i = 0;
4941        len = 7 + dcnt + 1; /* 7-dword header, dcnt data dwords, 1 icrc dword */
4942        control = qib_7322_setpbc_control(ppd, len, 0, 15);
4943        pbc = ((u64) control << 32) | len;
4944        while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
4945                if (i++ > 15)
4946                        return;
4947                udelay(2);
4948        }
4949        /* disable header check on this packet, since it can't be valid */
4950        dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
4951        writeq(pbc, piobuf);
4952        qib_flush_wc();
4953        qib_pio_copy(piobuf + 2, hdr, 7);
4954        qib_pio_copy(piobuf + 9, data, dcnt);
4955        if (dd->flags & QIB_USE_SPCL_TRIG) {
4956                u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
4957
4958                qib_flush_wc();
4959                __raw_writel(0xaebecede, piobuf + spcl_off);
4960        }
4961        qib_flush_wc();
4962        qib_sendbuf_done(dd, pnum);
4963        /* and re-enable hdr check */
4964        dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
4965}
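
    /*
     * Worked example (illustrative only) of the PBC built above: the
     * payloads are 0x40 dwords, so len = 7 + 0x40 + 1 = 0x48 dwords and
     *
     *	pbc = ((u64) control << 32) | 0x48;
     *
     * i.e. the dword length in the low word and the control returned by
     * qib_7322_setpbc_control() in the high word (the trailing 15 in
     * that call presumably selects VL15).
     */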
4966
4967/*
4968 * _start packet gets sent twice at start, _done gets sent twice at end
4969 */
4970static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
4971{
4972        struct qib_devdata *dd = ppd->dd;
4973        static u32 swapped;
4974        u32 dw, i, hcnt, dcnt, *data;
4975        static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
4976        static u32 madpayload_start[0x40] = {
4977                0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
4978                0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
4979                0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
4980                };
4981        static u32 madpayload_done[0x40] = {
4982                0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
4983                0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
4984                0x40000001, 0x1388, 0x15e, /* rest 0's */
4985                };
4986
4987        dcnt = ARRAY_SIZE(madpayload_start);
4988        hcnt = ARRAY_SIZE(hdr);
4989        if (!swapped) {
4990                /* for maintainability, do it at runtime */
4991                for (i = 0; i < hcnt; i++) {
4992                        dw = (__force u32) cpu_to_be32(hdr[i]);
4993                        hdr[i] = dw;
4994                }
4995                for (i = 0; i < dcnt; i++) {
4996                        dw = (__force u32) cpu_to_be32(madpayload_start[i]);
4997                        madpayload_start[i] = dw;
4998                        dw = (__force u32) cpu_to_be32(madpayload_done[i]);
4999                        madpayload_done[i] = dw;
5000                }
5001                swapped = 1;
5002        }
5003
5004        data = which ? madpayload_done : madpayload_start;
5005
5006        autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5007        qib_read_kreg64(dd, kr_scratch);
5008        udelay(2);
5009        autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5010        qib_read_kreg64(dd, kr_scratch);
5011        udelay(2);
5012}
5013
5014/*
5015 * Do the absolute minimum to cause an IB speed change, and make it
5016 * ready, but don't actually trigger the change.  The caller will
5017 * do that when ready (if the link is in the Polling training state,
5018 * it will happen immediately, otherwise when the link next goes down).
5019 *
5020 * This routine should only be used as part of the DDR autonegotiation
5021 * code for devices that are not compliant with IB 1.2 (or code that
5022 * fixes things up for same).
5023 *
5024 * When the link has gone down and autoneg is enabled, or when autoneg
5025 * has failed and we give up until next time, we set both speeds, and
5026 * then we want IBTA negotiation enabled as well as "use max enabled speed".
5027 */
5028static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
5029{
5030        u64 newctrlb;
5031        newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
5032                                    IBA7322_IBC_IBTA_1_2_MASK |
5033                                    IBA7322_IBC_MAX_SPEED_MASK);
5034
5035        if (speed & (speed - 1)) /* multiple speeds */
5036                newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
5037                                    IBA7322_IBC_IBTA_1_2_MASK |
5038                                    IBA7322_IBC_MAX_SPEED_MASK;
5039        else
5040                newctrlb |= speed == QIB_IB_QDR ?
5041                        IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
5042                        ((speed == QIB_IB_DDR ?
5043                          IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));
5044
5045        if (newctrlb == ppd->cpspec->ibcctrl_b)
5046                return;
5047
5048        ppd->cpspec->ibcctrl_b = newctrlb;
5049        qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
5050        qib_write_kreg(ppd->dd, kr_scratch, 0);
5051}
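
    /*
     * Note (illustrative only): "speed & (speed - 1)" is the usual
     * more-than-one-bit-set test.  E.g. QIB_IB_SDR | QIB_IB_DDR takes
     * the multi-speed branch, enabling IBTA 1.2 negotiation plus "max
     * enabled speed", while a single bit such as QIB_IB_QDR programs
     * that speed directly.
     */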
5052
5053/*
5054 * This routine is only used when we are not talking to another
5055 * IB 1.2-compliant device that we think can do DDR.
5056 * (This includes all existing switch chips as of Oct 2007.)
5057 * 1.2-compliant devices go directly to DDR prior to reaching INIT.
5058 */
5059static void try_7322_autoneg(struct qib_pportdata *ppd)
5060{
5061        unsigned long flags;
5062
5063        spin_lock_irqsave(&ppd->lflags_lock, flags);
5064        ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
5065        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5066        qib_autoneg_7322_send(ppd, 0);
5067        set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5068        qib_7322_mini_pcs_reset(ppd);
5069        /* 2 msec is minimum length of a poll cycle */
5070        queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
5071                           msecs_to_jiffies(2));
5072}
5073
5074/*
5075 * Handle the empirically determined mechanism for auto-negotiation
5076 * of DDR speed with switches.
5077 */
5078static void autoneg_7322_work(struct work_struct *work)
5079{
5080        struct qib_pportdata *ppd;
5081        struct qib_devdata *dd;
5082        u64 startms;
5083        u32 i;
5084        unsigned long flags;
5085
5086        ppd = container_of(work, struct qib_chippport_specific,
5087                            autoneg_work.work)->ppd;
5088        dd = ppd->dd;
5089
5090        startms = jiffies_to_msecs(jiffies);
5091
5092        /*
5093         * Busy-wait for this first part; it should be at most a
5094         * few hundred usec, since we scheduled ourselves for 2 msec.
5095         */
5096        for (i = 0; i < 25; i++) {
5097                if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
5098                     == IB_7322_LT_STATE_POLLQUIET) {
5099                        qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
5100                        break;
5101                }
5102                udelay(100);
5103        }
5104
5105        if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
5106                goto done; /* we got there early or were told to stop */
5107
5108        /* we expect this to time out */
5109        if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5110                               !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5111                               msecs_to_jiffies(90)))
5112                goto done;
5113        qib_7322_mini_pcs_reset(ppd);
5114
5115        /* we expect this to time out */
5116        if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5117                               !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5118                               msecs_to_jiffies(1700)))
5119                goto done;
5120        qib_7322_mini_pcs_reset(ppd);
5121
5122        set_7322_ibspeed_fast(ppd, QIB_IB_SDR);
5123
5124        /*
5125         * Wait up to 250 msec for link to train and get to INIT at DDR;
5126         * this should terminate early.
5127         */
5128        wait_event_timeout(ppd->cpspec->autoneg_wait,
5129                !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5130                msecs_to_jiffies(250));
5131done:
5132        if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
5133                spin_lock_irqsave(&ppd->lflags_lock, flags);
5134                ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
5135                if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
5136                        ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
5137                        ppd->cpspec->autoneg_tries = 0;
5138                }
5139                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5140                set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5141        }
5142}
5143
5144/*
5145 * This routine is used to request that the IPG be set in the QLogic switch.
5146 * Only called if r1.
5147 */
5148static void try_7322_ipg(struct qib_pportdata *ppd)
5149{
5150        struct qib_ibport *ibp = &ppd->ibport_data;
5151        struct ib_mad_send_buf *send_buf;
5152        struct ib_mad_agent *agent;
5153        struct ib_smp *smp;
5154        unsigned delay;
5155        int ret;
5156
5157        agent = ibp->send_agent;
5158        if (!agent)
5159                goto retry;
5160
5161        send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
5162                                      IB_MGMT_MAD_DATA, GFP_ATOMIC);
5163        if (IS_ERR(send_buf))
5164                goto retry;
5165
5166        if (!ibp->smi_ah) {
5167                struct ib_ah *ah;
5168
5169                ah = qib_create_qp0_ah(ibp, be16_to_cpu(IB_LID_PERMISSIVE));
5170                if (IS_ERR(ah))
5171                        ret = PTR_ERR(ah);
5172                else {
5173                        send_buf->ah = ah;
5174                        ibp->smi_ah = to_iah(ah);
5175                        ret = 0;
5176                }
5177        } else {
5178                send_buf->ah = &ibp->smi_ah->ibah;
5179                ret = 0;
5180        }
5181
5182        smp = send_buf->mad;
5183        smp->base_version = IB_MGMT_BASE_VERSION;
5184        smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
5185        smp->class_version = 1;
5186        smp->method = IB_MGMT_METHOD_SEND;
5187        smp->hop_cnt = 1;
5188        smp->attr_id = QIB_VENDOR_IPG;
5189        smp->attr_mod = 0;
5190
5191        if (!ret)
5192                ret = ib_post_send_mad(send_buf, NULL);
5193        if (ret)
5194                ib_free_send_mad(send_buf);
5195retry:
5196        delay = 2 << ppd->cpspec->ipg_tries;
5197        queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
5198                           msecs_to_jiffies(delay));
5199}
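
    /*
     * Note (illustrative only): the retry delay grows exponentially,
     * delay = 2 << ipg_tries, i.e. 2, 4, 8, ... msec; ipg_7322_work()
     * below stops retrying after 10 tries while the link stays up.
     */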
5200
5201/*
5202 * Timeout handler for setting IPG.
5203 * Only called if r1.
5204 */
5205static void ipg_7322_work(struct work_struct *work)
5206{
5207        struct qib_pportdata *ppd;
5208
5209        ppd = container_of(work, struct qib_chippport_specific,
5210                           ipg_work.work)->ppd;
5211        if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
5212            && ++ppd->cpspec->ipg_tries <= 10)
5213                try_7322_ipg(ppd);
5214}
5215
5216static u32 qib_7322_iblink_state(u64 ibcs)
5217{
5218        u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);
5219
5220        switch (state) {
5221        case IB_7322_L_STATE_INIT:
5222                state = IB_PORT_INIT;
5223                break;
5224        case IB_7322_L_STATE_ARM:
5225                state = IB_PORT_ARMED;
5226                break;
5227        case IB_7322_L_STATE_ACTIVE:
5228                /* fall through */
5229        case IB_7322_L_STATE_ACT_DEFER:
5230                state = IB_PORT_ACTIVE;
5231                break;
5232        default: /* fall through */
5233        case IB_7322_L_STATE_DOWN:
5234                state = IB_PORT_DOWN;
5235                break;
5236        }
5237        return state;
5238}
5239
5240/* returns the IBTA port state, rather than the IBC link training state */
5241static u8 qib_7322_phys_portstate(u64 ibcs)
5242{
5243        u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
5244        return qib_7322_physportstate[state];
5245}
5246
5247static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
5248{
5249        int ret = 0, symadj = 0;
5250        unsigned long flags;
5251        int mult;
5252
5253        spin_lock_irqsave(&ppd->lflags_lock, flags);
5254        ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
5255        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5256
5257        /* Update our picture of width and speed from chip */
5258        if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
5259                ppd->link_speed_active = QIB_IB_QDR;
5260                mult = 4;
5261        } else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
5262                ppd->link_speed_active = QIB_IB_DDR;
5263                mult = 2;
5264        } else {
5265                ppd->link_speed_active = QIB_IB_SDR;
5266                mult = 1;
5267        }
5268        if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
5269                ppd->link_width_active = IB_WIDTH_4X;
5270                mult *= 4;
5271        } else
5272                ppd->link_width_active = IB_WIDTH_1X;
5273        ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
5274
5275        if (!ibup) {
5276                u64 clr;
5277
5278                /* Link went down. */
5279                /* do IPG MAD again after linkdown, even if last time failed */
5280                ppd->cpspec->ipg_tries = 0;
5281                clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
5282                        (SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
5283                         SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
5284                if (clr)
5285                        qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
5286                if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5287                                     QIBL_IB_AUTONEG_INPROG)))
5288                        set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5289                if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5290                        struct qib_qsfp_data *qd =
5291                                &ppd->cpspec->qsfp_data;
5292                        /* unlock the Tx settings, speed may change */
5293                        qib_write_kreg_port(ppd, krp_tx_deemph_override,
5294                                SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
5295                                reset_tx_deemphasis_override));
5296                        qib_cancel_sends(ppd);
5297                        /* on link down, ensure sane pcs state */
5298                        qib_7322_mini_pcs_reset(ppd);
5299                        /* schedule the qsfp refresh, which should
5300                         * turn the link off */
5301                        if (ppd->dd->flags & QIB_HAS_QSFP) {
5302                                qd->t_insert = jiffies;
5303                                queue_work(ib_wq, &qd->work);
5304                        }
5305                        spin_lock_irqsave(&ppd->sdma_lock, flags);
5306                        if (__qib_sdma_running(ppd))
5307                                __qib_sdma_process_event(ppd,
5308                                        qib_sdma_event_e70_go_idle);
5309                        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
5310                }
5311                clr = read_7322_creg32_port(ppd, crp_iblinkdown);
5312                if (clr == ppd->cpspec->iblnkdownsnap)
5313                        ppd->cpspec->iblnkdowndelta++;
5314        } else {
5315                if (qib_compat_ddr_negotiate &&
5316                    !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5317                                     QIBL_IB_AUTONEG_INPROG)) &&
5318                    ppd->link_speed_active == QIB_IB_SDR &&
5319                    (ppd->link_speed_enabled & QIB_IB_DDR) &&
5320                    ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
5321                        /* we are SDR, and auto-negotiation enabled */
5322                        ++ppd->cpspec->autoneg_tries;
5323                        if (!ppd->cpspec->ibdeltainprog) {
5324                                ppd->cpspec->ibdeltainprog = 1;
5325                                ppd->cpspec->ibsymdelta +=
5326                                        read_7322_creg32_port(ppd,
5327                                                crp_ibsymbolerr) -
5328                                                ppd->cpspec->ibsymsnap;
5329                                ppd->cpspec->iblnkerrdelta +=
5330                                        read_7322_creg32_port(ppd,
5331                                                crp_iblinkerrrecov) -
5332                                                ppd->cpspec->iblnkerrsnap;
5333                        }
5334                        try_7322_autoneg(ppd);
5335                        ret = 1; /* no other IB status change processing */
5336                } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5337                           ppd->link_speed_active == QIB_IB_SDR) {
5338                        qib_autoneg_7322_send(ppd, 1);
5339                        set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5340                        qib_7322_mini_pcs_reset(ppd);
5341                        udelay(2);
5342                        ret = 1; /* no other IB status change processing */
5343                } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5344                           (ppd->link_speed_active & QIB_IB_DDR)) {
5345                        spin_lock_irqsave(&ppd->lflags_lock, flags);
5346                        ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
5347                                         QIBL_IB_AUTONEG_FAILED);
5348                        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5349                        ppd->cpspec->autoneg_tries = 0;
5350                        /* re-enable SDR, for next link down */
5351                        set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5352                        wake_up(&ppd->cpspec->autoneg_wait);
5353                        symadj = 1;
5354                } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
5355                        /*
5356                         * Clear autoneg failure flag, and do setup
5357                         * so we'll try next time link goes down and
5358                         * back to INIT (possibly connected to a
5359                         * different device).
5360                         */
5361                        spin_lock_irqsave(&ppd->lflags_lock, flags);
5362                        ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
5363                        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5364                        ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
5365                        symadj = 1;
5366                }
5367                if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5368                        symadj = 1;
5369                        if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
5370                                try_7322_ipg(ppd);
5371                        if (!ppd->cpspec->recovery_init)
5372                                setup_7322_link_recovery(ppd, 0);
5373                        ppd->cpspec->qdr_dfe_time = jiffies +
5374                                msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
5375                }
5376                ppd->cpspec->ibmalfusesnap = 0;
5377                ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
5378                        crp_errlink);
5379        }
5380        if (symadj) {
5381                ppd->cpspec->iblnkdownsnap =
5382                        read_7322_creg32_port(ppd, crp_iblinkdown);
5383                if (ppd->cpspec->ibdeltainprog) {
5384                        ppd->cpspec->ibdeltainprog = 0;
5385                        ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
5386                                crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
5387                        ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
5388                                crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
5389                }
5390        } else if (!ibup && qib_compat_ddr_negotiate &&
5391                   !ppd->cpspec->ibdeltainprog &&
5392                   !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5393                ppd->cpspec->ibdeltainprog = 1;
5394                ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
5395                        crp_ibsymbolerr);
5396                ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
5397                        crp_iblinkerrrecov);
5398        }
5399
5400        if (!ret)
5401                qib_setup_7322_setextled(ppd, ibup);
5402        return ret;
5403}
5404
5405/*
5406 * Does read/modify/write to appropriate registers to
5407 * set output and direction bits selected by mask.
5408 * These are in their canonical positions (e.g. lsb of
5409 * dir will end up in D48 of extctrl on existing chips).
5410 * Returns the contents of GP Inputs.
5411 */
5412static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
5413{
5414        u64 read_val, new_out;
5415        unsigned long flags;
5416
5417        if (mask) {
5418                /* some bits being written, lock access to GPIO */
5419                dir &= mask;
5420                out &= mask;
5421                spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
5422                dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
5423                dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
5424                new_out = (dd->cspec->gpio_out & ~mask) | out;
5425
5426                qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
5427                qib_write_kreg(dd, kr_gpio_out, new_out);
5428                dd->cspec->gpio_out = new_out;
5429                spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
5430        }
5431        /*
5432         * It is unlikely that a read at this time would get valid
5433         * data on a pin whose direction line was set in the same
5434         * call to this function. We include the read here because
5435         * that allows us to potentially combine a change on one pin with
5436         * a read on another, and because the old code did something like
5437         * this.
5438         */
5439        read_val = qib_read_kreg64(dd, kr_extstatus);
5440        return SYM_FIELD(read_val, EXTStatus, GPIOIn);
5441}
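
/*
 * Illustrative sketch, kept out of the build: driving one GPIO pin
 * while sampling another through gpio_7322_mod().  The pin numbers
 * and the function name are hypothetical, not actual 7322
 * assignments.
 */
#if 0
static int example_gpio_usage(struct qib_devdata *dd)
{
	const u32 out_pin = 3, in_pin = 5;
	u32 mask = 1U << out_pin;
	int ins;

	/* make out_pin an output and drive it high; others untouched */
	gpio_7322_mod(dd, mask, mask, mask);

	/* mask == 0 performs a pure read of the GP Inputs */
	ins = gpio_7322_mod(dd, 0, 0, 0);
	return (ins >> in_pin) & 1;
}
#endif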
5442
5443/* Enable writes to config EEPROM, if possible. Returns previous state. The WEN pin is active-low. */
5444static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
5445{
5446        int prev_wen;
5447        u32 mask;
5448
5449        mask = 1 << QIB_EEPROM_WEN_NUM;
5450        prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
5451        gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
5452
5453        return prev_wen & 1;
5454}
5455
5456/*
5457 * Read fundamental info we need to use the chip.  These are
5458 * the registers that describe chip capabilities, and are
5459 * saved in shadow registers.
5460 */
5461static void get_7322_chip_params(struct qib_devdata *dd)
5462{
5463        u64 val;
5464        u32 piobufs;
5465        int mtu;
5466
5467        dd->palign = qib_read_kreg32(dd, kr_pagealign);
5468
5469        dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
5470
5471        dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
5472        dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
5473        dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
5474        dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
5475        dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
5476
5477        val = qib_read_kreg64(dd, kr_sendpiobufcnt);
5478        dd->piobcnt2k = val & ~0U;
5479        dd->piobcnt4k = val >> 32;
5480        val = qib_read_kreg64(dd, kr_sendpiosize);
5481        dd->piosize2k = val & ~0U;
5482        dd->piosize4k = val >> 32;
5483
5484        mtu = ib_mtu_enum_to_int(qib_ibmtu);
5485        if (mtu == -1)
5486                mtu = QIB_DEFAULT_MTU;
5487        dd->pport[0].ibmtu = (u32)mtu;
5488        dd->pport[1].ibmtu = (u32)mtu;
5489
5490        /* these may be adjusted in init_chip_wc_pat() */
5491        dd->pio2kbase = (u32 __iomem *)
5492                ((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
5493        dd->pio4kbase = (u32 __iomem *)
5494                ((char __iomem *) dd->kregbase +
5495                 (dd->piobufbase >> 32));
5496        /*
5497         * 4K buffers take 2 pages; we use roundup just to be
5498         * paranoid; we calculate it once here, rather than on
5499         * ever buf allocate
5500         * every buf allocation
5501        dd->align4k = ALIGN(dd->piosize4k, dd->palign);
5502
5503        piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;
5504
5505        dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
5506                (sizeof(u64) * BITS_PER_BYTE / 2);
5507}
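
/*
 * Worked example, kept out of the build, of the pioavregs arithmetic
 * above: each PIO buffer consumes 2 bits in a 64-bit avail register,
 * so one register covers 32 buffers and the count rounds up.  The
 * function name and the sample count are hypothetical.
 */
#if 0
static u32 example_pioavregs(u32 piobufs)
{
	/* sizeof(u64) * BITS_PER_BYTE / 2 == 32 buffers per register */
	const u32 bufs_per_reg = 32;

	/* e.g. a hypothetical piobufs of 190 needs 6 avail registers */
	return (piobufs + bufs_per_reg - 1) / bufs_per_reg;
}
#endif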
5508
5509/*
5510 * The chip base addresses in cspec and cpspec have to be set
5511 * after possible init_chip_wc_pat(), rather than in
5512 * get_7322_chip_params(), so split out as separate function
5513 */
5514static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
5515{
5516        u32 cregbase;
5517        cregbase = qib_read_kreg32(dd, kr_counterregbase);
5518
5519        dd->cspec->cregbase = (u64 __iomem *)(cregbase +
5520                (char __iomem *)dd->kregbase);
5521
5522        dd->egrtidbase = (u64 __iomem *)
5523                ((char __iomem *) dd->kregbase + dd->rcvegrbase);
5524
5525        /* port registers are defined as relative to base of chip */
5526        dd->pport[0].cpspec->kpregbase =
5527                (u64 __iomem *)((char __iomem *)dd->kregbase);
5528        dd->pport[1].cpspec->kpregbase =
5529                (u64 __iomem *)(dd->palign +
5530                (char __iomem *)dd->kregbase);
5531        dd->pport[0].cpspec->cpregbase =
5532                (u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
5533                kr_counterregbase) + (char __iomem *)dd->kregbase);
5534        dd->pport[1].cpspec->cpregbase =
5535                (u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
5536                kr_counterregbase) + (char __iomem *)dd->kregbase);
5537}
5538
5539/*
5540 * This is a fairly special-purpose observer, so we only support
5541 * the port-specific parts of SendCtrl
5542 */
5543
5544#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) |           \
5545                           SYM_MASK(SendCtrl_0, SDmaEnable) |           \
5546                           SYM_MASK(SendCtrl_0, SDmaIntEnable) |        \
5547                           SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
5548                           SYM_MASK(SendCtrl_0, SDmaHalt) |             \
5549                           SYM_MASK(SendCtrl_0, IBVLArbiterEn) |        \
5550                           SYM_MASK(SendCtrl_0, ForceCreditUpToDate))
5551
5552static int sendctrl_hook(struct qib_devdata *dd,
5553                         const struct diag_observer *op, u32 offs,
5554                         u64 *data, u64 mask, int only_32)
5555{
5556        unsigned long flags;
5557        unsigned idx;
5558        unsigned pidx;
5559        struct qib_pportdata *ppd = NULL;
5560        u64 local_data, all_bits;
5561
5562        /*
5563         * The fixed correspondence between Physical ports and pports is
5564         * severed. We need to hunt for the ppd that corresponds
5565         * to the offset we got. And we have to do that without admitting
5566         * we know the stride, apparently.
5567         */
5568        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5569                u64 __iomem *psptr;
5570                u32 psoffs;
5571
5572                ppd = dd->pport + pidx;
5573                if (!ppd->cpspec->kpregbase)
5574                        continue;
5575
5576                psptr = ppd->cpspec->kpregbase + krp_sendctrl;
5577                psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
5578                if (psoffs == offs)
5579                        break;
5580        }
5581
5582        /* If pport is not being managed by driver, just avoid shadows. */
5583        if (pidx >= dd->num_pports)
5584                ppd = NULL;
5585
5586        /* In any case, "idx" is flat index in kreg space */
5587        idx = offs / sizeof(u64);
5588
5589        all_bits = ~0ULL;
5590        if (only_32)
5591                all_bits >>= 32;
5592
5593        spin_lock_irqsave(&dd->sendctrl_lock, flags);
5594        if (!ppd || (mask & all_bits) != all_bits) {
5595                /*
5596                 * At least some mask bits are zero, so we need
5597                 * to read. The judgement call is whether from
5598                 * reg or shadow. First-cut: read reg, and complain
5599                 * if any bits which should be shadowed are different
5600                 * from their shadowed value.
5601                 */
5602                if (only_32)
5603                        local_data = (u64)qib_read_kreg32(dd, idx);
5604                else
5605                        local_data = qib_read_kreg64(dd, idx);
5606                *data = (local_data & ~mask) | (*data & mask);
5607        }
5608        if (mask) {
5609                /*
5610                 * At least some mask bits are one, so we need
5611                 * to write, but only shadow some bits.
5612                 */
5613                u64 sval, tval; /* Shadowed, transient */
5614
5615                /*
5616                 * New shadow val is bits we don't want to touch,
5617                 * ORed with bits we do, that are intended for shadow.
5618                 */
5619                if (ppd) {
5620                        sval = ppd->p_sendctrl & ~mask;
5621                        sval |= *data & SENDCTRL_SHADOWED & mask;
5622                        ppd->p_sendctrl = sval;
5623                } else
5624                        sval = *data & SENDCTRL_SHADOWED & mask;
5625                tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
5626                qib_write_kreg(dd, idx, tval);
5627                qib_write_kreg(dd, kr_scratch, 0ULL);
5628        }
5629        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
5630        return only_32 ? 4 : 8;
5631}
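
/*
 * Illustrative sketch, kept out of the build, of the shadow/transient
 * split performed above: bits covered by SENDCTRL_SHADOWED are kept
 * in the software shadow, while the value written to the chip also
 * carries the transient, non-shadowed bits.  The function name is
 * hypothetical.
 */
#if 0
static u64 example_shadow_split(u64 *shadow, u64 data, u64 mask)
{
	u64 sval, tval;

	sval = (*shadow & ~mask) | (data & SENDCTRL_SHADOWED & mask);
	tval = sval | (data & ~SENDCTRL_SHADOWED & mask);
	*shadow = sval;		/* remembered for later reads */
	return tval;		/* what would be written to the chip */
}
#endif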
5632
5633static const struct diag_observer sendctrl_0_observer = {
5634        sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
5635        KREG_IDX(SendCtrl_0) * sizeof(u64)
5636};
5637
5638static const struct diag_observer sendctrl_1_observer = {
5639        sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
5640        KREG_IDX(SendCtrl_1) * sizeof(u64)
5641};
5642
5643static ushort sdma_fetch_prio = 8;
5644module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
5645MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");
5646
5647/* Besides logging QSFP events, we set appropriate TxDDS values */
5648static void init_txdds_table(struct qib_pportdata *ppd, int override);
5649
5650static void qsfp_7322_event(struct work_struct *work)
5651{
5652        struct qib_qsfp_data *qd;
5653        struct qib_pportdata *ppd;
5654        unsigned long pwrup;
5655        unsigned long flags;
5656        int ret;
5657        u32 le2;
5658
5659        qd = container_of(work, struct qib_qsfp_data, work);
5660        ppd = qd->ppd;
5661        pwrup = qd->t_insert +
5662                msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC);
5663
5664        /* Delay for 20 msecs to allow the ModPrs resistor to set up */
5665        mdelay(QSFP_MODPRS_LAG_MSEC);
5666
5667        if (!qib_qsfp_mod_present(ppd)) {
5668                ppd->cpspec->qsfp_data.modpresent = 0;
5669                /* Set the physical link to disabled */
5670                qib_set_ib_7322_lstate(ppd, 0,
5671                                       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
5672                spin_lock_irqsave(&ppd->lflags_lock, flags);
5673                ppd->lflags &= ~QIBL_LINKV;
5674                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5675        } else {
5676                /*
5677                 * Some QSFPs not only do not respond until the full power-up
5678                 * time, but may behave badly if we try. So hold off responding
5679                 * to insertion.
5680                 */
5681                while (1) {
5682                        if (time_is_before_jiffies(pwrup))
5683                                break;
5684                        msleep(20);
5685                }
5686
5687                ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
5688
5689                /*
5690                 * Need to change LE2 back to defaults if we couldn't
5691                 * read the cable type (to handle cable swaps), so do this
5692                 * even on failure to read cable information.  We don't
5693                 * get here for QME, so IS_QME check not needed here.
5694                 */
5695                if (!ret && !ppd->dd->cspec->r1) {
5696                        if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
5697                                le2 = LE2_QME;
5698                        else if (qd->cache.atten[1] >= qib_long_atten &&
5699                                 QSFP_IS_CU(qd->cache.tech))
5700                                le2 = LE2_5m;
5701                        else
5702                                le2 = LE2_DEFAULT;
5703                } else
5704                        le2 = LE2_DEFAULT;
5705                ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
5706                /*
5707                 * We always change parameters, since we can choose
5708                 * values for cables without eeproms, and the cable may have
5709                 * changed from a cable with full or partial eeprom content
5710                 * to one with partial or no content.
5711                 */
5712                init_txdds_table(ppd, 0);
5713                /* The physical link is being re-enabled only when the
5714                 * previous state was DISABLED and the VALID bit is not
5715                 * set. This should only happen when the cable has been
5716                 * physically pulled. */
5717                if (!ppd->cpspec->qsfp_data.modpresent &&
5718                    (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) {
5719                        ppd->cpspec->qsfp_data.modpresent = 1;
5720                        qib_set_ib_7322_lstate(ppd, 0,
5721                                QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
5722                        spin_lock_irqsave(&ppd->lflags_lock, flags);
5723                        ppd->lflags |= QIBL_LINKV;
5724                        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5725                }
5726        }
5727}
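
/*
 * Illustrative sketch, kept out of the build, of the LE2 selection
 * above, with the same precedence: active-equalization cables get
 * LE2_QME, long copper runs get LE2_5m, and everything else
 * (including r1 silicon or a failed cache read) gets LE2_DEFAULT.
 * The struct type of qd->cache is assumed from qib_qsfp.h; the
 * function name is hypothetical.
 */
#if 0
static u32 example_pick_le2(const struct qib_qsfp_cache *cp, int r1, int ret)
{
	if (ret || r1)			/* no usable cable info */
		return LE2_DEFAULT;
	if (QSFP_IS_ACTIVE_FAR(cp->tech))
		return LE2_QME;
	if (cp->atten[1] >= qib_long_atten && QSFP_IS_CU(cp->tech))
		return LE2_5m;
	return LE2_DEFAULT;
}
#endif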
5728
5729/*
5730 * There is little we can do but complain to the user if QSFP
5731 * initialization fails.
5732 */
5733static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
5734{
5735        unsigned long flags;
5736        struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
5737        struct qib_devdata *dd = ppd->dd;
5738        u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;
5739
5740        mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
5741        qd->ppd = ppd;
5742        qib_qsfp_init(qd, qsfp_7322_event);
5743        spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
5744        dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
5745        dd->cspec->gpio_mask |= mod_prs_bit;
5746        qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
5747        qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
5748        spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
5749}
5750
5751/*
5752 * called at device initialization time, and also if the txselect
5753 * module parameter is changed.  This is used for cables that don't
5754 * have valid QSFP EEPROMs (not present, or attenuation is zero).
5755 * We initialize to the default, then if there is a specific
5756 * unit,port match, we use that (and set it immediately, for the
5757 * current speed, if the link is at INIT or better).
5758 * String format is "default# unit#,port#=# ... u,p=#", separators must
5759 * be a SPACE character.  A newline terminates.  The u,p=# tuples may
5760 * optionally have "u,p=#,#", where the final # is the H1 value
5761 * The last specific match is used (actually, all are used, but last
5762 * one is the one that winds up set); if none at all, fall back on default.
5763 */
5764static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
5765{
5766        char *nxt, *str;
5767        u32 pidx, unit, port, deflt, h1;
5768        unsigned long val;
5769        int any = 0, seth1;
5770        int txdds_size;
5771
5772        str = txselect_list;
5773
5774        /* default number is validated in setup_txselect() */
5775        deflt = simple_strtoul(str, &nxt, 0);
5776        for (pidx = 0; pidx < dd->num_pports; ++pidx)
5777                dd->pport[pidx].cpspec->no_eep = deflt;
5778
5779        txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
5780        if (IS_QME(dd) || IS_QMH(dd))
5781                txdds_size += TXDDS_MFG_SZ;
5782
5783        while (*nxt && nxt[1]) {
5784                str = ++nxt;
5785                unit = simple_strtoul(str, &nxt, 0);
5786                if (nxt == str || !*nxt || *nxt != ',') {
5787                        while (*nxt && *nxt++ != ' ') /* skip to next, if any */
5788                                ;
5789                        continue;
5790                }
5791                str = ++nxt;
5792                port = simple_strtoul(str, &nxt, 0);
5793                if (nxt == str || *nxt != '=') {
5794                        while (*nxt && *nxt++ != ' ') /* skip to next, if any */
5795                                ;
5796                        continue;
5797                }
5798                str = ++nxt;
5799                val = simple_strtoul(str, &nxt, 0);
5800                if (nxt == str) {
5801                        while (*nxt && *nxt++ != ' ') /* skip to next, if any */
5802                                ;
5803                        continue;
5804                }
5805                if (val >= txdds_size)
5806                        continue;
5807                seth1 = 0;
5808                h1 = 0; /* gcc thinks it might be used uninitialized */
5809                if (*nxt == ',' && nxt[1]) {
5810                        str = ++nxt;
5811                        h1 = (u32)simple_strtoul(str, &nxt, 0);
5812                        if (nxt == str)
5813                                while (*nxt && *nxt++ != ' ') /* skip */
5814                                        ;
5815                        else
5816                                seth1 = 1;
5817                }
5818                for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
5819                     ++pidx) {
5820                        struct qib_pportdata *ppd = &dd->pport[pidx];
5821
5822                        if (ppd->port != port || !ppd->link_speed_supported)
5823                                continue;
5824                        ppd->cpspec->no_eep = val;
5825                        if (seth1)
5826                                ppd->cpspec->h1_val = h1;
5827                        /* now change the IBC and serdes, overriding generic */
5828                        init_txdds_table(ppd, 1);
5829                        /* Re-enable the physical state machine on mezz boards
5830                         * now that the correct settings have been set.
5831                         * QSFP boards are handled by the QSFP event handler. */
5832                        if (IS_QMH(dd) || IS_QME(dd))
5833                                qib_set_ib_7322_lstate(ppd, 0,
5834                                            QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
5835                        any++;
5836                }
5837                if (*nxt == '\n')
5838                        break; /* done */
5839        }
5840        if (change && !any) {
5841                /* no specific setting, use the default.
5842                 * Change the IBC and serdes, but since it's
5843                 * general, don't override specific settings.
5844                 */
5845                for (pidx = 0; pidx < dd->num_pports; ++pidx)
5846                        if (dd->pport[pidx].link_speed_supported)
5847                                init_txdds_table(&dd->pport[pidx], 0);
5848        }
5849}
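
/*
 * Example, kept out of the build: a txselect string such as
 *	"2 1,1=5 1,2=5,10"
 * sets the default TxDDS index to 2, uses index 5 for unit 1 port 1,
 * and index 5 with H1 forced to 10 for unit 1 port 2.  The standalone
 * parser below mirrors the tokenizing done in set_no_qsfp_atten(),
 * simplified to stop on a malformed token rather than skip it; the
 * function name is hypothetical.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

static void example_parse_txselect(const char *s)
{
	char *nxt;
	unsigned long deflt, unit, port, val, h1;

	deflt = strtoul(s, &nxt, 0);
	printf("default index %lu\n", deflt);
	while (*nxt == ' ' && nxt[1]) {
		nxt++;				/* skip the separator */
		unit = strtoul(nxt, &nxt, 0);
		if (*nxt++ != ',')
			break;
		port = strtoul(nxt, &nxt, 0);
		if (*nxt++ != '=')
			break;
		val = strtoul(nxt, &nxt, 0);
		if (*nxt == ',')		/* optional trailing H1 value */
			h1 = strtoul(nxt + 1, &nxt, 0);
		else
			h1 = 0;
		printf("unit %lu port %lu -> index %lu (h1 %lu)\n",
		       unit, port, val, h1);
	}
}
#endif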
5850
5851/* handle the txselect parameter changing */
5852static int setup_txselect(const char *str, struct kernel_param *kp)
5853{
5854        struct qib_devdata *dd;
5855        unsigned long val;
5856        int ret;
5857
5858        if (strlen(str) >= MAX_ATTEN_LEN) {
5859                pr_info("txselect_values string too long\n");
5860                return -ENOSPC;
5861        }
5862        ret = kstrtoul(str, 0, &val);
5863        if (ret || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
5864                                TXDDS_MFG_SZ)) {
5865                pr_info("txselect_values must start with a number < %d\n",
5866                        TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
5867                return ret ? ret : -EINVAL;
5868        }
5869
5870        strcpy(txselect_list, str);
5871        list_for_each_entry(dd, &qib_dev_list, list)
5872                if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
5873                        set_no_qsfp_atten(dd, 1);
5874        return 0;
5875}
5876
5877/*
5878 * Write the final few registers that depend on some of the
5879 * init setup.  Done late in init, just before bringing up
5880 * the serdes.
5881 */
5882static int qib_late_7322_initreg(struct qib_devdata *dd)
5883{
5884        int ret = 0, n;
5885        u64 val;
5886
5887        qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
5888        qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
5889        qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
5890        qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
5891        val = qib_read_kreg64(dd, kr_sendpioavailaddr);
5892        if (val != dd->pioavailregs_phys) {
5893                qib_dev_err(dd,
5894                        "Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
5895                        (unsigned long) dd->pioavailregs_phys,
5896                        (unsigned long long) val);
5897                ret = -EINVAL;
5898        }
5899
5900        n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
5901        qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
5902        /* driver sends get pkey, lid, etc. checking also, to catch bugs */
5903        qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);
5904
5905        qib_register_observer(dd, &sendctrl_0_observer);
5906        qib_register_observer(dd, &sendctrl_1_observer);
5907
5908        dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
5909        qib_write_kreg(dd, kr_control, dd->control);
5910        /*
5911         * Set SendDmaFetchPriority and init Tx params, including
5912         * QSFP handler on boards that have QSFP.
5913         * First set our default attenuation entry for cables that
5914         * don't have valid attenuation.
5915         */
5916        set_no_qsfp_atten(dd, 0);
5917        for (n = 0; n < dd->num_pports; ++n) {
5918                struct qib_pportdata *ppd = dd->pport + n;
5919
5920                qib_write_kreg_port(ppd, krp_senddmaprioritythld,
5921                                    sdma_fetch_prio & 0xf);
5922                /* Initialize qsfp if present on board. */
5923                if (dd->flags & QIB_HAS_QSFP)
5924                        qib_init_7322_qsfp(ppd);
5925        }
5926        dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
5927        qib_write_kreg(dd, kr_control, dd->control);
5928
5929        return ret;
5930}
5931
5932/* per IB port errors.  */
5933#define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
5934        MASK_ACROSS(8, 15))
5935#define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
5936#define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
5937        MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
5938        MASK_ACROSS(0, 11))
5939
5940/*
5941 * Write the initialization per-port registers that need to be done at
5942 * driver load and after reset completes (i.e., that aren't done as part
5943 * of other init procedures called from qib_init.c).
5944 * Some of these should be redundant on reset, but play safe.
5945 */
5946static void write_7322_init_portregs(struct qib_pportdata *ppd)
5947{
5948        u64 val;
5949        int i;
5950
5951        if (!ppd->link_speed_supported) {
5952                /* no buffer credits for this port */
5953                for (i = 1; i < 8; i++)
5954                        qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
5955                qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
5956                qib_write_kreg(ppd->dd, kr_scratch, 0);
5957                return;
5958        }
5959
5960        /*
5961         * Set the number of supported virtual lanes in IBC,
5962         * for flow control packet handling on unsupported VLs
5963         */
5964        val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
5965        val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
5966        val |= (u64)(ppd->vls_supported - 1) <<
5967                SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
5968        qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
5969
5970        qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);
5971
5972        /* enable tx header checking */
5973        qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
5974                            IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
5975                            IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);
5976
5977        qib_write_kreg_port(ppd, krp_ncmodectrl,
5978                SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));
5979
5980        /*
5981         * Unconditionally clear the bufmask bits.  If SDMA is
5982         * enabled, we'll set them appropriately later.
5983         */
5984        qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
5985        qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
5986        qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
5987        if (ppd->dd->cspec->r1)
5988                ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
5989}
5990
5991/*
5992 * Write the initialization per-device registers that need to be done at
5993 * driver load and after reset completes (i.e., that aren't done as part
5994 * of other init procedures called from qib_init.c).  Also write per-port
5995 * registers that are affected by overall device config, such as QP mapping
5996 * Some of these should be redundant on reset, but play safe.
5997 */
5998static void write_7322_initregs(struct qib_devdata *dd)
5999{
6000        struct qib_pportdata *ppd;
6001        int i, pidx;
6002        u64 val;
6003
6004        /* Set Multicast QPs received by port 2 to map to context one. */
6005        qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);
6006
6007        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
6008                unsigned n, regno;
6009                unsigned long flags;
6010
6011                if (dd->n_krcv_queues < 2 ||
6012                        !dd->pport[pidx].link_speed_supported)
6013                        continue;
6014
6015                ppd = &dd->pport[pidx];
6016
6017                /* be paranoid against later code motion, etc. */
6018                spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
6019                ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
6020                spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
6021
6022                /* Initialize QP to context mapping */
6023                regno = krp_rcvqpmaptable;
6024                val = 0;
6025                if (dd->num_pports > 1)
6026                        n = dd->first_user_ctxt / dd->num_pports;
6027                else
6028                        n = dd->first_user_ctxt - 1;
6029                for (i = 0; i < 32; ) {
6030                        unsigned ctxt;
6031
6032                        if (dd->num_pports > 1)
6033                                ctxt = (i % n) * dd->num_pports + pidx;
6034                        else if (i % n)
6035                                ctxt = (i % n) + 1;
6036                        else
6037                                ctxt = ppd->hw_pidx;
6038                        val |= ctxt << (5 * (i % 6));
6039                        i++;
6040                        if (i % 6 == 0) {
6041                                qib_write_kreg_port(ppd, regno, val);
6042                                val = 0;
6043                                regno++;
6044                        }
6045                }
6046                qib_write_kreg_port(ppd, regno, val);
6047        }
6048
6049        /*
6050         * Set up interrupt mitigation for kernel contexts, but
6051         * not user contexts (user contexts use interrupts when
6052         * stalled waiting for any packet, so want those interrupts
6053         * right away).
6054         */
6055        for (i = 0; i < dd->first_user_ctxt; i++) {
6056                dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
6057                qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
6058        }
6059
6060        /*
6061         * Initialize the (disabled) rcvflow tables.  Application code
6062         * will set up each flow as it uses the flow.
6063         * Doesn't clear any of the error bits that might be set.
6064         */
6065        val = TIDFLOW_ERRBITS; /* these are W1C */
6066        for (i = 0; i < dd->cfgctxts; i++) {
6067                int flow;
6068                for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
6069                        qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
6070        }
6071
6072        /*
6073         * dual cards init to dual port recovery, single port cards to
6074         * the one port.  Dual port cards may later adjust to 1 port,
6075         * and then back to dual port if both ports are connected
6076         */
6077        if (dd->num_pports)
6078                setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
6079}
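
/*
 * Illustrative sketch, kept out of the build, of the QP-to-context
 * map packing above: 32 five-bit table entries are packed six per
 * register (30 bits used per write), so entries 0-5 land in the
 * first register, 6-11 in the next, and the final write carries the
 * two leftover entries.  The function name is hypothetical.
 */
#if 0
static void example_pack_qpmap(const unsigned ctxt[32], u64 regs[6])
{
	unsigned i, regno = 0;
	u64 val = 0;

	for (i = 0; i < 32; ) {
		val |= (u64)(ctxt[i] & 0x1f) << (5 * (i % 6));
		if (++i % 6 == 0) {
			regs[regno++] = val;	/* six entries complete */
			val = 0;
		}
	}
	regs[regno] = val;			/* entries 30 and 31 */
}
#endif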
6080
6081static int qib_init_7322_variables(struct qib_devdata *dd)
6082{
6083        struct qib_pportdata *ppd;
6084        unsigned features, pidx, sbufcnt;
6085        int ret, mtu;
6086        u32 sbufs, updthresh;
6087
6088        /* pport structs are contiguous, allocated after devdata */
6089        ppd = (struct qib_pportdata *)(dd + 1);
6090        dd->pport = ppd;
6091        ppd[0].dd = dd;
6092        ppd[1].dd = dd;
6093
6094        dd->cspec = (struct qib_chip_specific *)(ppd + 2);
6095
6096        ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
6097        ppd[1].cpspec = &ppd[0].cpspec[1];
6098        ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
6099        ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */
6100
6101        spin_lock_init(&dd->cspec->rcvmod_lock);
6102        spin_lock_init(&dd->cspec->gpio_lock);
6103
6104        /* we haven't yet set QIB_PRESENT, so use read directly */
6105        dd->revision = readq(&dd->kregbase[kr_revision]);
6106
6107        if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
6108                qib_dev_err(dd,
6109                        "Revision register read failure, giving up initialization\n");
6110                ret = -ENODEV;
6111                goto bail;
6112        }
6113        dd->flags |= QIB_PRESENT;  /* now register routines work */
6114
6115        dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
6116        dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
6117        dd->cspec->r1 = dd->minrev == 1;
6118
6119        get_7322_chip_params(dd);
6120        features = qib_7322_boardname(dd);
6121
6122        /* now that piobcnt2k and 4k set, we can allocate these */
6123        sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
6124                NUM_VL15_BUFS + BITS_PER_LONG - 1;
6125        sbufcnt /= BITS_PER_LONG;
6126        dd->cspec->sendchkenable = kmalloc(sbufcnt *
6127                sizeof(*dd->cspec->sendchkenable), GFP_KERNEL);
6128        dd->cspec->sendgrhchk = kmalloc(sbufcnt *
6129                sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL);
6130        dd->cspec->sendibchk = kmalloc(sbufcnt *
6131                sizeof(*dd->cspec->sendibchk), GFP_KERNEL);
6132        if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
6133                !dd->cspec->sendibchk) {
6134                qib_dev_err(dd, "Failed allocation for hdrchk bitmaps\n");
6135                ret = -ENOMEM;
6136                goto bail;
6137        }
6138
6139        ppd = dd->pport;
6140
6141        /*
6142         * GPIO bits for TWSI data and clock,
6143         * used for serial EEPROM.
6144         */
6145        dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
6146        dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
6147        dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
6148
6149        dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
6150                QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
6151                QIB_HAS_THRESH_UPDATE |
6152                (sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
6153        dd->flags |= qib_special_trigger ?
6154                QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
6155
6156        /*
6157         * Setup initial values.  These may change when PAT is enabled, but
6158         * we need these to do initial chip register accesses.
6159         */
6160        qib_7322_set_baseaddrs(dd);
6161
6162        mtu = ib_mtu_enum_to_int(qib_ibmtu);
6163        if (mtu == -1)
6164                mtu = QIB_DEFAULT_MTU;
6165
6166        dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
6167        /* all hwerrors become interrupts, unless special purposed */
6168        dd->cspec->hwerrmask = ~0ULL;
6169        /*  link_recovery setup causes these errors, so ignore them,
6170         *  other than clearing them when they occur */
6171        dd->cspec->hwerrmask &=
6172                ~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
6173                  SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
6174                  HWE_MASK(LATriggered));
6175
6176        for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
6177                struct qib_chippport_specific *cp = ppd->cpspec;
6178                ppd->link_speed_supported = features & PORT_SPD_CAP;
6179                features >>= PORT_SPD_CAP_SHIFT;
6180                if (!ppd->link_speed_supported) {
6181                        /* single port mode (7340, or configured) */
6182                        dd->skip_kctxt_mask |= 1 << pidx;
6183                        if (pidx == 0) {
6184                                /* Make sure port is disabled. */
6185                                qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6186                                qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6187                                ppd[0] = ppd[1];
6188                                dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6189                                                  IBSerdesPClkNotDetectMask_0)
6190                                                  | SYM_MASK(HwErrMask,
6191                                                  SDmaMemReadErrMask_0));
6192                                dd->cspec->int_enable_mask &= ~(
6193                                     SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
6194                                     SYM_MASK(IntMask, SDmaIdleIntMask_0) |
6195                                     SYM_MASK(IntMask, SDmaProgressIntMask_0) |
6196                                     SYM_MASK(IntMask, SDmaIntMask_0) |
6197                                     SYM_MASK(IntMask, ErrIntMask_0) |
6198                                     SYM_MASK(IntMask, SendDoneIntMask_0));
6199                        } else {
6200                                /* Make sure port is disabled. */
6201                                qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6202                                qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6203                                dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6204                                                  IBSerdesPClkNotDetectMask_1)
6205                                                  | SYM_MASK(HwErrMask,
6206                                                  SDmaMemReadErrMask_1));
6207                                dd->cspec->int_enable_mask &= ~(
6208                                     SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
6209                                     SYM_MASK(IntMask, SDmaIdleIntMask_1) |
6210                                     SYM_MASK(IntMask, SDmaProgressIntMask_1) |
6211                                     SYM_MASK(IntMask, SDmaIntMask_1) |
6212                                     SYM_MASK(IntMask, ErrIntMask_1) |
6213                                     SYM_MASK(IntMask, SendDoneIntMask_1));
6214                        }
6215                        continue;
6216                }
6217
6218                dd->num_pports++;
6219                qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
6220
6221                ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
6222                ppd->link_width_enabled = IB_WIDTH_4X;
6223                ppd->link_speed_enabled = ppd->link_speed_supported;
6224                /*
6225                 * Set the initial values to reasonable default, will be set
6226                 * for real when link is up.
6227                 */
6228                ppd->link_width_active = IB_WIDTH_4X;
6229                ppd->link_speed_active = QIB_IB_SDR;
6230                ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
6231                switch (qib_num_cfg_vls) {
6232                case 1:
6233                        ppd->vls_supported = IB_VL_VL0;
6234                        break;
6235                case 2:
6236                        ppd->vls_supported = IB_VL_VL0_1;
6237                        break;
6238                default:
6239                        qib_devinfo(dd->pcidev,
6240                                    "Invalid num_vls %u, using 4 VLs\n",
6241                                    qib_num_cfg_vls);
6242                        qib_num_cfg_vls = 4;
6243                        /* fall through */
6244                case 4:
6245                        ppd->vls_supported = IB_VL_VL0_3;
6246                        break;
6247                case 8:
6248                        if (mtu <= 2048)
6249                                ppd->vls_supported = IB_VL_VL0_7;
6250                        else {
6251                                qib_devinfo(dd->pcidev,
6252                                            "Invalid num_vls %u for MTU %d"
6253                                            ", using 4 VLs\n",
6254                                            qib_num_cfg_vls, mtu);
6255                                ppd->vls_supported = IB_VL_VL0_3;
6256                                qib_num_cfg_vls = 4;
6257                        }
6258                        break;
6259                }
6260                ppd->vls_operational = ppd->vls_supported;
6261
6262                init_waitqueue_head(&cp->autoneg_wait);
6263                INIT_DELAYED_WORK(&cp->autoneg_work,
6264                                  autoneg_7322_work);
6265                if (ppd->dd->cspec->r1)
6266                        INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);
6267
6268                /*
6269                 * For Mez and similar cards, no qsfp info, so do
6270                 * the "cable info" setup here.  Can be overridden
6271                 * in adapter-specific routines.
6272                 */
6273                if (!(dd->flags & QIB_HAS_QSFP)) {
6274                        if (!IS_QMH(dd) && !IS_QME(dd))
6275                                qib_devinfo(dd->pcidev,
6276                                        "IB%u:%u: Unknown mezzanine card type\n",
6277                                        dd->unit, ppd->port);
6278                        cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
6279                        /*
6280                         * Choose center value as default tx serdes setting
6281                         * until changed through module parameter.
6282                         */
6283                        ppd->cpspec->no_eep = IS_QMH(dd) ?
6284                                TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
6285                } else
6286                        cp->h1_val = H1_FORCE_VAL;
6287
6288                /* Avoid writes to chip for mini_init */
6289                if (!qib_mini_init)
6290                        write_7322_init_portregs(ppd);
6291
6292                init_timer(&cp->chase_timer);
6293                cp->chase_timer.function = reenable_chase;
6294                cp->chase_timer.data = (unsigned long)ppd;
6295
6296                ppd++;
6297        }
6298
6299        dd->rcvhdrentsize = qib_rcvhdrentsize ?
6300                qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
6301        dd->rcvhdrsize = qib_rcvhdrsize ?
6302                qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
6303        dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
6304
6305        /* we always allocate at least 2048 bytes for eager buffers */
6306        dd->rcvegrbufsize = max(mtu, 2048);
6307        BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
6308        dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
6309
6310        qib_7322_tidtemplate(dd);
6311
6312        /*
6313         * We can request a receive interrupt for 1 or
6314         * more packets from current offset.
6315         */
6316        dd->rhdrhead_intr_off =
6317                (u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
6318
6319        /* setup the stats timer; the add_timer is done at end of init */
6320        init_timer(&dd->stats_timer);
6321        dd->stats_timer.function = qib_get_7322_faststats;
6322        dd->stats_timer.data = (unsigned long) dd;
6323
6324        dd->ureg_align = 0x10000;  /* 64KB alignment */
6325
6326        dd->piosize2kmax_dwords = dd->piosize2k >> 2;
6327
6328        qib_7322_config_ctxts(dd);
6329        qib_set_ctxtcnt(dd);
6330
6331        if (qib_wc_pat) {
6332                resource_size_t vl15off;
6333                /*
6334                 * We do not set WC on the VL15 buffers to avoid
6335                 * a rare problem with unaligned writes from
6336                 * interrupt-flushed store buffers, so we need
6337                 * to map those separately here.  We can't solve
6338                 * this for the rarely used mtrr case.
6339                 */
6340                ret = init_chip_wc_pat(dd, 0);
6341                if (ret)
6342                        goto bail;
6343
6344                /* vl15 buffers start just after the 4k buffers */
6345                vl15off = dd->physaddr + (dd->piobufbase >> 32) +
6346                        dd->piobcnt4k * dd->align4k;
6347                dd->piovl15base = ioremap_nocache(vl15off,
6348                                                  NUM_VL15_BUFS * dd->align4k);
6349                if (!dd->piovl15base) {
6350                        ret = -ENOMEM;
6351                        goto bail;
6352                }
6353        }
6354        qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
6355
6356        ret = 0;
6357        if (qib_mini_init)
6358                goto bail;
6359        if (!dd->num_pports) {
6360                qib_dev_err(dd, "No ports enabled, giving up initialization\n");
6361                goto bail; /* no error, so caller can still figure out why */
6362        }
6363
6364        write_7322_initregs(dd);
6365        ret = qib_create_ctxts(dd);
6366        init_7322_cntrnames(dd);
6367
6368        updthresh = 8U; /* update threshold */
6369
6370        /* Use all the 4KB buffers for kernel SDMA, or none if SDMA is
6371         * disabled.  Reserve the update-threshold count of buffers (or
6372         * 3, whichever is greater) for other kernel use, such as
6373         * sending SMI, MAD, and ACK packets; with SDMA disabled, all
6374         * the 4k bufs go to the kernel instead.
6375         * If the reserve were less than the update threshold, we could
6376         * wait a long time for an update.  Coded this way because we
6377         * sometimes change the update threshold for various reasons,
6378         * and we want this to remain robust.
6379         */
6380        if (dd->flags & QIB_HAS_SEND_DMA) {
6381                dd->cspec->sdmabufcnt = dd->piobcnt4k;
6382                sbufs = updthresh > 3 ? updthresh : 3;
6383        } else {
6384                dd->cspec->sdmabufcnt = 0;
6385                sbufs = dd->piobcnt4k;
6386        }
6387        dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
6388                dd->cspec->sdmabufcnt;
6389        dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
6390        dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
6391        dd->last_pio = dd->cspec->lastbuf_for_pio;
6392        dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
6393                dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
6394
6395        /*
6396         * If we have 16 user contexts, we will have 7 sbufs
6397         * per context, so reduce the update threshold to match.  We
6398         * want to update before we actually run out, at low pbufs/ctxt
6399         * so give ourselves some margin.
6400         */
6401        if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
6402                updthresh = dd->pbufsctxt - 2;
6403        dd->cspec->updthresh_dflt = updthresh;
6404        dd->cspec->updthresh = updthresh;
6405
6406        /* before full enable, no interrupts, no locking needed */
6407        dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
6408                             << SYM_LSB(SendCtrl, AvailUpdThld)) |
6409                        SYM_MASK(SendCtrl, SendBufAvailPad64Byte);
6410
6411        dd->psxmitwait_supported = 1;
6412        dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
6413bail:
6414        if (!dd->ctxtcnt)
6415                dd->ctxtcnt = 1; /* for other initialization code */
6416
6417        return ret;
6418}
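
/*
 * Worked example, kept out of the build, of the send-buffer carve-up
 * above, using hypothetical counts: with piobcnt2k = 128, piobcnt4k =
 * 32, SDMA enabled and updthresh = 8, the SDMA engines own the 32 4k
 * buffers (128..159), contexts share PIO buffers 0..119, and the
 * kernel keeps 120..127 in reserve for SMI, MAD, and ACK sends.
 * The function name is hypothetical.
 */
#if 0
static void example_bufsplit(u32 p2k, u32 p4k, int has_sdma, u32 updthresh,
			     u32 *last_for_pio, u32 *lastctxt)
{
	u32 sdmabufcnt = has_sdma ? p4k : 0;
	u32 sbufs = has_sdma ? (updthresh > 3 ? updthresh : 3) : p4k;

	*last_for_pio = p2k + p4k - sdmabufcnt;	/* count, then ... */
	*lastctxt = *last_for_pio - sbufs;
	(*last_for_pio)--;			/* ... inclusive index */
}
#endif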
6419
6420static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
6421                                        u32 *pbufnum)
6422{
6423        u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
6424        struct qib_devdata *dd = ppd->dd;
6425
6426        /* last is same for 2k and 4k, because we use 4k if all 2k busy */
6427        if (pbc & PBC_7322_VL15_SEND) {
6428                first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
6429                last = first;
6430        } else {
6431                if ((plen + 1) > dd->piosize2kmax_dwords)
6432                        first = dd->piobcnt2k;
6433                else
6434                        first = 0;
6435                last = dd->cspec->lastbuf_for_pio;
6436        }
6437        return qib_getsendbuf_range(dd, pbufnum, first, last);
6438}
6439
6440static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
6441                                     u32 start)
6442{
6443        qib_write_kreg_port(ppd, krp_psinterval, intv);
6444        qib_write_kreg_port(ppd, krp_psstart, start);
6445}
6446
6447/*
6448 * Must be called with sdma_lock held, or before init finished.
6449 */
6450static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
6451{
6452        qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
6453}
6454
6455static struct sdma_set_state_action sdma_7322_action_table[] = {
6456        [qib_sdma_state_s00_hw_down] = {
6457                .go_s99_running_tofalse = 1,
6458                .op_enable = 0,
6459                .op_intenable = 0,
6460                .op_halt = 0,
6461                .op_drain = 0,
6462        },
6463        [qib_sdma_state_s10_hw_start_up_wait] = {
6464                .op_enable = 0,
6465                .op_intenable = 1,
6466                .op_halt = 1,
6467                .op_drain = 0,
6468        },
6469        [qib_sdma_state_s20_idle] = {
6470                .op_enable = 1,
6471                .op_intenable = 1,
6472                .op_halt = 1,
6473                .op_drain = 0,
6474        },
6475        [qib_sdma_state_s30_sw_clean_up_wait] = {
6476                .op_enable = 0,
6477                .op_intenable = 1,
6478                .op_halt = 1,
6479                .op_drain = 0,
6480        },
6481        [qib_sdma_state_s40_hw_clean_up_wait] = {
6482                .op_enable = 1,
6483                .op_intenable = 1,
6484                .op_halt = 1,
6485                .op_drain = 0,
6486        },
6487        [qib_sdma_state_s50_hw_halt_wait] = {
6488                .op_enable = 1,
6489                .op_intenable = 1,
6490                .op_halt = 1,
6491                .op_drain = 1,
6492        },
6493        [qib_sdma_state_s99_running] = {
6494                .op_enable = 1,
6495                .op_intenable = 1,
6496                .op_halt = 0,
6497                .op_drain = 0,
6498                .go_s99_running_totrue = 1,
6499        },
6500};
6501
6502static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
6503{
6504        ppd->sdma_state.set_state_action = sdma_7322_action_table;
6505}
6506
6507static int init_sdma_7322_regs(struct qib_pportdata *ppd)
6508{
6509        struct qib_devdata *dd = ppd->dd;
6510        unsigned lastbuf, erstbuf;
6511        u64 senddmabufmask[3] = { 0 };
6512        int n, ret = 0;
6513
6514        qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
6515        qib_sdma_7322_setlengen(ppd);
6516        qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
6517        qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
6518        qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
6519        qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
6520
6521        if (dd->num_pports)
6522                n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
6523        else
6524                n = dd->cspec->sdmabufcnt; /* failsafe for init */
6525        erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
6526                ((dd->num_pports == 1 || ppd->port == 2) ? n :
6527                dd->cspec->sdmabufcnt);
6528        lastbuf = erstbuf + n;
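        /*
         * A sketch of the resulting split: with two ports, each takes
         * n = sdmabufcnt / 2 of the total = piobcnt2k + piobcnt4k
         * buffers, so port 1 gets [total - sdmabufcnt, total - n) and
         * port 2 gets [total - n, total).
         */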
6529
6530        ppd->sdma_state.first_sendbuf = erstbuf;
6531        ppd->sdma_state.last_sendbuf = lastbuf;
6532        for (; erstbuf < lastbuf; ++erstbuf) {
6533                unsigned word = erstbuf / BITS_PER_LONG;
6534                unsigned bit = erstbuf & (BITS_PER_LONG - 1);
6535
6536                BUG_ON(word >= 3);
6537                senddmabufmask[word] |= 1ULL << bit;
6538        }
6539        qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
6540        qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
6541        qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
6542        return ret;
6543}
6544
6545/* sdma_lock must be held */
6546static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
6547{
6548        struct qib_devdata *dd = ppd->dd;
6549        int sane;
6550        int use_dmahead;
6551        u16 swhead;
6552        u16 swtail;
6553        u16 cnt;
6554        u16 hwhead;
6555
6556        use_dmahead = __qib_sdma_running(ppd) &&
6557                (dd->flags & QIB_HAS_SDMA_TIMEOUT);
6558retry:
6559        hwhead = use_dmahead ?
6560                (u16) le64_to_cpu(*ppd->sdma_head_dma) :
6561                (u16) qib_read_kreg_port(ppd, krp_senddmahead);
6562
6563        swhead = ppd->sdma_descq_head;
6564        swtail = ppd->sdma_descq_tail;
6565        cnt = ppd->sdma_descq_cnt;
6566
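        /*
         * Sanity-check hwhead against the software ring pointers.  The
         * descq is a ring of cnt entries, so e.g. with cnt = 256,
         * swhead = 200 and swtail = 50 (wrapped), a sane hwhead lies in
         * 200..255 or 0..50.
         */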
6567        if (swhead < swtail)
6568                /* not wrapped */
6569                sane = (hwhead >= swhead) && (hwhead <= swtail);
6570        else if (swhead > swtail)
6571                /* wrapped around */
6572                sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
6573                        (hwhead <= swtail);
6574        else
6575                /* empty */
6576                sane = (hwhead == swhead);
6577
6578        if (unlikely(!sane)) {
6579                if (use_dmahead) {
6580                        /* try one more time, directly from the register */
6581                        use_dmahead = 0;
6582                        goto retry;
6583                }
6584                /* proceed as if no progress */
6585                hwhead = swhead;
6586        }
6587
6588        return hwhead;
6589}
6590
6591static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
6592{
6593        u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
6594
6595        return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
6596               (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
6597               !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
6598               !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
6599}
6600
6601/*
6602 * Compute the amount of delay before sending the next packet if the
6603 * port's send rate differs from the static rate set for the QP.
6604 * The delay affects the next packet and the amount of the delay is
6605 * based on the length of this packet.
6606 */
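/*
 * A sketch of the arithmetic only: with snd_mult 1 and a slower static
 * rate giving rcv_mult 4, a plen of 0x80 dwords yields a delay of
 * ((0x80 + 1) >> 1) * 1 = 0x40 in the low bits of the returned PBC
 * control word.
 */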
6607static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
6608                                   u8 srate, u8 vl)
6609{
6610        u8 snd_mult = ppd->delay_mult;
6611        u8 rcv_mult = ib_rate_to_delay[srate];
6612        u32 ret;
6613
6614        ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
6615
6616        /* Indicate VL15, else set the VL in the control word */
6617        if (vl == 15)
6618                ret |= PBC_7322_VL15_SEND_CTRL;
6619        else
6620                ret |= vl << PBC_VL_NUM_LSB;
6621        ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
6622
6623        return ret;
6624}
6625
6626/*
6627 * Enable the per-port VL15 send buffers for use.
6628 * They follow the rest of the buffers, without a config parameter.
6629 * This was in initregs, but that is done before the shadow
6630 * is set up, and this has to be done after the shadow is
6631 * set up.
6632 */
6633static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
6634{
6635        unsigned vl15bufs;
6636
6637        vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
6638        qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
6639                               TXCHK_CHG_TYPE_KERN, NULL);
6640}
6641
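/*
 * Carve up the eager buffers: the first NUM_IB_PORTS (kernel) ctxts
 * share KCTXT0_EGRCNT (split evenly when both ports are in use), and
 * each later ctxt N gets rcvegrcnt buffers based at
 * KCTXT0_EGRCNT + (N - NUM_IB_PORTS) * rcvegrcnt.
 */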
6642static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
6643{
6644        if (rcd->ctxt < NUM_IB_PORTS) {
6645                if (rcd->dd->num_pports > 1) {
6646                        rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
6647                        rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
6648                } else {
6649                        rcd->rcvegrcnt = KCTXT0_EGRCNT;
6650                        rcd->rcvegr_tid_base = 0;
6651                }
6652        } else {
6653                rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
6654                rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
6655                        (rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
6656        }
6657}
6658
6659#define QTXSLEEPS 5000
6660static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
6661                                  u32 len, u32 which, struct qib_ctxtdata *rcd)
6662{
6663        int i;
6664        const int last = start + len - 1;
6665        const int lastr = last / BITS_PER_LONG;
6666        u32 sleeps = 0;
6667        int wait = rcd != NULL;
6668        unsigned long flags;
6669
6670        while (wait) {
6671                unsigned long shadow;
6672                int cstart, previ = -1;
6673
6674                /*
6675                 * when flipping from kernel to user, we can't change
6676                 * the checking type if the buffer is allocated to the
6677         * driver.  It's OK in the other direction, because it's
6678                 * from close, and we have just disarm'ed all the
6679                 * buffers.  All the kernel to kernel changes are also
6680                 * OK.
6681                 */
6682                for (cstart = start; cstart <= last; cstart++) {
6683                        i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
6684                                / BITS_PER_LONG;
6685                        if (i != previ) {
6686                                shadow = (unsigned long)
6687                                        le64_to_cpu(dd->pioavailregs_dma[i]);
6688                                previ = i;
6689                        }
6690                        if (test_bit(((2 * cstart) +
6691                                      QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
6692                                     % BITS_PER_LONG, &shadow))
6693                                break;
6694                }
6695
6696                if (cstart > last)
6697                        break;
6698
6699                if (sleeps == QTXSLEEPS)
6700                        break;
6701                /* make sure we see an updated copy next time around */
6702                sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
6703                sleeps++;
6704                msleep(20);
6705        }
6706
6707        switch (which) {
6708        case TXCHK_CHG_TYPE_DIS1:
6709                /*
6710                 * disable checking on a range; used by diags; just
6711                 * one buffer, but still written generically
6712                 */
6713                for (i = start; i <= last; i++)
6714                        clear_bit(i, dd->cspec->sendchkenable);
6715                break;
6716
6717        case TXCHK_CHG_TYPE_ENAB1:
6718                /*
6719                 * (re)enable checking on a range; used by diags; just
6720                 * one buffer, but still written generically; read
6721                 * scratch to be sure buffer actually triggered, not
6722                 * just flushed from processor.
6723                 */
6724                qib_read_kreg32(dd, kr_scratch);
6725                for (i = start; i <= last; i++)
6726                        set_bit(i, dd->cspec->sendchkenable);
6727                break;
6728
6729        case TXCHK_CHG_TYPE_KERN:
6730                /* usable by kernel */
6731                for (i = start; i <= last; i++) {
6732                        set_bit(i, dd->cspec->sendibchk);
6733                        clear_bit(i, dd->cspec->sendgrhchk);
6734                }
6735                spin_lock_irqsave(&dd->uctxt_lock, flags);
6736                /* see if we need to raise avail update threshold */
6737                for (i = dd->first_user_ctxt;
6738                     dd->cspec->updthresh != dd->cspec->updthresh_dflt
6739                     && i < dd->cfgctxts; i++)
6740                        if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
6741                           ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
6742                           < dd->cspec->updthresh_dflt)
6743                                break;
6744                spin_unlock_irqrestore(&dd->uctxt_lock, flags);
6745                if (i == dd->cfgctxts) {
6746                        spin_lock_irqsave(&dd->sendctrl_lock, flags);
6747                        dd->cspec->updthresh = dd->cspec->updthresh_dflt;
6748                        dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
6749                        dd->sendctrl |= (dd->cspec->updthresh &
6750                                         SYM_RMASK(SendCtrl, AvailUpdThld)) <<
6751                                           SYM_LSB(SendCtrl, AvailUpdThld);
6752                        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
6753                        sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
6754                }
6755                break;
6756
6757        case TXCHK_CHG_TYPE_USER:
6758                /* for user process */
6759                for (i = start; i <= last; i++) {
6760                        clear_bit(i, dd->cspec->sendibchk);
6761                        set_bit(i, dd->cspec->sendgrhchk);
6762                }
6763                spin_lock_irqsave(&dd->sendctrl_lock, flags);
6764                if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
6765                        / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
6766                        dd->cspec->updthresh = (rcd->piocnt /
6767                                                rcd->subctxt_cnt) - 1;
6768                        dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
6769                        dd->sendctrl |= (dd->cspec->updthresh &
6770                                        SYM_RMASK(SendCtrl, AvailUpdThld))
6771                                        << SYM_LSB(SendCtrl, AvailUpdThld);
6772                        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
6773                        sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
6774                } else
6775                        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
6776                break;
6777
6778        default:
6779                break;
6780        }
6781
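        /*
         * Push the shadow masks to the chip.  The which >= 2 tests below
         * key off the TXCHK_CHG_TYPE_* values: the diag cases (DIS1 and
         * ENAB1) touch only sendchkenable, while KERN and USER update the
         * GRH and IB-packet check masks.
         */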
6782        for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
6783                qib_write_kreg(dd, kr_sendcheckmask + i,
6784                               dd->cspec->sendchkenable[i]);
6785
6786        for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
6787                qib_write_kreg(dd, kr_sendgrhcheckmask + i,
6788                               dd->cspec->sendgrhchk[i]);
6789                qib_write_kreg(dd, kr_sendibpktmask + i,
6790                               dd->cspec->sendibchk[i]);
6791        }
6792
6793        /*
6794         * Be sure whatever we did was seen by the chip and acted upon,
6795         * before we return.  Mostly important for which >= 2.
6796         */
6797        qib_read_kreg32(dd, kr_scratch);
6798}
6799
6800
6801/* useful for trigger analyzers, etc. */
6802static void writescratch(struct qib_devdata *dd, u32 val)
6803{
6804        qib_write_kreg(dd, kr_scratch, val);
6805}
6806
6807/* Dummy for now, use chip regs soon */
6808static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
6809{
6810        return -ENXIO;
6811}
6812
6813/**
6814 * qib_init_iba7322_funcs - set up the chip-specific function pointers
6815 * @pdev: the pci_dev for qlogic_ib device
6816 * @ent: pci_device_id struct for this dev
6817 *
6818 * Also allocates, inits, and returns the devdata struct for this
6819 * device instance
6820 *
6821 * This is global, and is called directly at init to set up the
6822 * chip-specific function pointers for later use.
6823 */
6824struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
6825                                           const struct pci_device_id *ent)
6826{
6827        struct qib_devdata *dd;
6828        int ret, i;
6829        u32 tabsize, actual_cnt = 0;
6830
6831        dd = qib_alloc_devdata(pdev,
6832                NUM_IB_PORTS * sizeof(struct qib_pportdata) +
6833                sizeof(struct qib_chip_specific) +
6834                NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
6835        if (IS_ERR(dd))
6836                goto bail;
6837
6838        dd->f_bringup_serdes    = qib_7322_bringup_serdes;
6839        dd->f_cleanup           = qib_setup_7322_cleanup;
6840        dd->f_clear_tids        = qib_7322_clear_tids;
6841        dd->f_free_irq          = qib_7322_free_irq;
6842        dd->f_get_base_info     = qib_7322_get_base_info;
6843        dd->f_get_msgheader     = qib_7322_get_msgheader;
6844        dd->f_getsendbuf        = qib_7322_getsendbuf;
6845        dd->f_gpio_mod          = gpio_7322_mod;
6846        dd->f_eeprom_wen        = qib_7322_eeprom_wen;
6847        dd->f_hdrqempty         = qib_7322_hdrqempty;
6848        dd->f_ib_updown         = qib_7322_ib_updown;
6849        dd->f_init_ctxt         = qib_7322_init_ctxt;
6850        dd->f_initvl15_bufs     = qib_7322_initvl15_bufs;
6851        dd->f_intr_fallback     = qib_7322_intr_fallback;
6852        dd->f_late_initreg      = qib_late_7322_initreg;
6853        dd->f_setpbc_control    = qib_7322_setpbc_control;
6854        dd->f_portcntr          = qib_portcntr_7322;
6855        dd->f_put_tid           = qib_7322_put_tid;
6856        dd->f_quiet_serdes      = qib_7322_mini_quiet_serdes;
6857        dd->f_rcvctrl           = rcvctrl_7322_mod;
6858        dd->f_read_cntrs        = qib_read_7322cntrs;
6859        dd->f_read_portcntrs    = qib_read_7322portcntrs;
6860        dd->f_reset             = qib_do_7322_reset;
6861        dd->f_init_sdma_regs    = init_sdma_7322_regs;
6862        dd->f_sdma_busy         = qib_sdma_7322_busy;
6863        dd->f_sdma_gethead      = qib_sdma_7322_gethead;
6864        dd->f_sdma_sendctrl     = qib_7322_sdma_sendctrl;
6865        dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
6866        dd->f_sdma_update_tail  = qib_sdma_update_7322_tail;
6867        dd->f_sendctrl          = sendctrl_7322_mod;
6868        dd->f_set_armlaunch     = qib_set_7322_armlaunch;
6869        dd->f_set_cntr_sample   = qib_set_cntr_7322_sample;
6870        dd->f_iblink_state      = qib_7322_iblink_state;
6871        dd->f_ibphys_portstate  = qib_7322_phys_portstate;
6872        dd->f_get_ib_cfg        = qib_7322_get_ib_cfg;
6873        dd->f_set_ib_cfg        = qib_7322_set_ib_cfg;
6874        dd->f_set_ib_loopback   = qib_7322_set_loopback;
6875        dd->f_get_ib_table      = qib_7322_get_ib_table;
6876        dd->f_set_ib_table      = qib_7322_set_ib_table;
6877        dd->f_set_intr_state    = qib_7322_set_intr_state;
6878        dd->f_setextled         = qib_setup_7322_setextled;
6879        dd->f_txchk_change      = qib_7322_txchk_change;
6880        dd->f_update_usrhead    = qib_update_7322_usrhead;
6881        dd->f_wantpiobuf_intr   = qib_wantpiobuf_7322_intr;
6882        dd->f_xgxs_reset        = qib_7322_mini_pcs_reset;
6883        dd->f_sdma_hw_clean_up  = qib_7322_sdma_hw_clean_up;
6884        dd->f_sdma_hw_start_up  = qib_7322_sdma_hw_start_up;
6885        dd->f_sdma_init_early   = qib_7322_sdma_init_early;
6886        dd->f_writescratch      = writescratch;
6887        dd->f_tempsense_rd      = qib_7322_tempsense_rd;
6888        /*
6889         * Do remaining PCIe setup and save PCIe values in dd.
6890         * Any error printing is already done by the init code.
6891         * On return, we have the chip mapped, but chip registers
6892         * are not set up until start of qib_init_7322_variables.
6893         */
6894        ret = qib_pcie_ddinit(dd, pdev, ent);
6895        if (ret < 0)
6896                goto bail_free;
6897
6898        /* initialize chip-specific variables */
6899        ret = qib_init_7322_variables(dd);
6900        if (ret)
6901                goto bail_cleanup;
6902
6903        if (qib_mini_init || !dd->num_pports)
6904                goto bail;
6905
6906        /*
6907         * Determine number of vectors we want; depends on port count
6908         * and number of configured kernel receive queues actually used.
6909         * Should also depend on whether sdma is enabled or not, but
6910         * that's such a rare testing case it's not worth worrying about.
6911         */
6912        tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
6913        for (i = 0; i < tabsize; i++)
6914                if ((i < ARRAY_SIZE(irq_table) &&
6915                     irq_table[i].port <= dd->num_pports) ||
6916                    (i >= ARRAY_SIZE(irq_table) &&
6917                     dd->rcd[i - ARRAY_SIZE(irq_table)]))
6918                        actual_cnt++;
6919        /* reduce by ctxts < 2 */
6920        if (qib_krcvq01_no_msi)
6921                actual_cnt -= dd->num_pports;
6922
6923        tabsize = actual_cnt;
6924        dd->cspec->msix_entries = kmalloc(tabsize *
6925                        sizeof(struct qib_msix_entry), GFP_KERNEL);
6926        if (!dd->cspec->msix_entries) {
6927                qib_dev_err(dd, "No memory for MSIx table\n");
6928                tabsize = 0;
6929        }
6930        for (i = 0; i < tabsize; i++)
6931                dd->cspec->msix_entries[i].msix.entry = i;
6932
6933        if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
6934                qib_dev_err(dd,
6935                        "Failed to setup PCIe or interrupts; continuing anyway\n");
6936        /* may be less than we wanted, if not enough available */
6937        dd->cspec->num_msix_entries = tabsize;
6938
6939        /* setup interrupt handler */
6940        qib_setup_7322_interrupt(dd, 1);
6941
6942        /* clear diagctrl register, in case diags were running and crashed */
6943        qib_write_kreg(dd, kr_hwdiagctrl, 0);
6944
6945        goto bail;
6946
6947bail_cleanup:
6948        qib_pcie_ddcleanup(dd);
6949bail_free:
6950        qib_free_devdata(dd);
6951        dd = ERR_PTR(ret);
6952bail:
6953        return dd;
6954}
6955
6956/*
6957 * Set the table entry at the specified index from the table specified.
6958 * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
6959 * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
6960 * 'idx' below addresses the correct entry, while its 4 LSBs select the
6961 * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
6962 */
6963#define DDS_ENT_AMP_LSB 14
6964#define DDS_ENT_MAIN_LSB 9
6965#define DDS_ENT_POST_LSB 5
6966#define DDS_ENT_PRE_XTRA_LSB 3
6967#define DDS_ENT_PRE_LSB 0
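/*
 * For example, a txdds_ent of {amp 0, pre 1, main 0, post 7} packs to
 * (1 << DDS_ENT_PRE_LSB) | (7 << DDS_ENT_POST_LSB) = 0xe1 in set_txdds()
 * below.
 */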
6968
6969/*
6970 * Set one entry in the TxDDS table for spec'd port
6971 * ridx picks one of the entries, while tp points
6972 * to the appropriate table entry.
6973 */
6974static void set_txdds(struct qib_pportdata *ppd, int ridx,
6975                      const struct txdds_ent *tp)
6976{
6977        struct qib_devdata *dd = ppd->dd;
6978        u32 pack_ent;
6979        int regidx;
6980
6981        /* Get correct offset in chip-space, and in source table */
6982        regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
6983        /*
6984         * We do not use qib_write_kreg_port() because it was intended
6985         * only for registers in the lower "port specific" pages.
6986         * So do the index calculation by hand.
6987         */
6988        if (ppd->hw_pidx)
6989                regidx += (dd->palign / sizeof(u64));
6990
6991        pack_ent = tp->amp << DDS_ENT_AMP_LSB;
6992        pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
6993        pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
6994        pack_ent |= tp->post << DDS_ENT_POST_LSB;
6995        qib_write_kreg(dd, regidx, pack_ent);
6996        /* Prevent back-to-back writes by hitting scratch */
6997        qib_write_kreg(ppd->dd, kr_scratch, 0);
6998}
6999
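/*
 * Each entry below is the vendor OUI, the part number (NULL matches any
 * part from that vendor), then the SDR, DDR, and QDR txdds_ent values
 * ({amp, pre, main, post}) for that cable.
 */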
7000static const struct vendor_txdds_ent vendor_txdds[] = {
7001        { /* Amphenol 1m 30awg NoEq */
7002                { 0x41, 0x50, 0x48 }, "584470002       ",
7003                { 10,  0,  0,  5 }, { 10,  0,  0,  9 }, {  7,  1,  0, 13 },
7004        },
7005        { /* Amphenol 3m 28awg NoEq */
7006                { 0x41, 0x50, 0x48 }, "584470004       ",
7007                {  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  7, 15 },
7008        },
7009        { /* Finisar 3m OM2 Optical */
7010                { 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
7011                {  0,  0,  0,  3 }, {  0,  0,  0,  4 }, {  0,  0,  0, 13 },
7012        },
7013        { /* Finisar 30m OM2 Optical */
7014                { 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
7015                {  0,  0,  0,  1 }, {  0,  0,  0,  5 }, {  0,  0,  0, 11 },
7016        },
7017        { /* Finisar Default OM2 Optical */
7018                { 0x00, 0x90, 0x65 }, NULL,
7019                {  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  0,  0, 12 },
7020        },
7021        { /* Gore 1m 30awg NoEq */
7022                { 0x00, 0x21, 0x77 }, "QSN3300-1       ",
7023                {  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  0, 15 },
7024        },
7025        { /* Gore 2m 30awg NoEq */
7026                { 0x00, 0x21, 0x77 }, "QSN3300-2       ",
7027                {  0,  0,  0,  8 }, {  0,  0,  0, 10 }, {  0,  1,  7, 15 },
7028        },
7029        { /* Gore 1m 28awg NoEq */
7030                { 0x00, 0x21, 0x77 }, "QSN3800-1       ",
7031                {  0,  0,  0,  6 }, {  0,  0,  0,  8 }, {  0,  1,  0, 15 },
7032        },
7033        { /* Gore 3m 28awg NoEq */
7034                { 0x00, 0x21, 0x77 }, "QSN3800-3       ",
7035                {  0,  0,  0,  9 }, {  0,  0,  0, 13 }, {  0,  1,  7, 15 },
7036        },
7037        { /* Gore 5m 24awg Eq */
7038                { 0x00, 0x21, 0x77 }, "QSN7000-5       ",
7039                {  0,  0,  0,  7 }, {  0,  0,  0,  9 }, {  0,  1,  3, 15 },
7040        },
7041        { /* Gore 7m 24awg Eq */
7042                { 0x00, 0x21, 0x77 }, "QSN7000-7       ",
7043                {  0,  0,  0,  9 }, {  0,  0,  0, 11 }, {  0,  2,  6, 15 },
7044        },
7045        { /* Gore 5m 26awg Eq */
7046                { 0x00, 0x21, 0x77 }, "QSN7600-5       ",
7047                {  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  9, 13 },
7048        },
7049        { /* Gore 7m 26awg Eq */
7050                { 0x00, 0x21, 0x77 }, "QSN7600-7       ",
7051                {  0,  0,  0,  8 }, {  0,  0,  0, 11 }, { 10,  1,  8, 15 },
7052        },
7053        { /* Intersil 12m 24awg Active */
7054                { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
7055                {  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  3,  0,  9 },
7056        },
7057        { /* Intersil 10m 28awg Active */
7058                { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
7059                {  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  2,  0,  2 },
7060        },
7061        { /* Intersil 7m 30awg Active */
7062                { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
7063                {  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  1,  0,  3 },
7064        },
7065        { /* Intersil 5m 32awg Active */
7066                { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
7067                {  0,  0,  0,  6 }, {  0,  0,  0,  6 }, {  0,  2,  0,  8 },
7068        },
7069        { /* Intersil Default Active */
7070                { 0x00, 0x30, 0xB4 }, NULL,
7071                {  0,  0,  0,  6 }, {  0,  0,  0,  5 }, {  0,  2,  0,  5 },
7072        },
7073        { /* Luxtera 20m Active Optical */
7074                { 0x00, 0x25, 0x63 }, NULL,
7075                {  0,  0,  0,  5 }, {  0,  0,  0,  8 }, {  0,  2,  0, 12 },
7076        },
7077        { /* Molex 1M Cu loopback */
7078                { 0x00, 0x09, 0x3A }, "74763-0025      ",
7079                {  2,  2,  6, 15 }, {  2,  2,  6, 15 }, {  2,  2,  6, 15 },
7080        },
7081        { /* Molex 2m 28awg NoEq */
7082                { 0x00, 0x09, 0x3A }, "74757-2201      ",
7083                {  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  1, 15 },
7084        },
7085};
7086
7087static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
7088        /* amp, pre, main, post */
7089        {  2, 2, 15,  6 },      /* Loopback */
7090        {  0, 0,  0,  1 },      /*  2 dB */
7091        {  0, 0,  0,  2 },      /*  3 dB */
7092        {  0, 0,  0,  3 },      /*  4 dB */
7093        {  0, 0,  0,  4 },      /*  5 dB */
7094        {  0, 0,  0,  5 },      /*  6 dB */
7095        {  0, 0,  0,  6 },      /*  7 dB */
7096        {  0, 0,  0,  7 },      /*  8 dB */
7097        {  0, 0,  0,  8 },      /*  9 dB */
7098        {  0, 0,  0,  9 },      /* 10 dB */
7099        {  0, 0,  0, 10 },      /* 11 dB */
7100        {  0, 0,  0, 11 },      /* 12 dB */
7101        {  0, 0,  0, 12 },      /* 13 dB */
7102        {  0, 0,  0, 13 },      /* 14 dB */
7103        {  0, 0,  0, 14 },      /* 15 dB */
7104        {  0, 0,  0, 15 },      /* 16 dB */
7105};
7106
7107static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
7108        /* amp, pre, main, post */
7109        {  2, 2, 15,  6 },      /* Loopback */
7110        {  0, 0,  0,  8 },      /*  2 dB */
7111        {  0, 0,  0,  8 },      /*  3 dB */
7112        {  0, 0,  0,  9 },      /*  4 dB */
7113        {  0, 0,  0,  9 },      /*  5 dB */
7114        {  0, 0,  0, 10 },      /*  6 dB */
7115        {  0, 0,  0, 10 },      /*  7 dB */
7116        {  0, 0,  0, 11 },      /*  8 dB */
7117        {  0, 0,  0, 11 },      /*  9 dB */
7118        {  0, 0,  0, 12 },      /* 10 dB */
7119        {  0, 0,  0, 12 },      /* 11 dB */
7120        {  0, 0,  0, 13 },      /* 12 dB */
7121        {  0, 0,  0, 13 },      /* 13 dB */
7122        {  0, 0,  0, 14 },      /* 14 dB */
7123        {  0, 0,  0, 14 },      /* 15 dB */
7124        {  0, 0,  0, 15 },      /* 16 dB */
7125};
7126
7127static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
7128        /* amp, pre, main, post */
7129        {  2, 2, 15,  6 },      /* Loopback */
7130        {  0, 1,  0,  7 },      /*  2 dB (also QMH7342) */
7131        {  0, 1,  0,  9 },      /*  3 dB (also QMH7342) */
7132        {  0, 1,  0, 11 },      /*  4 dB */
7133        {  0, 1,  0, 13 },      /*  5 dB */
7134        {  0, 1,  0, 15 },      /*  6 dB */
7135        {  0, 1,  3, 15 },      /*  7 dB */
7136        {  0, 1,  7, 15 },      /*  8 dB */
7137        {  0, 1,  7, 15 },      /*  9 dB */
7138        {  0, 1,  8, 15 },      /* 10 dB */
7139        {  0, 1,  9, 15 },      /* 11 dB */
7140        {  0, 1, 10, 15 },      /* 12 dB */
7141        {  0, 2,  6, 15 },      /* 13 dB */
7142        {  0, 2,  7, 15 },      /* 14 dB */
7143        {  0, 2,  8, 15 },      /* 15 dB */
7144        {  0, 2,  9, 15 },      /* 16 dB */
7145};
7146
7147/*
7148 * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
7149 * These are mostly used for mez cards going through connectors
7150 * and backplane traces, but can be used to add other "unusual"
7151 * table values as well.
7152 */
7153static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
7154        /* amp, pre, main, post */
7155        {  0, 0, 0,  1 },       /* QMH7342 backplane settings */
7156        {  0, 0, 0,  1 },       /* QMH7342 backplane settings */
7157        {  0, 0, 0,  2 },       /* QMH7342 backplane settings */
7158        {  0, 0, 0,  2 },       /* QMH7342 backplane settings */
7159        {  0, 0, 0, 11 },       /* QME7342 backplane settings */
7160        {  0, 0, 0, 11 },       /* QME7342 backplane settings */
7161        {  0, 0, 0, 11 },       /* QME7342 backplane settings */
7162        {  0, 0, 0, 11 },       /* QME7342 backplane settings */
7163        {  0, 0, 0, 11 },       /* QME7342 backplane settings */
7164        {  0, 0, 0, 11 },       /* QME7342 backplane settings */
7165        {  0, 0, 0, 11 },       /* QME7342 backplane settings */
7166        {  0, 0, 0,  3 },       /* QMH7342 backplane settings */
7167        {  0, 0, 0,  4 },       /* QMH7342 backplane settings */
7168};
7169
7170static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
7171        /* amp, pre, main, post */
7172        {  0, 0, 0,  7 },       /* QMH7342 backplane settings */
7173        {  0, 0, 0,  7 },       /* QMH7342 backplane settings */
7174        {  0, 0, 0,  8 },       /* QMH7342 backplane settings */
7175        {  0, 0, 0,  8 },       /* QMH7342 backplane settings */
7176        {  0, 0, 0, 13 },       /* QME7342 backplane settings */
7177        {  0, 0, 0, 13 },       /* QME7342 backplane settings */
7178        {  0, 0, 0, 13 },       /* QME7342 backplane settings */
7179        {  0, 0, 0, 13 },       /* QME7342 backplane settings */
7180        {  0, 0, 0, 13 },       /* QME7342 backplane settings */
7181        {  0, 0, 0, 13 },       /* QME7342 backplane settings */
7182        {  0, 0, 0, 13 },       /* QME7342 backplane settings */
7183        {  0, 0, 0,  9 },       /* QMH7342 backplane settings */
7184        {  0, 0, 0, 10 },       /* QMH7342 backplane settings */
7185};
7186
7187static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
7188        /* amp, pre, main, post */
7189        {  0, 1,  0,  4 },      /* QMH7342 backplane settings */
7190        {  0, 1,  0,  5 },      /* QMH7342 backplane settings */
7191        {  0, 1,  0,  6 },      /* QMH7342 backplane settings */
7192        {  0, 1,  0,  8 },      /* QMH7342 backplane settings */
7193        {  0, 1, 12, 10 },      /* QME7342 backplane setting */
7194        {  0, 1, 12, 11 },      /* QME7342 backplane setting */
7195        {  0, 1, 12, 12 },      /* QME7342 backplane setting */
7196        {  0, 1, 12, 14 },      /* QME7342 backplane setting */
7197        {  0, 1, 12,  6 },      /* QME7342 backplane setting */
7198        {  0, 1, 12,  7 },      /* QME7342 backplane setting */
7199        {  0, 1, 12,  8 },      /* QME7342 backplane setting */
7200        {  0, 1,  0, 10 },      /* QMH7342 backplane settings */
7201        {  0, 1,  0, 12 },      /* QMH7342 backplane settings */
7202};
7203
7204static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
7205        /* amp, pre, main, post */
7206        { 0, 0, 0, 0 },         /* QME7342 mfg settings */
7207        { 0, 0, 0, 6 },         /* QME7342 P2 mfg settings */
7208};
7209
7210static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
7211                                               unsigned atten)
7212{
7213        /*
7214         * The attenuation table starts at 2dB for entry 1,
7215         * with entry 0 being the loopback entry.
7216         */
7217        if (atten <= 2)
7218                atten = 1;
7219        else if (atten > TXDDS_TABLE_SZ)
7220                atten = TXDDS_TABLE_SZ - 1;
7221        else
7222                atten--;
7223        return txdds + atten;
7224}
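/*
 * E.g. get_atten_table(txdds_sdr, 5) returns the "5 dB" row (index 4);
 * attenuations of 2 dB or less map to index 1, and anything past the
 * table clamps to TXDDS_TABLE_SZ - 1.
 */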
7225
7226/*
7227 * if override is set, the module parameter txselect has a value
7228 * for this specific port, so use it, rather than our normal mechanism.
7229 */
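/*
 * E.g. txselect 2 picks txdds_sdr/ddr/qdr[2]; 17 falls into the "extra"
 * tables as txdds_extra_*[1]; and on QME/QMH boards the next TXDDS_MFG_SZ
 * values index into txdds_extra_mfg[].
 */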
7230static void find_best_ent(struct qib_pportdata *ppd,
7231                          const struct txdds_ent **sdr_dds,
7232                          const struct txdds_ent **ddr_dds,
7233                          const struct txdds_ent **qdr_dds, int override)
7234{
7235        struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
7236        int idx;
7237
7238        /* Search table of known cables */
7239        for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
7240                const struct vendor_txdds_ent *v = vendor_txdds + idx;
7241
7242                if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
7243                    (!v->partnum ||
7244                     !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
7245                        *sdr_dds = &v->sdr;
7246                        *ddr_dds = &v->ddr;
7247                        *qdr_dds = &v->qdr;
7248                        return;
7249                }
7250        }
7251
7252        /* Active cables don't have attenuation so we only set SERDES
7253         * settings to account for the attenuation of the board traces. */
7254        if (!override && QSFP_IS_ACTIVE(qd->tech)) {
7255                *sdr_dds = txdds_sdr + ppd->dd->board_atten;
7256                *ddr_dds = txdds_ddr + ppd->dd->board_atten;
7257                *qdr_dds = txdds_qdr + ppd->dd->board_atten;
7258                return;
7259        }
7260
7261        if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
7262                                                      qd->atten[1])) {
7263                *sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
7264                *ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
7265                *qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
7266                return;
7267        } else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
7268                /*
7269                 * If we have no (or incomplete) data from the cable
7270                 * EEPROM, or no QSFP, or override is set, use the
7271         * module parameter value to index into the attenuation
7272                 * table.
7273                 */
7274                idx = ppd->cpspec->no_eep;
7275                *sdr_dds = &txdds_sdr[idx];
7276                *ddr_dds = &txdds_ddr[idx];
7277                *qdr_dds = &txdds_qdr[idx];
7278        } else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
7279                /* similar to above, but index into the "extra" table. */
7280                idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
7281                *sdr_dds = &txdds_extra_sdr[idx];
7282                *ddr_dds = &txdds_extra_ddr[idx];
7283                *qdr_dds = &txdds_extra_qdr[idx];
7284        } else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
7285                   ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
7286                                          TXDDS_MFG_SZ)) {
7287                idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
7288                pr_info("IB%u:%u use idx %u into txdds_mfg\n",
7289                        ppd->dd->unit, ppd->port, idx);
7290                *sdr_dds = &txdds_extra_mfg[idx];
7291                *ddr_dds = &txdds_extra_mfg[idx];
7292                *qdr_dds = &txdds_extra_mfg[idx];
7293        } else {
7294                /* this shouldn't happen, it's range checked */
7295                *sdr_dds = txdds_sdr + qib_long_atten;
7296                *ddr_dds = txdds_ddr + qib_long_atten;
7297                *qdr_dds = txdds_qdr + qib_long_atten;
7298        }
7299}
7300
7301static void init_txdds_table(struct qib_pportdata *ppd, int override)
7302{
7303        const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
7304        struct txdds_ent *dds;
7305        int idx;
7306        int single_ent = 0;
7307
7308        find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
7309
7310        /* for mez cards or override, use the selected value for all entries */
7311        if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
7312                single_ent = 1;
7313
7314        /* Fill in the first entry with the best entry found. */
7315        set_txdds(ppd, 0, sdr_dds);
7316        set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
7317        set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
7318        if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
7319                QIBL_LINKACTIVE)) {
7320                dds = (struct txdds_ent *)(ppd->link_speed_active ==
7321                                           QIB_IB_QDR ?  qdr_dds :
7322                                           (ppd->link_speed_active ==
7323                                            QIB_IB_DDR ? ddr_dds : sdr_dds));
7324                write_tx_serdes_param(ppd, dds);
7325        }
7326
7327        /* Fill in the remaining entries with the default table values. */
7328        for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
7329                set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
7330                set_txdds(ppd, idx + TXDDS_TABLE_SZ,
7331                          single_ent ? ddr_dds : txdds_ddr + idx);
7332                set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
7333                          single_ent ? qdr_dds : txdds_qdr + idx);
7334        }
7335}
7336
7337#define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
7338#define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
7339#define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
7340#define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
7341#define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
7342#define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
7343#define AHB_TRANS_TRIES 10
7344
7345/*
7346 * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan3,
7347 * 5=subsystem, which is why most calls have "chan + (chan >> 1)"
7348 * for the channel argument.
7349 */
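/*
 * For the four serdes channels, chan + (chan >> 1) maps 0, 1, 2, 3 to
 * entities 0, 1, 3, 4, skipping the pll at 2.  Calling ahb_mod() with
 * data and mask both 0 is a pure read; with mask all 1s, a pure write.
 */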
7350static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
7351                    u32 data, u32 mask)
7352{
7353        u32 rd_data, wr_data, sz_mask;
7354        u64 trans, acc, prev_acc;
7355        u32 ret = 0xBAD0BAD;
7356        int tries;
7357
7358        prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
7359        /* From this point on, make sure we return access */
7360        acc = (quad << 1) | 1;
7361        qib_write_kreg(dd, KR_AHB_ACC, acc);
7362
7363        for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7364                trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7365                if (trans & AHB_TRANS_RDY)
7366                        break;
7367        }
7368        if (tries >= AHB_TRANS_TRIES) {
7369                qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
7370                goto bail;
7371        }
7372
7373        /* If mask is not all 1s, we need to read, but different SerDes
7374         * entities have different sizes
7375         */
7376        sz_mask = (1ULL << ((quad == 1) ? 32 : 16)) - 1;
7377        wr_data = data & mask & sz_mask;
7378        if ((~mask & sz_mask) != 0) {
7379                trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7380                qib_write_kreg(dd, KR_AHB_TRANS, trans);
7381
7382                for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7383                        trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7384                        if (trans & AHB_TRANS_RDY)
7385                                break;
7386                }
7387                if (tries >= AHB_TRANS_TRIES) {
7388                        qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
7389                                    AHB_TRANS_TRIES);
7390                        goto bail;
7391                }
7392                /* Re-read in case host split reads and read data first */
7393                trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7394                rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
7395                wr_data |= (rd_data & ~mask & sz_mask);
7396        }
7397
7398        /* If mask is not zero, we need to write. */
7399        if (mask & sz_mask) {
7400                trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7401                trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
7402                trans |= AHB_WR;
7403                qib_write_kreg(dd, KR_AHB_TRANS, trans);
7404
7405                for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7406                        trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7407                        if (trans & AHB_TRANS_RDY)
7408                                break;
7409                }
7410                if (tries >= AHB_TRANS_TRIES) {
7411                        qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
7412                                    AHB_TRANS_TRIES);
7413                        goto bail;
7414                }
7415        }
7416        ret = wr_data;
7417bail:
7418        qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
7419        return ret;
7420}
7421
7422static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
7423                             unsigned mask)
7424{
7425        struct qib_devdata *dd = ppd->dd;
7426        int chan;
7427        u32 rbc;
7428
7429        for (chan = 0; chan < SERDES_CHANS; ++chan) {
7430                ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7431                        data, mask);
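                /* read back (data/mask of 0 is a pure read), presumably to
                 * ensure the write reached the serdes; result unused.
                 */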
7432                rbc = ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7433                              addr, 0, 0);
7434        }
7435}
7436
7437static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
7438{
7439        u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
7440        u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
7441
7442        if (enable && !state) {
7443                pr_info("IB%u:%u Turning LOS on\n",
7444                        ppd->dd->unit, ppd->port);
7445                data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7446        } else if (!enable && state) {
7447                pr_info("IB%u:%u Turning LOS off\n",
7448                        ppd->dd->unit, ppd->port);
7449                data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7450        }
7451        qib_write_kreg_port(ppd, krp_serdesctrl, data);
7452}
7453
7454static int serdes_7322_init(struct qib_pportdata *ppd)
7455{
7456        int ret = 0;
7457        if (ppd->dd->cspec->r1)
7458                ret = serdes_7322_init_old(ppd);
7459        else
7460                ret = serdes_7322_init_new(ppd);
7461        return ret;
7462}
7463
7464static int serdes_7322_init_old(struct qib_pportdata *ppd)
7465{
7466        u32 le_val;
7467
7468        /*
7469         * Initialize the Tx DDS tables.  Also done every QSFP event,
7470         * for adapters with QSFP
7471         */
7472        init_txdds_table(ppd, 0);
7473
7474        /* ensure no tx overrides from earlier driver loads */
7475        qib_write_kreg_port(ppd, krp_tx_deemph_override,
7476                SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7477                reset_tx_deemphasis_override));
7478
7479        /* Patch some SerDes defaults to "Better for IB" */
7480        /* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
7481        ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
7482
7483        /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
7484        ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
7485        /* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
7486        ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));
7487
7488        /* May be overridden in qsfp_7322_event */
7489        le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
7490        ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
7491
7492        /* enable LE1 adaptation for all but QME, which is disabled */
7493        le_val = IS_QME(ppd->dd) ? 0 : 1;
7494        ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));
7495
7496        /* Clear cmode-override, may be set from older driver */
7497        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7498
7499        /* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
7500        ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));
7501
7502        /* setup LoS params; these are subsystem, so chan == 5 */
7503        /* LoS filter threshold_count on, ch 0-3, set to 8 */
7504        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7505        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7506        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7507        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7508
7509        /* LoS filter threshold_count off, ch 0-3, set to 4 */
7510        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
7511        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
7512        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
7513        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
7514
7515        /* LoS filter select enabled */
7516        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
7517
7518        /* LoS target data:  SDR=4, DDR=2, QDR=1 */
7519        ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
7520        ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7521        ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7522
7523        serdes_7322_los_enable(ppd, 1);
7524
7525        /* rxbistena; set to 0 to avoid effects of it switching later */
7526        ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
7527
7528        /* Configure 4 DFE taps, and only they adapt */
7529        ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));
7530
7531        /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
7532        le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
7533        ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
7534
7535        /*
7536         * Set receive adaptation mode.  SDR and DDR adaptation are
7537         * always on, and QDR is initially enabled; later disabled.
7538         */
7539        qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
7540        qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
7541        qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
7542                            ppd->dd->cspec->r1 ?
7543                            QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
7544        ppd->cpspec->qdr_dfe_on = 1;
7545
7546        /* FLoop LOS gate: PPM filter enabled */
7547        ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7548
7549        /* rx offset center enabled */
7550        ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);
7551
7552        if (!ppd->dd->cspec->r1) {
7553                ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
7554                ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
7555        }
7556
7557        /* Set the frequency loop bandwidth to 15 */
7558        ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));
7559
7560        return 0;
7561}
7562
7563static int serdes_7322_init_new(struct qib_pportdata *ppd)
7564{
7565        unsigned long tend;
7566        u32 le_val, rxcaldone;
7567        int chan, chan_done = (1 << SERDES_CHANS) - 1;
7568
7569        /* Clear cmode-override, may be set from older driver */
7570        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7571
7572        /* ensure no tx overrides from earlier driver loads */
7573        qib_write_kreg_port(ppd, krp_tx_deemph_override,
7574                SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7575                reset_tx_deemphasis_override));
7576
7577        /* START OF LSI SUGGESTED SERDES BRINGUP */
7578        /* Reset - Calibration Setup */
7579        /*       Stop DFE adaptation */
7580        ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
7581        /*       Disable LE1 */
7582        ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
7583        /*       Disable autoadapt for LE1 */
7584        ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
7585        /*       Disable LE2 */
7586        ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
7587        /*       Disable VGA */
7588        ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
7589        /*       Disable AFE Offset Cancel */
7590        ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
7591        /*       Disable Timing Loop */
7592        ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
7593        /*       Disable Frequency Loop */
7594        ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
7595        /*       Disable Baseline Wander Correction */
7596        ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
7597        /*       Disable RX Calibration */
7598        ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
7599        /*       Disable RX Offset Calibration */
7600        ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
7601        /*       Select BB CDR */
7602        ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
7603        /*       CDR Step Size */
7604        ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
7605        /*       Enable phase Calibration */
7606        ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
7607        /*       DFE Bandwidth [2:14-12] */
7608        ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
7609        /*       DFE Config (4 taps only) */
7610        ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
7611        /*       Gain Loop Bandwidth */
7612        if (!ppd->dd->cspec->r1) {
7613                ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
7614                ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
7615        } else {
7616                ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
7617        }
7618        /*       Baseline Wander Correction Gain [13:4-0] (leave as default) */
7619        /*       Baseline Wander Correction Gain [3:7-5] (leave as default) */
7620        /*       Data Rate Select [5:7-6] (leave as default) */
7621        /*       RX Parallel Word Width [3:10-8] (leave as default) */
7622
7623        /* RX RESET */
7624        /*       Single- or Multi-channel reset */
7625        /*       RX Analog reset */
7626        /*       RX Digital reset */
7627        ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
7628        msleep(20);
7629        /*       RX Analog reset */
7630        ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
7631        msleep(20);
7632        /*       RX Digital reset */
7633        ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
7634        msleep(20);
7635
7636        /* setup LoS params; these are subsystem, so chan == 5 */
7637        /* LoS filter threshold_count on, ch 0-3, set to 8 */
7638        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7639        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7640        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7641        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7642
7643        /* LoS filter threshold_count off, ch 0-3, set to 4 */
7644        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
7645        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
7646        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
7647        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
7648
7649        /* LoS filter select enabled */
7650        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
7651
7652        /* LoS target data:  SDR=4, DDR=2, QDR=1 */
7653        ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
7654        ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7655        ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7656
7657        /* Turn on LOS on initial SERDES init */
7658        serdes_7322_los_enable(ppd, 1);
7659        /* FLoop LOS gate: PPM filter  enabled */
7660        ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7661
7662        /* RX LATCH CALIBRATION */
7663        /*       Enable Eyefinder Phase Calibration latch */
7664        ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
7665        /*       Enable RX Offset Calibration latch */
7666        ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
7667        msleep(20);
7668        /*       Start Calibration */
7669        ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
7670        tend = jiffies + msecs_to_jiffies(500);
7671        while (chan_done && !time_is_before_jiffies(tend)) {
7672                msleep(20);
7673                for (chan = 0; chan < SERDES_CHANS; ++chan) {
7674                        rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
7675                                            (chan + (chan >> 1)),
7676                                            25, 0, 0);
7677                        if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
7678                            (~chan_done & (1 << chan)) == 0)
7679                                chan_done &= ~(1 << chan);
7680                }
7681        }
7682        if (chan_done) {
7683                pr_info("Serdes %d calibration not done after 0.5 sec: 0x%x\n",
7684                         IBSD(ppd->hw_pidx), chan_done);
7685        } else {
7686                for (chan = 0; chan < SERDES_CHANS; ++chan) {
7687                        rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
7688                                            (chan + (chan >> 1)),
7689                                            25, 0, 0);
7690                        if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
7691                                pr_info("Serdes %d chan %d calibration failed\n",
7692                                        IBSD(ppd->hw_pidx), chan);
7693                }
7694        }
7695
7696        /*       Turn off Calibration */
7697        ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
7698        msleep(20);
7699
7700        /* BRING RX UP */
7701        /*       Set LE2 value (May be overridden in qsfp_7322_event) */
7702        le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
7703        ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
7704        /*       Set LE2 Loop bandwidth */
7705        ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
7706        /*       Enable LE2 */
7707        ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
7708        msleep(20);
7709        /*       Enable H0 only */
7710        ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
7711        /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
7712        le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
7713        ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
7714        /*       Enable VGA */
7715        ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
7716        msleep(20);
7717        /*       Set Frequency Loop Bandwidth */
7718        ibsd_wr_allchans(ppd, 2, (15 << 5), BMASK(8, 5));
7719        /*       Enable Frequency Loop */
7720        ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
7721        /*       Set Timing Loop Bandwidth */
7722        ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
7723        /*       Enable Timing Loop */
7724        ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
7725        msleep(50);
7726        /*       Enable DFE
7727         *       Set receive adaptation mode.  SDR and DDR adaptation are
7728         *       always on; QDR is enabled initially and disabled later.
7729         */
7730        qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
7731        qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
7732        qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
7733                            ppd->dd->cspec->r1 ?
7734                            QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
7735        ppd->cpspec->qdr_dfe_on = 1;
7736        /*       Disable LE1 */
7737        ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
7738        /*       Disable auto adapt for LE1 */
7739        ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
7740        msleep(20);
7741        /*       Enable AFE Offset Cancel */
7742        ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
7743        /*       Enable Baseline Wander Correction */
7744        ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
7745        /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
7746        ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
7747        /* VGA output common mode */
7748        ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
7749
7750        /*
7751         * Initialize the Tx DDS tables.  Also done on every QSFP event,
7752         * for adapters with QSFP.
7753         */
7754        init_txdds_table(ppd, 0);
7755
7756        return 0;
7757}
7758
7759/* start of QMH serdes parameter adjustment */
7760
7761static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
7762{
7763        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7764                9, code << 9, 0x3f << 9);
7765}
7766
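/*
 * Editorial note: the tapenable argument below is currently unused;
 * only the enable flag selects which of the two AHB writes is done.
 */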
7767static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
7768        int enable, u32 tapenable)
7769{
7770        if (enable)
7771                ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7772                        1, 3 << 10, 0x1f << 10);
7773        else
7774                ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7775                        1, 0, 0x1f << 10);
7776}
7777
7778/* Set clock to 1, 0, 1, 0 */
7779static void clock_man(struct qib_pportdata *ppd, int chan)
7780{
7781        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7782                4, 0x4000, 0x4000);
7783        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7784                4, 0, 0x4000);
7785        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7786                4, 0x4000, 0x4000);
7787        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7788                4, 0, 0x4000);
7789}
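
/*
 * Editorial note: set_man_code() loads a 6-bit code into bits 14:9 of
 * serdes address 9, and clock_man() clocks it in by pulsing bit 14 of
 * address 4 high-low twice.  force_h1() below shows the full sequence:
 * enter manual H1 mode, load the code, clock it in, leave manual mode.
 */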
7790
7791/*
7792 * Write the current Tx serdes pre, post, main and amp settings into the
7793 * serdes.  The caller must pass settings appropriate for the current
7794 * speed, or not care whether they are correct for it.
7795 */
7796static void write_tx_serdes_param(struct qib_pportdata *ppd,
7797                                  struct txdds_ent *txdds)
7798{
7799        u64 deemph;
7800
7801        deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
7802        /* field names for amp, main, post, pre, respectively */
7803        deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
7804                    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
7805                    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
7806                    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));
7807
7808        deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7809                           tx_override_deemphasis_select);
7810        deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7811                    txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7812                                       txampcntl_d2a);
7813        deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7814                     txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7815                                   txc0_ena);
7816        deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7817                     txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7818                                    txcp1_ena);
7819        deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7820                     txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7821                                    txcn1_ena);
7822        qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
7823}
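
/*
 * Editorial sketch, not part of the original driver: every field update
 * in write_tx_serdes_param() follows the same pattern -- mask the new
 * value to the field width with SYM_RMASK() and shift it into place
 * with SYM_LSB().  A hypothetical convenience macro (QIB_SET_FIELD is
 * an illustrative name):
 */
#define QIB_SET_FIELD(reg, field, val) \
        (((u64)(val) & SYM_RMASK(reg, field)) << SYM_LSB(reg, field))
/*
 * With it, e.g. the amp update above becomes
 *      deemph |= QIB_SET_FIELD(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
 *                              txampcntl_d2a, txdds->amp);
 */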
7824
7825/*
7826 * Set the parameters for mezzanine cards on link bounce, so they are
7827 * always exactly what was requested.  Similar logic to init_txdds_table(),
7828 * but does just the serdes.
7829 */
7830static void adj_tx_serdes(struct qib_pportdata *ppd)
7831{
7832        const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
7833        struct txdds_ent *dds;
7834
7835        find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
7836        dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
7837                qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
7838                                ddr_dds : sdr_dds));
7839        write_tx_serdes_param(ppd, dds);
7840}
7841
7842/* set QDR forced value for H1, if needed */
7843static void force_h1(struct qib_pportdata *ppd)
7844{
7845        int chan;
7846
7847        ppd->cpspec->qdr_reforce = 0;
7848        if (!ppd->dd->cspec->r1)
7849                return;
7850
7851        for (chan = 0; chan < SERDES_CHANS; chan++) {
7852                set_man_mode_h1(ppd, chan, 1, 0);
7853                set_man_code(ppd, chan, ppd->cpspec->h1_val);
7854                clock_man(ppd, chan);
7855                set_man_mode_h1(ppd, chan, 0, 0);
7856        }
7857}
7858
7859#define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
7860#define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
7861
7862#define R_OPCODE_LSB 3
7863#define R_OP_NOP 0
7864#define R_OP_SHIFT 2
7865#define R_OP_UPDATE 3
7866#define R_TDI_LSB 2
7867#define R_TDO_LSB 1
7868#define R_RDY 1
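
/*
 * Editorial note on the kr_r_access layout implied by the definitions
 * above: bit 0 is the ready bit (R_RDY) on read, bit 1 carries TDO out
 * (R_TDO_LSB), bit 2 carries TDI in (R_TDI_LSB), bits 4:3 hold the
 * opcode (NOP, SHIFT or UPDATE at R_OPCODE_LSB), the BIST chain select
 * sits at BISTEN_LSB, and SJA_EN enables JTAG access as a whole.
 */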
7869
7870static int qib_r_grab(struct qib_devdata *dd)
7871{
7872        u64 val = SJA_EN;
7873
7874        qib_write_kreg(dd, kr_r_access, val);
7875        qib_read_kreg32(dd, kr_scratch);
7876        return 0;
7877}
7878
7879/* qib_r_wait_for_rdy() not only waits for the ready bit; it also
7880 * returns the current state of R_TDO.
7881 */
7882static int qib_r_wait_for_rdy(struct qib_devdata *dd)
7883{
7884        u64 val;
7885        int timeout;
7886        for (timeout = 0; timeout < 100; ++timeout) {
7887                val = qib_read_kreg32(dd, kr_r_access);
7888                if (val & R_RDY)
7889                        return (val >> R_TDO_LSB) & 1;
7890        }
7891        return -1;
7892}
7893
7894static int qib_r_shift(struct qib_devdata *dd, int bisten,
7895                       int len, u8 *inp, u8 *outp)
7896{
7897        u64 valbase, val;
7898        int ret, pos;
7899
7900        valbase = SJA_EN | (bisten << BISTEN_LSB) |
7901                (R_OP_SHIFT << R_OPCODE_LSB);
7902        ret = qib_r_wait_for_rdy(dd);
7903        if (ret < 0)
7904                goto bail;
7905        for (pos = 0; pos < len; ++pos) {
7906                val = valbase;
7907                if (outp) {
7908                        outp[pos >> 3] &= ~(1 << (pos & 7));
7909                        outp[pos >> 3] |= (ret << (pos & 7));
7910                }
7911                if (inp) {
7912                        int tdi = inp[pos >> 3] >> (pos & 7);
7913                        val |= ((tdi & 1) << R_TDI_LSB);
7914                }
7915                qib_write_kreg(dd, kr_r_access, val);
7916                qib_read_kreg32(dd, kr_scratch);
7917                ret = qib_r_wait_for_rdy(dd);
7918                if (ret < 0)
7919                        break;
7920        }
7921        /* Restore to NOP between operations. */
7922        val = SJA_EN | (bisten << BISTEN_LSB);
7923        qib_write_kreg(dd, kr_r_access, val);
7924        qib_read_kreg32(dd, kr_scratch);
7925        ret = qib_r_wait_for_rdy(dd);
7926
7927        if (ret >= 0)
7928                ret = pos;
7929bail:
7930        return ret;
7931}
7932
7933static int qib_r_update(struct qib_devdata *dd, int bisten)
7934{
7935        u64 val;
7936        int ret;
7937
7938        val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
7939        ret = qib_r_wait_for_rdy(dd);
7940        if (ret >= 0) {
7941                qib_write_kreg(dd, kr_r_access, val);
7942                qib_read_kreg32(dd, kr_scratch);
7943        }
7944        return ret;
7945}
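
/*
 * Editorial note, not part of the original driver: the three helpers
 * above are used as a sequence -- grab the JTAG interface, shift a bit
 * string into the selected BIST chain, then latch it with an update.
 * In sketch form (error handling elided; see setup_7322_link_recovery()
 * below for the real usage):
 *
 *      qib_r_grab(dd);
 *      qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm_bits, NULL);
 *      qib_r_update(dd, BISTEN_ETM);
 */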
7946
7947#define BISTEN_PORT_SEL 15
7948#define LEN_PORT_SEL 625
7949#define BISTEN_AT 17
7950#define LEN_AT 156
7951#define BISTEN_ETM 16
7952#define LEN_ETM 632
7953
7954#define BIT2BYTE(x) (((x) + BITS_PER_BYTE - 1) / BITS_PER_BYTE)
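
/*
 * For reference, BIT2BYTE() rounds a bit count up to whole bytes:
 * BIT2BYTE(LEN_AT) = (156 + 7) / 8 = 20 and BIT2BYTE(LEN_ETM) =
 * (632 + 7) / 8 = 79, matching the array sizes below.
 */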
7955
7956/* These are common to all IB port use cases. */
7957static u8 reset_at[BIT2BYTE(LEN_AT)] = {
7958        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7959        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
7960};
7961static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
7962        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7963        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7964        0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
7965        0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
7966        0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
7967        0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
7968        0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7969        0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
7970};
7971static u8 at[BIT2BYTE(LEN_AT)] = {
7972        0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
7973        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
7974};
7975
7976/* used for IB1 or IB2, only one in use */
7977static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
7978        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7979        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7980        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7981        0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
7982        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7983        0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
7984        0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
7985        0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
7986};
7987
7988/* used when both IB1 and IB2 are in use */
7989static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
7990        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7991        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
7992        0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7993        0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
7994        0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
7995        0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
7996        0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
7997        0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
7998};
7999
8000/* used when only IB1 is in use */
8001static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
8002        0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8003        0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8004        0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8005        0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8006        0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8007        0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8008        0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8009        0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8010};
8011
8012/* used when only IB2 is in use */
8013static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
8014        0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
8015        0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
8016        0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8017        0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8018        0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
8019        0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8020        0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8021        0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
8022};
8023
8024/* used when both IB1 and IB2 are in use */
8025static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
8026        0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8027        0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8028        0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8029        0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8030        0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8031        0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
8032        0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8033        0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8034};
8035
8036/*
8037 * Do setup to properly handle IB link recovery; if @both is set, we
8038 * are initializing to cover both ports; otherwise we are initializing
8039 * to cover a single port card, or the port has reached INIT and we may
8040 * need to switch coverage types.
8041 */
8042static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
8043{
8044        u8 *portsel, *etm;
8045        struct qib_devdata *dd = ppd->dd;
8046
8047        if (!ppd->dd->cspec->r1)
8048                return;
8049        if (!both) {
8050                dd->cspec->recovery_ports_initted++;
8051                ppd->cpspec->recovery_init = 1;
8052        }
8053        if (!both && dd->cspec->recovery_ports_initted == 1) {
8054                portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
8055                etm = atetm_1port;
8056        } else {
8057                portsel = portsel_2port;
8058                etm = atetm_2port;
8059        }
8060
8061        if (qib_r_grab(dd) < 0 ||
8062                qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
8063                qib_r_update(dd, BISTEN_ETM) < 0 ||
8064                qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
8065                qib_r_update(dd, BISTEN_AT) < 0 ||
8066                qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
8067                            portsel, NULL) < 0 ||
8068                qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
8069                qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
8070                qib_r_update(dd, BISTEN_AT) < 0 ||
8071                qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
8072                qib_r_update(dd, BISTEN_ETM) < 0)
8073                qib_dev_err(dd, "Failed IB link recovery setup\n");
8074}
8075
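/*
 * Editorial summary of the function below: in the single-port recovery
 * configuration, briefly assert FreezeMode and then sanity-check the
 * active freeze mask.  If the mask reads back as zero the chip is
 * wedged: stay frozen, mask all interrupts, and report that a power
 * cycle is required.  Otherwise clear the serdes-PClk-not-detected
 * error, release freeze, and take the IBC out of reset.
 */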
8076static void check_7322_rxe_status(struct qib_pportdata *ppd)
8077{
8078        struct qib_devdata *dd = ppd->dd;
8079        u64 fmask;
8080
8081        if (dd->cspec->recovery_ports_initted != 1)
8082                return; /* rest doesn't apply to dual-port */
8083        qib_write_kreg(dd, kr_control, dd->control |
8084                       SYM_MASK(Control, FreezeMode));
8085        (void)qib_read_kreg64(dd, kr_scratch);
8086        udelay(3); /* ibcreset asserted 400ns, be sure that's over */
8087        fmask = qib_read_kreg64(dd, kr_act_fmask);
8088        if (!fmask) {
8089                /*
8090                 * Require a power cycle before we'll work again; make
8091                 * sure we get no more interrupts, and don't turn off
8092                 * freeze mode.
8093                 */
8094                ppd->dd->cspec->stay_in_freeze = 1;
8095                qib_7322_set_intr_state(ppd->dd, 0);
8096                qib_write_kreg(dd, kr_fmask, 0ULL);
8097                qib_dev_err(dd, "HCA unusable until powercycled\n");
8098                return; /* eventually reset */
8099        }
8100
8101        qib_write_kreg(ppd->dd, kr_hwerrclear,
8102            SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));
8103
8104        /* don't do the full clear_freeze(), not needed for this */
8105        qib_write_kreg(dd, kr_control, dd->control);
8106        qib_read_kreg32(dd, kr_scratch);
8107        /* take IBC out of reset */
8108        if (ppd->link_speed_supported) {
8109                ppd->cpspec->ibcctrl_a &=
8110                        ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
8111                qib_write_kreg_port(ppd, krp_ibcctrl_a,
8112                                    ppd->cpspec->ibcctrl_a);
8113                qib_read_kreg32(dd, kr_scratch);
8114                if (ppd->lflags & QIBL_IB_LINK_DISABLED)
8115                        qib_set_ib_7322_lstate(ppd, 0,
8116                                QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
8117        }
8118}
8119