linux/drivers/infiniband/hw/qib/qib_iba7322.c
/*
 * Copyright (c) 2012 - 2017 Intel Corporation.  All rights reserved.
 * Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * This file contains all of the code that is specific to the
 * InfiniPath 7322 chip
 */

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#ifdef CONFIG_INFINIBAND_QIB_DCA
#include <linux/dca.h>
#endif

#include "qib.h"
#include "qib_7322_regs.h"
#include "qib_qsfp.h"

#include "qib_mad.h"
#include "qib_verbs.h"

#undef pr_fmt
#define pr_fmt(fmt) QIB_DRV_NAME " " fmt

static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
static irqreturn_t qib_7322intr(int irq, void *data);
static irqreturn_t qib_7322bufavail(int irq, void *data);
static irqreturn_t sdma_intr(int irq, void *data);
static irqreturn_t sdma_idle_intr(int irq, void *data);
static irqreturn_t sdma_progress_intr(int irq, void *data);
static irqreturn_t sdma_cleanup_intr(int irq, void *data);
static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
                                  struct qib_ctxtdata *rcd);
static u8 qib_7322_phys_portstate(u64);
static u32 qib_7322_iblink_state(u64);
static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
                                   u16 linitcmd);
static void force_h1(struct qib_pportdata *);
static void adj_tx_serdes(struct qib_pportdata *);
static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
static void qib_7322_mini_pcs_reset(struct qib_pportdata *);

static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
static void serdes_7322_los_enable(struct qib_pportdata *, int);
static int serdes_7322_init_old(struct qib_pportdata *);
static int serdes_7322_init_new(struct qib_pportdata *);
static void dump_sdma_7322_state(struct qib_pportdata *);

#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))

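/*
 * Worked example: BMASK(7, 4) expands to ((1 << 4) - 1) << 4 == 0xf0,
 * i.e. a mask covering bits 7..4 inclusive.
 */
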
/* LE2 serdes values for different cases */
#define LE2_DEFAULT 5
#define LE2_5m 4
#define LE2_QME 0

/* Below is special-purpose, so only really works for the IB SerDes blocks. */
#define IBSD(hw_pidx) (hw_pidx + 2)

/* these are variables for documentation and experimentation purposes */
static const unsigned rcv_int_timeout = 375;
static const unsigned rcv_int_count = 16;
static const unsigned sdma_idle_cnt = 64;

/* Time to stop altering Rx Equalization parameters, after link up. */
#define RXEQ_DISABLE_MSECS 2500

/*
 * Number of VLs we are configured to use (to allow for more
 * credits per vl, etc.)
 */
ushort qib_num_cfg_vls = 2;
module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");

static ushort qib_chase = 1;
module_param_named(chase, qib_chase, ushort, S_IRUGO);
MODULE_PARM_DESC(chase, "Enable state chase handling");

static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
MODULE_PARM_DESC(long_attenuation,
                 "attenuation cutoff (dB) for long copper cable setup");

static ushort qib_singleport;
module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");

static ushort qib_krcvq01_no_msi;
module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");

/*
 * Receive header queue sizes
 */
static unsigned qib_rcvhdrcnt;
module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "receive header count");

static unsigned qib_rcvhdrsize;
module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");

static unsigned qib_rcvhdrentsize;
module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");

#define MAX_ATTEN_LEN 64 /* plenty for any real system */
/* for read back, default index is ~5m copper cable */
static char txselect_list[MAX_ATTEN_LEN] = "10";
static struct kparam_string kp_txselect = {
        .string = txselect_list,
        .maxlen = MAX_ATTEN_LEN
};
static int  setup_txselect(const char *, const struct kernel_param *);
module_param_call(txselect, setup_txselect, param_get_string,
                  &kp_txselect, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(txselect,
                 "Tx serdes indices (for no QSFP or invalid QSFP data)");
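
/*
 * Usage sketch (not from this file): all of the parameters above are
 * given at module load time, e.g.
 *
 *   modprobe ib_qib num_vls=4 singleport=1 txselect=10
 *
 * The values shown are illustrative only; txselect takes the serdes
 * table index handled by setup_txselect().
 */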

#define BOARD_QME7342 5
#define BOARD_QMH7342 6
#define BOARD_QMH7360 9
#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
                    BOARD_QMH7342)
#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
                    BOARD_QME7342)

#define KREG_IDX(regname)     (QIB_7322_##regname##_OFFS / sizeof(u64))

#define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))

#define MASK_ACROSS(lsb, msb) \
        (((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))

#define SYM_RMASK(regname, fldname) ((u64)              \
        QIB_7322_##regname##_##fldname##_RMASK)

#define SYM_MASK(regname, fldname) ((u64)               \
        QIB_7322_##regname##_##fldname##_RMASK <<       \
         QIB_7322_##regname##_##fldname##_LSB)

#define SYM_FIELD(value, regname, fldname) ((u64)       \
        (((value) >> SYM_LSB(regname, fldname)) &       \
         SYM_RMASK(regname, fldname)))

/* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
#define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
        (((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))

#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
#define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
#define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
#define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
/* Below because most, but not all, fields of IntMask have that full suffix */
#define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)

#define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)

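/*
 * Illustrative sketch only (compiled out, not part of the driver):
 * how the SYM_* helpers compose.  SYM_MASK() selects a field in place;
 * SYM_FIELD() extracts it right-justified, i.e.
 *   SYM_FIELD(v, regname, fldname) == (v >> LSB) & RMASK.
 */
#if 0
static u32 example_get_linkspeed(const struct qib_pportdata *ppd)
{
        u64 v = qib_read_kreg_port(ppd, krp_ibcstatus_a);

        return (u32)SYM_FIELD(v, IBCStatusA_0, LinkSpeedActive);
}
#endif
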
/*
 * the size bits give us 2^N, in KB units.  0 marks as invalid,
 * and 7 is reserved.  We currently use only 2KB and 4KB
 */
#define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
#define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
#define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
#define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */

#define SendIBSLIDAssignMask \
        QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
#define SendIBSLMCMask \
        QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK

#define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
#define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
#define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
#define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
#define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
#define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)

#define _QIB_GPIO_SDA_NUM 1
#define _QIB_GPIO_SCL_NUM 0
#define QIB_EEPROM_WEN_NUM 14
#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */

/* HW counter clock is at 4nsec */
#define QIB_7322_PSXMITWAIT_CHECK_RATE 4000

/* full speed IB port 1 only */
#define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
#define PORT_SPD_CAP_SHIFT 3

/* full speed featuremask, both ports */
#define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))

/*
 * This file contains almost all the chip-specific register information and
 * access functions for the QLogic InfiniPath 7322 PCI-Express chip.
 */

/* Use defines to tie machine-generated names to lower-case names */
#define kr_contextcnt KREG_IDX(ContextCnt)
#define kr_control KREG_IDX(Control)
#define kr_counterregbase KREG_IDX(CntrRegBase)
#define kr_errclear KREG_IDX(ErrClear)
#define kr_errmask KREG_IDX(ErrMask)
#define kr_errstatus KREG_IDX(ErrStatus)
#define kr_extctrl KREG_IDX(EXTCtrl)
#define kr_extstatus KREG_IDX(EXTStatus)
#define kr_gpio_clear KREG_IDX(GPIOClear)
#define kr_gpio_mask KREG_IDX(GPIOMask)
#define kr_gpio_out KREG_IDX(GPIOOut)
#define kr_gpio_status KREG_IDX(GPIOStatus)
#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
#define kr_debugportval KREG_IDX(DebugPortValueReg)
#define kr_fmask KREG_IDX(feature_mask)
#define kr_act_fmask KREG_IDX(active_feature_mask)
#define kr_hwerrclear KREG_IDX(HwErrClear)
#define kr_hwerrmask KREG_IDX(HwErrMask)
#define kr_hwerrstatus KREG_IDX(HwErrStatus)
#define kr_intclear KREG_IDX(IntClear)
#define kr_intmask KREG_IDX(IntMask)
#define kr_intredirect KREG_IDX(IntRedirect0)
#define kr_intstatus KREG_IDX(IntStatus)
#define kr_pagealign KREG_IDX(PageAlign)
#define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
#define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
#define kr_revision KREG_IDX(Revision)
#define kr_scratch KREG_IDX(Scratch)
#define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
#define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
#define kr_sendctrl KREG_IDX(SendCtrl)
#define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
#define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
#define kr_sendpiobufbase KREG_IDX(SendBufBase)
#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
#define kr_sendpiosize KREG_IDX(SendBufSize)
#define kr_sendregbase KREG_IDX(SendRegBase)
#define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
#define kr_userregbase KREG_IDX(UserRegBase)
#define kr_intgranted KREG_IDX(Int_Granted)
#define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
#define kr_intblocked KREG_IDX(IntBlocked)
#define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)

/*
 * per-port kernel registers.  Access only with qib_read_kreg_port()
 * or qib_write_kreg_port()
 */
#define krp_errclear KREG_IBPORT_IDX(ErrClear)
#define krp_errmask KREG_IBPORT_IDX(ErrMask)
#define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
#define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
#define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
#define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
#define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
#define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
#define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
#define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
#define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
#define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
#define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
#define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
#define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
#define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
#define krp_psinterval KREG_IBPORT_IDX(PSInterval)
#define krp_psstart KREG_IBPORT_IDX(PSStart)
#define krp_psstat KREG_IBPORT_IDX(PSStat)
#define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
#define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
#define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
#define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
#define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
#define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
#define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
#define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
#define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
#define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
#define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
#define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
#define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
#define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
#define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
#define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
#define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
#define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
#define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
#define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
#define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
#define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
#define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
#define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
#define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
#define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
#define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
#define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
#define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
#define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
#define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)

/*
 * Per-context kernel registers.  Access only with qib_read_kreg_ctxt()
 * or qib_write_kreg_ctxt()
 */
#define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
#define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)

/*
 * TID Flow table, per context.  Reduces
 * number of hdrq updates to one per flow (or on errors).
 * context 0 and 1 share same memory, but have distinct
 * addresses.  Since for now, we never use expected sends
 * on kernel contexts, we don't worry about that (we initialize
 * those entries for ctxt 0/1 on driver load twice, for example).
 */
#define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
#define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))

/* these are the error bits in the tid flows, and are W1C */
#define TIDFLOW_ERRBITS  ( \
        (SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
        SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
        (SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
        SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
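
/*
 * Illustrative sketch only (compiled out): because these bits are
 * write-1-to-clear (W1C), clearing a flow entry's error state means
 * writing the error bits back, not writing zero.  'ctxt' and 'fidx'
 * are hypothetical here.
 */
#if 0
        qib_write_ureg(dd, ur_rcvflowtable + fidx, TIDFLOW_ERRBITS, ctxt);
#endif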

/* Most (not all) Counters are per-IBport.
 * Requires LBIntCnt is at offset 0 in the group
 */
#define CREG_IDX(regname) \
((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))

#define crp_badformat CREG_IDX(RxVersionErrCnt)
#define crp_err_rlen CREG_IDX(RxLenErrCnt)
#define crp_erricrc CREG_IDX(RxICRCErrCnt)
#define crp_errlink CREG_IDX(RxLinkMalformCnt)
#define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
#define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
#define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
#define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
#define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
#define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
#define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
#define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
#define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
#define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
#define crp_pktrcv CREG_IDX(RxDataPktCnt)
#define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
#define crp_pktsend CREG_IDX(TxDataPktCnt)
#define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
#define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
#define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
#define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
#define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
#define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
#define crp_rcvebp CREG_IDX(RxEBPCnt)
#define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
#define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
#define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
#define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
#define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
#define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
#define crp_rxvlerr CREG_IDX(RxVlErrCnt)
#define crp_sendstall CREG_IDX(TxFlowStallCnt)
#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
#define crp_txlenerr CREG_IDX(TxLenErrCnt)
#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
#define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
#define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
#define crp_wordrcv CREG_IDX(RxDwordCnt)
#define crp_wordsend CREG_IDX(TxDwordCnt)
#define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)

/* these are the (few) counters that are not port-specific */
#define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
                        QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
#define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
#define cr_lbint CREG_DEVIDX(LBIntCnt)
#define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
#define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
#define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
#define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
#define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)

/* no chip register for # of IB ports supported, so define */
#define NUM_IB_PORTS 2

/* 1 VL15 buffer per hardware IB port, no register for this, so define */
#define NUM_VL15_BUFS NUM_IB_PORTS

/*
 * context 0 and 1 are special, and there is no chip register that
 * defines this value, so we have to define it here.
 * These are all allocated to either 0 or 1 for single port
 * hardware configuration, otherwise each gets half
 */
#define KCTXT0_EGRCNT 2048

/* values for vl and port fields in PBC, 7322-specific */
#define PBC_PORT_SEL_LSB 26
#define PBC_PORT_SEL_RMASK 1
#define PBC_VL_NUM_LSB 27
#define PBC_VL_NUM_RMASK 7
#define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
#define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */

static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
        [IB_RATE_2_5_GBPS] = 16,
        [IB_RATE_5_GBPS] = 8,
        [IB_RATE_10_GBPS] = 4,
        [IB_RATE_20_GBPS] = 2,
        [IB_RATE_30_GBPS] = 2,
        [IB_RATE_40_GBPS] = 1
};
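
/*
 * The multipliers above appear to be 40 Gb/s divided by the static
 * rate, rounded up (hence 30 Gb/s also maps to 2), i.e. the
 * inter-packet delay is scaled relative to a full-speed QDR 4X link.
 */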

static const char * const qib_sdma_state_names[] = {
        [qib_sdma_state_s00_hw_down]          = "s00_HwDown",
        [qib_sdma_state_s10_hw_start_up_wait] = "s10_HwStartUpWait",
        [qib_sdma_state_s20_idle]             = "s20_Idle",
        [qib_sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
        [qib_sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
        [qib_sdma_state_s50_hw_halt_wait]     = "s50_HwHaltWait",
        [qib_sdma_state_s99_running]          = "s99_Running",
};

#define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
#define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)

/* link training states, from IBC */
#define IB_7322_LT_STATE_DISABLED        0x00
#define IB_7322_LT_STATE_LINKUP          0x01
#define IB_7322_LT_STATE_POLLACTIVE      0x02
#define IB_7322_LT_STATE_POLLQUIET       0x03
#define IB_7322_LT_STATE_SLEEPDELAY      0x04
#define IB_7322_LT_STATE_SLEEPQUIET      0x05
#define IB_7322_LT_STATE_CFGDEBOUNCE     0x08
#define IB_7322_LT_STATE_CFGRCVFCFG      0x09
#define IB_7322_LT_STATE_CFGWAITRMT      0x0a
#define IB_7322_LT_STATE_CFGIDLE         0x0b
#define IB_7322_LT_STATE_RECOVERRETRAIN  0x0c
#define IB_7322_LT_STATE_TXREVLANES      0x0d
#define IB_7322_LT_STATE_RECOVERWAITRMT  0x0e
#define IB_7322_LT_STATE_RECOVERIDLE     0x0f
#define IB_7322_LT_STATE_CFGENH          0x10
#define IB_7322_LT_STATE_CFGTEST         0x11
#define IB_7322_LT_STATE_CFGWAITRMTTEST  0x12
#define IB_7322_LT_STATE_CFGWAITENH      0x13

/* link state machine states from IBC */
#define IB_7322_L_STATE_DOWN             0x0
#define IB_7322_L_STATE_INIT             0x1
#define IB_7322_L_STATE_ARM              0x2
#define IB_7322_L_STATE_ACTIVE           0x3
#define IB_7322_L_STATE_ACT_DEFER        0x4

static const u8 qib_7322_physportstate[0x20] = {
        [IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
        [IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
        [IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
        [IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
        [IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
        [IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
        [IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7322_LT_STATE_CFGRCVFCFG] =
                IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7322_LT_STATE_CFGWAITRMT] =
                IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
        [IB_7322_LT_STATE_RECOVERRETRAIN] =
                IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
        [IB_7322_LT_STATE_RECOVERWAITRMT] =
                IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
        [IB_7322_LT_STATE_RECOVERIDLE] =
                IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
        [IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
        [IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7322_LT_STATE_CFGWAITRMTTEST] =
                IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7322_LT_STATE_CFGWAITENH] =
                IB_PHYSPORTSTATE_CFG_WAIT_ENH,
        [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
};

#ifdef CONFIG_INFINIBAND_QIB_DCA
struct qib_irq_notify {
        int rcv;
        void *arg;
        struct irq_affinity_notify notify;
};
#endif

struct qib_chip_specific {
        u64 __iomem *cregbase;
        u64 *cntrs;
        spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
        spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
        u64 main_int_mask;      /* clear bits which have dedicated handlers */
        u64 int_enable_mask;  /* for per port interrupts in single port mode */
        u64 errormask;
        u64 hwerrmask;
        u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
        u64 gpio_mask; /* shadow the gpio mask register */
        u64 extctrl; /* shadow the gpio output enable, etc... */
        u32 ncntrs;
        u32 nportcntrs;
        u32 cntrnamelen;
        u32 portcntrnamelen;
        u32 numctxts;
        u32 rcvegrcnt;
        u32 updthresh; /* current AvailUpdThld */
        u32 updthresh_dflt; /* default AvailUpdThld */
        u32 r1;
        u32 num_msix_entries;
        u32 sdmabufcnt;
        u32 lastbuf_for_pio;
        u32 stay_in_freeze;
        u32 recovery_ports_initted;
#ifdef CONFIG_INFINIBAND_QIB_DCA
        u32 dca_ctrl;
        int rhdr_cpu[18];
        int sdma_cpu[2];
        u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */
#endif
        struct qib_msix_entry *msix_entries;
        unsigned long *sendchkenable;
        unsigned long *sendgrhchk;
        unsigned long *sendibchk;
        u32 rcvavail_timeout[18];
        char emsgbuf[128]; /* for device error interrupt msg buffer */
};

/* Table of Tx emphasis entries, in "human readable" form. */
struct txdds_ent {
        u8 amp;
        u8 pre;
        u8 main;
        u8 post;
};

struct vendor_txdds_ent {
        u8 oui[QSFP_VOUI_LEN];
        u8 *partnum;
        struct txdds_ent sdr;
        struct txdds_ent ddr;
        struct txdds_ent qdr;
};

static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);

#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
#define TXDDS_EXTRA_SZ 18 /* number of extra tx settings entries */
#define TXDDS_MFG_SZ 2    /* number of mfg tx settings entries */
#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */

#define H1_FORCE_VAL 8
#define H1_FORCE_QME 1 /*  may be overridden via setup_txselect() */
#define H1_FORCE_QMH 7 /*  may be overridden via setup_txselect() */

/* The static and dynamic registers are paired, and the pairs indexed by spd */
#define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
        + ((spd) * 2))

#define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
#define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
#define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */

struct qib_chippport_specific {
        u64 __iomem *kpregbase;
        u64 __iomem *cpregbase;
        u64 *portcntrs;
        struct qib_pportdata *ppd;
        wait_queue_head_t autoneg_wait;
        struct delayed_work autoneg_work;
        struct delayed_work ipg_work;
        struct timer_list chase_timer;
        /*
         * these fields are used to establish deltas for IB symbol
         * errors and link recovery errors.  They can be reported on
         * some chips during link negotiation prior to INIT, and with
         * DDR when faking DDR negotiations with non-IBTA switches.
         * The chip counters are adjusted at driver unload if there is
         * a non-zero delta.
         */
        u64 ibdeltainprog;
        u64 ibsymdelta;
        u64 ibsymsnap;
        u64 iblnkerrdelta;
        u64 iblnkerrsnap;
        u64 iblnkdownsnap;
        u64 iblnkdowndelta;
        u64 ibmalfdelta;
        u64 ibmalfsnap;
        u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
        u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
        unsigned long qdr_dfe_time;
        unsigned long chase_end;
        u32 autoneg_tries;
        u32 recovery_init;
        u32 qdr_dfe_on;
        u32 qdr_reforce;
        /*
         * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
         * entry zero is unused, to simplify indexing
         */
        u8 h1_val;
        u8 no_eep;  /* txselect table index to use if no qsfp info */
        u8 ipg_tries;
        u8 ibmalfusesnap;
        struct qib_qsfp_data qsfp_data;
        char epmsgbuf[192]; /* for port error interrupt msg buffer */
        char sdmamsgbuf[192]; /* for per-port sdma error messages */
};

static struct {
        const char *name;
        irq_handler_t handler;
        int lsb;
        int port; /* 0 if not port-specific, else port # */
        int dca;
} irq_table[] = {
        { "", qib_7322intr, -1, 0, 0 },
        { " (buf avail)", qib_7322bufavail,
                SYM_LSB(IntStatus, SendBufAvail), 0, 0 },
        { " (sdma 0)", sdma_intr,
                SYM_LSB(IntStatus, SDmaInt_0), 1, 1 },
        { " (sdma 1)", sdma_intr,
                SYM_LSB(IntStatus, SDmaInt_1), 2, 1 },
        { " (sdmaI 0)", sdma_idle_intr,
                SYM_LSB(IntStatus, SDmaIdleInt_0), 1, 1 },
        { " (sdmaI 1)", sdma_idle_intr,
                SYM_LSB(IntStatus, SDmaIdleInt_1), 2, 1 },
        { " (sdmaP 0)", sdma_progress_intr,
                SYM_LSB(IntStatus, SDmaProgressInt_0), 1, 1 },
        { " (sdmaP 1)", sdma_progress_intr,
                SYM_LSB(IntStatus, SDmaProgressInt_1), 2, 1 },
        { " (sdmaC 0)", sdma_cleanup_intr,
                SYM_LSB(IntStatus, SDmaCleanupDone_0), 1, 0 },
        { " (sdmaC 1)", sdma_cleanup_intr,
                SYM_LSB(IntStatus, SDmaCleanupDone_1), 2, 0 },
};

#ifdef CONFIG_INFINIBAND_QIB_DCA

static const struct dca_reg_map {
        int     shadow_inx;
        int     lsb;
        u64     mask;
        u16     regno;
} dca_rcvhdr_reg_map[] = {
        { 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH),
           ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH), KREG_IDX(DCACtrlB) },
        { 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH),
           ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH), KREG_IDX(DCACtrlB) },
        { 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH),
           ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH), KREG_IDX(DCACtrlB) },
        { 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH),
           ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH), KREG_IDX(DCACtrlB) },
        { 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH),
           ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH), KREG_IDX(DCACtrlC) },
        { 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH),
           ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH), KREG_IDX(DCACtrlC) },
        { 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH),
           ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH), KREG_IDX(DCACtrlC) },
        { 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH),
           ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH), KREG_IDX(DCACtrlC) },
        { 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH),
           ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH), KREG_IDX(DCACtrlD) },
        { 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH),
           ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH), KREG_IDX(DCACtrlD) },
        { 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH),
           ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH), KREG_IDX(DCACtrlD) },
        { 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH),
           ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH), KREG_IDX(DCACtrlD) },
        { 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH),
           ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH), KREG_IDX(DCACtrlE) },
        { 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH),
           ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH), KREG_IDX(DCACtrlE) },
        { 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH),
           ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH), KREG_IDX(DCACtrlE) },
        { 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH),
           ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH), KREG_IDX(DCACtrlE) },
        { 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH),
           ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH), KREG_IDX(DCACtrlF) },
        { 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH),
           ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH), KREG_IDX(DCACtrlF) },
};
#endif

/* ibcctrl bits */
#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
/* cycle through TS1/TS2 till OK */
#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
/* wait for TS1, then go on */
#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16

#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1           /* move to 0x11 */
#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2          /* move to 0x21 */
#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */

#define BLOB_7322_IBCHG 0x101

static inline void qib_write_kreg(const struct qib_devdata *dd,
                                  const u32 regno, u64 value);
static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
static void write_7322_initregs(struct qib_devdata *);
static void write_7322_init_portregs(struct qib_pportdata *);
static void setup_7322_link_recovery(struct qib_pportdata *, u32);
static void check_7322_rxe_status(struct qib_pportdata *);
static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
#ifdef CONFIG_INFINIBAND_QIB_DCA
static void qib_setup_dca(struct qib_devdata *dd);
static void setup_dca_notifier(struct qib_devdata *dd, int msixnum);
static void reset_dca_notifier(struct qib_devdata *dd, int msixnum);
#endif

/**
 * qib_read_ureg32 - read 32-bit virtualized per-context register
 * @dd: device
 * @regno: register number
 * @ctxt: context number
 *
 * Return the contents of a register that is virtualized to be per context.
 * Returns 0 on errors (not distinguishable from a valid value of 0 at
 * runtime; we may add a separate error variable at some point).
 */
static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
                                  enum qib_ureg regno, int ctxt)
{
        if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
                return 0;
        return readl(regno + (u64 __iomem *)(
                (dd->ureg_align * ctxt) + (dd->userbase ?
                 (char __iomem *)dd->userbase :
                 (char __iomem *)dd->kregbase + dd->uregbase)));
}
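
/*
 * Illustrative address math for the ureg helpers (values assumed for
 * the example only): with ureg_align == 0x1000, context 2's copy of
 * register 'regno' lives at
 *   (userbase ? userbase : kregbase + uregbase) + 2 * 0x1000 + regno * 8
 * i.e. each context gets its own ureg_align-sized block of registers.
 */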

/**
 * qib_write_ureg - write virtualized per-context register
 * @dd: device
 * @regno: register number
 * @value: value
 * @ctxt: context
 *
 * Write the contents of a register that is virtualized to be per context.
 */
static inline void qib_write_ureg(const struct qib_devdata *dd,
                                  enum qib_ureg regno, u64 value, int ctxt)
{
        u64 __iomem *ubase;

        if (dd->userbase)
                ubase = (u64 __iomem *)
                        ((char __iomem *) dd->userbase +
                         dd->ureg_align * ctxt);
        else
                ubase = (u64 __iomem *)
                        (dd->uregbase +
                         (char __iomem *) dd->kregbase +
                         dd->ureg_align * ctxt);

        if (dd->kregbase && (dd->flags & QIB_PRESENT))
                writeq(value, &ubase[regno]);
}

static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
                                  const u32 regno)
{
        if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
                return -1;
        return readl((u32 __iomem *) &dd->kregbase[regno]);
}

static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
                                  const u32 regno)
{
        if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
                return -1;
        return readq(&dd->kregbase[regno]);
}

static inline void qib_write_kreg(const struct qib_devdata *dd,
                                  const u32 regno, u64 value)
{
        if (dd->kregbase && (dd->flags & QIB_PRESENT))
                writeq(value, &dd->kregbase[regno]);
}

/*
 * not many sanity checks for the port-specific kernel register routines,
 * since they are only used when it's known to be safe.
 */
static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
                                     const u16 regno)
{
        if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
                return 0ULL;
        return readq(&ppd->cpspec->kpregbase[regno]);
}

static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
                                       const u16 regno, u64 value)
{
        if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
            (ppd->dd->flags & QIB_PRESENT))
                writeq(value, &ppd->cpspec->kpregbase[regno]);
}

/**
 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
 * @dd: the qlogic_ib device
 * @regno: the register number to write
 * @ctxt: the context containing the register
 * @value: the value to write
 */
static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
                                       const u16 regno, unsigned ctxt,
                                       u64 value)
{
        qib_write_kreg(dd, regno + ctxt, value);
}
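
/*
 * Illustrative sketch only (compiled out): per-context registers such
 * as RcvHdrAddr0 are laid out as consecutive 64-bit registers, so the
 * ctxt number is simply added to the context-0 index.  'hdraddr' is
 * hypothetical here.
 */
#if 0
        qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, hdraddr);
#endif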

static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
{
        if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
                return 0;
        return readq(&dd->cspec->cregbase[regno]);
}

static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
{
        if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
                return 0;
        return readl(&dd->cspec->cregbase[regno]);
}

static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
                                        u16 regno, u64 value)
{
        if (ppd->cpspec && ppd->cpspec->cpregbase &&
            (ppd->dd->flags & QIB_PRESENT))
                writeq(value, &ppd->cpspec->cpregbase[regno]);
}

static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
                                      u16 regno)
{
        if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
            !(ppd->dd->flags & QIB_PRESENT))
                return 0;
        return readq(&ppd->cpspec->cpregbase[regno]);
}

static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
                                        u16 regno)
{
        if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
            !(ppd->dd->flags & QIB_PRESENT))
                return 0;
        return readl(&ppd->cpspec->cpregbase[regno]);
}

/* bits in Control register */
#define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
#define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)

/* bits in general interrupt regs */
#define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
#define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
#define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
#define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
#define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
#define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
#define QIB_I_C_ERROR INT_MASK(Err)

#define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
#define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
#define QIB_I_GPIO INT_MASK(AssertGPIO)
#define QIB_I_P_SDMAINT(pidx) \
        (INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
         INT_MASK_P(SDmaProgress, pidx) | \
         INT_MASK_PM(SDmaCleanupDone, pidx))

/* Interrupt bits that are "per port" */
#define QIB_I_P_BITSEXTANT(pidx) \
        (INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
        INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
        INT_MASK_P(SDmaProgress, pidx) | \
        INT_MASK_PM(SDmaCleanupDone, pidx))

/* Interrupt bits that are common to a device */
/* currently unused: QIB_I_SPIOSENT */
#define QIB_I_C_BITSEXTANT \
        (QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
        QIB_I_SPIOSENT | \
        QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)

#define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
        QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))

/*
 * Error bits that are "per port".
 */
#define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
#define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
#define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
#define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
#define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
#define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
#define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
#define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
#define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
#define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
#define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
#define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
#define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
#define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
#define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
#define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
#define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
#define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
#define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
#define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
#define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
#define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
#define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
#define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
#define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
#define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
#define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
#define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)

#define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
#define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
#define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
#define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
#define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
#define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
#define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
#define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
#define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
#define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
#define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)

/* Error bits that are common to a device */
#define QIB_E_RESET ERR_MASK(ResetNegated)
#define QIB_E_HARDWARE ERR_MASK(HardwareErr)
#define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)

/*
 * Per chip (rather than per-port) errors.  Most either do
 * nothing but trigger a print (because they self-recover, or
 * always occur in tandem with other errors that handle the
 * issue), or indicate errors with no recovery; either way,
 * we want to know that they happened.
 */
#define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
#define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
#define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
#define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
#define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
#define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
#define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
#define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)

/* SDMA chip errors (not per port)
 * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
 * the SDMAHALT error immediately, so we just print the dup error via the
 * E_AUTO mechanism.  This is true of most of the per-port fatal errors
 * as well, but since this is port-independent, by definition, it's
 * handled a bit differently.  SDMA_VL15 and SDMA_WRONG_PORT are per
 * packet send errors, and so are handled in the same manner as other
 * per-packet errors.
 */
#define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
#define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
#define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)

/*
 * Below is functionally equivalent to the legacy QLOGIC_IB_E_PKTERRS;
 * it is used to print "common" packet errors.
 */
#define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
        QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
        QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
        QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
        QIB_E_P_REBP)

/* Error bits that are packet-related (Receive, per-port) */
#define QIB_E_P_RPKTERRS (\
        QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
        QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
        QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
        QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
        QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
        QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)

/*
 * Error bits that are Send-related (per port)
 * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
 * All of these potentially need to have a buffer disarmed
 */
#define QIB_E_P_SPKTERRS (\
        QIB_E_P_SUNEXP_PKTNUM |\
        QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
        QIB_E_P_SMAXPKTLEN |\
        QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
        QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
        QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)

#define QIB_E_SPKTERRS ( \
                QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
                ERR_MASK_N(SendUnsupportedVLErr) |                      \
                QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)

#define QIB_E_P_SDMAERRS ( \
        QIB_E_P_SDMAHALT | \
        QIB_E_P_SDMADESCADDRMISALIGN | \
        QIB_E_P_SDMAUNEXPDATA | \
        QIB_E_P_SDMAMISSINGDW | \
        QIB_E_P_SDMADWEN | \
        QIB_E_P_SDMARPYTAG | \
        QIB_E_P_SDMA1STDESC | \
        QIB_E_P_SDMABASE | \
        QIB_E_P_SDMATAILOUTOFBOUND | \
        QIB_E_P_SDMAOUTOFBOUND | \
        QIB_E_P_SDMAGENMISMATCH)

/*
 * This sets some bits more than once, but makes it more obvious which
 * bits are not handled under other categories, and the repeat definition
 * is not a problem.
 */
#define QIB_E_P_BITSEXTANT ( \
        QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
        QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
        QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
        QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
        )

/*
 * These are errors that can occur when the link
 * changes state while a packet is being sent or received.  This doesn't
 * cover things like EBP or VCRC that can be the result of the sender
 * having the link change state, so we receive a "known bad" packet.
 * All of these are "per port", so renamed:
 */
1108#define QIB_E_P_LINK_PKTERRS (\
1109        QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1110        QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
1111        QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
1112        QIB_E_P_RUNEXPCHAR)
1113
1114/*
1115 * This sets some bits more than once, but makes it more obvious which
1116 * bits are not handled under other categories (such as QIB_E_SPKTERRS),
1117 * and the repeat definition is not a problem.
1118 */
1119#define QIB_E_C_BITSEXTANT (\
1120        QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
1121        QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
1122        QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)
1123
1124/* Likewise Neuter E_SPKT_ERRS_IGNORE */
1125#define E_SPKT_ERRS_IGNORE 0
1126
1127#define QIB_EXTS_MEMBIST_DISABLED \
1128        SYM_MASK(EXTStatus, MemBISTDisabled)
1129#define QIB_EXTS_MEMBIST_ENDTEST \
1130        SYM_MASK(EXTStatus, MemBISTEndTest)
1131
1132#define QIB_E_SPIOARMLAUNCH \
1133        ERR_MASK(SendArmLaunchErr)
1134
1135#define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
1136#define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)
1137
1138/*
1139 * IBTA_1_2 is set when multiple speeds are enabled (normal),
1140 * and also if forced QDR (only QDR enabled).  It's enabled for the
1141 * forced QDR case so that scrambling will be enabled by the TS3
1142 * exchange, when supported by both sides of the link.
1143 */
1144#define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
1145#define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
1146#define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
1147#define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
1148#define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
1149#define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
1150        SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
1151#define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)
1152
1153#define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
1154#define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)
1155
1156#define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
1157#define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1158#define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1159
1160#define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1161#define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1162#define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
1163        SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
1164#define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
1165        SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
1166#define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)
1167
1168#define IBA7322_REDIRECT_VEC_PER_REG 12
1169
1170#define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
1171#define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
1172#define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
1173#define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
1174#define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)
1175
1176#define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */
1177
1178#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
1179        .msg = #fldname , .sz = sizeof(#fldname) }
1180#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
1181        fldname##Mask##_##port), .msg = #fldname , .sz = sizeof(#fldname) }
1182static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
1183        HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
1184        HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
1185        HWE_AUTO(PCIESerdesPClkNotDetect),
1186        HWE_AUTO(PowerOnBISTFailed),
1187        HWE_AUTO(TempsenseTholdReached),
1188        HWE_AUTO(MemoryErr),
1189        HWE_AUTO(PCIeBusParityErr),
1190        HWE_AUTO(PcieCplTimeout),
1191        HWE_AUTO(PciePoisonedTLP),
1192        HWE_AUTO_P(SDmaMemReadErr, 1),
1193        HWE_AUTO_P(SDmaMemReadErr, 0),
1194        HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
1195        HWE_AUTO_P(IBCBusToSPCParityErr, 1),
1196        HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
1197        HWE_AUTO(statusValidNoEop),
1198        HWE_AUTO(LATriggered),
1199        { .mask = 0, .sz = 0 }
1200};
1201
1202#define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
1203        .msg = #fldname, .sz = sizeof(#fldname) }
1204#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
1205        .msg = #fldname, .sz = sizeof(#fldname) }
1206static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
1207        E_AUTO(RcvEgrFullErr),
1208        E_AUTO(RcvHdrFullErr),
1209        E_AUTO(ResetNegated),
1210        E_AUTO(HardwareErr),
1211        E_AUTO(InvalidAddrErr),
1212        E_AUTO(SDmaVL15Err),
1213        E_AUTO(SBufVL15MisUseErr),
1214        E_AUTO(InvalidEEPCmd),
1215        E_AUTO(RcvContextShareErr),
1216        E_AUTO(SendVLMismatchErr),
1217        E_AUTO(SendArmLaunchErr),
1218        E_AUTO(SendSpecialTriggerErr),
1219        E_AUTO(SDmaWrongPortErr),
1220        E_AUTO(SDmaBufMaskDuplicateErr),
1221        { .mask = 0, .sz = 0 }
1222};
1223
1224static const struct  qib_hwerror_msgs qib_7322p_error_msgs[] = {
1225        E_P_AUTO(IBStatusChanged),
1226        E_P_AUTO(SHeadersErr),
1227        E_P_AUTO(VL15BufMisuseErr),
1228        /*
1229         * SDmaHaltErr is not really an error, make it clearer;
1230         */
1231        {.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
1232                .sz = 11},
1233        E_P_AUTO(SDmaDescAddrMisalignErr),
1234        E_P_AUTO(SDmaUnexpDataErr),
1235        E_P_AUTO(SDmaMissingDwErr),
1236        E_P_AUTO(SDmaDwEnErr),
1237        E_P_AUTO(SDmaRpyTagErr),
1238        E_P_AUTO(SDma1stDescErr),
1239        E_P_AUTO(SDmaBaseErr),
1240        E_P_AUTO(SDmaTailOutOfBoundErr),
1241        E_P_AUTO(SDmaOutOfBoundErr),
1242        E_P_AUTO(SDmaGenMismatchErr),
1243        E_P_AUTO(SendBufMisuseErr),
1244        E_P_AUTO(SendUnsupportedVLErr),
1245        E_P_AUTO(SendUnexpectedPktNumErr),
1246        E_P_AUTO(SendDroppedDataPktErr),
1247        E_P_AUTO(SendDroppedSmpPktErr),
1248        E_P_AUTO(SendPktLenErr),
1249        E_P_AUTO(SendUnderRunErr),
1250        E_P_AUTO(SendMaxPktLenErr),
1251        E_P_AUTO(SendMinPktLenErr),
1252        E_P_AUTO(RcvIBLostLinkErr),
1253        E_P_AUTO(RcvHdrErr),
1254        E_P_AUTO(RcvHdrLenErr),
1255        E_P_AUTO(RcvBadTidErr),
1256        E_P_AUTO(RcvBadVersionErr),
1257        E_P_AUTO(RcvIBFlowErr),
1258        E_P_AUTO(RcvEBPErr),
1259        E_P_AUTO(RcvUnsupportedVLErr),
1260        E_P_AUTO(RcvUnexpectedCharErr),
1261        E_P_AUTO(RcvShortPktLenErr),
1262        E_P_AUTO(RcvLongPktLenErr),
1263        E_P_AUTO(RcvMaxPktLenErr),
1264        E_P_AUTO(RcvMinPktLenErr),
1265        E_P_AUTO(RcvICRCErr),
1266        E_P_AUTO(RcvVCRCErr),
1267        E_P_AUTO(RcvFormatErr),
1268        { .mask = 0, .sz = 0 }
1269};
1270
1271/*
1272 * Below generates "auto-message" for interrupts not specific to any port or
1273 * context
1274 */
1275#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
1276        .msg = #fldname, .sz = sizeof(#fldname) }
1277/* Below generates "auto-message" for interrupts specific to a port */
1278#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
1279        SYM_LSB(IntMask, fldname##Mask##_0), \
1280        SYM_LSB(IntMask, fldname##Mask##_1)), \
1281        .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
1282/* For some reason, the SerDesTrimDone bits are reversed */
1283#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
1284        SYM_LSB(IntMask, fldname##Mask##_1), \
1285        SYM_LSB(IntMask, fldname##Mask##_0)), \
1286        .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
1287/*
1288 * Below generates "auto-message" for interrupts specific to a context,
1289 * with ctxt-number appended
1290 */
1291#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
1292        SYM_LSB(IntMask, fldname##0IntMask), \
1293        SYM_LSB(IntMask, fldname##17IntMask)), \
1294        .msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
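/*
 * Illustrative expansion (not generated code): INTR_AUTO_P(SDmaInt)
 * yields an entry whose mask covers both the port-0 and port-1
 * SDmaIntMask bits, with .msg = "SDmaInt_P"; err_decode() below then
 * appends "_0" or "_1" for whichever bit of a multi-bit mask is set.
 */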
1295
1296#define TXSYMPTOM_AUTO_P(fldname) \
1297        { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
1298        .msg = #fldname, .sz = sizeof(#fldname) }
1299static const struct  qib_hwerror_msgs hdrchk_msgs[] = {
1300        TXSYMPTOM_AUTO_P(NonKeyPacket),
1301        TXSYMPTOM_AUTO_P(GRHFail),
1302        TXSYMPTOM_AUTO_P(PkeyFail),
1303        TXSYMPTOM_AUTO_P(QPFail),
1304        TXSYMPTOM_AUTO_P(SLIDFail),
1305        TXSYMPTOM_AUTO_P(RawIPV6),
1306        TXSYMPTOM_AUTO_P(PacketTooSmall),
1307        { .mask = 0, .sz = 0 }
1308};
1309
1310#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
1311
1312/*
1313 * Called when we might have an error that is specific to a particular
1314 * PIO buffer, and may need to cancel that buffer so it can be re-used;
1315 * this way we don't need to force an update of pioavail
1316 */
1317static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
1318{
1319        struct qib_devdata *dd = ppd->dd;
1320        u32 i;
1321        int any;
1322        u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
1323        u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
1324        unsigned long sbuf[4];
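        /*
         * sendbuffererror is a bitmap, one bit per PIO buffer, spread
         * across consecutive 64-bit registers; regcnt is how many such
         * registers cover all 2k, 4k and VL15 buffers.  sbuf[] is sized
         * for the maximum (4 * 64 = 256 buffers, assuming 64-bit longs).
         */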
1325
1326        /*
1327         * It's possible that sendbuffererror could have bits set; might
1328         * have already done this as a result of hardware error handling.
1329         */
1330        any = 0;
1331        for (i = 0; i < regcnt; ++i) {
1332                sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
1333                if (sbuf[i]) {
1334                        any = 1;
1335                        qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
1336                }
1337        }
1338
1339        if (any)
1340                qib_disarm_piobufs_set(dd, sbuf, piobcnt);
1341}
1342
1343/* No txe_recover yet, if ever */
1344
1345/* No decode__errors yet */
1346static void err_decode(char *msg, size_t len, u64 errs,
1347                       const struct qib_hwerror_msgs *msp)
1348{
1349        u64 these, lmask;
1350        int took, multi, n = 0;
1351
1352        while (errs && msp && msp->mask) {
1353                multi = (msp->mask & (msp->mask - 1));
1354                while (errs & msp->mask) {
1355                        these = (errs & msp->mask);
1356                        lmask = (these & (these - 1)) ^ these;
1357                        if (len) {
1358                                if (n++) {
1359                                        /* separate the strings */
1360                                        *msg++ = ',';
1361                                        len--;
1362                                }
1363                                /* msp->sz counts the nul */
1364                                took = min_t(size_t, msp->sz - (size_t)1, len);
1365                                memcpy(msg,  msp->msg, took);
1366                                len -= took;
1367                                msg += took;
1368                                if (len)
1369                                        *msg = '\0';
1370                        }
1371                        errs &= ~lmask;
1372                        if (len && multi) {
1373                                /* More than one bit in this mask */
1374                                int idx = -1;
1375
1376                                while (lmask & msp->mask) {
1377                                        ++idx;
1378                                        lmask >>= 1;
1379                                }
1380                                took = scnprintf(msg, len, "_%d", idx);
1381                                len -= took;
1382                                msg += took;
1383                        }
1384                }
1385                ++msp;
1386        }
1387        /* If some bits are left, show in hex. */
1388        if (len && errs)
1389                snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
1390                        (unsigned long long) errs);
1391}
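/*
 * Worked example (hypothetical bits): for a two-bit port mask whose
 * .msg is "SDmaInt_P", with both bits set in errs, the buffer ends up
 * as "SDmaInt_P_0,SDmaInt_P_1".  Any set bits not matched by the table
 * are appended in hex as ",MORE:<bits>".
 */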
1392
1393/* only called if r1 set */
1394static void flush_fifo(struct qib_pportdata *ppd)
1395{
1396        struct qib_devdata *dd = ppd->dd;
1397        u32 __iomem *piobuf;
1398        u32 bufn;
1399        u32 *hdr;
1400        u64 pbc;
1401        const unsigned hdrwords = 7;
1402        static struct ib_header ibhdr = {
1403                .lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
1404                .lrh[1] = IB_LID_PERMISSIVE,
1405                .lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
1406                .lrh[3] = IB_LID_PERMISSIVE,
1407                .u.oth.bth[0] = cpu_to_be32(
1408                        (IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
1409                .u.oth.bth[1] = cpu_to_be32(0),
1410                .u.oth.bth[2] = cpu_to_be32(0),
1411                .u.oth.u.ud.deth[0] = cpu_to_be32(0),
1412                .u.oth.u.ud.deth[1] = cpu_to_be32(0),
1413        };
1414
1415        /*
1416         * Send a dummy VL15 packet to flush the launch FIFO.
1417         * This will not actually be sent since the TxeBypassIbc bit is set.
1418         */
1419        pbc = PBC_7322_VL15_SEND |
1420                (((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
1421                (hdrwords + SIZE_OF_CRC);
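        /*
         * The low bits of the PBC word hold the packet length in dwords
         * (7 header dwords plus the ICRC dword, i.e. 8 here, assuming
         * SIZE_OF_CRC is one dword); the port-select field lives in the
         * upper 32 bits.
         */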
1422        piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
1423        if (!piobuf)
1424                return;
1425        writeq(pbc, piobuf);
1426        hdr = (u32 *) &ibhdr;
1427        if (dd->flags & QIB_PIO_FLUSH_WC) {
1428                qib_flush_wc();
1429                qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
1430                qib_flush_wc();
1431                __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
1432                qib_flush_wc();
1433        } else
1434                qib_pio_copy(piobuf + 2, hdr, hdrwords);
1435        qib_sendbuf_done(dd, bufn);
1436}
1437
1438/*
1439 * This is called with interrupts disabled and sdma_lock held.
1440 */
1441static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
1442{
1443        struct qib_devdata *dd = ppd->dd;
1444        u64 set_sendctrl = 0;
1445        u64 clr_sendctrl = 0;
1446
1447        if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
1448                set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1449        else
1450                clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1451
1452        if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
1453                set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1454        else
1455                clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1456
1457        if (op & QIB_SDMA_SENDCTRL_OP_HALT)
1458                set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1459        else
1460                clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1461
1462        if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
1463                set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1464                                SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1465                                SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1466        else
1467                clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1468                                SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1469                                SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1470
1471        spin_lock(&dd->sendctrl_lock);
1472
1473        /* If we are draining everything, block sends first */
1474        if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1475                ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
1476                qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1477                qib_write_kreg(dd, kr_scratch, 0);
1478        }
1479
1480        ppd->p_sendctrl |= set_sendctrl;
1481        ppd->p_sendctrl &= ~clr_sendctrl;
1482
1483        if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
1484                qib_write_kreg_port(ppd, krp_sendctrl,
1485                                    ppd->p_sendctrl |
1486                                    SYM_MASK(SendCtrl_0, SDmaCleanup));
1487        else
1488                qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1489        qib_write_kreg(dd, kr_scratch, 0);
1490
1491        if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1492                ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
1493                qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1494                qib_write_kreg(dd, kr_scratch, 0);
1495        }
1496
1497        spin_unlock(&dd->sendctrl_lock);
1498
1499        if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
1500                flush_fifo(ppd);
1501}
1502
1503static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
1504{
1505        __qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
1506}
1507
1508static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
1509{
1510        /*
1511         * Write SendDmaLenGen twice: first with the MSB of the
1512         * generation count clear, then with it set, to enable generation
1513         * checking and load the internal generation counter.
1514         */
1515        qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
1516        qib_write_kreg_port(ppd, krp_senddmalengen,
1517                            ppd->sdma_descq_cnt |
1518                            (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
1519}
1520
1521/*
1522 * Must be called with sdma_lock held, or before init finished.
1523 */
1524static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
1525{
1526        /* Commit writes to memory and advance the tail on the chip */
1527        wmb();
1528        ppd->sdma_descq_tail = tail;
1529        qib_write_kreg_port(ppd, krp_senddmatail, tail);
1530}
1531
1532/*
1533 * This is called with interrupts disabled and sdma_lock held.
1534 */
1535static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
1536{
1537        /*
1538         * Drain all FIFOs.
1539         * The hardware doesn't require this, but we do it so that verbs
1540         * and user applications don't have stale data waiting to be sent
1541         * when the link goes active.
1542         */
1543        sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
1544
1545        qib_sdma_7322_setlengen(ppd);
1546        qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
1547        ppd->sdma_head_dma[0] = 0;
1548        qib_7322_sdma_sendctrl(ppd,
1549                ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
1550}
1551
1552#define DISABLES_SDMA ( \
1553        QIB_E_P_SDMAHALT | \
1554        QIB_E_P_SDMADESCADDRMISALIGN | \
1555        QIB_E_P_SDMAMISSINGDW | \
1556        QIB_E_P_SDMADWEN | \
1557        QIB_E_P_SDMARPYTAG | \
1558        QIB_E_P_SDMA1STDESC | \
1559        QIB_E_P_SDMABASE | \
1560        QIB_E_P_SDMATAILOUTOFBOUND | \
1561        QIB_E_P_SDMAOUTOFBOUND | \
1562        QIB_E_P_SDMAGENMISMATCH)
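/*
 * Any of the DISABLES_SDMA error bits halts the SDMA engine; the
 * handler below keys off QIB_E_P_SDMAHALT to drive the per-port SDMA
 * state machine through its halt/cleanup transitions.
 */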
1563
1564static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
1565{
1566        unsigned long flags;
1567        struct qib_devdata *dd = ppd->dd;
1568
1569        errs &= QIB_E_P_SDMAERRS;
1570        err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf),
1571                   errs, qib_7322p_error_msgs);
1572
1573        if (errs & QIB_E_P_SDMAUNEXPDATA)
1574                qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
1575                            ppd->port);
1576
1577        spin_lock_irqsave(&ppd->sdma_lock, flags);
1578
1579        if (errs != QIB_E_P_SDMAHALT) {
1580                /* SDMA errors have QIB_E_P_SDMAHALT and another bit set */
1581                qib_dev_porterr(dd, ppd->port,
1582                        "SDMA %s 0x%016llx %s\n",
1583                        qib_sdma_state_names[ppd->sdma_state.current_state],
1584                        errs, ppd->cpspec->sdmamsgbuf);
1585                dump_sdma_7322_state(ppd);
1586        }
1587
1588        switch (ppd->sdma_state.current_state) {
1589        case qib_sdma_state_s00_hw_down:
1590                break;
1591
1592        case qib_sdma_state_s10_hw_start_up_wait:
1593                if (errs & QIB_E_P_SDMAHALT)
1594                        __qib_sdma_process_event(ppd,
1595                                qib_sdma_event_e20_hw_started);
1596                break;
1597
1598        case qib_sdma_state_s20_idle:
1599                break;
1600
1601        case qib_sdma_state_s30_sw_clean_up_wait:
1602                break;
1603
1604        case qib_sdma_state_s40_hw_clean_up_wait:
1605                if (errs & QIB_E_P_SDMAHALT)
1606                        __qib_sdma_process_event(ppd,
1607                                qib_sdma_event_e50_hw_cleaned);
1608                break;
1609
1610        case qib_sdma_state_s50_hw_halt_wait:
1611                if (errs & QIB_E_P_SDMAHALT)
1612                        __qib_sdma_process_event(ppd,
1613                                qib_sdma_event_e60_hw_halted);
1614                break;
1615
1616        case qib_sdma_state_s99_running:
1617                __qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
1618                __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
1619                break;
1620        }
1621
1622        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1623}
1624
1625/*
1626 * handle per-device errors (not per-port errors)
1627 */
1628static noinline void handle_7322_errors(struct qib_devdata *dd)
1629{
1630        char *msg;
1631        u64 iserr = 0;
1632        u64 errs;
1633        u64 mask;
1634
1635        qib_stats.sps_errints++;
1636        errs = qib_read_kreg64(dd, kr_errstatus);
1637        if (!errs) {
1638                qib_devinfo(dd->pcidev,
1639                        "device error interrupt, but no error bits set!\n");
1640                goto done;
1641        }
1642
1643        /* don't report errors that are masked */
1644        errs &= dd->cspec->errormask;
1645        msg = dd->cspec->emsgbuf;
1646
1647        /* do these first, they are most important */
1648        if (errs & QIB_E_HARDWARE) {
1649                *msg = '\0';
1650                qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
1651        }
1652
1653        if (errs & QIB_E_SPKTERRS) {
1654                qib_disarm_7322_senderrbufs(dd->pport);
1655                qib_stats.sps_txerrs++;
1656        } else if (errs & QIB_E_INVALIDADDR)
1657                qib_stats.sps_txerrs++;
1658        else if (errs & QIB_E_ARMLAUNCH) {
1659                qib_stats.sps_txerrs++;
1660                qib_disarm_7322_senderrbufs(dd->pport);
1661        }
1662        qib_write_kreg(dd, kr_errclear, errs);
1663
1664        /*
1665         * The ones we mask off are handled specially below
1666         * or above.  Also mask SDMADISABLED by default as it
1667         * is too chatty.
1668         */
1669        mask = QIB_E_HARDWARE;
1670        *msg = '\0';
1671
1672        err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask,
1673                   qib_7322error_msgs);
1674
1675        /*
1676         * Getting reset is a tragedy for all ports. Mark the device
1677 * _and_ the ports as "offline" in a way meaningful to each.
1678         */
1679        if (errs & QIB_E_RESET) {
1680                int pidx;
1681
1682                qib_dev_err(dd,
1683                        "Got reset, requires re-init (unload and reload driver)\n");
1684                dd->flags &= ~QIB_INITTED;  /* needs re-init */
1685                /* mark as having had error */
1686                *dd->devstatusp |= QIB_STATUS_HWERROR;
1687                for (pidx = 0; pidx < dd->num_pports; ++pidx)
1688                        if (dd->pport[pidx].link_speed_supported)
1689                                *dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
1690        }
1691
1692        if (*msg && iserr)
1693                qib_dev_err(dd, "%s error\n", msg);
1694
1695        /*
1696         * If there were hdrq or egrfull errors, wake up any processes
1697         * waiting in poll.  We used to try to check which contexts had
1698         * the overflow, but given the cost of that and the chip reads
1699         * to support it, it's better to just wake everybody up if we
1700         * get an overflow; waiters can poll again if it's not them.
1701         */
1702        if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
1703                qib_handle_urcv(dd, ~0U);
1704                if (errs & ERR_MASK(RcvEgrFullErr))
1705                        qib_stats.sps_buffull++;
1706                else
1707                        qib_stats.sps_hdrfull++;
1708        }
1709
1710done:
1711        return;
1712}
1713
1714static void qib_error_tasklet(struct tasklet_struct *t)
1715{
1716        struct qib_devdata *dd = from_tasklet(dd, t, error_tasklet);
1717
1718        handle_7322_errors(dd);
1719        qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1720}
1721
1722static void reenable_chase(struct timer_list *t)
1723{
1724        struct qib_chippport_specific *cp = from_timer(cp, t, chase_timer);
1725        struct qib_pportdata *ppd = cp->ppd;
1726
1727        ppd->cpspec->chase_timer.expires = 0;
1728        qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1729                QLOGIC_IB_IBCC_LINKINITCMD_POLL);
1730}
1731
1732static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow,
1733                u8 ibclt)
1734{
1735        ppd->cpspec->chase_end = 0;
1736
1737        if (!qib_chase)
1738                return;
1739
1740        qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1741                QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1742        ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
1743        add_timer(&ppd->cpspec->chase_timer);
1744}
1745
1746static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
1747{
1748        u8 ibclt;
1749        unsigned long tnow;
1750
1751        ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
1752
1753        /*
1754         * Detect and handle the state chase issue, where we can
1755         * get stuck if we are unlucky on timing on both sides of
1756         * the link.   If we are, we disable, set a timer, and
1757         * then re-enable.
1758         */
1759        switch (ibclt) {
1760        case IB_7322_LT_STATE_CFGRCVFCFG:
1761        case IB_7322_LT_STATE_CFGWAITRMT:
1762        case IB_7322_LT_STATE_TXREVLANES:
1763        case IB_7322_LT_STATE_CFGENH:
1764                tnow = jiffies;
1765                if (ppd->cpspec->chase_end &&
1766                     time_after(tnow, ppd->cpspec->chase_end))
1767                        disable_chase(ppd, tnow, ibclt);
1768                else if (!ppd->cpspec->chase_end)
1769                        ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
1770                break;
1771        default:
1772                ppd->cpspec->chase_end = 0;
1773                break;
1774        }
1775
1776        if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
1777              ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
1778             ibclt == IB_7322_LT_STATE_LINKUP) &&
1779            (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
1780                force_h1(ppd);
1781                ppd->cpspec->qdr_reforce = 1;
1782                if (!ppd->dd->cspec->r1)
1783                        serdes_7322_los_enable(ppd, 0);
1784        } else if (ppd->cpspec->qdr_reforce &&
1785                (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
1786                 (ibclt == IB_7322_LT_STATE_CFGENH ||
1787                ibclt == IB_7322_LT_STATE_CFGIDLE ||
1788                ibclt == IB_7322_LT_STATE_LINKUP))
1789                force_h1(ppd);
1790
1791        if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
1792            ppd->link_speed_enabled == QIB_IB_QDR &&
1793            (ibclt == IB_7322_LT_STATE_CFGTEST ||
1794             ibclt == IB_7322_LT_STATE_CFGENH ||
1795             (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
1796              ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
1797                adj_tx_serdes(ppd);
1798
1799        if (ibclt != IB_7322_LT_STATE_LINKUP) {
1800                u8 ltstate = qib_7322_phys_portstate(ibcst);
1801                u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
1802                                          LinkTrainingState);
1803                if (!ppd->dd->cspec->r1 &&
1804                    pibclt == IB_7322_LT_STATE_LINKUP &&
1805                    ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1806                    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1807                    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1808                    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1809                        /* If the link went down (but not into recovery),
1810                         * turn LOS back on */
1811                        serdes_7322_los_enable(ppd, 1);
1812                if (!ppd->cpspec->qdr_dfe_on &&
1813                    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
1814                        ppd->cpspec->qdr_dfe_on = 1;
1815                        ppd->cpspec->qdr_dfe_time = 0;
1816                        /* On link down, reenable QDR adaptation */
1817                        qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
1818                                            ppd->dd->cspec->r1 ?
1819                                            QDR_STATIC_ADAPT_DOWN_R1 :
1820                                            QDR_STATIC_ADAPT_DOWN);
1821                        pr_info(
1822                                "IB%u:%u re-enabled QDR adaptation ibclt %x\n",
1823                                ppd->dd->unit, ppd->port, ibclt);
1824                }
1825        }
1826}
1827
1828static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
1829
1830/*
1831 * This is per-pport error handling.
1832 * It will likely get its own MSIx interrupt (one for each port,
1833 * although just a single handler).
1834 */
1835static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1836{
1837        char *msg;
1838        u64 ignore_this_time = 0, iserr = 0, errs, fmask;
1839        struct qib_devdata *dd = ppd->dd;
1840
1841        /* do this as soon as possible */
1842        fmask = qib_read_kreg64(dd, kr_act_fmask);
1843        if (!fmask)
1844                check_7322_rxe_status(ppd);
1845
1846        errs = qib_read_kreg_port(ppd, krp_errstatus);
1847        if (!errs)
1848                qib_devinfo(dd->pcidev,
1849                         "Port%d error interrupt, but no error bits set!\n",
1850                         ppd->port);
1851        if (!fmask)
1852                errs &= ~QIB_E_P_IBSTATUSCHANGED;
1853        if (!errs)
1854                goto done;
1855
1856        msg = ppd->cpspec->epmsgbuf;
1857        *msg = '\0';
1858
1859        if (errs & ~QIB_E_P_BITSEXTANT) {
1860                err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1861                           errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
1862                if (!*msg)
1863                        snprintf(msg, sizeof(ppd->cpspec->epmsgbuf),
1864                                 "no others");
1865                qib_dev_porterr(dd, ppd->port,
1866                        "error interrupt with unknown errors 0x%016Lx set (and %s)\n",
1867                        (errs & ~QIB_E_P_BITSEXTANT), msg);
1868                *msg = '\0';
1869        }
1870
1871        if (errs & QIB_E_P_SHDR) {
1872                u64 symptom;
1873
1874                /* determine cause, then write to clear */
1875                symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
1876                qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
1877                err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom,
1878                           hdrchk_msgs);
1879                *msg = '\0';
1880                /* senderrbuf cleared in SPKTERRS below */
1881        }
1882
1883        if (errs & QIB_E_P_SPKTERRS) {
1884                if ((errs & QIB_E_P_LINK_PKTERRS) &&
1885                    !(ppd->lflags & QIBL_LINKACTIVE)) {
1886                        /*
1887                         * This can happen when trying to bring the link
1888                         * up, but the IB link changes state at the "wrong"
1889                         * time. The IB logic then complains that the packet
1890                         * isn't valid.  We don't want to confuse people, so
1891                         * we just don't print them, except at debug
1892                         */
1893                        err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1894                                   (errs & QIB_E_P_LINK_PKTERRS),
1895                                   qib_7322p_error_msgs);
1896                        *msg = '\0';
1897                        ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1898                }
1899                qib_disarm_7322_senderrbufs(ppd);
1900        } else if ((errs & QIB_E_P_LINK_PKTERRS) &&
1901                   !(ppd->lflags & QIBL_LINKACTIVE)) {
1902                /*
1903                 * This can happen when SMA is trying to bring the link
1904                 * up, but the IB link changes state at the "wrong" time.
1905                 * The IB logic then complains that the packet isn't
1906                 * valid.  We don't want to confuse people, so we just
1907                 * don't print them, except at debug
1908                 */
1909                err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs,
1910                           qib_7322p_error_msgs);
1911                ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1912                *msg = '\0';
1913        }
1914
1915        qib_write_kreg_port(ppd, krp_errclear, errs);
1916
1917        errs &= ~ignore_this_time;
1918        if (!errs)
1919                goto done;
1920
1921        if (errs & QIB_E_P_RPKTERRS)
1922                qib_stats.sps_rcverrs++;
1923        if (errs & QIB_E_P_SPKTERRS)
1924                qib_stats.sps_txerrs++;
1925
1926        iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
1927
1928        if (errs & QIB_E_P_SDMAERRS)
1929                sdma_7322_p_errors(ppd, errs);
1930
1931        if (errs & QIB_E_P_IBSTATUSCHANGED) {
1932                u64 ibcs;
1933                u8 ltstate;
1934
1935                ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
1936                ltstate = qib_7322_phys_portstate(ibcs);
1937
1938                if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
1939                        handle_serdes_issues(ppd, ibcs);
1940                if (!(ppd->cpspec->ibcctrl_a &
1941                      SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
1942                        /*
1943                         * We got our interrupt, so init code should be
1944                         * happy and not try alternatives. Now squelch
1945                         * other "chatter" from link-negotiation (pre Init)
1946                         */
1947                        ppd->cpspec->ibcctrl_a |=
1948                                SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
1949                        qib_write_kreg_port(ppd, krp_ibcctrl_a,
1950                                            ppd->cpspec->ibcctrl_a);
1951                }
1952
1953                /* Update our picture of width and speed from chip */
1954                ppd->link_width_active =
1955                        (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
1956                            IB_WIDTH_4X : IB_WIDTH_1X;
1957                ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
1958                        LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
1959                          SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
1960                                   QIB_IB_DDR : QIB_IB_SDR;
1961
1962                if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
1963                    IB_PHYSPORTSTATE_DISABLED)
1964                        qib_set_ib_7322_lstate(ppd, 0,
1965                               QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1966                else
1967                        /*
1968                         * Since going into a recovery state causes the link
1969                         * state to go down and since recovery is transitory,
1970                         * it is better if we "miss" ever seeing the link
1971                         * training state go into recovery (i.e., ignore this
1972                         * transition for link state special handling purposes)
1973                         * without updating lastibcstat.
1974                         */
1975                        if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1976                            ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1977                            ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1978                            ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1979                                qib_handle_e_ibstatuschanged(ppd, ibcs);
1980        }
1981        if (*msg && iserr)
1982                qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
1983
1984        if (ppd->state_wanted & ppd->lflags)
1985                wake_up_interruptible(&ppd->state_wait);
1986done:
1987        return;
1988}
1989
1990/* enable/disable chip from delivering interrupts */
1991static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
1992{
1993        if (enable) {
1994                if (dd->flags & QIB_BADINTR)
1995                        return;
1996                qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
1997                /* cause any pending enabled interrupts to be re-delivered */
1998                qib_write_kreg(dd, kr_intclear, 0ULL);
1999                if (dd->cspec->num_msix_entries) {
2000                        /* and same for MSIx */
2001                        u64 val = qib_read_kreg64(dd, kr_intgranted);
2002
2003                        if (val)
2004                                qib_write_kreg(dd, kr_intgranted, val);
2005                }
2006        } else
2007                qib_write_kreg(dd, kr_intmask, 0ULL);
2008}
2009
2010/*
2011 * Try to cleanup as much as possible for anything that might have gone
2012 * wrong while in freeze mode, such as pio buffers being written by user
2013 * processes (causing armlaunch), send errors due to going into freeze mode,
2014 * etc., and try to avoid causing extra interrupts while doing so.
2015 * Forcibly update the in-memory pioavail register copies after cleanup
2016 * because the chip won't do it while in freeze mode (the register values
2017 * themselves are kept correct).
2018 * Make sure that we don't lose any important interrupts by using the chip
2019 * feature that says that writing 0 to a bit in *clear that is set in
2020 * *status will cause an interrupt to be generated again (if allowed by
2021 * the *mask value).
2022 * This is in chip-specific code because of all of the register accesses,
2023 * even though the details are similar on most chips.
2024 */
2025static void qib_7322_clear_freeze(struct qib_devdata *dd)
2026{
2027        int pidx;
2028
2029        /* disable error interrupts, to avoid confusion */
2030        qib_write_kreg(dd, kr_errmask, 0ULL);
2031
2032        for (pidx = 0; pidx < dd->num_pports; ++pidx)
2033                if (dd->pport[pidx].link_speed_supported)
2034                        qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2035                                            0ULL);
2036
2037        /* also disable interrupts; errormask is sometimes overwritten */
2038        qib_7322_set_intr_state(dd, 0);
2039
2040        /* clear the freeze, and be sure chip saw it */
2041        qib_write_kreg(dd, kr_control, dd->control);
2042        qib_read_kreg32(dd, kr_scratch);
2043
2044        /*
2045         * Force new interrupt if any hwerr, error or interrupt bits are
2046         * still set, and clear "safe" send packet errors related to freeze
2047         * and cancelling sends.  Re-enable error interrupts before possible
2048         * force of re-interrupt on pending interrupts.
2049         */
2050        qib_write_kreg(dd, kr_hwerrclear, 0ULL);
2051        qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
2052        qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2053        /* We need to purge per-port errs and reset mask, too */
2054        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
2055                if (!dd->pport[pidx].link_speed_supported)
2056                        continue;
2057                qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0ULL);
2058                qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0ULL);
2059        }
2060        qib_7322_set_intr_state(dd, 1);
2061}
2062
2063/* no error handling to speak of */
2064/**
2065 * qib_7322_handle_hwerrors - display hardware errors.
2066 * @dd: the qlogic_ib device
2067 * @msg: the output buffer
2068 * @msgl: the size of the output buffer
2069 *
2070 * Most hardware errors are catastrophic, but for right now we'll
2071 * print them and continue.  We reuse the same message buffer as
2072 * qib_handle_errors() to avoid excessive stack usage.
2074 */
2075static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
2076                                     size_t msgl)
2077{
2078        u64 hwerrs;
2079        u32 ctrl;
2080        int isfatal = 0;
2081
2082        hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
2083        if (!hwerrs)
2084                goto bail;
2085        if (hwerrs == ~0ULL) {
2086                qib_dev_err(dd,
2087                        "Read of hardware error status failed (all bits set); ignoring\n");
2088                goto bail;
2089        }
2090        qib_stats.sps_hwerrs++;
2091
2092        /* Always clear the error status register, except BIST fail */
2093        qib_write_kreg(dd, kr_hwerrclear, hwerrs &
2094                       ~HWE_MASK(PowerOnBISTFailed));
2095
2096        hwerrs &= dd->cspec->hwerrmask;
2097
2098        /* no EEPROM logging, yet */
2099
2100        if (hwerrs)
2101                qib_devinfo(dd->pcidev,
2102                        "Hardware error: hwerr=0x%llx (cleared)\n",
2103                        (unsigned long long) hwerrs);
2104
2105        ctrl = qib_read_kreg32(dd, kr_control);
2106        if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
2107                /*
2108                 * No recovery yet...
2109                 */
2110                if ((hwerrs & ~HWE_MASK(LATriggered)) ||
2111                    dd->cspec->stay_in_freeze) {
2112                        /*
2113                         * If any bits we aren't ignoring are set, only make
2114                         * the complaint once, in case it's stuck or
2115                         * recurring, and we get here multiple times.
2116                         * Force the link down, so the switch knows, and the
2117                         * LEDs are turned off.
2118                         */
2119                        if (dd->flags & QIB_INITTED)
2120                                isfatal = 1;
2121                } else
2122                        qib_7322_clear_freeze(dd);
2123        }
2124
2125        if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
2126                isfatal = 1;
2127                strlcpy(msg,
2128                        "[Memory BIST test failed, InfiniPath hardware unusable]",
2129                        msgl);
2130                /* ignore from now on, so disable until driver reloaded */
2131                dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
2132                qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2133        }
2134
2135        err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);
2136
2137        /* Ignore esoteric PLL failures et al. */
2138
2139        qib_dev_err(dd, "%s hardware error\n", msg);
2140
2141        if (hwerrs &
2142                   (SYM_MASK(HwErrMask, SDmaMemReadErrMask_0) |
2143                    SYM_MASK(HwErrMask, SDmaMemReadErrMask_1))) {
2144                int pidx = 0;
2145                int err;
2146                unsigned long flags;
2147                struct qib_pportdata *ppd = dd->pport;
2148
2149                for (; pidx < dd->num_pports; ++pidx, ppd++) {
2150                        err = 0;
2151                        if (pidx == 0 && (hwerrs &
2152                                SYM_MASK(HwErrMask, SDmaMemReadErrMask_0)))
2153                                err++;
2154                        if (pidx == 1 && (hwerrs &
2155                                SYM_MASK(HwErrMask, SDmaMemReadErrMask_1)))
2156                                err++;
2157                        if (err) {
2158                                spin_lock_irqsave(&ppd->sdma_lock, flags);
2159                                dump_sdma_7322_state(ppd);
2160                                spin_unlock_irqrestore(&ppd->sdma_lock, flags);
2161                        }
2162                }
2163        }
2164
2165        if (isfatal && !dd->diag_client) {
2166                qib_dev_err(dd,
2167                        "Fatal Hardware Error, no longer usable, SN %.16s\n",
2168                        dd->serial);
2169                /*
2170                 * for /sys status file and user programs to print; if no
2171                 * trailing brace is copied, we'll know it was truncated.
2172                 */
2173                if (dd->freezemsg)
2174                        snprintf(dd->freezemsg, dd->freezelen,
2175                                 "{%s}", msg);
2176                qib_disable_after_error(dd);
2177        }
2178bail:;
2179}
2180
2181/**
2182 * qib_7322_init_hwerrors - enable hardware errors
2183 * @dd: the qlogic_ib device
2184 *
2185 * now that we have finished initializing everything that might reasonably
2186 * cause a hardware error, and cleared those error bits as they occur,
2187 * we can enable hardware errors in the mask (potentially enabling
2188 * freeze mode), and enable hardware errors as errors (along with
2189 * everything else) in errormask
2190 */
2191static void qib_7322_init_hwerrors(struct qib_devdata *dd)
2192{
2193        int pidx;
2194        u64 extsval;
2195
2196        extsval = qib_read_kreg64(dd, kr_extstatus);
2197        if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
2198                         QIB_EXTS_MEMBIST_ENDTEST)))
2199                qib_dev_err(dd, "MemBIST did not complete!\n");
2200
2201        /* never clear BIST failure, so reported on each driver load */
2202        qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
2203        qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2204
2205        /* clear all */
2206        qib_write_kreg(dd, kr_errclear, ~0ULL);
2207        /* enable errors that are masked, at least this first time. */
2208        qib_write_kreg(dd, kr_errmask, ~0ULL);
2209        dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
2210        for (pidx = 0; pidx < dd->num_pports; ++pidx)
2211                if (dd->pport[pidx].link_speed_supported)
2212                        qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2213                                            ~0ULL);
2214}
2215
2216/*
2217 * Disable and enable the armlaunch error.  Used for PIO bandwidth testing
2218 * on chips that are count-based, rather than trigger-based.  There is no
2219 * reference counting, but that's also fine, given the intended use.
2220 * Only chip-specific because it's all register accesses
2221 */
2222static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
2223{
2224        if (enable) {
2225                qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
2226                dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
2227        } else
2228                dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
2229        qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2230}
2231
2232/*
2233 * Formerly took parameter <which> in pre-shifted,
2234 * pre-merged form with LinkCmd and LinkInitCmd
2235 * together, and assuming the zero was NOP.
2236 */
2237static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
2238                                   u16 linitcmd)
2239{
2240        u64 mod_wd;
2241        struct qib_devdata *dd = ppd->dd;
2242        unsigned long flags;
2243
2244        if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
2245                /*
2246                 * If we are told to disable, note that so link-recovery
2247                 * code does not attempt to bring us back up.
2248                 * Also reset everything that we can, so we start
2249                 * completely clean when re-enabled (before we
2250                 * actually issue the disable to the IBC)
2251                 */
2252                qib_7322_mini_pcs_reset(ppd);
2253                spin_lock_irqsave(&ppd->lflags_lock, flags);
2254                ppd->lflags |= QIBL_IB_LINK_DISABLED;
2255                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2256        } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
2257                /*
2258                 * Any other linkinitcmd will lead to LINKDOWN and then
2259                 * to INIT (if all is well), so clear flag to let
2260                 * link-recovery code attempt to bring us back up.
2261                 */
2262                spin_lock_irqsave(&ppd->lflags_lock, flags);
2263                ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
2264                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2265                /*
2266                 * Clear status change interrupt reduction so the
2267                 * new state is seen.
2268                 */
2269                ppd->cpspec->ibcctrl_a &=
2270                        ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
2271        }
2272
2273        mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
2274                (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2275
2276        qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
2277                            mod_wd);
2278        /* write to chip to prevent back-to-back writes of ibc reg */
2279        qib_write_kreg(dd, kr_scratch, 0);
2280
2281}
2282
2283/*
2284 * The total RCV buffer memory is 64KB, used for both ports, and is
2285 * in units of 64 bytes (same as IB flow control credit unit).
2286 * The consumedVL units in the same registers are in 32-byte units!
2287 * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
2288 * and we can therefore allocate just 9 IB credits for 2 VL15 packets
2289 * in krp_rxcreditvl15, rather than 10.
2290 */
2291#define RCV_BUF_UNITSZ 64
2292#define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
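/*
 * Worked example for a dual-port board: 64KB / (64 * 2) = 512 credit
 * units per port.  VL15 gets (2 * 288 + 63) / 64 = 9 units, leaving
 * 503 for the data VLs; with 4 VLs operational that is 125 each, with
 * the 3 units of rounding excess going to VL0 (128 total).
 */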
2293
2294static void set_vls(struct qib_pportdata *ppd)
2295{
2296        int i, numvls, totcred, cred_vl, vl0extra;
2297        struct qib_devdata *dd = ppd->dd;
2298        u64 val;
2299
2300        numvls = qib_num_vls(ppd->vls_operational);
2301
2302        /*
2303         * Set up per-VL credits. Below is a kluge based on these assumptions:
2304         * 1) port is disabled at the time early_init is called.
2305         * 2) give VL15 9 credits, enough for two max-plausible packets.
2306         * 3) Give VL0-N the rest, with any rounding excess used for VL0
2307         */
2308        /* 2 VL15 packets @ 288 bytes each (including IB headers) */
2309        totcred = NUM_RCV_BUF_UNITS(dd);
2310        cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
2311        totcred -= cred_vl;
2312        qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
2313        cred_vl = totcred / numvls;
2314        vl0extra = totcred - cred_vl * numvls;
2315        qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
2316        for (i = 1; i < numvls; i++)
2317                qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
2318        for (; i < 8; i++) /* no buffer space for other VLs */
2319                qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
2320
2321        /* Notify IBC that credits need to be recalculated */
2322        val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
2323        val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2324        qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2325        qib_write_kreg(dd, kr_scratch, 0ULL);
2326        val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2327        qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2328
2329        for (i = 0; i < numvls; i++)
2330                val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
2331        val = qib_read_kreg_port(ppd, krp_rxcreditvl15);
2332
2333        /* Change the number of operational VLs */
2334        ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
2335                                ~SYM_MASK(IBCCtrlA_0, NumVLane)) |
2336                ((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
2337        qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2338        qib_write_kreg(dd, kr_scratch, 0ULL);
2339}
2340
2341/*
2342 * The code that deals with actual SerDes is in serdes_7322_init().
2343 * Compared to the code for iba7220, it is minimal.
2344 */
2345static int serdes_7322_init(struct qib_pportdata *ppd);
2346
2347/**
2348 * qib_7322_bringup_serdes - bring up the serdes
2349 * @ppd: physical port on the qlogic_ib device
2350 */
2351static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2352{
2353        struct qib_devdata *dd = ppd->dd;
2354        u64 val, guid, ibc;
2355        unsigned long flags;
2356
2357        /*
2358         * SerDes model not in Pd, but still need to
2359         * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
2360         * eventually.
2361         */
2362        /* Put IBC in reset, sends disabled (should be in reset already) */
2363        ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2364        qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2365        qib_write_kreg(dd, kr_scratch, 0ULL);
2366
2367        /* ensure previous Tx parameters are not still forced */
2368        qib_write_kreg_port(ppd, krp_tx_deemph_override,
2369                SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
2370                reset_tx_deemphasis_override));
2371
2372        if (qib_compat_ddr_negotiate) {
2373                ppd->cpspec->ibdeltainprog = 1;
2374                ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
2375                                                crp_ibsymbolerr);
2376                ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
2377                                                crp_iblinkerrrecov);
2378        }
2379
2380        /* flowcontrolwatermark is in units of KBytes */
2381        ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
2382        /*
2383         * Flow control is sent this often, even if no changes in
2384         * buffer space occur.  Units are 128ns for this chip.
2385         * Set to 3usec.
2386         */
2387        ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
2388        /* max error tolerance */
2389        ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
2390        /* IB credit flow control. */
2391        ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
2392        /*
2393         * set initial max size pkt IBC will send, including ICRC; it's the
2394         * PIO buffer size in dwords, less 1; also see qib_set_mtu()
2395         */
2396        ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
2397                SYM_LSB(IBCCtrlA_0, MaxPktLen);
2398        ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
2399
2400        /*
2401         * Reset the PCS interface to the serdes (and also ibc, which is still
2402         * in reset from above).  Writes new value of ibcctrl_a as last step.
2403         */
2404        qib_7322_mini_pcs_reset(ppd);
2405
2406        if (!ppd->cpspec->ibcctrl_b) {
2407                unsigned lse = ppd->link_speed_enabled;
2408
2409                /*
2410                 * Not on re-init after reset, establish shadow
2411                 * and force initial config.
2412                 */
2413                ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
2414                                                             krp_ibcctrl_b);
2415                ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
2416                                IBA7322_IBC_SPEED_DDR |
2417                                IBA7322_IBC_SPEED_SDR |
2418                                IBA7322_IBC_WIDTH_AUTONEG |
2419                                SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
2420                if (lse & (lse - 1)) /* Multiple speeds enabled */
2421                        ppd->cpspec->ibcctrl_b |=
2422                                (lse << IBA7322_IBC_SPEED_LSB) |
2423                                IBA7322_IBC_IBTA_1_2_MASK |
2424                                IBA7322_IBC_MAX_SPEED_MASK;
2425                else
2426                        ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
2427                                IBA7322_IBC_SPEED_QDR |
2428                                 IBA7322_IBC_IBTA_1_2_MASK :
2429                                (lse == QIB_IB_DDR) ?
2430                                        IBA7322_IBC_SPEED_DDR :
2431                                        IBA7322_IBC_SPEED_SDR;
2432                if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
2433                    (IB_WIDTH_1X | IB_WIDTH_4X))
2434                        ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
2435                else
2436                        ppd->cpspec->ibcctrl_b |=
2437                                ppd->link_width_enabled == IB_WIDTH_4X ?
2438                                IBA7322_IBC_WIDTH_4X_ONLY :
2439                                IBA7322_IBC_WIDTH_1X_ONLY;
2440
2441                /* always enable these on driver reload, not sticky */
2442                ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
2443                        IBA7322_IBC_HRTBT_MASK);
2444        }
2445        qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
2446
2447        /* setup so we have more time at CFGTEST to change H1 */
2448        val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
2449        val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
2450        val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
2451        qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
2452
2453        serdes_7322_init(ppd);
2454
2455        guid = be64_to_cpu(ppd->guid);
2456        if (!guid) {
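                /*
                 * Derive the port GUID from the device's base GUID:
                 * port 1 uses the base, port 2 the base plus one.
                 */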
2457                if (dd->base_guid)
2458                        guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
2459                ppd->guid = cpu_to_be64(guid);
2460        }
2461
2462        qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
2463        /* write to chip to prevent back-to-back writes of ibc reg */
2464        qib_write_kreg(dd, kr_scratch, 0);
2465
2466        /* Enable port */
2467        ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
2468        set_vls(ppd);
2469
2470        /* initially come up DISABLED, without sending anything. */
2471        val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
2472                                        QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2473        qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
2474        qib_write_kreg(dd, kr_scratch, 0ULL);
2475        /* clear the linkinit cmds */
2476        ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
2477
2478        /* be paranoid against later code motion, etc. */
2479        spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2480        ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
2481        qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
2482        spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2483
2484        /* Also enable IBSTATUSCHG interrupt.  */
2485        val = qib_read_kreg_port(ppd, krp_errmask);
2486        qib_write_kreg_port(ppd, krp_errmask,
2487                val | ERR_MASK_N(IBStatusChanged));
2488
2489        /* Always zero until we start messing with SerDes for real */
2490        return 0;
2491}
2492
2493/**
2494 * qib_7322_mini_quiet_serdes - set serdes to txidle
2495 * @ppd: physical port on the qlogic_ib device
2496 * Called when the driver is being unloaded
2497 */
2498static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
2499{
2500        u64 val;
2501        unsigned long flags;
2502
2503        qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
2504
2505        spin_lock_irqsave(&ppd->lflags_lock, flags);
2506        ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
2507        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2508        wake_up(&ppd->cpspec->autoneg_wait);
2509        cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
2510        if (ppd->dd->cspec->r1)
2511                cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
2512
2513        ppd->cpspec->chase_end = 0;
2514        if (ppd->cpspec->chase_timer.function) /* if initted */
2515                del_timer_sync(&ppd->cpspec->chase_timer);
2516
2517        /*
2518         * Despite the name, actually disables IBC as well. Do it when
2519         * we are as sure as possible that no more packets can be
2520         * received, following the down and the PCS reset.
2521         * The actual disabling happens in qib_7322_mini_pcs_reset(),
2522         * along with the PCS being reset.
2523         */
2524        ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2525        qib_7322_mini_pcs_reset(ppd);
2526
2527        /*
2528         * Update the adjusted counters so the adjustment persists
2529         * across driver reload.
2530         */
2531        if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
2532            ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
2533                struct qib_devdata *dd = ppd->dd;
2534                u64 diagc;
2535
2536                /* enable counter writes */
2537                diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
2538                qib_write_kreg(dd, kr_hwdiagctrl,
2539                               diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
2540
2541                if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
2542                        val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
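                        /*
                         * "val -= val - snap" is just "val = snap":
                         * roll the counter back to its bringup snapshot
                         * before applying the accumulated delta below.
                         */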
2543                        if (ppd->cpspec->ibdeltainprog)
2544                                val -= val - ppd->cpspec->ibsymsnap;
2545                        val -= ppd->cpspec->ibsymdelta;
2546                        write_7322_creg_port(ppd, crp_ibsymbolerr, val);
2547                }
2548                if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
2549                        val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
2550                        if (ppd->cpspec->ibdeltainprog)
2551                                val -= val - ppd->cpspec->iblnkerrsnap;
2552                        val -= ppd->cpspec->iblnkerrdelta;
2553                        write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
2554                }
2555                if (ppd->cpspec->iblnkdowndelta) {
2556                        val = read_7322_creg32_port(ppd, crp_iblinkdown);
2557                        val += ppd->cpspec->iblnkdowndelta;
2558                        write_7322_creg_port(ppd, crp_iblinkdown, val);
2559                }
2560                /*
2561                 * No need to save ibmalfdelta since IB perfcounters
2562                 * are cleared on driver reload.
2563                 */
2564
2565                /* and disable counter writes */
2566                qib_write_kreg(dd, kr_hwdiagctrl, diagc);
2567        }
2568}
2569
2570/**
2571 * qib_setup_7322_setextled - set the state of the two external LEDs
2572 * @ppd: physical port on the qlogic_ib device
2573 * @on: whether the link is up or not
2574 *
2575 * When @on is true, the exact combination of LEDs lit is determined
2576 * by looking at the ibcstatus.
2577 *
2578 * These LEDs indicate the physical and logical state of IB link.
2579 * For this chip (at least with recommended board pinouts), LED1
2580 * is Yellow (logical state) and LED2 is Green (physical state),
2581 *
2582 * Note:  We try to match the Mellanox HCA LED behavior as best
2583 * we can.  Green indicates physical link state is OK (something is
2584 * plugged in, and we can train).
2585 * Yellow/amber indicates the link is logically up (ACTIVE).
2586 * Mellanox further blinks the amber LED to indicate data packet
2587 * activity, but we have no hardware support for that, so it would
2588 * require waking up every 10-20 msecs and checking the counters
2589 * on the chip, and then turning the LED off if appropriate.  That's
2590 * visible overhead, so not something we will do.
2591 */
2592static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
2593{
2594        struct qib_devdata *dd = ppd->dd;
2595        u64 extctl, ledblink = 0, val;
2596        unsigned long flags;
2597        int yel, grn;
2598
2599        /*
2600         * The diags use the LED to indicate diag info, so we leave
2601         * the external LED alone when the diags are running.
2602         */
2603        if (dd->diag_client)
2604                return;
2605
2606        /* Allow override of LED display, e.g. for locating the system in a rack */
2607        if (ppd->led_override) {
2608                grn = (ppd->led_override & QIB_LED_PHYS);
2609                yel = (ppd->led_override & QIB_LED_LOG);
2610        } else if (on) {
2611                val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
2612                grn = qib_7322_phys_portstate(val) ==
2613                        IB_PHYSPORTSTATE_LINKUP;
2614                yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
2615        } else {
2616                grn = 0;
2617                yel = 0;
2618        }
2619
2620        spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2621        extctl = dd->cspec->extctrl & (ppd->port == 1 ?
2622                ~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
2623        if (grn) {
2624                extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
2625                /*
2626                 * Counts are in chip clock (4ns) periods.
2627                 * This is ~1/15 sec (66.6 ms) on,
2628                 * 3/16 sec (187.5 ms) off, with packets rcvd.
2629                 */
2630                ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
2631                        ((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
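                    /*
                     * e.g. 66,600,000 ns / 4 ns per tick = 16,650,000
                     * on-ticks, and 187,500,000 ns / 4 ns = 46,875,000
                     * off-ticks, in the two LEDBLINK fields.
                     */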
2632        }
2633        if (yel)
2634                extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
2635        dd->cspec->extctrl = extctl;
2636        qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
2637        spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2638
2639        if (ledblink) /* blink the LED on packet receive */
2640                qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
2641}
2642
2643#ifdef CONFIG_INFINIBAND_QIB_DCA
2644
2645static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event)
2646{
2647        switch (event) {
2648        case DCA_PROVIDER_ADD:
2649                if (dd->flags & QIB_DCA_ENABLED)
2650                        break;
2651                if (!dca_add_requester(&dd->pcidev->dev)) {
2652                        qib_devinfo(dd->pcidev, "DCA enabled\n");
2653                        dd->flags |= QIB_DCA_ENABLED;
2654                        qib_setup_dca(dd);
2655                }
2656                break;
2657        case DCA_PROVIDER_REMOVE:
2658                if (dd->flags & QIB_DCA_ENABLED) {
2659                        dca_remove_requester(&dd->pcidev->dev);
2660                        dd->flags &= ~QIB_DCA_ENABLED;
2661                        dd->cspec->dca_ctrl = 0;
2662                        qib_write_kreg(dd, KREG_IDX(DCACtrlA),
2663                                dd->cspec->dca_ctrl);
2664                }
2665                break;
2666        }
2667        return 0;
2668}
2669
2670static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd, int cpu)
2671{
2672        struct qib_devdata *dd = rcd->dd;
2673        struct qib_chip_specific *cspec = dd->cspec;
2674
2675        if (!(dd->flags & QIB_DCA_ENABLED))
2676                return;
2677        if (cspec->rhdr_cpu[rcd->ctxt] != cpu) {
2678                const struct dca_reg_map *rmp;
2679
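                    /*
                     * Fold the DCA tag for the new cpu into the shadow
                     * of this context's field, push the shadow to the
                     * chip, then make sure RcvHdrq DCA is enabled
                     * globally.
                     */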
2680                cspec->rhdr_cpu[rcd->ctxt] = cpu;
2681                rmp = &dca_rcvhdr_reg_map[rcd->ctxt];
2682                cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask;
2683                cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |=
2684                        (u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb;
2685                qib_devinfo(dd->pcidev,
2686                        "Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu,
2687                        (long long) cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2688                qib_write_kreg(dd, rmp->regno,
2689                               cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2690                cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable);
2691                qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2692        }
2693}
2694
2695static void qib_update_sdma_dca(struct qib_pportdata *ppd, int cpu)
2696{
2697        struct qib_devdata *dd = ppd->dd;
2698        struct qib_chip_specific *cspec = dd->cspec;
2699        unsigned pidx = ppd->port - 1;
2700
2701        if (!(dd->flags & QIB_DCA_ENABLED))
2702                return;
2703        if (cspec->sdma_cpu[pidx] != cpu) {
2704                cspec->sdma_cpu[pidx] = cpu;
2705                cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ?
2706                        SYM_MASK(DCACtrlF, SendDma1DCAOPH) :
2707                        SYM_MASK(DCACtrlF, SendDma0DCAOPH));
2708                cspec->dca_rcvhdr_ctrl[4] |=
2709                        (u64) dca3_get_tag(&dd->pcidev->dev, cpu) <<
2710                                (ppd->hw_pidx ?
2711                                        SYM_LSB(DCACtrlF, SendDma1DCAOPH) :
2712                                        SYM_LSB(DCACtrlF, SendDma0DCAOPH));
2713                qib_devinfo(dd->pcidev,
2714                        "sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu,
2715                        (long long) cspec->dca_rcvhdr_ctrl[4]);
2716                qib_write_kreg(dd, KREG_IDX(DCACtrlF),
2717                               cspec->dca_rcvhdr_ctrl[4]);
2718                cspec->dca_ctrl |= ppd->hw_pidx ?
2719                        SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) :
2720                        SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable);
2721                qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2722        }
2723}
2724
2725static void qib_setup_dca(struct qib_devdata *dd)
2726{
2727        struct qib_chip_specific *cspec = dd->cspec;
2728        int i;
2729
2730        for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++)
2731                cspec->rhdr_cpu[i] = -1;
2732        for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2733                cspec->sdma_cpu[i] = -1;
2734        cspec->dca_rcvhdr_ctrl[0] =
2735                (1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) |
2736                (1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) |
2737                (1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) |
2738                (1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt));
2739        cspec->dca_rcvhdr_ctrl[1] =
2740                (1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) |
2741                (1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) |
2742                (1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) |
2743                (1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt));
2744        cspec->dca_rcvhdr_ctrl[2] =
2745                (1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) |
2746                (1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) |
2747                (1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) |
2748                (1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt));
2749        cspec->dca_rcvhdr_ctrl[3] =
2750                (1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) |
2751                (1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) |
2752                (1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) |
2753                (1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt));
2754        cspec->dca_rcvhdr_ctrl[4] =
2755                (1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) |
2756                (1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt));
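            /*
             * Each RcvHdrq*DCAXfrCnt field starts at 1, presumably so
             * that a DCA hint is emitted on every header-queue
             * transfer.
             */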
2757        for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2758                qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
2759                               cspec->dca_rcvhdr_ctrl[i]);
2760        for (i = 0; i < cspec->num_msix_entries; i++)
2761                setup_dca_notifier(dd, i);
2762}
2763
2764static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
2765                             const cpumask_t *mask)
2766{
2767        struct qib_irq_notify *n =
2768                container_of(notify, struct qib_irq_notify, notify);
2769        int cpu = cpumask_first(mask);
2770
2771        if (n->rcv) {
2772                struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2773
2774                qib_update_rhdrq_dca(rcd, cpu);
2775        } else {
2776                struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2777
2778                qib_update_sdma_dca(ppd, cpu);
2779        }
2780}
2781
2782static void qib_irq_notifier_release(struct kref *ref)
2783{
2784        struct qib_irq_notify *n =
2785                container_of(ref, struct qib_irq_notify, notify.kref);
2786        struct qib_devdata *dd;
2787
2788        if (n->rcv) {
2789                struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2790
2791                dd = rcd->dd;
2792        } else {
2793                struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2794
2795                dd = ppd->dd;
2796        }
2797        qib_devinfo(dd->pcidev,
2798                "release on HCA notify 0x%p n 0x%p\n", ref, n);
2799        kfree(n);
2800}
2801#endif
2802
2803static void qib_7322_free_irq(struct qib_devdata *dd)
2804{
2805        u64 intgranted;
2806        int i;
2807
2808        dd->cspec->main_int_mask = ~0ULL;
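            /* hand every interrupt source back to the general handler */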
2809
2810        for (i = 0; i < dd->cspec->num_msix_entries; i++) {
2811                /* only free IRQs that were allocated */
2812                if (dd->cspec->msix_entries[i].arg) {
2813#ifdef CONFIG_INFINIBAND_QIB_DCA
2814                        reset_dca_notifier(dd, i);
2815#endif
2816                        irq_set_affinity_hint(pci_irq_vector(dd->pcidev, i),
2817                                              NULL);
2818                        free_cpumask_var(dd->cspec->msix_entries[i].mask);
2819                        pci_free_irq(dd->pcidev, i,
2820                                     dd->cspec->msix_entries[i].arg);
2821                }
2822        }
2823
2824        /* If num_msix_entries was 0, we were using an INTx IRQ; free it */
2825        if (!dd->cspec->num_msix_entries)
2826                pci_free_irq(dd->pcidev, 0, dd);
2827        else
2828                dd->cspec->num_msix_entries = 0;
2829
2830        pci_free_irq_vectors(dd->pcidev);
2831
2832        /* make sure no MSIx interrupts are left pending */
2833        intgranted = qib_read_kreg64(dd, kr_intgranted);
2834        if (intgranted)
2835                qib_write_kreg(dd, kr_intgranted, intgranted);
2836}
2837
2838static void qib_setup_7322_cleanup(struct qib_devdata *dd)
2839{
2840        int i;
2841
2842#ifdef CONFIG_INFINIBAND_QIB_DCA
2843        if (dd->flags & QIB_DCA_ENABLED) {
2844                dca_remove_requester(&dd->pcidev->dev);
2845                dd->flags &= ~QIB_DCA_ENABLED;
2846                dd->cspec->dca_ctrl = 0;
2847                qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl);
2848        }
2849#endif
2850
2851        qib_7322_free_irq(dd);
2852        kfree(dd->cspec->cntrs);
2853        kfree(dd->cspec->sendchkenable);
2854        kfree(dd->cspec->sendgrhchk);
2855        kfree(dd->cspec->sendibchk);
2856        kfree(dd->cspec->msix_entries);
2857        for (i = 0; i < dd->num_pports; i++) {
2858                unsigned long flags;
2859                u32 mask = QSFP_GPIO_MOD_PRS_N |
2860                        (QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);
2861
2862                kfree(dd->pport[i].cpspec->portcntrs);
2863                if (dd->flags & QIB_HAS_QSFP) {
2864                        spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2865                        dd->cspec->gpio_mask &= ~mask;
2866                        qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2867                        spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2868                }
2869        }
2870}
2871
2872/* handle SDMA interrupts */
2873static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
2874{
2875        struct qib_pportdata *ppd0 = &dd->pport[0];
2876        struct qib_pportdata *ppd1 = &dd->pport[1];
2877        u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
2878                INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
2879        u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
2880                INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));
2881
2882        if (intr0)
2883                qib_sdma_intr(ppd0);
2884        if (intr1)
2885                qib_sdma_intr(ppd1);
2886
2887        if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
2888                qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
2889        if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
2890                qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
2891}
2892
2893/*
2894 * Set or clear the Send buffer available interrupt enable bit.
2895 */
2896static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
2897{
2898        unsigned long flags;
2899
2900        spin_lock_irqsave(&dd->sendctrl_lock, flags);
2901        if (needint)
2902                dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
2903        else
2904                dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
2905        qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
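            /*
             * The scratch write below is evidently a flushing write,
             * used throughout this driver to force the sendctrl update
             * out to the chip before the lock is dropped.
             */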
2906        qib_write_kreg(dd, kr_scratch, 0ULL);
2907        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2908}
2909
2910/*
2911 * Somehow got an interrupt with reserved bits set in interrupt status.
2912 * Print a message so we know it happened, then clear them.
2913 * Marked noinline to keep the mainline interrupt handler cache-friendly.
2914 */
2915static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
2916{
2917        u64 kills;
2919
2920        kills = istat & ~QIB_I_BITSEXTANT;
2921        qib_dev_err(dd,
2922                "Clearing reserved interrupt(s) 0x%016llx\n",
2923                (unsigned long long) kills);
2924        qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
2925}
2926
2927/* keep mainline interrupt handler cache-friendly */
2928static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
2929{
2930        u32 gpiostatus;
2931        int handled = 0;
2932        int pidx;
2933
2934        /*
2935         * Boards for this chip currently don't use GPIO interrupts,
2936         * so clear by writing GPIOstatus to GPIOclear, and complain
2937         * to developer.  To avoid endless repeats, clear
2938         * the bits in the mask, since there is some kind of
2939         * programming error or chip problem.
2940         */
2941        gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
2942        /*
2943         * In theory, writing GPIOstatus to GPIOclear could
2944         * have a bad side-effect on some diagnostic that wanted
2945         * to poll for a status-change, but the various shadows
2946         * make that problematic at best. Diags will just suppress
2947         * all GPIO interrupts during such tests.
2948         */
2949        qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
2950        /*
2951         * Check for QSFP MOD_PRS changes.
2952         * (Only works for single port if IB1 != pidx1.)
2953         */
2954        for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
2955             ++pidx) {
2956                struct qib_pportdata *ppd;
2957                struct qib_qsfp_data *qd;
2958                u32 mask;
2959
2960                if (!dd->pport[pidx].link_speed_supported)
2961                        continue;
2962                mask = QSFP_GPIO_MOD_PRS_N;
2963                ppd = dd->pport + pidx;
2964                mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
2965                if (gpiostatus & dd->cspec->gpio_mask & mask) {
2966                        u64 pins;
2967
2968                        qd = &ppd->cpspec->qsfp_data;
2969                        gpiostatus &= ~mask;
2970                        pins = qib_read_kreg64(dd, kr_extstatus);
2971                        pins >>= SYM_LSB(EXTStatus, GPIOIn);
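                            /*
                             * MOD_PRS_N is active-low: the pin reads 0
                             * while a QSFP module is seated, so only
                             * then is the QSFP worker queued.
                             */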
2972                        if (!(pins & mask)) {
2973                                ++handled;
2974                                qd->t_insert = jiffies;
2975                                queue_work(ib_wq, &qd->work);
2976                        }
2977                }
2978        }
2979
2980        if (gpiostatus && !handled) {
2981                const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
2982                u32 gpio_irq = mask & gpiostatus;
2983
2984                /*
2985                 * Clear any troublemakers, and update chip from shadow
2986                 */
2987                dd->cspec->gpio_mask &= ~gpio_irq;
2988                qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2989        }
2990}
2991
2992/*
2993 * Handle errors and unusual events first, separate function
2994 * to improve cache hits for fast path interrupt handling.
2995 */
2996static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
2997{
2998        if (istat & ~QIB_I_BITSEXTANT)
2999                unknown_7322_ibits(dd, istat);
3000        if (istat & QIB_I_GPIO)
3001                unknown_7322_gpio_intr(dd);
3002        if (istat & QIB_I_C_ERROR) {
3003                qib_write_kreg(dd, kr_errmask, 0ULL);
3004                tasklet_schedule(&dd->error_tasklet);
3005        }
3006        if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
3007                handle_7322_p_errors(dd->rcd[0]->ppd);
3008        if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
3009                handle_7322_p_errors(dd->rcd[1]->ppd);
3010}
3011
3012/*
3013 * Dynamically adjust the rcv int timeout for a context based on incoming
3014 * packet rate.
3015 */
3016static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
3017{
3018        struct qib_devdata *dd = rcd->dd;
3019        u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
3020
3021        /*
3022         * Dynamically adjust idle timeout on chip
3023         * based on number of packets processed.
3024         */
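            /*
             * A quiet tick (npkts < rcv_int_count) halves the timeout
             * while it is above 2, trading coalescing for latency; a
             * busy tick doubles it, capped at rcv_int_timeout;
             * otherwise the register write is skipped entirely.
             */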
3025        if (npkts < rcv_int_count && timeout > 2)
3026                timeout >>= 1;
3027        else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
3028                timeout = min(timeout << 1, rcv_int_timeout);
3029        else
3030                return;
3031
3032        dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
3033        qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
3034}
3035
3036/*
3037 * This is the main interrupt handler.
3038 * It will normally only be used for low frequency interrupts but may
3039 * have to handle all interrupts if INTx is enabled or fewer than normal
3040 * MSIx interrupts were allocated.
3041 * This routine should ignore the interrupt bits for any of the
3042 * dedicated MSIx handlers.
3043 */
3044static irqreturn_t qib_7322intr(int irq, void *data)
3045{
3046        struct qib_devdata *dd = data;
3047        irqreturn_t ret;
3048        u64 istat;
3049        u64 ctxtrbits;
3050        u64 rmask;
3051        unsigned i;
3052        u32 npkts;
3053
3054        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
3055                /*
3056                 * This return value is not great, but we do not want the
3057                 * interrupt core code to remove our interrupt handler
3058                 * because we don't appear to be handling an interrupt
3059                 * during a chip reset.
3060                 */
3061                ret = IRQ_HANDLED;
3062                goto bail;
3063        }
3064
3065        istat = qib_read_kreg64(dd, kr_intstatus);
3066
3067        if (unlikely(istat == ~0ULL)) {
3068                qib_bad_intrstatus(dd);
3069                qib_dev_err(dd, "Interrupt status all f's, skipping\n");
3070                /* don't know if it was our interrupt or not */
3071                ret = IRQ_NONE;
3072                goto bail;
3073        }
3074
3075        istat &= dd->cspec->main_int_mask;
3076        if (unlikely(!istat)) {
3077                /* already handled, or shared and not us */
3078                ret = IRQ_NONE;
3079                goto bail;
3080        }
3081
3082        this_cpu_inc(*dd->int_counter);
3083
3084        /* handle "errors" of various kinds first, device ahead of port */
3085        if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
3086                              QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
3087                              INT_MASK_P(Err, 1))))
3088                unlikely_7322_intr(dd, istat);
3089
3090        /*
3091         * Clear the interrupt bits we found set, relatively early, so we
3092         * "know" know the chip will have seen this by the time we process
3093         * the queue, and will re-interrupt if necessary.  The processor
3094         * itself won't take the interrupt again until we return.
3095         */
3096        qib_write_kreg(dd, kr_intclear, istat);
3097
3098        /*
3099         * Handle kernel receive queues before checking for pio buffers
3100         * available since receives can overflow; piobuf waiters can afford
3101         * a few extra cycles, since they were waiting anyway.
3102         */
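            /*
             * rmask below covers the avail and urgent bits for context
             * 0; shifting it left once per context walks both bit
             * ranges in step. Whatever survives the kernel-context
             * loop belongs to user contexts and is handed to
             * qib_handle_urcv().
             */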
3103        ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
3104        if (ctxtrbits) {
3105                rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
3106                        (1ULL << QIB_I_RCVURG_LSB);
3107                for (i = 0; i < dd->first_user_ctxt; i++) {
3108                        if (ctxtrbits & rmask) {
3109                                ctxtrbits &= ~rmask;
3110                                if (dd->rcd[i])
3111                                        qib_kreceive(dd->rcd[i], NULL, &npkts);
3112                        }
3113                        rmask <<= 1;
3114                }
3115                if (ctxtrbits) {
3116                        ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
3117                                (ctxtrbits >> QIB_I_RCVURG_LSB);
3118                        qib_handle_urcv(dd, ctxtrbits);
3119                }
3120        }
3121
3122        if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
3123                sdma_7322_intr(dd, istat);
3124
3125        if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
3126                qib_ib_piobufavail(dd);
3127
3128        ret = IRQ_HANDLED;
3129bail:
3130        return ret;
3131}
3132
3133/*
3134 * Dedicated receive packet available interrupt handler.
3135 */
3136static irqreturn_t qib_7322pintr(int irq, void *data)
3137{
3138        struct qib_ctxtdata *rcd = data;
3139        struct qib_devdata *dd = rcd->dd;
3140        u32 npkts;
3141
3142        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3143                /*
3144                 * This return value is not great, but we do not want the
3145                 * interrupt core code to remove our interrupt handler
3146                 * because we don't appear to be handling an interrupt
3147                 * during a chip reset.
3148                 */
3149                return IRQ_HANDLED;
3150
3151        this_cpu_inc(*dd->int_counter);
3152
3153        /* Clear the interrupt bit we expect to be set. */
3154        qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
3155                       (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
3156
3157        qib_kreceive(rcd, NULL, &npkts);
3158
3159        return IRQ_HANDLED;
3160}
3161
3162/*
3163 * Dedicated Send buffer available interrupt handler.
3164 */
3165static irqreturn_t qib_7322bufavail(int irq, void *data)
3166{
3167        struct qib_devdata *dd = data;
3168
3169        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3170                /*
3171                 * This return value is not great, but we do not want the
3172                 * interrupt core code to remove our interrupt handler
3173                 * because we don't appear to be handling an interrupt
3174                 * during a chip reset.
3175                 */
3176                return IRQ_HANDLED;
3177
3178        this_cpu_inc(*dd->int_counter);
3179
3180        /* Clear the interrupt bit we expect to be set. */
3181        qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);
3182
3183        /* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
3184        if (dd->flags & QIB_INITTED)
3185                qib_ib_piobufavail(dd);
3186        else
3187                qib_wantpiobuf_7322_intr(dd, 0);
3188
3189        return IRQ_HANDLED;
3190}
3191
3192/*
3193 * Dedicated Send DMA interrupt handler.
3194 */
3195static irqreturn_t sdma_intr(int irq, void *data)
3196{
3197        struct qib_pportdata *ppd = data;
3198        struct qib_devdata *dd = ppd->dd;
3199
3200        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3201                /*
3202                 * This return value is not great, but we do not want the
3203                 * interrupt core code to remove our interrupt handler
3204                 * because we don't appear to be handling an interrupt
3205                 * during a chip reset.
3206                 */
3207                return IRQ_HANDLED;
3208
3209        this_cpu_inc(*dd->int_counter);
3210
3211        /* Clear the interrupt bit we expect to be set. */
3212        qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3213                       INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
3214        qib_sdma_intr(ppd);
3215
3216        return IRQ_HANDLED;
3217}
3218
3219/*
3220 * Dedicated Send DMA idle interrupt handler.
3221 */
3222static irqreturn_t sdma_idle_intr(int irq, void *data)
3223{
3224        struct qib_pportdata *ppd = data;
3225        struct qib_devdata *dd = ppd->dd;
3226
3227        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3228                /*
3229                 * This return value is not great, but we do not want the
3230                 * interrupt core code to remove our interrupt handler
3231                 * because we don't appear to be handling an interrupt
3232                 * during a chip reset.
3233                 */
3234                return IRQ_HANDLED;
3235
3236        this_cpu_inc(*dd->int_counter);
3237
3238        /* Clear the interrupt bit we expect to be set. */
3239        qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3240                       INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
3241        qib_sdma_intr(ppd);
3242
3243        return IRQ_HANDLED;
3244}
3245
3246/*
3247 * Dedicated Send DMA progress interrupt handler.
3248 */
3249static irqreturn_t sdma_progress_intr(int irq, void *data)
3250{
3251        struct qib_pportdata *ppd = data;
3252        struct qib_devdata *dd = ppd->dd;
3253
3254        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3255                /*
3256                 * This return value is not great, but we do not want the
3257                 * interrupt core code to remove our interrupt handler
3258                 * because we don't appear to be handling an interrupt
3259                 * during a chip reset.
3260                 */
3261                return IRQ_HANDLED;
3262
3263        this_cpu_inc(*dd->int_counter);
3264
3265        /* Clear the interrupt bit we expect to be set. */
3266        qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3267                       INT_MASK_P(SDmaProgress, 1) :
3268                       INT_MASK_P(SDmaProgress, 0));
3269        qib_sdma_intr(ppd);
3270
3271        return IRQ_HANDLED;
3272}
3273
3274/*
3275 * Dedicated Send DMA cleanup interrupt handler.
3276 */
3277static irqreturn_t sdma_cleanup_intr(int irq, void *data)
3278{
3279        struct qib_pportdata *ppd = data;
3280        struct qib_devdata *dd = ppd->dd;
3281
3282        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3283                /*
3284                 * This return value is not great, but we do not want the
3285                 * interrupt core code to remove our interrupt handler
3286                 * because we don't appear to be handling an interrupt
3287                 * during a chip reset.
3288                 */
3289                return IRQ_HANDLED;
3290
3291        this_cpu_inc(*dd->int_counter);
3292
3293        /* Clear the interrupt bit we expect to be set. */
3294        qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3295                       INT_MASK_PM(SDmaCleanupDone, 1) :
3296                       INT_MASK_PM(SDmaCleanupDone, 0));
3297        qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
3298
3299        return IRQ_HANDLED;
3300}
3301
3302#ifdef CONFIG_INFINIBAND_QIB_DCA
3303
3304static void reset_dca_notifier(struct qib_devdata *dd, int msixnum)
3305{
3306        if (!dd->cspec->msix_entries[msixnum].dca)
3307                return;
3308
3309        qib_devinfo(dd->pcidev, "Disabling notifier on HCA %d irq %d\n",
3310                    dd->unit, pci_irq_vector(dd->pcidev, msixnum));
3311        irq_set_affinity_notifier(pci_irq_vector(dd->pcidev, msixnum), NULL);
3312        dd->cspec->msix_entries[msixnum].notifier = NULL;
3313}
3314
3315static void setup_dca_notifier(struct qib_devdata *dd, int msixnum)
3316{
3317        struct qib_msix_entry *m = &dd->cspec->msix_entries[msixnum];
3318        struct qib_irq_notify *n;
3319
3320        if (!m->dca)
3321                return;
3322        n = kzalloc(sizeof(*n), GFP_KERNEL);
3323        if (n) {
3324                int ret;
3325
3326                m->notifier = n;
3327                n->notify.irq = pci_irq_vector(dd->pcidev, msixnum);
3328                n->notify.notify = qib_irq_notifier_notify;
3329                n->notify.release = qib_irq_notifier_release;
3330                n->arg = m->arg;
3331                n->rcv = m->rcv;
3332                qib_devinfo(dd->pcidev,
3333                        "set notifier irq %d rcv %d notify %p\n",
3334                        n->notify.irq, n->rcv, &n->notify);
3335                ret = irq_set_affinity_notifier(
3336                                n->notify.irq,
3337                                &n->notify);
3338                if (ret) {
3339                        m->notifier = NULL;
3340                        kfree(n);
3341                }
3342        }
3343}
3344
3345#endif
3346
3347/*
3348 * Set up our chip-specific interrupt handler.
3349 * The interrupt type has already been set up, so
3350 * we just need to do the registration and error checking.
3351 * If we are using MSIx interrupts, we may fall back to
3352 * INTx later, if the interrupt handler doesn't get called
3353 * within 1/2 second (see verify_interrupt()).
3354 */
3355static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
3356{
3357        int ret, i, msixnum;
3358        u64 redirect[6];
3359        u64 mask;
3360        const struct cpumask *local_mask;
3361        int firstcpu, secondcpu = 0, currrcvcpu = 0;
3362
3363        if (!dd->num_pports)
3364                return;
3365
3366        if (clearpend) {
3367                /*
3368                 * if not switching interrupt types, be sure interrupts are
3369                 * disabled, and then clear anything pending at this point,
3370                 * because we are starting clean.
3371                 */
3372                qib_7322_set_intr_state(dd, 0);
3373
3374                /* clear the reset error, init error/hwerror mask */
3375                qib_7322_init_hwerrors(dd);
3376
3377                /* clear any interrupt bits that might be set */
3378                qib_write_kreg(dd, kr_intclear, ~0ULL);
3379
3380                /* make sure no pending MSIx intr, and clear diag reg */
3381                qib_write_kreg(dd, kr_intgranted, ~0ULL);
3382                qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
3383        }
3384
3385        if (!dd->cspec->num_msix_entries) {
3386                /* Try to get INTx interrupt */
3387try_intx:
3388                ret = pci_request_irq(dd->pcidev, 0, qib_7322intr, NULL, dd,
3389                                      QIB_DRV_NAME);
3390                if (ret) {
3391                        qib_dev_err(
3392                                dd,
3393                                "Couldn't setup INTx interrupt (irq=%d): %d\n",
3394                                pci_irq_vector(dd->pcidev, 0), ret);
3395                        return;
3396                }
3397                dd->cspec->main_int_mask = ~0ULL;
3398                return;
3399        }
3400
3401        /* Try to get MSIx interrupts */
3402        memset(redirect, 0, sizeof(redirect));
3403        mask = ~0ULL;
3404        msixnum = 0;
3405        local_mask = cpumask_of_pcibus(dd->pcidev->bus);
3406        firstcpu = cpumask_first(local_mask);
3407        if (firstcpu >= nr_cpu_ids ||
3408                        cpumask_weight(local_mask) == num_online_cpus()) {
3409                local_mask = topology_core_cpumask(0);
3410                firstcpu = cpumask_first(local_mask);
3411        }
3412        if (firstcpu < nr_cpu_ids) {
3413                secondcpu = cpumask_next(firstcpu, local_mask);
3414                if (secondcpu >= nr_cpu_ids)
3415                        secondcpu = firstcpu;
3416                currrcvcpu = secondcpu;
3417        }
3418        for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
3419                irq_handler_t handler;
3420                void *arg;
3421                int lsb, reg, sh;
3422#ifdef CONFIG_INFINIBAND_QIB_DCA
3423                int dca = 0;
3424#endif
3425                if (i < ARRAY_SIZE(irq_table)) {
3426                        if (irq_table[i].port) {
3427                                /* skip if for a non-configured port */
3428                                if (irq_table[i].port > dd->num_pports)
3429                                        continue;
3430                                arg = dd->pport + irq_table[i].port - 1;
3431                        } else
3432                                arg = dd;
3433#ifdef CONFIG_INFINIBAND_QIB_DCA
3434                        dca = irq_table[i].dca;
3435#endif
3436                        lsb = irq_table[i].lsb;
3437                        handler = irq_table[i].handler;
3438                        ret = pci_request_irq(dd->pcidev, msixnum, handler,
3439                                              NULL, arg, QIB_DRV_NAME "%d%s",
3440                                              dd->unit,
3441                                              irq_table[i].name);
3442                } else {
3443                        unsigned ctxt;
3444
3445                        ctxt = i - ARRAY_SIZE(irq_table);
3446                        /* per krcvq context receive interrupt */
3447                        arg = dd->rcd[ctxt];
3448                        if (!arg)
3449                                continue;
3450                        if (qib_krcvq01_no_msi && ctxt < 2)
3451                                continue;
3452#ifdef CONFIG_INFINIBAND_QIB_DCA
3453                        dca = 1;
3454#endif
3455                        lsb = QIB_I_RCVAVAIL_LSB + ctxt;
3456                        handler = qib_7322pintr;
3457                        ret = pci_request_irq(dd->pcidev, msixnum, handler,
3458                                              NULL, arg,
3459                                              QIB_DRV_NAME "%d (kctx)",
3460                                              dd->unit);
3461                }
3462
3463                if (ret) {
3464                        /*
3465                         * Shouldn't happen since the enable said we could
3466                         * have as many as we are trying to set up here.
3467                         */
3468                        qib_dev_err(dd,
3469                                    "Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
3470                                    msixnum,
3471                                    pci_irq_vector(dd->pcidev, msixnum),
3472                                    ret);
3473                        qib_7322_free_irq(dd);
3474                        pci_alloc_irq_vectors(dd->pcidev, 1, 1,
3475                                              PCI_IRQ_LEGACY);
3476                        goto try_intx;
3477                }
3478                dd->cspec->msix_entries[msixnum].arg = arg;
3479#ifdef CONFIG_INFINIBAND_QIB_DCA
3480                dd->cspec->msix_entries[msixnum].dca = dca;
3481                dd->cspec->msix_entries[msixnum].rcv =
3482                        handler == qib_7322pintr;
3483#endif
3484                if (lsb >= 0) {
3485                        reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
3486                        sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
3487                                SYM_LSB(IntRedirect0, vec1);
3488                        mask &= ~(1ULL << lsb);
3489                        redirect[reg] |= ((u64) msixnum) << sh;
3490                }
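                    /*
                     * Each IntRedirect register packs one small field
                     * per interrupt-source bit; writing msixnum into
                     * the field for this lsb steers that source to
                     * this MSI-X vector, and clearing the bit in mask
                     * keeps the general handler from also claiming it.
                     */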
3491                qib_read_kreg64(dd, 2 * msixnum + 1 +
3492                                (QIB_7322_MsixTable_OFFS / sizeof(u64)));
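                    /*
                     * CPU affinity below: receive-context vectors are
                     * spread round-robin over the local CPUs starting
                     * at secondcpu (wrapping back there when the mask
                     * is exhausted); every other vector is pinned to
                     * firstcpu.
                     */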
3493                if (firstcpu < nr_cpu_ids &&
3494                        zalloc_cpumask_var(
3495                                &dd->cspec->msix_entries[msixnum].mask,
3496                                GFP_KERNEL)) {
3497                        if (handler == qib_7322pintr) {
3498                                cpumask_set_cpu(currrcvcpu,
3499                                        dd->cspec->msix_entries[msixnum].mask);
3500                                currrcvcpu = cpumask_next(currrcvcpu,
3501                                        local_mask);
3502                                if (currrcvcpu >= nr_cpu_ids)
3503                                        currrcvcpu = secondcpu;
3504                        } else {
3505                                cpumask_set_cpu(firstcpu,
3506                                        dd->cspec->msix_entries[msixnum].mask);
3507                        }
3508                        irq_set_affinity_hint(
3509                                pci_irq_vector(dd->pcidev, msixnum),
3510                                dd->cspec->msix_entries[msixnum].mask);
3511                }
3512                msixnum++;
3513        }
3514        /* Initialize the vector mapping */
3515        for (i = 0; i < ARRAY_SIZE(redirect); i++)
3516                qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
3517        dd->cspec->main_int_mask = mask;
3518        tasklet_setup(&dd->error_tasklet, qib_error_tasklet);
3519}
3520
3521/**
3522 * qib_7322_boardname - fill in the board name and note features
3523 * @dd: the qlogic_ib device
3524 *
3525 * info will be based on the board revision register
3526 */
3527static unsigned qib_7322_boardname(struct qib_devdata *dd)
3528{
3529        /* Will need enumeration of board-types here */
3530        u32 boardid;
3531        unsigned int features = DUAL_PORT_CAP;
3532
3533        boardid = SYM_FIELD(dd->revision, Revision, BoardID);
3534
3535        switch (boardid) {
3536        case 0:
3537                dd->boardname = "InfiniPath_QLE7342_Emulation";
3538                break;
3539        case 1:
3540                dd->boardname = "InfiniPath_QLE7340";
3541                dd->flags |= QIB_HAS_QSFP;
3542                features = PORT_SPD_CAP;
3543                break;
3544        case 2:
3545                dd->boardname = "InfiniPath_QLE7342";
3546                dd->flags |= QIB_HAS_QSFP;
3547                break;
3548        case 3:
3549                dd->boardname = "InfiniPath_QMI7342";
3550                break;
3551        case 4:
3552                dd->boardname = "InfiniPath_Unsupported7342";
3553                qib_dev_err(dd, "Unsupported version of QMH7342\n");
3554                features = 0;
3555                break;
3556        case BOARD_QMH7342:
3557                dd->boardname = "InfiniPath_QMH7342";
3558                features = 0x24;
3559                break;
3560        case BOARD_QME7342:
3561                dd->boardname = "InfiniPath_QME7342";
3562                break;
3563        case 8:
3564                dd->boardname = "InfiniPath_QME7362";
3565                dd->flags |= QIB_HAS_QSFP;
3566                break;
3567        case BOARD_QMH7360:
3568                dd->boardname = "Intel IB QDR 1P FLR-QSFP Adptr";
3569                dd->flags |= QIB_HAS_QSFP;
3570                break;
3571        case 15:
3572                dd->boardname = "InfiniPath_QLE7342_TEST";
3573                dd->flags |= QIB_HAS_QSFP;
3574                break;
3575        default:
3576                dd->boardname = "InfiniPath_QLE73xy_UNKNOWN";
3577                qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
3578                break;
3579        }
3580        dd->board_atten = 1; /* index into txdds_Xdr */
3581
3582        snprintf(dd->boardversion, sizeof(dd->boardversion),
3583                 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
3584                 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
3585                 (unsigned int)SYM_FIELD(dd->revision, Revision_R, Arch),
3586                 dd->majrev, dd->minrev,
3587                 (unsigned int)SYM_FIELD(dd->revision, Revision_R, SW));
3588
3589        if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
3590                qib_devinfo(dd->pcidev,
3591                            "IB%u: Forced to single port mode by module parameter\n",
3592                            dd->unit);
3593                features &= PORT_SPD_CAP;
3594        }
3595
3596        return features;
3597}
3598
3599/*
3600 * This routine sleeps, so it can only be called from user context, not
3601 * from interrupt context.
3602 */
3603static int qib_do_7322_reset(struct qib_devdata *dd)
3604{
3605        u64 val;
3606        u64 *msix_vecsave = NULL;
3607        int i, msix_entries, ret = 1;
3608        u16 cmdval;
3609        u8 int_line, clinesz;
3610        unsigned long flags;
3611
3612        /* Use dev_err so it shows up in logs, etc. */
3613        qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
3614
3615        qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
3616
3617        msix_entries = dd->cspec->num_msix_entries;
3618
3619        /* no interrupts till re-initted */
3620        qib_7322_set_intr_state(dd, 0);
3621
3622        qib_7322_free_irq(dd);
3623
3624        if (msix_entries) {
3625                /* can be up to 512 bytes, too big for stack */
3626                msix_vecsave = kmalloc_array(2 * dd->cspec->num_msix_entries,
3627                                             sizeof(u64),
3628                                             GFP_KERNEL);
3629        }
3630
3631        /*
3632         * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
3633         * info that is set up by the BIOS, so we have to save and restore
3634         * it ourselves. There is some risk something could change it
3635         * after we save it, but since we have disabled MSI-X, it
3636         * shouldn't be touched...
3637         */
3638        for (i = 0; i < msix_entries; i++) {
3639                u64 vecaddr, vecdata;
3640
3641                vecaddr = qib_read_kreg64(dd, 2 * i +
3642                                  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3643                vecdata = qib_read_kreg64(dd, 1 + 2 * i +
3644                                  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3645                if (msix_vecsave) {
3646                        msix_vecsave[2 * i] = vecaddr;
3647                        /* save it without the masked bit set */
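                                /*
                                 * (bit 32 of this qword is the
                                 * per-vector MSI-X Mask bit, i.e. bit 0
                                 * of the vector-control dword)
                                 */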
3648                        msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
3649                }
3650        }
3651
3652        dd->pport->cpspec->ibdeltainprog = 0;
3653        dd->pport->cpspec->ibsymdelta = 0;
3654        dd->pport->cpspec->iblnkerrdelta = 0;
3655        dd->pport->cpspec->ibmalfdelta = 0;
3656        /* so we check interrupts work again */
3657        dd->z_int_counter = qib_int_counter(dd);
3658
3659        /*
3660         * Keep chip from being accessed until we are ready.  Use
3661         * writeq() directly, to allow the write even though QIB_PRESENT
3662         * isn't set.
3663         */
3664        dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
3665        dd->flags |= QIB_DOING_RESET;
3666        val = dd->control | QLOGIC_IB_C_RESET;
3667        writeq(val, &dd->kregbase[kr_control]);
3668
3669        for (i = 1; i <= 5; i++) {
3670                /*
3671                 * Allow MBIST, etc. to complete; longer on each retry.
3672                 * We sometimes get machine checks from bus timeout if no
3673                 * response, so for now, make it *really* long.
3674                 */
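                    /* i.e. 7s, 10s, 13s, 16s, 19s across the five tries */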
3675                msleep(1000 + (1 + i) * 3000);
3676
3677                qib_pcie_reenable(dd, cmdval, int_line, clinesz);
3678
3679                /*
3680                 * Use readq directly, so we don't need to mark it as PRESENT
3681                 * until we get a successful indication that all is well.
3682                 */
3683                val = readq(&dd->kregbase[kr_revision]);
3684                if (val == dd->revision)
3685                        break;
3686                if (i == 5) {
3687                        qib_dev_err(dd,
3688                                "Failed to initialize after reset, unusable\n");
3689                        ret = 0;
3690                        goto bail;
3691                }
3692        }
3693
3694        dd->flags |= QIB_PRESENT; /* it's back */
3695
3696        if (msix_entries) {
3697                /* restore the MSIx vector address and data if saved above */
3698                for (i = 0; i < msix_entries; i++) {
3699                        if (!msix_vecsave || !msix_vecsave[2 * i])
3700                                continue;
3701                        qib_write_kreg(dd, 2 * i +
3702                                (QIB_7322_MsixTable_OFFS / sizeof(u64)),
3703                                msix_vecsave[2 * i]);
3704                        qib_write_kreg(dd, 1 + 2 * i +
3705                                (QIB_7322_MsixTable_OFFS / sizeof(u64)),
3706                                msix_vecsave[1 + 2 * i]);
3707                }
3708        }
3709
3710        /* Initialize the remaining registers. */
3711        for (i = 0; i < dd->num_pports; ++i)
3712                write_7322_init_portregs(&dd->pport[i]);
3713        write_7322_initregs(dd);
3714
3715        if (qib_pcie_params(dd, dd->lbus_width, &msix_entries))
3716                qib_dev_err(dd,
3717                        "Reset failed to setup PCIe or interrupts; continuing anyway\n");
3718
3719        dd->cspec->num_msix_entries = msix_entries;
3720        qib_setup_7322_interrupt(dd, 1);
3721
3722        for (i = 0; i < dd->num_pports; ++i) {
3723                struct qib_pportdata *ppd = &dd->pport[i];
3724
3725                spin_lock_irqsave(&ppd->lflags_lock, flags);
3726                ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
3727                ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3728                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3729        }
3730
3731bail:
3732        dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
3733        kfree(msix_vecsave);
3734        return ret;
3735}
3736
3737/**
3738 * qib_7322_put_tid - write a TID to the chip
3739 * @dd: the qlogic_ib device
3740 * @tidptr: pointer to the expected TID (in chip) to update
3741 * @type: 0 for eager, 1 for expected
3742 * @pa: physical address of the in-memory buffer; tidinvalid if freeing
3743 */
3744static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
3745                             u32 type, unsigned long pa)
3746{
3747        if (!(dd->flags & QIB_PRESENT))
3748                return;
3749        if (pa != dd->tidinvalid) {
3750                u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
3751
3752                /* paranoia checks */
3753                if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
3754                        qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
3755                                    pa);
3756                        return;
3757                }
3758                if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
3759                        qib_dev_err(dd,
3760                                "Physical page address 0x%lx larger than supported\n",
3761                                pa);
3762                        return;
3763                }
3764
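                    /*
                     * chippa now holds the 2KB-aligned physical address
                     * in the chip's field position; the size bits OR'd
                     * in below tell the chip whether the buffer is 2KB
                     * or 4KB (tidtemplate encodes the eager buffer
                     * size).
                     */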
3765                if (type == RCVHQ_RCV_TYPE_EAGER)
3766                        chippa |= dd->tidtemplate;
3767                else /* for now, always full 4KB page */
3768                        chippa |= IBA7322_TID_SZ_4K;
3769                pa = chippa;
3770        }
3771        writeq(pa, tidptr);
3772}
3773
3774/**
3775 * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
3776 * @dd: the qlogic_ib device
3777 * @rcd: the ctxt
3778 *
3779 * clear all TID entries for a ctxt, expected and eager.
3780 * Used from qib_close().
3781 */
3782static void qib_7322_clear_tids(struct qib_devdata *dd,
3783                                struct qib_ctxtdata *rcd)
3784{
3785        u64 __iomem *tidbase;
3786        unsigned long tidinv;
3787        u32 ctxt;
3788        int i;
3789
3790        if (!dd->kregbase || !rcd)
3791                return;
3792
3793        ctxt = rcd->ctxt;
3794
3795        tidinv = dd->tidinvalid;
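            /*
             * TID entries are 64-bit chip registers; each context owns
             * rcvtidcnt consecutive expected entries starting at
             * rcvtidbase, and rcvegrcnt eager entries indexed from
             * rcvegr_tid_base past rcvegrbase.
             */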
3796        tidbase = (u64 __iomem *)
3797                ((char __iomem *) dd->kregbase +
3798                 dd->rcvtidbase +
3799                 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
3800
3801        for (i = 0; i < dd->rcvtidcnt; i++)
3802                qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
3803                                 tidinv);
3804
3805        tidbase = (u64 __iomem *)
3806                ((char __iomem *) dd->kregbase +
3807                 dd->rcvegrbase +
3808                 rcd->rcvegr_tid_base * sizeof(*tidbase));
3809
3810        for (i = 0; i < rcd->rcvegrcnt; i++)
3811                qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
3812                                 tidinv);
3813}
3814
3815/**
3816 * qib_7322_tidtemplate - setup constants for TID updates
3817 * @dd: the qlogic_ib device
3818 *
3819 * We set up the values we use often, to avoid recalculating them each time
3820 */
3821static void qib_7322_tidtemplate(struct qib_devdata *dd)
3822{
3823        /*
3824         * For now, we always allocate 4KB buffers (at init) so we can
3825         * receive max size packets.  We may want a module parameter to
3826         * specify 2KB or 4KB and/or make it per port instead of per device
3827         * for those who want to reduce memory footprint.  Note that the
3828         * rcvhdrentsize size must be large enough to hold the largest
3829         * IB header (currently 96 bytes) that we expect to handle (plus of
3830         * course the 2 dwords of RHF).
3831         */
3832        if (dd->rcvegrbufsize == 2048)
3833                dd->tidtemplate = IBA7322_TID_SZ_2K;
3834        else if (dd->rcvegrbufsize == 4096)
3835                dd->tidtemplate = IBA7322_TID_SZ_4K;
3836        dd->tidinvalid = 0;
3837}
3838
3839/**
3840 * qib_7322_get_base_info - set chip-specific flags for user code
3841 * @rcd: the qlogic_ib ctxt
3842 * @kinfo: qib_base_info pointer
3843 *
3844 * We set the PCIE flag because the lower bandwidth on PCIe vs
3845 * HyperTransport can affect some user packet algorithms.
3846 */
3848static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
3849                                  struct qib_base_info *kinfo)
3850{
3851        kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
3852                QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
3853                QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
3854        if (rcd->dd->cspec->r1)
3855                kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
3856        if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
3857                kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
3858
3859        return 0;
3860}
3861
3862static struct qib_message_header *
3863qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
3864{
3865        u32 offset = qib_hdrget_offset(rhf_addr);
3866
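            /*
             * rhf_addr points at the receive-header-flags word inside
             * a rcvhdrq entry; stepping back dd->rhf_offset words
             * gives the entry base, and the offset field from the RHF
             * then locates the packet header within it.
             */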
3867        return (struct qib_message_header *)
3868                (rhf_addr - dd->rhf_offset + offset);
3869}
3870
3871/*
3872 * Configure number of contexts.
3873 */
3874static void qib_7322_config_ctxts(struct qib_devdata *dd)
3875{
3876        unsigned long flags;
3877        u32 nchipctxts;
3878
3879        nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
3880        dd->cspec->numctxts = nchipctxts;
3881        if (qib_n_krcv_queues > 1 && dd->num_pports) {
3882                dd->first_user_ctxt = NUM_IB_PORTS +
3883                        (qib_n_krcv_queues - 1) * dd->num_pports;
3884                if (dd->first_user_ctxt > nchipctxts)
3885                        dd->first_user_ctxt = nchipctxts;
3886                dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
3887        } else {
3888                dd->first_user_ctxt = NUM_IB_PORTS;
3889                dd->n_krcv_queues = 1;
3890        }
3891
3892        if (!qib_cfgctxts) {
3893                int nctxts = dd->first_user_ctxt + num_online_cpus();
3894
3895                if (nctxts <= 6)
3896                        dd->ctxtcnt = 6;
3897                else if (nctxts <= 10)
3898                        dd->ctxtcnt = 10;
3899                else if (nctxts <= nchipctxts)
3900                        dd->ctxtcnt = nchipctxts;
3901        } else if (qib_cfgctxts < dd->num_pports)
3902                dd->ctxtcnt = dd->num_pports;
3903        else if (qib_cfgctxts <= nchipctxts)
3904                dd->ctxtcnt = qib_cfgctxts;
3905        if (!dd->ctxtcnt) /* none of the above, set to max */
3906                dd->ctxtcnt = nchipctxts;
3907
3908        /*
3909         * Chip can be configured for 6, 10, or 18 ctxts, and choice
3910         * affects number of eager TIDs per ctxt (1K, 2K, 4K).
3911         * Lock to be paranoid about later motion, etc.
3912         */
3913        spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
3914        if (dd->ctxtcnt > 10)
3915                dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
3916        else if (dd->ctxtcnt > 6)
3917                dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
3918        /* else configure for default 6 receive ctxts */
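            /*
             * i.e. ContextCfg is 2 for the 18-context configuration,
             * 1 for 10 contexts, and 0 (the default) for 6.
             */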
3919
3920        /* The XRC opcode is 5. */
3921        dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
3922
3923        /*
3924         * RcvCtrl *must* be written here so that the
3925         * chip understands how to change rcvegrcnt below.
3926         */
3927        qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
3928        spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
3929
3930        /* kr_rcvegrcnt changes based on the number of contexts enabled */
3931        dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
3932        if (qib_rcvhdrcnt)
3933                dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
3934        else
3935                dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt,
3936                                    dd->num_pports > 1 ? 1024U : 2048U);
3937}
3938
3939static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
3940{
3942        int lsb, ret = 0;
3943        u64 maskr; /* right-justified mask */
3944
3945        switch (which) {
3946
3947        case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
3948                ret = ppd->link_width_enabled;
3949                goto done;
3950
3951        case QIB_IB_CFG_LWID: /* Get currently active Link-width */
3952                ret = ppd->link_width_active;
3953                goto done;
3954
3955        case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
3956                ret = ppd->link_speed_enabled;
3957                goto done;
3958
3959        case QIB_IB_CFG_SPD: /* Get current Link spd */
3960                ret = ppd->link_speed_active;
3961                goto done;
3962
3963        case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
3964                lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3965                maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3966                break;
3967
3968        case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
3969                lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3970                maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3971                break;
3972
3973        case QIB_IB_CFG_LINKLATENCY:
3974                ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
3975                        SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
3976                goto done;
3977
3978        case QIB_IB_CFG_OP_VLS:
3979                ret = ppd->vls_operational;
3980                goto done;
3981
3982        case QIB_IB_CFG_VL_HIGH_CAP:
3983                ret = 16;
3984                goto done;
3985
3986        case QIB_IB_CFG_VL_LOW_CAP:
3987                ret = 16;
3988                goto done;
3989
3990        case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
3991                ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3992                                OverrunThreshold);
3993                goto done;
3994
3995        case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
3996                ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3997                                PhyerrThreshold);
3998                goto done;
3999
4000        case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
4001                /* will only take effect when the link state changes */
4002                ret = (ppd->cpspec->ibcctrl_a &
4003                       SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
4004                        IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
4005                goto done;
4006
4007        case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
4008                lsb = IBA7322_IBC_HRTBT_LSB;
4009                maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
4010                break;
4011
4012        case QIB_IB_CFG_PMA_TICKS:
4013                /*
4014                 * 0x00 = 10x the link transfer rate, or 4 nsec for 2.5 Gb/s.
4015                 * Since the clock is always 250MHz, the value is 3 (QDR),
                 * 1 (DDR) or 0 (SDR).
4016                 */
4017                if (ppd->link_speed_active == QIB_IB_QDR)
4018                        ret = 3;
4019                else if (ppd->link_speed_active == QIB_IB_DDR)
4020                        ret = 1;
4021                else
4022                        ret = 0;
4023                goto done;
4024
4025        default:
4026                ret = -EINVAL;
4027                goto done;
4028        }
4029        ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
4030done:
4031        return ret;
4032}
4033
4034/*
4035 * Below is again cribbed liberally from an older version. Do not lean
4036 * heavily on it.
4037 */
4038#define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
4039#define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
4040        | (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
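/*
 * The combined mask covers both fields, written as one value: the DLID
 * in the low 16 bits and the DLID mask (derived from the LMC) in the
 * next 16; see the QIB_IB_CFG_LIDLMC case below.
 */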
4041
4042static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
4043{
4044        struct qib_devdata *dd = ppd->dd;
4045        u64 maskr; /* right-justified mask */
4046        int lsb, ret = 0;
4047        u16 lcmd, licmd;
4048        unsigned long flags;
4049
4050        switch (which) {
4051        case QIB_IB_CFG_LIDLMC:
4052                /*
4053                 * Set LID and LMC. Combined to avoid a possible hazard;
4054                 * caller puts LMC in the 16 MSBs, DLID in the 16 LSBs of val.
4055                 */
4056                lsb = IBA7322_IBC_DLIDLMC_SHIFT;
4057                maskr = IBA7322_IBC_DLIDLMC_MASK;
4058                /*
4059                 * For header-checking, the SLID in the packet will
4060                 * be masked with SendIBSLMCMask, and compared
4061                 * with SendIBSLIDAssignMask. Make sure we do not
4062                 * set any bits not covered by the mask, or we get
4063                 * false-positives.
4064                 */
4065                qib_write_kreg_port(ppd, krp_sendslid,
4066                                    val & (val >> 16) & SendIBSLIDAssignMask);
4067                qib_write_kreg_port(ppd, krp_sendslidmask,
4068                                    (val >> 16) & SendIBSLMCMask);
4069                break;
4070
4071        case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
4072                ppd->link_width_enabled = val;
4073                /* convert IB value to chip register value */
4074                if (val == IB_WIDTH_1X)
4075                        val = 0;
4076                else if (val == IB_WIDTH_4X)
4077                        val = 1;
4078                else
4079                        val = 3;
4080                maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
4081                lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
4082                break;
4083
4084        case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
4085                /*
4086                 * As with width, only write the actual register if the
4087                 * link is currently down; otherwise it takes effect on the
4088                 * next link change.  Since the setting is being explicitly requested
4089                 * (via MAD or sysfs), clear autoneg failure status if speed
4090                 * autoneg is enabled.
4091                 */
4092                ppd->link_speed_enabled = val;
4093                val <<= IBA7322_IBC_SPEED_LSB;
4094                maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
4095                        IBA7322_IBC_MAX_SPEED_MASK;
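                /* val & (val - 1) is nonzero iff more than one speed bit is set */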
4096                if (val & (val - 1)) {
4097                        /* Multiple speeds enabled */
4098                        val |= IBA7322_IBC_IBTA_1_2_MASK |
4099                                IBA7322_IBC_MAX_SPEED_MASK;
4100                        spin_lock_irqsave(&ppd->lflags_lock, flags);
4101                        ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
4102                        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
4103                } else if (val & IBA7322_IBC_SPEED_QDR)
4104                        val |= IBA7322_IBC_IBTA_1_2_MASK;
4105                /* IBTA 1.2 mode + min/max + speed bits are contiguous */
4106                lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
4107                break;
4108
4109        case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
4110                lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4111                maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4112                break;
4113
4114        case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
4115                lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4116                maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4117                break;
4118
4119        case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
4120                maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4121                                  OverrunThreshold);
4122                if (maskr != val) {
4123                        ppd->cpspec->ibcctrl_a &=
4124                                ~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
4125                        ppd->cpspec->ibcctrl_a |= (u64) val <<
4126                                SYM_LSB(IBCCtrlA_0, OverrunThreshold);
4127                        qib_write_kreg_port(ppd, krp_ibcctrl_a,
4128                                            ppd->cpspec->ibcctrl_a);
4129                        qib_write_kreg(dd, kr_scratch, 0ULL);
4130                }
4131                goto bail;
4132
4133        case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
4134                maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4135                                  PhyerrThreshold);
4136                if (maskr != val) {
4137                        ppd->cpspec->ibcctrl_a &=
4138                                ~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
4139                        ppd->cpspec->ibcctrl_a |= (u64) val <<
4140                                SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
4141                        qib_write_kreg_port(ppd, krp_ibcctrl_a,
4142                                            ppd->cpspec->ibcctrl_a);
4143                        qib_write_kreg(dd, kr_scratch, 0ULL);
4144                }
4145                goto bail;
4146
4147        case QIB_IB_CFG_PKEYS: /* update pkeys */
4148                maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
4149                        ((u64) ppd->pkeys[2] << 32) |
4150                        ((u64) ppd->pkeys[3] << 48);
4151                qib_write_kreg_port(ppd, krp_partitionkey, maskr);
4152                goto bail;
4153
4154        case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
4155                /* will only take effect when the link state changes */
4156                if (val == IB_LINKINITCMD_POLL)
4157                        ppd->cpspec->ibcctrl_a &=
4158                                ~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
4159                else /* SLEEP */
4160                        ppd->cpspec->ibcctrl_a |=
4161                                SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
4162                qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
4163                qib_write_kreg(dd, kr_scratch, 0ULL);
4164                goto bail;
4165
4166        case QIB_IB_CFG_MTU: /* update the MTU in IBC */
4167                /*
4168                 * Update our housekeeping variables, and set IBC max
4169                 * size, same as init code; max IBC is max we allow in
4170                 * buffer, less the qword pbc, plus 1 for ICRC, in dwords.
4171                 * Set it even if unchanged; print a debug message only
4172                 * on changes.
4173                 */
4174                val = (ppd->ibmaxlen >> 2) + 1;
4175                ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
4176                ppd->cpspec->ibcctrl_a |= (u64)val <<
4177                        SYM_LSB(IBCCtrlA_0, MaxPktLen);
4178                qib_write_kreg_port(ppd, krp_ibcctrl_a,
4179                                    ppd->cpspec->ibcctrl_a);
4180                qib_write_kreg(dd, kr_scratch, 0ULL);
4181                goto bail;
4182
4183        case QIB_IB_CFG_LSTATE: /* set the IB link state */
4184                switch (val & 0xffff0000) {
4185                case IB_LINKCMD_DOWN:
4186                        lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
4187                        ppd->cpspec->ibmalfusesnap = 1;
4188                        ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
4189                                crp_errlink);
4190                        if (!ppd->cpspec->ibdeltainprog &&
4191                            qib_compat_ddr_negotiate) {
4192                                ppd->cpspec->ibdeltainprog = 1;
4193                                ppd->cpspec->ibsymsnap =
4194                                        read_7322_creg32_port(ppd,
4195                                                              crp_ibsymbolerr);
4196                                ppd->cpspec->iblnkerrsnap =
4197                                        read_7322_creg32_port(ppd,
4198                                                      crp_iblinkerrrecov);
4199                        }
4200                        break;
4201
4202                case IB_LINKCMD_ARMED:
4203                        lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
4204                        if (ppd->cpspec->ibmalfusesnap) {
4205                                ppd->cpspec->ibmalfusesnap = 0;
4206                                ppd->cpspec->ibmalfdelta +=
4207                                        read_7322_creg32_port(ppd,
4208                                                              crp_errlink) -
4209                                        ppd->cpspec->ibmalfsnap;
4210                        }
4211                        break;
4212
4213                case IB_LINKCMD_ACTIVE:
4214                        lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
4215                        break;
4216
4217                default:
4218                        ret = -EINVAL;
4219                        qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
4220                        goto bail;
4221                }
4222                switch (val & 0xffff) {
4223                case IB_LINKINITCMD_NOP:
4224                        licmd = 0;
4225                        break;
4226
4227                case IB_LINKINITCMD_POLL:
4228                        licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
4229                        break;
4230
4231                case IB_LINKINITCMD_SLEEP:
4232                        licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
4233                        break;
4234
4235                case IB_LINKINITCMD_DISABLE:
4236                        licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
4237                        ppd->cpspec->chase_end = 0;
4238                        /*
4239                         * Stop the state-chase counter and timer, if running;
4240                         * wait for a pending timer, but don't clear .data (ppd)!
4241                         */
4242                        if (ppd->cpspec->chase_timer.expires) {
4243                                del_timer_sync(&ppd->cpspec->chase_timer);
4244                                ppd->cpspec->chase_timer.expires = 0;
4245                        }
4246                        break;
4247
4248                default:
4249                        ret = -EINVAL;
4250                        qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
4251                                    val & 0xffff);
4252                        goto bail;
4253                }
4254                qib_set_ib_7322_lstate(ppd, lcmd, licmd);
4255                goto bail;
4256
4257        case QIB_IB_CFG_OP_VLS:
4258                if (ppd->vls_operational != val) {
4259                        ppd->vls_operational = val;
4260                        set_vls(ppd);
4261                }
4262                goto bail;
4263
4264        case QIB_IB_CFG_VL_HIGH_LIMIT:
4265                qib_write_kreg_port(ppd, krp_highprio_limit, val);
4266                goto bail;
4267
4268        case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
4269                if (val > 3) {
4270                        ret = -EINVAL;
4271                        goto bail;
4272                }
4273                lsb = IBA7322_IBC_HRTBT_LSB;
4274                maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
4275                break;
4276
4277        case QIB_IB_CFG_PORT:
4278                /* val is the port number of the switch we are connected to. */
4279                if (ppd->dd->cspec->r1) {
4280                        cancel_delayed_work(&ppd->cpspec->ipg_work);
4281                        ppd->cpspec->ipg_tries = 0;
4282                }
4283                goto bail;
4284
4285        default:
4286                ret = -EINVAL;
4287                goto bail;
4288        }
4289        ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
4290        ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
4291        qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
4292        qib_write_kreg(dd, kr_scratch, 0);
4293bail:
4294        return ret;
4295}
4296
4297static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
4298{
4299        int ret = 0;
4300        u64 val, ctrlb;
4301
4302        /* only IBC loopback, may add serdes and xgxs loopbacks later */
4303        if (!strncmp(what, "ibc", 3)) {
4304                ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
4305                                                       Loopback);
4306                val = 0; /* disable heart beat, so link will come up */
4307                qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
4308                         ppd->dd->unit, ppd->port);
4309        } else if (!strncmp(what, "off", 3)) {
4310                ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
4311                                                        Loopback);
4312                /* enable heart beat again */
4313                val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
4314                qib_devinfo(ppd->dd->pcidev,
4315                        "Disabling IB%u:%u IBC loopback (normal)\n",
4316                        ppd->dd->unit, ppd->port);
4317        } else
4318                ret = -EINVAL;
4319        if (!ret) {
4320                qib_write_kreg_port(ppd, krp_ibcctrl_a,
4321                                    ppd->cpspec->ibcctrl_a);
4322                ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
4323                                             << IBA7322_IBC_HRTBT_LSB);
4324                ppd->cpspec->ibcctrl_b = ctrlb | val;
4325                qib_write_kreg_port(ppd, krp_ibcctrl_b,
4326                                    ppd->cpspec->ibcctrl_b);
4327                qib_write_kreg(ppd->dd, kr_scratch, 0);
4328        }
4329        return ret;
4330}
4331
4332static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4333                           struct ib_vl_weight_elem *vl)
4334{
4335        unsigned i;
4336
4337        for (i = 0; i < 16; i++, regno++, vl++) {
4338                u32 val = qib_read_kreg_port(ppd, regno);
4339
4340                vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
4341                        SYM_RMASK(LowPriority0_0, VirtualLane);
4342                vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
4343                        SYM_RMASK(LowPriority0_0, Weight);
4344        }
4345}
4346
4347static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4348                           struct ib_vl_weight_elem *vl)
4349{
4350        unsigned i;
4351
4352        for (i = 0; i < 16; i++, regno++, vl++) {
4353                u64 val;
4354
4355                val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
4356                        SYM_LSB(LowPriority0_0, VirtualLane)) |
4357                      ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
4358                        SYM_LSB(LowPriority0_0, Weight));
4359                qib_write_kreg_port(ppd, regno, val);
4360        }
4361        if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
4362                struct qib_devdata *dd = ppd->dd;
4363                unsigned long flags;
4364
4365                spin_lock_irqsave(&dd->sendctrl_lock, flags);
4366                ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
4367                qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4368                qib_write_kreg(dd, kr_scratch, 0);
4369                spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4370        }
4371}
4372
4373static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
4374{
4375        switch (which) {
4376        case QIB_IB_TBL_VL_HIGH_ARB:
4377                get_vl_weights(ppd, krp_highprio_0, t);
4378                break;
4379
4380        case QIB_IB_TBL_VL_LOW_ARB:
4381                get_vl_weights(ppd, krp_lowprio_0, t);
4382                break;
4383
4384        default:
4385                return -EINVAL;
4386        }
4387        return 0;
4388}
4389
4390static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
4391{
4392        switch (which) {
4393        case QIB_IB_TBL_VL_HIGH_ARB:
4394                set_vl_weights(ppd, krp_highprio_0, t);
4395                break;
4396
4397        case QIB_IB_TBL_VL_LOW_ARB:
4398                set_vl_weights(ppd, krp_lowprio_0, t);
4399                break;
4400
4401        default:
4402                return -EINVAL;
4403        }
4404        return 0;
4405}
4406
4407static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
4408                                    u32 updegr, u32 egrhd, u32 npkts)
4409{
4410        /*
4411         * Need to write timeout register before updating rcvhdrhead to ensure
4412         * that the timer is enabled on reception of a packet.
4413         */
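        /*
         * Nonzero bits of hd above IBA7322_HDRHEAD_PKTINT_SHIFT request
         * a receive-timeout adjustment.
         */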
4414        if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
4415                adjust_rcv_timeout(rcd, npkts);
4416        if (updegr)
4417                qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
4418        qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4419        qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4420}
4421
4422static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
4423{
4424        u32 head, tail;
4425
4426        head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
4427        if (rcd->rcvhdrtail_kvaddr)
4428                tail = qib_get_rcvhdrtail(rcd);
4429        else
4430                tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
4431        return head == tail;
4432}
4433
4434#define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
4435        QIB_RCVCTRL_CTXT_DIS | \
4436        QIB_RCVCTRL_TIDFLOW_ENB | \
4437        QIB_RCVCTRL_TIDFLOW_DIS | \
4438        QIB_RCVCTRL_TAILUPD_ENB | \
4439        QIB_RCVCTRL_TAILUPD_DIS | \
4440        QIB_RCVCTRL_INTRAVAIL_ENB | \
4441        QIB_RCVCTRL_INTRAVAIL_DIS | \
4442        QIB_RCVCTRL_BP_ENB | \
4443        QIB_RCVCTRL_BP_DIS)
4444
4445#define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
4446        QIB_RCVCTRL_CTXT_DIS | \
4447        QIB_RCVCTRL_PKEY_DIS | \
4448        QIB_RCVCTRL_PKEY_ENB)
4449
4450/*
4451 * Modify the RCVCTRL register in a chip-specific way. This
4452 * is a function because bit positions and (future) register
4453 * locations are chip-specific, but the needed operations are
4454 * generic. <op> is a bit-mask because we often want to
4455 * do multiple modifications.
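 * For example, passing (QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_INTRAVAIL_ENB)
 * enables a context and arms its receive-available interrupt in a
 * single call.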
4456 */
4457static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
4458                             int ctxt)
4459{
4460        struct qib_devdata *dd = ppd->dd;
4461        struct qib_ctxtdata *rcd;
4462        u64 mask, val;
4463        unsigned long flags;
4464
4465        spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
4466
4467        if (op & QIB_RCVCTRL_TIDFLOW_ENB)
4468                dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
4469        if (op & QIB_RCVCTRL_TIDFLOW_DIS)
4470                dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
4471        if (op & QIB_RCVCTRL_TAILUPD_ENB)
4472                dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4473        if (op & QIB_RCVCTRL_TAILUPD_DIS)
4474                dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
4475        if (op & QIB_RCVCTRL_PKEY_ENB)
4476                ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4477        if (op & QIB_RCVCTRL_PKEY_DIS)
4478                ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
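        /* ctxt < 0 applies the operation to all configured contexts at once */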
4479        if (ctxt < 0) {
4480                mask = (1ULL << dd->ctxtcnt) - 1;
4481                rcd = NULL;
4482        } else {
4483                mask = (1ULL << ctxt);
4484                rcd = dd->rcd[ctxt];
4485        }
4486        if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
4487                ppd->p_rcvctrl |=
4488                        (mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4489                if (!(dd->flags & QIB_NODMA_RTAIL)) {
4490                        op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
4491                        dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4492                }
4493                /* Write these registers before the context is enabled. */
4494                qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
4495                                    rcd->rcvhdrqtailaddr_phys);
4496                qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
4497                                    rcd->rcvhdrq_phys);
4498                rcd->seq_cnt = 1;
4499        }
4500        if (op & QIB_RCVCTRL_CTXT_DIS)
4501                ppd->p_rcvctrl &=
4502                        ~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4503        if (op & QIB_RCVCTRL_BP_ENB)
4504                dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
4505        if (op & QIB_RCVCTRL_BP_DIS)
4506                dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
4507        if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
4508                dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
4509        if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
4510                dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
4511        /*
4512         * Decide which registers to write depending on the ops enabled.
4513         * Special case is "flush" (no bits set at all)
4514         * which needs to write both.
4515         */
4516        if (op == 0 || (op & RCVCTRL_COMMON_MODS))
4517                qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
4518        if (op == 0 || (op & RCVCTRL_PORT_MODS))
4519                qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
4520        if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
4521                /*
4522                 * Init the context registers also; if we were
4523                 * disabled, tail and head should both be zero
4524                 * already from the enable, but since we don't
4525                 * know, we have to do it explicitly.
4526                 */
4527                val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
4528                qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
4529
4530                /* be sure the enabling write was seen; hd/tl should be 0 */
4531                (void) qib_read_kreg32(dd, kr_scratch);
4532                val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
4533                dd->rcd[ctxt]->head = val;
4534                /* If kctxt, interrupt on next receive. */
4535                if (ctxt < dd->first_user_ctxt)
4536                        val |= dd->rhdrhead_intr_off;
4537                qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4538        } else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
4539                dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
4540                /* arm rcv interrupt */
4541                val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
4542                qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4543        }
4544        if (op & QIB_RCVCTRL_CTXT_DIS) {
4545                unsigned f;
4546
4547                /* Now that the context is disabled, clear these registers. */
4548                if (ctxt >= 0) {
4549                        qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
4550                        qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
4551                        for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4552                                qib_write_ureg(dd, ur_rcvflowtable + f,
4553                                               TIDFLOW_ERRBITS, ctxt);
4554                } else {
4555                        unsigned i;
4556
4557                        for (i = 0; i < dd->cfgctxts; i++) {
4558                                qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
4559                                                    i, 0);
4560                                qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
4561                                for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4562                                        qib_write_ureg(dd, ur_rcvflowtable + f,
4563                                                       TIDFLOW_ERRBITS, i);
4564                        }
4565                }
4566        }
4567        spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
4568}
4569
4570/*
4571 * Modify the SENDCTRL register in a chip-specific way. This
4572 * is a function because there are multiple such registers with
4573 * slightly different layouts.
4574 * The chip doesn't allow back-to-back sendctrl writes, so write
4575 * the scratch register after writing sendctrl.
4576 *
4577 * Which register is written depends on the operation.
4578 * Most operate on the common register, while
4579 * SEND_ENB and SEND_DIS operate on the per-port ones.
4580 * SEND_ENB is included in common because it can change SPCL_TRIG.
4581 */
4582#define SENDCTRL_COMMON_MODS (\
4583        QIB_SENDCTRL_CLEAR | \
4584        QIB_SENDCTRL_AVAIL_DIS | \
4585        QIB_SENDCTRL_AVAIL_ENB | \
4586        QIB_SENDCTRL_AVAIL_BLIP | \
4587        QIB_SENDCTRL_DISARM | \
4588        QIB_SENDCTRL_DISARM_ALL | \
4589        QIB_SENDCTRL_SEND_ENB)
4590
4591#define SENDCTRL_PORT_MODS (\
4592        QIB_SENDCTRL_CLEAR | \
4593        QIB_SENDCTRL_SEND_ENB | \
4594        QIB_SENDCTRL_SEND_DIS | \
4595        QIB_SENDCTRL_FLUSH)
4596
4597static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
4598{
4599        struct qib_devdata *dd = ppd->dd;
4600        u64 tmp_dd_sendctrl;
4601        unsigned long flags;
4602
4603        spin_lock_irqsave(&dd->sendctrl_lock, flags);
4604
4605        /* First the dd ones that are "sticky", saved in shadow */
4606        if (op & QIB_SENDCTRL_CLEAR)
4607                dd->sendctrl = 0;
4608        if (op & QIB_SENDCTRL_AVAIL_DIS)
4609                dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4610        else if (op & QIB_SENDCTRL_AVAIL_ENB) {
4611                dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
4612                if (dd->flags & QIB_USE_SPCL_TRIG)
4613                        dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
4614        }
4615
4616        /* Then the ppd ones that are "sticky", saved in shadow */
4617        if (op & QIB_SENDCTRL_SEND_DIS)
4618                ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
4619        else if (op & QIB_SENDCTRL_SEND_ENB)
4620                ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
4621
4622        if (op & QIB_SENDCTRL_DISARM_ALL) {
4623                u32 i, last;
4624
4625                tmp_dd_sendctrl = dd->sendctrl;
4626                last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
4627                /*
4628                 * Disarm any buffers that are not yet launched,
4629                 * disabling updates until done.
4630                 */
4631                tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4632                for (i = 0; i < last; i++) {
4633                        qib_write_kreg(dd, kr_sendctrl,
4634                                       tmp_dd_sendctrl |
4635                                       SYM_MASK(SendCtrl, Disarm) | i);
4636                        qib_write_kreg(dd, kr_scratch, 0);
4637                }
4638        }
4639
4640        if (op & QIB_SENDCTRL_FLUSH) {
4641                u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
4642
4643                /*
4644                 * Now drain all the fifos.  The Abort bit should never be
4645                 * needed, so for now, at least, we don't use it.
4646                 */
4647                tmp_ppd_sendctrl |=
4648                        SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
4649                        SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
4650                        SYM_MASK(SendCtrl_0, TxeBypassIbc);
4651                qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
4652                qib_write_kreg(dd, kr_scratch, 0);
4653        }
4654
4655        tmp_dd_sendctrl = dd->sendctrl;
4656
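        /* for DISARM, the target buffer number rides in the low bits of op */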
4657        if (op & QIB_SENDCTRL_DISARM)
4658                tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
4659                        ((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
4660                         SYM_LSB(SendCtrl, DisarmSendBuf));
4661        if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
4662            (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
4663                tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4664
4665        if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
4666                qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
4667                qib_write_kreg(dd, kr_scratch, 0);
4668        }
4669
4670        if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
4671                qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4672                qib_write_kreg(dd, kr_scratch, 0);
4673        }
4674
4675        if (op & QIB_SENDCTRL_AVAIL_BLIP) {
4676                qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
4677                qib_write_kreg(dd, kr_scratch, 0);
4678        }
4679
4680        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4681
4682        if (op & QIB_SENDCTRL_FLUSH) {
4683                u32 v;
4684                /*
4685                 * Ensure writes have hit the chip, then do a few
4686                 * more reads, to allow DMA of the pioavail registers
4687                 * to occur, so the in-memory copy is in sync with
4688                 * the chip.  Not always safe to sleep.
4689                 */
4690                v = qib_read_kreg32(dd, kr_scratch);
4691                qib_write_kreg(dd, kr_scratch, v);
4692                v = qib_read_kreg32(dd, kr_scratch);
4693                qib_write_kreg(dd, kr_scratch, v);
4694                qib_read_kreg32(dd, kr_scratch);
4695        }
4696}
4697
4698#define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
4699#define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
4700#define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */
4701
4702/**
4703 * qib_portcntr_7322 - read a per-port chip counter
4704 * @ppd: the qlogic_ib pport
4705 * @reg: the counter to read (not a chip offset)
4706 */
4707static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
4708{
4709        struct qib_devdata *dd = ppd->dd;
4710        u64 ret = 0ULL;
4711        u16 creg;
4712        /* 0xffff for unimplemented or synthesized counters */
4713        static const u32 xlator[] = {
4714                [QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
4715                [QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
4716                [QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
4717                [QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
4718                [QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
4719                [QIBPORTCNTR_SENDSTALL] = crp_sendstall,
4720                [QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
4721                [QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
4722                [QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
4723                [QIBPORTCNTR_RCVEBP] = crp_rcvebp,
4724                [QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
4725                [QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
4726                [QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed  for 7322 */
4727                [QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
4728                [QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
4729                [QIBPORTCNTR_ERRICRC] = crp_erricrc,
4730                [QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
4731                [QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
4732                [QIBPORTCNTR_BADFORMAT] = crp_badformat,
4733                [QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
4734                [QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
4735                [QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
4736                [QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
4737                [QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
4738                [QIBPORTCNTR_ERRLINK] = crp_errlink,
4739                [QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
4740                [QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
4741                [QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
4742                [QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
4743                [QIBPORTCNTR_ERRPKEY] = crp_errpkey,
4744                /*
4745                 * the next 3 aren't really counters, but were implemented
4746                 * as counters in older chips, so this code still accesses
4747                 * them as though they were counters.
4748                 */
4749                [QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
4750                [QIBPORTCNTR_PSSTART] = krp_psstart,
4751                [QIBPORTCNTR_PSSTAT] = krp_psstat,
4752                /* pseudo-counter, summed for all ports */
4753                [QIBPORTCNTR_KHDROVFL] = 0xffff,
4754        };
4755
4756        if (reg >= ARRAY_SIZE(xlator)) {
4757                qib_devinfo(ppd->dd->pcidev,
4758                         "Unimplemented portcounter %u\n", reg);
4759                goto done;
4760        }
4761        creg = xlator[reg] & _PORT_CNTR_IDXMASK;
4762
4763        /* handle non-counters and special cases first */
4764        if (reg == QIBPORTCNTR_KHDROVFL) {
4765                int i;
4766
4767                /* sum over all kernel contexts (skip if mini_init) */
4768                for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
4769                        struct qib_ctxtdata *rcd = dd->rcd[i];
4770
4771                        if (!rcd || rcd->ppd != ppd)
4772                                continue;
4773                        ret += read_7322_creg32(dd, cr_base_egrovfl + i);
4774                }
4775                goto done;
4776        } else if (reg == QIBPORTCNTR_RXDROPPKT) {
4777                /*
4778                 * Used as part of the synthesis of port_rcv_errors
4779                 * in the verbs code for IBTA counters.  Not needed for 7322,
4780                 * because all the errors are already counted by other cntrs.
4781                 */
4782                goto done;
4783        } else if (reg == QIBPORTCNTR_PSINTERVAL ||
4784                   reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
4785                /* were counters in older chips, now per-port kernel regs */
4786                ret = qib_read_kreg_port(ppd, creg);
4787                goto done;
4788        }
4789
4790        /*
4791         * Only fast increment counters are 64 bits; use 32 bit reads to
4792         * avoid two independent reads when on Opteron.
4793         */
4794        if (xlator[reg] & _PORT_64BIT_FLAG)
4795                ret = read_7322_creg_port(ppd, creg);
4796        else
4797                ret = read_7322_creg32_port(ppd, creg);
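        /*
         * While a delta measurement is in progress (ibdeltainprog),
         * "ret -= ret - snap" collapses to the snapshot value, so
         * errors accumulated during the transition are not reported.
         */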
4798        if (creg == crp_ibsymbolerr) {
4799                if (ppd->cpspec->ibdeltainprog)
4800                        ret -= ret - ppd->cpspec->ibsymsnap;
4801                ret -= ppd->cpspec->ibsymdelta;
4802        } else if (creg == crp_iblinkerrrecov) {
4803                if (ppd->cpspec->ibdeltainprog)
4804                        ret -= ret - ppd->cpspec->iblnkerrsnap;
4805                ret -= ppd->cpspec->iblnkerrdelta;
4806        } else if (creg == crp_errlink)
4807                ret -= ppd->cpspec->ibmalfdelta;
4808        else if (creg == crp_iblinkdown)
4809                ret += ppd->cpspec->iblnkdowndelta;
4810done:
4811        return ret;
4812}
4813
4814/*
4815 * Device counter names (not port-specific), one line per stat,
4816 * single string.  Used by utilities like ipathstats to print the stats
4817 * in a way which works for different versions of drivers, without changing
4818 * the utility.  Names need to be 12 chars or less (w/o newline), for proper
4819 * display by the utility.
4820 * Non-error counters are first.
4821 * Start of "error" counters is indicated by a leading "E " on the first
4822 * "error" counter, and doesn't count in label length.
4823 * The EgrOvfl list needs to be last so we truncate them at the configured
4824 * context count for the device.
4825 * cntr7322indices contains the corresponding register indices.
4826 */
4827static const char cntr7322names[] =
4828        "Interrupts\n"
4829        "HostBusStall\n"
4830        "E RxTIDFull\n"
4831        "RxTIDInvalid\n"
4832        "RxTIDFloDrop\n" /* 7322 only */
4833        "Ctxt0EgrOvfl\n"
4834        "Ctxt1EgrOvfl\n"
4835        "Ctxt2EgrOvfl\n"
4836        "Ctxt3EgrOvfl\n"
4837        "Ctxt4EgrOvfl\n"
4838        "Ctxt5EgrOvfl\n"
4839        "Ctxt6EgrOvfl\n"
4840        "Ctxt7EgrOvfl\n"
4841        "Ctxt8EgrOvfl\n"
4842        "Ctxt9EgrOvfl\n"
4843        "Ctx10EgrOvfl\n"
4844        "Ctx11EgrOvfl\n"
4845        "Ctx12EgrOvfl\n"
4846        "Ctx13EgrOvfl\n"
4847        "Ctx14EgrOvfl\n"
4848        "Ctx15EgrOvfl\n"
4849        "Ctx16EgrOvfl\n"
4850        "Ctx17EgrOvfl\n"
4851        ;
4852
4853static const u32 cntr7322indices[] = {
4854        cr_lbint | _PORT_64BIT_FLAG,
4855        cr_lbstall | _PORT_64BIT_FLAG,
4856        cr_tidfull,
4857        cr_tidinvalid,
4858        cr_rxtidflowdrop,
4859        cr_base_egrovfl + 0,
4860        cr_base_egrovfl + 1,
4861        cr_base_egrovfl + 2,
4862        cr_base_egrovfl + 3,
4863        cr_base_egrovfl + 4,
4864        cr_base_egrovfl + 5,
4865        cr_base_egrovfl + 6,
4866        cr_base_egrovfl + 7,
4867        cr_base_egrovfl + 8,
4868        cr_base_egrovfl + 9,
4869        cr_base_egrovfl + 10,
4870        cr_base_egrovfl + 11,
4871        cr_base_egrovfl + 12,
4872        cr_base_egrovfl + 13,
4873        cr_base_egrovfl + 14,
4874        cr_base_egrovfl + 15,
4875        cr_base_egrovfl + 16,
4876        cr_base_egrovfl + 17,
4877};
4878
4879/*
4880 * same as cntr7322names and cntr7322indices, but for port-specific counters.
4881 * portcntr7322indices is somewhat complicated by some registers needing
4882 * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
4883 */
4884static const char portcntr7322names[] =
4885        "TxPkt\n"
4886        "TxFlowPkt\n"
4887        "TxWords\n"
4888        "RxPkt\n"
4889        "RxFlowPkt\n"
4890        "RxWords\n"
4891        "TxFlowStall\n"
4892        "TxDmaDesc\n"  /* 7220 and 7322-only */
4893        "E RxDlidFltr\n"  /* 7220 and 7322-only */
4894        "IBStatusChng\n"
4895        "IBLinkDown\n"
4896        "IBLnkRecov\n"
4897        "IBRxLinkErr\n"
4898        "IBSymbolErr\n"
4899        "RxLLIErr\n"
4900        "RxBadFormat\n"
4901        "RxBadLen\n"
4902        "RxBufOvrfl\n"
4903        "RxEBP\n"
4904        "RxFlowCtlErr\n"
4905        "RxICRCerr\n"
4906        "RxLPCRCerr\n"
4907        "RxVCRCerr\n"
4908        "RxInvalLen\n"
4909        "RxInvalPKey\n"
4910        "RxPktDropped\n"
4911        "TxBadLength\n"
4912        "TxDropped\n"
4913        "TxInvalLen\n"
4914        "TxUnderrun\n"
4915        "TxUnsupVL\n"
4916        "RxLclPhyErr\n" /* 7220 and 7322-only from here down */
4917        "RxVL15Drop\n"
4918        "RxVlErr\n"
4919        "XcessBufOvfl\n"
4920        "RxQPBadCtxt\n" /* 7322-only from here down */
4921        "TXBadHeader\n"
4922        ;
4923
4924static const u32 portcntr7322indices[] = {
4925        QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
4926        crp_pktsendflow,
4927        QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
4928        QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
4929        crp_pktrcvflowctrl,
4930        QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
4931        QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
4932        crp_txsdmadesc | _PORT_64BIT_FLAG,
4933        crp_rxdlidfltr,
4934        crp_ibstatuschange,
4935        QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
4936        QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
4937        QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
4938        QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
4939        QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
4940        QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
4941        QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
4942        QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
4943        QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
4944        crp_rcvflowctrlviol,
4945        QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
4946        QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
4947        QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
4948        QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
4949        QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
4950        QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
4951        crp_txminmaxlenerr,
4952        crp_txdroppedpkt,
4953        crp_txlenerr,
4954        crp_txunderrun,
4955        crp_txunsupvl,
4956        QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
4957        QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
4958        QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
4959        QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
4960        crp_rxqpinvalidctxt,
4961        crp_txhdrerr,
4962};
4963
4964/* do all the setup to make the counter reads efficient later */
4965static void init_7322_cntrnames(struct qib_devdata *dd)
4966{
4967        int i, j = 0;
4968        char *s;
4969
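        /*
         * Count counter names, truncating the trailing per-context
         * EgrOvfl entries at the configured context count; j tracks
         * how many EgrOvfl names have been accepted so far.
         */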
4970        for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
4971             i++) {
4972                /* we always have at least one counter before the egrovfl */
4973                if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
4974                        j = 1;
4975                s = strchr(s + 1, '\n');
4976                if (s && j)
4977                        j++;
4978        }
4979        dd->cspec->ncntrs = i;
4980        if (!s)
4981                /* full list; size is without terminating null */
4982                dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
4983        else
4984                dd->cspec->cntrnamelen = 1 + s - cntr7322names;
4985        dd->cspec->cntrs = kmalloc_array(dd->cspec->ncntrs, sizeof(u64),
4986                                         GFP_KERNEL);
4987
4988        for (i = 0, s = (char *)portcntr7322names; s; i++)
4989                s = strchr(s + 1, '\n');
4990        dd->cspec->nportcntrs = i - 1;
4991        dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
4992        for (i = 0; i < dd->num_pports; ++i) {
4993                dd->pport[i].cpspec->portcntrs =
4994                        kmalloc_array(dd->cspec->nportcntrs, sizeof(u64),
4995                                      GFP_KERNEL);
4996        }
4997}
4998
4999static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
5000                              u64 **cntrp)
5001{
5002        u32 ret;
5003
5004        if (namep) {
5005                ret = dd->cspec->cntrnamelen;
5006                if (pos >= ret)
5007                        ret = 0; /* final read after getting everything */
5008                else
5009                        *namep = (char *) cntr7322names;
5010        } else {
5011                u64 *cntr = dd->cspec->cntrs;
5012                int i;
5013
5014                ret = dd->cspec->ncntrs * sizeof(u64);
5015                if (!cntr || pos >= ret) {
5016                        /* everything read, or couldn't get memory */
5017                        ret = 0;
5018                        goto done;
5019                }
5020                *cntrp = cntr;
5021                for (i = 0; i < dd->cspec->ncntrs; i++)
5022                        if (cntr7322indices[i] & _PORT_64BIT_FLAG)
5023                                *cntr++ = read_7322_creg(dd,
5024                                                         cntr7322indices[i] &
5025                                                         _PORT_CNTR_IDXMASK);
5026                        else
5027                                *cntr++ = read_7322_creg32(dd,
5028                                                           cntr7322indices[i]);
5029        }
5030done:
5031        return ret;
5032}
5033
5034static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
5035                                  char **namep, u64 **cntrp)
5036{
5037        u32 ret;
5038
5039        if (namep) {
5040                ret = dd->cspec->portcntrnamelen;
5041                if (pos >= ret)
5042                        ret = 0; /* final read after getting everything */
5043                else
5044                        *namep = (char *)portcntr7322names;
5045        } else {
5046                struct qib_pportdata *ppd = &dd->pport[port];
5047                u64 *cntr = ppd->cpspec->portcntrs;
5048                int i;
5049
5050                ret = dd->cspec->nportcntrs * sizeof(u64);
5051                if (!cntr || pos >= ret) {
5052                        /* everything read, or couldn't get memory */
5053                        ret = 0;
5054                        goto done;
5055                }
5056                *cntrp = cntr;
5057                for (i = 0; i < dd->cspec->nportcntrs; i++) {
5058                        if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
5059                                *cntr++ = qib_portcntr_7322(ppd,
5060                                        portcntr7322indices[i] &
5061                                        _PORT_CNTR_IDXMASK);
5062                        else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
5063                                *cntr++ = read_7322_creg_port(ppd,
5064                                           portcntr7322indices[i] &
5065                                            _PORT_CNTR_IDXMASK);
5066                        else
5067                                *cntr++ = read_7322_creg32_port(ppd,
5068                                           portcntr7322indices[i]);
5069                }
5070        }
5071done:
5072        return ret;
5073}
5074
5075/**
5076 * qib_get_7322_faststats - get word counters from chip before they overflow
5077 * @t: contains a pointer to the qlogic_ib device qib_devdata
5078 *
5079 * VESTIGIAL: the IBA7322 has no "small fast counters", so the only
5080 * real purpose of this function is to maintain the notion of
5081 * "active time", which in turn is only logged into the eeprom,
5082 * which we don't have, yet, for 7322-based boards.
5083 *
5084 * called from add_timer
5085 */
5086static void qib_get_7322_faststats(struct timer_list *t)
5087{
5088        struct qib_devdata *dd = from_timer(dd, t, stats_timer);
5089        struct qib_pportdata *ppd;
5090        unsigned long flags;
5091        u64 traffic_wds;
5092        int pidx;
5093
5094        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5095                ppd = dd->pport + pidx;
5096
5097                /*
5098                 * If the port isn't enabled or operational, or if diags
5099                 * is running (which can cause memory diags to fail),
5100                 * skip this port this time.
5101                 */
5102                if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
5103                    || dd->diag_client)
5104                        continue;
5105
5106                /*
5107                 * Maintain an activity timer, based on traffic
5108                 * exceeding a threshold, so we need to check the word-counts
5109                 * even if they are 64-bit.
5110                 */
5111                traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
5112                        qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
5113                spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
5114                traffic_wds -= ppd->dd->traffic_wds;
5115                ppd->dd->traffic_wds += traffic_wds;
5116                spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
5117                if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
5118                                                QIB_IB_QDR) &&
5119                    (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
5120                                    QIBL_LINKACTIVE)) &&
5121                    ppd->cpspec->qdr_dfe_time &&
5122                    time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) {
5123                        ppd->cpspec->qdr_dfe_on = 0;
5124
5125                        qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
5126                                            ppd->dd->cspec->r1 ?
5127                                            QDR_STATIC_ADAPT_INIT_R1 :
5128                                            QDR_STATIC_ADAPT_INIT);
5129                        force_h1(ppd);
5130                }
5131        }
5132        mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
5133}
5134
5135/*
5136 * If we were using MSIx, try to fall back to INTx.
5137 */
5138static int qib_7322_intr_fallback(struct qib_devdata *dd)
5139{
5140        if (!dd->cspec->num_msix_entries)
5141                return 0; /* already using INTx */
5142
5143        qib_devinfo(dd->pcidev,
5144                "MSIx interrupt not detected, trying INTx interrupts\n");
5145        qib_7322_free_irq(dd);
5146        if (pci_alloc_irq_vectors(dd->pcidev, 1, 1, PCI_IRQ_LEGACY) < 0)
5147                qib_dev_err(dd, "Failed to enable INTx\n");
5148        qib_setup_7322_interrupt(dd, 0);
5149        return 1;
5150}
5151
5152/*
5153 * Reset the XGXS (between serdes and IBC).  Slightly less intrusive
5154 * than resetting the IBC or external link state, and useful in some
5155 * cases to cause some retraining.  To do this right, we reset IBC
5156 * as well, then return to the previous state (which may still be in reset).
5157 * NOTE: some callers of this "know" this writes the current value
5158 * of cpspec->ibcctrl_a as part of its operation, so if that changes,
5159 * check all callers.
5160 */
5161static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
5162{
5163        u64 val;
5164        struct qib_devdata *dd = ppd->dd;
5165        const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
5166                SYM_MASK(IBPCSConfig_0, xcv_treset) |
5167                SYM_MASK(IBPCSConfig_0, tx_rx_reset);
5168
5169        val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
5170        qib_write_kreg(dd, kr_hwerrmask,
5171                       dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
5172        qib_write_kreg_port(ppd, krp_ibcctrl_a,
5173                            ppd->cpspec->ibcctrl_a &
5174                            ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
5175
5176        qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
5177        qib_read_kreg32(dd, kr_scratch);
5178        qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
5179        qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
5180        qib_write_kreg(dd, kr_scratch, 0ULL);
5181        qib_write_kreg(dd, kr_hwerrclear,
5182                       SYM_MASK(HwErrClear, statusValidNoEopClear));
5183        qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
5184}
5185
5186/*
5187 * This code for non-IBTA-compliant IB speed negotiation is only known to
5188 * work for the SDR to DDR transition, and only between an HCA and a switch
5189 * with recent firmware.  It is based on observed heuristics, rather than
5190 * actual knowledge of the non-compliant speed negotiation.
5191 * It has a number of hard-coded fields, since the hope is to rewrite this
5192 * when a spec is available on how the negotiation is intended to work.
5193 */
5194static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
5195                                 u32 dcnt, u32 *data)
5196{
5197        int i;
5198        u64 pbc;
5199        u32 __iomem *piobuf;
5200        u32 pnum, control, len;
5201        struct qib_devdata *dd = ppd->dd;
5202
5203        i = 0;
5204        len = 7 + dcnt + 1; /* 7-dword header, dcnt dwords of data, 1 dword ICRC */
5205        control = qib_7322_setpbc_control(ppd, len, 0, 15);
5206        pbc = ((u64) control << 32) | len;
5207        while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
5208                if (i++ > 15)
5209                        return;
5210                udelay(2);
5211        }
5212        /* disable header check on this packet, since it can't be valid */
5213        dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
5214        writeq(pbc, piobuf);
5215        qib_flush_wc();
5216        qib_pio_copy(piobuf + 2, hdr, 7);
5217        qib_pio_copy(piobuf + 9, data, dcnt);
5218        if (dd->flags & QIB_USE_SPCL_TRIG) {
5219                u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
5220
5221                qib_flush_wc();
5222                __raw_writel(0xaebecede, piobuf + spcl_off);
5223        }
5224        qib_flush_wc();
5225        qib_sendbuf_done(dd, pnum);
5226        /* and re-enable hdr check */
5227        dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
5228}
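/*
 * Editorial worked example (not driver code): for the 0x40-dword MAD
 * payloads passed in by qib_autoneg_7322_send() below, the PBC is
 *
 *	len = 7 + 0x40 + 1 = 0x48 dwords (header + data + ICRC)
 *	pbc = ((u64)control << 32) | 0x48
 *
 * i.e. the send length occupies the low 32 bits of the PBC word and the
 * control bits from qib_7322_setpbc_control() the high 32 bits.
 */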
5229
5230/*
5231 * _start packet gets sent twice at start, _done gets sent twice at end
5232 */
5233static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
5234{
5235        struct qib_devdata *dd = ppd->dd;
5236        static u32 swapped;
5237        u32 dw, i, hcnt, dcnt, *data;
5238        static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
5239        static u32 madpayload_start[0x40] = {
5240                0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5241                0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5242                0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
5243                };
5244        static u32 madpayload_done[0x40] = {
5245                0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5246                0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5247                0x40000001, 0x1388, 0x15e, /* rest 0's */
5248                };
5249
5250        dcnt = ARRAY_SIZE(madpayload_start);
5251        hcnt = ARRAY_SIZE(hdr);
5252        if (!swapped) {
5253                /* for maintainability, do it at runtime */
5254                for (i = 0; i < hcnt; i++) {
5255                        dw = (__force u32) cpu_to_be32(hdr[i]);
5256                        hdr[i] = dw;
5257                }
5258                for (i = 0; i < dcnt; i++) {
5259                        dw = (__force u32) cpu_to_be32(madpayload_start[i]);
5260                        madpayload_start[i] = dw;
5261                        dw = (__force u32) cpu_to_be32(madpayload_done[i]);
5262                        madpayload_done[i] = dw;
5263                }
5264                swapped = 1;
5265        }
5266
5267        data = which ? madpayload_done : madpayload_start;
5268
5269        autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5270        qib_read_kreg64(dd, kr_scratch);
5271        udelay(2);
5272        autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5273        qib_read_kreg64(dd, kr_scratch);
5274        udelay(2);
5275}
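/*
 * Editorial note: the one-time cpu_to_be32() pass above rewrites the
 * header and payload tables into wire (big-endian) order in place.  On
 * a little-endian host, e.g. hdr[2] = 0x6400abba is stored back as
 * 0xbaab0064; on a big-endian host the values are unchanged.  The
 * static 'swapped' flag makes sure the swap happens only once.
 */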
5276
5277/*
5278 * Do the absolute minimum to cause an IB speed change, and make it
5279 * ready, but don't actually trigger the change.  The caller will
5280 * do that when ready (if link is in Polling training state, it will
5281 * happen immediately, otherwise when link next goes down)
5282 *
5283 * This routine should only be used as part of the DDR autonegotiation
5284 * code for devices that are not compliant with IB 1.2 (or code that
5285 * fixes things up for same).
5286 *
5287 * When the link has gone down and autoneg is enabled, or autoneg has
5288 * failed and we give up until next time, we set both speeds, and
5289 * then we want IBTA enabled as well as "use max enabled speed".
5290 */
5291static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
5292{
5293        u64 newctrlb;
5294
5295        newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
5296                                    IBA7322_IBC_IBTA_1_2_MASK |
5297                                    IBA7322_IBC_MAX_SPEED_MASK);
5298
5299        if (speed & (speed - 1)) /* multiple speeds */
5300                newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
5301                                    IBA7322_IBC_IBTA_1_2_MASK |
5302                                    IBA7322_IBC_MAX_SPEED_MASK;
5303        else
5304                newctrlb |= speed == QIB_IB_QDR ?
5305                        IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
5306                        ((speed == QIB_IB_DDR ?
5307                          IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));
5308
5309        if (newctrlb == ppd->cpspec->ibcctrl_b)
5310                return;
5311
5312        ppd->cpspec->ibcctrl_b = newctrlb;
5313        qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
5314        qib_write_kreg(ppd->dd, kr_scratch, 0);
5315}
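/*
 * Editorial sketch (disabled, not driver code): 'speed & (speed - 1)'
 * above is the standard test for more than one bit set; it works
 * because each QIB_IB_* speed is a distinct single bit (assumed here
 * to be 0x1/0x2/0x4 for SDR/DDR/QDR).
 */
#if 0
static bool example_multiple_speeds_enabled(u32 speed)
{
	/* e.g. QIB_IB_SDR | QIB_IB_DDR (0x3): 0x3 & 0x2 != 0 -> true */
	return (speed & (speed - 1)) != 0;
}
#endif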
5316
5317/*
5318 * This routine is only used when we are not talking to another
5319 * IB 1.2-compliant device that we think can do DDR.
5320 * (This includes all existing switch chips as of Oct 2007.)
5321 * 1.2-compliant devices go directly to DDR prior to reaching INIT
5322 */
5323static void try_7322_autoneg(struct qib_pportdata *ppd)
5324{
5325        unsigned long flags;
5326
5327        spin_lock_irqsave(&ppd->lflags_lock, flags);
5328        ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
5329        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5330        qib_autoneg_7322_send(ppd, 0);
5331        set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5332        qib_7322_mini_pcs_reset(ppd);
5333        /* 2 msec is minimum length of a poll cycle */
5334        queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
5335                           msecs_to_jiffies(2));
5336}
5337
5338/*
5339 * Handle the empirically determined mechanism for auto-negotiation
5340 * of DDR speed with switches.
5341 */
5342static void autoneg_7322_work(struct work_struct *work)
5343{
5344        struct qib_pportdata *ppd;
5345        u32 i;
5346        unsigned long flags;
5347
5348        ppd = container_of(work, struct qib_chippport_specific,
5349                            autoneg_work.work)->ppd;
5350
5351        /*
5352         * Busy-wait for this first part; it should be at most a
5353         * few hundred usec, since we scheduled ourselves for 2 msec.
5354         */
5355        for (i = 0; i < 25; i++) {
5356                if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
5357                     == IB_7322_LT_STATE_POLLQUIET) {
5358                        qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
5359                        break;
5360                }
5361                udelay(100);
5362        }
5363
5364        if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
5365                goto done; /* we got there early, or were told to stop */
5366
5367        /* we expect this to time out */
5368        if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5369                               !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5370                               msecs_to_jiffies(90)))
5371                goto done;
5372        qib_7322_mini_pcs_reset(ppd);
5373
5374        /* we expect this to time out */
5375        if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5376                               !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5377                               msecs_to_jiffies(1700)))
5378                goto done;
5379        qib_7322_mini_pcs_reset(ppd);
5380
5381        set_7322_ibspeed_fast(ppd, QIB_IB_SDR);
5382
5383        /*
5384         * Wait up to 250 msec for link to train and get to INIT at DDR;
5385         * this should terminate early.
5386         */
5387        wait_event_timeout(ppd->cpspec->autoneg_wait,
5388                !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5389                msecs_to_jiffies(250));
5390done:
5391        if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
5392                spin_lock_irqsave(&ppd->lflags_lock, flags);
5393                ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
5394                if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
5395                        ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
5396                        ppd->cpspec->autoneg_tries = 0;
5397                }
5398                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5399                set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5400        }
5401}
5402
5403/*
5404 * This routine is used to request the IPG setting in the QLogic switch.
5405 * Only called if r1.
5406 */
5407static void try_7322_ipg(struct qib_pportdata *ppd)
5408{
5409        struct qib_ibport *ibp = &ppd->ibport_data;
5410        struct ib_mad_send_buf *send_buf;
5411        struct ib_mad_agent *agent;
5412        struct ib_smp *smp;
5413        unsigned delay;
5414        int ret;
5415
5416        agent = ibp->rvp.send_agent;
5417        if (!agent)
5418                goto retry;
5419
5420        send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
5421                                      IB_MGMT_MAD_DATA, GFP_ATOMIC,
5422                                      IB_MGMT_BASE_VERSION);
5423        if (IS_ERR(send_buf))
5424                goto retry;
5425
5426        if (!ibp->smi_ah) {
5427                struct ib_ah *ah;
5428
5429                ah = qib_create_qp0_ah(ibp, be16_to_cpu(IB_LID_PERMISSIVE));
5430                if (IS_ERR(ah))
5431                        ret = PTR_ERR(ah);
5432                else {
5433                        send_buf->ah = ah;
5434                        ibp->smi_ah = ibah_to_rvtah(ah);
5435                        ret = 0;
5436                }
5437        } else {
5438                send_buf->ah = &ibp->smi_ah->ibah;
5439                ret = 0;
5440        }
5441
5442        smp = send_buf->mad;
5443        smp->base_version = IB_MGMT_BASE_VERSION;
5444        smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
5445        smp->class_version = 1;
5446        smp->method = IB_MGMT_METHOD_SEND;
5447        smp->hop_cnt = 1;
5448        smp->attr_id = QIB_VENDOR_IPG;
5449        smp->attr_mod = 0;
5450
5451        if (!ret)
5452                ret = ib_post_send_mad(send_buf, NULL);
5453        if (ret)
5454                ib_free_send_mad(send_buf);
5455retry:
5456        delay = 2 << ppd->cpspec->ipg_tries;
5457        queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
5458                           msecs_to_jiffies(delay));
5459}
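/*
 * Editorial note: the retry delay above grows exponentially,
 * delay = 2 << ipg_tries, i.e. 2, 4, 8, ... msec per attempt;
 * ipg_7322_work() below stops rescheduling after 10 tries.
 */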
5460
5461/*
5462 * Timeout handler for setting IPG.
5463 * Only called if r1.
5464 */
5465static void ipg_7322_work(struct work_struct *work)
5466{
5467        struct qib_pportdata *ppd;
5468
5469        ppd = container_of(work, struct qib_chippport_specific,
5470                           ipg_work.work)->ppd;
5471        if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
5472            && ++ppd->cpspec->ipg_tries <= 10)
5473                try_7322_ipg(ppd);
5474}
5475
5476static u32 qib_7322_iblink_state(u64 ibcs)
5477{
5478        u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);
5479
5480        switch (state) {
5481        case IB_7322_L_STATE_INIT:
5482                state = IB_PORT_INIT;
5483                break;
5484        case IB_7322_L_STATE_ARM:
5485                state = IB_PORT_ARMED;
5486                break;
5487        case IB_7322_L_STATE_ACTIVE:
5488        case IB_7322_L_STATE_ACT_DEFER:
5489                state = IB_PORT_ACTIVE;
5490                break;
5491        default:
5492                fallthrough;
5493        case IB_7322_L_STATE_DOWN:
5494                state = IB_PORT_DOWN;
5495                break;
5496        }
5497        return state;
5498}
5499
5500/* returns the IBTA port state, rather than the IBC link training state */
5501static u8 qib_7322_phys_portstate(u64 ibcs)
5502{
5503        u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
5504        return qib_7322_physportstate[state];
5505}
5506
5507static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
5508{
5509        int ret = 0, symadj = 0;
5510        unsigned long flags;
5511        int mult;
5512
5513        spin_lock_irqsave(&ppd->lflags_lock, flags);
5514        ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
5515        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5516
5517        /* Update our picture of width and speed from chip */
5518        if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
5519                ppd->link_speed_active = QIB_IB_QDR;
5520                mult = 4;
5521        } else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
5522                ppd->link_speed_active = QIB_IB_DDR;
5523                mult = 2;
5524        } else {
5525                ppd->link_speed_active = QIB_IB_SDR;
5526                mult = 1;
5527        }
5528        if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
5529                ppd->link_width_active = IB_WIDTH_4X;
5530                mult *= 4;
5531        } else
5532                ppd->link_width_active = IB_WIDTH_1X;
5533        ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
5534
5535        if (!ibup) {
5536                u64 clr;
5537
5538                /* Link went down. */
5539                /* do IPG MAD again after linkdown, even if last time failed */
5540                ppd->cpspec->ipg_tries = 0;
5541                clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
5542                        (SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
5543                         SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
5544                if (clr)
5545                        qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
5546                if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5547                                     QIBL_IB_AUTONEG_INPROG)))
5548                        set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5549                if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5550                        struct qib_qsfp_data *qd =
5551                                &ppd->cpspec->qsfp_data;
5552                        /* unlock the Tx settings, speed may change */
5553                        qib_write_kreg_port(ppd, krp_tx_deemph_override,
5554                                SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
5555                                reset_tx_deemphasis_override));
5556                        qib_cancel_sends(ppd);
5557                        /* on link down, ensure sane pcs state */
5558                        qib_7322_mini_pcs_reset(ppd);
5559                        /* schedule the qsfp refresh, which should
5560                         * turn the link off */
5561                        if (ppd->dd->flags & QIB_HAS_QSFP) {
5562                                qd->t_insert = jiffies;
5563                                queue_work(ib_wq, &qd->work);
5564                        }
5565                        spin_lock_irqsave(&ppd->sdma_lock, flags);
5566                        if (__qib_sdma_running(ppd))
5567                                __qib_sdma_process_event(ppd,
5568                                        qib_sdma_event_e70_go_idle);
5569                        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
5570                }
5571                clr = read_7322_creg32_port(ppd, crp_iblinkdown);
5572                if (clr == ppd->cpspec->iblnkdownsnap)
5573                        ppd->cpspec->iblnkdowndelta++;
5574        } else {
5575                if (qib_compat_ddr_negotiate &&
5576                    !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5577                                     QIBL_IB_AUTONEG_INPROG)) &&
5578                    ppd->link_speed_active == QIB_IB_SDR &&
5579                    (ppd->link_speed_enabled & QIB_IB_DDR)
5580                    && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
5581                        /* we are SDR, and auto-negotiation enabled */
5582                        ++ppd->cpspec->autoneg_tries;
5583                        if (!ppd->cpspec->ibdeltainprog) {
5584                                ppd->cpspec->ibdeltainprog = 1;
5585                                ppd->cpspec->ibsymdelta +=
5586                                        read_7322_creg32_port(ppd,
5587                                                crp_ibsymbolerr) -
5588                                                ppd->cpspec->ibsymsnap;
5589                                ppd->cpspec->iblnkerrdelta +=
5590                                        read_7322_creg32_port(ppd,
5591                                                crp_iblinkerrrecov) -
5592                                                ppd->cpspec->iblnkerrsnap;
5593                        }
5594                        try_7322_autoneg(ppd);
5595                        ret = 1; /* no other IB status change processing */
5596                } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5597                           ppd->link_speed_active == QIB_IB_SDR) {
5598                        qib_autoneg_7322_send(ppd, 1);
5599                        set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5600                        qib_7322_mini_pcs_reset(ppd);
5601                        udelay(2);
5602                        ret = 1; /* no other IB status change processing */
5603                } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5604                           (ppd->link_speed_active & QIB_IB_DDR)) {
5605                        spin_lock_irqsave(&ppd->lflags_lock, flags);
5606                        ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
5607                                         QIBL_IB_AUTONEG_FAILED);
5608                        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5609                        ppd->cpspec->autoneg_tries = 0;
5610                        /* re-enable SDR, for next link down */
5611                        set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5612                        wake_up(&ppd->cpspec->autoneg_wait);
5613                        symadj = 1;
5614                } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
5615                        /*
5616                         * Clear autoneg failure flag, and do setup
5617                         * so we'll try next time link goes down and
5618                         * back to INIT (possibly connected to a
5619                         * different device).
5620                         */
5621                        spin_lock_irqsave(&ppd->lflags_lock, flags);
5622                        ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
5623                        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5624                        ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
5625                        symadj = 1;
5626                }
5627                if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5628                        symadj = 1;
5629                        if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
5630                                try_7322_ipg(ppd);
5631                        if (!ppd->cpspec->recovery_init)
5632                                setup_7322_link_recovery(ppd, 0);
5633                        ppd->cpspec->qdr_dfe_time = jiffies +
5634                                msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
5635                }
5636                ppd->cpspec->ibmalfusesnap = 0;
5637                ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
5638                        crp_errlink);
5639        }
5640        if (symadj) {
5641                ppd->cpspec->iblnkdownsnap =
5642                        read_7322_creg32_port(ppd, crp_iblinkdown);
5643                if (ppd->cpspec->ibdeltainprog) {
5644                        ppd->cpspec->ibdeltainprog = 0;
5645                        ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
5646                                crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
5647                        ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
5648                                crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
5649                }
5650        } else if (!ibup && qib_compat_ddr_negotiate &&
5651                   !ppd->cpspec->ibdeltainprog &&
5652                        !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5653                ppd->cpspec->ibdeltainprog = 1;
5654                ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
5655                        crp_ibsymbolerr);
5656                ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
5657                        crp_iblinkerrrecov);
5658        }
5659
5660        if (!ret)
5661                qib_setup_7322_setextled(ppd, ibup);
5662        return ret;
5663}
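/*
 * Editorial note on the snapshot/delta bookkeeping above: when driver-
 * initiated retraining starts, ibdeltainprog is set and the current
 * symbol-error and link-error-recovery counters are snapshotted; when
 * it ends, the difference is accumulated in ibsymdelta/iblnkerrdelta,
 * so that errors the driver itself provoked can later be backed out of
 * the counter values it reports.
 */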
5664
5665/*
5666 * Does read/modify/write to appropriate registers to
5667 * set output and direction bits selected by mask.
5668 * these are in their canonical positions (e.g. lsb of
5669 * dir will end up in D48 of extctrl on existing chips).
5670 * returns contents of GP Inputs.
5671 */
5672static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
5673{
5674        u64 read_val, new_out;
5675        unsigned long flags;
5676
5677        if (mask) {
5678                /* some bits being written, lock access to GPIO */
5679                dir &= mask;
5680                out &= mask;
5681                spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
5682                dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
5683                dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
5684                new_out = (dd->cspec->gpio_out & ~mask) | out;
5685
5686                qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
5687                qib_write_kreg(dd, kr_gpio_out, new_out);
5688                dd->cspec->gpio_out = new_out;
5689                spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
5690        }
5691        /*
5692         * It is unlikely that a read at this time would get valid
5693         * data on a pin whose direction line was set in the same
5694         * call to this function. We include the read here because
5695         * that allows us to potentially combine a change on one pin with
5696         * a read on another, and because the old code did something like
5697         * this.
5698         */
5699        read_val = qib_read_kreg64(dd, kr_extstatus);
5700        return SYM_FIELD(read_val, EXTStatus, GPIOIn);
5701}
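/*
 * Usage sketch (editorial): a zero mask turns this into a pure read,
 * leaving direction and output state untouched, e.g.
 *
 *	pins = gpio_7322_mod(dd, 0, 0, 0);
 *
 * which is exactly how qib_7322_eeprom_wen() below samples the
 * (active-low, hence the inversion there) write-enable pin.
 */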
5702
5703/* Enable writes to config EEPROM, if possible. Returns previous state */
5704static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
5705{
5706        int prev_wen;
5707        u32 mask;
5708
5709        mask = 1 << QIB_EEPROM_WEN_NUM;
5710        prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
5711        gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
5712
5713        return prev_wen & 1;
5714}
5715
5716/*
5717 * Read fundamental info we need to use the chip.  These are
5718 * the registers that describe chip capabilities, and are
5719 * saved in shadow registers.
5720 */
5721static void get_7322_chip_params(struct qib_devdata *dd)
5722{
5723        u64 val;
5724        u32 piobufs;
5725        int mtu;
5726
5727        dd->palign = qib_read_kreg32(dd, kr_pagealign);
5728
5729        dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
5730
5731        dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
5732        dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
5733        dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
5734        dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
5735        dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
5736
5737        val = qib_read_kreg64(dd, kr_sendpiobufcnt);
5738        dd->piobcnt2k = val & ~0U;
5739        dd->piobcnt4k = val >> 32;
5740        val = qib_read_kreg64(dd, kr_sendpiosize);
5741        dd->piosize2k = val & ~0U;
5742        dd->piosize4k = val >> 32;
5743
5744        mtu = ib_mtu_enum_to_int(qib_ibmtu);
5745        if (mtu == -1)
5746                mtu = QIB_DEFAULT_MTU;
5747        dd->pport[0].ibmtu = (u32)mtu;
5748        dd->pport[1].ibmtu = (u32)mtu;
5749
5750        /* these may be adjusted in init_chip_wc_pat() */
5751        dd->pio2kbase = (u32 __iomem *)
5752                ((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
5753        dd->pio4kbase = (u32 __iomem *)
5754                ((char __iomem *) dd->kregbase +
5755                 (dd->piobufbase >> 32));
5756        /*
5757         * 4K buffers take 2 pages; we use roundup just to be
5758         * paranoid; we calculate it once here, rather than on
5759 * every buf allocate
5760         */
5761        dd->align4k = ALIGN(dd->piosize4k, dd->palign);
5762
5763        piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;
5764
5765        dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
5766                (sizeof(u64) * BITS_PER_BYTE / 2);
5767}
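/*
 * Editorial worked example: sizeof(u64) * BITS_PER_BYTE / 2 is 32, so
 * each 64-bit pioavail shadow register covers 32 buffers (two bits per
 * buffer) and the computation above reduces to
 *
 *	pioavregs = ALIGN(piobufs, 32) / 32
 *
 * e.g. a hypothetical 162 total buffers rounds up to 192, needing 6
 * shadow registers.
 */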
5768
5769/*
5770 * The chip base addresses in cspec and cpspec have to be set
5771 * after possible init_chip_wc_pat(), rather than in
5772 * get_7322_chip_params(), so split out as separate function
5773 */
5774static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
5775{
5776        u32 cregbase;
5777
5778        cregbase = qib_read_kreg32(dd, kr_counterregbase);
5779
5780        dd->cspec->cregbase = (u64 __iomem *)(cregbase +
5781                (char __iomem *)dd->kregbase);
5782
5783        dd->egrtidbase = (u64 __iomem *)
5784                ((char __iomem *) dd->kregbase + dd->rcvegrbase);
5785
5786        /* port registers are defined as relative to base of chip */
5787        dd->pport[0].cpspec->kpregbase =
5788                (u64 __iomem *)((char __iomem *)dd->kregbase);
5789        dd->pport[1].cpspec->kpregbase =
5790                (u64 __iomem *)(dd->palign +
5791                (char __iomem *)dd->kregbase);
5792        dd->pport[0].cpspec->cpregbase =
5793                (u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
5794                kr_counterregbase) + (char __iomem *)dd->kregbase);
5795        dd->pport[1].cpspec->cpregbase =
5796                (u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
5797                kr_counterregbase) + (char __iomem *)dd->kregbase);
5798}
5799
5800/*
5801 * This is a fairly special-purpose observer, so we only support
5802 * the port-specific parts of SendCtrl
5803 */
5804
5805#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) |           \
5806                           SYM_MASK(SendCtrl_0, SDmaEnable) |           \
5807                           SYM_MASK(SendCtrl_0, SDmaIntEnable) |        \
5808                           SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
5809                           SYM_MASK(SendCtrl_0, SDmaHalt) |             \
5810                           SYM_MASK(SendCtrl_0, IBVLArbiterEn) |        \
5811                           SYM_MASK(SendCtrl_0, ForceCreditUpToDate))
5812
5813static int sendctrl_hook(struct qib_devdata *dd,
5814                         const struct diag_observer *op, u32 offs,
5815                         u64 *data, u64 mask, int only_32)
5816{
5817        unsigned long flags;
5818        unsigned idx;
5819        unsigned pidx;
5820        struct qib_pportdata *ppd = NULL;
5821        u64 local_data, all_bits;
5822
5823        /*
5824         * The fixed correspondence between Physical ports and pports is
5825         * severed. We need to hunt for the ppd that corresponds
5826         * to the offset we got. And we have to do that without admitting
5827         * we know the stride, apparently.
5828         */
5829        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5830                u64 __iomem *psptr;
5831                u32 psoffs;
5832
5833                ppd = dd->pport + pidx;
5834                if (!ppd->cpspec->kpregbase)
5835                        continue;
5836
5837                psptr = ppd->cpspec->kpregbase + krp_sendctrl;
5838                psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
5839                if (psoffs == offs)
5840                        break;
5841        }
5842
5843        /* If pport is not being managed by driver, just avoid shadows. */
5844        if (pidx >= dd->num_pports)
5845                ppd = NULL;
5846
5847        /* In any case, "idx" is flat index in kreg space */
5848        idx = offs / sizeof(u64);
5849
5850        all_bits = ~0ULL;
5851        if (only_32)
5852                all_bits >>= 32;
5853
5854        spin_lock_irqsave(&dd->sendctrl_lock, flags);
5855        if (!ppd || (mask & all_bits) != all_bits) {
5856                /*
5857                 * At least some mask bits are zero, so we need
5858                 * to read. The judgement call is whether from
5859                 * reg or shadow. First-cut: read reg, and complain
5860                 * if any bits which should be shadowed are different
5861                 * from their shadowed value.
5862                 */
5863                if (only_32)
5864                        local_data = (u64)qib_read_kreg32(dd, idx);
5865                else
5866                        local_data = qib_read_kreg64(dd, idx);
5867                *data = (local_data & ~mask) | (*data & mask);
5868        }
5869        if (mask) {
5870                /*
5871                 * At least some mask bits are one, so we need
5872                 * to write, but only shadow some bits.
5873                 */
5874                u64 sval, tval; /* Shadowed, transient */
5875
5876                /*
5877                 * New shadow val is bits we don't want to touch,
5878                 * ORed with bits we do, that are intended for shadow.
5879                 */
5880                if (ppd) {
5881                        sval = ppd->p_sendctrl & ~mask;
5882                        sval |= *data & SENDCTRL_SHADOWED & mask;
5883                        ppd->p_sendctrl = sval;
5884                } else
5885                        sval = *data & SENDCTRL_SHADOWED & mask;
5886                tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
5887                qib_write_kreg(dd, idx, tval);
5888                qib_write_kreg(dd, kr_scratch, 0ULL);
5889        }
5890        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
5891        return only_32 ? 4 : 8;
5892}
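/*
 * Editorial note on the write path above: sval is the new shadow value
 * (previously shadowed bits outside the mask, plus the shadowed subset
 * of the bits being written), while tval adds the non-shadowed write
 * bits on top.  tval is what reaches the register; only sval, i.e. the
 * SENDCTRL_SHADOWED subset, is remembered in ppd->p_sendctrl.
 */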
5893
5894static const struct diag_observer sendctrl_0_observer = {
5895        sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
5896        KREG_IDX(SendCtrl_0) * sizeof(u64)
5897};
5898
5899static const struct diag_observer sendctrl_1_observer = {
5900        sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
5901        KREG_IDX(SendCtrl_1) * sizeof(u64)
5902};
5903
5904static ushort sdma_fetch_prio = 8;
5905module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
5906MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");
5907
5908/* Besides logging QSFP events, we set appropriate TxDDS values */
5909static void init_txdds_table(struct qib_pportdata *ppd, int override);
5910
5911static void qsfp_7322_event(struct work_struct *work)
5912{
5913        struct qib_qsfp_data *qd;
5914        struct qib_pportdata *ppd;
5915        unsigned long pwrup;
5916        unsigned long flags;
5917        int ret;
5918        u32 le2;
5919
5920        qd = container_of(work, struct qib_qsfp_data, work);
5921        ppd = qd->ppd;
5922        pwrup = qd->t_insert +
5923                msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC);
5924
5925        /* Delay for 20 msecs to allow ModPrs resistor to set up */
5926        mdelay(QSFP_MODPRS_LAG_MSEC);
5927
5928        if (!qib_qsfp_mod_present(ppd)) {
5929                ppd->cpspec->qsfp_data.modpresent = 0;
5930                /* Set the physical link to disabled */
5931                qib_set_ib_7322_lstate(ppd, 0,
5932                                       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
5933                spin_lock_irqsave(&ppd->lflags_lock, flags);
5934                ppd->lflags &= ~QIBL_LINKV;
5935                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5936        } else {
5937                /*
5938                 * Some QSFPs not only do not respond until the full power-up
5939                 * time, but may behave badly if we try. So hold off responding
5940                 * to insertion.
5941                 */
5942                while (1) {
5943                        if (time_is_before_jiffies(pwrup))
5944                                break;
5945                        msleep(20);
5946                }
5947
5948                ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
5949
5950                /*
5951                 * Need to change LE2 back to defaults if we couldn't
5952                 * read the cable type (to handle cable swaps), so do this
5953                 * even on failure to read cable information.  We don't
5954                 * get here for QME, so IS_QME check not needed here.
5955                 */
5956                if (!ret && !ppd->dd->cspec->r1) {
5957                        if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
5958                                le2 = LE2_QME;
5959                        else if (qd->cache.atten[1] >= qib_long_atten &&
5960                                 QSFP_IS_CU(qd->cache.tech))
5961                                le2 = LE2_5m;
5962                        else
5963                                le2 = LE2_DEFAULT;
5964                } else
5965                        le2 = LE2_DEFAULT;
5966                ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
5967                /*
5968                 * We always change parameters, since we can choose
5969                 * values for cables without eeproms, and the cable may have
5970                 * changed from a cable with full or partial eeprom content
5971                 * to one with partial or no content.
5972                 */
5973                init_txdds_table(ppd, 0);
5974                /* The physical link is re-enabled only when the
5975                 * previous state was DISABLED and the VALID bit is not
5976                 * set.  This should only happen when the cable has been
5977                 * physically pulled. */
5978                if (!ppd->cpspec->qsfp_data.modpresent &&
5979                    (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) {
5980                        ppd->cpspec->qsfp_data.modpresent = 1;
5981                        qib_set_ib_7322_lstate(ppd, 0,
5982                                QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
5983                        spin_lock_irqsave(&ppd->lflags_lock, flags);
5984                        ppd->lflags |= QIBL_LINKV;
5985                        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5986                }
5987        }
5988}
5989
5990/*
5991 * There is little we can do but complain to the user if QSFP
5992 * initialization fails.
5993 */
5994static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
5995{
5996        unsigned long flags;
5997        struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
5998        struct qib_devdata *dd = ppd->dd;
5999        u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;
6000
6001        mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
6002        qd->ppd = ppd;
6003        qib_qsfp_init(qd, qsfp_7322_event);
6004        spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
6005        dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
6006        dd->cspec->gpio_mask |= mod_prs_bit;
6007        qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
6008        qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
6009        spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
6010}
6011
6012/*
6013 * called at device initialization time, and also if the txselect
6014 * module parameter is changed.  This is used for cables that don't
6015 * have valid QSFP EEPROMs (not present, or attenuation is zero).
6016 * We initialize to the default, then if there is a specific
6017 * unit,port match, we use that (and set it immediately, for the
6018 * current speed, if the link is at INIT or better).
6019 * String format is "default# unit#,port#=# ... u,p=#"; the separator
6020 * must be a SPACE character.  A newline terminates.  The u,p=# tuples
6021 * may optionally have the form "u,p=#,#", where the final # is the H1 value.
6022 * The last specific match is used (actually, all are used, but last
6023 * one is the one that winds up set); if none at all, fall back on default.
6024 */
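/*
 * Illustrative example (editorial, hypothetical values):
 * txselect="2 1,1=5 1,2=6,4" selects table entry 2 as the default,
 * entry 5 for unit 1 port 1, and entry 6 with an H1 value of 4 for
 * unit 1 port 2.
 */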
6025static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
6026{
6027        char *nxt, *str;
6028        u32 pidx, unit, port, deflt, h1;
6029        unsigned long val;
6030        int any = 0, seth1;
6031        int txdds_size;
6032
6033        str = txselect_list;
6034
6035        /* default number is validated in setup_txselect() */
6036        deflt = simple_strtoul(str, &nxt, 0);
6037        for (pidx = 0; pidx < dd->num_pports; ++pidx)
6038                dd->pport[pidx].cpspec->no_eep = deflt;
6039
6040        txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
6041        if (IS_QME(dd) || IS_QMH(dd))
6042                txdds_size += TXDDS_MFG_SZ;
6043
6044        while (*nxt && nxt[1]) {
6045                str = ++nxt;
6046                unit = simple_strtoul(str, &nxt, 0);
6047                if (nxt == str || !*nxt || *nxt != ',') {
6048                        while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6049                                ;
6050                        continue;
6051                }
6052                str = ++nxt;
6053                port = simple_strtoul(str, &nxt, 0);
6054                if (nxt == str || *nxt != '=') {
6055                        while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6056                                ;
6057                        continue;
6058                }
6059                str = ++nxt;
6060                val = simple_strtoul(str, &nxt, 0);
6061                if (nxt == str) {
6062                        while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6063                                ;
6064                        continue;
6065                }
6066                if (val >= txdds_size)
6067                        continue;
6068                seth1 = 0;
6069                h1 = 0; /* gcc thinks it might be used uninitialized */
6070                if (*nxt == ',' && nxt[1]) {
6071                        str = ++nxt;
6072                        h1 = (u32)simple_strtoul(str, &nxt, 0);
6073                        if (nxt == str)
6074                                while (*nxt && *nxt++ != ' ') /* skip */
6075                                        ;
6076                        else
6077                                seth1 = 1;
6078                }
6079                for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
6080                     ++pidx) {
6081                        struct qib_pportdata *ppd = &dd->pport[pidx];
6082
6083                        if (ppd->port != port || !ppd->link_speed_supported)
6084                                continue;
6085                        ppd->cpspec->no_eep = val;
6086                        if (seth1)
6087                                ppd->cpspec->h1_val = h1;
6088                        /* now change the IBC and serdes, overriding generic */
6089                        init_txdds_table(ppd, 1);
6090                        /* Re-enable the physical state machine on mezz boards
6091                         * now that the correct settings have been set.
6092                         * QSFP boards are handled by the QSFP event handler */
6093                        if (IS_QMH(dd) || IS_QME(dd))
6094                                qib_set_ib_7322_lstate(ppd, 0,
6095                                            QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
6096                        any++;
6097                }
6098                if (*nxt == '\n')
6099                        break; /* done */
6100        }
6101        if (change && !any) {
6102                /* no specific setting, use the default.
6103                 * Change the IBC and serdes, but since it's
6104                 * general, don't override specific settings.
6105                 */
6106                for (pidx = 0; pidx < dd->num_pports; ++pidx)
6107                        if (dd->pport[pidx].link_speed_supported)
6108                                init_txdds_table(&dd->pport[pidx], 0);
6109        }
6110}
6111
6112/* handle the txselect parameter changing */
6113static int setup_txselect(const char *str, const struct kernel_param *kp)
6114{
6115        struct qib_devdata *dd;
6116        unsigned long index, val;
6117        char *n;
6118
6119        if (strlen(str) >= ARRAY_SIZE(txselect_list)) {
6120                pr_info("txselect_values string too long\n");
6121                return -ENOSPC;
6122        }
6123        val = simple_strtoul(str, &n, 0);
6124        if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
6125                                TXDDS_MFG_SZ)) {
6126                pr_info("txselect_values must start with a number < %d\n",
6127                        TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
6128                return -EINVAL;
6129        }
6130        strncpy(txselect_list, str, ARRAY_SIZE(txselect_list) - 1);
6131
6132        xa_for_each(&qib_dev_table, index, dd)
6133                if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
6134                        set_no_qsfp_atten(dd, 1);
6135        return 0;
6136}
6137
6138/*
6139 * Write the final few registers that depend on some of the
6140 * init setup.  Done late in init, just before bringing up
6141 * the serdes.
6142 */
6143static int qib_late_7322_initreg(struct qib_devdata *dd)
6144{
6145        int ret = 0, n;
6146        u64 val;
6147
6148        qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
6149        qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
6150        qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
6151        qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
6152        val = qib_read_kreg64(dd, kr_sendpioavailaddr);
6153        if (val != dd->pioavailregs_phys) {
6154                qib_dev_err(dd,
6155                        "Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
6156                        (unsigned long) dd->pioavailregs_phys,
6157                        (unsigned long long) val);
6158                ret = -EINVAL;
6159        }
6160
6161        n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
6162        qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
6163        /* driver sends also get pkey, lid, etc. checking, to catch bugs */
6164        qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);
6165
6166        qib_register_observer(dd, &sendctrl_0_observer);
6167        qib_register_observer(dd, &sendctrl_1_observer);
6168
6169        dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
6170        qib_write_kreg(dd, kr_control, dd->control);
6171        /*
6172         * Set SendDmaFetchPriority and init Tx params, including
6173         * QSFP handler on boards that have QSFP.
6174         * First set our default attenuation entry for cables that
6175         * don't have valid attenuation.
6176         */
6177        set_no_qsfp_atten(dd, 0);
6178        for (n = 0; n < dd->num_pports; ++n) {
6179                struct qib_pportdata *ppd = dd->pport + n;
6180
6181                qib_write_kreg_port(ppd, krp_senddmaprioritythld,
6182                                    sdma_fetch_prio & 0xf);
6183                /* Initialize qsfp if present on board. */
6184                if (dd->flags & QIB_HAS_QSFP)
6185                        qib_init_7322_qsfp(ppd);
6186        }
6187        dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
6188        qib_write_kreg(dd, kr_control, dd->control);
6189
6190        return ret;
6191}
6192
6193/* per-IB-port errors */
6194#define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
6195        MASK_ACROSS(8, 15))
6196#define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
6197#define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
6198        MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
6199        MASK_ACROSS(0, 11))
6200
6201/*
6202 * Write the initialization per-port registers that need to be done at
6203 * driver load and after reset completes (i.e., that aren't done as part
6204 * of other init procedures called from qib_init.c).
6205 * Some of these should be redundant on reset, but play safe.
6206 */
6207static void write_7322_init_portregs(struct qib_pportdata *ppd)
6208{
6209        u64 val;
6210        int i;
6211
6212        if (!ppd->link_speed_supported) {
6213                /* no buffer credits for this port */
6214                for (i = 1; i < 8; i++)
6215                        qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
6216                qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
6217                qib_write_kreg(ppd->dd, kr_scratch, 0);
6218                return;
6219        }
6220
6221        /*
6222         * Set the number of supported virtual lanes in IBC,
6223         * for flow control packet handling on unsupported VLs
6224         */
6225        val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
6226        val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
6227        val |= (u64)(ppd->vls_supported - 1) <<
6228                SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
6229        qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
6230
6231        qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);
6232
6233        /* enable tx header checking */
6234        qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
6235                            IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
6236                            IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);
6237
6238        qib_write_kreg_port(ppd, krp_ncmodectrl,
6239                SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));
6240
6241        /*
6242         * Unconditionally clear the bufmask bits.  If SDMA is
6243         * enabled, we'll set them appropriately later.
6244         */
6245        qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
6246        qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
6247        qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
6248        if (ppd->dd->cspec->r1)
6249                ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
6250}
6251
6252/*
6253 * Write the initialization per-device registers that need to be done at
6254 * driver load and after reset completes (i.e., that aren't done as part
6255 * of other init procedures called from qib_init.c).  Also write per-port
6256 * registers that are affected by overall device config, such as QP mapping.
6257 * Some of these should be redundant on reset, but play safe.
6258 */
6259static void write_7322_initregs(struct qib_devdata *dd)
6260{
6261        struct qib_pportdata *ppd;
6262        int i, pidx;
6263        u64 val;
6264
6265        /* Set Multicast QPs received by port 2 to map to context one. */
6266        qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);
6267
6268        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
6269                unsigned n, regno;
6270                unsigned long flags;
6271
6272                if (dd->n_krcv_queues < 2 ||
6273                        !dd->pport[pidx].link_speed_supported)
6274                        continue;
6275
6276                ppd = &dd->pport[pidx];
6277
6278                /* be paranoid against later code motion, etc. */
6279                spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
6280                ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
6281                spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
6282
6283                /* Initialize QP to context mapping */
6284                regno = krp_rcvqpmaptable;
6285                val = 0;
6286                if (dd->num_pports > 1)
6287                        n = dd->first_user_ctxt / dd->num_pports;
6288                else
6289                        n = dd->first_user_ctxt - 1;
6290                for (i = 0; i < 32; ) {
6291                        unsigned ctxt;
6292
6293                        if (dd->num_pports > 1)
6294                                ctxt = (i % n) * dd->num_pports + pidx;
6295                        else if (i % n)
6296                                ctxt = (i % n) + 1;
6297                        else
6298                                ctxt = ppd->hw_pidx;
6299                        val |= ctxt << (5 * (i % 6));
6300                        i++;
6301                        if (i % 6 == 0) {
6302                                qib_write_kreg_port(ppd, regno, val);
6303                                val = 0;
6304                                regno++;
6305                        }
6306                }
6307                qib_write_kreg_port(ppd, regno, val);
6308        }
6309
6310        /*
6311         * Set up interrupt mitigation for kernel contexts, but
6312         * not user contexts (user contexts use interrupts when
6313         * stalled waiting for any packet, so want those interrupts
6314         * right away).
6315         */
6316        for (i = 0; i < dd->first_user_ctxt; i++) {
6317                dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
6318                qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
6319        }
6320
6321        /*
6322         * Initialize as (disabled) rcvflow tables.  Application code
6323         * will set up each flow as it uses the flow.
6324         * Doesn't clear any of the error bits that might be set.
6325         */
6326        val = TIDFLOW_ERRBITS; /* these are W1C */
6327        for (i = 0; i < dd->cfgctxts; i++) {
6328                int flow;
6329
6330                for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
6331                        qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
6332        }
6333
6334        /*
6335         * Dual-port cards init to dual-port recovery, single-port cards
6336         * to the one port.  Dual-port cards may later adjust to 1 port,
6337         * and then back to dual port if both ports are connected.
6338         */
6339        if (dd->num_pports)
6340                setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
6341}
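/*
 * Editorial worked example for the QP-to-context map above: each
 * krp_rcvqpmaptable register packs six 5-bit context numbers,
 *
 *	val |= ctxt << (5 * (i % 6));
 *
 * so the 32 entries span six consecutive registers (the last only
 * partially filled).  On a dual-port card with n kernel contexts per
 * port, entry i maps to context (i % n) * num_pports + pidx.
 */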
6342
6343static int qib_init_7322_variables(struct qib_devdata *dd)
6344{
6345        struct qib_pportdata *ppd;
6346        unsigned features, pidx, sbufcnt;
6347        int ret, mtu;
6348        u32 sbufs, updthresh;
6349        resource_size_t vl15off;
6350
6351        /* pport structs are contiguous, allocated after devdata */
6352        ppd = (struct qib_pportdata *)(dd + 1);
6353        dd->pport = ppd;
6354        ppd[0].dd = dd;
6355        ppd[1].dd = dd;
6356
6357        dd->cspec = (struct qib_chip_specific *)(ppd + 2);
6358
6359        ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
6360        ppd[1].cpspec = &ppd[0].cpspec[1];
6361        ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
6362        ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */
6363
6364        spin_lock_init(&dd->cspec->rcvmod_lock);
6365        spin_lock_init(&dd->cspec->gpio_lock);
6366
6367        /* we haven't yet set QIB_PRESENT, so use read directly */
6368        dd->revision = readq(&dd->kregbase[kr_revision]);
6369
6370        if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
6371                qib_dev_err(dd,
6372                        "Revision register read failure, giving up initialization\n");
6373                ret = -ENODEV;
6374                goto bail;
6375        }
6376        dd->flags |= QIB_PRESENT;  /* now register routines work */
6377
6378        dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
6379        dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
6380        dd->cspec->r1 = dd->minrev == 1;
6381
6382        get_7322_chip_params(dd);
6383        features = qib_7322_boardname(dd);
6384
6385        /* now that piobcnt2k and 4k are set, we can allocate these */
6386        sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
6387                NUM_VL15_BUFS + BITS_PER_LONG - 1;
6388        sbufcnt /= BITS_PER_LONG;
6389        dd->cspec->sendchkenable =
6390                kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendchkenable),
6391                              GFP_KERNEL);
6392        dd->cspec->sendgrhchk =
6393                kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendgrhchk),
6394                              GFP_KERNEL);
6395        dd->cspec->sendibchk =
6396                kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendibchk),
6397                              GFP_KERNEL);
6398        if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
6399                !dd->cspec->sendibchk) {
6400                ret = -ENOMEM;
6401                goto bail;
6402        }
6403
6404        ppd = dd->pport;
6405
6406        /*
6407         * GPIO bits for TWSI data and clock,
6408         * used for serial EEPROM.
6409         */
6410        dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
6411        dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
6412        dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
6413
6414        dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
6415                QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
6416                QIB_HAS_THRESH_UPDATE |
6417                (sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
6418        dd->flags |= qib_special_trigger ?
6419                QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
6420
6421        /*
6422         * Set up initial values.  These may change when PAT is enabled, but
6423         * we need these to do initial chip register accesses.
6424         */
6425        qib_7322_set_baseaddrs(dd);
6426
6427        mtu = ib_mtu_enum_to_int(qib_ibmtu);
6428        if (mtu == -1)
6429                mtu = QIB_DEFAULT_MTU;
6430
6431        dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
6432        /* all hwerrors become interrupts, unless special purposed */
6433        dd->cspec->hwerrmask = ~0ULL;
6434        /*  link_recovery setup causes these errors, so ignore them,
6435         *  other than clearing them when they occur */
6436        dd->cspec->hwerrmask &=
6437                ~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
6438                  SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
6439                  HWE_MASK(LATriggered));
6440
6441        for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
6442                struct qib_chippport_specific *cp = ppd->cpspec;
6443
6444                ppd->link_speed_supported = features & PORT_SPD_CAP;
6445                features >>= PORT_SPD_CAP_SHIFT;
6446                if (!ppd->link_speed_supported) {
6447                        /* single port mode (7340, or configured) */
6448                        dd->skip_kctxt_mask |= 1 << pidx;
6449                        if (pidx == 0) {
6450                                /* Make sure port is disabled. */
6451                                qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6452                                qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6453                                ppd[0] = ppd[1];
6454                                dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6455                                                  IBSerdesPClkNotDetectMask_0)
6456                                                  | SYM_MASK(HwErrMask,
6457                                                  SDmaMemReadErrMask_0));
6458                                dd->cspec->int_enable_mask &= ~(
6459                                     SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
6460                                     SYM_MASK(IntMask, SDmaIdleIntMask_0) |
6461                                     SYM_MASK(IntMask, SDmaProgressIntMask_0) |
6462                                     SYM_MASK(IntMask, SDmaIntMask_0) |
6463                                     SYM_MASK(IntMask, ErrIntMask_0) |
6464                                     SYM_MASK(IntMask, SendDoneIntMask_0));
6465                        } else {
6466                                /* Make sure port is disabled. */
6467                                qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6468                                qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6469                                dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6470                                                  IBSerdesPClkNotDetectMask_1)
6471                                                  | SYM_MASK(HwErrMask,
6472                                                  SDmaMemReadErrMask_1));
6473                                dd->cspec->int_enable_mask &= ~(
6474                                     SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
6475                                     SYM_MASK(IntMask, SDmaIdleIntMask_1) |
6476                                     SYM_MASK(IntMask, SDmaProgressIntMask_1) |
6477                                     SYM_MASK(IntMask, SDmaIntMask_1) |
6478                                     SYM_MASK(IntMask, ErrIntMask_1) |
6479                                     SYM_MASK(IntMask, SendDoneIntMask_1));
6480                        }
6481                        continue;
6482                }
6483
6484                dd->num_pports++;
6485                ret = qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
6486                if (ret) {
6487                        dd->num_pports--;
6488                        goto bail;
6489                }
6490
6491                ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
6492                ppd->link_width_enabled = IB_WIDTH_4X;
6493                ppd->link_speed_enabled = ppd->link_speed_supported;
6494                /*
6495                 * Set the initial values to reasonable default, will be set
6496                 * for real when link is up.
6497                 */
6498                ppd->link_width_active = IB_WIDTH_4X;
6499                ppd->link_speed_active = QIB_IB_SDR;
6500                ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
6501                switch (qib_num_cfg_vls) {
6502                case 1:
6503                        ppd->vls_supported = IB_VL_VL0;
6504                        break;
6505                case 2:
6506                        ppd->vls_supported = IB_VL_VL0_1;
6507                        break;
6508                default:
6509                        qib_devinfo(dd->pcidev,
6510                                    "Invalid num_vls %u, using 4 VLs\n",
6511                                    qib_num_cfg_vls);
6512                        qib_num_cfg_vls = 4;
6513                        fallthrough;
6514                case 4:
6515                        ppd->vls_supported = IB_VL_VL0_3;
6516                        break;
6517                case 8:
6518                        if (mtu <= 2048)
6519                                ppd->vls_supported = IB_VL_VL0_7;
6520                        else {
6521                                qib_devinfo(dd->pcidev,
                                            "Invalid num_vls %u for MTU %d, using 4 VLs\n",
6523                                            qib_num_cfg_vls, mtu);
6524                                ppd->vls_supported = IB_VL_VL0_3;
6525                                qib_num_cfg_vls = 4;
6526                        }
6527                        break;
6528                }
6529                ppd->vls_operational = ppd->vls_supported;
6530
6531                init_waitqueue_head(&cp->autoneg_wait);
6532                INIT_DELAYED_WORK(&cp->autoneg_work,
6533                                  autoneg_7322_work);
6534                if (ppd->dd->cspec->r1)
6535                        INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);
6536
6537                /*
6538                 * For Mez and similar cards, no qsfp info, so do
6539                 * the "cable info" setup here.  Can be overridden
6540                 * in adapter-specific routines.
6541                 */
6542                if (!(dd->flags & QIB_HAS_QSFP)) {
6543                        if (!IS_QMH(dd) && !IS_QME(dd))
6544                                qib_devinfo(dd->pcidev,
6545                                        "IB%u:%u: Unknown mezzanine card type\n",
6546                                        dd->unit, ppd->port);
6547                        cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
6548                        /*
6549                         * Choose center value as default tx serdes setting
6550                         * until changed through module parameter.
6551                         */
6552                        ppd->cpspec->no_eep = IS_QMH(dd) ?
6553                                TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
6554                } else
6555                        cp->h1_val = H1_FORCE_VAL;
6556
6557                /* Avoid writes to chip for mini_init */
6558                if (!qib_mini_init)
6559                        write_7322_init_portregs(ppd);
6560
6561                timer_setup(&cp->chase_timer, reenable_chase, 0);
6562
6563                ppd++;
6564        }
6565
6566        dd->rcvhdrentsize = qib_rcvhdrentsize ?
6567                qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
6568        dd->rcvhdrsize = qib_rcvhdrsize ?
6569                qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
6570        dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
6571
6572        /* we always allocate at least 2048 bytes for eager buffers */
6573        dd->rcvegrbufsize = max(mtu, 2048);
6574        dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
6575
6576        qib_7322_tidtemplate(dd);
6577
6578        /*
6579         * We can request a receive interrupt for 1 or
6580         * more packets from current offset.
6581         */
6582        dd->rhdrhead_intr_off =
6583                (u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
6584
6585        /* setup the stats timer; the add_timer is done at end of init */
6586        timer_setup(&dd->stats_timer, qib_get_7322_faststats, 0);
6587
6588        dd->ureg_align = 0x10000;  /* 64KB alignment */
6589
6590        dd->piosize2kmax_dwords = dd->piosize2k >> 2;
6591
6592        qib_7322_config_ctxts(dd);
6593        qib_set_ctxtcnt(dd);
6594
6595        /*
6596         * We do not set WC on the VL15 buffers to avoid
6597         * a rare problem with unaligned writes from
6598         * interrupt-flushed store buffers, so we need
6599         * to map those separately here.  We can't solve
6600         * this for the rarely used mtrr case.
6601         */
6602        ret = init_chip_wc_pat(dd, 0);
6603        if (ret)
6604                goto bail;
6605
6606        /* vl15 buffers start just after the 4k buffers */
6607        vl15off = dd->physaddr + (dd->piobufbase >> 32) +
6608                  dd->piobcnt4k * dd->align4k;
        dd->piovl15base = ioremap(vl15off, NUM_VL15_BUFS * dd->align4k);
6611        if (!dd->piovl15base) {
6612                ret = -ENOMEM;
6613                goto bail;
6614        }
6615
6616        qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
6617
6618        ret = 0;
6619        if (qib_mini_init)
6620                goto bail;
6621        if (!dd->num_pports) {
6622                qib_dev_err(dd, "No ports enabled, giving up initialization\n");
                goto bail; /* no error, so user can still figure out the cause */
6624        }
6625
6626        write_7322_initregs(dd);
6627        ret = qib_create_ctxts(dd);
6628        init_7322_cntrnames(dd);
6629
6630        updthresh = 8U; /* update threshold */
6631
        /*
         * If SDMA is enabled, use all of the 4KB buffers for kernel SDMA,
         * but reserve the greater of the update threshold or 3 buffers
         * for other kernel use, such as sending SMI, MAD, and ACK
         * packets.  If SDMA is not enabled, all of the 4KB buffers go to
         * the kernel.  Reserving fewer buffers than the update threshold
         * could mean a long wait for an update.  Coded this way because
         * the update threshold sometimes changes for various reasons,
         * and this must remain robust.
         */
6642        if (dd->flags & QIB_HAS_SEND_DMA) {
6643                dd->cspec->sdmabufcnt = dd->piobcnt4k;
6644                sbufs = updthresh > 3 ? updthresh : 3;
6645        } else {
6646                dd->cspec->sdmabufcnt = 0;
6647                sbufs = dd->piobcnt4k;
6648        }
6649        dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
6650                dd->cspec->sdmabufcnt;
6651        dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
6652        dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
6653        dd->last_pio = dd->cspec->lastbuf_for_pio;
6654        dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
6655                dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
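        /*
         * Worked example of the split above, with hypothetical counts:
         * piobcnt2k = 128, piobcnt4k = 32, SDMA enabled, updthresh = 8.
         * Then sdmabufcnt = 32 and sbufs = 8, so lastbuf_for_pio =
         * 128 + 32 - 32 = 128 (decremented to 127, since the range is
         * inclusive) and lastctxt_piobuf = 128 - 8 = 120; with 16 user
         * contexts, each gets pbufsctxt = 120 / 16 = 7 buffers.
         */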
6656
        /*
         * If we have 16 user contexts, we will have 7 sbufs per
         * context, so reduce the update threshold to match.  We want
         * the update to arrive before we actually run out, so at low
         * pbufs/ctxt we give ourselves some margin.
         */
6663        if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
6664                updthresh = dd->pbufsctxt - 2;
6665        dd->cspec->updthresh_dflt = updthresh;
6666        dd->cspec->updthresh = updthresh;
6667
6668        /* before full enable, no interrupts, no locking needed */
6669        dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
6670                             << SYM_LSB(SendCtrl, AvailUpdThld)) |
6671                        SYM_MASK(SendCtrl, SendBufAvailPad64Byte);
6672
6673        dd->psxmitwait_supported = 1;
6674        dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
6675bail:
6676        if (!dd->ctxtcnt)
6677                dd->ctxtcnt = 1; /* for other initialization code */
6678
6679        return ret;
6680}
6681
6682static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
6683                                        u32 *pbufnum)
6684{
6685        u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
6686        struct qib_devdata *dd = ppd->dd;
6687
6688        /* last is same for 2k and 4k, because we use 4k if all 2k busy */
6689        if (pbc & PBC_7322_VL15_SEND) {
6690                first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
6691                last = first;
6692        } else {
6693                if ((plen + 1) > dd->piosize2kmax_dwords)
6694                        first = dd->piobcnt2k;
6695                else
6696                        first = 0;
6697                last = dd->cspec->lastbuf_for_pio;
6698        }
6699        return qib_getsendbuf_range(dd, pbufnum, first, last);
6700}
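
/*
 * Buffer numbering sketch with hypothetical counts: piobcnt2k = 128
 * and piobcnt4k = 32 give ordinary sends buffers 0..127 (2KB) and
 * 128..159 (4KB), so the dedicated per-port VL15 buffers computed
 * above are numbers 160 (port index 0) and 161 (port index 1).
 */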
6701
6702static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
6703                                     u32 start)
6704{
6705        qib_write_kreg_port(ppd, krp_psinterval, intv);
6706        qib_write_kreg_port(ppd, krp_psstart, start);
6707}
6708
6709/*
6710 * Must be called with sdma_lock held, or before init finished.
6711 */
6712static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
6713{
6714        qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
6715}
6716
6717/*
6718 * sdma_lock should be acquired before calling this routine
6719 */
6720static void dump_sdma_7322_state(struct qib_pportdata *ppd)
6721{
6722        u64 reg, reg1, reg2;
6723
6724        reg = qib_read_kreg_port(ppd, krp_senddmastatus);
6725        qib_dev_porterr(ppd->dd, ppd->port,
6726                "SDMA senddmastatus: 0x%016llx\n", reg);
6727
6728        reg = qib_read_kreg_port(ppd, krp_sendctrl);
6729        qib_dev_porterr(ppd->dd, ppd->port,
6730                "SDMA sendctrl: 0x%016llx\n", reg);
6731
6732        reg = qib_read_kreg_port(ppd, krp_senddmabase);
6733        qib_dev_porterr(ppd->dd, ppd->port,
6734                "SDMA senddmabase: 0x%016llx\n", reg);
6735
6736        reg = qib_read_kreg_port(ppd, krp_senddmabufmask0);
6737        reg1 = qib_read_kreg_port(ppd, krp_senddmabufmask1);
6738        reg2 = qib_read_kreg_port(ppd, krp_senddmabufmask2);
6739        qib_dev_porterr(ppd->dd, ppd->port,
6740                "SDMA senddmabufmask 0:%llx  1:%llx  2:%llx\n",
6741                 reg, reg1, reg2);
6742
6743        /* get bufuse bits, clear them, and print them again if non-zero */
6744        reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
6745        qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg);
6746        reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
        qib_write_kreg_port(ppd, krp_senddmabuf_use1, reg1);
6748        reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
        qib_write_kreg_port(ppd, krp_senddmabuf_use2, reg2);
6750        /* 0 and 1 should always be zero, so print as short form */
6751        qib_dev_porterr(ppd->dd, ppd->port,
6752                 "SDMA current senddmabuf_use 0:%llx  1:%llx  2:%llx\n",
6753                 reg, reg1, reg2);
6754        reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
6755        reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
6756        reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
6757        /* 0 and 1 should always be zero, so print as short form */
6758        qib_dev_porterr(ppd->dd, ppd->port,
6759                 "SDMA cleared senddmabuf_use 0:%llx  1:%llx  2:%llx\n",
6760                 reg, reg1, reg2);
6761
6762        reg = qib_read_kreg_port(ppd, krp_senddmatail);
6763        qib_dev_porterr(ppd->dd, ppd->port,
6764                "SDMA senddmatail: 0x%016llx\n", reg);
6765
6766        reg = qib_read_kreg_port(ppd, krp_senddmahead);
6767        qib_dev_porterr(ppd->dd, ppd->port,
6768                "SDMA senddmahead: 0x%016llx\n", reg);
6769
6770        reg = qib_read_kreg_port(ppd, krp_senddmaheadaddr);
6771        qib_dev_porterr(ppd->dd, ppd->port,
6772                "SDMA senddmaheadaddr: 0x%016llx\n", reg);
6773
6774        reg = qib_read_kreg_port(ppd, krp_senddmalengen);
6775        qib_dev_porterr(ppd->dd, ppd->port,
6776                "SDMA senddmalengen: 0x%016llx\n", reg);
6777
6778        reg = qib_read_kreg_port(ppd, krp_senddmadesccnt);
6779        qib_dev_porterr(ppd->dd, ppd->port,
6780                "SDMA senddmadesccnt: 0x%016llx\n", reg);
6781
6782        reg = qib_read_kreg_port(ppd, krp_senddmaidlecnt);
6783        qib_dev_porterr(ppd->dd, ppd->port,
6784                "SDMA senddmaidlecnt: 0x%016llx\n", reg);
6785
6786        reg = qib_read_kreg_port(ppd, krp_senddmaprioritythld);
6787        qib_dev_porterr(ppd->dd, ppd->port,
                "SDMA senddmaprioritythld: 0x%016llx\n", reg);
6789
6790        reg = qib_read_kreg_port(ppd, krp_senddmareloadcnt);
6791        qib_dev_porterr(ppd->dd, ppd->port,
6792                "SDMA senddmareloadcnt: 0x%016llx\n", reg);
6793
6794        dump_sdma_state(ppd);
6795}
6796
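/*
 * Per-state operation flags for the SDMA state machine: op_enable,
 * op_intenable, op_halt, and op_drain select the engine settings to
 * apply in each state, while the go_s99_running_* flags mark the
 * transitions out of and into the running state.
 */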
6797static struct sdma_set_state_action sdma_7322_action_table[] = {
6798        [qib_sdma_state_s00_hw_down] = {
6799                .go_s99_running_tofalse = 1,
6800                .op_enable = 0,
6801                .op_intenable = 0,
6802                .op_halt = 0,
6803                .op_drain = 0,
6804        },
6805        [qib_sdma_state_s10_hw_start_up_wait] = {
6806                .op_enable = 0,
6807                .op_intenable = 1,
6808                .op_halt = 1,
6809                .op_drain = 0,
6810        },
6811        [qib_sdma_state_s20_idle] = {
6812                .op_enable = 1,
6813                .op_intenable = 1,
6814                .op_halt = 1,
6815                .op_drain = 0,
6816        },
6817        [qib_sdma_state_s30_sw_clean_up_wait] = {
6818                .op_enable = 0,
6819                .op_intenable = 1,
6820                .op_halt = 1,
6821                .op_drain = 0,
6822        },
6823        [qib_sdma_state_s40_hw_clean_up_wait] = {
6824                .op_enable = 1,
6825                .op_intenable = 1,
6826                .op_halt = 1,
6827                .op_drain = 0,
6828        },
6829        [qib_sdma_state_s50_hw_halt_wait] = {
6830                .op_enable = 1,
6831                .op_intenable = 1,
6832                .op_halt = 1,
6833                .op_drain = 1,
6834        },
6835        [qib_sdma_state_s99_running] = {
6836                .op_enable = 1,
6837                .op_intenable = 1,
6838                .op_halt = 0,
6839                .op_drain = 0,
6840                .go_s99_running_totrue = 1,
6841        },
6842};
6843
6844static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
6845{
6846        ppd->sdma_state.set_state_action = sdma_7322_action_table;
6847}
6848
6849static int init_sdma_7322_regs(struct qib_pportdata *ppd)
6850{
6851        struct qib_devdata *dd = ppd->dd;
6852        unsigned lastbuf, erstbuf;
6853        u64 senddmabufmask[3] = { 0 };
6854        int n;
6855
6856        qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
6857        qib_sdma_7322_setlengen(ppd);
6858        qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
6859        qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
6860        qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
6861        qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
6862
6863        if (dd->num_pports)
6864                n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
6865        else
6866                n = dd->cspec->sdmabufcnt; /* failsafe for init */
6867        erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
6868                ((dd->num_pports == 1 || ppd->port == 2) ? n :
6869                dd->cspec->sdmabufcnt);
6870        lastbuf = erstbuf + n;
6871
6872        ppd->sdma_state.first_sendbuf = erstbuf;
6873        ppd->sdma_state.last_sendbuf = lastbuf;
6874        for (; erstbuf < lastbuf; ++erstbuf) {
6875                unsigned word = erstbuf / BITS_PER_LONG;
6876                unsigned bit = erstbuf & (BITS_PER_LONG - 1);
6877
6878                senddmabufmask[word] |= 1ULL << bit;
6879        }
6880        qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
6881        qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
6882        qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
6883        return 0;
6884}
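
/*
 * Worked example of the mask math above, with hypothetical counts on
 * a 64-bit kernel: piobcnt2k = 128, piobcnt4k = 32, two ports, and
 * sdmabufcnt = 32 give n = 16; port 1 gets buffers 128..143
 * (senddmabufmask2 = 0xffff) and port 2 gets buffers 144..159
 * (senddmabufmask2 = 0xffff0000), with masks 0 and 1 left zero.
 */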
6885
6886/* sdma_lock must be held */
6887static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
6888{
6889        struct qib_devdata *dd = ppd->dd;
6890        int sane;
6891        int use_dmahead;
6892        u16 swhead;
6893        u16 swtail;
6894        u16 cnt;
6895        u16 hwhead;
6896
6897        use_dmahead = __qib_sdma_running(ppd) &&
6898                (dd->flags & QIB_HAS_SDMA_TIMEOUT);
6899retry:
6900        hwhead = use_dmahead ?
6901                (u16) le64_to_cpu(*ppd->sdma_head_dma) :
6902                (u16) qib_read_kreg_port(ppd, krp_senddmahead);
6903
6904        swhead = ppd->sdma_descq_head;
6905        swtail = ppd->sdma_descq_tail;
6906        cnt = ppd->sdma_descq_cnt;
6907
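        /*
         * Sanity-check hwhead against the software ring pointers.
         * Example (hypothetical): with cnt = 256, swhead = 250 and
         * swtail = 10 (wrapped), hwhead is sane only in [250, 255] or
         * [0, 10]; anything else is treated as unreliable, so we retry
         * from the register and then assume no progress.
         */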
6908        if (swhead < swtail)
6909                /* not wrapped */
                sane = (hwhead >= swhead) && (hwhead <= swtail);
6911        else if (swhead > swtail)
6912                /* wrapped around */
6913                sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
6914                        (hwhead <= swtail);
6915        else
6916                /* empty */
6917                sane = (hwhead == swhead);
6918
6919        if (unlikely(!sane)) {
6920                if (use_dmahead) {
6921                        /* try one more time, directly from the register */
6922                        use_dmahead = 0;
6923                        goto retry;
6924                }
6925                /* proceed as if no progress */
6926                hwhead = swhead;
6927        }
6928
6929        return hwhead;
6930}
6931
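/*
 * Report whether the SDMA engine still has work in flight: it is busy
 * while a scoreboard drain or halt is in progress, or while it has not
 * yet reached internal halt with an empty scoreboard.
 */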
6932static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
6933{
6934        u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
6935
6936        return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
6937               (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
6938               !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
6939               !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
6940}
6941
/*
 * Compute the amount of delay before sending the next packet if the
 * port's send rate differs from the static rate set for the QP.
 * The delay affects the next packet, and its amount is based on the
 * length of this packet.
 */
6948static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
6949                                   u8 srate, u8 vl)
6950{
6951        u8 snd_mult = ppd->delay_mult;
6952        u8 rcv_mult = ib_rate_to_delay[srate];
6953        u32 ret;
6954
6955        ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
6956
6957        /* Indicate VL15, else set the VL in the control word */
6958        if (vl == 15)
6959                ret |= PBC_7322_VL15_SEND_CTRL;
6960        else
6961                ret |= vl << PBC_VL_NUM_LSB;
6962        ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
6963
6964        return ret;
6965}
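
/*
 * Example with hypothetical multipliers: snd_mult = 1 and rcv_mult = 4
 * (a slower static rate) on a packet of plen = 256 dwords gives
 * ret = ((256 + 1) >> 1) * 1 = 128 before the VL and port fields are
 * OR'ed in above it.
 */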
6966
6967/*
6968 * Enable the per-port VL15 send buffers for use.
6969 * They follow the rest of the buffers, without a config parameter.
 * This was in initregs, but that runs before the shadow is set up,
 * so it has to be done here, after the shadow is set up.
6973 */
6974static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
6975{
6976        unsigned vl15bufs;
6977
6978        vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
6979        qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
6980                               TXCHK_CHG_TYPE_KERN, NULL);
6981}
6982
6983static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
6984{
6985        if (rcd->ctxt < NUM_IB_PORTS) {
6986                if (rcd->dd->num_pports > 1) {
6987                        rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
6988                        rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
6989                } else {
6990                        rcd->rcvegrcnt = KCTXT0_EGRCNT;
6991                        rcd->rcvegr_tid_base = 0;
6992                }
6993        } else {
6994                rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
6995                rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
6996                        (rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
6997        }
6998}
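
/*
 * Eager TID layout sketch with hypothetical counts: KCTXT0_EGRCNT =
 * 2048 and cspec->rcvegrcnt = 512 on a dual-port board give kernel
 * contexts 0 and 1 each 1024 entries at bases 0 and 1024, while
 * context 2 gets 512 entries starting at base 2048.
 */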
6999
7000#define QTXSLEEPS 5000
7001static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
7002                                  u32 len, u32 which, struct qib_ctxtdata *rcd)
7003{
7004        int i;
7005        const int last = start + len - 1;
7006        const int lastr = last / BITS_PER_LONG;
7007        u32 sleeps = 0;
7008        int wait = rcd != NULL;
7009        unsigned long flags;
7010
7011        while (wait) {
7012                unsigned long shadow = 0;
7013                int cstart, previ = -1;
7014
                /*
                 * When flipping from kernel to user, we can't change
                 * the checking type if the buffer is allocated to the
                 * driver.  It's OK in the other direction, because
                 * that happens at close, and we have just disarmed
                 * all the buffers.  All the kernel-to-kernel changes
                 * are also OK.
                 */
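                /*
                 * Each buffer occupies two bits in the pioavail shadow
                 * (a busy bit and a generation bit).  Hypothetical
                 * example with BUSY_SHIFT = 1: buffer 40 maps to bit
                 * (2 * 40 + 1) = 81, i.e. bit 17 of 64-bit word 1.
                 */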
7023                for (cstart = start; cstart <= last; cstart++) {
7024                        i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
7025                                / BITS_PER_LONG;
7026                        if (i != previ) {
7027                                shadow = (unsigned long)
7028                                        le64_to_cpu(dd->pioavailregs_dma[i]);
7029                                previ = i;
7030                        }
7031                        if (test_bit(((2 * cstart) +
7032                                      QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
7033                                     % BITS_PER_LONG, &shadow))
7034                                break;
7035                }
7036
7037                if (cstart > last)
7038                        break;
7039
7040                if (sleeps == QTXSLEEPS)
7041                        break;
7042                /* make sure we see an updated copy next time around */
7043                sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7044                sleeps++;
7045                msleep(20);
7046        }
7047
7048        switch (which) {
7049        case TXCHK_CHG_TYPE_DIS1:
7050                /*
7051                 * disable checking on a range; used by diags; just
7052                 * one buffer, but still written generically
7053                 */
7054                for (i = start; i <= last; i++)
7055                        clear_bit(i, dd->cspec->sendchkenable);
7056                break;
7057
7058        case TXCHK_CHG_TYPE_ENAB1:
7059                /*
7060                 * (re)enable checking on a range; used by diags; just
7061                 * one buffer, but still written generically; read
7062                 * scratch to be sure buffer actually triggered, not
7063                 * just flushed from processor.
7064                 */
7065                qib_read_kreg32(dd, kr_scratch);
7066                for (i = start; i <= last; i++)
7067                        set_bit(i, dd->cspec->sendchkenable);
7068                break;
7069
7070        case TXCHK_CHG_TYPE_KERN:
7071                /* usable by kernel */
7072                for (i = start; i <= last; i++) {
7073                        set_bit(i, dd->cspec->sendibchk);
7074                        clear_bit(i, dd->cspec->sendgrhchk);
7075                }
7076                spin_lock_irqsave(&dd->uctxt_lock, flags);
7077                /* see if we need to raise avail update threshold */
7078                for (i = dd->first_user_ctxt;
7079                     dd->cspec->updthresh != dd->cspec->updthresh_dflt
7080                     && i < dd->cfgctxts; i++)
7081                        if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
7082                           ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
7083                           < dd->cspec->updthresh_dflt)
7084                                break;
7085                spin_unlock_irqrestore(&dd->uctxt_lock, flags);
7086                if (i == dd->cfgctxts) {
7087                        spin_lock_irqsave(&dd->sendctrl_lock, flags);
7088                        dd->cspec->updthresh = dd->cspec->updthresh_dflt;
7089                        dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
7090                        dd->sendctrl |= (dd->cspec->updthresh &
7091                                         SYM_RMASK(SendCtrl, AvailUpdThld)) <<
7092                                           SYM_LSB(SendCtrl, AvailUpdThld);
7093                        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7094                        sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7095                }
7096                break;
7097
7098        case TXCHK_CHG_TYPE_USER:
7099                /* for user process */
7100                for (i = start; i <= last; i++) {
7101                        clear_bit(i, dd->cspec->sendibchk);
7102                        set_bit(i, dd->cspec->sendgrhchk);
7103                }
7104                spin_lock_irqsave(&dd->sendctrl_lock, flags);
7105                if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
7106                        / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
7107                        dd->cspec->updthresh = (rcd->piocnt /
7108                                                rcd->subctxt_cnt) - 1;
7109                        dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
7110                        dd->sendctrl |= (dd->cspec->updthresh &
7111                                        SYM_RMASK(SendCtrl, AvailUpdThld))
7112                                        << SYM_LSB(SendCtrl, AvailUpdThld);
7113                        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7114                        sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7115                } else
7116                        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7117                break;
7118
7119        default:
7120                break;
7121        }
7122
7123        for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
7124                qib_write_kreg(dd, kr_sendcheckmask + i,
7125                               dd->cspec->sendchkenable[i]);
7126
7127        for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
7128                qib_write_kreg(dd, kr_sendgrhcheckmask + i,
7129                               dd->cspec->sendgrhchk[i]);
7130                qib_write_kreg(dd, kr_sendibpktmask + i,
7131                               dd->cspec->sendibchk[i]);
7132        }
7133
7134        /*
7135         * Be sure whatever we did was seen by the chip and acted upon,
7136         * before we return.  Mostly important for which >= 2.
7137         */
7138        qib_read_kreg32(dd, kr_scratch);
7139}
7140
7141
7142/* useful for trigger analyzers, etc. */
7143static void writescratch(struct qib_devdata *dd, u32 val)
7144{
7145        qib_write_kreg(dd, kr_scratch, val);
7146}
7147
7148/* Dummy for now, use chip regs soon */
7149static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
7150{
7151        return -ENXIO;
7152}
7153
7154/**
7155 * qib_init_iba7322_funcs - set up the chip-specific function pointers
7156 * @pdev: the pci_dev for qlogic_ib device
7157 * @ent: pci_device_id struct for this dev
7158 *
7159 * Also allocates, inits, and returns the devdata struct for this
7160 * device instance
7161 *
7162 * This is global, and is called directly at init to set up the
7163 * chip-specific function pointers for later use.
7164 */
7165struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
7166                                           const struct pci_device_id *ent)
7167{
7168        struct qib_devdata *dd;
7169        int ret, i;
7170        u32 tabsize, actual_cnt = 0;
7171
7172        dd = qib_alloc_devdata(pdev,
7173                NUM_IB_PORTS * sizeof(struct qib_pportdata) +
7174                sizeof(struct qib_chip_specific) +
7175                NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
7176        if (IS_ERR(dd))
7177                goto bail;
7178
7179        dd->f_bringup_serdes    = qib_7322_bringup_serdes;
7180        dd->f_cleanup           = qib_setup_7322_cleanup;
7181        dd->f_clear_tids        = qib_7322_clear_tids;
7182        dd->f_free_irq          = qib_7322_free_irq;
7183        dd->f_get_base_info     = qib_7322_get_base_info;
7184        dd->f_get_msgheader     = qib_7322_get_msgheader;
7185        dd->f_getsendbuf        = qib_7322_getsendbuf;
7186        dd->f_gpio_mod          = gpio_7322_mod;
7187        dd->f_eeprom_wen        = qib_7322_eeprom_wen;
7188        dd->f_hdrqempty         = qib_7322_hdrqempty;
7189        dd->f_ib_updown         = qib_7322_ib_updown;
7190        dd->f_init_ctxt         = qib_7322_init_ctxt;
7191        dd->f_initvl15_bufs     = qib_7322_initvl15_bufs;
7192        dd->f_intr_fallback     = qib_7322_intr_fallback;
7193        dd->f_late_initreg      = qib_late_7322_initreg;
7194        dd->f_setpbc_control    = qib_7322_setpbc_control;
7195        dd->f_portcntr          = qib_portcntr_7322;
7196        dd->f_put_tid           = qib_7322_put_tid;
7197        dd->f_quiet_serdes      = qib_7322_mini_quiet_serdes;
7198        dd->f_rcvctrl           = rcvctrl_7322_mod;
7199        dd->f_read_cntrs        = qib_read_7322cntrs;
7200        dd->f_read_portcntrs    = qib_read_7322portcntrs;
7201        dd->f_reset             = qib_do_7322_reset;
7202        dd->f_init_sdma_regs    = init_sdma_7322_regs;
7203        dd->f_sdma_busy         = qib_sdma_7322_busy;
7204        dd->f_sdma_gethead      = qib_sdma_7322_gethead;
7205        dd->f_sdma_sendctrl     = qib_7322_sdma_sendctrl;
7206        dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
7207        dd->f_sdma_update_tail  = qib_sdma_update_7322_tail;
7208        dd->f_sendctrl          = sendctrl_7322_mod;
7209        dd->f_set_armlaunch     = qib_set_7322_armlaunch;
7210        dd->f_set_cntr_sample   = qib_set_cntr_7322_sample;
7211        dd->f_iblink_state      = qib_7322_iblink_state;
7212        dd->f_ibphys_portstate  = qib_7322_phys_portstate;
7213        dd->f_get_ib_cfg        = qib_7322_get_ib_cfg;
7214        dd->f_set_ib_cfg        = qib_7322_set_ib_cfg;
7215        dd->f_set_ib_loopback   = qib_7322_set_loopback;
7216        dd->f_get_ib_table      = qib_7322_get_ib_table;
7217        dd->f_set_ib_table      = qib_7322_set_ib_table;
7218        dd->f_set_intr_state    = qib_7322_set_intr_state;
7219        dd->f_setextled         = qib_setup_7322_setextled;
7220        dd->f_txchk_change      = qib_7322_txchk_change;
7221        dd->f_update_usrhead    = qib_update_7322_usrhead;
7222        dd->f_wantpiobuf_intr   = qib_wantpiobuf_7322_intr;
7223        dd->f_xgxs_reset        = qib_7322_mini_pcs_reset;
7224        dd->f_sdma_hw_clean_up  = qib_7322_sdma_hw_clean_up;
7225        dd->f_sdma_hw_start_up  = qib_7322_sdma_hw_start_up;
7226        dd->f_sdma_init_early   = qib_7322_sdma_init_early;
7227        dd->f_writescratch      = writescratch;
7228        dd->f_tempsense_rd      = qib_7322_tempsense_rd;
7229#ifdef CONFIG_INFINIBAND_QIB_DCA
7230        dd->f_notify_dca        = qib_7322_notify_dca;
7231#endif
7232        /*
7233         * Do remaining PCIe setup and save PCIe values in dd.
7234         * Any error printing is already done by the init code.
7235         * On return, we have the chip mapped, but chip registers
7236         * are not set up until start of qib_init_7322_variables.
7237         */
7238        ret = qib_pcie_ddinit(dd, pdev, ent);
7239        if (ret < 0)
7240                goto bail_free;
7241
7242        /* initialize chip-specific variables */
7243        ret = qib_init_7322_variables(dd);
7244        if (ret)
7245                goto bail_cleanup;
7246
7247        if (qib_mini_init || !dd->num_pports)
7248                goto bail;
7249
7250        /*
7251         * Determine number of vectors we want; depends on port count
7252         * and number of configured kernel receive queues actually used.
7253         * Should also depend on whether sdma is enabled or not, but
7254         * that's such a rare testing case it's not worth worrying about.
7255         */
7256        tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
7257        for (i = 0; i < tabsize; i++)
7258                if ((i < ARRAY_SIZE(irq_table) &&
7259                     irq_table[i].port <= dd->num_pports) ||
7260                    (i >= ARRAY_SIZE(irq_table) &&
7261                     dd->rcd[i - ARRAY_SIZE(irq_table)]))
7262                        actual_cnt++;
        /* reduce by kernel contexts 0/1 (one per port), which don't use MSI */
7264        if (qib_krcvq01_no_msi)
7265                actual_cnt -= dd->num_pports;
7266
7267        tabsize = actual_cnt;
7268        dd->cspec->msix_entries = kcalloc(tabsize,
7269                                          sizeof(struct qib_msix_entry),
7270                                          GFP_KERNEL);
7271        if (!dd->cspec->msix_entries)
7272                tabsize = 0;
7273
7274        if (qib_pcie_params(dd, 8, &tabsize))
7275                qib_dev_err(dd,
7276                        "Failed to setup PCIe or interrupts; continuing anyway\n");
7277        /* may be less than we wanted, if not enough available */
7278        dd->cspec->num_msix_entries = tabsize;
7279
7280        /* setup interrupt handler */
7281        qib_setup_7322_interrupt(dd, 1);
7282
7283        /* clear diagctrl register, in case diags were running and crashed */
7284        qib_write_kreg(dd, kr_hwdiagctrl, 0);
7285#ifdef CONFIG_INFINIBAND_QIB_DCA
7286        if (!dca_add_requester(&pdev->dev)) {
7287                qib_devinfo(dd->pcidev, "DCA enabled\n");
7288                dd->flags |= QIB_DCA_ENABLED;
7289                qib_setup_dca(dd);
7290        }
7291#endif
7292        goto bail;
7293
7294bail_cleanup:
7295        qib_pcie_ddcleanup(dd);
7296bail_free:
7297        qib_free_devdata(dd);
7298        dd = ERR_PTR(ret);
7299bail:
7300        return dd;
7301}
7302
7303/*
 * Set the table entry at the specified index from the specified table.
7305 * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
7306 * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
 * 'ridx' below addresses the correct entry, while its 4 LSBs select the
7308 * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
7309 */
7310#define DDS_ENT_AMP_LSB 14
7311#define DDS_ENT_MAIN_LSB 9
7312#define DDS_ENT_POST_LSB 5
7313#define DDS_ENT_PRE_XTRA_LSB 3
7314#define DDS_ENT_PRE_LSB 0
7315
7316/*
7317 * Set one entry in the TxDDS table for spec'd port
7318 * ridx picks one of the entries, while tp points
7319 * to the appropriate table entry.
7320 */
7321static void set_txdds(struct qib_pportdata *ppd, int ridx,
7322                      const struct txdds_ent *tp)
7323{
7324        struct qib_devdata *dd = ppd->dd;
7325        u32 pack_ent;
7326        int regidx;
7327
7328        /* Get correct offset in chip-space, and in source table */
7329        regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
7330        /*
7331         * We do not use qib_write_kreg_port() because it was intended
7332         * only for registers in the lower "port specific" pages.
 * So do the index calculation by hand.
7334         */
7335        if (ppd->hw_pidx)
7336                regidx += (dd->palign / sizeof(u64));
7337
7338        pack_ent = tp->amp << DDS_ENT_AMP_LSB;
7339        pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
7340        pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
7341        pack_ent |= tp->post << DDS_ENT_POST_LSB;
7342        qib_write_kreg(dd, regidx, pack_ent);
7343        /* Prevent back-to-back writes by hitting scratch */
7344        qib_write_kreg(ppd->dd, kr_scratch, 0);
7345}
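
/*
 * Packing example using the QDR table's "7 dB" entry below (amp 0,
 * pre 1, main 3, post 15): pack_ent = (0 << 14) | (3 << 9) |
 * (15 << 5) | (1 << 0) = 0x7e1.
 */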
7346
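/*
 * Known-cable table: each entry gives a QSFP vendor OUI, an optional
 * 16-character part number (NULL matches any part from that vendor),
 * and the SDR, DDR, and QDR txdds settings to apply for that cable.
 */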
7347static const struct vendor_txdds_ent vendor_txdds[] = {
7348        { /* Amphenol 1m 30awg NoEq */
7349                { 0x41, 0x50, 0x48 }, "584470002       ",
7350                { 10,  0,  0,  5 }, { 10,  0,  0,  9 }, {  7,  1,  0, 13 },
7351        },
7352        { /* Amphenol 3m 28awg NoEq */
7353                { 0x41, 0x50, 0x48 }, "584470004       ",
7354                {  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  7, 15 },
7355        },
7356        { /* Finisar 3m OM2 Optical */
7357                { 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
7358                {  0,  0,  0,  3 }, {  0,  0,  0,  4 }, {  0,  0,  0, 13 },
7359        },
7360        { /* Finisar 30m OM2 Optical */
7361                { 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
7362                {  0,  0,  0,  1 }, {  0,  0,  0,  5 }, {  0,  0,  0, 11 },
7363        },
7364        { /* Finisar Default OM2 Optical */
7365                { 0x00, 0x90, 0x65 }, NULL,
7366                {  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  0,  0, 12 },
7367        },
7368        { /* Gore 1m 30awg NoEq */
7369                { 0x00, 0x21, 0x77 }, "QSN3300-1       ",
7370                {  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  0, 15 },
7371        },
7372        { /* Gore 2m 30awg NoEq */
7373                { 0x00, 0x21, 0x77 }, "QSN3300-2       ",
7374                {  0,  0,  0,  8 }, {  0,  0,  0, 10 }, {  0,  1,  7, 15 },
7375        },
7376        { /* Gore 1m 28awg NoEq */
7377                { 0x00, 0x21, 0x77 }, "QSN3800-1       ",
7378                {  0,  0,  0,  6 }, {  0,  0,  0,  8 }, {  0,  1,  0, 15 },
7379        },
7380        { /* Gore 3m 28awg NoEq */
7381                { 0x00, 0x21, 0x77 }, "QSN3800-3       ",
7382                {  0,  0,  0,  9 }, {  0,  0,  0, 13 }, {  0,  1,  7, 15 },
7383        },
7384        { /* Gore 5m 24awg Eq */
7385                { 0x00, 0x21, 0x77 }, "QSN7000-5       ",
7386                {  0,  0,  0,  7 }, {  0,  0,  0,  9 }, {  0,  1,  3, 15 },
7387        },
7388        { /* Gore 7m 24awg Eq */
7389                { 0x00, 0x21, 0x77 }, "QSN7000-7       ",
7390                {  0,  0,  0,  9 }, {  0,  0,  0, 11 }, {  0,  2,  6, 15 },
7391        },
7392        { /* Gore 5m 26awg Eq */
7393                { 0x00, 0x21, 0x77 }, "QSN7600-5       ",
7394                {  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  9, 13 },
7395        },
7396        { /* Gore 7m 26awg Eq */
7397                { 0x00, 0x21, 0x77 }, "QSN7600-7       ",
7398                {  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  10,  1,  8, 15 },
7399        },
7400        { /* Intersil 12m 24awg Active */
7401                { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
7402                {  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  3,  0,  9 },
7403        },
7404        { /* Intersil 10m 28awg Active */
7405                { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
7406                {  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  2,  0,  2 },
7407        },
7408        { /* Intersil 7m 30awg Active */
7409                { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
7410                {  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  1,  0,  3 },
7411        },
7412        { /* Intersil 5m 32awg Active */
7413                { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
7414                {  0,  0,  0,  6 }, {  0,  0,  0,  6 }, {  0,  2,  0,  8 },
7415        },
7416        { /* Intersil Default Active */
7417                { 0x00, 0x30, 0xB4 }, NULL,
7418                {  0,  0,  0,  6 }, {  0,  0,  0,  5 }, {  0,  2,  0,  5 },
7419        },
7420        { /* Luxtera 20m Active Optical */
7421                { 0x00, 0x25, 0x63 }, NULL,
7422                {  0,  0,  0,  5 }, {  0,  0,  0,  8 }, {  0,  2,  0,  12 },
7423        },
7424        { /* Molex 1M Cu loopback */
7425                { 0x00, 0x09, 0x3A }, "74763-0025      ",
7426                {  2,  2,  6, 15 }, {  2,  2,  6, 15 }, {  2,  2,  6, 15 },
7427        },
7428        { /* Molex 2m 28awg NoEq */
7429                { 0x00, 0x09, 0x3A }, "74757-2201      ",
7430                {  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  1, 15 },
7431        },
7432};
7433
7434static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
7435        /* amp, pre, main, post */
7436        {  2, 2, 15,  6 },      /* Loopback */
7437        {  0, 0,  0,  1 },      /*  2 dB */
7438        {  0, 0,  0,  2 },      /*  3 dB */
7439        {  0, 0,  0,  3 },      /*  4 dB */
7440        {  0, 0,  0,  4 },      /*  5 dB */
7441        {  0, 0,  0,  5 },      /*  6 dB */
7442        {  0, 0,  0,  6 },      /*  7 dB */
7443        {  0, 0,  0,  7 },      /*  8 dB */
7444        {  0, 0,  0,  8 },      /*  9 dB */
7445        {  0, 0,  0,  9 },      /* 10 dB */
7446        {  0, 0,  0, 10 },      /* 11 dB */
7447        {  0, 0,  0, 11 },      /* 12 dB */
7448        {  0, 0,  0, 12 },      /* 13 dB */
7449        {  0, 0,  0, 13 },      /* 14 dB */
7450        {  0, 0,  0, 14 },      /* 15 dB */
7451        {  0, 0,  0, 15 },      /* 16 dB */
7452};
7453
7454static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
7455        /* amp, pre, main, post */
7456        {  2, 2, 15,  6 },      /* Loopback */
7457        {  0, 0,  0,  8 },      /*  2 dB */
7458        {  0, 0,  0,  8 },      /*  3 dB */
7459        {  0, 0,  0,  9 },      /*  4 dB */
7460        {  0, 0,  0,  9 },      /*  5 dB */
7461        {  0, 0,  0, 10 },      /*  6 dB */
7462        {  0, 0,  0, 10 },      /*  7 dB */
7463        {  0, 0,  0, 11 },      /*  8 dB */
7464        {  0, 0,  0, 11 },      /*  9 dB */
7465        {  0, 0,  0, 12 },      /* 10 dB */
7466        {  0, 0,  0, 12 },      /* 11 dB */
7467        {  0, 0,  0, 13 },      /* 12 dB */
7468        {  0, 0,  0, 13 },      /* 13 dB */
7469        {  0, 0,  0, 14 },      /* 14 dB */
7470        {  0, 0,  0, 14 },      /* 15 dB */
7471        {  0, 0,  0, 15 },      /* 16 dB */
7472};
7473
7474static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
7475        /* amp, pre, main, post */
7476        {  2, 2, 15,  6 },      /* Loopback */
7477        {  0, 1,  0,  7 },      /*  2 dB (also QMH7342) */
7478        {  0, 1,  0,  9 },      /*  3 dB (also QMH7342) */
7479        {  0, 1,  0, 11 },      /*  4 dB */
7480        {  0, 1,  0, 13 },      /*  5 dB */
7481        {  0, 1,  0, 15 },      /*  6 dB */
7482        {  0, 1,  3, 15 },      /*  7 dB */
7483        {  0, 1,  7, 15 },      /*  8 dB */
7484        {  0, 1,  7, 15 },      /*  9 dB */
7485        {  0, 1,  8, 15 },      /* 10 dB */
7486        {  0, 1,  9, 15 },      /* 11 dB */
7487        {  0, 1, 10, 15 },      /* 12 dB */
7488        {  0, 2,  6, 15 },      /* 13 dB */
7489        {  0, 2,  7, 15 },      /* 14 dB */
7490        {  0, 2,  8, 15 },      /* 15 dB */
7491        {  0, 2,  9, 15 },      /* 16 dB */
7492};
7493
7494/*
7495 * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
7496 * These are mostly used for mez cards going through connectors
7497 * and backplane traces, but can be used to add other "unusual"
7498 * table values as well.
7499 */
7500static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
7501        /* amp, pre, main, post */
7502        {  0, 0, 0,  1 },       /* QMH7342 backplane settings */
7503        {  0, 0, 0,  1 },       /* QMH7342 backplane settings */
7504        {  0, 0, 0,  2 },       /* QMH7342 backplane settings */
7505        {  0, 0, 0,  2 },       /* QMH7342 backplane settings */
7506        {  0, 0, 0,  3 },       /* QMH7342 backplane settings */
7507        {  0, 0, 0,  4 },       /* QMH7342 backplane settings */
7508        {  0, 1, 4, 15 },       /* QME7342 backplane settings 1.0 */
7509        {  0, 1, 3, 15 },       /* QME7342 backplane settings 1.0 */
7510        {  0, 1, 0, 12 },       /* QME7342 backplane settings 1.0 */
7511        {  0, 1, 0, 11 },       /* QME7342 backplane settings 1.0 */
7512        {  0, 1, 0,  9 },       /* QME7342 backplane settings 1.0 */
7513        {  0, 1, 0, 14 },       /* QME7342 backplane settings 1.0 */
7514        {  0, 1, 2, 15 },       /* QME7342 backplane settings 1.0 */
7515        {  0, 1, 0, 11 },       /* QME7342 backplane settings 1.1 */
7516        {  0, 1, 0,  7 },       /* QME7342 backplane settings 1.1 */
7517        {  0, 1, 0,  9 },       /* QME7342 backplane settings 1.1 */
7518        {  0, 1, 0,  6 },       /* QME7342 backplane settings 1.1 */
7519        {  0, 1, 0,  8 },       /* QME7342 backplane settings 1.1 */
7520};
7521
7522static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
7523        /* amp, pre, main, post */
7524        {  0, 0, 0,  7 },       /* QMH7342 backplane settings */
7525        {  0, 0, 0,  7 },       /* QMH7342 backplane settings */
7526        {  0, 0, 0,  8 },       /* QMH7342 backplane settings */
7527        {  0, 0, 0,  8 },       /* QMH7342 backplane settings */
7528        {  0, 0, 0,  9 },       /* QMH7342 backplane settings */
7529        {  0, 0, 0, 10 },       /* QMH7342 backplane settings */
7530        {  0, 1, 4, 15 },       /* QME7342 backplane settings 1.0 */
7531        {  0, 1, 3, 15 },       /* QME7342 backplane settings 1.0 */
7532        {  0, 1, 0, 12 },       /* QME7342 backplane settings 1.0 */
7533        {  0, 1, 0, 11 },       /* QME7342 backplane settings 1.0 */
7534        {  0, 1, 0,  9 },       /* QME7342 backplane settings 1.0 */
7535        {  0, 1, 0, 14 },       /* QME7342 backplane settings 1.0 */
7536        {  0, 1, 2, 15 },       /* QME7342 backplane settings 1.0 */
7537        {  0, 1, 0, 11 },       /* QME7342 backplane settings 1.1 */
7538        {  0, 1, 0,  7 },       /* QME7342 backplane settings 1.1 */
7539        {  0, 1, 0,  9 },       /* QME7342 backplane settings 1.1 */
7540        {  0, 1, 0,  6 },       /* QME7342 backplane settings 1.1 */
7541        {  0, 1, 0,  8 },       /* QME7342 backplane settings 1.1 */
7542};
7543
7544static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
7545        /* amp, pre, main, post */
7546        {  0, 1,  0,  4 },      /* QMH7342 backplane settings */
7547        {  0, 1,  0,  5 },      /* QMH7342 backplane settings */
7548        {  0, 1,  0,  6 },      /* QMH7342 backplane settings */
7549        {  0, 1,  0,  8 },      /* QMH7342 backplane settings */
7550        {  0, 1,  0, 10 },      /* QMH7342 backplane settings */
7551        {  0, 1,  0, 12 },      /* QMH7342 backplane settings */
7552        {  0, 1,  4, 15 },      /* QME7342 backplane settings 1.0 */
7553        {  0, 1,  3, 15 },      /* QME7342 backplane settings 1.0 */
7554        {  0, 1,  0, 12 },      /* QME7342 backplane settings 1.0 */
7555        {  0, 1,  0, 11 },      /* QME7342 backplane settings 1.0 */
7556        {  0, 1,  0,  9 },      /* QME7342 backplane settings 1.0 */
7557        {  0, 1,  0, 14 },      /* QME7342 backplane settings 1.0 */
7558        {  0, 1,  2, 15 },      /* QME7342 backplane settings 1.0 */
7559        {  0, 1,  0, 11 },      /* QME7342 backplane settings 1.1 */
7560        {  0, 1,  0,  7 },      /* QME7342 backplane settings 1.1 */
7561        {  0, 1,  0,  9 },      /* QME7342 backplane settings 1.1 */
7562        {  0, 1,  0,  6 },      /* QME7342 backplane settings 1.1 */
7563        {  0, 1,  0,  8 },      /* QME7342 backplane settings 1.1 */
7564};
7565
7566static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
7567        /* amp, pre, main, post */
7568        { 0, 0, 0, 0 },         /* QME7342 mfg settings */
7569        { 0, 0, 0, 6 },         /* QME7342 P2 mfg settings */
7570};
7571
7572static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
7573                                               unsigned atten)
7574{
7575        /*
7576         * The attenuation table starts at 2dB for entry 1,
7577         * with entry 0 being the loopback entry.
7578         */
7579        if (atten <= 2)
7580                atten = 1;
7581        else if (atten > TXDDS_TABLE_SZ)
7582                atten = TXDDS_TABLE_SZ - 1;
7583        else
7584                atten--;
7585        return txdds + atten;
7586}
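
/*
 * For example, a cable reporting 11 dB of attenuation selects entry
 * 10 (the "11 dB" row of the tables above), while anything above
 * 16 dB clamps to the last ("16 dB") entry.
 */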
7587
7588/*
7589 * if override is set, the module parameter txselect has a value
7590 * for this specific port, so use it, rather than our normal mechanism.
7591 */
7592static void find_best_ent(struct qib_pportdata *ppd,
7593                          const struct txdds_ent **sdr_dds,
7594                          const struct txdds_ent **ddr_dds,
7595                          const struct txdds_ent **qdr_dds, int override)
7596{
7597        struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
7598        int idx;
7599
7600        /* Search table of known cables */
7601        for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
7602                const struct vendor_txdds_ent *v = vendor_txdds + idx;
7603
7604                if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
7605                    (!v->partnum ||
7606                     !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
7607                        *sdr_dds = &v->sdr;
7608                        *ddr_dds = &v->ddr;
7609                        *qdr_dds = &v->qdr;
7610                        return;
7611                }
7612        }
7613
        /*
         * Active cables don't have attenuation, so we only set SerDes
         * settings to account for the attenuation of the board traces.
         */
7616        if (!override && QSFP_IS_ACTIVE(qd->tech)) {
7617                *sdr_dds = txdds_sdr + ppd->dd->board_atten;
7618                *ddr_dds = txdds_ddr + ppd->dd->board_atten;
7619                *qdr_dds = txdds_qdr + ppd->dd->board_atten;
7620                return;
7621        }
7622
7623        if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
7624                                                      qd->atten[1])) {
7625                *sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
7626                *ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
7627                *qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
7628                return;
7629        } else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
7630                /*
7631                 * If we have no (or incomplete) data from the cable
7632                 * EEPROM, or no QSFP, or override is set, use the
                 * module parameter value to index into the attenuation
7634                 * table.
7635                 */
7636                idx = ppd->cpspec->no_eep;
7637                *sdr_dds = &txdds_sdr[idx];
7638                *ddr_dds = &txdds_ddr[idx];
7639                *qdr_dds = &txdds_qdr[idx];
7640        } else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
7641                /* similar to above, but index into the "extra" table. */
7642                idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
7643                *sdr_dds = &txdds_extra_sdr[idx];
7644                *ddr_dds = &txdds_extra_ddr[idx];
7645                *qdr_dds = &txdds_extra_qdr[idx];
7646        } else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
7647                   ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
7648                                          TXDDS_MFG_SZ)) {
7649                idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
7650                pr_info("IB%u:%u use idx %u into txdds_mfg\n",
7651                        ppd->dd->unit, ppd->port, idx);
7652                *sdr_dds = &txdds_extra_mfg[idx];
7653                *ddr_dds = &txdds_extra_mfg[idx];
7654                *qdr_dds = &txdds_extra_mfg[idx];
7655        } else {
7656                /* this shouldn't happen, it's range checked */
7657                *sdr_dds = txdds_sdr + qib_long_atten;
7658                *ddr_dds = txdds_ddr + qib_long_atten;
7659                *qdr_dds = txdds_qdr + qib_long_atten;
7660        }
7661}
7662
7663static void init_txdds_table(struct qib_pportdata *ppd, int override)
7664{
7665        const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
7666        struct txdds_ent *dds;
7667        int idx;
7668        int single_ent = 0;
7669
7670        find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
7671
7672        /* for mez cards or override, use the selected value for all entries */
7673        if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
7674                single_ent = 1;
7675
        /* Fill entry 0 of each speed table with the best entry found. */
7677        set_txdds(ppd, 0, sdr_dds);
7678        set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
7679        set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
7680        if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
7681                QIBL_LINKACTIVE)) {
7682                dds = (struct txdds_ent *)(ppd->link_speed_active ==
7683                                           QIB_IB_QDR ?  qdr_dds :
7684                                           (ppd->link_speed_active ==
7685                                            QIB_IB_DDR ? ddr_dds : sdr_dds));
7686                write_tx_serdes_param(ppd, dds);
7687        }
7688
7689        /* Fill in the remaining entries with the default table values. */
7690        for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
7691                set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
7692                set_txdds(ppd, idx + TXDDS_TABLE_SZ,
7693                          single_ent ? ddr_dds : txdds_ddr + idx);
7694                set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
7695                          single_ent ? qdr_dds : txdds_qdr + idx);
7696        }
7697}
7698
7699#define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
7700#define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
7701#define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
7702#define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
7703#define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
7704#define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
7705#define AHB_TRANS_TRIES 10
7706
/*
 * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan3,
 * 5=subsystem, which is why most calls pass "chan + (chan >> 1)"
 * for the channel argument.
 */
7712static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
7713                    u32 data, u32 mask)
7714{
7715        u32 rd_data, wr_data, sz_mask;
7716        u64 trans, acc, prev_acc;
7717        u32 ret = 0xBAD0BAD;
7718        int tries;
7719
7720        prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
        /* From this point on, make sure we give access back on exit */
7722        acc = (quad << 1) | 1;
7723        qib_write_kreg(dd, KR_AHB_ACC, acc);
7724
7725        for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7726                trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7727                if (trans & AHB_TRANS_RDY)
7728                        break;
7729        }
7730        if (tries >= AHB_TRANS_TRIES) {
7731                qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
7732                goto bail;
7733        }
7734
7735        /* If mask is not all 1s, we need to read, but different SerDes
7736         * entities have different sizes
7737         */
7738        sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
7739        wr_data = data & mask & sz_mask;
7740        if ((~mask & sz_mask) != 0) {
7741                trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7742                qib_write_kreg(dd, KR_AHB_TRANS, trans);
7743
7744                for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7745                        trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7746                        if (trans & AHB_TRANS_RDY)
7747                                break;
7748                }
7749                if (tries >= AHB_TRANS_TRIES) {
7750                        qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
7751                                    AHB_TRANS_TRIES);
7752                        goto bail;
7753                }
7754                /* Re-read in case the host split the read and fetched data first */
7755                trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7756                rd_data = (u32)(trans >> AHB_DATA_LSB);
7757                wr_data |= (rd_data & ~mask & sz_mask);
7758        }
7759
7760        /* If mask is not zero, we need to write. */
7761        if (mask & sz_mask) {
7762                trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7763                trans |= ((u64)wr_data << AHB_DATA_LSB);
7764                trans |= AHB_WR;
7765                qib_write_kreg(dd, KR_AHB_TRANS, trans);
7766
7767                for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7768                        trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7769                        if (trans & AHB_TRANS_RDY)
7770                                break;
7771                }
7772                if (tries >= AHB_TRANS_TRIES) {
7773                        qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
7774                                    AHB_TRANS_TRIES);
7775                        goto bail;
7776                }
7777        }
7778        ret = wr_data;
7779bail:
7780        qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
7781        return ret;
7782}
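
    /*
     * ahb_mod() is a read-modify-write primitive: a zero mask makes it
     * a pure read of 'addr' (the current value is returned), an
     * all-ones mask a pure write, and anything in between updates only
     * the masked bits.  Illustrative sketch (the quad/chan/addr values
     * here are arbitrary, and 'val' is hypothetical):
     *
     *    read:  val = ahb_mod(dd, IBSD(0), 3, 25, 0, 0);
     *    write: ahb_mod(dd, IBSD(0), 3, 25, val, 0xffff);
     *    rmw:   ahb_mod(dd, IBSD(0), 3, 25, 1 << 4, 1 << 4);
     */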
7783
7784static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
7785                             unsigned mask)
7786{
7787        struct qib_devdata *dd = ppd->dd;
7788        int chan;
7789
7790        for (chan = 0; chan < SERDES_CHANS; ++chan) {
7791                ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7792                        data, mask);
7793                ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7794                        0, 0);
7795        }
7796}
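
    /*
     * The "chan + (chan >> 1)" above implements the AHB channel map
     * described before ahb_mod(): serdes channels 0-3 land on AHB
     * channels 0, 1, 3 and 4, skipping 2 (the pll); channel 5 is the
     * subsystem.
     */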
7797
7798static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
7799{
7800        u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
7801        u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
7802
7803        if (enable && !state) {
7804                pr_info("IB%u:%u Turning LOS on\n",
7805                        ppd->dd->unit, ppd->port);
7806                data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7807        } else if (!enable && state) {
7808                pr_info("IB%u:%u Turning LOS off\n",
7809                        ppd->dd->unit, ppd->port);
7810                data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7811        }
7812        qib_write_kreg_port(ppd, krp_serdesctrl, data);
7813}
7814
7815static int serdes_7322_init(struct qib_pportdata *ppd)
7816{
7817        int ret = 0;
7818
7819        if (ppd->dd->cspec->r1)
7820                ret = serdes_7322_init_old(ppd);
7821        else
7822                ret = serdes_7322_init_new(ppd);
7823        return ret;
7824}
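
    /*
     * A note on the (addr, data, mask) triples below: BMASK(msb, lsb),
     * defined earlier in this file, builds a mask covering bits
     * msb..lsb inclusive, so e.g. "8 << 11, BMASK(14, 11)" writes the
     * value 8 into bit field 14:11 and leaves all other bits alone.
     */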
7825
7826static int serdes_7322_init_old(struct qib_pportdata *ppd)
7827{
7828        u32 le_val;
7829
7830        /*
7831         * Initialize the Tx DDS tables.  On adapters with QSFP, this
7832         * is also done on every QSFP event.
7833         */
7834        init_txdds_table(ppd, 0);
7835
7836        /* ensure no tx overrides from earlier driver loads */
7837        qib_write_kreg_port(ppd, krp_tx_deemph_override,
7838                SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7839                reset_tx_deemphasis_override));
7840
7841        /* Patch some SerDes defaults to "Better for IB" */
7842        /* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
7843        ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
7844
7845        /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
7846        ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
7847        /* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
7848        ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));
7849
7850        /* May be overridden in qsfp_7322_event */
7851        le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
7852        ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
7853
7854        /* enable LE1 adaptation for all but QME, where it stays disabled */
7855        le_val = IS_QME(ppd->dd) ? 0 : 1;
7856        ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));
7857
7858        /* Clear cmode-override, possibly set by an earlier driver load */
7859        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7860
7861        /* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
7862        ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));
7863
7864        /* setup LoS params; these are subsystem, so chan == 5 */
7865        /* LoS filter threshold_count on, ch 0-3, set to 8 */
7866        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7867        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7868        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7869        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7870
7871        /* LoS filter threshold_count off, ch 0-3, set to 4 */
7872        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
7873        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
7874        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
7875        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
7876
7877        /* LoS filter select enabled */
7878        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
7879
7880        /* LoS target data:  SDR=4, DDR=2, QDR=1 */
7881        ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
7882        ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7883        ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7884
7885        serdes_7322_los_enable(ppd, 1);
7886
7887        /* rxbistena: set to 0 to avoid side effects if it switches later */
7888        ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
7889
7890        /* Configure 4 DFE taps, and only they adapt */
7891        ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));
7892
7893        /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
7894        le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
7895        ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
7896
7897        /*
7898         * Set receive adaptation mode.  SDR and DDR adaptation are
7899         * always on, and QDR is initially enabled; later disabled.
7900         */
7901        qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
7902        qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
7903        qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
7904                            ppd->dd->cspec->r1 ?
7905                            QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
7906        ppd->cpspec->qdr_dfe_on = 1;
7907
7908        /* FLoop LOS gate: PPM filter enabled */
7909        ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7910
7911        /* rx offset center enabled */
7912        ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);
7913
7914        if (!ppd->dd->cspec->r1) {
7915                ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
7916                ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
7917        }
7918
7919        /* Set the frequency loop bandwidth to 15 */
7920        ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));
7921
7922        return 0;
7923}
7924
7925static int serdes_7322_init_new(struct qib_pportdata *ppd)
7926{
7927        unsigned long tend;
7928        u32 le_val, rxcaldone;
7929        int chan, chan_done = (1 << SERDES_CHANS) - 1;
7930
7931        /* Clear cmode-override, possibly set by an earlier driver load */
7932        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7933
7934        /* ensure no tx overrides from earlier driver loads */
7935        qib_write_kreg_port(ppd, krp_tx_deemph_override,
7936                SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7937                reset_tx_deemphasis_override));
7938
7939        /* START OF LSI SUGGESTED SERDES BRINGUP */
7940        /* Reset - Calibration Setup */
7941        /*       Stop DFE adaptation */
7942        ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
7943        /*       Disable LE1 */
7944        ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
7945        /*       Disable autoadapt for LE1 */
7946        ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
7947        /*       Disable LE2 */
7948        ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
7949        /*       Disable VGA */
7950        ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
7951        /*       Disable AFE Offset Cancel */
7952        ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
7953        /*       Disable Timing Loop */
7954        ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
7955        /*       Disable Frequency Loop */
7956        ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
7957        /*       Disable Baseline Wander Correction */
7958        ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
7959        /*       Disable RX Calibration */
7960        ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
7961        /*       Disable RX Offset Calibration */
7962        ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
7963        /*       Select BB CDR */
7964        ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
7965        /*       CDR Step Size */
7966        ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
7967        /*       Enable phase Calibration */
7968        ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
7969        /*       DFE Bandwidth [2:14-12] */
7970        ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
7971        /*       DFE Config (4 taps only) */
7972        ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
7973        /*       Gain Loop Bandwidth */
7974        if (!ppd->dd->cspec->r1) {
7975                ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
7976                ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
7977        } else {
7978                ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
7979        }
7980        /*       Baseline Wander Correction Gain [13:4-0] (leave as default) */
7981        /*       Baseline Wander Correction Gain [3:7-5] (leave as default) */
7982        /*       Data Rate Select [5:7-6] (leave as default) */
7983        /*       RX Parallel Word Width [3:10-8] (leave as default) */
7984
7985        /* RX RESET */
7986        /*       Single- or Multi-channel reset */
7987        /*       RX Analog reset */
7988        /*       RX Digital reset */
7989        ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
7990        msleep(20);
7991        /*       RX Analog reset */
7992        ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
7993        msleep(20);
7994        /*       RX Digital reset */
7995        ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
7996        msleep(20);
7997
7998        /* setup LoS params; these are subsystem, so chan == 5 */
7999        /* LoS filter threshold_count on, ch 0-3, set to 8 */
8000        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
8001        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
8002        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
8003        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
8004
8005        /* LoS filter threshold_count off, ch 0-3, set to 4 */
8006        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
8007        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
8008        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
8009        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
8010
8011        /* LoS filter select enabled */
8012        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
8013
8014        /* LoS target data:  SDR=4, DDR=2, QDR=1 */
8015        ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
8016        ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
8017        ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
8018
8019        /* Turn LOS on for initial SERDES init */
8020        serdes_7322_los_enable(ppd, 1);
8021        /* FLoop LOS gate: PPM filter enabled */
8022        ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
8023
8024        /* RX LATCH CALIBRATION */
8025        /*       Enable Eyefinder Phase Calibration latch */
8026        ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
8027        /*       Enable RX Offset Calibration latch */
8028        ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
8029        msleep(20);
8030        /*       Start Calibration */
8031        ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
8032        tend = jiffies + msecs_to_jiffies(500);
8033        while (chan_done && !time_is_before_jiffies(tend)) {
8034                msleep(20);
8035                for (chan = 0; chan < SERDES_CHANS; ++chan) {
8036                        rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
8037                                            (chan + (chan >> 1)),
8038                                            25, 0, 0);
8039                        if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
8040                            (~chan_done & (1 << chan)) == 0)
8041                                chan_done &= ~(1 << chan);
8042                }
8043        }
8044        if (chan_done) {
8045                pr_info("Serdes %d calibration not done after 0.5 sec: 0x%x\n",
8046                         IBSD(ppd->hw_pidx), chan_done);
8047        } else {
8048                for (chan = 0; chan < SERDES_CHANS; ++chan) {
8049                        rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
8050                                            (chan + (chan >> 1)),
8051                                            25, 0, 0);
8052                        if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
8053                                pr_info("Serdes %d chan %d calibration failed\n",
8054                                        IBSD(ppd->hw_pidx), chan);
8055                }
8056        }
8057
8058        /*       Turn off Calibration */
8059        ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
8060        msleep(20);
8061
8062        /* BRING RX UP */
8063        /*       Set LE2 value (May be overridden in qsfp_7322_event) */
8064        le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
8065        ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
8066        /*       Set LE2 Loop bandwidth */
8067        ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
8068        /*       Enable LE2 */
8069        ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
8070        msleep(20);
8071        /*       Enable H0 only */
8072        ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
8073        /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
8074        le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
8075        ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
8076        /*       Enable VGA */
8077        ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
8078        msleep(20);
8079        /*       Set Frequency Loop Bandwidth */
8080        ibsd_wr_allchans(ppd, 2, (15 << 5), BMASK(8, 5));
8081        /*       Enable Frequency Loop */
8082        ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
8083        /*       Set Timing Loop Bandwidth */
8084        ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
8085        /*       Enable Timing Loop */
8086        ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
8087        msleep(50);
8088        /*       Enable DFE
8089         *       Set receive adaptation mode.  SDR and DDR adaptation are
8090         *       always on, and QDR is initially enabled; later disabled.
8091         */
8092        qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
8093        qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
8094        qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
8095                            ppd->dd->cspec->r1 ?
8096                            QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
8097        ppd->cpspec->qdr_dfe_on = 1;
8098        /*       Disable LE1  */
8099        ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
8100        /*       Disable auto adapt for LE1 */
8101        ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
8102        msleep(20);
8103        /*       Enable AFE Offset Cancel */
8104        ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
8105        /*       Enable Baseline Wander Correction */
8106        ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
8107        /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
8108        ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
8109        /* VGA output common mode */
8110        ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
8111
8112        /*
8113         * Initialize the Tx DDS tables.  On adapters with QSFP, this
8114         * is also done on every QSFP event.
8115         */
8116        init_txdds_table(ppd, 0);
8117
8118        return 0;
8119}
8120
8121/* start adjust QMH serdes parameters */
8122
8123static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
8124{
8125        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8126                9, code << 9, 0x3f << 9);
8127}
8128
8129static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
8130        int enable, u32 tapenable)
8131{
8132        if (enable)
8133                ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8134                        1, 3 << 10, 0x1f << 10);
8135        else
8136                ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8137                        1, 0, 0x1f << 10);
8138}
8139
8140/* Set clock to 1, 0, 1, 0 */
8141static void clock_man(struct qib_pportdata *ppd, int chan)
8142{
8143        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8144                4, 0x4000, 0x4000);
8145        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8146                4, 0, 0x4000);
8147        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8148                4, 0x4000, 0x4000);
8149        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8150                4, 0, 0x4000);
8151}
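
    /*
     * The three helpers above are used together by force_h1() below:
     * enter manual H1 mode, load the manual code, clock it in with the
     * 1-0-1-0 sequence, then drop back out of manual mode.
     */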
8152
8153/*
8154 * Write the current Tx serdes pre, post, main and amp settings
8155 * into the serdes.  The caller must pass settings appropriate for
8156 * the current link speed, or not care whether they match it.
8157 */
8158static void write_tx_serdes_param(struct qib_pportdata *ppd,
8159                                  struct txdds_ent *txdds)
8160{
8161        u64 deemph;
8162
8163        deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
8164        /* field names for amp, main, post, pre, respectively */
8165        deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
8166                    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
8167                    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
8168                    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));
8169
8170        deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8171                           tx_override_deemphasis_select);
8172        deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8173                    txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8174                                       txampcntl_d2a);
8175        deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8176                     txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8177                                   txc0_ena);
8178        deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8179                     txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8180                                    txcp1_ena);
8181        deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8182                     txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8183                                    txcn1_ena);
8184        qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
8185}
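
    /*
     * Packing idiom used above, for reference: SYM_RMASK() yields a
     * field's mask right-justified and SYM_LSB() its bit offset, so
     * "(val & SYM_RMASK(reg, fld)) << SYM_LSB(reg, fld)" clamps the
     * value to the field width and shifts it into position.
     */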
8186
8187/*
8188 * Set the parameters for mez cards on link bounce, so they are
8189 * always exactly what was requested.  Similar logic to init_txdds
8190 * but does just the serdes.
8191 */
8192static void adj_tx_serdes(struct qib_pportdata *ppd)
8193{
8194        const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
8195        struct txdds_ent *dds;
8196
8197        find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
8198        dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
8199                qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
8200                                ddr_dds : sdr_dds));
8201        write_tx_serdes_param(ppd, dds);
8202}
8203
8204/* set QDR forced value for H1, if needed */
8205static void force_h1(struct qib_pportdata *ppd)
8206{
8207        int chan;
8208
8209        ppd->cpspec->qdr_reforce = 0;
8210        if (!ppd->dd->cspec->r1)
8211                return;
8212
8213        for (chan = 0; chan < SERDES_CHANS; chan++) {
8214                set_man_mode_h1(ppd, chan, 1, 0);
8215                set_man_code(ppd, chan, ppd->cpspec->h1_val);
8216                clock_man(ppd, chan);
8217                set_man_mode_h1(ppd, chan, 0, 0);
8218        }
8219}
8220
8221#define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
8222#define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
8223
8224#define R_OPCODE_LSB 3
8225#define R_OP_NOP 0
8226#define R_OP_SHIFT 2
8227#define R_OP_UPDATE 3
8228#define R_TDI_LSB 2
8229#define R_TDO_LSB 1
8230#define R_RDY 1
8231
8232static int qib_r_grab(struct qib_devdata *dd)
8233{
8234        u64 val = SJA_EN;
8235
8236        qib_write_kreg(dd, kr_r_access, val);
8237        qib_read_kreg32(dd, kr_scratch);
8238        return 0;
8239}
8240
8241/* qib_r_wait_for_rdy() not only waits for the ready bit, it
8242 * returns the current state of R_TDO
8243 */
8244static int qib_r_wait_for_rdy(struct qib_devdata *dd)
8245{
8246        u64 val;
8247        int timeout;
8248
8249        for (timeout = 0; timeout < 100 ; ++timeout) {
8250                val = qib_read_kreg32(dd, kr_r_access);
8251                if (val & R_RDY)
8252                        return (val >> R_TDO_LSB) & 1;
8253        }
8254        return -1;
8255}
8256
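    /*
     * Shift 'len' bits through the selected BIST chain.  Bits taken
     * from 'inp' (if non-NULL) are driven out on TDI; the TDO state
     * sampled before each shift is collected into 'outp' (if
     * non-NULL).  Returns the number of bits shifted, or -1 on
     * timeout.
     */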
8257static int qib_r_shift(struct qib_devdata *dd, int bisten,
8258                       int len, u8 *inp, u8 *outp)
8259{
8260        u64 valbase, val;
8261        int ret, pos;
8262
8263        valbase = SJA_EN | (bisten << BISTEN_LSB) |
8264                (R_OP_SHIFT << R_OPCODE_LSB);
8265        ret = qib_r_wait_for_rdy(dd);
8266        if (ret < 0)
8267                goto bail;
8268        for (pos = 0; pos < len; ++pos) {
8269                val = valbase;
8270                if (outp) {
8271                        outp[pos >> 3] &= ~(1 << (pos & 7));
8272                        outp[pos >> 3] |= (ret << (pos & 7));
8273                }
8274                if (inp) {
8275                        int tdi = inp[pos >> 3] >> (pos & 7);
8276
8277                        val |= ((tdi & 1) << R_TDI_LSB);
8278                }
8279                qib_write_kreg(dd, kr_r_access, val);
8280                qib_read_kreg32(dd, kr_scratch);
8281                ret = qib_r_wait_for_rdy(dd);
8282                if (ret < 0)
8283                        break;
8284        }
8285        /* Restore to NOP between operations. */
8286        val = SJA_EN | (bisten << BISTEN_LSB);
8287        qib_write_kreg(dd, kr_r_access, val);
8288        qib_read_kreg32(dd, kr_scratch);
8289        ret = qib_r_wait_for_rdy(dd);
8290
8291        if (ret >= 0)
8292                ret = pos;
8293bail:
8294        return ret;
8295}
8296
8297static int qib_r_update(struct qib_devdata *dd, int bisten)
8298{
8299        u64 val;
8300        int ret;
8301
8302        val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
8303        ret = qib_r_wait_for_rdy(dd);
8304        if (ret >= 0) {
8305                qib_write_kreg(dd, kr_r_access, val);
8306                qib_read_kreg32(dd, kr_scratch);
8307        }
8308        return ret;
8309}
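
    /*
     * Typical usage of the three primitives above (this mirrors
     * setup_7322_link_recovery() further down; 'pattern' is a
     * hypothetical bit string of LEN_ETM bits):
     *
     *    if (qib_r_grab(dd) < 0 ||
     *        qib_r_shift(dd, BISTEN_ETM, LEN_ETM, pattern, NULL) < 0 ||
     *        qib_r_update(dd, BISTEN_ETM) < 0)
     *            qib_dev_err(dd, "recovery setup failed\n");
     */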
8310
8311#define BISTEN_PORT_SEL 15
8312#define LEN_PORT_SEL 625
8313#define BISTEN_AT 17
8314#define LEN_AT 156
8315#define BISTEN_ETM 16
8316#define LEN_ETM 632
8317
8318#define BIT2BYTE(x) (((x) +  BITS_PER_BYTE - 1) / BITS_PER_BYTE)
8319
8320/* These are common for all IB port use cases. */
8321static u8 reset_at[BIT2BYTE(LEN_AT)] = {
8322        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8323        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
8324};
8325static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
8326        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8327        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8328        0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
8329        0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
8330        0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
8331        0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
8332        0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8333        0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
8334};
8335static u8 at[BIT2BYTE(LEN_AT)] = {
8336        0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
8337        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
8338};
8339
8340/* used for IB1 or IB2, only one in use */
8341static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
8342        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8343        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8344        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8345        0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
8346        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8347        0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
8348        0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
8349        0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
8350};
8351
8352/* used when both IB1 and IB2 are in use */
8353static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
8354        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8355        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
8356        0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8357        0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
8358        0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
8359        0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
8360        0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
8361        0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
8362};
8363
8364/* used when only IB1 is in use */
8365static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
8366        0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8367        0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8368        0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8369        0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8370        0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8371        0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8372        0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8373        0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8374};
8375
8376/* used when only IB2 is in use */
8377static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
8378        0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
8379        0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
8380        0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8381        0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8382        0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
8383        0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8384        0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8385        0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
8386};
8387
8388/* used when both IB1 and IB2 are in use */
8389static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
8390        0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8391        0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8392        0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8393        0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8394        0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8395        0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
8396        0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8397        0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8398};
8399
8400/*
8401 * Do setup to properly handle IB link recovery; if "both" is set, we
8402 * are initializing to cover both ports; otherwise we are initializing
8403 * a single-port card, or the port has reached INIT and we may need to
8404 * switch coverage types.
8405 */
8406static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
8407{
8408        u8 *portsel, *etm;
8409        struct qib_devdata *dd = ppd->dd;
8410
8411        if (!ppd->dd->cspec->r1)
8412                return;
8413        if (!both) {
8414                dd->cspec->recovery_ports_initted++;
8415                ppd->cpspec->recovery_init = 1;
8416        }
8417        if (!both && dd->cspec->recovery_ports_initted == 1) {
8418                portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
8419                etm = atetm_1port;
8420        } else {
8421                portsel = portsel_2port;
8422                etm = atetm_2port;
8423        }
8424
8425        if (qib_r_grab(dd) < 0 ||
8426                qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
8427                qib_r_update(dd, BISTEN_ETM) < 0 ||
8428                qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
8429                qib_r_update(dd, BISTEN_AT) < 0 ||
8430                qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
8431                            portsel, NULL) < 0 ||
8432                qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
8433                qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
8434                qib_r_update(dd, BISTEN_AT) < 0 ||
8435                qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
8436                qib_r_update(dd, BISTEN_ETM) < 0)
8437                qib_dev_err(dd, "Failed IB link recovery setup\n");
8438}
8439
8440static void check_7322_rxe_status(struct qib_pportdata *ppd)
8441{
8442        struct qib_devdata *dd = ppd->dd;
8443        u64 fmask;
8444
8445        if (dd->cspec->recovery_ports_initted != 1)
8446                return; /* rest doesn't apply to dualport */
8447        qib_write_kreg(dd, kr_control, dd->control |
8448                       SYM_MASK(Control, FreezeMode));
8449        (void)qib_read_kreg64(dd, kr_scratch);
8450        udelay(3); /* ibcreset asserted 400ns, be sure that's over */
8451        fmask = qib_read_kreg64(dd, kr_act_fmask);
8452        if (!fmask) {
8453                /*
8454                 * Require a power cycle before we'll work again; make sure
8455                 * we get no more interrupts, and don't turn off freeze
8456                 * mode.
8457                 */
8458                ppd->dd->cspec->stay_in_freeze = 1;
8459                qib_7322_set_intr_state(ppd->dd, 0);
8460                qib_write_kreg(dd, kr_fmask, 0ULL);
8461                qib_dev_err(dd, "HCA unusable until powercycled\n");
8462                return; /* eventually reset */
8463        }
8464
8465        qib_write_kreg(ppd->dd, kr_hwerrclear,
8466            SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));
8467
8468        /* don't do the full clear_freeze(), not needed for this */
8469        qib_write_kreg(dd, kr_control, dd->control);
8470        qib_read_kreg32(dd, kr_scratch);
8471        /* take IBC out of reset */
8472        if (ppd->link_speed_supported) {
8473                ppd->cpspec->ibcctrl_a &=
8474                        ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
8475                qib_write_kreg_port(ppd, krp_ibcctrl_a,
8476                                    ppd->cpspec->ibcctrl_a);
8477                qib_read_kreg32(dd, kr_scratch);
8478                if (ppd->lflags & QIBL_IB_LINK_DISABLED)
8479                        qib_set_ib_7322_lstate(ppd, 0,
8480                                QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
8481        }
8482}
8483