linux/drivers/infiniband/hw/qib/qib_iba7322.c
/*
 * Copyright (c) 2012 - 2017 Intel Corporation.  All rights reserved.
 * Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * This file contains all of the code that is specific to the
 * InfiniPath 7322 chip
 */

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#ifdef CONFIG_INFINIBAND_QIB_DCA
#include <linux/dca.h>
#endif

#include "qib.h"
#include "qib_7322_regs.h"
#include "qib_qsfp.h"

#include "qib_mad.h"
#include "qib_verbs.h"

#undef pr_fmt
#define pr_fmt(fmt) QIB_DRV_NAME " " fmt

static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
static irqreturn_t qib_7322intr(int irq, void *data);
static irqreturn_t qib_7322bufavail(int irq, void *data);
static irqreturn_t sdma_intr(int irq, void *data);
static irqreturn_t sdma_idle_intr(int irq, void *data);
static irqreturn_t sdma_progress_intr(int irq, void *data);
static irqreturn_t sdma_cleanup_intr(int irq, void *data);
static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
                                  struct qib_ctxtdata *rcd);
static u8 qib_7322_phys_portstate(u64);
static u32 qib_7322_iblink_state(u64);
static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
                                   u16 linitcmd);
static void force_h1(struct qib_pportdata *);
static void adj_tx_serdes(struct qib_pportdata *);
static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
static void qib_7322_mini_pcs_reset(struct qib_pportdata *);

static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
static void serdes_7322_los_enable(struct qib_pportdata *, int);
static int serdes_7322_init_old(struct qib_pportdata *);
static int serdes_7322_init_new(struct qib_pportdata *);
static void dump_sdma_7322_state(struct qib_pportdata *);

#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
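
/*
 * Example (illustrative only): BMASK(7, 4) expands to
 * (((1 << 4) - 1) << 4) == 0xf0, a mask covering bits 7..4.
 */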

/* LE2 serdes values for different cases */
#define LE2_DEFAULT 5
#define LE2_5m 4
#define LE2_QME 0

/* Below is special-purpose, so only really works for the IB SerDes blocks. */
#define IBSD(hw_pidx) (hw_pidx + 2)

/* these are variables for documentation and experimentation purposes */
static const unsigned rcv_int_timeout = 375;
static const unsigned rcv_int_count = 16;
static const unsigned sdma_idle_cnt = 64;

/* Time to stop altering Rx Equalization parameters, after link up. */
#define RXEQ_DISABLE_MSECS 2500

/*
 * Number of VLs we are configured to use (to allow for more
 * credits per vl, etc.)
 */
ushort qib_num_cfg_vls = 2;
module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
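
/*
 * Example (illustrative, assuming the usual ib_qib module name): as an
 * S_IRUGO parameter this can only be set at load time, e.g.
 *   modprobe ib_qib num_vls=4
 * and is read-only via sysfs afterwards.
 */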

static ushort qib_chase = 1;
module_param_named(chase, qib_chase, ushort, S_IRUGO);
MODULE_PARM_DESC(chase, "Enable state chase handling");

static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
MODULE_PARM_DESC(long_attenuation,
                 "attenuation cutoff (dB) for long copper cable setup");

static ushort qib_singleport;
module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");

static ushort qib_krcvq01_no_msi;
module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");

/*
 * Receive header queue sizes
 */
static unsigned qib_rcvhdrcnt;
module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "receive header count");

static unsigned qib_rcvhdrsize;
module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");

static unsigned qib_rcvhdrentsize;
module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");

#define MAX_ATTEN_LEN 64 /* plenty for any real system */
/* for read back, default index is ~5m copper cable */
static char txselect_list[MAX_ATTEN_LEN] = "10";
static struct kparam_string kp_txselect = {
        .string = txselect_list,
        .maxlen = MAX_ATTEN_LEN
};
static int  setup_txselect(const char *, const struct kernel_param *);
module_param_call(txselect, setup_txselect, param_get_string,
                  &kp_txselect, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(txselect,
                 "Tx serdes indices (for no QSFP or invalid QSFP data)");

#define BOARD_QME7342 5
#define BOARD_QMH7342 6
#define BOARD_QMH7360 9
#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
                    BOARD_QMH7342)
#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
                    BOARD_QME7342)

#define KREG_IDX(regname)     (QIB_7322_##regname##_OFFS / sizeof(u64))

#define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))

#define MASK_ACROSS(lsb, msb) \
        (((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
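
/*
 * Example (illustrative only): MASK_ACROSS(0, 17) expands to
 * ((1ULL << 18) - 1) == 0x3ffff, one bit for each of the chip's 18
 * receive contexts (see QIB_I_RCVURG_RMASK and friends below).
 */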

#define SYM_RMASK(regname, fldname) ((u64)              \
        QIB_7322_##regname##_##fldname##_RMASK)

#define SYM_MASK(regname, fldname) ((u64)               \
        QIB_7322_##regname##_##fldname##_RMASK <<       \
         QIB_7322_##regname##_##fldname##_LSB)

#define SYM_FIELD(value, regname, fldname) ((u64)       \
        (((value) >> SYM_LSB(regname, fldname)) &       \
         SYM_RMASK(regname, fldname)))
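
/*
 * Example (illustrative only): SYM_FIELD(dd->revision, Revision, BoardID)
 * shifts the Revision register value down by
 * QIB_7322_Revision_BoardID_LSB and masks it with
 * QIB_7322_Revision_BoardID_RMASK, leaving just the BoardID field
 * (compare IS_QMH()/IS_QME() above).
 */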

/* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
#define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
        (((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))

#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
#define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
#define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
#define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
/* Below because most, but not all, fields of IntMask have that full suffix */
#define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)


#define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)

/*
 * the size bits give us 2^N, in KB units.  0 marks as invalid,
 * and 7 is reserved.  We currently use only 2KB and 4KB
 */
#define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
#define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
#define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
#define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
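
/*
 * Example (a sketch only; the driver's actual TID programming is done
 * elsewhere in this file): a 4 KB buffer at bus address pa would be
 * encoded roughly as
 *   (pa >> IBA7322_TID_PA_SHIFT) | IBA7322_TID_SZ_4K
 * since the chip stores the address without its low 11 bits.
 */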

#define SendIBSLIDAssignMask \
        QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
#define SendIBSLMCMask \
        QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK

#define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
#define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
#define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
#define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
#define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
#define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)

#define _QIB_GPIO_SDA_NUM 1
#define _QIB_GPIO_SCL_NUM 0
#define QIB_EEPROM_WEN_NUM 14
#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */

/* HW counter clock is at 4nsec */
#define QIB_7322_PSXMITWAIT_CHECK_RATE 4000

/* full speed IB port 1 only */
#define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
#define PORT_SPD_CAP_SHIFT 3

/* full speed featuremask, both ports */
#define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))

/*
 * This file contains almost all the chip-specific register information and
 * access functions for the QLogic InfiniPath 7322 PCI-Express chip.
 */

/* Use defines to tie machine-generated names to lower-case names */
#define kr_contextcnt KREG_IDX(ContextCnt)
#define kr_control KREG_IDX(Control)
#define kr_counterregbase KREG_IDX(CntrRegBase)
#define kr_errclear KREG_IDX(ErrClear)
#define kr_errmask KREG_IDX(ErrMask)
#define kr_errstatus KREG_IDX(ErrStatus)
#define kr_extctrl KREG_IDX(EXTCtrl)
#define kr_extstatus KREG_IDX(EXTStatus)
#define kr_gpio_clear KREG_IDX(GPIOClear)
#define kr_gpio_mask KREG_IDX(GPIOMask)
#define kr_gpio_out KREG_IDX(GPIOOut)
#define kr_gpio_status KREG_IDX(GPIOStatus)
#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
#define kr_debugportval KREG_IDX(DebugPortValueReg)
#define kr_fmask KREG_IDX(feature_mask)
#define kr_act_fmask KREG_IDX(active_feature_mask)
#define kr_hwerrclear KREG_IDX(HwErrClear)
#define kr_hwerrmask KREG_IDX(HwErrMask)
#define kr_hwerrstatus KREG_IDX(HwErrStatus)
#define kr_intclear KREG_IDX(IntClear)
#define kr_intmask KREG_IDX(IntMask)
#define kr_intredirect KREG_IDX(IntRedirect0)
#define kr_intstatus KREG_IDX(IntStatus)
#define kr_pagealign KREG_IDX(PageAlign)
#define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
#define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
#define kr_revision KREG_IDX(Revision)
#define kr_scratch KREG_IDX(Scratch)
#define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
#define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
#define kr_sendctrl KREG_IDX(SendCtrl)
#define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
#define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
#define kr_sendpiobufbase KREG_IDX(SendBufBase)
#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
#define kr_sendpiosize KREG_IDX(SendBufSize)
#define kr_sendregbase KREG_IDX(SendRegBase)
#define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
#define kr_userregbase KREG_IDX(UserRegBase)
#define kr_intgranted KREG_IDX(Int_Granted)
#define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
#define kr_intblocked KREG_IDX(IntBlocked)
#define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)

/*
 * per-port kernel registers.  Access only with qib_read_kreg_port()
 * or qib_write_kreg_port()
 */
#define krp_errclear KREG_IBPORT_IDX(ErrClear)
#define krp_errmask KREG_IBPORT_IDX(ErrMask)
#define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
#define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
#define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
#define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
#define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
#define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
#define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
#define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
#define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
#define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
#define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
#define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
#define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
#define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
#define krp_psinterval KREG_IBPORT_IDX(PSInterval)
#define krp_psstart KREG_IBPORT_IDX(PSStart)
#define krp_psstat KREG_IBPORT_IDX(PSStat)
#define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
#define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
#define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
#define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
#define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
#define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
#define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
#define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
#define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
#define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
#define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
#define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
#define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
#define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
#define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
#define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
#define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
#define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
#define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
#define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
#define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
#define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
#define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
#define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
#define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
#define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
#define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
#define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
#define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
#define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
#define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)

/*
 * Per-context kernel registers.  Access only with qib_read_kreg_ctxt()
 * or qib_write_kreg_ctxt()
 */
#define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
#define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)

/*
 * TID Flow table, per context.  Reduces
 * number of hdrq updates to one per flow (or on errors).
 * context 0 and 1 share same memory, but have distinct
 * addresses.  Since for now, we never use expected sends
 * on kernel contexts, we don't worry about that (we initialize
 * those entries for ctxt 0/1 on driver load twice, for example).
 */
#define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
#define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))

/* these are the error bits in the tid flows, and are W1C */
#define TIDFLOW_ERRBITS  ( \
        (SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
        SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
        (SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
        SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
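
/*
 * These bits are write-1-to-clear, so a sketch (illustrative only) of
 * clearing flow f of context ctxt, using the qib_write_ureg() helper
 * defined later in this file, would be:
 *   qib_write_ureg(dd, ur_rcvflowtable + f, TIDFLOW_ERRBITS, ctxt);
 */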

/* Most (not all) counters are per-IBport.
 * Requires that LBIntCnt be at offset 0 in the group.
 */
#define CREG_IDX(regname) \
((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))

#define crp_badformat CREG_IDX(RxVersionErrCnt)
#define crp_err_rlen CREG_IDX(RxLenErrCnt)
#define crp_erricrc CREG_IDX(RxICRCErrCnt)
#define crp_errlink CREG_IDX(RxLinkMalformCnt)
#define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
#define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
#define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
#define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
#define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
#define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
#define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
#define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
#define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
#define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
#define crp_pktrcv CREG_IDX(RxDataPktCnt)
#define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
#define crp_pktsend CREG_IDX(TxDataPktCnt)
#define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
#define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
#define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
#define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
#define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
#define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
#define crp_rcvebp CREG_IDX(RxEBPCnt)
#define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
#define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
#define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
#define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
#define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
#define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
#define crp_rxvlerr CREG_IDX(RxVlErrCnt)
#define crp_sendstall CREG_IDX(TxFlowStallCnt)
#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
#define crp_txlenerr CREG_IDX(TxLenErrCnt)
#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
#define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
#define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
#define crp_wordrcv CREG_IDX(RxDwordCnt)
#define crp_wordsend CREG_IDX(TxDwordCnt)
#define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)

/* these are the (few) counters that are not port-specific */
#define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
                        QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
#define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
#define cr_lbint CREG_DEVIDX(LBIntCnt)
#define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
#define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
#define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
#define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
#define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)

/* no chip register for # of IB ports supported, so define */
#define NUM_IB_PORTS 2

/* 1 VL15 buffer per hardware IB port, no register for this, so define */
#define NUM_VL15_BUFS NUM_IB_PORTS

/*
 * context 0 and 1 are special, and there is no chip register that
 * defines this value, so we have to define it here.
 * These are all allocated to either 0 or 1 for single port
 * hardware configuration, otherwise each gets half
 */
#define KCTXT0_EGRCNT 2048

/* values for vl and port fields in PBC, 7322-specific */
#define PBC_PORT_SEL_LSB 26
#define PBC_PORT_SEL_RMASK 1
#define PBC_VL_NUM_LSB 27
#define PBC_VL_NUM_RMASK 7
#define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
#define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
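
/*
 * Example (a sketch only; qib_7322_setpbc_control() holds the real
 * logic): selecting VL 3 on the second port would OR into the PBC
 *   ((u64)(3 & PBC_VL_NUM_RMASK) << PBC_VL_NUM_LSB) |
 *   ((u64)(1 & PBC_PORT_SEL_RMASK) << PBC_PORT_SEL_LSB)
 * while PBC_7322_VL15_SEND marks a VL15 packet that bypasses the
 * credit check.
 */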

static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
        [IB_RATE_2_5_GBPS] = 16,
        [IB_RATE_5_GBPS] = 8,
        [IB_RATE_10_GBPS] = 4,
        [IB_RATE_20_GBPS] = 2,
        [IB_RATE_30_GBPS] = 2,
        [IB_RATE_40_GBPS] = 1
};

static const char * const qib_sdma_state_names[] = {
        [qib_sdma_state_s00_hw_down]          = "s00_HwDown",
        [qib_sdma_state_s10_hw_start_up_wait] = "s10_HwStartUpWait",
        [qib_sdma_state_s20_idle]             = "s20_Idle",
        [qib_sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
        [qib_sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
        [qib_sdma_state_s50_hw_halt_wait]     = "s50_HwHaltWait",
        [qib_sdma_state_s99_running]          = "s99_Running",
};

#define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
#define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)

/* link training states, from IBC */
#define IB_7322_LT_STATE_DISABLED        0x00
#define IB_7322_LT_STATE_LINKUP          0x01
#define IB_7322_LT_STATE_POLLACTIVE      0x02
#define IB_7322_LT_STATE_POLLQUIET       0x03
#define IB_7322_LT_STATE_SLEEPDELAY      0x04
#define IB_7322_LT_STATE_SLEEPQUIET      0x05
#define IB_7322_LT_STATE_CFGDEBOUNCE     0x08
#define IB_7322_LT_STATE_CFGRCVFCFG      0x09
#define IB_7322_LT_STATE_CFGWAITRMT      0x0a
#define IB_7322_LT_STATE_CFGIDLE         0x0b
#define IB_7322_LT_STATE_RECOVERRETRAIN  0x0c
#define IB_7322_LT_STATE_TXREVLANES      0x0d
#define IB_7322_LT_STATE_RECOVERWAITRMT  0x0e
#define IB_7322_LT_STATE_RECOVERIDLE     0x0f
#define IB_7322_LT_STATE_CFGENH          0x10
#define IB_7322_LT_STATE_CFGTEST         0x11
#define IB_7322_LT_STATE_CFGWAITRMTTEST  0x12
#define IB_7322_LT_STATE_CFGWAITENH      0x13

/* link state machine states from IBC */
#define IB_7322_L_STATE_DOWN             0x0
#define IB_7322_L_STATE_INIT             0x1
#define IB_7322_L_STATE_ARM              0x2
#define IB_7322_L_STATE_ACTIVE           0x3
#define IB_7322_L_STATE_ACT_DEFER        0x4

static const u8 qib_7322_physportstate[0x20] = {
        [IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
        [IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
        [IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
        [IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
        [IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
        [IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
        [IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7322_LT_STATE_CFGRCVFCFG] =
                IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7322_LT_STATE_CFGWAITRMT] =
                IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
        [IB_7322_LT_STATE_RECOVERRETRAIN] =
                IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
        [IB_7322_LT_STATE_RECOVERWAITRMT] =
                IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
        [IB_7322_LT_STATE_RECOVERIDLE] =
                IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
        [IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
        [IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7322_LT_STATE_CFGWAITRMTTEST] =
                IB_PHYSPORTSTATE_CFG_TRAIN,
        [IB_7322_LT_STATE_CFGWAITENH] =
                IB_PHYSPORTSTATE_CFG_WAIT_ENH,
        [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
};

#ifdef CONFIG_INFINIBAND_QIB_DCA
struct qib_irq_notify {
        int rcv;
        void *arg;
        struct irq_affinity_notify notify;
};
#endif

struct qib_chip_specific {
        u64 __iomem *cregbase;
        u64 *cntrs;
        spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
        spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
        u64 main_int_mask;      /* clear bits which have dedicated handlers */
        u64 int_enable_mask;  /* for per port interrupts in single port mode */
        u64 errormask;
        u64 hwerrmask;
        u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
        u64 gpio_mask; /* shadow the gpio mask register */
        u64 extctrl; /* shadow the gpio output enable, etc... */
        u32 ncntrs;
        u32 nportcntrs;
        u32 cntrnamelen;
        u32 portcntrnamelen;
        u32 numctxts;
        u32 rcvegrcnt;
        u32 updthresh; /* current AvailUpdThld */
        u32 updthresh_dflt; /* default AvailUpdThld */
        u32 r1;
        u32 num_msix_entries;
        u32 sdmabufcnt;
        u32 lastbuf_for_pio;
        u32 stay_in_freeze;
        u32 recovery_ports_initted;
#ifdef CONFIG_INFINIBAND_QIB_DCA
        u32 dca_ctrl;
        int rhdr_cpu[18];
        int sdma_cpu[2];
        u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */
#endif
        struct qib_msix_entry *msix_entries;
        unsigned long *sendchkenable;
        unsigned long *sendgrhchk;
        unsigned long *sendibchk;
        u32 rcvavail_timeout[18];
        char emsgbuf[128]; /* for device error interrupt msg buffer */
};

/* Table of Tx Emphasis entries, in "human readable" form. */
struct txdds_ent {
        u8 amp;
        u8 pre;
        u8 main;
        u8 post;
};

struct vendor_txdds_ent {
        u8 oui[QSFP_VOUI_LEN];
        u8 *partnum;
        struct txdds_ent sdr;
        struct txdds_ent ddr;
        struct txdds_ent qdr;
};

static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);

#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
#define TXDDS_EXTRA_SZ 18 /* number of extra tx settings entries */
#define TXDDS_MFG_SZ 2    /* number of mfg tx settings entries */
#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */

#define H1_FORCE_VAL 8
#define H1_FORCE_QME 1 /*  may be overridden via setup_txselect() */
#define H1_FORCE_QMH 7 /*  may be overridden via setup_txselect() */

/* The static and dynamic registers are paired, and the pairs indexed by spd */
#define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
        + ((spd) * 2))

#define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
#define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
#define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */

struct qib_chippport_specific {
        u64 __iomem *kpregbase;
        u64 __iomem *cpregbase;
        u64 *portcntrs;
        struct qib_pportdata *ppd;
        wait_queue_head_t autoneg_wait;
        struct delayed_work autoneg_work;
        struct delayed_work ipg_work;
        struct timer_list chase_timer;
        /*
         * these 5 fields are used to establish deltas for IB symbol
         * errors and linkrecovery errors.  They can be reported on
         * some chips during link negotiation prior to INIT, and with
         * DDR when faking DDR negotiations with non-IBTA switches.
         * The chip counters are adjusted at driver unload if there is
         * a non-zero delta.
         */
        u64 ibdeltainprog;
        u64 ibsymdelta;
        u64 ibsymsnap;
        u64 iblnkerrdelta;
        u64 iblnkerrsnap;
        u64 iblnkdownsnap;
        u64 iblnkdowndelta;
        u64 ibmalfdelta;
        u64 ibmalfsnap;
        u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
        u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
        unsigned long qdr_dfe_time;
        unsigned long chase_end;
        u32 autoneg_tries;
        u32 recovery_init;
        u32 qdr_dfe_on;
        u32 qdr_reforce;
        /*
         * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
         * entry zero is unused, to simplify indexing
         */
        u8 h1_val;
        u8 no_eep;  /* txselect table index to use if no qsfp info */
        u8 ipg_tries;
        u8 ibmalfusesnap;
        struct qib_qsfp_data qsfp_data;
        char epmsgbuf[192]; /* for port error interrupt msg buffer */
        char sdmamsgbuf[192]; /* for per-port sdma error messages */
};

static struct {
        const char *name;
        irq_handler_t handler;
        int lsb;
        int port; /* 0 if not port-specific, else port # */
        int dca;
} irq_table[] = {
        { "", qib_7322intr, -1, 0, 0 },
        { " (buf avail)", qib_7322bufavail,
                SYM_LSB(IntStatus, SendBufAvail), 0, 0 },
        { " (sdma 0)", sdma_intr,
                SYM_LSB(IntStatus, SDmaInt_0), 1, 1 },
        { " (sdma 1)", sdma_intr,
                SYM_LSB(IntStatus, SDmaInt_1), 2, 1 },
        { " (sdmaI 0)", sdma_idle_intr,
                SYM_LSB(IntStatus, SDmaIdleInt_0), 1, 1 },
        { " (sdmaI 1)", sdma_idle_intr,
                SYM_LSB(IntStatus, SDmaIdleInt_1), 2, 1 },
        { " (sdmaP 0)", sdma_progress_intr,
                SYM_LSB(IntStatus, SDmaProgressInt_0), 1, 1 },
        { " (sdmaP 1)", sdma_progress_intr,
                SYM_LSB(IntStatus, SDmaProgressInt_1), 2, 1 },
        { " (sdmaC 0)", sdma_cleanup_intr,
                SYM_LSB(IntStatus, SDmaCleanupDone_0), 1, 0 },
        { " (sdmaC 1)", sdma_cleanup_intr,
                SYM_LSB(IntStatus, SDmaCleanupDone_1), 2, 0 },
};

#ifdef CONFIG_INFINIBAND_QIB_DCA

static const struct dca_reg_map {
        int     shadow_inx;
        int     lsb;
        u64     mask;
        u16     regno;
} dca_rcvhdr_reg_map[] = {
        { 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH),
           ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH), KREG_IDX(DCACtrlB) },
        { 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH),
           ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH), KREG_IDX(DCACtrlB) },
        { 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH),
           ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH), KREG_IDX(DCACtrlB) },
        { 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH),
           ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH), KREG_IDX(DCACtrlB) },
        { 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH),
           ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH), KREG_IDX(DCACtrlC) },
        { 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH),
           ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH), KREG_IDX(DCACtrlC) },
        { 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH),
           ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH), KREG_IDX(DCACtrlC) },
        { 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH),
           ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH), KREG_IDX(DCACtrlC) },
        { 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH),
           ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH), KREG_IDX(DCACtrlD) },
        { 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH),
           ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH), KREG_IDX(DCACtrlD) },
        { 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH),
           ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH), KREG_IDX(DCACtrlD) },
        { 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH),
           ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH), KREG_IDX(DCACtrlD) },
        { 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH),
           ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH), KREG_IDX(DCACtrlE) },
        { 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH),
           ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH), KREG_IDX(DCACtrlE) },
        { 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH),
           ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH), KREG_IDX(DCACtrlE) },
        { 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH),
           ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH), KREG_IDX(DCACtrlE) },
        { 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH),
           ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH), KREG_IDX(DCACtrlF) },
        { 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH),
           ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH), KREG_IDX(DCACtrlF) },
};
#endif

/* ibcctrl bits */
#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
/* cycle through TS1/TS2 till OK */
#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
/* wait for TS1, then go on */
#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16

#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1           /* move to 0x11 */
#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2          /* move to 0x21 */
#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */

#define BLOB_7322_IBCHG 0x101

static inline void qib_write_kreg(const struct qib_devdata *dd,
                                  const u32 regno, u64 value);
static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
static void write_7322_initregs(struct qib_devdata *);
static void write_7322_init_portregs(struct qib_pportdata *);
static void setup_7322_link_recovery(struct qib_pportdata *, u32);
static void check_7322_rxe_status(struct qib_pportdata *);
static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
#ifdef CONFIG_INFINIBAND_QIB_DCA
static void qib_setup_dca(struct qib_devdata *dd);
static void setup_dca_notifier(struct qib_devdata *dd, int msixnum);
static void reset_dca_notifier(struct qib_devdata *dd, int msixnum);
#endif

/**
 * qib_read_ureg32 - read 32-bit virtualized per-context register
 * @dd: device
 * @regno: register number
 * @ctxt: context number
 *
 * Return the contents of a register that is virtualized to be per context.
 * Returns 0 if the chip is not present or mapped (not distinguishable
 * from valid contents at runtime; we may add a separate error variable
 * at some point).
 */
static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
                                  enum qib_ureg regno, int ctxt)
{
        if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
                return 0;
        return readl(regno + (u64 __iomem *)(
                (dd->ureg_align * ctxt) + (dd->userbase ?
                 (char __iomem *)dd->userbase :
                 (char __iomem *)dd->kregbase + dd->uregbase)));
}

/**
 * qib_read_ureg - read virtualized per-context register
 * @dd: device
 * @regno: register number
 * @ctxt: context number
 *
 * Return the contents of a register that is virtualized to be per context.
 * Returns 0 if the chip is not present or mapped (not distinguishable
 * from valid contents at runtime; we may add a separate error variable
 * at some point).
 */
static inline u64 qib_read_ureg(const struct qib_devdata *dd,
                                enum qib_ureg regno, int ctxt)
{
        if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
                return 0;
        return readq(regno + (u64 __iomem *)(
                (dd->ureg_align * ctxt) + (dd->userbase ?
                 (char __iomem *)dd->userbase :
                 (char __iomem *)dd->kregbase + dd->uregbase)));
}

/**
 * qib_write_ureg - write virtualized per-context register
 * @dd: device
 * @regno: register number
 * @value: value
 * @ctxt: context
 *
 * Write the contents of a register that is virtualized to be per context.
 */
static inline void qib_write_ureg(const struct qib_devdata *dd,
                                  enum qib_ureg regno, u64 value, int ctxt)
{
        u64 __iomem *ubase;

        if (dd->userbase)
                ubase = (u64 __iomem *)
                        ((char __iomem *) dd->userbase +
                         dd->ureg_align * ctxt);
        else
                ubase = (u64 __iomem *)
                        (dd->uregbase +
                         (char __iomem *) dd->kregbase +
                         dd->ureg_align * ctxt);

        if (dd->kregbase && (dd->flags & QIB_PRESENT))
                writeq(value, &ubase[regno]);
}
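
/*
 * Example usage (illustrative; ur_rcvhdrhead and ur_rcvhdrtail are the
 * per-context register enums from qib.h): advancing a context's receive
 * header queue head, then reading back the current tail:
 *   qib_write_ureg(dd, ur_rcvhdrhead, hdrqhead, ctxt);
 *   tail = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
 */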

static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
                                  const u32 regno)
{
        if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
                return -1;
        return readl((u32 __iomem *) &dd->kregbase[regno]);
}

static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
                                  const u32 regno)
{
        if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
                return -1;
        return readq(&dd->kregbase[regno]);
}

static inline void qib_write_kreg(const struct qib_devdata *dd,
                                  const u32 regno, u64 value)
{
        if (dd->kregbase && (dd->flags & QIB_PRESENT))
                writeq(value, &dd->kregbase[regno]);
}

/*
 * not many sanity checks for the port-specific kernel register routines,
 * since they are only used when it's known to be safe.
 */
static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
                                     const u16 regno)
{
        if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
                return 0ULL;
        return readq(&ppd->cpspec->kpregbase[regno]);
}

static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
                                       const u16 regno, u64 value)
{
        if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
            (ppd->dd->flags & QIB_PRESENT))
                writeq(value, &ppd->cpspec->kpregbase[regno]);
}

/**
 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
 * @dd: the qlogic_ib device
 * @regno: the register number to write
 * @ctxt: the context containing the register
 * @value: the value to write
 */
static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
                                       const u16 regno, unsigned ctxt,
                                       u64 value)
{
        qib_write_kreg(dd, regno + ctxt, value);
}
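
/*
 * Example (illustrative; hdrq_dma is a hypothetical DMA address):
 * per-context registers are laid out consecutively, so programming the
 * header queue base for context ctxt is simply
 *   qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, hdrq_dma);
 * using the krc_rcvhdraddr index defined earlier.
 */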

static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
{
        if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
                return 0;
        return readq(&dd->cspec->cregbase[regno]);
}

static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
{
        if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
                return 0;
        return readl(&dd->cspec->cregbase[regno]);
}

static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
                                        u16 regno, u64 value)
{
        if (ppd->cpspec && ppd->cpspec->cpregbase &&
            (ppd->dd->flags & QIB_PRESENT))
                writeq(value, &ppd->cpspec->cpregbase[regno]);
}

static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
                                      u16 regno)
{
        if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
            !(ppd->dd->flags & QIB_PRESENT))
                return 0;
        return readq(&ppd->cpspec->cpregbase[regno]);
}

static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
                                        u16 regno)
{
        if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
            !(ppd->dd->flags & QIB_PRESENT))
                return 0;
        return readl(&ppd->cpspec->cpregbase[regno]);
}

/* bits in Control register */
#define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
#define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)

/* bits in general interrupt regs */
#define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
#define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
#define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
#define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
#define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
#define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
#define QIB_I_C_ERROR INT_MASK(Err)

#define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
#define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
#define QIB_I_GPIO INT_MASK(AssertGPIO)
#define QIB_I_P_SDMAINT(pidx) \
        (INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
         INT_MASK_P(SDmaProgress, pidx) | \
         INT_MASK_PM(SDmaCleanupDone, pidx))

/* Interrupt bits that are "per port" */
#define QIB_I_P_BITSEXTANT(pidx) \
        (INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
        INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
        INT_MASK_P(SDmaProgress, pidx) | \
        INT_MASK_PM(SDmaCleanupDone, pidx))

/* Interrupt bits that are common to a device */
/* currently unused: QIB_I_SPIOSENT */
#define QIB_I_C_BITSEXTANT \
        (QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
        QIB_I_SPIOSENT | \
        QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)

#define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
        QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))

/*
 * Error bits that are "per port".
 */
#define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
#define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
#define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
#define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
#define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
#define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
#define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
#define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
#define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
#define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
#define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
#define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
#define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
#define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
#define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
#define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
#define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
#define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
#define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
#define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
#define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
#define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
#define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
#define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
#define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
#define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
#define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
#define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)

#define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
#define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
#define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
#define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
#define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
#define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
#define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
#define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
#define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
#define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
#define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)

/* Error bits that are common to a device */
#define QIB_E_RESET ERR_MASK(ResetNegated)
#define QIB_E_HARDWARE ERR_MASK(HardwareErr)
#define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)

/*
 * Per chip (rather than per-port) errors.  Most do nothing but trigger
 * a print (because they self-recover, or always occur in tandem with
 * other errors that handle the issue); the rest indicate errors with no
 * recovery, but we still want to know that they happened.
 */
#define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
#define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
#define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
#define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
#define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
#define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
#define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
#define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)

/* SDMA chip errors (not per port)
 * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
 * the SDMAHALT error immediately, so we just print the dup error via the
 * E_AUTO mechanism.  This is true of most of the per-port fatal errors
 * as well, but since this is port-independent, by definition, it's
 * handled a bit differently.  SDMA_VL15 and SDMA_WRONG_PORT are per
 * packet send errors, and so are handled in the same manner as other
 * per-packet errors.
 */
#define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
#define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
#define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)

/*
 * Below is functionally equivalent to the legacy QLOGIC_IB_E_PKTERRS;
 * it is used to print "common" packet errors.
 */
#define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
        QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
        QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
        QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
        QIB_E_P_REBP)

/* Error bits that are packet-related (Receive, per-port) */
#define QIB_E_P_RPKTERRS (\
        QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
        QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
        QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
        QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
        QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
        QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)

/*
 * Error bits that are Send-related (per port)
 * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
 * All of these potentially need to have a buffer disarmed
 */
#define QIB_E_P_SPKTERRS (\
        QIB_E_P_SUNEXP_PKTNUM |\
        QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
        QIB_E_P_SMAXPKTLEN |\
        QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
        QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
        QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)

#define QIB_E_SPKTERRS ( \
                QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
                ERR_MASK_N(SendUnsupportedVLErr) |                      \
                QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)

#define QIB_E_P_SDMAERRS ( \
        QIB_E_P_SDMAHALT | \
        QIB_E_P_SDMADESCADDRMISALIGN | \
        QIB_E_P_SDMAUNEXPDATA | \
        QIB_E_P_SDMAMISSINGDW | \
        QIB_E_P_SDMADWEN | \
        QIB_E_P_SDMARPYTAG | \
        QIB_E_P_SDMA1STDESC | \
        QIB_E_P_SDMABASE | \
        QIB_E_P_SDMATAILOUTOFBOUND | \
        QIB_E_P_SDMAOUTOFBOUND | \
        QIB_E_P_SDMAGENMISMATCH)

/*
 * This sets some bits more than once, but makes it more obvious which
 * bits are not handled under other categories, and the repeat definition
 * is not a problem.
 */
#define QIB_E_P_BITSEXTANT ( \
        QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
        QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
        QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
        QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
        )

/*
 * These are errors that can occur when the link changes state while a
 * packet is being sent or received.  This doesn't cover things like EBP
 * or VCRC that can result from the sender's link changing state, in
 * which case we receive a "known bad" packet.
 * All of these are "per port", so renamed:
 */
#define QIB_E_P_LINK_PKTERRS (\
        QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
        QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
        QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
        QIB_E_P_RUNEXPCHAR)

/*
 * This sets some bits more than once, but makes it more obvious which
 * bits are not handled under other categories (such as QIB_E_SPKTERRS),
 * and the repeat definition is not a problem.
 */
#define QIB_E_C_BITSEXTANT (\
        QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
        QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
        QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)

/* Likewise Neuter E_SPKT_ERRS_IGNORE */
#define E_SPKT_ERRS_IGNORE 0

#define QIB_EXTS_MEMBIST_DISABLED \
        SYM_MASK(EXTStatus, MemBISTDisabled)
#define QIB_EXTS_MEMBIST_ENDTEST \
        SYM_MASK(EXTStatus, MemBISTEndTest)

#define QIB_E_SPIOARMLAUNCH \
        ERR_MASK(SendArmLaunchErr)

#define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
#define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)

/*
 * IBTA_1_2 is set when multiple speeds are enabled (normal),
 * and also if forced QDR (only QDR enabled).  It's enabled for the
 * forced QDR case so that scrambling will be enabled by the TS3
 * exchange, when supported by both sides of the link.
 */
#define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
#define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
#define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
#define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
#define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
#define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
        SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
#define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)

#define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
#define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)

#define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
#define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
#define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))

#define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
#define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
#define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
        SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
#define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
        SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
#define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)

#define IBA7322_REDIRECT_VEC_PER_REG 12

#define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
#define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
#define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
#define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
#define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)

#define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */

#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
        .msg = #fldname, .sz = sizeof(#fldname) }
#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
        fldname##Mask##_##port), .msg = #fldname, .sz = sizeof(#fldname) }
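
/*
 * Example (macro expansion, shown for clarity): HWE_AUTO(MemoryErr)
 * expands to
 *   { .mask = SYM_MASK(HwErrMask, MemoryErrMask),
 *     .msg = "MemoryErr", .sz = sizeof("MemoryErr") }
 * so each table entry carries its mask, printable name, and name size.
 */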
1204static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
1205        HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
1206        HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
1207        HWE_AUTO(PCIESerdesPClkNotDetect),
1208        HWE_AUTO(PowerOnBISTFailed),
1209        HWE_AUTO(TempsenseTholdReached),
1210        HWE_AUTO(MemoryErr),
1211        HWE_AUTO(PCIeBusParityErr),
1212        HWE_AUTO(PcieCplTimeout),
1213        HWE_AUTO(PciePoisonedTLP),
1214        HWE_AUTO_P(SDmaMemReadErr, 1),
1215        HWE_AUTO_P(SDmaMemReadErr, 0),
1216        HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
1217        HWE_AUTO_P(IBCBusToSPCParityErr, 1),
1218        HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
1219        HWE_AUTO(statusValidNoEop),
1220        HWE_AUTO(LATriggered),
1221        { .mask = 0, .sz = 0 }
1222};
1223
1224#define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
1225        .msg = #fldname, .sz = sizeof(#fldname) }
1226#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
1227        .msg = #fldname, .sz = sizeof(#fldname) }
1228static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
1229        E_AUTO(RcvEgrFullErr),
1230        E_AUTO(RcvHdrFullErr),
1231        E_AUTO(ResetNegated),
1232        E_AUTO(HardwareErr),
1233        E_AUTO(InvalidAddrErr),
1234        E_AUTO(SDmaVL15Err),
1235        E_AUTO(SBufVL15MisUseErr),
1236        E_AUTO(InvalidEEPCmd),
1237        E_AUTO(RcvContextShareErr),
1238        E_AUTO(SendVLMismatchErr),
1239        E_AUTO(SendArmLaunchErr),
1240        E_AUTO(SendSpecialTriggerErr),
1241        E_AUTO(SDmaWrongPortErr),
1242        E_AUTO(SDmaBufMaskDuplicateErr),
1243        { .mask = 0, .sz = 0 }
1244};
1245
1246static const struct  qib_hwerror_msgs qib_7322p_error_msgs[] = {
1247        E_P_AUTO(IBStatusChanged),
1248        E_P_AUTO(SHeadersErr),
1249        E_P_AUTO(VL15BufMisuseErr),
1250        /*
1251         * SDmaHaltErr is not really an error, make it clearer;
1252         */
1253        {.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
1254                .sz = 11},
1255        E_P_AUTO(SDmaDescAddrMisalignErr),
1256        E_P_AUTO(SDmaUnexpDataErr),
1257        E_P_AUTO(SDmaMissingDwErr),
1258        E_P_AUTO(SDmaDwEnErr),
1259        E_P_AUTO(SDmaRpyTagErr),
1260        E_P_AUTO(SDma1stDescErr),
1261        E_P_AUTO(SDmaBaseErr),
1262        E_P_AUTO(SDmaTailOutOfBoundErr),
1263        E_P_AUTO(SDmaOutOfBoundErr),
1264        E_P_AUTO(SDmaGenMismatchErr),
1265        E_P_AUTO(SendBufMisuseErr),
1266        E_P_AUTO(SendUnsupportedVLErr),
1267        E_P_AUTO(SendUnexpectedPktNumErr),
1268        E_P_AUTO(SendDroppedDataPktErr),
1269        E_P_AUTO(SendDroppedSmpPktErr),
1270        E_P_AUTO(SendPktLenErr),
1271        E_P_AUTO(SendUnderRunErr),
1272        E_P_AUTO(SendMaxPktLenErr),
1273        E_P_AUTO(SendMinPktLenErr),
1274        E_P_AUTO(RcvIBLostLinkErr),
1275        E_P_AUTO(RcvHdrErr),
1276        E_P_AUTO(RcvHdrLenErr),
1277        E_P_AUTO(RcvBadTidErr),
1278        E_P_AUTO(RcvBadVersionErr),
1279        E_P_AUTO(RcvIBFlowErr),
1280        E_P_AUTO(RcvEBPErr),
1281        E_P_AUTO(RcvUnsupportedVLErr),
1282        E_P_AUTO(RcvUnexpectedCharErr),
1283        E_P_AUTO(RcvShortPktLenErr),
1284        E_P_AUTO(RcvLongPktLenErr),
1285        E_P_AUTO(RcvMaxPktLenErr),
1286        E_P_AUTO(RcvMinPktLenErr),
1287        E_P_AUTO(RcvICRCErr),
1288        E_P_AUTO(RcvVCRCErr),
1289        E_P_AUTO(RcvFormatErr),
1290        { .mask = 0, .sz = 0 }
1291};
1292
1293/*
1294 * Below generates "auto-message" for interrupts not specific to any port or
1295 * context
1296 */
1297#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
1298        .msg = #fldname, .sz = sizeof(#fldname) }
1299/* Below generates "auto-message" for interrupts specific to a port */
1300#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
1301        SYM_LSB(IntMask, fldname##Mask##_0), \
1302        SYM_LSB(IntMask, fldname##Mask##_1)), \
1303        .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
1304/* For some reason, the SerDesTrimDone bits are reversed */
1305#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
1306        SYM_LSB(IntMask, fldname##Mask##_1), \
1307        SYM_LSB(IntMask, fldname##Mask##_0)), \
1308        .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
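/*
 * MASK_ACROSS is assumed to build a mask spanning every bit from its
 * first argument's position up through its second's, so INTR_AUTO_P
 * passes the port-0 LSB first.  INTR_AUTO_PI exists only because the
 * port-1 SerDesTrimDone bit sits below the port-0 bit; swapping the
 * arguments keeps the range ascending.
 */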
1309/*
1310 * Below generates "auto-message" for interrupts specific to a context,
1311 * with ctxt-number appended
1312 */
1313#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
1314        SYM_LSB(IntMask, fldname##0IntMask), \
1315        SYM_LSB(IntMask, fldname##17IntMask)), \
1316        .msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
1317
1318#define TXSYMPTOM_AUTO_P(fldname) \
1319        { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
1320        .msg = #fldname, .sz = sizeof(#fldname) }
1321static const struct qib_hwerror_msgs hdrchk_msgs[] = {
1322        TXSYMPTOM_AUTO_P(NonKeyPacket),
1323        TXSYMPTOM_AUTO_P(GRHFail),
1324        TXSYMPTOM_AUTO_P(PkeyFail),
1325        TXSYMPTOM_AUTO_P(QPFail),
1326        TXSYMPTOM_AUTO_P(SLIDFail),
1327        TXSYMPTOM_AUTO_P(RawIPV6),
1328        TXSYMPTOM_AUTO_P(PacketTooSmall),
1329        { .mask = 0, .sz = 0 }
1330};
1331
1332#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
1333
1334/*
1335 * Called when we might have an error that is specific to a particular
1336 * PIO buffer, and may need to cancel that buffer, so it can be re-used;
1337 * in that case we don't need to force an update of pioavail.
1338 */
1339static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
1340{
1341        struct qib_devdata *dd = ppd->dd;
1342        u32 i;
1343        int any;
1344        u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
1345        u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
1346        unsigned long sbuf[4];
1347
1348        /*
1349         * It's possible that sendbuffererror could have bits set; we might
1350         * have already done this as a result of hardware error handling.
1351         */
1352        any = 0;
1353        for (i = 0; i < regcnt; ++i) {
1354                sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
1355                if (sbuf[i]) {
1356                        any = 1;
1357                        qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
1358                }
1359        }
1360
1361        if (any)
1362                qib_disarm_piobufs_set(dd, sbuf, piobcnt);
1363}
1364
1365/* No txe_recover yet, if ever */
1366
1367/* No decode_errors yet */
1368static void err_decode(char *msg, size_t len, u64 errs,
1369                       const struct qib_hwerror_msgs *msp)
1370{
1371        u64 these, lmask;
1372        int took, multi, n = 0;
1373
1374        while (errs && msp && msp->mask) {
1375                multi = (msp->mask & (msp->mask - 1));
1376                while (errs & msp->mask) {
1377                        these = (errs & msp->mask);
1378                        lmask = (these & (these - 1)) ^ these;
1379                        if (len) {
1380                                if (n++) {
1381                                        /* separate the strings */
1382                                        *msg++ = ',';
1383                                        len--;
1384                                }
1385                                /* msp->sz counts the nul */
1386                                took = min_t(size_t, msp->sz - (size_t)1, len);
1387                                memcpy(msg, msp->msg, took);
1388                                len -= took;
1389                                msg += took;
1390                                if (len)
1391                                        *msg = '\0';
1392                        }
1393                        errs &= ~lmask;
1394                        if (len && multi) {
1395                                /* More than one bit this mask */
1396                                int idx = -1;
1397
1398                                while (lmask & msp->mask) {
1399                                        ++idx;
1400                                        lmask >>= 1;
1401                                }
1402                                took = scnprintf(msg, len, "_%d", idx);
1403                                len -= took;
1404                                msg += took;
1405                        }
1406                }
1407                ++msp;
1408        }
1409        /* If some bits are left, show in hex. */
1410        if (len && errs)
1411                snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
1412                        (unsigned long long) errs);
1413}
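/*
 * Illustrative only: decoding errs with, say, the SendPktLenErr and
 * SendUnderRunErr bits set against qib_7322p_error_msgs leaves
 * "SendPktLenErr,SendUnderRunErr" in msg; each bit matched by a
 * multi-bit mask entry gets a "_<n>" suffix (its offset within the
 * mask), and any bits with no table entry are appended as
 * ",MORE:<hex>".
 */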
1414
1415/* only called if r1 set */
1416static void flush_fifo(struct qib_pportdata *ppd)
1417{
1418        struct qib_devdata *dd = ppd->dd;
1419        u32 __iomem *piobuf;
1420        u32 bufn;
1421        u32 *hdr;
1422        u64 pbc;
1423        const unsigned hdrwords = 7;
1424        static struct ib_header ibhdr = {
1425                .lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
1426                .lrh[1] = IB_LID_PERMISSIVE,
1427                .lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
1428                .lrh[3] = IB_LID_PERMISSIVE,
1429                .u.oth.bth[0] = cpu_to_be32(
1430                        (IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
1431                .u.oth.bth[1] = cpu_to_be32(0),
1432                .u.oth.bth[2] = cpu_to_be32(0),
1433                .u.oth.u.ud.deth[0] = cpu_to_be32(0),
1434                .u.oth.u.ud.deth[1] = cpu_to_be32(0),
1435        };
1436
1437        /*
1438         * Send a dummy VL15 packet to flush the launch FIFO.
1439         * This will not actually be sent since the TxeBypassIbc bit is set.
1440         */
1441        pbc = PBC_7322_VL15_SEND |
1442                (((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
1443                (hdrwords + SIZE_OF_CRC);
1444        piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
1445        if (!piobuf)
1446                return;
1447        writeq(pbc, piobuf);
1448        hdr = (u32 *) &ibhdr;
1449        if (dd->flags & QIB_PIO_FLUSH_WC) {
1450                qib_flush_wc();
1451                qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
1452                qib_flush_wc();
1453                __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
1454                qib_flush_wc();
1455        } else
1456                qib_pio_copy(piobuf + 2, hdr, hdrwords);
1457        qib_sendbuf_done(dd, bufn);
1458}
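/*
 * Note on the PBC built above: the low bits carry the packet length in
 * dwords (header words plus CRC), the upper word selects the port via
 * PBC_PORT_SEL_LSB + 32, and PBC_7322_VL15_SEND marks it as a VL15
 * send.  Because the caller has TxeBypassIbc set, the dummy packet
 * flushes the launch FIFO without ever reaching the wire.
 */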
1459
1460/*
1461 * This is called with interrupts disabled and sdma_lock held.
1462 */
1463static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
1464{
1465        struct qib_devdata *dd = ppd->dd;
1466        u64 set_sendctrl = 0;
1467        u64 clr_sendctrl = 0;
1468
1469        if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
1470                set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1471        else
1472                clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1473
1474        if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
1475                set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1476        else
1477                clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1478
1479        if (op & QIB_SDMA_SENDCTRL_OP_HALT)
1480                set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1481        else
1482                clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1483
1484        if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
1485                set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1486                                SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1487                                SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1488        else
1489                clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1490                                SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1491                                SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1492
1493        spin_lock(&dd->sendctrl_lock);
1494
1495        /* If we are draining everything, block sends first */
1496        if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1497                ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
1498                qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1499                qib_write_kreg(dd, kr_scratch, 0);
1500        }
1501
1502        ppd->p_sendctrl |= set_sendctrl;
1503        ppd->p_sendctrl &= ~clr_sendctrl;
1504
1505        if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
1506                qib_write_kreg_port(ppd, krp_sendctrl,
1507                                    ppd->p_sendctrl |
1508                                    SYM_MASK(SendCtrl_0, SDmaCleanup));
1509        else
1510                qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1511        qib_write_kreg(dd, kr_scratch, 0);
1512
1513        if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1514                ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
1515                qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1516                qib_write_kreg(dd, kr_scratch, 0);
1517        }
1518
1519        spin_unlock(&dd->sendctrl_lock);
1520
1521        if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
1522                flush_fifo(ppd);
1523}
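/*
 * Usage sketch: callers compose op from the QIB_SDMA_SENDCTRL_OP_*
 * flags, e.g. to enable the engine along with its interrupt:
 *
 *	qib_7322_sdma_sendctrl(ppd, QIB_SDMA_SENDCTRL_OP_ENABLE |
 *				    QIB_SDMA_SENDCTRL_OP_INTENABLE);
 *
 * Flags absent from op are actively cleared rather than left alone, so
 * every call specifies the complete desired SDMA send-control state.
 */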
1524
1525static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
1526{
1527        __qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
1528}
1529
1530static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
1531{
1532        /*
1533         * Set SendDmaLenGen, writing it first with the MSB of the
1534         * generation count clear and then with it set, to enable
1535         * generation checking and load the internal generation counter.
1536         */
1537        qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
1538        qib_write_kreg_port(ppd, krp_senddmalengen,
1539                            ppd->sdma_descq_cnt |
1540                            (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
1541}
1542
1543/*
1544 * Must be called with sdma_lock held, or before init finished.
1545 */
1546static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
1547{
1548        /* Commit writes to memory and advance the tail on the chip */
1549        wmb();
1550        ppd->sdma_descq_tail = tail;
1551        qib_write_kreg_port(ppd, krp_senddmatail, tail);
1552}
1553
1554/*
1555 * This is called with interrupts disabled and sdma_lock held.
1556 */
1557static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
1558{
1559        /*
1560         * Drain all FIFOs.
1561         * The hardware doesn't require this, but we do it so that verbs
1562         * and user applications don't end up sending stale data once the
1563         * link does go active.
1564         */
1565        sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
1566
1567        qib_sdma_7322_setlengen(ppd);
1568        qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
1569        ppd->sdma_head_dma[0] = 0;
1570        qib_7322_sdma_sendctrl(ppd,
1571                ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
1572}
1573
1574#define DISABLES_SDMA ( \
1575        QIB_E_P_SDMAHALT | \
1576        QIB_E_P_SDMADESCADDRMISALIGN | \
1577        QIB_E_P_SDMAMISSINGDW | \
1578        QIB_E_P_SDMADWEN | \
1579        QIB_E_P_SDMARPYTAG | \
1580        QIB_E_P_SDMA1STDESC | \
1581        QIB_E_P_SDMABASE | \
1582        QIB_E_P_SDMATAILOUTOFBOUND | \
1583        QIB_E_P_SDMAOUTOFBOUND | \
1584        QIB_E_P_SDMAGENMISMATCH)
1585
1586static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
1587{
1588        unsigned long flags;
1589        struct qib_devdata *dd = ppd->dd;
1590
1591        errs &= QIB_E_P_SDMAERRS;
1592        err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf),
1593                   errs, qib_7322p_error_msgs);
1594
1595        if (errs & QIB_E_P_SDMAUNEXPDATA)
1596                qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
1597                            ppd->port);
1598
1599        spin_lock_irqsave(&ppd->sdma_lock, flags);
1600
1601        if (errs != QIB_E_P_SDMAHALT) {
1602                /* SDMA errors have QIB_E_P_SDMAHALT and another bit set */
1603                qib_dev_porterr(dd, ppd->port,
1604                        "SDMA %s 0x%016llx %s\n",
1605                        qib_sdma_state_names[ppd->sdma_state.current_state],
1606                        errs, ppd->cpspec->sdmamsgbuf);
1607                dump_sdma_7322_state(ppd);
1608        }
1609
1610        switch (ppd->sdma_state.current_state) {
1611        case qib_sdma_state_s00_hw_down:
1612                break;
1613
1614        case qib_sdma_state_s10_hw_start_up_wait:
1615                if (errs & QIB_E_P_SDMAHALT)
1616                        __qib_sdma_process_event(ppd,
1617                                qib_sdma_event_e20_hw_started);
1618                break;
1619
1620        case qib_sdma_state_s20_idle:
1621                break;
1622
1623        case qib_sdma_state_s30_sw_clean_up_wait:
1624                break;
1625
1626        case qib_sdma_state_s40_hw_clean_up_wait:
1627                if (errs & QIB_E_P_SDMAHALT)
1628                        __qib_sdma_process_event(ppd,
1629                                qib_sdma_event_e50_hw_cleaned);
1630                break;
1631
1632        case qib_sdma_state_s50_hw_halt_wait:
1633                if (errs & QIB_E_P_SDMAHALT)
1634                        __qib_sdma_process_event(ppd,
1635                                qib_sdma_event_e60_hw_halted);
1636                break;
1637
1638        case qib_sdma_state_s99_running:
1639                __qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
1640                __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
1641                break;
1642        }
1643
1644        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1645}
1646
1647/*
1648 * handle per-device errors (not per-port errors)
1649 */
1650static noinline void handle_7322_errors(struct qib_devdata *dd)
1651{
1652        char *msg;
1653        u64 iserr = 0;
1654        u64 errs;
1655        u64 mask;
1656
1657        qib_stats.sps_errints++;
1658        errs = qib_read_kreg64(dd, kr_errstatus);
1659        if (!errs) {
1660                qib_devinfo(dd->pcidev,
1661                        "device error interrupt, but no error bits set!\n");
1662                goto done;
1663        }
1664
1665        /* don't report errors that are masked */
1666        errs &= dd->cspec->errormask;
1667        msg = dd->cspec->emsgbuf;
1668
1669        /* do these first, they are most important */
1670        if (errs & QIB_E_HARDWARE) {
1671                *msg = '\0';
1672                qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
1673        }
1674
1675        if (errs & QIB_E_SPKTERRS) {
1676                qib_disarm_7322_senderrbufs(dd->pport);
1677                qib_stats.sps_txerrs++;
1678        } else if (errs & QIB_E_INVALIDADDR)
1679                qib_stats.sps_txerrs++;
1680        else if (errs & QIB_E_ARMLAUNCH) {
1681                qib_stats.sps_txerrs++;
1682                qib_disarm_7322_senderrbufs(dd->pport);
1683        }
1684        qib_write_kreg(dd, kr_errclear, errs);
1685
1686        /*
1687         * The ones we mask off are handled specially below
1688         * or above.  Also mask SDMADISABLED by default as it
1689         * is too chatty.
1690         */
1691        mask = QIB_E_HARDWARE;
1692        *msg = '\0';
1693
1694        err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask,
1695                   qib_7322error_msgs);
1696
1697        /*
1698         * Getting reset is a tragedy for all ports. Mark the device
1699         * _and_ the ports as "offline" in a way meaningful to each.
1700         */
1701        if (errs & QIB_E_RESET) {
1702                int pidx;
1703
1704                qib_dev_err(dd,
1705                        "Got reset, requires re-init (unload and reload driver)\n");
1706                dd->flags &= ~QIB_INITTED;  /* needs re-init */
1707                /* mark as having had error */
1708                *dd->devstatusp |= QIB_STATUS_HWERROR;
1709                for (pidx = 0; pidx < dd->num_pports; ++pidx)
1710                        if (dd->pport[pidx].link_speed_supported)
1711                                *dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
1712        }
1713
1714        if (*msg && iserr)
1715                qib_dev_err(dd, "%s error\n", msg);
1716
1717        /*
1718         * If there were hdrq or egrfull errors, wake up any processes
1719         * waiting in poll.  We used to try to check which contexts had
1720         * the overflow, but given the cost of that and the chip reads
1721         * to support it, it's better to just wake everybody up if we
1722         * get an overflow; waiters can poll again if it's not them.
1723         */
1724        if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
1725                qib_handle_urcv(dd, ~0U);
1726                if (errs & ERR_MASK(RcvEgrFullErr))
1727                        qib_stats.sps_buffull++;
1728                else
1729                        qib_stats.sps_hdrfull++;
1730        }
1731
1732done:
1733        return;
1734}
1735
1736static void qib_error_tasklet(unsigned long data)
1737{
1738        struct qib_devdata *dd = (struct qib_devdata *)data;
1739
1740        handle_7322_errors(dd);
1741        qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1742}
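/*
 * A sketch of the expected wiring (the actual registration lives in
 * the interrupt-setup code elsewhere in this file): the device-error
 * interrupt path masks error interrupts and schedules this tasklet,
 * which handles the errors and then restores errormask:
 *
 *	tasklet_init(&dd->error_tasklet, qib_error_tasklet,
 *		     (unsigned long)dd);
 */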
1743
1744static void reenable_chase(struct timer_list *t)
1745{
1746        struct qib_chippport_specific *cp = from_timer(cp, t, chase_timer);
1747        struct qib_pportdata *ppd = cp->ppd;
1748
1749        ppd->cpspec->chase_timer.expires = 0;
1750        qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1751                QLOGIC_IB_IBCC_LINKINITCMD_POLL);
1752}
1753
1754static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow,
1755                u8 ibclt)
1756{
1757        ppd->cpspec->chase_end = 0;
1758
1759        if (!qib_chase)
1760                return;
1761
1762        qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1763                QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1764        ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
1765        add_timer(&ppd->cpspec->chase_timer);
1766}
1767
1768static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
1769{
1770        u8 ibclt;
1771        unsigned long tnow;
1772
1773        ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
1774
1775        /*
1776         * Detect and handle the state chase issue, where we can
1777         * get stuck if we are unlucky on timing on both sides of
1778         * the link.   If we are, we disable, set a timer, and
1779         * then re-enable.
1780         */
1781        switch (ibclt) {
1782        case IB_7322_LT_STATE_CFGRCVFCFG:
1783        case IB_7322_LT_STATE_CFGWAITRMT:
1784        case IB_7322_LT_STATE_TXREVLANES:
1785        case IB_7322_LT_STATE_CFGENH:
1786                tnow = jiffies;
1787                if (ppd->cpspec->chase_end &&
1788                     time_after(tnow, ppd->cpspec->chase_end))
1789                        disable_chase(ppd, tnow, ibclt);
1790                else if (!ppd->cpspec->chase_end)
1791                        ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
1792                break;
1793        default:
1794                ppd->cpspec->chase_end = 0;
1795                break;
1796        }
1797
1798        if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
1799              ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
1800             ibclt == IB_7322_LT_STATE_LINKUP) &&
1801            (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
1802                force_h1(ppd);
1803                ppd->cpspec->qdr_reforce = 1;
1804                if (!ppd->dd->cspec->r1)
1805                        serdes_7322_los_enable(ppd, 0);
1806        } else if (ppd->cpspec->qdr_reforce &&
1807                (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
1808                 (ibclt == IB_7322_LT_STATE_CFGENH ||
1809                ibclt == IB_7322_LT_STATE_CFGIDLE ||
1810                ibclt == IB_7322_LT_STATE_LINKUP))
1811                force_h1(ppd);
1812
1813        if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
1814            ppd->link_speed_enabled == QIB_IB_QDR &&
1815            (ibclt == IB_7322_LT_STATE_CFGTEST ||
1816             ibclt == IB_7322_LT_STATE_CFGENH ||
1817             (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
1818              ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
1819                adj_tx_serdes(ppd);
1820
1821        if (ibclt != IB_7322_LT_STATE_LINKUP) {
1822                u8 ltstate = qib_7322_phys_portstate(ibcst);
1823                u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
1824                                          LinkTrainingState);
1825                if (!ppd->dd->cspec->r1 &&
1826                    pibclt == IB_7322_LT_STATE_LINKUP &&
1827                    ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1828                    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1829                    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1830                    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1831                        /* If the link went down (but not into recovery),
1832                         * turn LOS back on */
1833                        serdes_7322_los_enable(ppd, 1);
1834                if (!ppd->cpspec->qdr_dfe_on &&
1835                    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
1836                        ppd->cpspec->qdr_dfe_on = 1;
1837                        ppd->cpspec->qdr_dfe_time = 0;
1838                        /* On link down, reenable QDR adaptation */
1839                        qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
1840                                            ppd->dd->cspec->r1 ?
1841                                            QDR_STATIC_ADAPT_DOWN_R1 :
1842                                            QDR_STATIC_ADAPT_DOWN);
1843                        pr_info(
1844                                "IB%u:%u re-enabled QDR adaptation ibclt %x\n",
1845                                ppd->dd->unit, ppd->port, ibclt);
1846                }
1847        }
1848}
1849
1850static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
1851
1852/*
1853 * This is per-pport error handling.
1854 * It will likely get its own MSIx interrupt (one for each port,
1855 * although just a single handler).
1856 */
1857static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1858{
1859        char *msg;
1860        u64 ignore_this_time = 0, iserr = 0, errs, fmask;
1861        struct qib_devdata *dd = ppd->dd;
1862
1863        /* do this as soon as possible */
1864        fmask = qib_read_kreg64(dd, kr_act_fmask);
1865        if (!fmask)
1866                check_7322_rxe_status(ppd);
1867
1868        errs = qib_read_kreg_port(ppd, krp_errstatus);
1869        if (!errs)
1870                qib_devinfo(dd->pcidev,
1871                         "Port%d error interrupt, but no error bits set!\n",
1872                         ppd->port);
1873        if (!fmask)
1874                errs &= ~QIB_E_P_IBSTATUSCHANGED;
1875        if (!errs)
1876                goto done;
1877
1878        msg = ppd->cpspec->epmsgbuf;
1879        *msg = '\0';
1880
1881        if (errs & ~QIB_E_P_BITSEXTANT) {
1882                err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1883                           errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
1884                if (!*msg)
1885                        snprintf(msg, sizeof(ppd->cpspec->epmsgbuf),
1886                                 "no others");
1887                qib_dev_porterr(dd, ppd->port,
1888                        "error interrupt with unknown errors 0x%016Lx set (and %s)\n",
1889                        (errs & ~QIB_E_P_BITSEXTANT), msg);
1890                *msg = '\0';
1891        }
1892
1893        if (errs & QIB_E_P_SHDR) {
1894                u64 symptom;
1895
1896                /* determine cause, then write to clear */
1897                symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
1898                qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
1899                err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom,
1900                           hdrchk_msgs);
1901                *msg = '\0';
1902                /* senderrbuf cleared in SPKTERRS below */
1903        }
1904
1905        if (errs & QIB_E_P_SPKTERRS) {
1906                if ((errs & QIB_E_P_LINK_PKTERRS) &&
1907                    !(ppd->lflags & QIBL_LINKACTIVE)) {
1908                        /*
1909                         * This can happen when trying to bring the link
1910                         * up, but the IB link changes state at the "wrong"
1911                         * time. The IB logic then complains that the packet
1912                         * isn't valid.  We don't want to confuse people, so
1913                         * we just don't print them, except at debug
1914                         */
1915                        err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1916                                   (errs & QIB_E_P_LINK_PKTERRS),
1917                                   qib_7322p_error_msgs);
1918                        *msg = '\0';
1919                        ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1920                }
1921                qib_disarm_7322_senderrbufs(ppd);
1922        } else if ((errs & QIB_E_P_LINK_PKTERRS) &&
1923                   !(ppd->lflags & QIBL_LINKACTIVE)) {
1924                /*
1925                 * This can happen when SMA is trying to bring the link
1926                 * up, but the IB link changes state at the "wrong" time.
1927                 * The IB logic then complains that the packet isn't
1928                 * valid.  We don't want to confuse people, so we just
1929                 * don't print them, except at debug
1930                 */
1931                err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs,
1932                           qib_7322p_error_msgs);
1933                ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1934                *msg = '\0';
1935        }
1936
1937        qib_write_kreg_port(ppd, krp_errclear, errs);
1938
1939        errs &= ~ignore_this_time;
1940        if (!errs)
1941                goto done;
1942
1943        if (errs & QIB_E_P_RPKTERRS)
1944                qib_stats.sps_rcverrs++;
1945        if (errs & QIB_E_P_SPKTERRS)
1946                qib_stats.sps_txerrs++;
1947
1948        iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
1949
1950        if (errs & QIB_E_P_SDMAERRS)
1951                sdma_7322_p_errors(ppd, errs);
1952
1953        if (errs & QIB_E_P_IBSTATUSCHANGED) {
1954                u64 ibcs;
1955                u8 ltstate;
1956
1957                ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
1958                ltstate = qib_7322_phys_portstate(ibcs);
1959
1960                if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
1961                        handle_serdes_issues(ppd, ibcs);
1962                if (!(ppd->cpspec->ibcctrl_a &
1963                      SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
1964                        /*
1965                         * We got our interrupt, so init code should be
1966                         * happy and not try alternatives. Now squelch
1967                         * other "chatter" from link-negotiation (pre Init)
1968                         */
1969                        ppd->cpspec->ibcctrl_a |=
1970                                SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
1971                        qib_write_kreg_port(ppd, krp_ibcctrl_a,
1972                                            ppd->cpspec->ibcctrl_a);
1973                }
1974
1975                /* Update our picture of width and speed from chip */
1976                ppd->link_width_active =
1977                        (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
1978                            IB_WIDTH_4X : IB_WIDTH_1X;
1979                ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
1980                        LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
1981                          SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
1982                                   QIB_IB_DDR : QIB_IB_SDR;
1983
1984                if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
1985                    IB_PHYSPORTSTATE_DISABLED)
1986                        qib_set_ib_7322_lstate(ppd, 0,
1987                               QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1988                else
1989                        /*
1990                         * Since going into a recovery state causes the link
1991                         * state to go down and since recovery is transitory,
1992                         * it is better if we "miss" ever seeing the link
1993                         * training state go into recovery (i.e., ignore this
1994                         * transition for link state special handling purposes)
1995                         * without updating lastibcstat.
1996                         */
1997                        if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1998                            ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1999                            ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
2000                            ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
2001                                qib_handle_e_ibstatuschanged(ppd, ibcs);
2002        }
2003        if (*msg && iserr)
2004                qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
2005
2006        if (ppd->state_wanted & ppd->lflags)
2007                wake_up_interruptible(&ppd->state_wait);
2008done:
2009        return;
2010}
2011
2012/* enable/disable chip from delivering interrupts */
2013static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
2014{
2015        if (enable) {
2016                if (dd->flags & QIB_BADINTR)
2017                        return;
2018                qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
2019                /* cause any pending enabled interrupts to be re-delivered */
2020                qib_write_kreg(dd, kr_intclear, 0ULL);
2021                if (dd->cspec->num_msix_entries) {
2022                        /* and same for MSIx */
2023                        u64 val = qib_read_kreg64(dd, kr_intgranted);
2024
2025                        if (val)
2026                                qib_write_kreg(dd, kr_intgranted, val);
2027                }
2028        } else
2029                qib_write_kreg(dd, kr_intmask, 0ULL);
2030}
2031
2032/*
2033 * Try to cleanup as much as possible for anything that might have gone
2034 * wrong while in freeze mode, such as pio buffers being written by user
2035 * processes (causing armlaunch), send errors due to going into freeze mode,
2036 * etc., and try to avoid causing extra interrupts while doing so.
2037 * Forcibly update the in-memory pioavail register copies after cleanup
2038 * because the chip won't do it while in freeze mode (the register values
2039 * themselves are kept correct).
2040 * Make sure that we don't lose any important interrupts by using the chip
2041 * feature that says that writing 0 to a bit in *clear that is set in
2042 * *status will cause an interrupt to be generated again (if allowed by
2043 * the *mask value).
2044 * This is in chip-specific code because of all of the register accesses,
2045 * even though the details are similar on most chips.
2046 */
2047static void qib_7322_clear_freeze(struct qib_devdata *dd)
2048{
2049        int pidx;
2050
2051        /* disable error interrupts, to avoid confusion */
2052        qib_write_kreg(dd, kr_errmask, 0ULL);
2053
2054        for (pidx = 0; pidx < dd->num_pports; ++pidx)
2055                if (dd->pport[pidx].link_speed_supported)
2056                        qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2057                                            0ULL);
2058
2059        /* also disable interrupts; errormask is sometimes overwritten */
2060        qib_7322_set_intr_state(dd, 0);
2061
2062        /* clear the freeze, and be sure chip saw it */
2063        qib_write_kreg(dd, kr_control, dd->control);
2064        qib_read_kreg32(dd, kr_scratch);
2065
2066        /*
2067         * Force new interrupt if any hwerr, error or interrupt bits are
2068         * still set, and clear "safe" send packet errors related to freeze
2069         * and cancelling sends.  Re-enable error interrupts before possible
2070         * force of re-interrupt on pending interrupts.
2071         */
2072        qib_write_kreg(dd, kr_hwerrclear, 0ULL);
2073        qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
2074        qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2075        /* We need to purge per-port errs and reset mask, too */
2076        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
2077                if (!dd->pport[pidx].link_speed_supported)
2078                        continue;
2079                qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0ULL);
2080                qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0ULL);
2081        }
2082        qib_7322_set_intr_state(dd, 1);
2083}
2084
2085/* no error handling to speak of */
2086/**
2087 * qib_7322_handle_hwerrors - display hardware errors.
2088 * @dd: the qlogic_ib device
2089 * @msg: the output buffer
2090 * @msgl: the size of the output buffer
2091 *
2092 * Most hardware errors are catastrophic, but for right now,
2093 * we'll print them and continue.
2094 * We reuse the same message buffer as qib_handle_errors()
2095 * to avoid excessive stack usage.
2096 */
2097static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
2098                                     size_t msgl)
2099{
2100        u64 hwerrs;
2101        u32 ctrl;
2102        int isfatal = 0;
2103
2104        hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
2105        if (!hwerrs)
2106                goto bail;
2107        if (hwerrs == ~0ULL) {
2108                qib_dev_err(dd,
2109                        "Read of hardware error status failed (all bits set); ignoring\n");
2110                goto bail;
2111        }
2112        qib_stats.sps_hwerrs++;
2113
2114        /* Always clear the error status register, except BIST fail */
2115        qib_write_kreg(dd, kr_hwerrclear, hwerrs &
2116                       ~HWE_MASK(PowerOnBISTFailed));
2117
2118        hwerrs &= dd->cspec->hwerrmask;
2119
2120        /* no EEPROM logging, yet */
2121
2122        if (hwerrs)
2123                qib_devinfo(dd->pcidev,
2124                        "Hardware error: hwerr=0x%llx (cleared)\n",
2125                        (unsigned long long) hwerrs);
2126
2127        ctrl = qib_read_kreg32(dd, kr_control);
2128        if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
2129                /*
2130                 * No recovery yet...
2131                 */
2132                if ((hwerrs & ~HWE_MASK(LATriggered)) ||
2133                    dd->cspec->stay_in_freeze) {
2134                        /*
2135                         * If any bits are set that we aren't ignoring, only
2136                         * make the complaint once, in case it's stuck or
2137                         * recurring and we get here multiple times.
2138                         * Force the link down, so the switch knows and the
2139                         * LEDs are turned off.
2140                         */
2141                        if (dd->flags & QIB_INITTED)
2142                                isfatal = 1;
2143                } else
2144                        qib_7322_clear_freeze(dd);
2145        }
2146
2147        if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
2148                isfatal = 1;
2149                strlcpy(msg,
2150                        "[Memory BIST test failed, InfiniPath hardware unusable]",
2151                        msgl);
2152                /* ignore from now on, so disable until driver reloaded */
2153                dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
2154                qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2155        }
2156
2157        err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);
2158
2159        /* Ignore esoteric PLL failures et al. */
2160
2161        qib_dev_err(dd, "%s hardware error\n", msg);
2162
2163        if (hwerrs &
2164                   (SYM_MASK(HwErrMask, SDmaMemReadErrMask_0) |
2165                    SYM_MASK(HwErrMask, SDmaMemReadErrMask_1))) {
2166                int pidx = 0;
2167                int err;
2168                unsigned long flags;
2169                struct qib_pportdata *ppd = dd->pport;
2170
2171                for (; pidx < dd->num_pports; ++pidx, ppd++) {
2172                        err = 0;
2173                        if (pidx == 0 && (hwerrs &
2174                                SYM_MASK(HwErrMask, SDmaMemReadErrMask_0)))
2175                                err++;
2176                        if (pidx == 1 && (hwerrs &
2177                                SYM_MASK(HwErrMask, SDmaMemReadErrMask_1)))
2178                                err++;
2179                        if (err) {
2180                                spin_lock_irqsave(&ppd->sdma_lock, flags);
2181                                dump_sdma_7322_state(ppd);
2182                                spin_unlock_irqrestore(&ppd->sdma_lock, flags);
2183                        }
2184                }
2185        }
2186
2187        if (isfatal && !dd->diag_client) {
2188                qib_dev_err(dd,
2189                        "Fatal Hardware Error, no longer usable, SN %.16s\n",
2190                        dd->serial);
2191                /*
2192                 * for /sys status file and user programs to print; if no
2193                 * trailing brace is copied, we'll know it was truncated.
2194                 */
2195                if (dd->freezemsg)
2196                        snprintf(dd->freezemsg, dd->freezelen,
2197                                 "{%s}", msg);
2198                qib_disable_after_error(dd);
2199        }
2200bail:;
2201}
2202
2203/**
2204 * qib_7322_init_hwerrors - enable hardware errors
2205 * @dd: the qlogic_ib device
2206 *
2207 * now that we have finished initializing everything that might reasonably
2208 * cause a hardware error, and cleared those error bits as they occur,
2209 * we can enable hardware errors in the mask (potentially enabling
2210 * freeze mode), and enable hardware errors as errors (along with
2211 * everything else) in errormask
2212 */
2213static void qib_7322_init_hwerrors(struct qib_devdata *dd)
2214{
2215        int pidx;
2216        u64 extsval;
2217
2218        extsval = qib_read_kreg64(dd, kr_extstatus);
2219        if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
2220                         QIB_EXTS_MEMBIST_ENDTEST)))
2221                qib_dev_err(dd, "MemBIST did not complete!\n");
2222
2223        /* never clear BIST failure, so reported on each driver load */
2224        qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
2225        qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2226
2227        /* clear all */
2228        qib_write_kreg(dd, kr_errclear, ~0ULL);
2229        /* enable errors that are masked, at least this first time. */
2230        qib_write_kreg(dd, kr_errmask, ~0ULL);
2231        dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
2232        for (pidx = 0; pidx < dd->num_pports; ++pidx)
2233                if (dd->pport[pidx].link_speed_supported)
2234                        qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2235                                            ~0ULL);
2236}
2237
2238/*
2239 * Disable and enable the armlaunch error.  Used for PIO bandwidth testing
2240 * on chips that are count-based, rather than trigger-based.  There is no
2241 * reference counting, but that's also fine, given the intended use.
2242 * Only chip-specific because it's all register accesses
2243 */
2244static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
2245{
2246        if (enable) {
2247                qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
2248                dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
2249        } else
2250                dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
2251        qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2252}
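/*
 * Usage sketch: a PIO bandwidth test would call
 * qib_set_7322_armlaunch(dd, 0) before hammering the buffers, so the
 * deliberate armlaunch conditions don't flood the error path, and
 * qib_set_7322_armlaunch(dd, 1) when done.
 */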
2253
2254/*
2255 * Formerly took parameter <which> in pre-shifted,
2256 * pre-merged form with LinkCmd and LinkInitCmd
2257 * together, and assuming the zero was NOP.
2258 */
2259static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
2260                                   u16 linitcmd)
2261{
2262        u64 mod_wd;
2263        struct qib_devdata *dd = ppd->dd;
2264        unsigned long flags;
2265
2266        if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
2267                /*
2268                 * If we are told to disable, note that so link-recovery
2269                 * code does not attempt to bring us back up.
2270                 * Also reset everything that we can, so we start
2271                 * completely clean when re-enabled (before we
2272                 * actually issue the disable to the IBC)
2273                 */
2274                qib_7322_mini_pcs_reset(ppd);
2275                spin_lock_irqsave(&ppd->lflags_lock, flags);
2276                ppd->lflags |= QIBL_IB_LINK_DISABLED;
2277                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2278        } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
2279                /*
2280                 * Any other linkinitcmd will lead to LINKDOWN and then
2281                 * to INIT (if all is well), so clear flag to let
2282                 * link-recovery code attempt to bring us back up.
2283                 */
2284                spin_lock_irqsave(&ppd->lflags_lock, flags);
2285                ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
2286                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2287                /*
2288                 * Clear status change interrupt reduction so the
2289                 * new state is seen.
2290                 */
2291                ppd->cpspec->ibcctrl_a &=
2292                        ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
2293        }
2294
2295        mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
2296                (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2297
2298        qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
2299                            mod_wd);
2300        /* write to chip to prevent back-to-back writes of ibc reg */
2301        qib_write_kreg(dd, kr_scratch, 0);
2302
2303}
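/*
 * Example, as used by reenable_chase() earlier in this file: take the
 * link down and immediately return it to polling:
 *
 *	qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
 *			       QLOGIC_IB_IBCC_LINKINITCMD_POLL);
 */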
2304
2305/*
2306 * The total RCV buffer memory is 64KB, used for both ports, and is
2307 * in units of 64 bytes (same as IB flow control credit unit).
2308 * The consumedVL fields in the same registers are in 32 byte units!
2309 * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
2310 * and we can therefore allocate just 9 IB credits for 2 VL15 packets
2311 * in krp_rxcreditvl15, rather than 10.
2312 */
2313#define RCV_BUF_UNITSZ 64
2314#define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
2315
2316static void set_vls(struct qib_pportdata *ppd)
2317{
2318        int i, numvls, totcred, cred_vl, vl0extra;
2319        struct qib_devdata *dd = ppd->dd;
2320        u64 val;
2321
2322        numvls = qib_num_vls(ppd->vls_operational);
2323
2324        /*
2325         * Set up per-VL credits. Below is kluge based on these assumptions:
2326         * 1) port is disabled at the time early_init is called.
2327         * 2) give VL15 9 buffer units, enough for two max-plausible packets.
2328         * 3) Give VL0-N the rest, with any rounding excess used for VL0
2329         */
2330        /* 2 VL15 packets @ 288 bytes each (including IB headers) */
2331        totcred = NUM_RCV_BUF_UNITS(dd);
2332        cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
2333        totcred -= cred_vl;
2334        qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
2335        cred_vl = totcred / numvls;
2336        vl0extra = totcred - cred_vl * numvls;
2337        qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
2338        for (i = 1; i < numvls; i++)
2339                qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
2340        for (; i < 8; i++) /* no buffer space for other VLs */
2341                qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
2342
2343        /* Notify IBC that credits need to be recalculated */
2344        val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
2345        val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2346        qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2347        qib_write_kreg(dd, kr_scratch, 0ULL);
2348        val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2349        qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2350
2351        for (i = 0; i < numvls; i++)
2352                val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
2353        val = qib_read_kreg_port(ppd, krp_rxcreditvl15);
2354
2355        /* Change the number of operational VLs */
2356        ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
2357                                ~SYM_MASK(IBCCtrlA_0, NumVLane)) |
2358                ((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
2359        qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2360        qib_write_kreg(dd, kr_scratch, 0ULL);
2361}
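/*
 * Worked example of the credit math above for a two-port board:
 * NUM_RCV_BUF_UNITS = 64KB / (64 * 2) = 512 units per port.  VL15 gets
 * (2 * 288 + 63) / 64 = 9 units, leaving 503.  With 4 operational VLs,
 * cred_vl = 503 / 4 = 125 and vl0extra = 3, so VL0 ends up with 128
 * units, VL1-VL3 with 125 each, and VL4-VL7 with none.
 */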
2362
2363/*
2364 * The code that deals with actual SerDes is in serdes_7322_init().
2365 * Compared to the code for iba7220, it is minimal.
2366 */
2367static int serdes_7322_init(struct qib_pportdata *ppd);
2368
2369/**
2370 * qib_7322_bringup_serdes - bring up the serdes
2371 * @ppd: physical port on the qlogic_ib device
2372 */
2373static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2374{
2375        struct qib_devdata *dd = ppd->dd;
2376        u64 val, guid, ibc;
2377        unsigned long flags;
2378
2379        /*
2380         * SerDes model not in Pd, but still need to
2381         * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
2382         * eventually.
2383         */
2384        /* Put IBC in reset, sends disabled (should be in reset already) */
2385        ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2386        qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2387        qib_write_kreg(dd, kr_scratch, 0ULL);
2388
2389        /* ensure previous Tx parameters are not still forced */
2390        qib_write_kreg_port(ppd, krp_tx_deemph_override,
2391                SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
2392                reset_tx_deemphasis_override));
2393
2394        if (qib_compat_ddr_negotiate) {
2395                ppd->cpspec->ibdeltainprog = 1;
2396                ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
2397                                                crp_ibsymbolerr);
2398                ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
2399                                                crp_iblinkerrrecov);
2400        }
2401
2402        /* flowcontrolwatermark is in units of KBytes */
2403        ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
2404        /*
2405         * Flow control is sent this often, even if no changes in
2406         * buffer space occur.  Units are 128ns for this chip.
2407         * Set to 3usec.
2408         */
2409        ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
2410        /* max error tolerance */
2411        ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
2412        /* IB credit flow control. */
2413        ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
2414        /*
2415         * set initial max size pkt IBC will send, including ICRC; it's the
2416         * PIO buffer size in dwords, less 1; also see qib_set_mtu()
2417         */
2418        ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
2419                SYM_LSB(IBCCtrlA_0, MaxPktLen);
2420        ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
2421
2422        /*
2423         * Reset the PCS interface to the serdes (and also ibc, which is still
2424         * in reset from above).  Writes new value of ibcctrl_a as last step.
2425         */
2426        qib_7322_mini_pcs_reset(ppd);
2427
2428        if (!ppd->cpspec->ibcctrl_b) {
2429                unsigned lse = ppd->link_speed_enabled;
2430
2431                /*
2432                 * Not on re-init after reset, establish shadow
2433                 * and force initial config.
2434                 */
2435                ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
2436                                                             krp_ibcctrl_b);
2437                ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
2438                                IBA7322_IBC_SPEED_DDR |
2439                                IBA7322_IBC_SPEED_SDR |
2440                                IBA7322_IBC_WIDTH_AUTONEG |
2441                                SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
2442                if (lse & (lse - 1)) /* Multiple speeds enabled */
2443                        ppd->cpspec->ibcctrl_b |=
2444                                (lse << IBA7322_IBC_SPEED_LSB) |
2445                                IBA7322_IBC_IBTA_1_2_MASK |
2446                                IBA7322_IBC_MAX_SPEED_MASK;
2447                else
2448                        ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
2449                                IBA7322_IBC_SPEED_QDR |
2450                                 IBA7322_IBC_IBTA_1_2_MASK :
2451                                (lse == QIB_IB_DDR) ?
2452                                        IBA7322_IBC_SPEED_DDR :
2453                                        IBA7322_IBC_SPEED_SDR;
2454                if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
2455                    (IB_WIDTH_1X | IB_WIDTH_4X))
2456                        ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
2457                else
2458                        ppd->cpspec->ibcctrl_b |=
2459                                ppd->link_width_enabled == IB_WIDTH_4X ?
2460                                IBA7322_IBC_WIDTH_4X_ONLY :
2461                                IBA7322_IBC_WIDTH_1X_ONLY;
2462
2463                /* always enable these on driver reload, not sticky */
2464                ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
2465                        IBA7322_IBC_HRTBT_MASK);
2466        }
2467        qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
2468
2469        /* setup so we have more time at CFGTEST to change H1 */
2470        val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
2471        val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
2472        val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
2473        qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
2474
2475        serdes_7322_init(ppd);
2476
2477        guid = be64_to_cpu(ppd->guid);
2478        if (!guid) {
2479                if (dd->base_guid)
2480                        guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
2481                ppd->guid = cpu_to_be64(guid);
2482        }
2483
2484        qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
2485        /* write to chip to prevent back-to-back writes of ibc reg */
2486        qib_write_kreg(dd, kr_scratch, 0);
2487
2488        /* Enable port */
2489        ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
2490        set_vls(ppd);
2491
2492        /* initially come up DISABLED, without sending anything. */
2493        val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
2494                                        QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2495        qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
2496        qib_write_kreg(dd, kr_scratch, 0ULL);
2497        /* clear the linkinit cmds */
2498        ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
2499
2500        /* be paranoid against later code motion, etc. */
2501        spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2502        ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
2503        qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
2504        spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2505
2506        /* Also enable IBSTATUSCHG interrupt.  */
2507        val = qib_read_kreg_port(ppd, krp_errmask);
2508        qib_write_kreg_port(ppd, krp_errmask,
2509                val | ERR_MASK_N(IBStatusChanged));
2510
2511        /* Always zero until we start messing with SerDes for real */
2512        return 0;
2513}
2514
2515/**
2516 * qib_7322_mini_quiet_serdes - set serdes to txidle
2517 * @ppd: physical port on the qlogic_ib device
2518 * Called when the driver is being unloaded
2519 */
2520static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
2521{
2522        u64 val;
2523        unsigned long flags;
2524
2525        qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
2526
2527        spin_lock_irqsave(&ppd->lflags_lock, flags);
2528        ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
2529        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2530        wake_up(&ppd->cpspec->autoneg_wait);
2531        cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
2532        if (ppd->dd->cspec->r1)
2533                cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
2534
2535        ppd->cpspec->chase_end = 0;
2536        if (ppd->cpspec->chase_timer.function) /* if initted */
2537                del_timer_sync(&ppd->cpspec->chase_timer);
2538
2539        /*
2540         * Despite the name, actually disables IBC as well. Do it when
2541         * we are as sure as possible that no more packets can be
2542         * received, following the down and the PCS reset.
2543         * The actual disabling happens in qib_7322_mini_pcs_reset(),
2544         * along with the PCS being reset.
2545         */
2546        ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2547        qib_7322_mini_pcs_reset(ppd);
2548
2549        /*
2550         * Update the adjusted counters so the adjustment persists
2551         * across driver reload.
2552         */
2553        if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
2554            ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
2555                struct qib_devdata *dd = ppd->dd;
2556                u64 diagc;
2557
2558                /* enable counter writes */
2559                diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
2560                qib_write_kreg(dd, kr_hwdiagctrl,
2561                               diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
2562
2563                if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
2564                        val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
2565                        if (ppd->cpspec->ibdeltainprog)
2566                                val -= val - ppd->cpspec->ibsymsnap;
2567                        val -= ppd->cpspec->ibsymdelta;
2568                        write_7322_creg_port(ppd, crp_ibsymbolerr, val);
2569                }
2570                if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
2571                        val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
2572                        if (ppd->cpspec->ibdeltainprog)
2573                                val -= val - ppd->cpspec->iblnkerrsnap;
2574                        val -= ppd->cpspec->iblnkerrdelta;
2575                        write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
2576                }
2577                if (ppd->cpspec->iblnkdowndelta) {
2578                        val = read_7322_creg32_port(ppd, crp_iblinkdown);
2579                        val += ppd->cpspec->iblnkdowndelta;
2580                        write_7322_creg_port(ppd, crp_iblinkdown, val);
2581                }
2582                /*
2583                 * No need to save ibmalfdelta since IB perfcounters
2584                 * are cleared on driver reload.
2585                 */
2586
2587                /* and disable counter writes */
2588                qib_write_kreg(dd, kr_hwdiagctrl, diagc);
2589        }
2590}
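
    /*
     * In the two snapshot blocks above, "val -= val - <snap>" is just
     * "val = <snap>": when an adjustment was still in progress, the
     * counter is first rolled back to its snapshot, and only then is the
     * accumulated delta subtracted before the value is written back.
     */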
2591
2592/**
2593 * qib_setup_7322_setextled - set the state of the two external LEDs
2594 * @ppd: physical port on the qlogic_ib device
2595 * @on: whether the link is up or not
2596 *
2597 * The exact combination of LEDs when @on is true is determined by
2598 * looking at the ibcstatus.
2599 *
2600 * These LEDs indicate the physical and logical state of the IB link.
2601 * For this chip (at least with recommended board pinouts), LED1
2602 * is Yellow (logical state) and LED2 is Green (physical state).
2603 *
2604 * Note:  We try to match the Mellanox HCA LED behavior as best
2605 * we can.  Green indicates physical link state is OK (something is
2606 * plugged in, and we can train).
2607 * Amber indicates the link is logically up (ACTIVE).
2608 * Mellanox further blinks the amber LED to indicate data packet
2609 * activity, but we have no hardware support for that, so it would
2610 * require waking up every 10-20 msecs and checking the counters
2611 * on the chip, and then turning the LED off if appropriate.  That's
2612 * visible overhead, so not something we will do.
2613 */
2614static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
2615{
2616        struct qib_devdata *dd = ppd->dd;
2617        u64 extctl, ledblink = 0, val;
2618        unsigned long flags;
2619        int yel, grn;
2620
2621        /*
2622         * The diags use the LED to indicate diag info, so we leave
2623         * the external LED alone when the diags are running.
2624         */
2625        if (dd->diag_client)
2626                return;
2627
2628        /* Allow override of LED display, e.g. for locating a system in a rack */
2629        if (ppd->led_override) {
2630                grn = (ppd->led_override & QIB_LED_PHYS);
2631                yel = (ppd->led_override & QIB_LED_LOG);
2632        } else if (on) {
2633                val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
2634                grn = qib_7322_phys_portstate(val) ==
2635                        IB_PHYSPORTSTATE_LINKUP;
2636                yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
2637        } else {
2638                grn = 0;
2639                yel = 0;
2640        }
2641
2642        spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2643        extctl = dd->cspec->extctrl & (ppd->port == 1 ?
2644                ~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
2645        if (grn) {
2646                extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
2647                /*
2648                 * Counts are in chip clock (4ns) periods.
2649                 * This is about 1/15 sec (66.6 ms) on,
2650                 * 3/16 sec (187.5 ms) off, with packets rcvd.
2651                 */
2652                ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
2653                        ((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
2654        }
2655        if (yel)
2656                extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
2657        dd->cspec->extctrl = extctl;
2658        qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
2659        spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2660
2661        if (ledblink) /* blink the LED on packet receive */
2662                qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
2663}
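
    /*
     * Worked example of the blink encoding above: LED counts are in 4 ns
     * chip-clock ticks, so 66600 us on is 66600 * 1000 / 4 = 16,650,000
     * ticks and 187500 us off is 46,875,000 ticks, each shifted into its
     * field of the rcvpktledcnt register.
     */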
2664
2665#ifdef CONFIG_INFINIBAND_QIB_DCA
2666
2667static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event)
2668{
2669        switch (event) {
2670        case DCA_PROVIDER_ADD:
2671                if (dd->flags & QIB_DCA_ENABLED)
2672                        break;
2673                if (!dca_add_requester(&dd->pcidev->dev)) {
2674                        qib_devinfo(dd->pcidev, "DCA enabled\n");
2675                        dd->flags |= QIB_DCA_ENABLED;
2676                        qib_setup_dca(dd);
2677                }
2678                break;
2679        case DCA_PROVIDER_REMOVE:
2680                if (dd->flags & QIB_DCA_ENABLED) {
2681                        dca_remove_requester(&dd->pcidev->dev);
2682                        dd->flags &= ~QIB_DCA_ENABLED;
2683                        dd->cspec->dca_ctrl = 0;
2684                        qib_write_kreg(dd, KREG_IDX(DCACtrlA),
2685                                dd->cspec->dca_ctrl);
2686                }
2687                break;
2688        }
2689        return 0;
2690}
2691
2692static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd, int cpu)
2693{
2694        struct qib_devdata *dd = rcd->dd;
2695        struct qib_chip_specific *cspec = dd->cspec;
2696
2697        if (!(dd->flags & QIB_DCA_ENABLED))
2698                return;
2699        if (cspec->rhdr_cpu[rcd->ctxt] != cpu) {
2700                const struct dca_reg_map *rmp;
2701
2702                cspec->rhdr_cpu[rcd->ctxt] = cpu;
2703                rmp = &dca_rcvhdr_reg_map[rcd->ctxt];
2704                cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask;
2705                cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |=
2706                        (u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb;
2707                qib_devinfo(dd->pcidev,
2708                        "Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu,
2709                        (long long) cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2710                qib_write_kreg(dd, rmp->regno,
2711                               cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2712                cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable);
2713                qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2714        }
2715}
2716
2717static void qib_update_sdma_dca(struct qib_pportdata *ppd, int cpu)
2718{
2719        struct qib_devdata *dd = ppd->dd;
2720        struct qib_chip_specific *cspec = dd->cspec;
2721        unsigned pidx = ppd->port - 1;
2722
2723        if (!(dd->flags & QIB_DCA_ENABLED))
2724                return;
2725        if (cspec->sdma_cpu[pidx] != cpu) {
2726                cspec->sdma_cpu[pidx] = cpu;
2727                cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ?
2728                        SYM_MASK(DCACtrlF, SendDma1DCAOPH) :
2729                        SYM_MASK(DCACtrlF, SendDma0DCAOPH));
2730                cspec->dca_rcvhdr_ctrl[4] |=
2731                        (u64) dca3_get_tag(&dd->pcidev->dev, cpu) <<
2732                                (ppd->hw_pidx ?
2733                                        SYM_LSB(DCACtrlF, SendDma1DCAOPH) :
2734                                        SYM_LSB(DCACtrlF, SendDma0DCAOPH));
2735                qib_devinfo(dd->pcidev,
2736                        "sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu,
2737                        (long long) cspec->dca_rcvhdr_ctrl[4]);
2738                qib_write_kreg(dd, KREG_IDX(DCACtrlF),
2739                               cspec->dca_rcvhdr_ctrl[4]);
2740                cspec->dca_ctrl |= ppd->hw_pidx ?
2741                        SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) :
2742                        SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable);
2743                qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2744        }
2745}
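
    /*
     * Note that dca_rcvhdr_ctrl[4] shadows DCACtrlF, which is shared: it
     * holds the rcvhdr queue 16/17 fields as well as the two SendDma OPH
     * fields updated here, so this path and qib_update_rhdrq_dca()
     * read-modify-write the same shadow word.
     */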
2746
2747static void qib_setup_dca(struct qib_devdata *dd)
2748{
2749        struct qib_chip_specific *cspec = dd->cspec;
2750        int i;
2751
2752        for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++)
2753                cspec->rhdr_cpu[i] = -1;
2754        for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2755                cspec->sdma_cpu[i] = -1;
2756        cspec->dca_rcvhdr_ctrl[0] =
2757                (1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) |
2758                (1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) |
2759                (1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) |
2760                (1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt));
2761        cspec->dca_rcvhdr_ctrl[1] =
2762                (1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) |
2763                (1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) |
2764                (1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) |
2765                (1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt));
2766        cspec->dca_rcvhdr_ctrl[2] =
2767                (1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) |
2768                (1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) |
2769                (1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) |
2770                (1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt));
2771        cspec->dca_rcvhdr_ctrl[3] =
2772                (1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) |
2773                (1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) |
2774                (1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) |
2775                (1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt));
2776        cspec->dca_rcvhdr_ctrl[4] =
2777                (1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) |
2778                (1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt));
2779        for (i = 0; i < ARRAY_SIZE(cspec->dca_rcvhdr_ctrl); i++)
2780                qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
2781                               cspec->dca_rcvhdr_ctrl[i]);
2782        for (i = 0; i < cspec->num_msix_entries; i++)
2783                setup_dca_notifier(dd, i);
2784}
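
    /*
     * The five shadow words initialized above map one-to-one onto the
     * DCACtrlB..F registers (KREG_IDX(DCACtrlB) + i), with every
     * RcvHdrqNDCAXfrCnt field primed to a transfer count of 1; the
     * per-queue CPU tags are filled in later by qib_update_rhdrq_dca().
     */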
2785
2786static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
2787                             const cpumask_t *mask)
2788{
2789        struct qib_irq_notify *n =
2790                container_of(notify, struct qib_irq_notify, notify);
2791        int cpu = cpumask_first(mask);
2792
2793        if (n->rcv) {
2794                struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2795
2796                qib_update_rhdrq_dca(rcd, cpu);
2797        } else {
2798                struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2799
2800                qib_update_sdma_dca(ppd, cpu);
2801        }
2802}
2803
2804static void qib_irq_notifier_release(struct kref *ref)
2805{
2806        struct qib_irq_notify *n =
2807                container_of(ref, struct qib_irq_notify, notify.kref);
2808        struct qib_devdata *dd;
2809
2810        if (n->rcv) {
2811                struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2812
2813                dd = rcd->dd;
2814        } else {
2815                struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2816
2817                dd = ppd->dd;
2818        }
2819        qib_devinfo(dd->pcidev,
2820                "release on HCA notify 0x%p n 0x%p\n", ref, n);
2821        kfree(n);
2822}
2823#endif
2824
2825static void qib_7322_free_irq(struct qib_devdata *dd)
2826{
2827        u64 intgranted;
2828        int i;
2829
2830        dd->cspec->main_int_mask = ~0ULL;
2831
2832        for (i = 0; i < dd->cspec->num_msix_entries; i++) {
2833                /* only free IRQs that were allocated */
2834                if (dd->cspec->msix_entries[i].arg) {
2835#ifdef CONFIG_INFINIBAND_QIB_DCA
2836                        reset_dca_notifier(dd, i);
2837#endif
2838                        irq_set_affinity_hint(pci_irq_vector(dd->pcidev, i),
2839                                              NULL);
2840                        free_cpumask_var(dd->cspec->msix_entries[i].mask);
2841                        pci_free_irq(dd->pcidev, i,
2842                                     dd->cspec->msix_entries[i].arg);
2843                }
2844        }
2845
2846        /* If num_msix_entries was 0, free the INTx IRQ */
2847        if (!dd->cspec->num_msix_entries)
2848                pci_free_irq(dd->pcidev, 0, dd);
2849        else
2850                dd->cspec->num_msix_entries = 0;
2851
2852        pci_free_irq_vectors(dd->pcidev);
2853
2854        /* make sure no MSIx interrupts are left pending */
2855        intgranted = qib_read_kreg64(dd, kr_intgranted);
2856        if (intgranted)
2857                qib_write_kreg(dd, kr_intgranted, intgranted);
2858}
2859
2860static void qib_setup_7322_cleanup(struct qib_devdata *dd)
2861{
2862        int i;
2863
2864#ifdef CONFIG_INFINIBAND_QIB_DCA
2865        if (dd->flags & QIB_DCA_ENABLED) {
2866                dca_remove_requester(&dd->pcidev->dev);
2867                dd->flags &= ~QIB_DCA_ENABLED;
2868                dd->cspec->dca_ctrl = 0;
2869                qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl);
2870        }
2871#endif
2872
2873        qib_7322_free_irq(dd);
2874        kfree(dd->cspec->cntrs);
2875        kfree(dd->cspec->sendchkenable);
2876        kfree(dd->cspec->sendgrhchk);
2877        kfree(dd->cspec->sendibchk);
2878        kfree(dd->cspec->msix_entries);
2879        for (i = 0; i < dd->num_pports; i++) {
2880                unsigned long flags;
2881                u32 mask = QSFP_GPIO_MOD_PRS_N |
2882                        (QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);
2883
2884                kfree(dd->pport[i].cpspec->portcntrs);
2885                if (dd->flags & QIB_HAS_QSFP) {
2886                        spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2887                        dd->cspec->gpio_mask &= ~mask;
2888                        qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2889                        spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2890                }
2891        }
2892}
2893
2894/* handle SDMA interrupts */
2895static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
2896{
2897        struct qib_pportdata *ppd0 = &dd->pport[0];
2898        struct qib_pportdata *ppd1 = &dd->pport[1];
2899        u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
2900                INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
2901        u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
2902                INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));
2903
2904        if (intr0)
2905                qib_sdma_intr(ppd0);
2906        if (intr1)
2907                qib_sdma_intr(ppd1);
2908
2909        if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
2910                qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
2911        if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
2912                qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
2913}
2914
2915/*
2916 * Set or clear the Send buffer available interrupt enable bit.
2917 */
2918static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
2919{
2920        unsigned long flags;
2921
2922        spin_lock_irqsave(&dd->sendctrl_lock, flags);
2923        if (needint)
2924                dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
2925        else
2926                dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
2927        qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
2928        qib_write_kreg(dd, kr_scratch, 0ULL);
2929        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2930}
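
    /*
     * As in the link bring-up code, the scratch-register write after
     * updating sendctrl flushes the control write to the chip before the
     * lock is dropped.
     */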
2931
2932/*
2933 * Somehow got an interrupt with reserved bits set in interrupt status.
2934 * Print a message so we know it happened, then clear them.
2935 * Keep the mainline interrupt handler cache-friendly.
2936 */
2937static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
2938{
2939        u64 kills;
2940
2941        kills = istat & ~QIB_I_BITSEXTANT;
2942        qib_dev_err(dd,
2943                "Clearing reserved interrupt(s) 0x%016llx\n",
2944                (unsigned long long) kills);
2946        qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
2947}
2948
2949/* keep mainline interrupt handler cache-friendly */
2950static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
2951{
2952        u32 gpiostatus;
2953        int handled = 0;
2954        int pidx;
2955
2956        /*
2957         * Boards for this chip currently don't use GPIO interrupts,
2958         * so clear by writing GPIOstatus to GPIOclear, and complain
2959         * to developer.  To avoid endless repeats, clear
2960         * the bits in the mask, since there is some kind of
2961         * programming error or chip problem.
2962         */
2963        gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
2964        /*
2965         * In theory, writing GPIOstatus to GPIOclear could
2966         * have a bad side-effect on some diagnostic that wanted
2967         * to poll for a status-change, but the various shadows
2968         * make that problematic at best. Diags will just suppress
2969         * all GPIO interrupts during such tests.
2970         */
2971        qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
2972        /*
2973         * Check for QSFP MOD_PRS changes
2974         * only works for single port if IB1 != pidx1
2975         */
2976        for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
2977             ++pidx) {
2978                struct qib_pportdata *ppd;
2979                struct qib_qsfp_data *qd;
2980                u32 mask;
2981
2982                if (!dd->pport[pidx].link_speed_supported)
2983                        continue;
2984                mask = QSFP_GPIO_MOD_PRS_N;
2985                ppd = dd->pport + pidx;
2986                mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
2987                if (gpiostatus & dd->cspec->gpio_mask & mask) {
2988                        u64 pins;
2989
2990                        qd = &ppd->cpspec->qsfp_data;
2991                        gpiostatus &= ~mask;
2992                        pins = qib_read_kreg64(dd, kr_extstatus);
2993                        pins >>= SYM_LSB(EXTStatus, GPIOIn);
2994                        if (!(pins & mask)) {
2995                                ++handled;
2996                                qd->t_insert = jiffies;
2997                                queue_work(ib_wq, &qd->work);
2998                        }
2999                }
3000        }
3001
3002        if (gpiostatus && !handled) {
3003                const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
3004                u32 gpio_irq = mask & gpiostatus;
3005
3006                /*
3007                 * Clear any troublemakers, and update chip from shadow
3008                 */
3009                dd->cspec->gpio_mask &= ~gpio_irq;
3010                qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
3011        }
3012}
3013
3014/*
3015 * Handle errors and unusual events first, separate function
3016 * to improve cache hits for fast path interrupt handling.
3017 */
3018static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
3019{
3020        if (istat & ~QIB_I_BITSEXTANT)
3021                unknown_7322_ibits(dd, istat);
3022        if (istat & QIB_I_GPIO)
3023                unknown_7322_gpio_intr(dd);
3024        if (istat & QIB_I_C_ERROR) {
3025                qib_write_kreg(dd, kr_errmask, 0ULL);
3026                tasklet_schedule(&dd->error_tasklet);
3027        }
3028        if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
3029                handle_7322_p_errors(dd->rcd[0]->ppd);
3030        if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
3031                handle_7322_p_errors(dd->rcd[1]->ppd);
3032}
3033
3034/*
3035 * Dynamically adjust the rcv int timeout for a context based on incoming
3036 * packet rate.
3037 */
3038static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
3039{
3040        struct qib_devdata *dd = rcd->dd;
3041        u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
3042
3043        /*
3044         * Dynamically adjust idle timeout on chip
3045         * based on number of packets processed.
3046         */
3047        if (npkts < rcv_int_count && timeout > 2)
3048                timeout >>= 1;
3049        else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
3050                timeout = min(timeout << 1, rcv_int_timeout);
3051        else
3052                return;
3053
3054        dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
3055        qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
3056}
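
    /*
     * Example of the adaptation above (bounds come from the
     * rcv_int_count and rcv_int_timeout tunables): with a current
     * timeout of 8 ticks, a quiet interrupt (npkts below rcv_int_count)
     * halves it to 4, a busy one doubles it to 16 (clamped at
     * rcv_int_timeout), and if neither case applies the chip register is
     * left untouched.
     */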
3057
3058/*
3059 * This is the main interrupt handler.
3060 * It will normally only be used for low frequency interrupts but may
3061 * have to handle all interrupts if INTx is enabled or fewer than normal
3062 * MSIx interrupts were allocated.
3063 * This routine should ignore the interrupt bits for any of the
3064 * dedicated MSIx handlers.
3065 */
3066static irqreturn_t qib_7322intr(int irq, void *data)
3067{
3068        struct qib_devdata *dd = data;
3069        irqreturn_t ret;
3070        u64 istat;
3071        u64 ctxtrbits;
3072        u64 rmask;
3073        unsigned i;
3074        u32 npkts;
3075
3076        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
3077                /*
3078                 * This return value is not great, but we do not want the
3079                 * interrupt core code to remove our interrupt handler
3080                 * because we don't appear to be handling an interrupt
3081                 * during a chip reset.
3082                 */
3083                ret = IRQ_HANDLED;
3084                goto bail;
3085        }
3086
3087        istat = qib_read_kreg64(dd, kr_intstatus);
3088
3089        if (unlikely(istat == ~0ULL)) {
3090                qib_bad_intrstatus(dd);
3091                qib_dev_err(dd, "Interrupt status all f's, skipping\n");
3092                /* don't know if it was our interrupt or not */
3093                ret = IRQ_NONE;
3094                goto bail;
3095        }
3096
3097        istat &= dd->cspec->main_int_mask;
3098        if (unlikely(!istat)) {
3099                /* already handled, or shared and not us */
3100                ret = IRQ_NONE;
3101                goto bail;
3102        }
3103
3104        this_cpu_inc(*dd->int_counter);
3105
3106        /* handle "errors" of various kinds first, device ahead of port */
3107        if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
3108                              QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
3109                              INT_MASK_P(Err, 1))))
3110                unlikely_7322_intr(dd, istat);
3111
3112        /*
3113         * Clear the interrupt bits we found set, relatively early, so we
3114         * "know" the chip will have seen this by the time we process
3115         * the queue, and will re-interrupt if necessary.  The processor
3116         * itself won't take the interrupt again until we return.
3117         */
3118        qib_write_kreg(dd, kr_intclear, istat);
3119
3120        /*
3121         * Handle kernel receive queues before checking for pio buffers
3122         * available since receives can overflow; piobuf waiters can afford
3123         * a few extra cycles, since they were waiting anyway.
3124         */
3125        ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
3126        if (ctxtrbits) {
3127                rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
3128                        (1ULL << QIB_I_RCVURG_LSB);
3129                for (i = 0; i < dd->first_user_ctxt; i++) {
3130                        if (ctxtrbits & rmask) {
3131                                ctxtrbits &= ~rmask;
3132                                if (dd->rcd[i])
3133                                        qib_kreceive(dd->rcd[i], NULL, &npkts);
3134                        }
3135                        rmask <<= 1;
3136                }
3137                if (ctxtrbits) {
3138                        ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
3139                                (ctxtrbits >> QIB_I_RCVURG_LSB);
3140                        qib_handle_urcv(dd, ctxtrbits);
3141                }
3142        }
3143
3144        if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
3145                sdma_7322_intr(dd, istat);
3146
3147        if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
3148                qib_ib_piobufavail(dd);
3149
3150        ret = IRQ_HANDLED;
3151bail:
3152        return ret;
3153}
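
    /*
     * In the receive loop above, each context owns one RcvAvail and one
     * RcvUrg bit; rmask starts at those two LSBs and shifts left once
     * per kernel context, so whatever remains in ctxtrbits belongs to
     * user contexts and is folded back down to per-context bits for
     * qib_handle_urcv().
     */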
3154
3155/*
3156 * Dedicated receive packet available interrupt handler.
3157 */
3158static irqreturn_t qib_7322pintr(int irq, void *data)
3159{
3160        struct qib_ctxtdata *rcd = data;
3161        struct qib_devdata *dd = rcd->dd;
3162        u32 npkts;
3163
3164        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3165                /*
3166                 * This return value is not great, but we do not want the
3167                 * interrupt core code to remove our interrupt handler
3168                 * because we don't appear to be handling an interrupt
3169                 * during a chip reset.
3170                 */
3171                return IRQ_HANDLED;
3172
3173        this_cpu_inc(*dd->int_counter);
3174
3175        /* Clear the interrupt bit we expect to be set. */
3176        qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
3177                       (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
3178
3179        qib_kreceive(rcd, NULL, &npkts);
3180
3181        return IRQ_HANDLED;
3182}
3183
3184/*
3185 * Dedicated Send buffer available interrupt handler.
3186 */
3187static irqreturn_t qib_7322bufavail(int irq, void *data)
3188{
3189        struct qib_devdata *dd = data;
3190
3191        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3192                /*
3193                 * This return value is not great, but we do not want the
3194                 * interrupt core code to remove our interrupt handler
3195                 * because we don't appear to be handling an interrupt
3196                 * during a chip reset.
3197                 */
3198                return IRQ_HANDLED;
3199
3200        this_cpu_inc(*dd->int_counter);
3201
3202        /* Clear the interrupt bit we expect to be set. */
3203        qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);
3204
3205        /* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
3206        if (dd->flags & QIB_INITTED)
3207                qib_ib_piobufavail(dd);
3208        else
3209                qib_wantpiobuf_7322_intr(dd, 0);
3210
3211        return IRQ_HANDLED;
3212}
3213
3214/*
3215 * Dedicated Send DMA interrupt handler.
3216 */
3217static irqreturn_t sdma_intr(int irq, void *data)
3218{
3219        struct qib_pportdata *ppd = data;
3220        struct qib_devdata *dd = ppd->dd;
3221
3222        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3223                /*
3224                 * This return value is not great, but we do not want the
3225                 * interrupt core code to remove our interrupt handler
3226                 * because we don't appear to be handling an interrupt
3227                 * during a chip reset.
3228                 */
3229                return IRQ_HANDLED;
3230
3231        this_cpu_inc(*dd->int_counter);
3232
3233        /* Clear the interrupt bit we expect to be set. */
3234        qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3235                       INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
3236        qib_sdma_intr(ppd);
3237
3238        return IRQ_HANDLED;
3239}
3240
3241/*
3242 * Dedicated Send DMA idle interrupt handler.
3243 */
3244static irqreturn_t sdma_idle_intr(int irq, void *data)
3245{
3246        struct qib_pportdata *ppd = data;
3247        struct qib_devdata *dd = ppd->dd;
3248
3249        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3250                /*
3251                 * This return value is not great, but we do not want the
3252                 * interrupt core code to remove our interrupt handler
3253                 * because we don't appear to be handling an interrupt
3254                 * during a chip reset.
3255                 */
3256                return IRQ_HANDLED;
3257
3258        this_cpu_inc(*dd->int_counter);
3259
3260        /* Clear the interrupt bit we expect to be set. */
3261        qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3262                       INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
3263        qib_sdma_intr(ppd);
3264
3265        return IRQ_HANDLED;
3266}
3267
3268/*
3269 * Dedicated Send DMA progress interrupt handler.
3270 */
3271static irqreturn_t sdma_progress_intr(int irq, void *data)
3272{
3273        struct qib_pportdata *ppd = data;
3274        struct qib_devdata *dd = ppd->dd;
3275
3276        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3277                /*
3278                 * This return value is not great, but we do not want the
3279                 * interrupt core code to remove our interrupt handler
3280                 * because we don't appear to be handling an interrupt
3281                 * during a chip reset.
3282                 */
3283                return IRQ_HANDLED;
3284
3285        this_cpu_inc(*dd->int_counter);
3286
3287        /* Clear the interrupt bit we expect to be set. */
3288        qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3289                       INT_MASK_P(SDmaProgress, 1) :
3290                       INT_MASK_P(SDmaProgress, 0));
3291        qib_sdma_intr(ppd);
3292
3293        return IRQ_HANDLED;
3294}
3295
3296/*
3297 * Dedicated Send DMA cleanup interrupt handler.
3298 */
3299static irqreturn_t sdma_cleanup_intr(int irq, void *data)
3300{
3301        struct qib_pportdata *ppd = data;
3302        struct qib_devdata *dd = ppd->dd;
3303
3304        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3305                /*
3306                 * This return value is not great, but we do not want the
3307                 * interrupt core code to remove our interrupt handler
3308                 * because we don't appear to be handling an interrupt
3309                 * during a chip reset.
3310                 */
3311                return IRQ_HANDLED;
3312
3313        this_cpu_inc(*dd->int_counter);
3314
3315        /* Clear the interrupt bit we expect to be set. */
3316        qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3317                       INT_MASK_PM(SDmaCleanupDone, 1) :
3318                       INT_MASK_PM(SDmaCleanupDone, 0));
3319        qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
3320
3321        return IRQ_HANDLED;
3322}
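
    /*
     * All four dedicated SDMA handlers above follow the same pattern:
     * bail during a chip reset, bump the interrupt counter, clear only
     * their own per-port status bit, then hand off to the common SDMA
     * code.
     */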
3323
3324#ifdef CONFIG_INFINIBAND_QIB_DCA
3325
3326static void reset_dca_notifier(struct qib_devdata *dd, int msixnum)
3327{
3328        if (!dd->cspec->msix_entries[msixnum].dca)
3329                return;
3330
3331        qib_devinfo(dd->pcidev, "Disabling notifier on HCA %d irq %d\n",
3332                    dd->unit, pci_irq_vector(dd->pcidev, msixnum));
3333        irq_set_affinity_notifier(pci_irq_vector(dd->pcidev, msixnum), NULL);
3334        dd->cspec->msix_entries[msixnum].notifier = NULL;
3335}
3336
3337static void setup_dca_notifier(struct qib_devdata *dd, int msixnum)
3338{
3339        struct qib_msix_entry *m = &dd->cspec->msix_entries[msixnum];
3340        struct qib_irq_notify *n;
3341
3342        if (!m->dca)
3343                return;
3344        n = kzalloc(sizeof(*n), GFP_KERNEL);
3345        if (n) {
3346                int ret;
3347
3348                m->notifier = n;
3349                n->notify.irq = pci_irq_vector(dd->pcidev, msixnum);
3350                n->notify.notify = qib_irq_notifier_notify;
3351                n->notify.release = qib_irq_notifier_release;
3352                n->arg = m->arg;
3353                n->rcv = m->rcv;
3354                qib_devinfo(dd->pcidev,
3355                        "set notifier irq %d rcv %d notify %p\n",
3356                        n->notify.irq, n->rcv, &n->notify);
3357                ret = irq_set_affinity_notifier(
3358                                n->notify.irq,
3359                                &n->notify);
3360                if (ret) {
3361                        m->notifier = NULL;
3362                        kfree(n);
3363                }
3364        }
3365}
3366
3367#endif
3368
3369/*
3370 * Set up our chip-specific interrupt handler.
3371 * The interrupt type has already been setup, so
3372 * we just need to do the registration and error checking.
3373 * If we are using MSIx interrupts, we may fall back to
3374 * INTx later, if the interrupt handler doesn't get called
3375 * within 1/2 second (see verify_interrupt()).
3376 */
3377static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
3378{
3379        int ret, i, msixnum;
3380        u64 redirect[6];
3381        u64 mask;
3382        const struct cpumask *local_mask;
3383        int firstcpu, secondcpu = 0, currrcvcpu = 0;
3384
3385        if (!dd->num_pports)
3386                return;
3387
3388        if (clearpend) {
3389                /*
3390                 * if not switching interrupt types, be sure interrupts are
3391                 * disabled, and then clear anything pending at this point,
3392                 * because we are starting clean.
3393                 */
3394                qib_7322_set_intr_state(dd, 0);
3395
3396                /* clear the reset error, init error/hwerror mask */
3397                qib_7322_init_hwerrors(dd);
3398
3399                /* clear any interrupt bits that might be set */
3400                qib_write_kreg(dd, kr_intclear, ~0ULL);
3401
3402                /* make sure no pending MSIx intr, and clear diag reg */
3403                qib_write_kreg(dd, kr_intgranted, ~0ULL);
3404                qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
3405        }
3406
3407        if (!dd->cspec->num_msix_entries) {
3408                /* Try to get INTx interrupt */
3409try_intx:
3410                ret = pci_request_irq(dd->pcidev, 0, qib_7322intr, NULL, dd,
3411                                      QIB_DRV_NAME);
3412                if (ret) {
3413                        qib_dev_err(
3414                                dd,
3415                                "Couldn't setup INTx interrupt (irq=%d): %d\n",
3416                                pci_irq_vector(dd->pcidev, 0), ret);
3417                        return;
3418                }
3419                dd->cspec->main_int_mask = ~0ULL;
3420                return;
3421        }
3422
3423        /* Try to get MSIx interrupts */
3424        memset(redirect, 0, sizeof(redirect));
3425        mask = ~0ULL;
3426        msixnum = 0;
3427        local_mask = cpumask_of_pcibus(dd->pcidev->bus);
3428        firstcpu = cpumask_first(local_mask);
3429        if (firstcpu >= nr_cpu_ids ||
3430                        cpumask_weight(local_mask) == num_online_cpus()) {
3431                local_mask = topology_core_cpumask(0);
3432                firstcpu = cpumask_first(local_mask);
3433        }
3434        if (firstcpu < nr_cpu_ids) {
3435                secondcpu = cpumask_next(firstcpu, local_mask);
3436                if (secondcpu >= nr_cpu_ids)
3437                        secondcpu = firstcpu;
3438                currrcvcpu = secondcpu;
3439        }
3440        for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
3441                irq_handler_t handler;
3442                void *arg;
3443                int lsb, reg, sh;
3444#ifdef CONFIG_INFINIBAND_QIB_DCA
3445                int dca = 0;
3446#endif
3447                if (i < ARRAY_SIZE(irq_table)) {
3448                        if (irq_table[i].port) {
3449                                /* skip if for a non-configured port */
3450                                if (irq_table[i].port > dd->num_pports)
3451                                        continue;
3452                                arg = dd->pport + irq_table[i].port - 1;
3453                        } else
3454                                arg = dd;
3455#ifdef CONFIG_INFINIBAND_QIB_DCA
3456                        dca = irq_table[i].dca;
3457#endif
3458                        lsb = irq_table[i].lsb;
3459                        handler = irq_table[i].handler;
3460                        ret = pci_request_irq(dd->pcidev, msixnum, handler,
3461                                              NULL, arg, QIB_DRV_NAME "%d%s",
3462                                              dd->unit,
3463                                              irq_table[i].name);
3464                } else {
3465                        unsigned ctxt;
3466
3467                        ctxt = i - ARRAY_SIZE(irq_table);
3468                        /* per krcvq context receive interrupt */
3469                        arg = dd->rcd[ctxt];
3470                        if (!arg)
3471                                continue;
3472                        if (qib_krcvq01_no_msi && ctxt < 2)
3473                                continue;
3474#ifdef CONFIG_INFINIBAND_QIB_DCA
3475                        dca = 1;
3476#endif
3477                        lsb = QIB_I_RCVAVAIL_LSB + ctxt;
3478                        handler = qib_7322pintr;
3479                        ret = pci_request_irq(dd->pcidev, msixnum, handler,
3480                                              NULL, arg,
3481                                              QIB_DRV_NAME "%d (kctx)",
3482                                              dd->unit);
3483                }
3484
3485                if (ret) {
3486                        /*
3487                         * Shouldn't happen since the enable said we could
3488                         * have as many as we are trying to setup here.
3489                         */
3490                        qib_dev_err(dd,
3491                                    "Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
3492                                    msixnum,
3493                                    pci_irq_vector(dd->pcidev, msixnum),
3494                                    ret);
3495                        qib_7322_free_irq(dd);
3496                        pci_alloc_irq_vectors(dd->pcidev, 1, 1,
3497                                              PCI_IRQ_LEGACY);
3498                        goto try_intx;
3499                }
3500                dd->cspec->msix_entries[msixnum].arg = arg;
3501#ifdef CONFIG_INFINIBAND_QIB_DCA
3502                dd->cspec->msix_entries[msixnum].dca = dca;
3503                dd->cspec->msix_entries[msixnum].rcv =
3504                        handler == qib_7322pintr;
3505#endif
3506                if (lsb >= 0) {
3507                        reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
3508                        sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
3509                                SYM_LSB(IntRedirect0, vec1);
3510                        mask &= ~(1ULL << lsb);
3511                        redirect[reg] |= ((u64) msixnum) << sh;
3512                }
3513                qib_read_kreg64(dd, 2 * msixnum + 1 +
3514                                (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3515                if (firstcpu < nr_cpu_ids &&
3516                        zalloc_cpumask_var(
3517                                &dd->cspec->msix_entries[msixnum].mask,
3518                                GFP_KERNEL)) {
3519                        if (handler == qib_7322pintr) {
3520                                cpumask_set_cpu(currrcvcpu,
3521                                        dd->cspec->msix_entries[msixnum].mask);
3522                                currrcvcpu = cpumask_next(currrcvcpu,
3523                                        local_mask);
3524                                if (currrcvcpu >= nr_cpu_ids)
3525                                        currrcvcpu = secondcpu;
3526                        } else {
3527                                cpumask_set_cpu(firstcpu,
3528                                        dd->cspec->msix_entries[msixnum].mask);
3529                        }
3530                        irq_set_affinity_hint(
3531                                pci_irq_vector(dd->pcidev, msixnum),
3532                                dd->cspec->msix_entries[msixnum].mask);
3533                }
3534                msixnum++;
3535        }
3536        /* Initialize the vector mapping */
3537        for (i = 0; i < ARRAY_SIZE(redirect); i++)
3538                qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
3539        dd->cspec->main_int_mask = mask;
3540        tasklet_init(&dd->error_tasklet, qib_error_tasklet,
3541                (unsigned long)dd);
3542}
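
    /*
     * Redirect-table math used above: each IntRedirect register holds
     * IBA7322_REDIRECT_VEC_PER_REG vector fields, so an interrupt source
     * at bit position lsb lands in register lsb / VEC_PER_REG, shifted
     * by (lsb % VEC_PER_REG) times the per-vector field width, and the
     * MSIx vector number is OR'd into that field.
     */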
3543
3544/**
3545 * qib_7322_boardname - fill in the board name and note features
3546 * @dd: the qlogic_ib device
3547 *
3548 * info will be based on the board revision register
3549 */
3550static unsigned qib_7322_boardname(struct qib_devdata *dd)
3551{
3552        /* Will need enumeration of board-types here */
3553        u32 boardid;
3554        unsigned int features = DUAL_PORT_CAP;
3555
3556        boardid = SYM_FIELD(dd->revision, Revision, BoardID);
3557
3558        switch (boardid) {
3559        case 0:
3560                dd->boardname = "InfiniPath_QLE7342_Emulation";
3561                break;
3562        case 1:
3563                dd->boardname = "InfiniPath_QLE7340";
3564                dd->flags |= QIB_HAS_QSFP;
3565                features = PORT_SPD_CAP;
3566                break;
3567        case 2:
3568                dd->boardname = "InfiniPath_QLE7342";
3569                dd->flags |= QIB_HAS_QSFP;
3570                break;
3571        case 3:
3572                dd->boardname = "InfiniPath_QMI7342";
3573                break;
3574        case 4:
3575                dd->boardname = "InfiniPath_Unsupported7342";
3576                qib_dev_err(dd, "Unsupported version of QMH7342\n");
3577                features = 0;
3578                break;
3579        case BOARD_QMH7342:
3580                dd->boardname = "InfiniPath_QMH7342";
3581                features = 0x24;
3582                break;
3583        case BOARD_QME7342:
3584                dd->boardname = "InfiniPath_QME7342";
3585                break;
3586        case 8:
3587                dd->boardname = "InfiniPath_QME7362";
3588                dd->flags |= QIB_HAS_QSFP;
3589                break;
3590        case BOARD_QMH7360:
3591                dd->boardname = "Intel IB QDR 1P FLR-QSFP Adptr";
3592                dd->flags |= QIB_HAS_QSFP;
3593                break;
3594        case 15:
3595                dd->boardname = "InfiniPath_QLE7342_TEST";
3596                dd->flags |= QIB_HAS_QSFP;
3597                break;
3598        default:
3599                dd->boardname = "InfiniPath_QLE73xy_UNKNOWN";
3600                qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
3601                break;
3602        }
3603        dd->board_atten = 1; /* index into txdds_Xdr */
3604
3605        snprintf(dd->boardversion, sizeof(dd->boardversion),
3606                 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
3607                 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
3608                 (unsigned int)SYM_FIELD(dd->revision, Revision_R, Arch),
3609                 dd->majrev, dd->minrev,
3610                 (unsigned int)SYM_FIELD(dd->revision, Revision_R, SW));
3611
3612        if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
3613                qib_devinfo(dd->pcidev,
3614                            "IB%u: Forced to single port mode by module parameter\n",
3615                            dd->unit);
3616                features &= PORT_SPD_CAP;
3617        }
3618
3619        return features;
3620}
3621
3622/*
3623 * This routine sleeps, so it can only be called from user context, not
3624 * from interrupt context.
3625 */
3626static int qib_do_7322_reset(struct qib_devdata *dd)
3627{
3628        u64 val;
3629        u64 *msix_vecsave = NULL;
3630        int i, msix_entries, ret = 1;
3631        u16 cmdval;
3632        u8 int_line, clinesz;
3633        unsigned long flags;
3634
3635        /* Use dev_err so it shows up in logs, etc. */
3636        qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
3637
3638        qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
3639
3640        msix_entries = dd->cspec->num_msix_entries;
3641
3642        /* no interrupts till re-initted */
3643        qib_7322_set_intr_state(dd, 0);
3644
3645        qib_7322_free_irq(dd);
3646
3647        if (msix_entries) {
3648                /* can be up to 512 bytes, too big for stack */
3649                msix_vecsave = kmalloc_array(2 * dd->cspec->num_msix_entries,
3650                                             sizeof(u64),
3651                                             GFP_KERNEL);
3652        }
3653
3654        /*
3655         * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
3656         * info that is set up by the BIOS, so we have to save and restore
3657         * it ourselves.   There is some risk something could change it,
3658         * after we save it, but since we have disabled the MSIx, it
3659         * shouldn't be touched...
3660         */
3661        for (i = 0; i < msix_entries; i++) {
3662                u64 vecaddr, vecdata;
3663
3664                vecaddr = qib_read_kreg64(dd, 2 * i +
3665                                  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3666                vecdata = qib_read_kreg64(dd, 1 + 2 * i +
3667                                  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3668                if (msix_vecsave) {
3669                        msix_vecsave[2 * i] = vecaddr;
3670                        /* save it without the masked bit set */
3671                        msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
3672                }
3673        }
3674
3675        dd->pport->cpspec->ibdeltainprog = 0;
3676        dd->pport->cpspec->ibsymdelta = 0;
3677        dd->pport->cpspec->iblnkerrdelta = 0;
3678        dd->pport->cpspec->ibmalfdelta = 0;
3679        /* so we check interrupts work again */
3680        dd->z_int_counter = qib_int_counter(dd);
3681
3682        /*
3683         * Keep chip from being accessed until we are ready.  Use
3684         * writeq() directly, to allow the write even though QIB_PRESENT
3685         * isn't set.
3686         */
3687        dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
3688        dd->flags |= QIB_DOING_RESET;
3689        val = dd->control | QLOGIC_IB_C_RESET;
3690        writeq(val, &dd->kregbase[kr_control]);
3691
3692        for (i = 1; i <= 5; i++) {
3693                /*
3694                 * Allow MBIST, etc. to complete; longer on each retry.
3695                 * We sometimes get machine checks from bus timeout if no
3696                 * response, so for now, make it *really* long.
3697                 */
3698                msleep(1000 + (1 + i) * 3000);
3699
3700                qib_pcie_reenable(dd, cmdval, int_line, clinesz);
3701
3702                /*
3703                 * Use readq directly, so we don't need to mark it as PRESENT
3704                 * until we get a successful indication that all is well.
3705                 */
3706                val = readq(&dd->kregbase[kr_revision]);
3707                if (val == dd->revision)
3708                        break;
3709                if (i == 5) {
3710                        qib_dev_err(dd,
3711                                "Failed to initialize after reset, unusable\n");
3712                        ret = 0;
3713                        goto  bail;
3714                }
3715        }
3716
3717        dd->flags |= QIB_PRESENT; /* it's back */
3718
3719        if (msix_entries) {
3720                /* restore the MSIx vector address and data if saved above */
3721                for (i = 0; i < msix_entries; i++) {
3722                        if (!msix_vecsave || !msix_vecsave[2 * i])
3723                                continue;
3724                        qib_write_kreg(dd, 2 * i +
3725                                (QIB_7322_MsixTable_OFFS / sizeof(u64)),
3726                                msix_vecsave[2 * i]);
3727                        qib_write_kreg(dd, 1 + 2 * i +
3728                                (QIB_7322_MsixTable_OFFS / sizeof(u64)),
3729                                msix_vecsave[1 + 2 * i]);
3730                }
3731        }
3732
3733        /* initialize the remaining registers.  */
3734        for (i = 0; i < dd->num_pports; ++i)
3735                write_7322_init_portregs(&dd->pport[i]);
3736        write_7322_initregs(dd);
3737
3738        if (qib_pcie_params(dd, dd->lbus_width, &msix_entries))
3739                qib_dev_err(dd,
3740                        "Reset failed to setup PCIe or interrupts; continuing anyway\n");
3741
3742        dd->cspec->num_msix_entries = msix_entries;
3743        qib_setup_7322_interrupt(dd, 1);
3744
3745        for (i = 0; i < dd->num_pports; ++i) {
3746                struct qib_pportdata *ppd = &dd->pport[i];
3747
3748                spin_lock_irqsave(&ppd->lflags_lock, flags);
3749                ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
3750                ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3751                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3752        }
3753
3754bail:
3755        dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
3756        kfree(msix_vecsave);
3757        return ret;
3758}
3759
3760/**
3761 * qib_7322_put_tid - write a TID to the chip
3762 * @dd: the qlogic_ib device
3763 * @tidptr: pointer to the expected TID (in chip) to update
3764 * @type: RCVHQ_RCV_TYPE_EAGER or RCVHQ_RCV_TYPE_EXPECTED
3765 * @pa: physical address of in memory buffer; tidinvalid if freeing
3766 */
3767static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
3768                             u32 type, unsigned long pa)
3769{
3770        if (!(dd->flags & QIB_PRESENT))
3771                return;
3772        if (pa != dd->tidinvalid) {
3773                u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
3774
3775                /* paranoia checks */
3776                if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
3777                        qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
3778                                    pa);
3779                        return;
3780                }
3781                if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
3782                        qib_dev_err(dd,
3783                                "Physical page address 0x%lx larger than supported\n",
3784                                pa);
3785                        return;
3786                }
3787
3788                if (type == RCVHQ_RCV_TYPE_EAGER)
3789                        chippa |= dd->tidtemplate;
3790                else /* for now, always full 4KB page */
3791                        chippa |= IBA7322_TID_SZ_4K;
3792                pa = chippa;
3793        }
3794        writeq(pa, tidptr);
3795}
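
    /*
     * TID packing above: the physical address is stored shifted right by
     * IBA7322_TID_PA_SHIFT (hence the 2KB alignment and range checks),
     * then OR'd with a buffer-size code: the eager template for eager
     * TIDs, or always the 4KB code for expected TIDs.
     */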
3796
3797/**
3798 * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
3799 * @dd: the qlogic_ib device
3800 * @rcd: the context data
3801 *
3802 * clear all TID entries for a ctxt, expected and eager.
3803 * Used from qib_close().
3804 */
3805static void qib_7322_clear_tids(struct qib_devdata *dd,
3806                                struct qib_ctxtdata *rcd)
3807{
3808        u64 __iomem *tidbase;
3809        unsigned long tidinv;
3810        u32 ctxt;
3811        int i;
3812
3813        if (!dd->kregbase || !rcd)
3814                return;
3815
3816        ctxt = rcd->ctxt;
3817
3818        tidinv = dd->tidinvalid;
3819        tidbase = (u64 __iomem *)
3820                ((char __iomem *) dd->kregbase +
3821                 dd->rcvtidbase +
3822                 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
3823
3824        for (i = 0; i < dd->rcvtidcnt; i++)
3825                qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
3826                                 tidinv);
3827
3828        tidbase = (u64 __iomem *)
3829                ((char __iomem *) dd->kregbase +
3830                 dd->rcvegrbase +
3831                 rcd->rcvegr_tid_base * sizeof(*tidbase));
3832
3833        for (i = 0; i < rcd->rcvegrcnt; i++)
3834                qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
3835                                 tidinv);
3836}
3837
3838/**
3839 * qib_7322_tidtemplate - setup constants for TID updates
3840 * @dd: the qlogic_ib device
3841 *
3842 * We set up values that we use often, to avoid recalculating each time
3843 */
3844static void qib_7322_tidtemplate(struct qib_devdata *dd)
3845{
3846        /*
3847         * For now, we always allocate 4KB buffers (at init) so we can
3848         * receive max size packets.  We may want a module parameter to
3849         * specify 2KB or 4KB and/or make it per port instead of per device
3850         * for those who want to reduce memory footprint.  Note that the
3851         * rcvhdrentsize size must be large enough to hold the largest
3852         * IB header (currently 96 bytes) that we expect to handle (plus of
3853         * course the 2 dwords of RHF).
3854         */
3855        if (dd->rcvegrbufsize == 2048)
3856                dd->tidtemplate = IBA7322_TID_SZ_2K;
3857        else if (dd->rcvegrbufsize == 4096)
3858                dd->tidtemplate = IBA7322_TID_SZ_4K;
3859        dd->tidinvalid = 0;
3860}
3861
3862/**
3863 * qib_7322_get_base_info - set chip-specific flags for user code
3864 * @rcd: the qlogic_ib ctxt
3865 * @kinfo: qib_base_info pointer
3866 *
3867 * We set the PCIE flag because the lower bandwidth on PCIe vs
3868 * HyperTransport can affect some user packet algorithms.
3869 */
3871static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
3872                                  struct qib_base_info *kinfo)
3873{
3874        kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
3875                QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
3876                QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
3877        if (rcd->dd->cspec->r1)
3878                kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
3879        if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
3880                kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
3881
3882        return 0;
3883}
3884
3885static struct qib_message_header *
3886qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
3887{
3888        u32 offset = qib_hdrget_offset(rhf_addr);
3889
3890        return (struct qib_message_header *)
3891                (rhf_addr - dd->rhf_offset + offset);
3892}
3893
3894/*
3895 * Configure number of contexts.
3896 */
3897static void qib_7322_config_ctxts(struct qib_devdata *dd)
3898{
3899        unsigned long flags;
3900        u32 nchipctxts;
3901
3902        nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
3903        dd->cspec->numctxts = nchipctxts;
3904        if (qib_n_krcv_queues > 1 && dd->num_pports) {
3905                dd->first_user_ctxt = NUM_IB_PORTS +
3906                        (qib_n_krcv_queues - 1) * dd->num_pports;
3907                if (dd->first_user_ctxt > nchipctxts)
3908                        dd->first_user_ctxt = nchipctxts;
3909                dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
3910        } else {
3911                dd->first_user_ctxt = NUM_IB_PORTS;
3912                dd->n_krcv_queues = 1;
3913        }
3914
3915        if (!qib_cfgctxts) {
3916                int nctxts = dd->first_user_ctxt + num_online_cpus();
3917
3918                if (nctxts <= 6)
3919                        dd->ctxtcnt = 6;
3920                else if (nctxts <= 10)
3921                        dd->ctxtcnt = 10;
3922                else if (nctxts <= nchipctxts)
3923                        dd->ctxtcnt = nchipctxts;
3924        } else if (qib_cfgctxts < dd->num_pports)
3925                dd->ctxtcnt = dd->num_pports;
3926        else if (qib_cfgctxts <= nchipctxts)
3927                dd->ctxtcnt = qib_cfgctxts;
3928        if (!dd->ctxtcnt) /* none of the above, set to max */
3929                dd->ctxtcnt = nchipctxts;
3930
3931        /*
3932         * Chip can be configured for 6, 10, or 18 ctxts, and choice
3933         * affects number of eager TIDs per ctxt (1K, 2K, 4K).
3934         * Lock to be paranoid about later motion, etc.
3935         */
3936        spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
3937        if (dd->ctxtcnt > 10)
3938                dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
3939        else if (dd->ctxtcnt > 6)
3940                dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
3941        /* else configure for default 6 receive ctxts */
3942
3943        /* The XRC opcode is 5. */
3944        dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
3945
3946        /*
3947         * RcvCtrl *must* be written here so that the
3948         * chip understands how to change rcvegrcnt below.
3949         */
3950        qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
3951        spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
3952
3953        /* kr_rcvegrcnt changes based on the number of contexts enabled */
3954        dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
3955        if (qib_rcvhdrcnt)
3956                dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
3957        else
3958                dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt,
3959                                    dd->num_pports > 1 ? 1024U : 2048U);
3960}
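
/*
 * A minimal standalone sketch (illustration only; the helper name is
 * hypothetical, not part of the driver) of the rounding performed above:
 * the chip supports exactly 6, 10, or 18 receive contexts, so a requested
 * count is rounded up to the next supported configuration.
 */
static inline u32 round_7322_ctxtcnt(u32 wanted, u32 nchipctxts)
{
        if (wanted <= 6)
                return 6;
        if (wanted <= 10)
                return 10;
        return nchipctxts;      /* up to 18 on the 7322 */
}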
3961
3962static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
3963{
3964
3965        int lsb, ret = 0;
3966        u64 maskr; /* right-justified mask */
3967
3968        switch (which) {
3969
3970        case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
3971                ret = ppd->link_width_enabled;
3972                goto done;
3973
3974        case QIB_IB_CFG_LWID: /* Get currently active Link-width */
3975                ret = ppd->link_width_active;
3976                goto done;
3977
3978        case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
3979                ret = ppd->link_speed_enabled;
3980                goto done;
3981
3982        case QIB_IB_CFG_SPD: /* Get current Link spd */
3983                ret = ppd->link_speed_active;
3984                goto done;
3985
3986        case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
3987                lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3988                maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3989                break;
3990
3991        case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
3992                lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3993                maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3994                break;
3995
3996        case QIB_IB_CFG_LINKLATENCY:
3997                ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
3998                        SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
3999                goto done;
4000
4001        case QIB_IB_CFG_OP_VLS:
4002                ret = ppd->vls_operational;
4003                goto done;
4004
4005        case QIB_IB_CFG_VL_HIGH_CAP:
4006                ret = 16;
4007                goto done;
4008
4009        case QIB_IB_CFG_VL_LOW_CAP:
4010                ret = 16;
4011                goto done;
4012
4013        case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
4014                ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4015                                OverrunThreshold);
4016                goto done;
4017
4018        case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
4019                ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4020                                PhyerrThreshold);
4021                goto done;
4022
4023        case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
4024                /* will only take effect when the link state changes */
4025                ret = (ppd->cpspec->ibcctrl_a &
4026                       SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
4027                        IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
4028                goto done;
4029
4030        case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
4031                lsb = IBA7322_IBC_HRTBT_LSB;
4032                maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
4033                break;
4034
4035        case QIB_IB_CFG_PMA_TICKS:
4036                /*
4037                 * 0x00 = 10x link transfer rate, i.e. 4 nsec for 2.5Gbs.
4038                 * Since the clock is always 250MHz, the value is 3, 1 or 0.
4039                 */
4040                if (ppd->link_speed_active == QIB_IB_QDR)
4041                        ret = 3;
4042                else if (ppd->link_speed_active == QIB_IB_DDR)
4043                        ret = 1;
4044                else
4045                        ret = 0;
4046                goto done;
4047
4048        default:
4049                ret = -EINVAL;
4050                goto done;
4051        }
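        /*
         * Cases that break (rather than goto done) share this extraction
         * from the IBCCtrlB shadow: shift down to lsb and apply the
         * right-justified mask maskr set up above.
         */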
4052        ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
4053done:
4054        return ret;
4055}
4056
4057/*
4058 * The code below is again cribbed liberally from the older version. Do not
4059 * lean heavily on it.
4060 */
4061#define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
4062#define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
4063        | (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
4064
4065static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
4066{
4067        struct qib_devdata *dd = ppd->dd;
4068        u64 maskr; /* right-justified mask */
4069        int lsb, ret = 0;
4070        u16 lcmd, licmd;
4071        unsigned long flags;
4072
4073        switch (which) {
4074        case QIB_IB_CFG_LIDLMC:
4075                /*
4076                 * Set LID and LMC. Combined to avoid possible hazard
4077                 * caller puts LMC in 16MSbits, DLID in 16LSbits of val
4078                 */
4079                lsb = IBA7322_IBC_DLIDLMC_SHIFT;
4080                maskr = IBA7322_IBC_DLIDLMC_MASK;
4081                /*
4082                 * For header-checking, the SLID in the packet will
4083                 * be masked with SendIBSLMCMask, and compared
4084                 * with SendIBSLIDAssignMask. Make sure we do not
4085                 * set any bits not covered by the mask, or we get
4086                 * false-positives.
4087                 */
4088                qib_write_kreg_port(ppd, krp_sendslid,
4089                                    val & (val >> 16) & SendIBSLIDAssignMask);
4090                qib_write_kreg_port(ppd, krp_sendslidmask,
4091                                    (val >> 16) & SendIBSLMCMask);
4092                break;
4093
4094        case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
4095                ppd->link_width_enabled = val;
4096                /* convert IB value to chip register value */
4097                if (val == IB_WIDTH_1X)
4098                        val = 0;
4099                else if (val == IB_WIDTH_4X)
4100                        val = 1;
4101                else
4102                        val = 3;
4103                maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
4104                lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
4105                break;
4106
4107        case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
4108                /*
4109                 * As with width, only write the actual register if the
4110                 * link is currently down, otherwise takes effect on next
4111                 * link change.  Since setting is being explicitly requested
4112                 * (via MAD or sysfs), clear autoneg failure status if speed
4113                 * autoneg is enabled.
4114                 */
4115                ppd->link_speed_enabled = val;
4116                val <<= IBA7322_IBC_SPEED_LSB;
4117                maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
4118                        IBA7322_IBC_MAX_SPEED_MASK;
4119                if (val & (val - 1)) {
4120                        /* Multiple speeds enabled */
4121                        val |= IBA7322_IBC_IBTA_1_2_MASK |
4122                                IBA7322_IBC_MAX_SPEED_MASK;
4123                        spin_lock_irqsave(&ppd->lflags_lock, flags);
4124                        ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
4125                        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
4126                } else if (val & IBA7322_IBC_SPEED_QDR)
4127                        val |= IBA7322_IBC_IBTA_1_2_MASK;
4128                /* IBTA 1.2 mode + min/max + speed bits are contiguous */
4129                lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
4130                break;
4131
4132        case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
4133                lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4134                maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4135                break;
4136
4137        case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
4138                lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4139                maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4140                break;
4141
4142        case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
4143                maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4144                                  OverrunThreshold);
4145                if (maskr != val) {
4146                        ppd->cpspec->ibcctrl_a &=
4147                                ~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
4148                        ppd->cpspec->ibcctrl_a |= (u64) val <<
4149                                SYM_LSB(IBCCtrlA_0, OverrunThreshold);
4150                        qib_write_kreg_port(ppd, krp_ibcctrl_a,
4151                                            ppd->cpspec->ibcctrl_a);
4152                        qib_write_kreg(dd, kr_scratch, 0ULL);
4153                }
4154                goto bail;
4155
4156        case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
4157                maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4158                                  PhyerrThreshold);
4159                if (maskr != val) {
4160                        ppd->cpspec->ibcctrl_a &=
4161                                ~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
4162                        ppd->cpspec->ibcctrl_a |= (u64) val <<
4163                                SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
4164                        qib_write_kreg_port(ppd, krp_ibcctrl_a,
4165                                            ppd->cpspec->ibcctrl_a);
4166                        qib_write_kreg(dd, kr_scratch, 0ULL);
4167                }
4168                goto bail;
4169
4170        case QIB_IB_CFG_PKEYS: /* update pkeys */
4171                maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
4172                        ((u64) ppd->pkeys[2] << 32) |
4173                        ((u64) ppd->pkeys[3] << 48);
4174                qib_write_kreg_port(ppd, krp_partitionkey, maskr);
4175                goto bail;
4176
4177        case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
4178                /* will only take effect when the link state changes */
4179                if (val == IB_LINKINITCMD_POLL)
4180                        ppd->cpspec->ibcctrl_a &=
4181                                ~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
4182                else /* SLEEP */
4183                        ppd->cpspec->ibcctrl_a |=
4184                                SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
4185                qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
4186                qib_write_kreg(dd, kr_scratch, 0ULL);
4187                goto bail;
4188
4189        case QIB_IB_CFG_MTU: /* update the MTU in IBC */
4190                /*
4191                 * Update our housekeeping variables, and set IBC max
4192                 * size, same as init code; max IBC is max we allow in
4193                 * buffer, less the qword pbc, plus 1 for ICRC, in dwords.
4194                 * Set even if it's unchanged; print debug message only
4195                 * on changes.
4196                 */
4197                val = (ppd->ibmaxlen >> 2) + 1;
4198                ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
4199                ppd->cpspec->ibcctrl_a |= (u64)val <<
4200                        SYM_LSB(IBCCtrlA_0, MaxPktLen);
4201                qib_write_kreg_port(ppd, krp_ibcctrl_a,
4202                                    ppd->cpspec->ibcctrl_a);
4203                qib_write_kreg(dd, kr_scratch, 0ULL);
4204                goto bail;
4205
4206        case QIB_IB_CFG_LSTATE: /* set the IB link state */
4207                switch (val & 0xffff0000) {
4208                case IB_LINKCMD_DOWN:
4209                        lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
4210                        ppd->cpspec->ibmalfusesnap = 1;
4211                        ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
4212                                crp_errlink);
4213                        if (!ppd->cpspec->ibdeltainprog &&
4214                            qib_compat_ddr_negotiate) {
4215                                ppd->cpspec->ibdeltainprog = 1;
4216                                ppd->cpspec->ibsymsnap =
4217                                        read_7322_creg32_port(ppd,
4218                                                              crp_ibsymbolerr);
4219                                ppd->cpspec->iblnkerrsnap =
4220                                        read_7322_creg32_port(ppd,
4221                                                      crp_iblinkerrrecov);
4222                        }
4223                        break;
4224
4225                case IB_LINKCMD_ARMED:
4226                        lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
4227                        if (ppd->cpspec->ibmalfusesnap) {
4228                                ppd->cpspec->ibmalfusesnap = 0;
4229                                ppd->cpspec->ibmalfdelta +=
4230                                        read_7322_creg32_port(ppd,
4231                                                              crp_errlink) -
4232                                        ppd->cpspec->ibmalfsnap;
4233                        }
4234                        break;
4235
4236                case IB_LINKCMD_ACTIVE:
4237                        lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
4238                        break;
4239
4240                default:
4241                        ret = -EINVAL;
4242                        qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
4243                        goto bail;
4244                }
4245                switch (val & 0xffff) {
4246                case IB_LINKINITCMD_NOP:
4247                        licmd = 0;
4248                        break;
4249
4250                case IB_LINKINITCMD_POLL:
4251                        licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
4252                        break;
4253
4254                case IB_LINKINITCMD_SLEEP:
4255                        licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
4256                        break;
4257
4258                case IB_LINKINITCMD_DISABLE:
4259                        licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
4260                        ppd->cpspec->chase_end = 0;
4261                        /*
4262                         * Stop the state-chase counter and timer, if running;
4263                         * wait for a pending timer, but don't clear .data (ppd)!
4264                         */
4265                        if (ppd->cpspec->chase_timer.expires) {
4266                                del_timer_sync(&ppd->cpspec->chase_timer);
4267                                ppd->cpspec->chase_timer.expires = 0;
4268                        }
4269                        break;
4270
4271                default:
4272                        ret = -EINVAL;
4273                        qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
4274                                    val & 0xffff);
4275                        goto bail;
4276                }
4277                qib_set_ib_7322_lstate(ppd, lcmd, licmd);
4278                goto bail;
4279
4280        case QIB_IB_CFG_OP_VLS:
4281                if (ppd->vls_operational != val) {
4282                        ppd->vls_operational = val;
4283                        set_vls(ppd);
4284                }
4285                goto bail;
4286
4287        case QIB_IB_CFG_VL_HIGH_LIMIT:
4288                qib_write_kreg_port(ppd, krp_highprio_limit, val);
4289                goto bail;
4290
4291        case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
4292                if (val > 3) {
4293                        ret = -EINVAL;
4294                        goto bail;
4295                }
4296                lsb = IBA7322_IBC_HRTBT_LSB;
4297                maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
4298                break;
4299
4300        case QIB_IB_CFG_PORT:
4301                /* val is the port number of the switch we are connected to. */
4302                if (ppd->dd->cspec->r1) {
4303                        cancel_delayed_work(&ppd->cpspec->ipg_work);
4304                        ppd->cpspec->ipg_tries = 0;
4305                }
4306                goto bail;
4307
4308        default:
4309                ret = -EINVAL;
4310                goto bail;
4311        }
4312        ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
4313        ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
4314        qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
4315        qib_write_kreg(dd, kr_scratch, 0);
4316bail:
4317        return ret;
4318}
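
/*
 * Worked example of the QIB_IB_CFG_LIDLMC packing handled above, assuming
 * the caller passes the LMC-derived mask in the upper 16 bits (as the
 * SendIBSLMCMask write implies): with lid 0x10 and lmc 2,
 * val = (~((1U << 2) - 1) << 16) | 0x10 = 0xfffc0010, so val & (val >> 16)
 * yields the base LID with its low 2 bits cleared, the value the chip
 * compares against after masking each packet's SLID.
 */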
4319
4320static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
4321{
4322        int ret = 0;
4323        u64 val, ctrlb;
4324
4325        /* only IBC loopback, may add serdes and xgxs loopbacks later */
4326        if (!strncmp(what, "ibc", 3)) {
4327                ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
4328                                                       Loopback);
4329                val = 0; /* disable heart beat, so link will come up */
4330                qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
4331                         ppd->dd->unit, ppd->port);
4332        } else if (!strncmp(what, "off", 3)) {
4333                ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
4334                                                        Loopback);
4335                /* enable heart beat again */
4336                val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
4337                qib_devinfo(ppd->dd->pcidev,
4338                        "Disabling IB%u:%u IBC loopback (normal)\n",
4339                        ppd->dd->unit, ppd->port);
4340        } else
4341                ret = -EINVAL;
4342        if (!ret) {
4343                qib_write_kreg_port(ppd, krp_ibcctrl_a,
4344                                    ppd->cpspec->ibcctrl_a);
4345                ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
4346                                             << IBA7322_IBC_HRTBT_LSB);
4347                ppd->cpspec->ibcctrl_b = ctrlb | val;
4348                qib_write_kreg_port(ppd, krp_ibcctrl_b,
4349                                    ppd->cpspec->ibcctrl_b);
4350                qib_write_kreg(ppd->dd, kr_scratch, 0);
4351        }
4352        return ret;
4353}
4354
4355static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4356                           struct ib_vl_weight_elem *vl)
4357{
4358        unsigned i;
4359
4360        for (i = 0; i < 16; i++, regno++, vl++) {
4361                u32 val = qib_read_kreg_port(ppd, regno);
4362
4363                vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
4364                        SYM_RMASK(LowPriority0_0, VirtualLane);
4365                vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
4366                        SYM_RMASK(LowPriority0_0, Weight);
4367        }
4368}
4369
4370static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4371                           struct ib_vl_weight_elem *vl)
4372{
4373        unsigned i;
4374
4375        for (i = 0; i < 16; i++, regno++, vl++) {
4376                u64 val;
4377
4378                val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
4379                        SYM_LSB(LowPriority0_0, VirtualLane)) |
4380                      ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
4381                        SYM_LSB(LowPriority0_0, Weight));
4382                qib_write_kreg_port(ppd, regno, val);
4383        }
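        /*
         * Enable the IB VL arbiter lazily, the first time a table is
         * actually written (tracked via the p_sendctrl shadow).
         */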
4384        if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
4385                struct qib_devdata *dd = ppd->dd;
4386                unsigned long flags;
4387
4388                spin_lock_irqsave(&dd->sendctrl_lock, flags);
4389                ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
4390                qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4391                qib_write_kreg(dd, kr_scratch, 0);
4392                spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4393        }
4394}
4395
4396static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
4397{
4398        switch (which) {
4399        case QIB_IB_TBL_VL_HIGH_ARB:
4400                get_vl_weights(ppd, krp_highprio_0, t);
4401                break;
4402
4403        case QIB_IB_TBL_VL_LOW_ARB:
4404                get_vl_weights(ppd, krp_lowprio_0, t);
4405                break;
4406
4407        default:
4408                return -EINVAL;
4409        }
4410        return 0;
4411}
4412
4413static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
4414{
4415        switch (which) {
4416        case QIB_IB_TBL_VL_HIGH_ARB:
4417                set_vl_weights(ppd, krp_highprio_0, t);
4418                break;
4419
4420        case QIB_IB_TBL_VL_LOW_ARB:
4421                set_vl_weights(ppd, krp_lowprio_0, t);
4422                break;
4423
4424        default:
4425                return -EINVAL;
4426        }
4427        return 0;
4428}
4429
4430static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
4431                                    u32 updegr, u32 egrhd, u32 npkts)
4432{
4433        /*
4434         * Need to write timeout register before updating rcvhdrhead to ensure
4435         * that the timer is enabled on reception of a packet.
4436         */
4437        if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
4438                adjust_rcv_timeout(rcd, npkts);
4439        if (updegr)
4440                qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
4441        qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4442        qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4443}
4444
4445static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
4446{
4447        u32 head, tail;
4448
4449        head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
4450        if (rcd->rcvhdrtail_kvaddr)
4451                tail = qib_get_rcvhdrtail(rcd);
4452        else
4453                tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
4454        return head == tail;
4455}
4456
4457#define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
4458        QIB_RCVCTRL_CTXT_DIS | \
4459        QIB_RCVCTRL_TIDFLOW_ENB | \
4460        QIB_RCVCTRL_TIDFLOW_DIS | \
4461        QIB_RCVCTRL_TAILUPD_ENB | \
4462        QIB_RCVCTRL_TAILUPD_DIS | \
4463        QIB_RCVCTRL_INTRAVAIL_ENB | \
4464        QIB_RCVCTRL_INTRAVAIL_DIS | \
4465        QIB_RCVCTRL_BP_ENB | \
4466        QIB_RCVCTRL_BP_DIS)
4467
4468#define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
4469        QIB_RCVCTRL_CTXT_DIS | \
4470        QIB_RCVCTRL_PKEY_DIS | \
4471        QIB_RCVCTRL_PKEY_ENB)
4472
4473/*
4474 * Modify the RCVCTRL register in a chip-specific way. This
4475 * is a function because bit positions and (future) register
4476 * location are chip-specific, but the needed operations are
4477 * generic. <op> is a bit-mask because we often want to
4478 * do multiple modifications.
4479 */
4480static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
4481                             int ctxt)
4482{
4483        struct qib_devdata *dd = ppd->dd;
4484        struct qib_ctxtdata *rcd;
4485        u64 mask, val;
4486        unsigned long flags;
4487
4488        spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
4489
4490        if (op & QIB_RCVCTRL_TIDFLOW_ENB)
4491                dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
4492        if (op & QIB_RCVCTRL_TIDFLOW_DIS)
4493                dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
4494        if (op & QIB_RCVCTRL_TAILUPD_ENB)
4495                dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4496        if (op & QIB_RCVCTRL_TAILUPD_DIS)
4497                dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
4498        if (op & QIB_RCVCTRL_PKEY_ENB)
4499                ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4500        if (op & QIB_RCVCTRL_PKEY_DIS)
4501                ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4502        if (ctxt < 0) {
4503                mask = (1ULL << dd->ctxtcnt) - 1;
4504                rcd = NULL;
4505        } else {
4506                mask = (1ULL << ctxt);
4507                rcd = dd->rcd[ctxt];
4508        }
4509        if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
4510                ppd->p_rcvctrl |=
4511                        (mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4512                if (!(dd->flags & QIB_NODMA_RTAIL)) {
4513                        op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
4514                        dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4515                }
4516                /* Write these registers before the context is enabled. */
4517                qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
4518                                    rcd->rcvhdrqtailaddr_phys);
4519                qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
4520                                    rcd->rcvhdrq_phys);
4521                rcd->seq_cnt = 1;
4522        }
4523        if (op & QIB_RCVCTRL_CTXT_DIS)
4524                ppd->p_rcvctrl &=
4525                        ~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4526        if (op & QIB_RCVCTRL_BP_ENB)
4527                dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
4528        if (op & QIB_RCVCTRL_BP_DIS)
4529                dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
4530        if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
4531                dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
4532        if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
4533                dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
4534        /*
4535         * Decide which registers to write depending on the ops enabled.
4536         * Special case is "flush" (no bits set at all)
4537         * which needs to write both.
4538         */
4539        if (op == 0 || (op & RCVCTRL_COMMON_MODS))
4540                qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
4541        if (op == 0 || (op & RCVCTRL_PORT_MODS))
4542                qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
4543        if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
4544                /*
4545                 * Init the context registers also; if we were
4546                 * disabled, tail and head should both be zero
4547                 * already from the enable, but since we don't
4548                 * know, we have to do it explicitly.
4549                 */
4550                val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
4551                qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
4552
4553                /* be sure the enabling write was seen; hd/tl should be 0 */
4554                (void) qib_read_kreg32(dd, kr_scratch);
4555                val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
4556                dd->rcd[ctxt]->head = val;
4557                /* If kctxt, interrupt on next receive. */
4558                if (ctxt < dd->first_user_ctxt)
4559                        val |= dd->rhdrhead_intr_off;
4560                qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4561        } else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
4562                dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
4563                /* arm rcv interrupt */
4564                val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
4565                qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4566        }
4567        if (op & QIB_RCVCTRL_CTXT_DIS) {
4568                unsigned f;
4569
4570                /* Now that the context is disabled, clear these registers. */
4571                if (ctxt >= 0) {
4572                        qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
4573                        qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
4574                        for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4575                                qib_write_ureg(dd, ur_rcvflowtable + f,
4576                                               TIDFLOW_ERRBITS, ctxt);
4577                } else {
4578                        unsigned i;
4579
4580                        for (i = 0; i < dd->cfgctxts; i++) {
4581                                qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
4582                                                    i, 0);
4583                                qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
4584                                for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4585                                        qib_write_ureg(dd, ur_rcvflowtable + f,
4586                                                       TIDFLOW_ERRBITS, i);
4587                        }
4588                }
4589        }
4590        spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
4591}
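
/*
 * Usage sketch for rcvctrl_7322_mod (illustration): because <op> is a
 * bit-mask, a context can be enabled with interrupt-on-available in a
 * single call,
 *
 *      rcvctrl_7322_mod(ppd, QIB_RCVCTRL_CTXT_ENB |
 *                       QIB_RCVCTRL_INTRAVAIL_ENB, ctxt);
 *
 * while a "flush" (op == 0) rewrites both the common and per-port
 * registers from their shadows.
 */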
4592
4593/*
4594 * Modify the SENDCTRL register in a chip-specific way. This
4595 * is a function because there are multiple such registers with
4596 * slightly different layouts.
4597 * The chip doesn't allow back-to-back sendctrl writes, so write
4598 * the scratch register after writing sendctrl.
4599 *
4600 * Which register is written depends on the operation.
4601 * Most operate on the common register, while
4602 * SEND_ENB and SEND_DIS operate on the per-port ones.
4603 * SEND_ENB is included in common because it can change SPCL_TRIG
4604 */
4605#define SENDCTRL_COMMON_MODS (\
4606        QIB_SENDCTRL_CLEAR | \
4607        QIB_SENDCTRL_AVAIL_DIS | \
4608        QIB_SENDCTRL_AVAIL_ENB | \
4609        QIB_SENDCTRL_AVAIL_BLIP | \
4610        QIB_SENDCTRL_DISARM | \
4611        QIB_SENDCTRL_DISARM_ALL | \
4612        QIB_SENDCTRL_SEND_ENB)
4613
4614#define SENDCTRL_PORT_MODS (\
4615        QIB_SENDCTRL_CLEAR | \
4616        QIB_SENDCTRL_SEND_ENB | \
4617        QIB_SENDCTRL_SEND_DIS | \
4618        QIB_SENDCTRL_FLUSH)
4619
4620static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
4621{
4622        struct qib_devdata *dd = ppd->dd;
4623        u64 tmp_dd_sendctrl;
4624        unsigned long flags;
4625
4626        spin_lock_irqsave(&dd->sendctrl_lock, flags);
4627
4628        /* First the dd ones that are "sticky", saved in shadow */
4629        if (op & QIB_SENDCTRL_CLEAR)
4630                dd->sendctrl = 0;
4631        if (op & QIB_SENDCTRL_AVAIL_DIS)
4632                dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4633        else if (op & QIB_SENDCTRL_AVAIL_ENB) {
4634                dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
4635                if (dd->flags & QIB_USE_SPCL_TRIG)
4636                        dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
4637        }
4638
4639        /* Then the ppd ones that are "sticky", saved in shadow */
4640        if (op & QIB_SENDCTRL_SEND_DIS)
4641                ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
4642        else if (op & QIB_SENDCTRL_SEND_ENB)
4643                ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
4644
4645        if (op & QIB_SENDCTRL_DISARM_ALL) {
4646                u32 i, last;
4647
4648                tmp_dd_sendctrl = dd->sendctrl;
4649                last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
4650                /*
4651                 * Disarm any buffers that are not yet launched,
4652                 * disabling updates until done.
4653                 */
4654                tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4655                for (i = 0; i < last; i++) {
4656                        qib_write_kreg(dd, kr_sendctrl,
4657                                       tmp_dd_sendctrl |
4658                                       SYM_MASK(SendCtrl, Disarm) | i);
4659                        qib_write_kreg(dd, kr_scratch, 0);
4660                }
4661        }
4662
4663        if (op & QIB_SENDCTRL_FLUSH) {
4664                u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
4665
4666                /*
4667                 * Now drain all the fifos.  The Abort bit should never be
4668                 * needed, so for now, at least, we don't use it.
4669                 */
4670                tmp_ppd_sendctrl |=
4671                        SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
4672                        SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
4673                        SYM_MASK(SendCtrl_0, TxeBypassIbc);
4674                qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
4675                qib_write_kreg(dd, kr_scratch, 0);
4676        }
4677
4678        tmp_dd_sendctrl = dd->sendctrl;
4679
4680        if (op & QIB_SENDCTRL_DISARM)
4681                tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
4682                        ((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
4683                         SYM_LSB(SendCtrl, DisarmSendBuf));
4684        if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
4685            (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
4686                tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4687
4688        if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
4689                qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
4690                qib_write_kreg(dd, kr_scratch, 0);
4691        }
4692
4693        if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
4694                qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4695                qib_write_kreg(dd, kr_scratch, 0);
4696        }
4697
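        /*
         * For AVAIL_BLIP the shadow still has SendBufAvailUpd set, so this
         * rewrite re-enables the update momentarily dropped above, causing
         * the chip to refresh the pioavail state.
         */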
4698        if (op & QIB_SENDCTRL_AVAIL_BLIP) {
4699                qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
4700                qib_write_kreg(dd, kr_scratch, 0);
4701        }
4702
4703        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4704
4705        if (op & QIB_SENDCTRL_FLUSH) {
4706                u32 v;
4707                /*
4708                 * ensure writes have hit the chip, then do a few
4709                 * more reads, to allow DMA of the pioavail registers
4710                 * to occur, so the in-memory copy is in sync with
4711                 * the chip.  It is not always safe to sleep here.
4712                 */
4713                v = qib_read_kreg32(dd, kr_scratch);
4714                qib_write_kreg(dd, kr_scratch, v);
4715                v = qib_read_kreg32(dd, kr_scratch);
4716                qib_write_kreg(dd, kr_scratch, v);
4717                qib_read_kreg32(dd, kr_scratch);
4718        }
4719}
4720
4721#define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
4722#define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
4723#define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */
4724
4725/**
4726 * qib_portcntr_7322 - read a per-port chip counter
4727 * @ppd: the qlogic_ib pport
4728 * @reg: the counter to read (not a chip offset)
4729 */
4730static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
4731{
4732        struct qib_devdata *dd = ppd->dd;
4733        u64 ret = 0ULL;
4734        u16 creg;
4735        /* 0xffff for unimplemented or synthesized counters */
4736        static const u32 xlator[] = {
4737                [QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
4738                [QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
4739                [QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
4740                [QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
4741                [QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
4742                [QIBPORTCNTR_SENDSTALL] = crp_sendstall,
4743                [QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
4744                [QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
4745                [QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
4746                [QIBPORTCNTR_RCVEBP] = crp_rcvebp,
4747                [QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
4748                [QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
4749                [QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed  for 7322 */
4750                [QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
4751                [QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
4752                [QIBPORTCNTR_ERRICRC] = crp_erricrc,
4753                [QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
4754                [QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
4755                [QIBPORTCNTR_BADFORMAT] = crp_badformat,
4756                [QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
4757                [QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
4758                [QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
4759                [QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
4760                [QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
4761                [QIBPORTCNTR_ERRLINK] = crp_errlink,
4762                [QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
4763                [QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
4764                [QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
4765                [QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
4766                [QIBPORTCNTR_ERRPKEY] = crp_errpkey,
4767                /*
4768                 * the next 3 aren't really counters, but were implemented
4769                 * as counters in older chips, so this code still accesses
4770                 * them as though they were counters.
4771                 */
4772                [QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
4773                [QIBPORTCNTR_PSSTART] = krp_psstart,
4774                [QIBPORTCNTR_PSSTAT] = krp_psstat,
4775                /* pseudo-counter, summed for all ports */
4776                [QIBPORTCNTR_KHDROVFL] = 0xffff,
4777        };
4778
4779        if (reg >= ARRAY_SIZE(xlator)) {
4780                qib_devinfo(ppd->dd->pcidev,
4781                         "Unimplemented portcounter %u\n", reg);
4782                goto done;
4783        }
4784        creg = xlator[reg] & _PORT_CNTR_IDXMASK;
4785
4786        /* handle non-counters and special cases first */
4787        if (reg == QIBPORTCNTR_KHDROVFL) {
4788                int i;
4789
4790                /* sum over all kernel contexts (skip if mini_init) */
4791                for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
4792                        struct qib_ctxtdata *rcd = dd->rcd[i];
4793
4794                        if (!rcd || rcd->ppd != ppd)
4795                                continue;
4796                        ret += read_7322_creg32(dd, cr_base_egrovfl + i);
4797                }
4798                goto done;
4799        } else if (reg == QIBPORTCNTR_RXDROPPKT) {
4800                /*
4801                 * Used as part of the synthesis of port_rcv_errors
4802                 * in the verbs code for IBTA counters.  Not needed for 7322,
4803                 * because all the errors are already counted by other cntrs.
4804                 */
4805                goto done;
4806        } else if (reg == QIBPORTCNTR_PSINTERVAL ||
4807                   reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
4808                /* were counters in older chips, now per-port kernel regs */
4809                ret = qib_read_kreg_port(ppd, creg);
4810                goto done;
4811        }
4812
4813        /*
4814         * Only fast-increment counters are 64 bits; use 32-bit reads to
4815         * avoid two independent reads when on Opteron.
4816         */
4817        if (xlator[reg] & _PORT_64BIT_FLAG)
4818                ret = read_7322_creg_port(ppd, creg);
4819        else
4820                ret = read_7322_creg32_port(ppd, creg);
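        /*
         * While a link-state transition is in progress (ibdeltainprog),
         * "ret -= ret - snap" reports the value snapshotted at the start
         * of the transition; the delta subtractions then back out counts
         * attributable to the transitions themselves.
         */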
4821        if (creg == crp_ibsymbolerr) {
4822                if (ppd->cpspec->ibdeltainprog)
4823                        ret -= ret - ppd->cpspec->ibsymsnap;
4824                ret -= ppd->cpspec->ibsymdelta;
4825        } else if (creg == crp_iblinkerrrecov) {
4826                if (ppd->cpspec->ibdeltainprog)
4827                        ret -= ret - ppd->cpspec->iblnkerrsnap;
4828                ret -= ppd->cpspec->iblnkerrdelta;
4829        } else if (creg == crp_errlink)
4830                ret -= ppd->cpspec->ibmalfdelta;
4831        else if (creg == crp_iblinkdown)
4832                ret += ppd->cpspec->iblnkdowndelta;
4833done:
4834        return ret;
4835}
4836
4837/*
4838 * Device counter names (not port-specific), one line per stat,
4839 * single string.  Used by utilities like ipathstats to print the stats
4840 * in a way that works for different versions of drivers, without changing
4841 * the utility.  Names need to be 12 chars or less (w/o newline), for proper
4842 * display by the utility.
4843 * Non-error counters are first.
4844 * Start of "error" counters is indicated by a leading "E " on the first
4845 * "error" counter, and doesn't count in label length.
4846 * The EgrOvfl list needs to be last so we can truncate it at the configured
4847 * context count for the device.
4848 * cntr7322indices contains the corresponding register indices.
4849 */
4850static const char cntr7322names[] =
4851        "Interrupts\n"
4852        "HostBusStall\n"
4853        "E RxTIDFull\n"
4854        "RxTIDInvalid\n"
4855        "RxTIDFloDrop\n" /* 7322 only */
4856        "Ctxt0EgrOvfl\n"
4857        "Ctxt1EgrOvfl\n"
4858        "Ctxt2EgrOvfl\n"
4859        "Ctxt3EgrOvfl\n"
4860        "Ctxt4EgrOvfl\n"
4861        "Ctxt5EgrOvfl\n"
4862        "Ctxt6EgrOvfl\n"
4863        "Ctxt7EgrOvfl\n"
4864        "Ctxt8EgrOvfl\n"
4865        "Ctxt9EgrOvfl\n"
4866        "Ctx10EgrOvfl\n"
4867        "Ctx11EgrOvfl\n"
4868        "Ctx12EgrOvfl\n"
4869        "Ctx13EgrOvfl\n"
4870        "Ctx14EgrOvfl\n"
4871        "Ctx15EgrOvfl\n"
4872        "Ctx16EgrOvfl\n"
4873        "Ctx17EgrOvfl\n"
4874        ;
4875
4876static const u32 cntr7322indices[] = {
4877        cr_lbint | _PORT_64BIT_FLAG,
4878        cr_lbstall | _PORT_64BIT_FLAG,
4879        cr_tidfull,
4880        cr_tidinvalid,
4881        cr_rxtidflowdrop,
4882        cr_base_egrovfl + 0,
4883        cr_base_egrovfl + 1,
4884        cr_base_egrovfl + 2,
4885        cr_base_egrovfl + 3,
4886        cr_base_egrovfl + 4,
4887        cr_base_egrovfl + 5,
4888        cr_base_egrovfl + 6,
4889        cr_base_egrovfl + 7,
4890        cr_base_egrovfl + 8,
4891        cr_base_egrovfl + 9,
4892        cr_base_egrovfl + 10,
4893        cr_base_egrovfl + 11,
4894        cr_base_egrovfl + 12,
4895        cr_base_egrovfl + 13,
4896        cr_base_egrovfl + 14,
4897        cr_base_egrovfl + 15,
4898        cr_base_egrovfl + 16,
4899        cr_base_egrovfl + 17,
4900};
4901
4902/*
4903 * same as cntr7322names and cntr7322indices, but for port-specific counters.
4904 * portcntr7322indices is somewhat complicated by some registers needing
4905 * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
4906 */
4907static const char portcntr7322names[] =
4908        "TxPkt\n"
4909        "TxFlowPkt\n"
4910        "TxWords\n"
4911        "RxPkt\n"
4912        "RxFlowPkt\n"
4913        "RxWords\n"
4914        "TxFlowStall\n"
4915        "TxDmaDesc\n"  /* 7220 and 7322-only */
4916        "E RxDlidFltr\n"  /* 7220 and 7322-only */
4917        "IBStatusChng\n"
4918        "IBLinkDown\n"
4919        "IBLnkRecov\n"
4920        "IBRxLinkErr\n"
4921        "IBSymbolErr\n"
4922        "RxLLIErr\n"
4923        "RxBadFormat\n"
4924        "RxBadLen\n"
4925        "RxBufOvrfl\n"
4926        "RxEBP\n"
4927        "RxFlowCtlErr\n"
4928        "RxICRCerr\n"
4929        "RxLPCRCerr\n"
4930        "RxVCRCerr\n"
4931        "RxInvalLen\n"
4932        "RxInvalPKey\n"
4933        "RxPktDropped\n"
4934        "TxBadLength\n"
4935        "TxDropped\n"
4936        "TxInvalLen\n"
4937        "TxUnderrun\n"
4938        "TxUnsupVL\n"
4939        "RxLclPhyErr\n" /* 7220 and 7322-only from here down */
4940        "RxVL15Drop\n"
4941        "RxVlErr\n"
4942        "XcessBufOvfl\n"
4943        "RxQPBadCtxt\n" /* 7322-only from here down */
4944        "TXBadHeader\n"
4945        ;
4946
4947static const u32 portcntr7322indices[] = {
4948        QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
4949        crp_pktsendflow,
4950        QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
4951        QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
4952        crp_pktrcvflowctrl,
4953        QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
4954        QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
4955        crp_txsdmadesc | _PORT_64BIT_FLAG,
4956        crp_rxdlidfltr,
4957        crp_ibstatuschange,
4958        QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
4959        QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
4960        QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
4961        QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
4962        QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
4963        QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
4964        QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
4965        QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
4966        QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
4967        crp_rcvflowctrlviol,
4968        QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
4969        QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
4970        QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
4971        QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
4972        QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
4973        QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
4974        crp_txminmaxlenerr,
4975        crp_txdroppedpkt,
4976        crp_txlenerr,
4977        crp_txunderrun,
4978        crp_txunsupvl,
4979        QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
4980        QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
4981        QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
4982        QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
4983        crp_rxqpinvalidctxt,
4984        crp_txhdrerr,
4985};
4986
4987/* do all the setup to make the counter reads efficient later */
4988static void init_7322_cntrnames(struct qib_devdata *dd)
4989{
4990        int i, j = 0;
4991        char *s;
4992
4993        for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
4994             i++) {
4995                /* we always have at least one counter before the egrovfl */
4996                if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
4997                        j = 1;
4998                s = strchr(s + 1, '\n');
4999                if (s && j)
5000                        j++;
5001        }
5002        dd->cspec->ncntrs = i;
5003        if (!s)
5004                /* full list; size is without terminating null */
5005                dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
5006        else
5007                dd->cspec->cntrnamelen = 1 + s - cntr7322names;
5008        dd->cspec->cntrs = kmalloc_array(dd->cspec->ncntrs, sizeof(u64),
5009                                         GFP_KERNEL);
5010
5011        for (i = 0, s = (char *)portcntr7322names; s; i++)
5012                s = strchr(s + 1, '\n');
5013        dd->cspec->nportcntrs = i - 1;
5014        dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
5015        for (i = 0; i < dd->num_pports; ++i) {
5016                dd->pport[i].cpspec->portcntrs =
5017                        kmalloc_array(dd->cspec->nportcntrs, sizeof(u64),
5018                                      GFP_KERNEL);
5019        }
5020}
5021
5022static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
5023                              u64 **cntrp)
5024{
5025        u32 ret;
5026
5027        if (namep) {
5028                ret = dd->cspec->cntrnamelen;
5029                if (pos >= ret)
5030                        ret = 0; /* final read after getting everything */
5031                else
5032                        *namep = (char *) cntr7322names;
5033        } else {
5034                u64 *cntr = dd->cspec->cntrs;
5035                int i;
5036
5037                ret = dd->cspec->ncntrs * sizeof(u64);
5038                if (!cntr || pos >= ret) {
5039                        /* everything read, or couldn't get memory */
5040                        ret = 0;
5041                        goto done;
5042                }
5043                *cntrp = cntr;
5044                for (i = 0; i < dd->cspec->ncntrs; i++)
5045                        if (cntr7322indices[i] & _PORT_64BIT_FLAG)
5046                                *cntr++ = read_7322_creg(dd,
5047                                                         cntr7322indices[i] &
5048                                                         _PORT_CNTR_IDXMASK);
5049                        else
5050                                *cntr++ = read_7322_creg32(dd,
5051                                                           cntr7322indices[i]);
5052        }
5053done:
5054        return ret;
5055}
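
/*
 * Note on the read protocol above: with namep set the caller gets the
 * counter-name string, and with cntrp set all counters are sampled into
 * the preallocated buffer.  In both cases a pos at or beyond the returned
 * length yields 0, giving the exporting file read(2)-style EOF behavior.
 */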
5056
5057static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
5058                                  char **namep, u64 **cntrp)
5059{
5060        u32 ret;
5061
5062        if (namep) {
5063                ret = dd->cspec->portcntrnamelen;
5064                if (pos >= ret)
5065                        ret = 0; /* final read after getting everything */
5066                else
5067                        *namep = (char *)portcntr7322names;
5068        } else {
5069                struct qib_pportdata *ppd = &dd->pport[port];
5070                u64 *cntr = ppd->cpspec->portcntrs;
5071                int i;
5072
5073                ret = dd->cspec->nportcntrs * sizeof(u64);
5074                if (!cntr || pos >= ret) {
5075                        /* everything read, or couldn't get memory */
5076                        ret = 0;
5077                        goto done;
5078                }
5079                *cntrp = cntr;
5080                for (i = 0; i < dd->cspec->nportcntrs; i++) {
5081                        if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
5082                                *cntr++ = qib_portcntr_7322(ppd,
5083                                        portcntr7322indices[i] &
5084                                        _PORT_CNTR_IDXMASK);
5085                        else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
5086                                *cntr++ = read_7322_creg_port(ppd,
5087                                           portcntr7322indices[i] &
5088                                            _PORT_CNTR_IDXMASK);
5089                        else
5090                                *cntr++ = read_7322_creg32_port(ppd,
5091                                           portcntr7322indices[i]);
5092                }
5093        }
5094done:
5095        return ret;
5096}
5097
5098/**
5099 * qib_get_7322_faststats - get word counters from chip before they overflow
5100 * @t: the timer; the qlogic_ib device qib_devdata is derived from it
5101 *
5102 * VESTIGIAL: the IBA7322 has no "small fast counters", so the only
5103 * real purpose of this function is to maintain the notion of
5104 * "active time", which in turn is only logged into the eeprom,
5105 * which we don't have, yet, for 7322-based boards.
5106 *
5107 * called from add_timer
5108 */
5109static void qib_get_7322_faststats(struct timer_list *t)
5110{
5111        struct qib_devdata *dd = from_timer(dd, t, stats_timer);
5112        struct qib_pportdata *ppd;
5113        unsigned long flags;
5114        u64 traffic_wds;
5115        int pidx;
5116
5117        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5118                ppd = dd->pport + pidx;
5119
5120                /*
5121                 * If the port isn't enabled or isn't operational, or if
5122                 * diags are running (which can cause memory diags to fail),
5123                 * skip this port this time.
5124                 */
5125                if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
5126                    || dd->diag_client)
5127                        continue;
5128
5129                /*
5130                 * Maintain an activity timer, based on traffic
5131                 * exceeding a threshold, so we need to check the word-counts
5132                 * even if they are 64-bit.
5133                 */
5134                traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
5135                        qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
5136                spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
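                /* traffic_wds becomes the delta since the last sample, and
                 * dd->traffic_wds advances to the new running total. */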
5137                traffic_wds -= ppd->dd->traffic_wds;
5138                ppd->dd->traffic_wds += traffic_wds;
5139                spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
5140                if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
5141                                                QIB_IB_QDR) &&
5142                    (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
5143                                    QIBL_LINKACTIVE)) &&
5144                    ppd->cpspec->qdr_dfe_time &&
5145                    time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) {
5146                        ppd->cpspec->qdr_dfe_on = 0;
5147
5148                        qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
5149                                            ppd->dd->cspec->r1 ?
5150                                            QDR_STATIC_ADAPT_INIT_R1 :
5151                                            QDR_STATIC_ADAPT_INIT);
5152                        force_h1(ppd);
5153                }
5154        }
5155        mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
5156}
5157
5158/*
5159 * If we were using MSIx, try to fall back to INTx.
5160 */
5161static int qib_7322_intr_fallback(struct qib_devdata *dd)
5162{
5163        if (!dd->cspec->num_msix_entries)
5164                return 0; /* already using INTx */
5165
5166        qib_devinfo(dd->pcidev,
5167                "MSIx interrupt not detected, trying INTx interrupts\n");
5168        qib_7322_free_irq(dd);
5169        if (pci_alloc_irq_vectors(dd->pcidev, 1, 1, PCI_IRQ_LEGACY) < 0)
5170                qib_dev_err(dd, "Failed to enable INTx\n");
5171        qib_setup_7322_interrupt(dd, 0);
5172        return 1;
5173}
5174
5175/*
5176 * Reset the XGXS (between serdes and IBC).  Slightly less intrusive
5177 * than resetting the IBC or external link state, and useful in some
5178 * cases to cause some retraining.  To do this right, we reset IBC
5179 * as well, then return to previous state (which may be still in reset)
5180 * NOTE: some callers of this "know" this writes the current value
5181 * of cpspec->ibcctrl_a as part of its operation, so if that changes,
5182 * check all callers.
5183 */
5184static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
5185{
5186        u64 val;
5187        struct qib_devdata *dd = ppd->dd;
5188        const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
5189                SYM_MASK(IBPCSConfig_0, xcv_treset) |
5190                SYM_MASK(IBPCSConfig_0, tx_rx_reset);
5191
5192        val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
5193        qib_write_kreg(dd, kr_hwerrmask,
5194                       dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
5195        qib_write_kreg_port(ppd, krp_ibcctrl_a,
5196                            ppd->cpspec->ibcctrl_a &
5197                            ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
5198
5199        qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
5200        qib_read_kreg32(dd, kr_scratch);
5201        qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
5202        qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
5203        qib_write_kreg(dd, kr_scratch, 0ULL);
5204        qib_write_kreg(dd, kr_hwerrclear,
5205                       SYM_MASK(HwErrClear, statusValidNoEopClear));
5206        qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
5207}
5208
5209/*
5210 * This code for non-IBTA-compliant IB speed negotiation is only known to
5211 * work for the SDR to DDR transition, and only between an HCA and a switch
5212 * with recent firmware.  It is based on observed heuristics, rather than
5213 * actual knowledge of the non-compliant speed negotiation.
5214 * It has a number of hard-coded fields, since the hope is to rewrite this
 * when a spec is available on how the negotiation is intended to work.
5216 */
5217static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
5218                                 u32 dcnt, u32 *data)
5219{
5220        int i;
5221        u64 pbc;
5222        u32 __iomem *piobuf;
5223        u32 pnum, control, len;
5224        struct qib_devdata *dd = ppd->dd;
5225
5226        i = 0;
5227        len = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
5228        control = qib_7322_setpbc_control(ppd, len, 0, 15);
5229        pbc = ((u64) control << 32) | len;
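        /*
         * Worked example: with dcnt == 0x40, len is 7 + 0x40 + 1 = 0x48
         * dwords, so pbc carries the qib_7322_setpbc_control() result in
         * its upper 32 bits and 0x48 in its lower 32 bits.
         */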
5230        while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
5231                if (i++ > 15)
5232                        return;
5233                udelay(2);
5234        }
5235        /* disable header check on this packet, since it can't be valid */
5236        dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
5237        writeq(pbc, piobuf);
5238        qib_flush_wc();
5239        qib_pio_copy(piobuf + 2, hdr, 7);
5240        qib_pio_copy(piobuf + 9, data, dcnt);
5241        if (dd->flags & QIB_USE_SPCL_TRIG) {
5242                u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
5243
5244                qib_flush_wc();
5245                __raw_writel(0xaebecede, piobuf + spcl_off);
5246        }
5247        qib_flush_wc();
5248        qib_sendbuf_done(dd, pnum);
5249        /* and re-enable hdr check */
5250        dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
5251}
5252
5253/*
5254 * _start packet gets sent twice at start, _done gets sent twice at end
5255 */
5256static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
5257{
5258        struct qib_devdata *dd = ppd->dd;
5259        static u32 swapped;
5260        u32 dw, i, hcnt, dcnt, *data;
5261        static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
5262        static u32 madpayload_start[0x40] = {
5263                0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5264                0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5265                0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
5266                };
5267        static u32 madpayload_done[0x40] = {
5268                0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5269                0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5270                0x40000001, 0x1388, 0x15e, /* rest 0's */
5271                };
5272
5273        dcnt = ARRAY_SIZE(madpayload_start);
5274        hcnt = ARRAY_SIZE(hdr);
5275        if (!swapped) {
5276                /* for maintainability, do it at runtime */
5277                for (i = 0; i < hcnt; i++) {
5278                        dw = (__force u32) cpu_to_be32(hdr[i]);
5279                        hdr[i] = dw;
5280                }
5281                for (i = 0; i < dcnt; i++) {
5282                        dw = (__force u32) cpu_to_be32(madpayload_start[i]);
5283                        madpayload_start[i] = dw;
5284                        dw = (__force u32) cpu_to_be32(madpayload_done[i]);
5285                        madpayload_done[i] = dw;
5286                }
5287                swapped = 1;
5288        }
5289
5290        data = which ? madpayload_done : madpayload_start;
5291
5292        autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5293        qib_read_kreg64(dd, kr_scratch);
5294        udelay(2);
5295        autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5296        qib_read_kreg64(dd, kr_scratch);
5297        udelay(2);
5298}
5299
5300/*
5301 * Do the absolute minimum to cause an IB speed change, and make it
 * ready, but don't actually trigger the change.  The caller will
 * do that when ready (if the link is in the Polling training state,
 * it will happen immediately; otherwise when the link next goes down).
 *
 * This routine should only be used as part of the DDR autonegotiation
 * code for devices that are not compliant with IB 1.2 (or code that
 * fixes things up for same).
 *
 * When the link has gone down with autoneg enabled, or autoneg has
 * failed and we give up until next time, we set both speeds, and then
 * we want IBTA negotiation enabled as well as "use max enabled speed".
5313 */
5314static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
5315{
5316        u64 newctrlb;
5317
5318        newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
5319                                    IBA7322_IBC_IBTA_1_2_MASK |
5320                                    IBA7322_IBC_MAX_SPEED_MASK);
5321
5322        if (speed & (speed - 1)) /* multiple speeds */
5323                newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
5324                                    IBA7322_IBC_IBTA_1_2_MASK |
5325                                    IBA7322_IBC_MAX_SPEED_MASK;
5326        else
5327                newctrlb |= speed == QIB_IB_QDR ?
5328                        IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
5329                        ((speed == QIB_IB_DDR ?
5330                          IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));
5331
5332        if (newctrlb == ppd->cpspec->ibcctrl_b)
5333                return;
5334
5335        ppd->cpspec->ibcctrl_b = newctrlb;
5336        qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
5337        qib_write_kreg(ppd->dd, kr_scratch, 0);
5338}
5339
5340/*
5341 * This routine is only used when we are not talking to another
5342 * IB 1.2-compliant device that we think can do DDR.
5343 * (This includes all existing switch chips as of Oct 2007.)
 * 1.2-compliant devices go directly to DDR prior to reaching INIT.
5345 */
5346static void try_7322_autoneg(struct qib_pportdata *ppd)
5347{
5348        unsigned long flags;
5349
5350        spin_lock_irqsave(&ppd->lflags_lock, flags);
5351        ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
5352        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5353        qib_autoneg_7322_send(ppd, 0);
5354        set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5355        qib_7322_mini_pcs_reset(ppd);
5356        /* 2 msec is minimum length of a poll cycle */
5357        queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
5358                           msecs_to_jiffies(2));
5359}
5360
5361/*
5362 * Handle the empirically determined mechanism for auto-negotiation
5363 * of DDR speed with switches.
5364 */
5365static void autoneg_7322_work(struct work_struct *work)
5366{
5367        struct qib_pportdata *ppd;
5368        u32 i;
5369        unsigned long flags;
5370
5371        ppd = container_of(work, struct qib_chippport_specific,
5372                            autoneg_work.work)->ppd;
5373
5374        /*
5375         * Busy wait for this first part, it should be at most a
         * few hundred usec, since we scheduled ourselves for 2 msec.
5377         */
5378        for (i = 0; i < 25; i++) {
5379                if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
5380                     == IB_7322_LT_STATE_POLLQUIET) {
5381                        qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
5382                        break;
5383                }
5384                udelay(100);
5385        }
5386
5387        if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
5388                goto done; /* we got there early or told to stop */
5389
5390        /* we expect this to timeout */
5391        if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5392                               !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5393                               msecs_to_jiffies(90)))
5394                goto done;
5395        qib_7322_mini_pcs_reset(ppd);
5396
5397        /* we expect this to timeout */
5398        if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5399                               !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5400                               msecs_to_jiffies(1700)))
5401                goto done;
5402        qib_7322_mini_pcs_reset(ppd);
5403
5404        set_7322_ibspeed_fast(ppd, QIB_IB_SDR);
5405
5406        /*
5407         * Wait up to 250 msec for link to train and get to INIT at DDR;
5408         * this should terminate early.
5409         */
5410        wait_event_timeout(ppd->cpspec->autoneg_wait,
5411                !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5412                msecs_to_jiffies(250));
5413done:
5414        if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
5415                spin_lock_irqsave(&ppd->lflags_lock, flags);
5416                ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
5417                if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
5418                        ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
5419                        ppd->cpspec->autoneg_tries = 0;
5420                }
5421                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5422                set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5423        }
5424}
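
/*
 * Timing summary of the sequence above: the work runs ~2 msec after
 * try_7322_autoneg(), busy-waits up to 2.5 msec for POLLQUIET, then
 * sits through two waits that are expected to time out (90 and 1700
 * msec) with a PCS reset after each, and finally drops back to SDR
 * and gives the link up to 250 msec to reach INIT at DDR (that last
 * wait should terminate early via autoneg_wait).
 */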
5425
5426/*
 * This routine is used to request that the IPG (inter-packet gap)
 * be set in the QLogic switch.
5428 * Only called if r1.
5429 */
5430static void try_7322_ipg(struct qib_pportdata *ppd)
5431{
5432        struct qib_ibport *ibp = &ppd->ibport_data;
5433        struct ib_mad_send_buf *send_buf;
5434        struct ib_mad_agent *agent;
5435        struct ib_smp *smp;
5436        unsigned delay;
5437        int ret;
5438
5439        agent = ibp->rvp.send_agent;
5440        if (!agent)
5441                goto retry;
5442
5443        send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
5444                                      IB_MGMT_MAD_DATA, GFP_ATOMIC,
5445                                      IB_MGMT_BASE_VERSION);
5446        if (IS_ERR(send_buf))
5447                goto retry;
5448
5449        if (!ibp->smi_ah) {
5450                struct ib_ah *ah;
5451
5452                ah = qib_create_qp0_ah(ibp, be16_to_cpu(IB_LID_PERMISSIVE));
5453                if (IS_ERR(ah))
5454                        ret = PTR_ERR(ah);
5455                else {
5456                        send_buf->ah = ah;
5457                        ibp->smi_ah = ibah_to_rvtah(ah);
5458                        ret = 0;
5459                }
5460        } else {
5461                send_buf->ah = &ibp->smi_ah->ibah;
5462                ret = 0;
5463        }
5464
5465        smp = send_buf->mad;
5466        smp->base_version = IB_MGMT_BASE_VERSION;
5467        smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
5468        smp->class_version = 1;
5469        smp->method = IB_MGMT_METHOD_SEND;
5470        smp->hop_cnt = 1;
5471        smp->attr_id = QIB_VENDOR_IPG;
5472        smp->attr_mod = 0;
5473
5474        if (!ret)
5475                ret = ib_post_send_mad(send_buf, NULL);
5476        if (ret)
5477                ib_free_send_mad(send_buf);
5478retry:
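        /* exponential backoff: 2 << ipg_tries msec, doubling per retry */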
5479        delay = 2 << ppd->cpspec->ipg_tries;
5480        queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
5481                           msecs_to_jiffies(delay));
5482}
5483
5484/*
5485 * Timeout handler for setting IPG.
5486 * Only called if r1.
5487 */
5488static void ipg_7322_work(struct work_struct *work)
5489{
5490        struct qib_pportdata *ppd;
5491
5492        ppd = container_of(work, struct qib_chippport_specific,
5493                           ipg_work.work)->ppd;
5494        if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
5495            && ++ppd->cpspec->ipg_tries <= 10)
5496                try_7322_ipg(ppd);
5497}
5498
5499static u32 qib_7322_iblink_state(u64 ibcs)
5500{
5501        u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);
5502
5503        switch (state) {
5504        case IB_7322_L_STATE_INIT:
5505                state = IB_PORT_INIT;
5506                break;
5507        case IB_7322_L_STATE_ARM:
5508                state = IB_PORT_ARMED;
5509                break;
5510        case IB_7322_L_STATE_ACTIVE:
5511        case IB_7322_L_STATE_ACT_DEFER:
5512                state = IB_PORT_ACTIVE;
5513                break;
5514        default:
5515                fallthrough;
5516        case IB_7322_L_STATE_DOWN:
5517                state = IB_PORT_DOWN;
5518                break;
5519        }
5520        return state;
5521}
5522
5523/* returns the IBTA port state, rather than the IBC link training state */
5524static u8 qib_7322_phys_portstate(u64 ibcs)
5525{
5526        u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
5527        return qib_7322_physportstate[state];
5528}
5529
5530static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
5531{
5532        int ret = 0, symadj = 0;
5533        unsigned long flags;
5534        int mult;
5535
5536        spin_lock_irqsave(&ppd->lflags_lock, flags);
5537        ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
5538        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5539
5540        /* Update our picture of width and speed from chip */
5541        if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
5542                ppd->link_speed_active = QIB_IB_QDR;
5543                mult = 4;
5544        } else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
5545                ppd->link_speed_active = QIB_IB_DDR;
5546                mult = 2;
5547        } else {
5548                ppd->link_speed_active = QIB_IB_SDR;
5549                mult = 1;
5550        }
5551        if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
5552                ppd->link_width_active = IB_WIDTH_4X;
5553                mult *= 4;
5554        } else
5555                ppd->link_width_active = IB_WIDTH_1X;
5556        ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
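        /*
         * mult now folds speed (1/2/4 for SDR/DDR/QDR) and width (x4
         * for 4X) together, so values 1..16 select the matching static
         * rate entry in ib_rate_to_delay[].
         */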
5557
5558        if (!ibup) {
5559                u64 clr;
5560
5561                /* Link went down. */
5562                /* do IPG MAD again after linkdown, even if last time failed */
5563                ppd->cpspec->ipg_tries = 0;
5564                clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
5565                        (SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
5566                         SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
5567                if (clr)
5568                        qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
5569                if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5570                                     QIBL_IB_AUTONEG_INPROG)))
5571                        set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5572                if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5573                        struct qib_qsfp_data *qd =
5574                                &ppd->cpspec->qsfp_data;
5575                        /* unlock the Tx settings, speed may change */
5576                        qib_write_kreg_port(ppd, krp_tx_deemph_override,
5577                                SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
5578                                reset_tx_deemphasis_override));
5579                        qib_cancel_sends(ppd);
5580                        /* on link down, ensure sane pcs state */
5581                        qib_7322_mini_pcs_reset(ppd);
5582                        /* schedule the qsfp refresh which should turn the link
5583                           off */
5584                        if (ppd->dd->flags & QIB_HAS_QSFP) {
5585                                qd->t_insert = jiffies;
5586                                queue_work(ib_wq, &qd->work);
5587                        }
5588                        spin_lock_irqsave(&ppd->sdma_lock, flags);
5589                        if (__qib_sdma_running(ppd))
5590                                __qib_sdma_process_event(ppd,
5591                                        qib_sdma_event_e70_go_idle);
5592                        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
5593                }
5594                clr = read_7322_creg32_port(ppd, crp_iblinkdown);
5595                if (clr == ppd->cpspec->iblnkdownsnap)
5596                        ppd->cpspec->iblnkdowndelta++;
5597        } else {
5598                if (qib_compat_ddr_negotiate &&
5599                    !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5600                                     QIBL_IB_AUTONEG_INPROG)) &&
5601                    ppd->link_speed_active == QIB_IB_SDR &&
5602                    (ppd->link_speed_enabled & QIB_IB_DDR)
5603                    && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
5604                        /* we are SDR, and auto-negotiation enabled */
5605                        ++ppd->cpspec->autoneg_tries;
5606                        if (!ppd->cpspec->ibdeltainprog) {
5607                                ppd->cpspec->ibdeltainprog = 1;
5608                                ppd->cpspec->ibsymdelta +=
5609                                        read_7322_creg32_port(ppd,
5610                                                crp_ibsymbolerr) -
5611                                                ppd->cpspec->ibsymsnap;
5612                                ppd->cpspec->iblnkerrdelta +=
5613                                        read_7322_creg32_port(ppd,
5614                                                crp_iblinkerrrecov) -
5615                                                ppd->cpspec->iblnkerrsnap;
5616                        }
5617                        try_7322_autoneg(ppd);
5618                        ret = 1; /* no other IB status change processing */
5619                } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5620                           ppd->link_speed_active == QIB_IB_SDR) {
5621                        qib_autoneg_7322_send(ppd, 1);
5622                        set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5623                        qib_7322_mini_pcs_reset(ppd);
5624                        udelay(2);
5625                        ret = 1; /* no other IB status change processing */
5626                } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5627                           (ppd->link_speed_active & QIB_IB_DDR)) {
5628                        spin_lock_irqsave(&ppd->lflags_lock, flags);
5629                        ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
5630                                         QIBL_IB_AUTONEG_FAILED);
5631                        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5632                        ppd->cpspec->autoneg_tries = 0;
5633                        /* re-enable SDR, for next link down */
5634                        set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5635                        wake_up(&ppd->cpspec->autoneg_wait);
5636                        symadj = 1;
5637                } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
5638                        /*
5639                         * Clear autoneg failure flag, and do setup
5640                         * so we'll try next time link goes down and
5641                         * back to INIT (possibly connected to a
5642                         * different device).
5643                         */
5644                        spin_lock_irqsave(&ppd->lflags_lock, flags);
5645                        ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
5646                        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5647                        ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
5648                        symadj = 1;
5649                }
5650                if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5651                        symadj = 1;
5652                        if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
5653                                try_7322_ipg(ppd);
5654                        if (!ppd->cpspec->recovery_init)
5655                                setup_7322_link_recovery(ppd, 0);
5656                        ppd->cpspec->qdr_dfe_time = jiffies +
5657                                msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
5658                }
5659                ppd->cpspec->ibmalfusesnap = 0;
5660                ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
5661                        crp_errlink);
5662        }
5663        if (symadj) {
5664                ppd->cpspec->iblnkdownsnap =
5665                        read_7322_creg32_port(ppd, crp_iblinkdown);
5666                if (ppd->cpspec->ibdeltainprog) {
5667                        ppd->cpspec->ibdeltainprog = 0;
5668                        ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
5669                                crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
5670                        ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
5671                                crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
5672                }
5673        } else if (!ibup && qib_compat_ddr_negotiate &&
5674                   !ppd->cpspec->ibdeltainprog &&
5675                        !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5676                ppd->cpspec->ibdeltainprog = 1;
5677                ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
5678                        crp_ibsymbolerr);
5679                ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
5680                        crp_iblinkerrrecov);
5681        }
5682
5683        if (!ret)
5684                qib_setup_7322_setextled(ppd, ibup);
5685        return ret;
5686}
5687
5688/*
5689 * Does read/modify/write to appropriate registers to
5690 * set output and direction bits selected by mask.
 * These are in their canonical positions (e.g. the lsb of
 * dir will end up in D48 of extctrl on existing chips).
 * Returns contents of GP Inputs.
5694 */
5695static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
5696{
5697        u64 read_val, new_out;
5698        unsigned long flags;
5699
5700        if (mask) {
5701                /* some bits being written, lock access to GPIO */
5702                dir &= mask;
5703                out &= mask;
5704                spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
5705                dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
5706                dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
5707                new_out = (dd->cspec->gpio_out & ~mask) | out;
5708
5709                qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
5710                qib_write_kreg(dd, kr_gpio_out, new_out);
5711                dd->cspec->gpio_out = new_out;
5712                spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
5713        }
5714        /*
5715         * It is unlikely that a read at this time would get valid
5716         * data on a pin whose direction line was set in the same
5717         * call to this function. We include the read here because
5718         * that allows us to potentially combine a change on one pin with
5719         * a read on another, and because the old code did something like
5720         * this.
5721         */
5722        read_val = qib_read_kreg64(dd, kr_extstatus);
5723        return SYM_FIELD(read_val, EXTStatus, GPIOIn);
5724}
5725
5726/* Enable writes to config EEPROM, if possible. Returns previous state */
5727static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
5728{
5729        int prev_wen;
5730        u32 mask;
5731
5732        mask = 1 << QIB_EEPROM_WEN_NUM;
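        /*
         * The WEN gpio is active-low: invert the read to get the prior
         * enable state, and drive the pin to 0 (wen ? 0 : mask) to
         * enable writes.
         */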
5733        prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
5734        gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
5735
5736        return prev_wen & 1;
5737}
5738
5739/*
5740 * Read fundamental info we need to use the chip.  These are
5741 * the registers that describe chip capabilities, and are
5742 * saved in shadow registers.
5743 */
5744static void get_7322_chip_params(struct qib_devdata *dd)
5745{
5746        u64 val;
5747        u32 piobufs;
5748        int mtu;
5749
5750        dd->palign = qib_read_kreg32(dd, kr_pagealign);
5751
5752        dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
5753
5754        dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
5755        dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
5756        dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
5757        dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
5758        dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
5759
5760        val = qib_read_kreg64(dd, kr_sendpiobufcnt);
5761        dd->piobcnt2k = val & ~0U;
5762        dd->piobcnt4k = val >> 32;
5763        val = qib_read_kreg64(dd, kr_sendpiosize);
5764        dd->piosize2k = val & ~0U;
5765        dd->piosize4k = val >> 32;
5766
5767        mtu = ib_mtu_enum_to_int(qib_ibmtu);
5768        if (mtu == -1)
5769                mtu = QIB_DEFAULT_MTU;
5770        dd->pport[0].ibmtu = (u32)mtu;
5771        dd->pport[1].ibmtu = (u32)mtu;
5772
5773        /* these may be adjusted in init_chip_wc_pat() */
5774        dd->pio2kbase = (u32 __iomem *)
5775                ((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
5776        dd->pio4kbase = (u32 __iomem *)
5777                ((char __iomem *) dd->kregbase +
5778                 (dd->piobufbase >> 32));
5779        /*
5780         * 4K buffers take 2 pages; we use roundup just to be
 * paranoid; we calculate it once here, rather than on
 * every buf allocate.
5783         */
5784        dd->align4k = ALIGN(dd->piosize4k, dd->palign);
5785
5786        piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;
5787
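        /*
         * Each 64-bit pioavail register holds 2 status bits for each of
         * 32 buffers, hence sizeof(u64) * BITS_PER_BYTE / 2 per register.
         */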
5788        dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
5789                (sizeof(u64) * BITS_PER_BYTE / 2);
5790}
5791
5792/*
5793 * The chip base addresses in cspec and cpspec have to be set
5794 * after possible init_chip_wc_pat(), rather than in
5795 * get_7322_chip_params(), so split out as separate function
5796 */
5797static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
5798{
5799        u32 cregbase;
5800
5801        cregbase = qib_read_kreg32(dd, kr_counterregbase);
5802
5803        dd->cspec->cregbase = (u64 __iomem *)(cregbase +
5804                (char __iomem *)dd->kregbase);
5805
5806        dd->egrtidbase = (u64 __iomem *)
5807                ((char __iomem *) dd->kregbase + dd->rcvegrbase);
5808
5809        /* port registers are defined as relative to base of chip */
5810        dd->pport[0].cpspec->kpregbase =
5811                (u64 __iomem *)((char __iomem *)dd->kregbase);
5812        dd->pport[1].cpspec->kpregbase =
5813                (u64 __iomem *)(dd->palign +
5814                (char __iomem *)dd->kregbase);
5815        dd->pport[0].cpspec->cpregbase =
5816                (u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
5817                kr_counterregbase) + (char __iomem *)dd->kregbase);
5818        dd->pport[1].cpspec->cpregbase =
5819                (u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
5820                kr_counterregbase) + (char __iomem *)dd->kregbase);
5821}
5822
5823/*
5824 * This is a fairly special-purpose observer, so we only support
5825 * the port-specific parts of SendCtrl
5826 */
5827
5828#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) |           \
5829                           SYM_MASK(SendCtrl_0, SDmaEnable) |           \
5830                           SYM_MASK(SendCtrl_0, SDmaIntEnable) |        \
5831                           SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
5832                           SYM_MASK(SendCtrl_0, SDmaHalt) |             \
5833                           SYM_MASK(SendCtrl_0, IBVLArbiterEn) |        \
5834                           SYM_MASK(SendCtrl_0, ForceCreditUpToDate))
5835
5836static int sendctrl_hook(struct qib_devdata *dd,
5837                         const struct diag_observer *op, u32 offs,
5838                         u64 *data, u64 mask, int only_32)
5839{
5840        unsigned long flags;
5841        unsigned idx;
5842        unsigned pidx;
5843        struct qib_pportdata *ppd = NULL;
5844        u64 local_data, all_bits;
5845
5846        /*
5847         * The fixed correspondence between Physical ports and pports is
5848         * severed. We need to hunt for the ppd that corresponds
5849         * to the offset we got. And we have to do that without admitting
5850         * we know the stride, apparently.
5851         */
5852        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5853                u64 __iomem *psptr;
5854                u32 psoffs;
5855
5856                ppd = dd->pport + pidx;
5857                if (!ppd->cpspec->kpregbase)
5858                        continue;
5859
5860                psptr = ppd->cpspec->kpregbase + krp_sendctrl;
5861                psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
5862                if (psoffs == offs)
5863                        break;
5864        }
5865
5866        /* If pport is not being managed by driver, just avoid shadows. */
5867        if (pidx >= dd->num_pports)
5868                ppd = NULL;
5869
5870        /* In any case, "idx" is flat index in kreg space */
5871        idx = offs / sizeof(u64);
5872
5873        all_bits = ~0ULL;
5874        if (only_32)
5875                all_bits >>= 32;
5876
5877        spin_lock_irqsave(&dd->sendctrl_lock, flags);
5878        if (!ppd || (mask & all_bits) != all_bits) {
5879                /*
5880                 * At least some mask bits are zero, so we need
5881                 * to read. The judgement call is whether from
5882                 * reg or shadow. First-cut: read reg, and complain
5883                 * if any bits which should be shadowed are different
5884                 * from their shadowed value.
5885                 */
5886                if (only_32)
5887                        local_data = (u64)qib_read_kreg32(dd, idx);
5888                else
5889                        local_data = qib_read_kreg64(dd, idx);
5890                *data = (local_data & ~mask) | (*data & mask);
5891        }
5892        if (mask) {
5893                /*
5894                 * At least some mask bits are one, so we need
5895                 * to write, but only shadow some bits.
5896                 */
5897                u64 sval, tval; /* Shadowed, transient */
5898
5899                /*
5900                 * New shadow val is bits we don't want to touch,
5901                 * ORed with bits we do, that are intended for shadow.
5902                 */
5903                if (ppd) {
5904                        sval = ppd->p_sendctrl & ~mask;
5905                        sval |= *data & SENDCTRL_SHADOWED & mask;
5906                        ppd->p_sendctrl = sval;
5907                } else
5908                        sval = *data & SENDCTRL_SHADOWED & mask;
5909                tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
5910                qib_write_kreg(dd, idx, tval);
                qib_write_kreg(dd, kr_scratch, 0ULL);
5912        }
5913        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
5914        return only_32 ? 4 : 8;
5915}
5916
5917static const struct diag_observer sendctrl_0_observer = {
5918        sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
5919        KREG_IDX(SendCtrl_0) * sizeof(u64)
5920};
5921
5922static const struct diag_observer sendctrl_1_observer = {
5923        sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
5924        KREG_IDX(SendCtrl_1) * sizeof(u64)
5925};
5926
5927static ushort sdma_fetch_prio = 8;
5928module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
5929MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");
5930
5931/* Besides logging QSFP events, we set appropriate TxDDS values */
5932static void init_txdds_table(struct qib_pportdata *ppd, int override);
5933
5934static void qsfp_7322_event(struct work_struct *work)
5935{
5936        struct qib_qsfp_data *qd;
5937        struct qib_pportdata *ppd;
5938        unsigned long pwrup;
5939        unsigned long flags;
5940        int ret;
5941        u32 le2;
5942
5943        qd = container_of(work, struct qib_qsfp_data, work);
5944        ppd = qd->ppd;
5945        pwrup = qd->t_insert +
5946                msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC);
5947
5948        /* Delay for 20 msecs to allow ModPrs resistor to setup */
5949        mdelay(QSFP_MODPRS_LAG_MSEC);
5950
5951        if (!qib_qsfp_mod_present(ppd)) {
5952                ppd->cpspec->qsfp_data.modpresent = 0;
5953                /* Set the physical link to disabled */
5954                qib_set_ib_7322_lstate(ppd, 0,
5955                                       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
5956                spin_lock_irqsave(&ppd->lflags_lock, flags);
5957                ppd->lflags &= ~QIBL_LINKV;
5958                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5959        } else {
5960                /*
                 * Some QSFPs not only do not respond until the full power-up
5962                 * time, but may behave badly if we try. So hold off responding
5963                 * to insertion.
5964                 */
5965                while (1) {
5966                        if (time_is_before_jiffies(pwrup))
5967                                break;
5968                        msleep(20);
5969                }
5970
5971                ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
5972
5973                /*
5974                 * Need to change LE2 back to defaults if we couldn't
5975                 * read the cable type (to handle cable swaps), so do this
5976                 * even on failure to read cable information.  We don't
5977                 * get here for QME, so IS_QME check not needed here.
5978                 */
5979                if (!ret && !ppd->dd->cspec->r1) {
5980                        if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
5981                                le2 = LE2_QME;
5982                        else if (qd->cache.atten[1] >= qib_long_atten &&
5983                                 QSFP_IS_CU(qd->cache.tech))
5984                                le2 = LE2_5m;
5985                        else
5986                                le2 = LE2_DEFAULT;
5987                } else
5988                        le2 = LE2_DEFAULT;
5989                ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
5990                /*
                 * We always change parameters, since we can choose
5992                 * values for cables without eeproms, and the cable may have
5993                 * changed from a cable with full or partial eeprom content
5994                 * to one with partial or no content.
5995                 */
5996                init_txdds_table(ppd, 0);
                /*
                 * The physical link is re-enabled only when the previous
                 * state was DISABLED and the VALID bit is not set.  This
                 * should only happen when the cable has been physically
                 * pulled.
                 */
6001                if (!ppd->cpspec->qsfp_data.modpresent &&
6002                    (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) {
6003                        ppd->cpspec->qsfp_data.modpresent = 1;
6004                        qib_set_ib_7322_lstate(ppd, 0,
6005                                QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
6006                        spin_lock_irqsave(&ppd->lflags_lock, flags);
6007                        ppd->lflags |= QIBL_LINKV;
6008                        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
6009                }
6010        }
6011}
6012
6013/*
6014 * There is little we can do but complain to the user if QSFP
6015 * initialization fails.
6016 */
6017static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
6018{
6019        unsigned long flags;
6020        struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
6021        struct qib_devdata *dd = ppd->dd;
6022        u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;
6023
6024        mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
6025        qd->ppd = ppd;
6026        qib_qsfp_init(qd, qsfp_7322_event);
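        /*
         * Arm the GPIO interrupt for this port's ModPrsN pin: inverting
         * the sense and setting the bit in gpio_mask lets a module
         * insert/remove raise the GPIO interrupt that ultimately queues
         * qsfp_7322_event() (assumption: the interrupt plumbing that
         * queues the QSFP work lives outside this section).
         */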
6027        spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
6028        dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
6029        dd->cspec->gpio_mask |= mod_prs_bit;
6030        qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
6031        qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
6032        spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
6033}
6034
6035/*
 * Called at device initialization time, and also if the txselect
6037 * module parameter is changed.  This is used for cables that don't
6038 * have valid QSFP EEPROMs (not present, or attenuation is zero).
6039 * We initialize to the default, then if there is a specific
6040 * unit,port match, we use that (and set it immediately, for the
6041 * current speed, if the link is at INIT or better).
 * String format is "default# unit#,port#=# ... u,p=#"; the separator
 * must be a SPACE character.  A newline terminates.  The u,p=# tuples
 * may optionally have "u,p=#,#", where the final # is the H1 value.
6045 * The last specific match is used (actually, all are used, but last
6046 * one is the one that winds up set); if none at all, fall back on default.
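 *
 * Example with hypothetical values: "2 1,1=5 1,2=7,10" makes entry 2
 * the default, uses entry 5 for unit 1 port 1, and entry 7 with an H1
 * value of 10 for unit 1 port 2.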
6047 */
6048static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
6049{
6050        char *nxt, *str;
6051        u32 pidx, unit, port, deflt, h1;
6052        unsigned long val;
6053        int any = 0, seth1;
6054        int txdds_size;
6055
6056        str = txselect_list;
6057
6058        /* default number is validated in setup_txselect() */
6059        deflt = simple_strtoul(str, &nxt, 0);
6060        for (pidx = 0; pidx < dd->num_pports; ++pidx)
6061                dd->pport[pidx].cpspec->no_eep = deflt;
6062
6063        txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
6064        if (IS_QME(dd) || IS_QMH(dd))
6065                txdds_size += TXDDS_MFG_SZ;
6066
6067        while (*nxt && nxt[1]) {
6068                str = ++nxt;
6069                unit = simple_strtoul(str, &nxt, 0);
6070                if (nxt == str || !*nxt || *nxt != ',') {
6071                        while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6072                                ;
6073                        continue;
6074                }
6075                str = ++nxt;
6076                port = simple_strtoul(str, &nxt, 0);
6077                if (nxt == str || *nxt != '=') {
6078                        while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6079                                ;
6080                        continue;
6081                }
6082                str = ++nxt;
6083                val = simple_strtoul(str, &nxt, 0);
6084                if (nxt == str) {
6085                        while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6086                                ;
6087                        continue;
6088                }
6089                if (val >= txdds_size)
6090                        continue;
6091                seth1 = 0;
                h1 = 0; /* gcc thinks it might be used uninitialized */
6093                if (*nxt == ',' && nxt[1]) {
6094                        str = ++nxt;
6095                        h1 = (u32)simple_strtoul(str, &nxt, 0);
6096                        if (nxt == str)
6097                                while (*nxt && *nxt++ != ' ') /* skip */
6098                                        ;
6099                        else
6100                                seth1 = 1;
6101                }
6102                for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
6103                     ++pidx) {
6104                        struct qib_pportdata *ppd = &dd->pport[pidx];
6105
6106                        if (ppd->port != port || !ppd->link_speed_supported)
6107                                continue;
6108                        ppd->cpspec->no_eep = val;
6109                        if (seth1)
6110                                ppd->cpspec->h1_val = h1;
6111                        /* now change the IBC and serdes, overriding generic */
6112                        init_txdds_table(ppd, 1);
6113                        /* Re-enable the physical state machine on mezz boards
6114                         * now that the correct settings have been set.
                         * QSFP boards are handled by the QSFP event handler. */
6116                        if (IS_QMH(dd) || IS_QME(dd))
6117                                qib_set_ib_7322_lstate(ppd, 0,
6118                                            QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
6119                        any++;
6120                }
6121                if (*nxt == '\n')
6122                        break; /* done */
6123        }
6124        if (change && !any) {
6125                /* no specific setting, use the default.
6126                 * Change the IBC and serdes, but since it's
6127                 * general, don't override specific settings.
6128                 */
6129                for (pidx = 0; pidx < dd->num_pports; ++pidx)
6130                        if (dd->pport[pidx].link_speed_supported)
6131                                init_txdds_table(&dd->pport[pidx], 0);
6132        }
6133}
6134
6135/* handle the txselect parameter changing */
6136static int setup_txselect(const char *str, const struct kernel_param *kp)
6137{
6138        struct qib_devdata *dd;
6139        unsigned long index, val;
6140        char *n;
6141
6142        if (strlen(str) >= ARRAY_SIZE(txselect_list)) {
6143                pr_info("txselect_values string too long\n");
6144                return -ENOSPC;
6145        }
6146        val = simple_strtoul(str, &n, 0);
6147        if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
6148                                TXDDS_MFG_SZ)) {
6149                pr_info("txselect_values must start with a number < %d\n",
6150                        TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
6151                return -EINVAL;
6152        }
6153        strncpy(txselect_list, str, ARRAY_SIZE(txselect_list) - 1);
6154
6155        xa_for_each(&qib_dev_table, index, dd)
6156                if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
6157                        set_no_qsfp_atten(dd, 1);
6158        return 0;
6159}
6160
6161/*
6162 * Write the final few registers that depend on some of the
6163 * init setup.  Done late in init, just before bringing up
6164 * the serdes.
6165 */
6166static int qib_late_7322_initreg(struct qib_devdata *dd)
6167{
6168        int ret = 0, n;
6169        u64 val;
6170
6171        qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
6172        qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
6173        qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
6174        qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
6175        val = qib_read_kreg64(dd, kr_sendpioavailaddr);
6176        if (val != dd->pioavailregs_phys) {
6177                qib_dev_err(dd,
6178                        "Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
6179                        (unsigned long) dd->pioavailregs_phys,
6180                        (unsigned long long) val);
6181                ret = -EINVAL;
6182        }
6183
6184        n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
6185        qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
        /* driver itself sends get pkey, lid, etc., so enable checking too, to catch bugs */
6187        qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);
6188
6189        qib_register_observer(dd, &sendctrl_0_observer);
6190        qib_register_observer(dd, &sendctrl_1_observer);
6191
6192        dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
6193        qib_write_kreg(dd, kr_control, dd->control);
6194        /*
6195         * Set SendDmaFetchPriority and init Tx params, including
6196         * QSFP handler on boards that have QSFP.
6197         * First set our default attenuation entry for cables that
6198         * don't have valid attenuation.
6199         */
6200        set_no_qsfp_atten(dd, 0);
6201        for (n = 0; n < dd->num_pports; ++n) {
6202                struct qib_pportdata *ppd = dd->pport + n;
6203
6204                qib_write_kreg_port(ppd, krp_senddmaprioritythld,
6205                                    sdma_fetch_prio & 0xf);
6206                /* Initialize qsfp if present on board. */
6207                if (dd->flags & QIB_HAS_QSFP)
6208                        qib_init_7322_qsfp(ppd);
6209        }
6210        dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
6211        qib_write_kreg(dd, kr_control, dd->control);
6212
6213        return ret;
6214}
6215
6216/* per IB port errors.  */
6217#define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
6218        MASK_ACROSS(8, 15))
6219#define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
6220#define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
6221        MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
6222        MASK_ACROSS(0, 11))
6223
6224/*
6225 * Write the initialization per-port registers that need to be done at
6226 * driver load and after reset completes (i.e., that aren't done as part
6227 * of other init procedures called from qib_init.c).
6228 * Some of these should be redundant on reset, but play safe.
6229 */
6230static void write_7322_init_portregs(struct qib_pportdata *ppd)
6231{
6232        u64 val;
6233        int i;
6234
6235        if (!ppd->link_speed_supported) {
6236                /* no buffer credits for this port */
6237                for (i = 1; i < 8; i++)
6238                        qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
6239                qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
6240                qib_write_kreg(ppd->dd, kr_scratch, 0);
6241                return;
6242        }
6243
6244        /*
6245         * Set the number of supported virtual lanes in IBC,
6246         * for flow control packet handling on unsupported VLs
6247         */
6248        val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
6249        val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
6250        val |= (u64)(ppd->vls_supported - 1) <<
6251                SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
6252        qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
6253
6254        qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);
6255
6256        /* enable tx header checking */
6257        qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
6258                            IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
6259                            IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);
6260
6261        qib_write_kreg_port(ppd, krp_ncmodectrl,
6262                SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));
6263
6264        /*
6265         * Unconditionally clear the bufmask bits.  If SDMA is
6266         * enabled, we'll set them appropriately later.
6267         */
6268        qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
6269        qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
6270        qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
6271        if (ppd->dd->cspec->r1)
6272                ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
6273}
6274
6275/*
6276 * Write the initialization per-device registers that need to be done at
6277 * driver load and after reset completes (i.e., that aren't done as part
6278 * of other init procedures called from qib_init.c).  Also write per-port
6279 * registers that are affected by overall device config, such as QP mapping
6280 * Some of these should be redundant on reset, but play safe.
6281 */
6282static void write_7322_initregs(struct qib_devdata *dd)
6283{
6284        struct qib_pportdata *ppd;
6285        int i, pidx;
6286        u64 val;
6287
6288        /* Set Multicast QPs received by port 2 to map to context one. */
6289        qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);
6290
6291        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
6292                unsigned n, regno;
6293                unsigned long flags;
6294
6295                if (dd->n_krcv_queues < 2 ||
6296                        !dd->pport[pidx].link_speed_supported)
6297                        continue;
6298
6299                ppd = &dd->pport[pidx];
6300
6301                /* be paranoid against later code motion, etc. */
6302                spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
6303                ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
6304                spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
6305
6306                /* Initialize QP to context mapping */
6307                regno = krp_rcvqpmaptable;
6308                val = 0;
6309                if (dd->num_pports > 1)
6310                        n = dd->first_user_ctxt / dd->num_pports;
6311                else
6312                        n = dd->first_user_ctxt - 1;
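                /*
                 * Each map register packs six 5-bit context numbers
                 * (ctxt << (5 * (i % 6))), so the 32 entries below span
                 * six successive registers, each written as it fills
                 * (the final, partial one after the loop).
                 */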
6313                for (i = 0; i < 32; ) {
6314                        unsigned ctxt;
6315
6316                        if (dd->num_pports > 1)
6317                                ctxt = (i % n) * dd->num_pports + pidx;
6318                        else if (i % n)
6319                                ctxt = (i % n) + 1;
6320                        else
6321                                ctxt = ppd->hw_pidx;
6322                        val |= ctxt << (5 * (i % 6));
6323                        i++;
6324                        if (i % 6 == 0) {
6325                                qib_write_kreg_port(ppd, regno, val);
6326                                val = 0;
6327                                regno++;
6328                        }
6329                }
6330                qib_write_kreg_port(ppd, regno, val);
6331        }
6332
6333        /*
         * Set up interrupt mitigation for kernel contexts, but
6335         * not user contexts (user contexts use interrupts when
6336         * stalled waiting for any packet, so want those interrupts
6337         * right away).
6338         */
6339        for (i = 0; i < dd->first_user_ctxt; i++) {
6340                dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
6341                qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
6342        }
6343
6344        /*
         * Initialize the (disabled) rcvflow tables.  Application code
         * will set up each flow as it uses the flow.
6347         * Doesn't clear any of the error bits that might be set.
6348         */
6349        val = TIDFLOW_ERRBITS; /* these are W1C */
6350        for (i = 0; i < dd->cfgctxts; i++) {
6351                int flow;
6352
6353                for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
6354                        qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
6355        }
6356
        /*
         * Dual-port cards init to dual-port recovery, single-port cards
         * to the one port.  Dual-port cards may later adjust to 1 port,
         * and then back to dual port if both ports are connected.
         */
6362        if (dd->num_pports)
6363                setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
6364}
6365
6366static int qib_init_7322_variables(struct qib_devdata *dd)
6367{
6368        struct qib_pportdata *ppd;
6369        unsigned features, pidx, sbufcnt;
6370        int ret, mtu;
6371        u32 sbufs, updthresh;
6372        resource_size_t vl15off;
6373
6374        /* pport structs are contiguous, allocated after devdata */
6375        ppd = (struct qib_pportdata *)(dd + 1);
6376        dd->pport = ppd;
6377        ppd[0].dd = dd;
6378        ppd[1].dd = dd;
6379
6380        dd->cspec = (struct qib_chip_specific *)(ppd + 2);
6381
6382        ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
6383        ppd[1].cpspec = &ppd[0].cpspec[1];
6384        ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
6385        ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */
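        /*
         * Net layout of the single allocation implied above: devdata,
         * then the two pportdata structs, then chip_specific, then the
         * two chippport_specific structs.
         */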
6386
6387        spin_lock_init(&dd->cspec->rcvmod_lock);
6388        spin_lock_init(&dd->cspec->gpio_lock);
6389
6390        /* we haven't yet set QIB_PRESENT, so use read directly */
6391        dd->revision = readq(&dd->kregbase[kr_revision]);
6392
6393        if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
6394                qib_dev_err(dd,
6395                        "Revision register read failure, giving up initialization\n");
6396                ret = -ENODEV;
6397                goto bail;
6398        }
6399        dd->flags |= QIB_PRESENT;  /* now register routines work */
6400
6401        dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
6402        dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
6403        dd->cspec->r1 = dd->minrev == 1;
6404
6405        get_7322_chip_params(dd);
6406        features = qib_7322_boardname(dd);
6407
6408        /* now that piobcnt2k and 4k set, we can allocate these */
6409        sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
6410                NUM_VL15_BUFS + BITS_PER_LONG - 1;
6411        sbufcnt /= BITS_PER_LONG;
6412        dd->cspec->sendchkenable =
6413                kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendchkenable),
6414                              GFP_KERNEL);
6415        dd->cspec->sendgrhchk =
6416                kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendgrhchk),
6417                              GFP_KERNEL);
6418        dd->cspec->sendibchk =
6419                kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendibchk),
6420                              GFP_KERNEL);
6421        if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
6422                !dd->cspec->sendibchk) {
6423                ret = -ENOMEM;
6424                goto bail;
6425        }
6426
6427        ppd = dd->pport;
6428
6429        /*
6430         * GPIO bits for TWSI data and clock,
6431         * used for serial EEPROM.
6432         */
6433        dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
6434        dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
6435        dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
6436
6437        dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
6438                QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
6439                QIB_HAS_THRESH_UPDATE |
6440                (sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
6441        dd->flags |= qib_special_trigger ?
6442                QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
6443
6444        /*
6445         * Setup initial values.  These may change when PAT is enabled, but
6446         * we need these to do initial chip register accesses.
6447         */
6448        qib_7322_set_baseaddrs(dd);
6449
6450        mtu = ib_mtu_enum_to_int(qib_ibmtu);
6451        if (mtu == -1)
6452                mtu = QIB_DEFAULT_MTU;
6453
6454        dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
6455        /* all hwerrors become interrupts, unless special purposed */
6456        dd->cspec->hwerrmask = ~0ULL;
6457        /*  link_recovery setup causes these errors, so ignore them,
6458         *  other than clearing them when they occur */
6459        dd->cspec->hwerrmask &=
6460                ~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
6461                  SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
6462                  HWE_MASK(LATriggered));
6463
6464        for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
6465                struct qib_chippport_specific *cp = ppd->cpspec;
6466
6467                ppd->link_speed_supported = features & PORT_SPD_CAP;
6468                features >>=  PORT_SPD_CAP_SHIFT;
6469                if (!ppd->link_speed_supported) {
6470                        /* single port mode (7340, or configured) */
6471                        dd->skip_kctxt_mask |= 1 << pidx;
6472                        if (pidx == 0) {
6473                                /* Make sure port is disabled. */
6474                                qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6475                                qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6476                                ppd[0] = ppd[1];
6477                                dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6478                                                  IBSerdesPClkNotDetectMask_0)
6479                                                  | SYM_MASK(HwErrMask,
6480                                                  SDmaMemReadErrMask_0));
6481                                dd->cspec->int_enable_mask &= ~(
6482                                     SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
6483                                     SYM_MASK(IntMask, SDmaIdleIntMask_0) |
6484                                     SYM_MASK(IntMask, SDmaProgressIntMask_0) |
6485                                     SYM_MASK(IntMask, SDmaIntMask_0) |
6486                                     SYM_MASK(IntMask, ErrIntMask_0) |
6487                                     SYM_MASK(IntMask, SendDoneIntMask_0));
6488                        } else {
6489                                /* Make sure port is disabled. */
6490                                qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6491                                qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6492                                dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6493                                                  IBSerdesPClkNotDetectMask_1)
6494                                                  | SYM_MASK(HwErrMask,
6495                                                  SDmaMemReadErrMask_1));
6496                                dd->cspec->int_enable_mask &= ~(
6497                                     SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
6498                                     SYM_MASK(IntMask, SDmaIdleIntMask_1) |
6499                                     SYM_MASK(IntMask, SDmaProgressIntMask_1) |
6500                                     SYM_MASK(IntMask, SDmaIntMask_1) |
6501                                     SYM_MASK(IntMask, ErrIntMask_1) |
6502                                     SYM_MASK(IntMask, SendDoneIntMask_1));
6503                        }
6504                        continue;
6505                }
6506
6507                dd->num_pports++;
6508                ret = qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
6509                if (ret) {
6510                        dd->num_pports--;
6511                        goto bail;
6512                }
6513
6514                ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
6515                ppd->link_width_enabled = IB_WIDTH_4X;
6516                ppd->link_speed_enabled = ppd->link_speed_supported;
6517                /*
                 * Set the initial values to a reasonable default; they are
                 * set for real when the link comes up.
6520                 */
6521                ppd->link_width_active = IB_WIDTH_4X;
6522                ppd->link_speed_active = QIB_IB_SDR;
6523                ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
6524                switch (qib_num_cfg_vls) {
6525                case 1:
6526                        ppd->vls_supported = IB_VL_VL0;
6527                        break;
6528                case 2:
6529                        ppd->vls_supported = IB_VL_VL0_1;
6530                        break;
6531                default:
6532                        qib_devinfo(dd->pcidev,
6533                                    "Invalid num_vls %u, using 4 VLs\n",
6534                                    qib_num_cfg_vls);
6535                        qib_num_cfg_vls = 4;
6536                        fallthrough;
6537                case 4:
6538                        ppd->vls_supported = IB_VL_VL0_3;
6539                        break;
6540                case 8:
6541                        if (mtu <= 2048)
6542                                ppd->vls_supported = IB_VL_VL0_7;
6543                        else {
6544                                qib_devinfo(dd->pcidev,
                                            "Invalid num_vls %u for MTU %d, using 4 VLs\n",
6546                                            qib_num_cfg_vls, mtu);
6547                                ppd->vls_supported = IB_VL_VL0_3;
6548                                qib_num_cfg_vls = 4;
6549                        }
6550                        break;
6551                }
6552                ppd->vls_operational = ppd->vls_supported;
6553
6554                init_waitqueue_head(&cp->autoneg_wait);
6555                INIT_DELAYED_WORK(&cp->autoneg_work,
6556                                  autoneg_7322_work);
6557                if (ppd->dd->cspec->r1)
6558                        INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);
6559
6560                /*
6561                 * For Mez and similar cards, no qsfp info, so do
6562                 * the "cable info" setup here.  Can be overridden
6563                 * in adapter-specific routines.
6564                 */
6565                if (!(dd->flags & QIB_HAS_QSFP)) {
6566                        if (!IS_QMH(dd) && !IS_QME(dd))
6567                                qib_devinfo(dd->pcidev,
6568                                        "IB%u:%u: Unknown mezzanine card type\n",
6569                                        dd->unit, ppd->port);
6570                        cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
6571                        /*
6572                         * Choose center value as default tx serdes setting
6573                         * until changed through module parameter.
6574                         */
6575                        ppd->cpspec->no_eep = IS_QMH(dd) ?
6576                                TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
6577                } else
6578                        cp->h1_val = H1_FORCE_VAL;
6579
6580                /* Avoid writes to chip for mini_init */
6581                if (!qib_mini_init)
6582                        write_7322_init_portregs(ppd);
6583
6584                timer_setup(&cp->chase_timer, reenable_chase, 0);
6585
6586                ppd++;
6587        }
6588
6589        dd->rcvhdrentsize = qib_rcvhdrentsize ?
6590                qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
6591        dd->rcvhdrsize = qib_rcvhdrsize ?
6592                qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
6593        dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
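        /*
         * sizeof(u64) / sizeof(u32) == 2 and binds tighter than the
         * subtraction, so rhf_offset lands on the 64-bit receive header
         * flags word, two 32-bit words from the end of each entry.
         */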
6594
6595        /* we always allocate at least 2048 bytes for eager buffers */
6596        dd->rcvegrbufsize = max(mtu, 2048);
6597        dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
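        /*
         * E.g. (MTUs assumed): mtu == 4096 gives a 4096-byte eager
         * buffer and shift 12; any mtu <= 2048 gives the 2048-byte
         * minimum and shift 11.
         */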
6598
6599        qib_7322_tidtemplate(dd);
6600
6601        /*
6602         * We can request a receive interrupt for 1 or
6603         * more packets from current offset.
6604         */
6605        dd->rhdrhead_intr_off =
6606                (u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
6607
6608        /* setup the stats timer; the add_timer is done at end of init */
6609        timer_setup(&dd->stats_timer, qib_get_7322_faststats, 0);
6610
6611        dd->ureg_align = 0x10000;  /* 64KB alignment */
6612
6613        dd->piosize2kmax_dwords = dd->piosize2k >> 2;
6614
6615        qib_7322_config_ctxts(dd);
6616        qib_set_ctxtcnt(dd);
6617
6618        /*
6619         * We do not set WC on the VL15 buffers to avoid
6620         * a rare problem with unaligned writes from
6621         * interrupt-flushed store buffers, so we need
6622         * to map those separately here.  We can't solve
6623         * this for the rarely used mtrr case.
6624         */
6625        ret = init_chip_wc_pat(dd, 0);
6626        if (ret)
6627                goto bail;
6628
6629        /* vl15 buffers start just after the 4k buffers */
6630        vl15off = dd->physaddr + (dd->piobufbase >> 32) +
6631                  dd->piobcnt4k * dd->align4k;
6632        dd->piovl15base = ioremap(vl15off,
6633                                          NUM_VL15_BUFS * dd->align4k);
6634        if (!dd->piovl15base) {
6635                ret = -ENOMEM;
6636                goto bail;
6637        }
6638
6639        qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
6640
6641        ret = 0;
6642        if (qib_mini_init)
6643                goto bail;
6644        if (!dd->num_pports) {
6645                qib_dev_err(dd, "No ports enabled, giving up initialization\n");
                goto bail; /* not an error; leave device up to examine why */
6647        }
6648
6649        write_7322_initregs(dd);
6650        ret = qib_create_ctxts(dd);
6651        init_7322_cntrnames(dd);
6652
6653        updthresh = 8U; /* update threshold */
6654
        /*
         * Use all of the 4KB buffers for kernel SDMA (zero if !SDMA),
         * but reserve the greater of the update threshold or 3 buffers
         * for other kernel use, such as sending SMI, MAD, and ACKs.
         * If SDMA is disabled, all of the 4KB buffers go to the kernel.
         * If the reserve were less than the update threshold, we could
         * wait a long time for an update.  Coded this way because we
         * sometimes change the update threshold for various reasons,
         * and we want this to remain robust.
         */
6665        if (dd->flags & QIB_HAS_SEND_DMA) {
6666                dd->cspec->sdmabufcnt = dd->piobcnt4k;
6667                sbufs = updthresh > 3 ? updthresh : 3;
6668        } else {
6669                dd->cspec->sdmabufcnt = 0;
6670                sbufs = dd->piobcnt4k;
6671        }
6672        dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
6673                dd->cspec->sdmabufcnt;
6674        dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
6675        dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
6676        dd->last_pio = dd->cspec->lastbuf_for_pio;
6677        dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
6678                dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
6679
6680        /*
         * If we have 16 user contexts, we will have 7 sbufs
         * per context, so reduce the update threshold to match.  We
         * want the update to arrive before we actually run out, so at
         * low pbufs/ctxt give ourselves some margin.
6685         */
6686        if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
6687                updthresh = dd->pbufsctxt - 2;
6688        dd->cspec->updthresh_dflt = updthresh;
6689        dd->cspec->updthresh = updthresh;
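        /*
         * Sketch with assumed counts: lastctxt_piobuf == 112 across 16
         * user contexts gives pbufsctxt == 7, so updthresh drops from
         * the default 8 to 7 - 2 == 5, keeping two buffers of margin.
         */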
6690
6691        /* before full enable, no interrupts, no locking needed */
6692        dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
6693                             << SYM_LSB(SendCtrl, AvailUpdThld)) |
6694                        SYM_MASK(SendCtrl, SendBufAvailPad64Byte);
6695
6696        dd->psxmitwait_supported = 1;
6697        dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
6698bail:
6699        if (!dd->ctxtcnt)
6700                dd->ctxtcnt = 1; /* for other initialization code */
6701
6702        return ret;
6703}
6704
6705static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
6706                                        u32 *pbufnum)
6707{
6708        u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
6709        struct qib_devdata *dd = ppd->dd;
6710
6711        /* last is same for 2k and 4k, because we use 4k if all 2k busy */
6712        if (pbc & PBC_7322_VL15_SEND) {
6713                first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
6714                last = first;
6715        } else {
6716                if ((plen + 1) > dd->piosize2kmax_dwords)
6717                        first = dd->piobcnt2k;
6718                else
6719                        first = 0;
6720                last = dd->cspec->lastbuf_for_pio;
6721        }
6722        return qib_getsendbuf_range(dd, pbufnum, first, last);
6723}
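/*
 * For example (assuming piosize2kmax_dwords == 512): a PBC length of 600
 * dwords cannot use a 2 KB buffer, so the search above starts at the
 * first 4 KB buffer (piobcnt2k), while a VL15 send skips the search and
 * takes the port's dedicated buffer.
 */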
6724
6725static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
6726                                     u32 start)
6727{
6728        qib_write_kreg_port(ppd, krp_psinterval, intv);
6729        qib_write_kreg_port(ppd, krp_psstart, start);
6730}
6731
6732/*
6733 * Must be called with sdma_lock held, or before init finished.
6734 */
6735static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
6736{
6737        qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
6738}
6739
6740/*
6741 * sdma_lock should be acquired before calling this routine
6742 */
6743static void dump_sdma_7322_state(struct qib_pportdata *ppd)
6744{
6745        u64 reg, reg1, reg2;
6746
6747        reg = qib_read_kreg_port(ppd, krp_senddmastatus);
6748        qib_dev_porterr(ppd->dd, ppd->port,
6749                "SDMA senddmastatus: 0x%016llx\n", reg);
6750
6751        reg = qib_read_kreg_port(ppd, krp_sendctrl);
6752        qib_dev_porterr(ppd->dd, ppd->port,
6753                "SDMA sendctrl: 0x%016llx\n", reg);
6754
6755        reg = qib_read_kreg_port(ppd, krp_senddmabase);
6756        qib_dev_porterr(ppd->dd, ppd->port,
6757                "SDMA senddmabase: 0x%016llx\n", reg);
6758
6759        reg = qib_read_kreg_port(ppd, krp_senddmabufmask0);
6760        reg1 = qib_read_kreg_port(ppd, krp_senddmabufmask1);
6761        reg2 = qib_read_kreg_port(ppd, krp_senddmabufmask2);
6762        qib_dev_porterr(ppd->dd, ppd->port,
6763                "SDMA senddmabufmask 0:%llx  1:%llx  2:%llx\n",
6764                 reg, reg1, reg2);
6765
        /* get bufuse bits, clear them, then print old and cleared values */
        reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
        qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg);
        reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
        qib_write_kreg_port(ppd, krp_senddmabuf_use1, reg1);
        reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
        qib_write_kreg_port(ppd, krp_senddmabuf_use2, reg2);
6773        /* 0 and 1 should always be zero, so print as short form */
6774        qib_dev_porterr(ppd->dd, ppd->port,
6775                 "SDMA current senddmabuf_use 0:%llx  1:%llx  2:%llx\n",
6776                 reg, reg1, reg2);
6777        reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
6778        reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
6779        reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
6780        /* 0 and 1 should always be zero, so print as short form */
6781        qib_dev_porterr(ppd->dd, ppd->port,
6782                 "SDMA cleared senddmabuf_use 0:%llx  1:%llx  2:%llx\n",
6783                 reg, reg1, reg2);
6784
6785        reg = qib_read_kreg_port(ppd, krp_senddmatail);
6786        qib_dev_porterr(ppd->dd, ppd->port,
6787                "SDMA senddmatail: 0x%016llx\n", reg);
6788
6789        reg = qib_read_kreg_port(ppd, krp_senddmahead);
6790        qib_dev_porterr(ppd->dd, ppd->port,
6791                "SDMA senddmahead: 0x%016llx\n", reg);
6792
6793        reg = qib_read_kreg_port(ppd, krp_senddmaheadaddr);
6794        qib_dev_porterr(ppd->dd, ppd->port,
6795                "SDMA senddmaheadaddr: 0x%016llx\n", reg);
6796
6797        reg = qib_read_kreg_port(ppd, krp_senddmalengen);
6798        qib_dev_porterr(ppd->dd, ppd->port,
6799                "SDMA senddmalengen: 0x%016llx\n", reg);
6800
6801        reg = qib_read_kreg_port(ppd, krp_senddmadesccnt);
6802        qib_dev_porterr(ppd->dd, ppd->port,
6803                "SDMA senddmadesccnt: 0x%016llx\n", reg);
6804
6805        reg = qib_read_kreg_port(ppd, krp_senddmaidlecnt);
6806        qib_dev_porterr(ppd->dd, ppd->port,
6807                "SDMA senddmaidlecnt: 0x%016llx\n", reg);
6808
6809        reg = qib_read_kreg_port(ppd, krp_senddmaprioritythld);
6810        qib_dev_porterr(ppd->dd, ppd->port,
                "SDMA senddmaprioritythld: 0x%016llx\n", reg);
6812
6813        reg = qib_read_kreg_port(ppd, krp_senddmareloadcnt);
6814        qib_dev_porterr(ppd->dd, ppd->port,
6815                "SDMA senddmareloadcnt: 0x%016llx\n", reg);
6816
6817        dump_sdma_state(ppd);
6818}
6819
6820static struct sdma_set_state_action sdma_7322_action_table[] = {
6821        [qib_sdma_state_s00_hw_down] = {
6822                .go_s99_running_tofalse = 1,
6823                .op_enable = 0,
6824                .op_intenable = 0,
6825                .op_halt = 0,
6826                .op_drain = 0,
6827        },
6828        [qib_sdma_state_s10_hw_start_up_wait] = {
6829                .op_enable = 0,
6830                .op_intenable = 1,
6831                .op_halt = 1,
6832                .op_drain = 0,
6833        },
6834        [qib_sdma_state_s20_idle] = {
6835                .op_enable = 1,
6836                .op_intenable = 1,
6837                .op_halt = 1,
6838                .op_drain = 0,
6839        },
6840        [qib_sdma_state_s30_sw_clean_up_wait] = {
6841                .op_enable = 0,
6842                .op_intenable = 1,
6843                .op_halt = 1,
6844                .op_drain = 0,
6845        },
6846        [qib_sdma_state_s40_hw_clean_up_wait] = {
6847                .op_enable = 1,
6848                .op_intenable = 1,
6849                .op_halt = 1,
6850                .op_drain = 0,
6851        },
6852        [qib_sdma_state_s50_hw_halt_wait] = {
6853                .op_enable = 1,
6854                .op_intenable = 1,
6855                .op_halt = 1,
6856                .op_drain = 1,
6857        },
6858        [qib_sdma_state_s99_running] = {
6859                .op_enable = 1,
6860                .op_intenable = 1,
6861                .op_halt = 0,
6862                .op_drain = 0,
6863                .go_s99_running_totrue = 1,
6864        },
6865};
6866
6867static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
6868{
6869        ppd->sdma_state.set_state_action = sdma_7322_action_table;
6870}
6871
6872static int init_sdma_7322_regs(struct qib_pportdata *ppd)
6873{
6874        struct qib_devdata *dd = ppd->dd;
6875        unsigned lastbuf, erstbuf;
6876        u64 senddmabufmask[3] = { 0 };
6877        int n;
6878
6879        qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
6880        qib_sdma_7322_setlengen(ppd);
6881        qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
6882        qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
6883        qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
6884        qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
6885
6886        if (dd->num_pports)
6887                n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
6888        else
6889                n = dd->cspec->sdmabufcnt; /* failsafe for init */
6890        erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
6891                ((dd->num_pports == 1 || ppd->port == 2) ? n :
6892                dd->cspec->sdmabufcnt);
6893        lastbuf = erstbuf + n;
6894
6895        ppd->sdma_state.first_sendbuf = erstbuf;
6896        ppd->sdma_state.last_sendbuf = lastbuf;
6897        for (; erstbuf < lastbuf; ++erstbuf) {
6898                unsigned word = erstbuf / BITS_PER_LONG;
6899                unsigned bit = erstbuf & (BITS_PER_LONG - 1);
6900
6901                senddmabufmask[word] |= 1ULL << bit;
6902        }
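        /*
         * Mapping example (hypothetical range, BITS_PER_LONG == 64):
         * buffers 80..95 set bits 16..31 of senddmabufmask[1], since
         * 80 / 64 == 1 and 80 % 64 == 16.
         */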
6903        qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
6904        qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
6905        qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
6906        return 0;
6907}
6908
6909/* sdma_lock must be held */
6910static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
6911{
6912        struct qib_devdata *dd = ppd->dd;
6913        int sane;
6914        int use_dmahead;
6915        u16 swhead;
6916        u16 swtail;
6917        u16 cnt;
6918        u16 hwhead;
6919
6920        use_dmahead = __qib_sdma_running(ppd) &&
6921                (dd->flags & QIB_HAS_SDMA_TIMEOUT);
6922retry:
6923        hwhead = use_dmahead ?
6924                (u16) le64_to_cpu(*ppd->sdma_head_dma) :
6925                (u16) qib_read_kreg_port(ppd, krp_senddmahead);
6926
6927        swhead = ppd->sdma_descq_head;
6928        swtail = ppd->sdma_descq_tail;
6929        cnt = ppd->sdma_descq_cnt;
6930
6931        if (swhead < swtail)
6932                /* not wrapped */
                sane = (hwhead >= swhead) && (hwhead <= swtail);
6934        else if (swhead > swtail)
6935                /* wrapped around */
6936                sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
6937                        (hwhead <= swtail);
6938        else
6939                /* empty */
6940                sane = (hwhead == swhead);
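        /*
         * Example (values assumed): with cnt == 256, swhead == 250 and
         * swtail == 10 (wrapped), hwhead == 253 or hwhead == 5 is sane,
         * while hwhead == 100 is not and falls into the retry /
         * "no progress" handling below.
         */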
6941
6942        if (unlikely(!sane)) {
6943                if (use_dmahead) {
6944                        /* try one more time, directly from the register */
6945                        use_dmahead = 0;
6946                        goto retry;
6947                }
6948                /* proceed as if no progress */
6949                hwhead = swhead;
6950        }
6951
6952        return hwhead;
6953}
6954
6955static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
6956{
6957        u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
6958
6959        return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
6960               (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
6961               !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
6962               !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
6963}
6964
6965/*
6966 * Compute the amount of delay before sending the next packet if the
6967 * port's send rate differs from the static rate set for the QP.
 * The delay affects the next packet, and the amount of the delay is
 * based on the length of this packet.
6970 */
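/*
 * A worked sketch (multiplier values assumed): if the QP's static rate
 * gives rcv_mult == 4 while the port's delay_mult is 1, a plen of 256
 * dwords yields a delay field of ((256 + 1) >> 1) * 1 == 128; when
 * rcv_mult <= snd_mult, no delay is inserted.
 */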
6971static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
6972                                   u8 srate, u8 vl)
6973{
6974        u8 snd_mult = ppd->delay_mult;
6975        u8 rcv_mult = ib_rate_to_delay[srate];
6976        u32 ret;
6977
6978        ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
6979
6980        /* Indicate VL15, else set the VL in the control word */
6981        if (vl == 15)
6982                ret |= PBC_7322_VL15_SEND_CTRL;
6983        else
6984                ret |= vl << PBC_VL_NUM_LSB;
6985        ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
6986
6987        return ret;
6988}
6989
6990/*
6991 * Enable the per-port VL15 send buffers for use.
6992 * They follow the rest of the buffers, without a config parameter.
 * This was in initregs, but initregs runs before the shadow
 * is set up, and this has to run after it is.
6996 */
6997static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
6998{
6999        unsigned vl15bufs;
7000
7001        vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
7002        qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
7003                               TXCHK_CHG_TYPE_KERN, NULL);
7004}
7005
7006static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
7007{
7008        if (rcd->ctxt < NUM_IB_PORTS) {
7009                if (rcd->dd->num_pports > 1) {
7010                        rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
7011                        rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
7012                } else {
7013                        rcd->rcvegrcnt = KCTXT0_EGRCNT;
7014                        rcd->rcvegr_tid_base = 0;
7015                }
7016        } else {
7017                rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
7018                rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
7019                        (rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
7020        }
7021}
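/*
 * Layout example for a two-port card: kernel ctxts 0 and 1 each get
 * KCTXT0_EGRCNT / 2 eager entries, ctxt 1 based at KCTXT0_EGRCNT / 2;
 * a user ctxt N >= 2 gets cspec->rcvegrcnt entries based at
 * KCTXT0_EGRCNT + (N - 2) * rcvegrcnt.
 */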
7022
7023#define QTXSLEEPS 5000
7024static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
7025                                  u32 len, u32 which, struct qib_ctxtdata *rcd)
7026{
7027        int i;
7028        const int last = start + len - 1;
7029        const int lastr = last / BITS_PER_LONG;
7030        u32 sleeps = 0;
7031        int wait = rcd != NULL;
7032        unsigned long flags;
7033
7034        while (wait) {
7035                unsigned long shadow = 0;
7036                int cstart, previ = -1;
7037
7038                /*
                 * When flipping from kernel to user, we can't change
                 * the checking type if the buffer is allocated to the
                 * driver.  The other direction is OK, because it comes
                 * from close, and we have just disarmed all the
                 * buffers.  All the kernel-to-kernel changes are also
                 * OK.
7045                 */
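                /*
                 * Each buffer has two bits in the pioavail shadow; the
                 * busy bit for buffer N is bit
                 * (2 * N + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT), which the
                 * loop below splits into a shadow-word index and a bit
                 * index within that word.
                 */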
7046                for (cstart = start; cstart <= last; cstart++) {
7047                        i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
7048                                / BITS_PER_LONG;
7049                        if (i != previ) {
7050                                shadow = (unsigned long)
7051                                        le64_to_cpu(dd->pioavailregs_dma[i]);
7052                                previ = i;
7053                        }
7054                        if (test_bit(((2 * cstart) +
7055                                      QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
7056                                     % BITS_PER_LONG, &shadow))
7057                                break;
7058                }
7059
7060                if (cstart > last)
7061                        break;
7062
7063                if (sleeps == QTXSLEEPS)
7064                        break;
7065                /* make sure we see an updated copy next time around */
7066                sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7067                sleeps++;
7068                msleep(20);
7069        }
7070
7071        switch (which) {
7072        case TXCHK_CHG_TYPE_DIS1:
7073                /*
7074                 * disable checking on a range; used by diags; just
7075                 * one buffer, but still written generically
7076                 */
7077                for (i = start; i <= last; i++)
7078                        clear_bit(i, dd->cspec->sendchkenable);
7079                break;
7080
7081        case TXCHK_CHG_TYPE_ENAB1:
7082                /*
7083                 * (re)enable checking on a range; used by diags; just
7084                 * one buffer, but still written generically; read
7085                 * scratch to be sure buffer actually triggered, not
7086                 * just flushed from processor.
7087                 */
7088                qib_read_kreg32(dd, kr_scratch);
7089                for (i = start; i <= last; i++)
7090                        set_bit(i, dd->cspec->sendchkenable);
7091                break;
7092
7093        case TXCHK_CHG_TYPE_KERN:
7094                /* usable by kernel */
7095                for (i = start; i <= last; i++) {
7096                        set_bit(i, dd->cspec->sendibchk);
7097                        clear_bit(i, dd->cspec->sendgrhchk);
7098                }
7099                spin_lock_irqsave(&dd->uctxt_lock, flags);
7100                /* see if we need to raise avail update threshold */
7101                for (i = dd->first_user_ctxt;
7102                     dd->cspec->updthresh != dd->cspec->updthresh_dflt
7103                     && i < dd->cfgctxts; i++)
7104                        if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
7105                           ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
7106                           < dd->cspec->updthresh_dflt)
7107                                break;
7108                spin_unlock_irqrestore(&dd->uctxt_lock, flags);
7109                if (i == dd->cfgctxts) {
7110                        spin_lock_irqsave(&dd->sendctrl_lock, flags);
7111                        dd->cspec->updthresh = dd->cspec->updthresh_dflt;
7112                        dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
7113                        dd->sendctrl |= (dd->cspec->updthresh &
7114                                         SYM_RMASK(SendCtrl, AvailUpdThld)) <<
7115                                           SYM_LSB(SendCtrl, AvailUpdThld);
7116                        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7117                        sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7118                }
7119                break;
7120
7121        case TXCHK_CHG_TYPE_USER:
7122                /* for user process */
7123                for (i = start; i <= last; i++) {
7124                        clear_bit(i, dd->cspec->sendibchk);
7125                        set_bit(i, dd->cspec->sendgrhchk);
7126                }
7127                spin_lock_irqsave(&dd->sendctrl_lock, flags);
7128                if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
7129                        / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
7130                        dd->cspec->updthresh = (rcd->piocnt /
7131                                                rcd->subctxt_cnt) - 1;
7132                        dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
7133                        dd->sendctrl |= (dd->cspec->updthresh &
7134                                        SYM_RMASK(SendCtrl, AvailUpdThld))
7135                                        << SYM_LSB(SendCtrl, AvailUpdThld);
7136                        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7137                        sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7138                } else
7139                        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7140                break;
7141
7142        default:
7143                break;
7144        }
7145
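        /*
         * The loops below key off the TXCHK_CHG_TYPE_* values: the diag
         * cases (which >= 2) push sendchkenable to the chip, while the
         * kernel/user cases (which < 2) push the GRH and IB-packet
         * check masks instead.
         */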
7146        for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
7147                qib_write_kreg(dd, kr_sendcheckmask + i,
7148                               dd->cspec->sendchkenable[i]);
7149
7150        for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
7151                qib_write_kreg(dd, kr_sendgrhcheckmask + i,
7152                               dd->cspec->sendgrhchk[i]);
7153                qib_write_kreg(dd, kr_sendibpktmask + i,
7154                               dd->cspec->sendibchk[i]);
7155        }
7156
7157        /*
7158         * Be sure whatever we did was seen by the chip and acted upon,
7159         * before we return.  Mostly important for which >= 2.
7160         */
7161        qib_read_kreg32(dd, kr_scratch);
7162}
7163
7165/* useful for trigger analyzers, etc. */
7166static void writescratch(struct qib_devdata *dd, u32 val)
7167{
7168        qib_write_kreg(dd, kr_scratch, val);
7169}
7170
/* Dummy for now; reading the temperature via chip regs is unimplemented */
7172static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
7173{
7174        return -ENXIO;
7175}
7176
7177/**
7178 * qib_init_iba7322_funcs - set up the chip-specific function pointers
 * @pdev: the pci_dev for qlogic_ib device
7180 * @ent: pci_device_id struct for this dev
7181 *
7182 * Also allocates, inits, and returns the devdata struct for this
7183 * device instance
7184 *
7185 * This is global, and is called directly at init to set up the
7186 * chip-specific function pointers for later use.
7187 */
7188struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
7189                                           const struct pci_device_id *ent)
7190{
7191        struct qib_devdata *dd;
7192        int ret, i;
7193        u32 tabsize, actual_cnt = 0;
7194
7195        dd = qib_alloc_devdata(pdev,
7196                NUM_IB_PORTS * sizeof(struct qib_pportdata) +
7197                sizeof(struct qib_chip_specific) +
7198                NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
7199        if (IS_ERR(dd))
7200                goto bail;
7201
7202        dd->f_bringup_serdes    = qib_7322_bringup_serdes;
7203        dd->f_cleanup           = qib_setup_7322_cleanup;
7204        dd->f_clear_tids        = qib_7322_clear_tids;
7205        dd->f_free_irq          = qib_7322_free_irq;
7206        dd->f_get_base_info     = qib_7322_get_base_info;
7207        dd->f_get_msgheader     = qib_7322_get_msgheader;
7208        dd->f_getsendbuf        = qib_7322_getsendbuf;
7209        dd->f_gpio_mod          = gpio_7322_mod;
7210        dd->f_eeprom_wen        = qib_7322_eeprom_wen;
7211        dd->f_hdrqempty         = qib_7322_hdrqempty;
7212        dd->f_ib_updown         = qib_7322_ib_updown;
7213        dd->f_init_ctxt         = qib_7322_init_ctxt;
7214        dd->f_initvl15_bufs     = qib_7322_initvl15_bufs;
7215        dd->f_intr_fallback     = qib_7322_intr_fallback;
7216        dd->f_late_initreg      = qib_late_7322_initreg;
7217        dd->f_setpbc_control    = qib_7322_setpbc_control;
7218        dd->f_portcntr          = qib_portcntr_7322;
7219        dd->f_put_tid           = qib_7322_put_tid;
7220        dd->f_quiet_serdes      = qib_7322_mini_quiet_serdes;
7221        dd->f_rcvctrl           = rcvctrl_7322_mod;
7222        dd->f_read_cntrs        = qib_read_7322cntrs;
7223        dd->f_read_portcntrs    = qib_read_7322portcntrs;
7224        dd->f_reset             = qib_do_7322_reset;
7225        dd->f_init_sdma_regs    = init_sdma_7322_regs;
7226        dd->f_sdma_busy         = qib_sdma_7322_busy;
7227        dd->f_sdma_gethead      = qib_sdma_7322_gethead;
7228        dd->f_sdma_sendctrl     = qib_7322_sdma_sendctrl;
7229        dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
7230        dd->f_sdma_update_tail  = qib_sdma_update_7322_tail;
7231        dd->f_sendctrl          = sendctrl_7322_mod;
7232        dd->f_set_armlaunch     = qib_set_7322_armlaunch;
7233        dd->f_set_cntr_sample   = qib_set_cntr_7322_sample;
7234        dd->f_iblink_state      = qib_7322_iblink_state;
7235        dd->f_ibphys_portstate  = qib_7322_phys_portstate;
7236        dd->f_get_ib_cfg        = qib_7322_get_ib_cfg;
7237        dd->f_set_ib_cfg        = qib_7322_set_ib_cfg;
7238        dd->f_set_ib_loopback   = qib_7322_set_loopback;
7239        dd->f_get_ib_table      = qib_7322_get_ib_table;
7240        dd->f_set_ib_table      = qib_7322_set_ib_table;
7241        dd->f_set_intr_state    = qib_7322_set_intr_state;
7242        dd->f_setextled         = qib_setup_7322_setextled;
7243        dd->f_txchk_change      = qib_7322_txchk_change;
7244        dd->f_update_usrhead    = qib_update_7322_usrhead;
7245        dd->f_wantpiobuf_intr   = qib_wantpiobuf_7322_intr;
7246        dd->f_xgxs_reset        = qib_7322_mini_pcs_reset;
7247        dd->f_sdma_hw_clean_up  = qib_7322_sdma_hw_clean_up;
7248        dd->f_sdma_hw_start_up  = qib_7322_sdma_hw_start_up;
7249        dd->f_sdma_init_early   = qib_7322_sdma_init_early;
7250        dd->f_writescratch      = writescratch;
7251        dd->f_tempsense_rd      = qib_7322_tempsense_rd;
7252#ifdef CONFIG_INFINIBAND_QIB_DCA
7253        dd->f_notify_dca        = qib_7322_notify_dca;
7254#endif
7255        /*
7256         * Do remaining PCIe setup and save PCIe values in dd.
7257         * Any error printing is already done by the init code.
7258         * On return, we have the chip mapped, but chip registers
7259         * are not set up until start of qib_init_7322_variables.
7260         */
7261        ret = qib_pcie_ddinit(dd, pdev, ent);
7262        if (ret < 0)
7263                goto bail_free;
7264
7265        /* initialize chip-specific variables */
7266        ret = qib_init_7322_variables(dd);
7267        if (ret)
7268                goto bail_cleanup;
7269
7270        if (qib_mini_init || !dd->num_pports)
7271                goto bail;
7272
7273        /*
7274         * Determine number of vectors we want; depends on port count
7275         * and number of configured kernel receive queues actually used.
7276         * Should also depend on whether sdma is enabled or not, but
7277         * that's such a rare testing case it's not worth worrying about.
7278         */
7279        tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
7280        for (i = 0; i < tabsize; i++)
7281                if ((i < ARRAY_SIZE(irq_table) &&
7282                     irq_table[i].port <= dd->num_pports) ||
7283                    (i >= ARRAY_SIZE(irq_table) &&
7284                     dd->rcd[i - ARRAY_SIZE(irq_table)]))
7285                        actual_cnt++;
        /* reduce by the ctxts < 2, which do not use MSI in this mode */
7287        if (qib_krcvq01_no_msi)
7288                actual_cnt -= dd->num_pports;
7289
7290        tabsize = actual_cnt;
7291        dd->cspec->msix_entries = kcalloc(tabsize,
7292                                          sizeof(struct qib_msix_entry),
7293                                          GFP_KERNEL);
7294        if (!dd->cspec->msix_entries)
7295                tabsize = 0;
7296
7297        if (qib_pcie_params(dd, 8, &tabsize))
7298                qib_dev_err(dd,
7299                        "Failed to setup PCIe or interrupts; continuing anyway\n");
7300        /* may be less than we wanted, if not enough available */
7301        dd->cspec->num_msix_entries = tabsize;
7302
7303        /* setup interrupt handler */
7304        qib_setup_7322_interrupt(dd, 1);
7305
7306        /* clear diagctrl register, in case diags were running and crashed */
7307        qib_write_kreg(dd, kr_hwdiagctrl, 0);
7308#ifdef CONFIG_INFINIBAND_QIB_DCA
7309        if (!dca_add_requester(&pdev->dev)) {
7310                qib_devinfo(dd->pcidev, "DCA enabled\n");
7311                dd->flags |= QIB_DCA_ENABLED;
7312                qib_setup_dca(dd);
7313        }
7314#endif
7315        goto bail;
7316
7317bail_cleanup:
7318        qib_pcie_ddcleanup(dd);
7319bail_free:
7320        qib_free_devdata(dd);
7321        dd = ERR_PTR(ret);
7322bail:
7323        return dd;
7324}
7325
7326/*
 * Set the table entry at the specified index from the specified table.
7328 * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
7329 * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
7330 * 'idx' below addresses the correct entry, while its 4 LSBs select the
7331 * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
7332 */
7333#define DDS_ENT_AMP_LSB 14
7334#define DDS_ENT_MAIN_LSB 9
7335#define DDS_ENT_POST_LSB 5
7336#define DDS_ENT_PRE_XTRA_LSB 3
7337#define DDS_ENT_PRE_LSB 0
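/*
 * Packing example for set_txdds() below: the loopback entry
 * { amp 2, pre 2, main 15, post 6 } packs as (2 << 14) | (15 << 9) |
 * (6 << 5) | 2 == 0x9ec2 before being written to the DDS map register.
 */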
7338
7339/*
 * Set one entry in the TxDDS table for the specified port.
7341 * ridx picks one of the entries, while tp points
7342 * to the appropriate table entry.
7343 */
7344static void set_txdds(struct qib_pportdata *ppd, int ridx,
7345                      const struct txdds_ent *tp)
7346{
7347        struct qib_devdata *dd = ppd->dd;
7348        u32 pack_ent;
7349        int regidx;
7350
7351        /* Get correct offset in chip-space, and in source table */
7352        regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
7353        /*
7354         * We do not use qib_write_kreg_port() because it was intended
7355         * only for registers in the lower "port specific" pages.
         * So do the index calculation by hand.
7357         */
7358        if (ppd->hw_pidx)
7359                regidx += (dd->palign / sizeof(u64));
7360
7361        pack_ent = tp->amp << DDS_ENT_AMP_LSB;
7362        pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
7363        pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
7364        pack_ent |= tp->post << DDS_ENT_POST_LSB;
7365        qib_write_kreg(dd, regidx, pack_ent);
7366        /* Prevent back-to-back writes by hitting scratch */
7367        qib_write_kreg(ppd->dd, kr_scratch, 0);
7368}
7369
7370static const struct vendor_txdds_ent vendor_txdds[] = {
7371        { /* Amphenol 1m 30awg NoEq */
7372                { 0x41, 0x50, 0x48 }, "584470002       ",
7373                { 10,  0,  0,  5 }, { 10,  0,  0,  9 }, {  7,  1,  0, 13 },
7374        },
7375        { /* Amphenol 3m 28awg NoEq */
7376                { 0x41, 0x50, 0x48 }, "584470004       ",
7377                {  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  7, 15 },
7378        },
7379        { /* Finisar 3m OM2 Optical */
7380                { 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
7381                {  0,  0,  0,  3 }, {  0,  0,  0,  4 }, {  0,  0,  0, 13 },
7382        },
7383        { /* Finisar 30m OM2 Optical */
7384                { 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
7385                {  0,  0,  0,  1 }, {  0,  0,  0,  5 }, {  0,  0,  0, 11 },
7386        },
7387        { /* Finisar Default OM2 Optical */
7388                { 0x00, 0x90, 0x65 }, NULL,
7389                {  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  0,  0, 12 },
7390        },
7391        { /* Gore 1m 30awg NoEq */
7392                { 0x00, 0x21, 0x77 }, "QSN3300-1       ",
7393                {  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  0, 15 },
7394        },
7395        { /* Gore 2m 30awg NoEq */
7396                { 0x00, 0x21, 0x77 }, "QSN3300-2       ",
7397                {  0,  0,  0,  8 }, {  0,  0,  0, 10 }, {  0,  1,  7, 15 },
7398        },
7399        { /* Gore 1m 28awg NoEq */
7400                { 0x00, 0x21, 0x77 }, "QSN3800-1       ",
7401                {  0,  0,  0,  6 }, {  0,  0,  0,  8 }, {  0,  1,  0, 15 },
7402        },
7403        { /* Gore 3m 28awg NoEq */
7404                { 0x00, 0x21, 0x77 }, "QSN3800-3       ",
7405                {  0,  0,  0,  9 }, {  0,  0,  0, 13 }, {  0,  1,  7, 15 },
7406        },
7407        { /* Gore 5m 24awg Eq */
7408                { 0x00, 0x21, 0x77 }, "QSN7000-5       ",
7409                {  0,  0,  0,  7 }, {  0,  0,  0,  9 }, {  0,  1,  3, 15 },
7410        },
7411        { /* Gore 7m 24awg Eq */
7412                { 0x00, 0x21, 0x77 }, "QSN7000-7       ",
7413                {  0,  0,  0,  9 }, {  0,  0,  0, 11 }, {  0,  2,  6, 15 },
7414        },
7415        { /* Gore 5m 26awg Eq */
7416                { 0x00, 0x21, 0x77 }, "QSN7600-5       ",
7417                {  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  9, 13 },
7418        },
7419        { /* Gore 7m 26awg Eq */
7420                { 0x00, 0x21, 0x77 }, "QSN7600-7       ",
7421                {  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  10,  1,  8, 15 },
7422        },
7423        { /* Intersil 12m 24awg Active */
7424                { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
7425                {  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  3,  0,  9 },
7426        },
7427        { /* Intersil 10m 28awg Active */
7428                { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
7429                {  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  2,  0,  2 },
7430        },
7431        { /* Intersil 7m 30awg Active */
7432                { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
7433                {  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  1,  0,  3 },
7434        },
7435        { /* Intersil 5m 32awg Active */
7436                { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
7437                {  0,  0,  0,  6 }, {  0,  0,  0,  6 }, {  0,  2,  0,  8 },
7438        },
7439        { /* Intersil Default Active */
7440                { 0x00, 0x30, 0xB4 }, NULL,
7441                {  0,  0,  0,  6 }, {  0,  0,  0,  5 }, {  0,  2,  0,  5 },
7442        },
7443        { /* Luxtera 20m Active Optical */
7444                { 0x00, 0x25, 0x63 }, NULL,
7445                {  0,  0,  0,  5 }, {  0,  0,  0,  8 }, {  0,  2,  0,  12 },
7446        },
7447        { /* Molex 1M Cu loopback */
7448                { 0x00, 0x09, 0x3A }, "74763-0025      ",
7449                {  2,  2,  6, 15 }, {  2,  2,  6, 15 }, {  2,  2,  6, 15 },
7450        },
7451        { /* Molex 2m 28awg NoEq */
7452                { 0x00, 0x09, 0x3A }, "74757-2201      ",
7453                {  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  1, 15 },
7454        },
7455};
7456
7457static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
7458        /* amp, pre, main, post */
7459        {  2, 2, 15,  6 },      /* Loopback */
7460        {  0, 0,  0,  1 },      /*  2 dB */
7461        {  0, 0,  0,  2 },      /*  3 dB */
7462        {  0, 0,  0,  3 },      /*  4 dB */
7463        {  0, 0,  0,  4 },      /*  5 dB */
7464        {  0, 0,  0,  5 },      /*  6 dB */
7465        {  0, 0,  0,  6 },      /*  7 dB */
7466        {  0, 0,  0,  7 },      /*  8 dB */
7467        {  0, 0,  0,  8 },      /*  9 dB */
7468        {  0, 0,  0,  9 },      /* 10 dB */
7469        {  0, 0,  0, 10 },      /* 11 dB */
7470        {  0, 0,  0, 11 },      /* 12 dB */
7471        {  0, 0,  0, 12 },      /* 13 dB */
7472        {  0, 0,  0, 13 },      /* 14 dB */
7473        {  0, 0,  0, 14 },      /* 15 dB */
7474        {  0, 0,  0, 15 },      /* 16 dB */
7475};
7476
7477static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
7478        /* amp, pre, main, post */
7479        {  2, 2, 15,  6 },      /* Loopback */
7480        {  0, 0,  0,  8 },      /*  2 dB */
7481        {  0, 0,  0,  8 },      /*  3 dB */
7482        {  0, 0,  0,  9 },      /*  4 dB */
7483        {  0, 0,  0,  9 },      /*  5 dB */
7484        {  0, 0,  0, 10 },      /*  6 dB */
7485        {  0, 0,  0, 10 },      /*  7 dB */
7486        {  0, 0,  0, 11 },      /*  8 dB */
7487        {  0, 0,  0, 11 },      /*  9 dB */
7488        {  0, 0,  0, 12 },      /* 10 dB */
7489        {  0, 0,  0, 12 },      /* 11 dB */
7490        {  0, 0,  0, 13 },      /* 12 dB */
7491        {  0, 0,  0, 13 },      /* 13 dB */
7492        {  0, 0,  0, 14 },      /* 14 dB */
7493        {  0, 0,  0, 14 },      /* 15 dB */
7494        {  0, 0,  0, 15 },      /* 16 dB */
7495};
7496
7497static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
7498        /* amp, pre, main, post */
7499        {  2, 2, 15,  6 },      /* Loopback */
7500        {  0, 1,  0,  7 },      /*  2 dB (also QMH7342) */
7501        {  0, 1,  0,  9 },      /*  3 dB (also QMH7342) */
7502        {  0, 1,  0, 11 },      /*  4 dB */
7503        {  0, 1,  0, 13 },      /*  5 dB */
7504        {  0, 1,  0, 15 },      /*  6 dB */
7505        {  0, 1,  3, 15 },      /*  7 dB */
7506        {  0, 1,  7, 15 },      /*  8 dB */
7507        {  0, 1,  7, 15 },      /*  9 dB */
7508        {  0, 1,  8, 15 },      /* 10 dB */
7509        {  0, 1,  9, 15 },      /* 11 dB */
7510        {  0, 1, 10, 15 },      /* 12 dB */
7511        {  0, 2,  6, 15 },      /* 13 dB */
7512        {  0, 2,  7, 15 },      /* 14 dB */
7513        {  0, 2,  8, 15 },      /* 15 dB */
7514        {  0, 2,  9, 15 },      /* 16 dB */
7515};
7516
7517/*
7518 * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
7519 * These are mostly used for mez cards going through connectors
7520 * and backplane traces, but can be used to add other "unusual"
7521 * table values as well.
7522 */
7523static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
7524        /* amp, pre, main, post */
7525        {  0, 0, 0,  1 },       /* QMH7342 backplane settings */
7526        {  0, 0, 0,  1 },       /* QMH7342 backplane settings */
7527        {  0, 0, 0,  2 },       /* QMH7342 backplane settings */
7528        {  0, 0, 0,  2 },       /* QMH7342 backplane settings */
7529        {  0, 0, 0,  3 },       /* QMH7342 backplane settings */
7530        {  0, 0, 0,  4 },       /* QMH7342 backplane settings */
7531        {  0, 1, 4, 15 },       /* QME7342 backplane settings 1.0 */
7532        {  0, 1, 3, 15 },       /* QME7342 backplane settings 1.0 */
7533        {  0, 1, 0, 12 },       /* QME7342 backplane settings 1.0 */
7534        {  0, 1, 0, 11 },       /* QME7342 backplane settings 1.0 */
7535        {  0, 1, 0,  9 },       /* QME7342 backplane settings 1.0 */
7536        {  0, 1, 0, 14 },       /* QME7342 backplane settings 1.0 */
7537        {  0, 1, 2, 15 },       /* QME7342 backplane settings 1.0 */
7538        {  0, 1, 0, 11 },       /* QME7342 backplane settings 1.1 */
7539        {  0, 1, 0,  7 },       /* QME7342 backplane settings 1.1 */
7540        {  0, 1, 0,  9 },       /* QME7342 backplane settings 1.1 */
7541        {  0, 1, 0,  6 },       /* QME7342 backplane settings 1.1 */
7542        {  0, 1, 0,  8 },       /* QME7342 backplane settings 1.1 */
7543};
7544
7545static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
7546        /* amp, pre, main, post */
7547        {  0, 0, 0,  7 },       /* QMH7342 backplane settings */
7548        {  0, 0, 0,  7 },       /* QMH7342 backplane settings */
7549        {  0, 0, 0,  8 },       /* QMH7342 backplane settings */
7550        {  0, 0, 0,  8 },       /* QMH7342 backplane settings */
7551        {  0, 0, 0,  9 },       /* QMH7342 backplane settings */
7552        {  0, 0, 0, 10 },       /* QMH7342 backplane settings */
7553        {  0, 1, 4, 15 },       /* QME7342 backplane settings 1.0 */
7554        {  0, 1, 3, 15 },       /* QME7342 backplane settings 1.0 */
7555        {  0, 1, 0, 12 },       /* QME7342 backplane settings 1.0 */
7556        {  0, 1, 0, 11 },       /* QME7342 backplane settings 1.0 */
7557        {  0, 1, 0,  9 },       /* QME7342 backplane settings 1.0 */
7558        {  0, 1, 0, 14 },       /* QME7342 backplane settings 1.0 */
7559        {  0, 1, 2, 15 },       /* QME7342 backplane settings 1.0 */
7560        {  0, 1, 0, 11 },       /* QME7342 backplane settings 1.1 */
7561        {  0, 1, 0,  7 },       /* QME7342 backplane settings 1.1 */
7562        {  0, 1, 0,  9 },       /* QME7342 backplane settings 1.1 */
7563        {  0, 1, 0,  6 },       /* QME7342 backplane settings 1.1 */
7564        {  0, 1, 0,  8 },       /* QME7342 backplane settings 1.1 */
7565};
7566
7567static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
7568        /* amp, pre, main, post */
7569        {  0, 1,  0,  4 },      /* QMH7342 backplane settings */
7570        {  0, 1,  0,  5 },      /* QMH7342 backplane settings */
7571        {  0, 1,  0,  6 },      /* QMH7342 backplane settings */
7572        {  0, 1,  0,  8 },      /* QMH7342 backplane settings */
7573        {  0, 1,  0, 10 },      /* QMH7342 backplane settings */
7574        {  0, 1,  0, 12 },      /* QMH7342 backplane settings */
7575        {  0, 1,  4, 15 },      /* QME7342 backplane settings 1.0 */
7576        {  0, 1,  3, 15 },      /* QME7342 backplane settings 1.0 */
7577        {  0, 1,  0, 12 },      /* QME7342 backplane settings 1.0 */
7578        {  0, 1,  0, 11 },      /* QME7342 backplane settings 1.0 */
7579        {  0, 1,  0,  9 },      /* QME7342 backplane settings 1.0 */
7580        {  0, 1,  0, 14 },      /* QME7342 backplane settings 1.0 */
7581        {  0, 1,  2, 15 },      /* QME7342 backplane settings 1.0 */
7582        {  0, 1,  0, 11 },      /* QME7342 backplane settings 1.1 */
7583        {  0, 1,  0,  7 },      /* QME7342 backplane settings 1.1 */
7584        {  0, 1,  0,  9 },      /* QME7342 backplane settings 1.1 */
7585        {  0, 1,  0,  6 },      /* QME7342 backplane settings 1.1 */
7586        {  0, 1,  0,  8 },      /* QME7342 backplane settings 1.1 */
7587};
7588
7589static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
7590        /* amp, pre, main, post */
7591        { 0, 0, 0, 0 },         /* QME7342 mfg settings */
7592        { 0, 0, 0, 6 },         /* QME7342 P2 mfg settings */
7593};
7594
7595static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
7596                                               unsigned atten)
7597{
7598        /*
7599         * The attenuation table starts at 2dB for entry 1,
7600         * with entry 0 being the loopback entry.
7601         */
7602        if (atten <= 2)
7603                atten = 1;
7604        else if (atten > TXDDS_TABLE_SZ)
7605                atten = TXDDS_TABLE_SZ - 1;
7606        else
7607                atten--;
7608        return txdds + atten;
7609}
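/*
 * So, for example, a cable reporting 5 dB of attenuation selects entry 4
 * (labeled "5 dB" in the tables above), anything at or below 2 dB selects
 * entry 1, and out-of-range values clamp to the last entry.
 */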
7610
7611/*
 * If override is set, the module parameter txselect has a value
 * for this specific port, so use it rather than our normal mechanism.
7614 */
7615static void find_best_ent(struct qib_pportdata *ppd,
7616                          const struct txdds_ent **sdr_dds,
7617                          const struct txdds_ent **ddr_dds,
7618                          const struct txdds_ent **qdr_dds, int override)
7619{
7620        struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
7621        int idx;
7622
7623        /* Search table of known cables */
7624        for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
7625                const struct vendor_txdds_ent *v = vendor_txdds + idx;
7626
7627                if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
7628                    (!v->partnum ||
7629                     !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
7630                        *sdr_dds = &v->sdr;
7631                        *ddr_dds = &v->ddr;
7632                        *qdr_dds = &v->qdr;
7633                        return;
7634                }
7635        }
7636
        /*
         * Active cables don't have attenuation, so we only set SerDes
         * settings to account for the attenuation of the board traces.
         */
7639        if (!override && QSFP_IS_ACTIVE(qd->tech)) {
7640                *sdr_dds = txdds_sdr + ppd->dd->board_atten;
7641                *ddr_dds = txdds_ddr + ppd->dd->board_atten;
7642                *qdr_dds = txdds_qdr + ppd->dd->board_atten;
7643                return;
7644        }
7645
7646        if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
7647                                                      qd->atten[1])) {
7648                *sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
7649                *ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
7650                *qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
7651                return;
7652        } else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
7653                /*
7654                 * If we have no (or incomplete) data from the cable
7655                 * EEPROM, or no QSFP, or override is set, use the
                 * module parameter value to index into the attenuation
7657                 * table.
7658                 */
7659                idx = ppd->cpspec->no_eep;
7660                *sdr_dds = &txdds_sdr[idx];
7661                *ddr_dds = &txdds_ddr[idx];
7662                *qdr_dds = &txdds_qdr[idx];
7663        } else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
7664                /* similar to above, but index into the "extra" table. */
7665                idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
7666                *sdr_dds = &txdds_extra_sdr[idx];
7667                *ddr_dds = &txdds_extra_ddr[idx];
7668                *qdr_dds = &txdds_extra_qdr[idx];
7669        } else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
7670                   ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
7671                                          TXDDS_MFG_SZ)) {
7672                idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
7673                pr_info("IB%u:%u use idx %u into txdds_mfg\n",
7674                        ppd->dd->unit, ppd->port, idx);
7675                *sdr_dds = &txdds_extra_mfg[idx];
7676                *ddr_dds = &txdds_extra_mfg[idx];
7677                *qdr_dds = &txdds_extra_mfg[idx];
7678        } else {
7679                /* this shouldn't happen, it's range checked */
7680                *sdr_dds = txdds_sdr + qib_long_atten;
7681                *ddr_dds = txdds_ddr + qib_long_atten;
7682                *qdr_dds = txdds_qdr + qib_long_atten;
7683        }
7684}
7685
7686static void init_txdds_table(struct qib_pportdata *ppd, int override)
7687{
7688        const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
7689        struct txdds_ent *dds;
7690        int idx;
7691        int single_ent = 0;
7692
7693        find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
7694
7695        /* for mez cards or override, use the selected value for all entries */
7696        if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
7697                single_ent = 1;
7698
7699        /* Fill in the first entry with the best entry found. */
7700        set_txdds(ppd, 0, sdr_dds);
7701        set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
7702        set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
7703        if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
7704                QIBL_LINKACTIVE)) {
7705                dds = (struct txdds_ent *)(ppd->link_speed_active ==
7706                                           QIB_IB_QDR ?  qdr_dds :
7707                                           (ppd->link_speed_active ==
7708                                            QIB_IB_DDR ? ddr_dds : sdr_dds));
7709                write_tx_serdes_param(ppd, dds);
7710        }
7711
7712        /* Fill in the remaining entries with the default table values. */
7713        for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
7714                set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
7715                set_txdds(ppd, idx + TXDDS_TABLE_SZ,
7716                          single_ent ? ddr_dds : txdds_ddr + idx);
7717                set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
7718                          single_ent ? qdr_dds : txdds_qdr + idx);
7719        }
7720}
7721
7722#define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
7723#define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
7724#define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
7725#define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
7726#define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
7727#define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
7728#define AHB_TRANS_TRIES 10
7729
7730/*
7731 * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan3,
7732 * 5=subsystem, which is why most calls have "chan + (chan >> 1)"
7733 * for the channel argument.
7734 */
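    /*
     * For example, iterating chan = 0..3 and passing
     * "chan + (chan >> 1)" yields the AHB channel arguments
     * 0, 1, 3, 4, skipping the pll at 2 (see ibsd_wr_allchans()).
     */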
7735static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
7736                    u32 data, u32 mask)
7737{
7738        u32 rd_data, wr_data, sz_mask;
7739        u64 trans, acc, prev_acc;
7740        u32 ret = 0xBAD0BAD;
7741        int tries;
7742
7743        prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
7744        /* From this point on, every exit must restore the previous access */
7745        acc = (quad << 1) | 1;
7746        qib_write_kreg(dd, KR_AHB_ACC, acc);
7747
7748        for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7749                trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7750                if (trans & AHB_TRANS_RDY)
7751                        break;
7752        }
7753        if (tries >= AHB_TRANS_TRIES) {
7754                qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
7755                goto bail;
7756        }
7757
7758        /* If mask is not all 1s, we need to read, but different SerDes
7759         * entities have different sizes
7760         */
7761        sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
7762        wr_data = data & mask & sz_mask;
7763        if ((~mask & sz_mask) != 0) {
7764                trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7765                qib_write_kreg(dd, KR_AHB_TRANS, trans);
7766
7767                for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7768                        trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7769                        if (trans & AHB_TRANS_RDY)
7770                                break;
7771                }
7772                if (tries >= AHB_TRANS_TRIES) {
7773                        qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
7774                                    AHB_TRANS_TRIES);
7775                        goto bail;
7776                }
7777                /* Re-read in case the host split the read and got the data half first */
7778                trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7779                rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
7780                wr_data |= (rd_data & ~mask & sz_mask);
7781        }
7782
7783        /* If mask is not zero, we need to write. */
7784        if (mask & sz_mask) {
7785                trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7786                trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
7787                trans |= AHB_WR;
7788                qib_write_kreg(dd, KR_AHB_TRANS, trans);
7789
7790                for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7791                        trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7792                        if (trans & AHB_TRANS_RDY)
7793                                break;
7794                }
7795                if (tries >= AHB_TRANS_TRIES) {
7796                        qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
7797                                    AHB_TRANS_TRIES);
7798                        goto bail;
7799                }
7800        }
7801        ret = wr_data;
7802bail:
7803        qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
7804        return ret;
7805}
7806
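    /*
     * Read-modify-write the given address on all SERDES_CHANS channels
     * of this port's IB serdes, then read each address back.
     */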
7807static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
7808                             unsigned mask)
7809{
7810        struct qib_devdata *dd = ppd->dd;
7811        int chan;
7812
7813        for (chan = 0; chan < SERDES_CHANS; ++chan) {
7814                ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7815                        data, mask);
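                    /* a data/mask of 0 makes the next access a pure
                     * readback of the address just written; the result
                     * is discarded
                     */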
7816                ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7817                        0, 0);
7818        }
7819}
7820
7821static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
7822{
7823        u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
7824        u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
7825
7826        if (enable && !state) {
7827                pr_info("IB%u:%u Turning LOS on\n",
7828                        ppd->dd->unit, ppd->port);
7829                data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7830        } else if (!enable && state) {
7831                pr_info("IB%u:%u Turning LOS off\n",
7832                        ppd->dd->unit, ppd->port);
7833                data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7834        }
7835        qib_write_kreg_port(ppd, krp_serdesctrl, data);
7836}
7837
7838static int serdes_7322_init(struct qib_pportdata *ppd)
7839{
7840        int ret = 0;
7841
7842        if (ppd->dd->cspec->r1)
7843                ret = serdes_7322_init_old(ppd);
7844        else
7845                ret = serdes_7322_init_new(ppd);
7846        return ret;
7847}
7848
7849static int serdes_7322_init_old(struct qib_pportdata *ppd)
7850{
7851        u32 le_val;
7852
7853        /*
7854         * Initialize the Tx DDS tables.  Also done every QSFP event,
7855         * for adapters with QSFP
7856         */
7857        init_txdds_table(ppd, 0);
7858
7859        /* ensure no tx overrides from earlier driver loads */
7860        qib_write_kreg_port(ppd, krp_tx_deemph_override,
7861                SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7862                reset_tx_deemphasis_override));
7863
7864        /* Patch some SerDes defaults to "Better for IB" */
7865        /* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
7866        ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
7867
7868        /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
7869        ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
7870        /* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
7871        ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));
7872
7873        /* May be overridden in qsfp_7322_event */
7874        le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
7875        ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
7876
7877        /* enable LE1 adaptation for all but QME, which is disabled */
7878        le_val = IS_QME(ppd->dd) ? 0 : 1;
7879        ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));
7880
7881        /* Clear cmode-override, may be set from older driver */
7882        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7883
7884        /* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
7885        ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));
7886
7887        /* setup LoS params; these are subsystem, so chan == 5 */
7888        /* LoS filter threshold_count on, ch 0-3, set to 8 */
7889        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7890        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7891        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7892        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7893
7894        /* LoS filter threshold_count off, ch 0-3, set to 4 */
7895        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
7896        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
7897        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
7898        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
7899
7900        /* LoS filter select enabled */
7901        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
7902
7903        /* LoS target data:  SDR=4, DDR=2, QDR=1 */
7904        ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
7905        ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7906        ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7907
7908        serdes_7322_los_enable(ppd, 1);
7909
7910        /* rxbistena; set to 0 to avoid effects of it switching later */
7911        ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
7912
7913        /* Configure 4 DFE taps, and only they adapt */
7914        ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));
7915
7916        /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
7917        le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
7918        ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
7919
7920        /*
7921         * Set receive adaptation mode.  SDR and DDR adaptation are
7922         * always on, and QDR is initially enabled; later disabled.
7923         */
7924        qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
7925        qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
7926        qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
7927                            ppd->dd->cspec->r1 ?
7928                            QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
7929        ppd->cpspec->qdr_dfe_on = 1;
7930
7931        /* FLoop LOS gate: PPM filter enabled */
7932        ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7933
7934        /* rx offset center enabled */
7935        ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);
7936
7937        if (!ppd->dd->cspec->r1) {
7938                ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
7939                ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
7940        }
7941
7942        /* Set the frequency loop bandwidth to 15 */
7943        ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));
7944
7945        return 0;
7946}
7947
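    /*
     * Bringup for non-r1 parts, following the LSI-suggested sequence
     * below: quiesce the adaptation loops, reset the RX analog and
     * digital blocks, run the RX latch calibration, then re-enable the
     * loops and DFE and program the Tx DDS tables.
     */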
7948static int serdes_7322_init_new(struct qib_pportdata *ppd)
7949{
7950        unsigned long tend;
7951        u32 le_val, rxcaldone;
7952        int chan, chan_done = (1 << SERDES_CHANS) - 1;
7953
7954        /* Clear cmode-override, may be set from older driver */
7955        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7956
7957        /* ensure no tx overrides from earlier driver loads */
7958        qib_write_kreg_port(ppd, krp_tx_deemph_override,
7959                SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7960                reset_tx_deemphasis_override));
7961
7962        /* START OF LSI SUGGESTED SERDES BRINGUP */
7963        /* Reset - Calibration Setup */
7964        /*       Stop DFE adaptation */
7965        ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
7966        /*       Disable LE1 */
7967        ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
7968        /*       Disable autoadapt for LE1 */
7969        ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
7970        /*       Disable LE2 */
7971        ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
7972        /*       Disable VGA */
7973        ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
7974        /*       Disable AFE Offset Cancel */
7975        ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
7976        /*       Disable Timing Loop */
7977        ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
7978        /*       Disable Frequency Loop */
7979        ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
7980        /*       Disable Baseline Wander Correction */
7981        ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
7982        /*       Disable RX Calibration */
7983        ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
7984        /*       Disable RX Offset Calibration */
7985        ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
7986        /*       Select BB CDR */
7987        ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
7988        /*       CDR Step Size */
7989        ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
7990        /*       Enable phase Calibration */
7991        ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
7992        /*       DFE Bandwidth [2:14-12] */
7993        ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
7994        /*       DFE Config (4 taps only) */
7995        ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
7996        /*       Gain Loop Bandwidth */
7997        if (!ppd->dd->cspec->r1) {
7998                ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
7999                ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
8000        } else {
8001                ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
8002        }
8003        /*       Baseline Wander Correction Gain [13:4-0] (leave as default) */
8004        /*       Baseline Wander Correction Gain [3:7-5] (leave as default) */
8005        /*       Data Rate Select [5:7-6] (leave as default) */
8006        /*       RX Parallel Word Width [3:10-8] (leave as default) */
8007
8008        /* RX RESET */
8009        /*       Single- or Multi-channel reset */
8010        /*       RX Analog reset */
8011        /*       RX Digital reset */
8012        ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
8013        msleep(20);
8014        /*       RX Analog reset */
8015        ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
8016        msleep(20);
8017        /*       RX Digital reset */
8018        ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
8019        msleep(20);
8020
8021        /* setup LoS params; these are subsystem, so chan == 5 */
8022        /* LoS filter threshold_count on, ch 0-3, set to 8 */
8023        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
8024        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
8025        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
8026        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
8027
8028        /* LoS filter threshold_count off, ch 0-3, set to 4 */
8029        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
8030        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
8031        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
8032        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
8033
8034        /* LoS filter select enabled */
8035        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
8036
8037        /* LoS target data:  SDR=4, DDR=2, QDR=1 */
8038        ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
8039        ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
8040        ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
8041
8042        /* Turn on LOS on initial SERDES init */
8043        serdes_7322_los_enable(ppd, 1);
8044        /* FLoop LOS gate: PPM filter  enabled */
8045        ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
8046
8047        /* RX LATCH CALIBRATION */
8048        /*       Enable Eyefinder Phase Calibration latch */
8049        ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
8050        /*       Enable RX Offset Calibration latch */
8051        ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
8052        msleep(20);
8053        /*       Start Calibration */
8054        ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
8055        tend = jiffies + msecs_to_jiffies(500);
8056        while (chan_done && !time_is_before_jiffies(tend)) {
8057                msleep(20);
8058                for (chan = 0; chan < SERDES_CHANS; ++chan) {
8059                        rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
8060                                            (chan + (chan >> 1)),
8061                                            25, 0, 0);
8062                        if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
8063                            (~chan_done & (1 << chan)) == 0)
8064                                chan_done &= ~(1 << chan);
8065                }
8066        }
8067        if (chan_done) {
8068                pr_info("Serdes %d calibration not done after .5 sec: 0x%x\n",
8069                         IBSD(ppd->hw_pidx), chan_done);
8070        } else {
8071                for (chan = 0; chan < SERDES_CHANS; ++chan) {
8072                        rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
8073                                            (chan + (chan >> 1)),
8074                                            25, 0, 0);
8075                        if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
8076                                pr_info("Serdes %d chan %d calibration failed\n",
8077                                        IBSD(ppd->hw_pidx), chan);
8078                }
8079        }
8080
8081        /*       Turn off Calibration */
8082        ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
8083        msleep(20);
8084
8085        /* BRING RX UP */
8086        /*       Set LE2 value (May be overridden in qsfp_7322_event) */
8087        le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
8088        ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
8089        /*       Set LE2 Loop bandwidth */
8090        ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
8091        /*       Enable LE2 */
8092        ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
8093        msleep(20);
8094        /*       Enable H0 only */
8095        ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
8096        /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
8097        le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
8098        ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
8099        /*       Enable VGA */
8100        ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
8101        msleep(20);
8102        /*       Set Frequency Loop Bandwidth */
8103        ibsd_wr_allchans(ppd, 2, (15 << 5), BMASK(8, 5));
8104        /*       Enable Frequency Loop */
8105        ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
8106        /*       Set Timing Loop Bandwidth */
8107        ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
8108        /*       Enable Timing Loop */
8109        ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
8110        msleep(50);
8111        /*       Enable DFE
8112         *       Set receive adaptation mode.  SDR and DDR adaptation are
8113         *       always on, and QDR is initially enabled; later disabled.
8114         */
8115        qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
8116        qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
8117        qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
8118                            ppd->dd->cspec->r1 ?
8119                            QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
8120        ppd->cpspec->qdr_dfe_on = 1;
8121        /*       Disable LE1  */
8122        ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
8123        /*       Disable auto adapt for LE1 */
8124        ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
8125        msleep(20);
8126        /*       Enable AFE Offset Cancel */
8127        ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
8128        /*       Enable Baseline Wander Correction */
8129        ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
8130        /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
8131        ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
8132        /* VGA output common mode */
8133        ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
8134
8135        /*
8136         * Initialize the Tx DDS tables.  Also done every QSFP event,
8137         * for adapters with QSFP
8138         */
8139        init_txdds_table(ppd, 0);
8140
8141        return 0;
8142}
8143
8144/* start adjust QMH serdes parameters */
8145
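    /*
     * Write a 6-bit manual H1 code into bits [14:9] of serdes register
     * 9 for one channel; used by force_h1() below.
     */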
8146static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
8147{
8148        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8149                9, code << 9, 0x3f << 9);
8150}
8151
8152static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
8153        int enable, u32 tapenable)
8154{
8155        if (enable)
8156                ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8157                        1, 3 << 10, 0x1f << 10);
8158        else
8159                ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8160                        1, 0, 0x1f << 10);
8161}
8162
8163/* Set clock to 1, 0, 1, 0 */
8164static void clock_man(struct qib_pportdata *ppd, int chan)
8165{
8166        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8167                4, 0x4000, 0x4000);
8168        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8169                4, 0, 0x4000);
8170        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8171                4, 0x4000, 0x4000);
8172        ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8173                4, 0, 0x4000);
8174}
8175
8176/*
8177 * Write the current Tx serdes pre, post, main, amp settings into the serdes.
8178 * The caller must pass the settings appropriate for the current speed,
8179 * or not care if they are correct for the current speed.
8180 */
8181static void write_tx_serdes_param(struct qib_pportdata *ppd,
8182                                  struct txdds_ent *txdds)
8183{
8184        u64 deemph;
8185
8186        deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
8187        /* field names for amp, main, post, pre, respectively */
8188        deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
8189                    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
8190                    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
8191                    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));
8192
8193        deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8194                           tx_override_deemphasis_select);
8195        deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8196                    txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8197                                       txampcntl_d2a);
8198        deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8199                     txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8200                                   txc0_ena);
8201        deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8202                     txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8203                                    txcp1_ena);
8204        deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8205                     txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8206                                    txcn1_ena);
8207        qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
8208}
8209
8210/*
8211 * Set the parameters for mez cards on link bounce, so they are
8212 * always exactly what was requested.  Similar logic to
8213 * init_txdds_table(), but does just the serdes.
8214 */
8215static void adj_tx_serdes(struct qib_pportdata *ppd)
8216{
8217        const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
8218        struct txdds_ent *dds;
8219
8220        find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
8221        dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
8222                qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
8223                                ddr_dds : sdr_dds));
8224        write_tx_serdes_param(ppd, dds);
8225}
8226
8227/* set QDR forced value for H1, if needed */
8228static void force_h1(struct qib_pportdata *ppd)
8229{
8230        int chan;
8231
8232        ppd->cpspec->qdr_reforce = 0;
8233        if (!ppd->dd->cspec->r1)
8234                return;
8235
8236        for (chan = 0; chan < SERDES_CHANS; chan++) {
8237                set_man_mode_h1(ppd, chan, 1, 0);
8238                set_man_code(ppd, chan, ppd->cpspec->h1_val);
8239                clock_man(ppd, chan);
8240                set_man_mode_h1(ppd, chan, 0, 0);
8241        }
8242}
8243
8244#define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
8245#define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
8246
8247#define R_OPCODE_LSB 3
8248#define R_OP_NOP 0
8249#define R_OP_SHIFT 2
8250#define R_OP_UPDATE 3
8251#define R_TDI_LSB 2
8252#define R_TDO_LSB 1
8253#define R_RDY 1
8254
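    /* Take control of the JTAG-style access port by setting the
     * access-enable bit.
     */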
8255static int qib_r_grab(struct qib_devdata *dd)
8256{
8257        u64 val = SJA_EN;
8258
8259        qib_write_kreg(dd, kr_r_access, val);
8260        qib_read_kreg32(dd, kr_scratch);
8261        return 0;
8262}
8263
8264/* qib_r_wait_for_rdy() not only waits for the ready bit, it also
8265 * returns the current state of R_TDO
8266 */
8267static int qib_r_wait_for_rdy(struct qib_devdata *dd)
8268{
8269        u64 val;
8270        int timeout;
8271
8272        for (timeout = 0; timeout < 100 ; ++timeout) {
8273                val = qib_read_kreg32(dd, kr_r_access);
8274                if (val & R_RDY)
8275                        return (val >> R_TDO_LSB) & 1;
8276        }
8277        return -1;
8278}
8279
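    /*
     * Shift "len" bits through the chain selected by "bisten".  Bits
     * from "inp" (if non-NULL) drive R_TDI, LSB-first within each
     * byte; bits sampled from R_TDO are stored to "outp" (if non-NULL)
     * in the same order.  Returns the number of bits shifted, or -1 on
     * timeout.
     */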
8280static int qib_r_shift(struct qib_devdata *dd, int bisten,
8281                       int len, u8 *inp, u8 *outp)
8282{
8283        u64 valbase, val;
8284        int ret, pos;
8285
8286        valbase = SJA_EN | (bisten << BISTEN_LSB) |
8287                (R_OP_SHIFT << R_OPCODE_LSB);
8288        ret = qib_r_wait_for_rdy(dd);
8289        if (ret < 0)
8290                goto bail;
8291        for (pos = 0; pos < len; ++pos) {
8292                val = valbase;
8293                if (outp) {
8294                        outp[pos >> 3] &= ~(1 << (pos & 7));
8295                        outp[pos >> 3] |= (ret << (pos & 7));
8296                }
8297                if (inp) {
8298                        int tdi = inp[pos >> 3] >> (pos & 7);
8299
8300                        val |= ((tdi & 1) << R_TDI_LSB);
8301                }
8302                qib_write_kreg(dd, kr_r_access, val);
8303                qib_read_kreg32(dd, kr_scratch);
8304                ret = qib_r_wait_for_rdy(dd);
8305                if (ret < 0)
8306                        break;
8307        }
8308        /* Restore to NOP between operations. */
8309        val =  SJA_EN | (bisten << BISTEN_LSB);
8310        qib_write_kreg(dd, kr_r_access, val);
8311        qib_read_kreg32(dd, kr_scratch);
8312        ret = qib_r_wait_for_rdy(dd);
8313
8314        if (ret >= 0)
8315                ret = pos;
8316bail:
8317        return ret;
8318}
8319
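    /* Issue an UPDATE opcode to latch the previously shifted bits into
     * the chain selected by "bisten".
     */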
8320static int qib_r_update(struct qib_devdata *dd, int bisten)
8321{
8322        u64 val;
8323        int ret;
8324
8325        val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
8326        ret = qib_r_wait_for_rdy(dd);
8327        if (ret >= 0) {
8328                qib_write_kreg(dd, kr_r_access, val);
8329                qib_read_kreg32(dd, kr_scratch);
8330        }
8331        return ret;
8332}
8333
8334#define BISTEN_PORT_SEL 15
8335#define LEN_PORT_SEL 625
8336#define BISTEN_AT 17
8337#define LEN_AT 156
8338#define BISTEN_ETM 16
8339#define LEN_ETM 632
8340
8341#define BIT2BYTE(x) (((x) + BITS_PER_BYTE - 1) / BITS_PER_BYTE)
8342
8343/* these are common for all IB port use cases. */
8344static u8 reset_at[BIT2BYTE(LEN_AT)] = {
8345        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8346        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
8347};
8348static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
8349        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8350        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8351        0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
8352        0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
8353        0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
8354        0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
8355        0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8356        0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
8357};
8358static u8 at[BIT2BYTE(LEN_AT)] = {
8359        0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
8360        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
8361};
8362
8363/* used for IB1 or IB2, only one in use */
8364static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
8365        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8366        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8367        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8368        0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
8369        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8370        0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
8371        0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
8372        0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
8373};
8374
8375/* used when both IB1 and IB2 are in use */
8376static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
8377        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8378        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
8379        0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8380        0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
8381        0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
8382        0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
8383        0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
8384        0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
8385};
8386
8387/* used when only IB1 is in use */
8388static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
8389        0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8390        0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8391        0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8392        0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8393        0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8394        0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8395        0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8396        0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8397};
8398
8399/* used when only IB2 is in use */
8400static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
8401        0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
8402        0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
8403        0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8404        0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8405        0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
8406        0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8407        0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8408        0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
8409};
8410
8411/* used when both IB1 and IB2 are in use */
8412static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
8413        0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8414        0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8415        0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8416        0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8417        0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8418        0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
8419        0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8420        0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8421};
8422
8423/*
8424 * Do setup to properly handle IB link recovery; if "both" is set, we
8425 * are initializing to cover both ports; otherwise we are initializing
8426 * to cover a single port card, or the port has reached INIT and we may
8427 * need to switch coverage types.
8428 */
8429static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
8430{
8431        u8 *portsel, *etm;
8432        struct qib_devdata *dd = ppd->dd;
8433
8434        if (!ppd->dd->cspec->r1)
8435                return;
8436        if (!both) {
8437                dd->cspec->recovery_ports_initted++;
8438                ppd->cpspec->recovery_init = 1;
8439        }
8440        if (!both && dd->cspec->recovery_ports_initted == 1) {
8441                portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
8442                etm = atetm_1port;
8443        } else {
8444                portsel = portsel_2port;
8445                etm = atetm_2port;
8446        }
8447
8448        if (qib_r_grab(dd) < 0 ||
8449                qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
8450                qib_r_update(dd, BISTEN_ETM) < 0 ||
8451                qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
8452                qib_r_update(dd, BISTEN_AT) < 0 ||
8453                qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
8454                            portsel, NULL) < 0 ||
8455                qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
8456                qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
8457                qib_r_update(dd, BISTEN_AT) < 0 ||
8458                qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
8459                qib_r_update(dd, BISTEN_ETM) < 0)
8460                qib_dev_err(dd, "Failed IB link recovery setup\n");
8461}
8462
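    /*
     * Single-port recovery check: briefly assert FreezeMode and sample
     * the active freeze mask.  A zero mask means the chip is unusable
     * until power cycled; otherwise clear the PClkNotDetect error and
     * take the IBC back out of reset.
     */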
8463static void check_7322_rxe_status(struct qib_pportdata *ppd)
8464{
8465        struct qib_devdata *dd = ppd->dd;
8466        u64 fmask;
8467
8468        if (dd->cspec->recovery_ports_initted != 1)
8469                return; /* rest doesn't apply to dualport */
8470        qib_write_kreg(dd, kr_control, dd->control |
8471                       SYM_MASK(Control, FreezeMode));
8472        (void)qib_read_kreg64(dd, kr_scratch);
8473        udelay(3); /* ibcreset asserted 400ns, be sure that's over */
8474        fmask = qib_read_kreg64(dd, kr_act_fmask);
8475        if (!fmask) {
8476                /*
8477                 * require a powercycle before we'll work again, and make
8478                 * sure we get no more interrupts, and don't turn off
8479                 * freeze.
8480                 */
8481                ppd->dd->cspec->stay_in_freeze = 1;
8482                qib_7322_set_intr_state(ppd->dd, 0);
8483                qib_write_kreg(dd, kr_fmask, 0ULL);
8484                qib_dev_err(dd, "HCA unusable until powercycled\n");
8485                return; /* eventually reset */
8486        }
8487
8488        qib_write_kreg(ppd->dd, kr_hwerrclear,
8489            SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));
8490
8491        /* don't do the full clear_freeze(), not needed for this */
8492        qib_write_kreg(dd, kr_control, dd->control);
8493        qib_read_kreg32(dd, kr_scratch);
8494        /* take IBC out of reset */
8495        if (ppd->link_speed_supported) {
8496                ppd->cpspec->ibcctrl_a &=
8497                        ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
8498                qib_write_kreg_port(ppd, krp_ibcctrl_a,
8499                                    ppd->cpspec->ibcctrl_a);
8500                qib_read_kreg32(dd, kr_scratch);
8501                if (ppd->lflags & QIBL_IB_LINK_DISABLED)
8502                        qib_set_ib_7322_lstate(ppd, 0,
8503                                QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
8504        }
8505}
8506