#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#ifdef CONFIG_INFINIBAND_QIB_DCA
#include <linux/dca.h>
#endif

#include "qib.h"
#include "qib_7322_regs.h"
#include "qib_qsfp.h"

#include "qib_mad.h"
#include "qib_verbs.h"

#undef pr_fmt
#define pr_fmt(fmt) QIB_DRV_NAME " " fmt

static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
static irqreturn_t qib_7322intr(int irq, void *data);
static irqreturn_t qib_7322bufavail(int irq, void *data);
static irqreturn_t sdma_intr(int irq, void *data);
static irqreturn_t sdma_idle_intr(int irq, void *data);
static irqreturn_t sdma_progress_intr(int irq, void *data);
static irqreturn_t sdma_cleanup_intr(int irq, void *data);
static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
				  struct qib_ctxtdata *rcd);
static u8 qib_7322_phys_portstate(u64);
static u32 qib_7322_iblink_state(u64);
static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
				   u16 linitcmd);
static void force_h1(struct qib_pportdata *);
static void adj_tx_serdes(struct qib_pportdata *);
static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
static void qib_7322_mini_pcs_reset(struct qib_pportdata *);

static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
static void serdes_7322_los_enable(struct qib_pportdata *, int);
static int serdes_7322_init_old(struct qib_pportdata *);
static int serdes_7322_init_new(struct qib_pportdata *);
static void dump_sdma_7322_state(struct qib_pportdata *);

#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))

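/*
 * LE2 serdes equalization settings.  LE2_DEFAULT covers typical
 * cables; LE2_5m appears to be for short (<= 5 m) copper runs and
 * LE2_QME for QME7342 mezzanine boards.  IBSD() maps a hardware port
 * index to the AHB target used by ahb_mod() for that port's IB SerDes.
 */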
#define LE2_DEFAULT 5
#define LE2_5m 4
#define LE2_QME 0

#define IBSD(hw_pidx) (hw_pidx + 2)

static const unsigned rcv_int_timeout = 375;
static const unsigned rcv_int_count = 16;
static const unsigned sdma_idle_cnt = 64;

#define RXEQ_DISABLE_MSECS 2500

ushort qib_num_cfg_vls = 2;
module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");

static ushort qib_chase = 1;
module_param_named(chase, qib_chase, ushort, S_IRUGO);
MODULE_PARM_DESC(chase, "Enable state chase handling");

static ushort qib_long_atten = 10;
module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
MODULE_PARM_DESC(long_attenuation,
		 "attenuation cutoff (dB) for long copper cable setup");

static ushort qib_singleport;
module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");

static ushort qib_krcvq01_no_msi;
module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");

static unsigned qib_rcvhdrcnt;
module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "receive header count");

static unsigned qib_rcvhdrsize;
module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");

static unsigned qib_rcvhdrentsize;
module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");

#define MAX_ATTEN_LEN 64

static char txselect_list[MAX_ATTEN_LEN] = "10";
static struct kparam_string kp_txselect = {
	.string = txselect_list,
	.maxlen = MAX_ATTEN_LEN
};
static int setup_txselect(const char *, struct kernel_param *);
module_param_call(txselect, setup_txselect, param_get_string,
		  &kp_txselect, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(txselect,
		 "Tx serdes indices (for no QSFP or invalid QSFP data)");

#define BOARD_QME7342 5
#define BOARD_QMH7342 6
#define BOARD_QMH7360 9
#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
		    BOARD_QMH7342)
#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
		    BOARD_QME7342)

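/*
 * The KREG_ and SYM_ helper macros below paste together symbol names
 * generated in qib_7322_regs.h.  For example, SYM_MASK(IBCCtrlA_0,
 * LinkCmd) expands to QIB_7322_IBCCtrlA_0_LinkCmd_RMASK shifted up to
 * QIB_7322_IBCCtrlA_0_LinkCmd_LSB, i.e. an in-place mask for that
 * field, while SYM_FIELD() extracts a field value from a register.
 */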
#define KREG_IDX(regname) (QIB_7322_##regname##_OFFS / sizeof(u64))

#define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))

#define MASK_ACROSS(lsb, msb) \
	(((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))

#define SYM_RMASK(regname, fldname) ((u64) \
	QIB_7322_##regname##_##fldname##_RMASK)

#define SYM_MASK(regname, fldname) ((u64) \
	QIB_7322_##regname##_##fldname##_RMASK << \
	QIB_7322_##regname##_##fldname##_LSB)

#define SYM_FIELD(value, regname, fldname) ((u64) \
	(((value) >> SYM_LSB(regname, fldname)) & \
	 SYM_RMASK(regname, fldname)))

#define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
	(((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))

#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
#define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
#define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
#define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)

#define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)

#define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)

#define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
#define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT)
#define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT)
#define IBA7322_TID_PA_SHIFT 11U

#define SendIBSLIDAssignMask \
	QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
#define SendIBSLMCMask \
	QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK

#define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
#define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
#define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
#define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
#define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
#define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)

#define _QIB_GPIO_SDA_NUM 1
#define _QIB_GPIO_SCL_NUM 0
#define QIB_EEPROM_WEN_NUM 14
#define QIB_TWSI_EEPROM_DEV 0xA2

#define QIB_7322_PSXMITWAIT_CHECK_RATE 4000

#define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
#define PORT_SPD_CAP_SHIFT 3

#define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))

#define kr_contextcnt KREG_IDX(ContextCnt)
#define kr_control KREG_IDX(Control)
#define kr_counterregbase KREG_IDX(CntrRegBase)
#define kr_errclear KREG_IDX(ErrClear)
#define kr_errmask KREG_IDX(ErrMask)
#define kr_errstatus KREG_IDX(ErrStatus)
#define kr_extctrl KREG_IDX(EXTCtrl)
#define kr_extstatus KREG_IDX(EXTStatus)
#define kr_gpio_clear KREG_IDX(GPIOClear)
#define kr_gpio_mask KREG_IDX(GPIOMask)
#define kr_gpio_out KREG_IDX(GPIOOut)
#define kr_gpio_status KREG_IDX(GPIOStatus)
#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
#define kr_debugportval KREG_IDX(DebugPortValueReg)
#define kr_fmask KREG_IDX(feature_mask)
#define kr_act_fmask KREG_IDX(active_feature_mask)
#define kr_hwerrclear KREG_IDX(HwErrClear)
#define kr_hwerrmask KREG_IDX(HwErrMask)
#define kr_hwerrstatus KREG_IDX(HwErrStatus)
#define kr_intclear KREG_IDX(IntClear)
#define kr_intmask KREG_IDX(IntMask)
#define kr_intredirect KREG_IDX(IntRedirect0)
#define kr_intstatus KREG_IDX(IntStatus)
#define kr_pagealign KREG_IDX(PageAlign)
#define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
#define kr_rcvctrl KREG_IDX(RcvCtrl)
#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
#define kr_revision KREG_IDX(Revision)
#define kr_scratch KREG_IDX(Scratch)
#define kr_sendbuffererror KREG_IDX(SendBufErr0)
#define kr_sendcheckmask KREG_IDX(SendCheckMask0)
#define kr_sendctrl KREG_IDX(SendCtrl)
#define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0)
#define kr_sendibpktmask KREG_IDX(SendIBPacketMask0)
#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
#define kr_sendpiobufbase KREG_IDX(SendBufBase)
#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
#define kr_sendpiosize KREG_IDX(SendBufSize)
#define kr_sendregbase KREG_IDX(SendRegBase)
#define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
#define kr_userregbase KREG_IDX(UserRegBase)
#define kr_intgranted KREG_IDX(Int_Granted)
#define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
#define kr_intblocked KREG_IDX(IntBlocked)
#define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)

#define krp_errclear KREG_IBPORT_IDX(ErrClear)
#define krp_errmask KREG_IBPORT_IDX(ErrMask)
#define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
#define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
#define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
#define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
#define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
#define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
#define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
#define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
#define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
#define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
#define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
#define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
#define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
#define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
#define krp_psinterval KREG_IBPORT_IDX(PSInterval)
#define krp_psstart KREG_IBPORT_IDX(PSStart)
#define krp_psstat KREG_IBPORT_IDX(PSStat)
#define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
#define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
#define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
#define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
#define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
#define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
#define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
#define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
#define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
#define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
#define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
#define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
#define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
#define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
#define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
#define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
#define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
#define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
#define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
#define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
#define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
#define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
#define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
#define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
#define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
#define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
#define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
#define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
#define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
#define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
#define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)

#define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
#define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)

#define NUM_TIDFLOWS_CTXT 0x20
#define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))

#define TIDFLOW_ERRBITS ( \
	(SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
	 SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
	(SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
	 SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))

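/*
 * Counter registers are indexed relative to LBIntCnt, the first
 * register in the counter segment: CREG_IDX() handles the per-port
 * counters (via each pair's _0 register) and CREG_DEVIDX() the
 * device-wide ones; the read_7322_creg*() accessors apply these
 * indices to cregbase.
 */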
#define CREG_IDX(regname) \
	((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))

#define crp_badformat CREG_IDX(RxVersionErrCnt)
#define crp_err_rlen CREG_IDX(RxLenErrCnt)
#define crp_erricrc CREG_IDX(RxICRCErrCnt)
#define crp_errlink CREG_IDX(RxLinkMalformCnt)
#define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
#define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
#define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
#define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
#define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
#define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
#define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
#define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
#define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
#define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
#define crp_pktrcv CREG_IDX(RxDataPktCnt)
#define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
#define crp_pktsend CREG_IDX(TxDataPktCnt)
#define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
#define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
#define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
#define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
#define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
#define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
#define crp_rcvebp CREG_IDX(RxEBPCnt)
#define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
#define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
#define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
#define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
#define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
#define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
#define crp_rxvlerr CREG_IDX(RxVlErrCnt)
#define crp_sendstall CREG_IDX(TxFlowStallCnt)
#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
#define crp_txlenerr CREG_IDX(TxLenErrCnt)
#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
#define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
#define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
#define crp_wordrcv CREG_IDX(RxDwordCnt)
#define crp_wordsend CREG_IDX(TxDwordCnt)
#define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)

#define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
	QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
#define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
#define cr_lbint CREG_DEVIDX(LBIntCnt)
#define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
#define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
#define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
#define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
#define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)

#define NUM_IB_PORTS 2

#define NUM_VL15_BUFS NUM_IB_PORTS

#define KCTXT0_EGRCNT 2048

#define PBC_PORT_SEL_LSB 26
#define PBC_PORT_SEL_RMASK 1
#define PBC_VL_NUM_LSB 27
#define PBC_VL_NUM_RMASK 7
#define PBC_7322_VL15_SEND (1ULL << 63)
#define PBC_7322_VL15_SEND_CTRL (1ULL << 31)

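/*
 * Per-rate "delay" multipliers for static rate control: spacing
 * scales inversely with link rate, so 2.5 Gb/s waits 16x as long
 * between packets as 40 Gb/s, which needs no throttling.
 */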
static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
	[IB_RATE_2_5_GBPS] = 16,
	[IB_RATE_5_GBPS] = 8,
	[IB_RATE_10_GBPS] = 4,
	[IB_RATE_20_GBPS] = 2,
	[IB_RATE_30_GBPS] = 2,
	[IB_RATE_40_GBPS] = 1
};

#define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
#define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)

#define IB_7322_LT_STATE_DISABLED 0x00
#define IB_7322_LT_STATE_LINKUP 0x01
#define IB_7322_LT_STATE_POLLACTIVE 0x02
#define IB_7322_LT_STATE_POLLQUIET 0x03
#define IB_7322_LT_STATE_SLEEPDELAY 0x04
#define IB_7322_LT_STATE_SLEEPQUIET 0x05
#define IB_7322_LT_STATE_CFGDEBOUNCE 0x08
#define IB_7322_LT_STATE_CFGRCVFCFG 0x09
#define IB_7322_LT_STATE_CFGWAITRMT 0x0a
#define IB_7322_LT_STATE_CFGIDLE 0x0b
#define IB_7322_LT_STATE_RECOVERRETRAIN 0x0c
#define IB_7322_LT_STATE_TXREVLANES 0x0d
#define IB_7322_LT_STATE_RECOVERWAITRMT 0x0e
#define IB_7322_LT_STATE_RECOVERIDLE 0x0f
#define IB_7322_LT_STATE_CFGENH 0x10
#define IB_7322_LT_STATE_CFGTEST 0x11
#define IB_7322_LT_STATE_CFGWAITRMTTEST 0x12
#define IB_7322_LT_STATE_CFGWAITENH 0x13

#define IB_7322_L_STATE_DOWN 0x0
#define IB_7322_L_STATE_INIT 0x1
#define IB_7322_L_STATE_ARM 0x2
#define IB_7322_L_STATE_ACTIVE 0x3
#define IB_7322_L_STATE_ACT_DEFER 0x4

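/*
 * Map the chip's link training state (the LinkTrainingState field of
 * IBCStatusA) to the IB physical port state reported to management;
 * several training substates collapse to the same IB state.
 */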
static const u8 qib_7322_physportstate[0x20] = {
	[IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
	[IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
	[IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
	[IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
	[IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
	[IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
	[IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGRCVFCFG] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGWAITRMT] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
	[IB_7322_LT_STATE_RECOVERRETRAIN] = IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_7322_LT_STATE_RECOVERWAITRMT] = IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_7322_LT_STATE_RECOVERIDLE] = IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
	[IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGWAITRMTTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7322_LT_STATE_CFGWAITENH] = IB_PHYSPORTSTATE_CFG_WAIT_ENH,
	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
};

#ifdef CONFIG_INFINIBAND_QIB_DCA
struct qib_irq_notify {
	int rcv;
	void *arg;
	struct irq_affinity_notify notify;
};
#endif

struct qib_chip_specific {
	u64 __iomem *cregbase;
	u64 *cntrs;
	spinlock_t rcvmod_lock;
	spinlock_t gpio_lock;
	u64 main_int_mask;
	u64 int_enable_mask;
	u64 errormask;
	u64 hwerrmask;
	u64 gpio_out;
	u64 gpio_mask;
	u64 extctrl;
	u32 ncntrs;
	u32 nportcntrs;
	u32 cntrnamelen;
	u32 portcntrnamelen;
	u32 numctxts;
	u32 rcvegrcnt;
	u32 updthresh;
	u32 updthresh_dflt;
	u32 r1;
	int irq;
	u32 num_msix_entries;
	u32 sdmabufcnt;
	u32 lastbuf_for_pio;
	u32 stay_in_freeze;
	u32 recovery_ports_initted;
#ifdef CONFIG_INFINIBAND_QIB_DCA
	u32 dca_ctrl;
	int rhdr_cpu[18];
	int sdma_cpu[2];
	u64 dca_rcvhdr_ctrl[5];
#endif
	struct qib_msix_entry *msix_entries;
	unsigned long *sendchkenable;
	unsigned long *sendgrhchk;
	unsigned long *sendibchk;
	u32 rcvavail_timeout[18];
	char emsgbuf[128];
};

struct txdds_ent {
	u8 amp;
	u8 pre;
	u8 main;
	u8 post;
};

struct vendor_txdds_ent {
	u8 oui[QSFP_VOUI_LEN];
	u8 *partnum;
	struct txdds_ent sdr;
	struct txdds_ent ddr;
	struct txdds_ent qdr;
};

static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);

#define TXDDS_TABLE_SZ 16
#define TXDDS_EXTRA_SZ 18
#define TXDDS_MFG_SZ 2
#define SERDES_CHANS 4

#define H1_FORCE_VAL 8
#define H1_FORCE_QME 1
#define H1_FORCE_QMH 7

#define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
	+ ((spd) * 2))

#define QDR_DFE_DISABLE_DELAY 4000
#define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL
#define QDR_STATIC_ADAPT_DOWN_R1 0ULL
#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL
#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL

struct qib_chippport_specific {
	u64 __iomem *kpregbase;
	u64 __iomem *cpregbase;
	u64 *portcntrs;
	struct qib_pportdata *ppd;
	wait_queue_head_t autoneg_wait;
	struct delayed_work autoneg_work;
	struct delayed_work ipg_work;
	struct timer_list chase_timer;

	u64 ibdeltainprog;
	u64 ibsymdelta;
	u64 ibsymsnap;
	u64 iblnkerrdelta;
	u64 iblnkerrsnap;
	u64 iblnkdownsnap;
	u64 iblnkdowndelta;
	u64 ibmalfdelta;
	u64 ibmalfsnap;
	u64 ibcctrl_a;
	u64 ibcctrl_b;
	unsigned long qdr_dfe_time;
	unsigned long chase_end;
	u32 autoneg_tries;
	u32 recovery_init;
	u32 qdr_dfe_on;
	u32 qdr_reforce;

	u8 h1_val;
	u8 no_eep;
	u8 ipg_tries;
	u8 ibmalfusesnap;
	struct qib_qsfp_data qsfp_data;
	char epmsgbuf[192];
	char sdmamsgbuf[192];
};

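/*
 * irq_table drives MSI-X vector setup: 'name' is appended to the
 * device name for /proc/interrupts, 'lsb' is the IntStatus bit the
 * vector services (-1 for the catch-all handler), 'port' is the
 * 1-based port the vector belongs to (0 for device-wide vectors),
 * and 'dca' appears to mark vectors eligible for DCA notifiers.
 */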
static struct {
	const char *name;
	irq_handler_t handler;
	int lsb;
	int port;
	int dca;
} irq_table[] = {
	{ "", qib_7322intr, -1, 0, 0 },
	{ " (buf avail)", qib_7322bufavail,
	  SYM_LSB(IntStatus, SendBufAvail), 0, 0 },
	{ " (sdma 0)", sdma_intr,
	  SYM_LSB(IntStatus, SDmaInt_0), 1, 1 },
	{ " (sdma 1)", sdma_intr,
	  SYM_LSB(IntStatus, SDmaInt_1), 2, 1 },
	{ " (sdmaI 0)", sdma_idle_intr,
	  SYM_LSB(IntStatus, SDmaIdleInt_0), 1, 1 },
	{ " (sdmaI 1)", sdma_idle_intr,
	  SYM_LSB(IntStatus, SDmaIdleInt_1), 2, 1 },
	{ " (sdmaP 0)", sdma_progress_intr,
	  SYM_LSB(IntStatus, SDmaProgressInt_0), 1, 1 },
	{ " (sdmaP 1)", sdma_progress_intr,
	  SYM_LSB(IntStatus, SDmaProgressInt_1), 2, 1 },
	{ " (sdmaC 0)", sdma_cleanup_intr,
	  SYM_LSB(IntStatus, SDmaCleanupDone_0), 1, 0 },
	{ " (sdmaC 1)", sdma_cleanup_intr,
	  SYM_LSB(IntStatus, SDmaCleanupDone_1), 2, 0 },
};

#ifdef CONFIG_INFINIBAND_QIB_DCA

static const struct dca_reg_map {
	int shadow_inx;
	int lsb;
	u64 mask;
	u16 regno;
} dca_rcvhdr_reg_map[] = {
	{ 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH),
	  ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH), KREG_IDX(DCACtrlB) },
	{ 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH),
	  ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH), KREG_IDX(DCACtrlB) },
	{ 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH),
	  ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH), KREG_IDX(DCACtrlB) },
	{ 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH),
	  ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH), KREG_IDX(DCACtrlB) },
	{ 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH),
	  ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH), KREG_IDX(DCACtrlC) },
	{ 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH),
	  ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH), KREG_IDX(DCACtrlC) },
	{ 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH),
	  ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH), KREG_IDX(DCACtrlC) },
	{ 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH),
	  ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH), KREG_IDX(DCACtrlC) },
	{ 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH),
	  ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH), KREG_IDX(DCACtrlD) },
	{ 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH),
	  ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH), KREG_IDX(DCACtrlD) },
	{ 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH),
	  ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH), KREG_IDX(DCACtrlD) },
	{ 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH),
	  ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH), KREG_IDX(DCACtrlD) },
	{ 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH),
	  ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH), KREG_IDX(DCACtrlE) },
	{ 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH),
	  ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH), KREG_IDX(DCACtrlE) },
	{ 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH),
	  ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH), KREG_IDX(DCACtrlE) },
	{ 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH),
	  ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH), KREG_IDX(DCACtrlE) },
	{ 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH),
	  ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH), KREG_IDX(DCACtrlF) },
	{ 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH),
	  ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH), KREG_IDX(DCACtrlF) },
};
#endif

#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16

#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1
#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2
#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3

#define BLOB_7322_IBCHG 0x101

static inline void qib_write_kreg(const struct qib_devdata *dd,
				  const u32 regno, u64 value);
static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
static void write_7322_initregs(struct qib_devdata *);
static void write_7322_init_portregs(struct qib_pportdata *);
static void setup_7322_link_recovery(struct qib_pportdata *, u32);
static void check_7322_rxe_status(struct qib_pportdata *);
static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
#ifdef CONFIG_INFINIBAND_QIB_DCA
static void qib_setup_dca(struct qib_devdata *dd);
static void setup_dca_notifier(struct qib_devdata *dd,
			       struct qib_msix_entry *m);
static void reset_dca_notifier(struct qib_devdata *dd,
			       struct qib_msix_entry *m);
#endif

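/*
 * Per-context "user" registers are laid out as one ureg_align-sized
 * block per context, either in the separate userbase mapping or at
 * uregbase within the kernel register mapping.  The accessors below
 * locate the context's block and index the 64-bit register within
 * it, returning 0 (or doing nothing) if the chip isn't present.
 */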
static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
				  enum qib_ureg regno, int ctxt)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readl(regno + (u64 __iomem *)(
		(dd->ureg_align * ctxt) + (dd->userbase ?
		 (char __iomem *)dd->userbase :
		 (char __iomem *)dd->kregbase + dd->uregbase)));
}

static inline u64 qib_read_ureg(const struct qib_devdata *dd,
				enum qib_ureg regno, int ctxt)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readq(regno + (u64 __iomem *)(
		(dd->ureg_align * ctxt) + (dd->userbase ?
		 (char __iomem *)dd->userbase :
		 (char __iomem *)dd->kregbase + dd->uregbase)));
}

static inline void qib_write_ureg(const struct qib_devdata *dd,
				  enum qib_ureg regno, u64 value, int ctxt)
{
	u64 __iomem *ubase;

	if (dd->userbase)
		ubase = (u64 __iomem *)
			((char __iomem *)dd->userbase +
			 dd->ureg_align * ctxt);
	else
		ubase = (u64 __iomem *)
			(dd->uregbase +
			 (char __iomem *)dd->kregbase +
			 dd->ureg_align * ctxt);

	if (dd->kregbase && (dd->flags & QIB_PRESENT))
		writeq(value, &ubase[regno]);
}

static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
				  const u32 regno)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return -1;
	return readl((u32 __iomem *)&dd->kregbase[regno]);
}

static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
				  const u32 regno)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return -1;
	return readq(&dd->kregbase[regno]);
}

static inline void qib_write_kreg(const struct qib_devdata *dd,
				  const u32 regno, u64 value)
{
	if (dd->kregbase && (dd->flags & QIB_PRESENT))
		writeq(value, &dd->kregbase[regno]);
}

static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
				     const u16 regno)
{
	if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
		return 0ULL;
	return readq(&ppd->cpspec->kpregbase[regno]);
}

static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
					const u16 regno, u64 value)
{
	if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
	    (ppd->dd->flags & QIB_PRESENT))
		writeq(value, &ppd->cpspec->kpregbase[regno]);
}

static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
				       const u16 regno, unsigned ctxt,
				       u64 value)
{
	qib_write_kreg(dd, regno + ctxt, value);
}

static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
{
	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readq(&dd->cspec->cregbase[regno]);
}

static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
{
	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readl(&dd->cspec->cregbase[regno]);
}

static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
					u16 regno, u64 value)
{
	if (ppd->cpspec && ppd->cpspec->cpregbase &&
	    (ppd->dd->flags & QIB_PRESENT))
		writeq(value, &ppd->cpspec->cpregbase[regno]);
}

static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
				      u16 regno)
{
	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
	    !(ppd->dd->flags & QIB_PRESENT))
		return 0;
	return readq(&ppd->cpspec->cpregbase[regno]);
}

static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
					u16 regno)
{
	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
	    !(ppd->dd->flags & QIB_PRESENT))
		return 0;
	return readl(&ppd->cpspec->cpregbase[regno]);
}

#define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
#define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)

#define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
#define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
#define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
#define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
#define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
#define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
#define QIB_I_C_ERROR INT_MASK(Err)

#define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
#define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
#define QIB_I_GPIO INT_MASK(AssertGPIO)
#define QIB_I_P_SDMAINT(pidx) \
	(INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
	 INT_MASK_P(SDmaProgress, pidx) | \
	 INT_MASK_PM(SDmaCleanupDone, pidx))

#define QIB_I_P_BITSEXTANT(pidx) \
	(INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
	 INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
	 INT_MASK_P(SDmaProgress, pidx) | \
	 INT_MASK_PM(SDmaCleanupDone, pidx))

#define QIB_I_C_BITSEXTANT \
	(QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
	 QIB_I_SPIOSENT | \
	 QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)

#define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
	QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))

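/*
 * Per-port error bits (QIB_E_P_*) are built from the per-port
 * ErrMask/ErrStatus layout via ERR_MASK_N(); device-wide bits
 * (QIB_E_*) come from the device ErrMask.  The *BITSEXTANT macros
 * collect every bit the driver knows about, so anything outside
 * them can be reported as an unknown error.
 */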
#define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
#define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
#define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
#define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
#define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
#define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
#define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
#define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
#define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
#define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
#define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
#define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
#define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
#define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
#define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
#define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
#define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
#define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
#define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
#define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
#define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
#define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
#define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
#define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
#define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
#define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
#define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
#define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)

#define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
#define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
#define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
#define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
#define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
#define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
#define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
#define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
#define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
#define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
#define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)

#define QIB_E_RESET ERR_MASK(ResetNegated)
#define QIB_E_HARDWARE ERR_MASK(HardwareErr)
#define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)

#define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
#define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
#define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
#define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
#define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
#define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
#define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
#define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)

#define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
#define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
#define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)

#define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
	QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
	QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
	QIB_E_P_REBP)

#define QIB_E_P_RPKTERRS (\
	QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
	QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
	QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
	QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
	QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
	QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)

#define QIB_E_P_SPKTERRS (\
	QIB_E_P_SUNEXP_PKTNUM |\
	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
	QIB_E_P_SMAXPKTLEN |\
	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)

#define QIB_E_SPKTERRS ( \
	QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
	ERR_MASK_N(SendUnsupportedVLErr) | \
	QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)

#define QIB_E_P_SDMAERRS ( \
	QIB_E_P_SDMAHALT | \
	QIB_E_P_SDMADESCADDRMISALIGN | \
	QIB_E_P_SDMAUNEXPDATA | \
	QIB_E_P_SDMAMISSINGDW | \
	QIB_E_P_SDMADWEN | \
	QIB_E_P_SDMARPYTAG | \
	QIB_E_P_SDMA1STDESC | \
	QIB_E_P_SDMABASE | \
	QIB_E_P_SDMATAILOUTOFBOUND | \
	QIB_E_P_SDMAOUTOFBOUND | \
	QIB_E_P_SDMAGENMISMATCH)

#define QIB_E_P_BITSEXTANT ( \
	QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
	QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
	QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
	)

#define QIB_E_P_LINK_PKTERRS (\
	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
	QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
	QIB_E_P_RUNEXPCHAR)

#define QIB_E_C_BITSEXTANT (\
	QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
	QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
	QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)

#define E_SPKT_ERRS_IGNORE 0

#define QIB_EXTS_MEMBIST_DISABLED \
	SYM_MASK(EXTStatus, MemBISTDisabled)
#define QIB_EXTS_MEMBIST_ENDTEST \
	SYM_MASK(EXTStatus, MemBISTEndTest)

#define QIB_E_SPIOARMLAUNCH \
	ERR_MASK(SendArmLaunchErr)

#define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
#define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)

#define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
#define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
#define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
#define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
#define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
#define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
	SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
#define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)

#define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
#define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)

#define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
#define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
#define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))

#define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
#define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
#define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
	SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
#define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
	SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
#define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)

#define IBA7322_REDIRECT_VEC_PER_REG 12

#define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
#define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
#define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
#define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
#define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)

#define AUTONEG_TRIES 3

#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
	.msg = #fldname, .sz = sizeof(#fldname) }
#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
	fldname##Mask##_##port), .msg = #fldname, .sz = sizeof(#fldname) }
static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
	HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
	HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
	HWE_AUTO(PCIESerdesPClkNotDetect),
	HWE_AUTO(PowerOnBISTFailed),
	HWE_AUTO(TempsenseTholdReached),
	HWE_AUTO(MemoryErr),
	HWE_AUTO(PCIeBusParityErr),
	HWE_AUTO(PcieCplTimeout),
	HWE_AUTO(PciePoisonedTLP),
	HWE_AUTO_P(SDmaMemReadErr, 1),
	HWE_AUTO_P(SDmaMemReadErr, 0),
	HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
	HWE_AUTO_P(IBCBusToSPCParityErr, 1),
	HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
	HWE_AUTO(statusValidNoEop),
	HWE_AUTO(LATriggered),
	{ .mask = 0, .sz = 0 }
};

#define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
	.msg = #fldname, .sz = sizeof(#fldname) }
#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
	.msg = #fldname, .sz = sizeof(#fldname) }
static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
	E_AUTO(RcvEgrFullErr),
	E_AUTO(RcvHdrFullErr),
	E_AUTO(ResetNegated),
	E_AUTO(HardwareErr),
	E_AUTO(InvalidAddrErr),
	E_AUTO(SDmaVL15Err),
	E_AUTO(SBufVL15MisUseErr),
	E_AUTO(InvalidEEPCmd),
	E_AUTO(RcvContextShareErr),
	E_AUTO(SendVLMismatchErr),
	E_AUTO(SendArmLaunchErr),
	E_AUTO(SendSpecialTriggerErr),
	E_AUTO(SDmaWrongPortErr),
	E_AUTO(SDmaBufMaskDuplicateErr),
	{ .mask = 0, .sz = 0 }
};

static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
	E_P_AUTO(IBStatusChanged),
	E_P_AUTO(SHeadersErr),
	E_P_AUTO(VL15BufMisuseErr),
	{ .mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
	  .sz = 11 },
	E_P_AUTO(SDmaDescAddrMisalignErr),
	E_P_AUTO(SDmaUnexpDataErr),
	E_P_AUTO(SDmaMissingDwErr),
	E_P_AUTO(SDmaDwEnErr),
	E_P_AUTO(SDmaRpyTagErr),
	E_P_AUTO(SDma1stDescErr),
	E_P_AUTO(SDmaBaseErr),
	E_P_AUTO(SDmaTailOutOfBoundErr),
	E_P_AUTO(SDmaOutOfBoundErr),
	E_P_AUTO(SDmaGenMismatchErr),
	E_P_AUTO(SendBufMisuseErr),
	E_P_AUTO(SendUnsupportedVLErr),
	E_P_AUTO(SendUnexpectedPktNumErr),
	E_P_AUTO(SendDroppedDataPktErr),
	E_P_AUTO(SendDroppedSmpPktErr),
	E_P_AUTO(SendPktLenErr),
	E_P_AUTO(SendUnderRunErr),
	E_P_AUTO(SendMaxPktLenErr),
	E_P_AUTO(SendMinPktLenErr),
	E_P_AUTO(RcvIBLostLinkErr),
	E_P_AUTO(RcvHdrErr),
	E_P_AUTO(RcvHdrLenErr),
	E_P_AUTO(RcvBadTidErr),
	E_P_AUTO(RcvBadVersionErr),
	E_P_AUTO(RcvIBFlowErr),
	E_P_AUTO(RcvEBPErr),
	E_P_AUTO(RcvUnsupportedVLErr),
	E_P_AUTO(RcvUnexpectedCharErr),
	E_P_AUTO(RcvShortPktLenErr),
	E_P_AUTO(RcvLongPktLenErr),
	E_P_AUTO(RcvMaxPktLenErr),
	E_P_AUTO(RcvMinPktLenErr),
	E_P_AUTO(RcvICRCErr),
	E_P_AUTO(RcvVCRCErr),
	E_P_AUTO(RcvFormatErr),
	{ .mask = 0, .sz = 0 }
};

#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
	.msg = #fldname, .sz = sizeof(#fldname) }

#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
	SYM_LSB(IntMask, fldname##Mask##_0), \
	SYM_LSB(IntMask, fldname##Mask##_1)), \
	.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }

#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
	SYM_LSB(IntMask, fldname##Mask##_1), \
	SYM_LSB(IntMask, fldname##Mask##_0)), \
	.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }

#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
	SYM_LSB(IntMask, fldname##0IntMask), \
	SYM_LSB(IntMask, fldname##17IntMask)), \
	.msg = #fldname "_C", .sz = sizeof(#fldname "_C") }

#define TXSYMPTOM_AUTO_P(fldname) \
	{ .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
	  .msg = #fldname, .sz = sizeof(#fldname) }
static const struct qib_hwerror_msgs hdrchk_msgs[] = {
	TXSYMPTOM_AUTO_P(NonKeyPacket),
	TXSYMPTOM_AUTO_P(GRHFail),
	TXSYMPTOM_AUTO_P(PkeyFail),
	TXSYMPTOM_AUTO_P(QPFail),
	TXSYMPTOM_AUTO_P(SLIDFail),
	TXSYMPTOM_AUTO_P(RawIPV6),
	TXSYMPTOM_AUTO_P(PacketTooSmall),
	{ .mask = 0, .sz = 0 }
};

#define IBA7322_HDRHEAD_PKTINT_SHIFT 32

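/*
 * Read the SendBufErr registers, clear any set bits, and disarm the
 * corresponding PIO send buffers so they can be reused.
 */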
static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u32 i;
	int any;
	u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
	u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
	unsigned long sbuf[4];

	any = 0;
	for (i = 0; i < regcnt; ++i) {
		sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
		if (sbuf[i]) {
			any = 1;
			qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
		}
	}

	if (any)
		qib_disarm_piobufs_set(dd, sbuf, piobcnt);
}

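/*
 * Decode the bits in 'errs' into a comma-separated string using the
 * message table 'msp'.  For multi-bit fields, (these & (these - 1)) ^
 * these isolates the lowest set bit, and its offset within the field
 * is appended as "_<n>".  Any bits with no table entry are reported
 * as a trailing "MORE:<hex>".
 */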
static void err_decode(char *msg, size_t len, u64 errs,
		       const struct qib_hwerror_msgs *msp)
{
	u64 these, lmask;
	int took, multi, n = 0;

	while (errs && msp && msp->mask) {
		multi = (msp->mask & (msp->mask - 1));
		while (errs & msp->mask) {
			these = (errs & msp->mask);
			lmask = (these & (these - 1)) ^ these;
			if (len) {
				if (n++) {
					*msg++ = ',';
					len--;
				}
				BUG_ON(!msp->sz);
				took = min_t(size_t, msp->sz - (size_t)1, len);
				memcpy(msg, msp->msg, took);
				len -= took;
				msg += took;
				if (len)
					*msg = '\0';
			}
			errs &= ~lmask;
			if (len && multi) {
				int idx = -1;

				while (lmask & msp->mask) {
					++idx;
					lmask >>= 1;
				}
				took = scnprintf(msg, len, "_%d", idx);
				len -= took;
				msg += took;
			}
		}
		++msp;
	}

	if (len && errs)
		snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
			 (unsigned long long) errs);
}

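/*
 * Flush the send FIFO by posting a minimal VL15 UD packet to the
 * permissive LID; used on rev-1 (r1) silicon after an SDMA drain
 * (see qib_7322_sdma_sendctrl()).
 */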
static void flush_fifo(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u32 __iomem *piobuf;
	u32 bufn;
	u32 *hdr;
	u64 pbc;
	const unsigned hdrwords = 7;
	static struct ib_header ibhdr = {
		.lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
		.lrh[1] = IB_LID_PERMISSIVE,
		.lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
		.lrh[3] = IB_LID_PERMISSIVE,
		.u.oth.bth[0] = cpu_to_be32(
			(IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
		.u.oth.bth[1] = cpu_to_be32(0),
		.u.oth.bth[2] = cpu_to_be32(0),
		.u.oth.u.ud.deth[0] = cpu_to_be32(0),
		.u.oth.u.ud.deth[1] = cpu_to_be32(0),
	};

	pbc = PBC_7322_VL15_SEND |
		(((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
		(hdrwords + SIZE_OF_CRC);
	piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
	if (!piobuf)
		return;
	writeq(pbc, piobuf);
	hdr = (u32 *) &ibhdr;
	if (dd->flags & QIB_PIO_FLUSH_WC) {
		qib_flush_wc();
		qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
		qib_flush_wc();
		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
		qib_flush_wc();
	} else
		qib_pio_copy(piobuf + 2, hdr, hdrwords);
	qib_sendbuf_done(dd, bufn);
}

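/*
 * Set or clear the SDMA-related bits in the port's SendCtrl register
 * according to 'op'.  A DRAIN briefly clears SendEnable around the
 * update; each SendCtrl write is followed by a scratch-register write
 * to flush it to the chip.
 */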
static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
{
	struct qib_devdata *dd = ppd->dd;
	u64 set_sendctrl = 0;
	u64 clr_sendctrl = 0;

	if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
	else
		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);

	if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
	else
		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);

	if (op & QIB_SDMA_SENDCTRL_OP_HALT)
		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
	else
		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);

	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
		set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
			SYM_MASK(SendCtrl_0, TxeAbortIbc) |
			SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
	else
		clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
			SYM_MASK(SendCtrl_0, TxeAbortIbc) |
			SYM_MASK(SendCtrl_0, TxeDrainRmFifo);

	spin_lock(&dd->sendctrl_lock);

	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
		ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}

	ppd->p_sendctrl |= set_sendctrl;
	ppd->p_sendctrl &= ~clr_sendctrl;

	if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
		qib_write_kreg_port(ppd, krp_sendctrl,
				    ppd->p_sendctrl |
				    SYM_MASK(SendCtrl_0, SDmaCleanup));
	else
		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
	qib_write_kreg(dd, kr_scratch, 0);

	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}

	spin_unlock(&dd->sendctrl_lock);

	if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
		flush_fifo(ppd);
}

static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
{
	__qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
}

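/*
 * Program SendDmaLenGen with the descriptor queue size: written
 * twice, first without and then with the Generation MSB set, so the
 * internal generation counter starts in a known state.
 */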
static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
{
	qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
	qib_write_kreg_port(ppd, krp_senddmalengen,
			    ppd->sdma_descq_cnt |
			    (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
}

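/*
 * Update the software copy of the tail and tell the chip; the wmb()
 * orders the descriptor writes ahead of the tail register write so
 * the DMA engine never sees a tail pointing at stale descriptors.
 */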
static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
{
	wmb();
	ppd->sdma_descq_tail = tail;
	qib_write_kreg_port(ppd, krp_senddmatail, tail);
}

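/*
 * Bring the SDMA engine back up: flush the send pipe, reprogram
 * length/generation, reset the tail, and kick off hardware cleanup.
 */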
static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
{
	sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);

	qib_sdma_7322_setlengen(ppd);
	qib_sdma_update_7322_tail(ppd, 0);
	ppd->sdma_head_dma[0] = 0;
	qib_7322_sdma_sendctrl(ppd,
		ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
}

#define DISABLES_SDMA ( \
	QIB_E_P_SDMAHALT | \
	QIB_E_P_SDMADESCADDRMISALIGN | \
	QIB_E_P_SDMAMISSINGDW | \
	QIB_E_P_SDMADWEN | \
	QIB_E_P_SDMARPYTAG | \
	QIB_E_P_SDMA1STDESC | \
	QIB_E_P_SDMABASE | \
	QIB_E_P_SDMATAILOUTOFBOUND | \
	QIB_E_P_SDMAOUTOFBOUND | \
	QIB_E_P_SDMAGENMISMATCH)

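/*
 * Handle per-port SDMA errors: decode and log them, then feed the
 * appropriate event into the SDMA state machine based on its current
 * state (a halt during start-up or cleanup is expected and advances
 * the machine rather than signalling failure).
 */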
static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
{
	unsigned long flags;
	struct qib_devdata *dd = ppd->dd;

	errs &= QIB_E_P_SDMAERRS;
	err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf),
		   errs, qib_7322p_error_msgs);

	if (errs & QIB_E_P_SDMAUNEXPDATA)
		qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
			    ppd->port);

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	if (errs != QIB_E_P_SDMAHALT) {
		qib_dev_porterr(dd, ppd->port,
			"SDMA %s 0x%016llx %s\n",
			qib_sdma_state_names[ppd->sdma_state.current_state],
			errs, ppd->cpspec->sdmamsgbuf);
		dump_sdma_7322_state(ppd);
	}

	switch (ppd->sdma_state.current_state) {
	case qib_sdma_state_s00_hw_down:
		break;

	case qib_sdma_state_s10_hw_start_up_wait:
		if (errs & QIB_E_P_SDMAHALT)
			__qib_sdma_process_event(ppd,
				qib_sdma_event_e20_hw_started);
		break;

	case qib_sdma_state_s20_idle:
		break;

	case qib_sdma_state_s30_sw_clean_up_wait:
		break;

	case qib_sdma_state_s40_hw_clean_up_wait:
		if (errs & QIB_E_P_SDMAHALT)
			__qib_sdma_process_event(ppd,
				qib_sdma_event_e50_hw_cleaned);
		break;

	case qib_sdma_state_s50_hw_halt_wait:
		if (errs & QIB_E_P_SDMAHALT)
			__qib_sdma_process_event(ppd,
				qib_sdma_event_e60_hw_halted);
		break;

	case qib_sdma_state_s99_running:
		__qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
		__qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
		break;
	}

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}

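/*
 * Handle device-wide (non-port-specific) chip errors: snapshot and
 * clear ErrStatus, disarm send buffers on transmit errors, decode
 * the remaining bits, and mark the device as needing re-init if the
 * chip reset pin was asserted.
 */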
static noinline void handle_7322_errors(struct qib_devdata *dd)
{
	char *msg;
	u64 iserr = 0;
	u64 errs;
	u64 mask;

	qib_stats.sps_errints++;
	errs = qib_read_kreg64(dd, kr_errstatus);
	if (!errs) {
		qib_devinfo(dd->pcidev,
			"device error interrupt, but no error bits set!\n");
		goto done;
	}

	errs &= dd->cspec->errormask;
	msg = dd->cspec->emsgbuf;

	if (errs & QIB_E_HARDWARE) {
		*msg = '\0';
		qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
	}

	if (errs & QIB_E_SPKTERRS) {
		qib_disarm_7322_senderrbufs(dd->pport);
		qib_stats.sps_txerrs++;
	} else if (errs & QIB_E_INVALIDADDR)
		qib_stats.sps_txerrs++;
	else if (errs & QIB_E_ARMLAUNCH) {
		qib_stats.sps_txerrs++;
		qib_disarm_7322_senderrbufs(dd->pport);
	}
	qib_write_kreg(dd, kr_errclear, errs);

	mask = QIB_E_HARDWARE;
	*msg = '\0';

	err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask,
		   qib_7322error_msgs);

	if (errs & QIB_E_RESET) {
		int pidx;

		qib_dev_err(dd,
			"Got reset, requires re-init (unload and reload driver)\n");
		dd->flags &= ~QIB_INITTED;
		*dd->devstatusp |= QIB_STATUS_HWERROR;
		for (pidx = 0; pidx < dd->num_pports; ++pidx)
			if (dd->pport[pidx].link_speed_supported)
				*dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
	}

	if (*msg && iserr)
		qib_dev_err(dd, "%s error\n", msg);

	if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
		qib_handle_urcv(dd, ~0U);
		if (errs & ERR_MASK(RcvEgrFullErr))
			qib_stats.sps_buffull++;
		else
			qib_stats.sps_hdrfull++;
	}

done:
	return;
}

static void qib_error_tasklet(unsigned long data)
{
	struct qib_devdata *dd = (struct qib_devdata *)data;

	handle_7322_errors(dd);
	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
}

static void reenable_chase(struct timer_list *t)
{
	struct qib_chippport_specific *cp = from_timer(cp, t, chase_timer);
	struct qib_pportdata *ppd = cp->ppd;

	ppd->cpspec->chase_timer.expires = 0;
	qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
		QLOGIC_IB_IBCC_LINKINITCMD_POLL);
}

static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow,
			  u8 ibclt)
{
	ppd->cpspec->chase_end = 0;

	if (!qib_chase)
		return;

	qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
		QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
	ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
	add_timer(&ppd->cpspec->chase_timer);
}

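/*
 * Examine the link training state and work around SerDes issues:
 * time-limit "chase" states, force the H1 equalization value and
 * adjust Tx SerDes settings on QMH/QME boards during QDR training,
 * and manage LOS detection and QDR DFE adaptation around link
 * up/down transitions.
 */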
1762static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
1763{
1764 u8 ibclt;
1765 unsigned long tnow;
1766
1767 ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
1768
1769
1770
1771
1772
1773
1774
1775 switch (ibclt) {
1776 case IB_7322_LT_STATE_CFGRCVFCFG:
1777 case IB_7322_LT_STATE_CFGWAITRMT:
1778 case IB_7322_LT_STATE_TXREVLANES:
1779 case IB_7322_LT_STATE_CFGENH:
1780 tnow = jiffies;
1781 if (ppd->cpspec->chase_end &&
1782 time_after(tnow, ppd->cpspec->chase_end))
1783 disable_chase(ppd, tnow, ibclt);
1784 else if (!ppd->cpspec->chase_end)
1785 ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
1786 break;
1787 default:
1788 ppd->cpspec->chase_end = 0;
1789 break;
1790 }
1791
1792 if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
1793 ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
1794 ibclt == IB_7322_LT_STATE_LINKUP) &&
1795 (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
1796 force_h1(ppd);
1797 ppd->cpspec->qdr_reforce = 1;
1798 if (!ppd->dd->cspec->r1)
1799 serdes_7322_los_enable(ppd, 0);
1800 } else if (ppd->cpspec->qdr_reforce &&
1801 (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
1802 (ibclt == IB_7322_LT_STATE_CFGENH ||
1803 ibclt == IB_7322_LT_STATE_CFGIDLE ||
1804 ibclt == IB_7322_LT_STATE_LINKUP))
1805 force_h1(ppd);
1806
1807 if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
1808 ppd->link_speed_enabled == QIB_IB_QDR &&
1809 (ibclt == IB_7322_LT_STATE_CFGTEST ||
1810 ibclt == IB_7322_LT_STATE_CFGENH ||
1811 (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
1812 ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
1813 adj_tx_serdes(ppd);
1814
1815 if (ibclt != IB_7322_LT_STATE_LINKUP) {
1816 u8 ltstate = qib_7322_phys_portstate(ibcst);
1817 u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
1818 LinkTrainingState);
1819 if (!ppd->dd->cspec->r1 &&
1820 pibclt == IB_7322_LT_STATE_LINKUP &&
1821 ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1822 ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1823 ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1824 ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1825
1826
1827 serdes_7322_los_enable(ppd, 1);
1828 if (!ppd->cpspec->qdr_dfe_on &&
1829 ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
1830 ppd->cpspec->qdr_dfe_on = 1;
1831 ppd->cpspec->qdr_dfe_time = 0;
1832
1833 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
1834 ppd->dd->cspec->r1 ?
1835 QDR_STATIC_ADAPT_DOWN_R1 :
1836 QDR_STATIC_ADAPT_DOWN);
1837 pr_info(
1838 "IB%u:%u re-enabled QDR adaptation ibclt %x\n",
1839 ppd->dd->unit, ppd->port, ibclt);
1840 }
1841 }
1842}
1843
1844static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
1845
/*
 * This is per-port error handling; it will likely get its own MSIx
 * interrupt (one per port, although there is just a single handler).
 */
1851static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1852{
1853 char *msg;
1854 u64 ignore_this_time = 0, iserr = 0, errs, fmask;
1855 struct qib_devdata *dd = ppd->dd;
1856
	/* do this as soon as possible */
1858 fmask = qib_read_kreg64(dd, kr_act_fmask);
1859 if (!fmask)
1860 check_7322_rxe_status(ppd);
1861
1862 errs = qib_read_kreg_port(ppd, krp_errstatus);
1863 if (!errs)
1864 qib_devinfo(dd->pcidev,
1865 "Port%d error interrupt, but no error bits set!\n",
1866 ppd->port);
1867 if (!fmask)
1868 errs &= ~QIB_E_P_IBSTATUSCHANGED;
1869 if (!errs)
1870 goto done;
1871
1872 msg = ppd->cpspec->epmsgbuf;
1873 *msg = '\0';
1874
1875 if (errs & ~QIB_E_P_BITSEXTANT) {
1876 err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1877 errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
1878 if (!*msg)
1879 snprintf(msg, sizeof(ppd->cpspec->epmsgbuf),
1880 "no others");
1881 qib_dev_porterr(dd, ppd->port,
1882 "error interrupt with unknown errors 0x%016Lx set (and %s)\n",
1883 (errs & ~QIB_E_P_BITSEXTANT), msg);
1884 *msg = '\0';
1885 }
1886
1887 if (errs & QIB_E_P_SHDR) {
1888 u64 symptom;
1889
		/* determine cause, then write to clear */
1891 symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
1892 qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
1893 err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom,
1894 hdrchk_msgs);
1895 *msg = '\0';
		/* senderrbuf cleared in SPKTERRS below */
1897 }
1898
1899 if (errs & QIB_E_P_SPKTERRS) {
1900 if ((errs & QIB_E_P_LINK_PKTERRS) &&
1901 !(ppd->lflags & QIBL_LINKACTIVE)) {
			/*
			 * This can happen when trying to bring the link
			 * up, but the IB link changes state at the "wrong"
			 * time.  The IB logic then complains that the packet
			 * isn't valid.  We don't want to confuse people, so
			 * we just don't print them, except at debug.
			 */
1909 err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1910 (errs & QIB_E_P_LINK_PKTERRS),
1911 qib_7322p_error_msgs);
1912 *msg = '\0';
1913 ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1914 }
1915 qib_disarm_7322_senderrbufs(ppd);
1916 } else if ((errs & QIB_E_P_LINK_PKTERRS) &&
1917 !(ppd->lflags & QIBL_LINKACTIVE)) {
		/*
		 * This can happen when SMA is trying to bring the link
		 * up, but the IB link changes state at the "wrong" time.
		 * The IB logic then complains that the packet isn't
		 * valid.  We don't want to confuse people, so we just
		 * don't print them, except at debug.
		 */
1925 err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs,
1926 qib_7322p_error_msgs);
1927 ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1928 *msg = '\0';
1929 }
1930
1931 qib_write_kreg_port(ppd, krp_errclear, errs);
1932
1933 errs &= ~ignore_this_time;
1934 if (!errs)
1935 goto done;
1936
1937 if (errs & QIB_E_P_RPKTERRS)
1938 qib_stats.sps_rcverrs++;
1939 if (errs & QIB_E_P_SPKTERRS)
1940 qib_stats.sps_txerrs++;
1941
1942 iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
1943
1944 if (errs & QIB_E_P_SDMAERRS)
1945 sdma_7322_p_errors(ppd, errs);
1946
1947 if (errs & QIB_E_P_IBSTATUSCHANGED) {
1948 u64 ibcs;
1949 u8 ltstate;
1950
1951 ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
1952 ltstate = qib_7322_phys_portstate(ibcs);
1953
1954 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
1955 handle_serdes_issues(ppd, ibcs);
1956 if (!(ppd->cpspec->ibcctrl_a &
1957 SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
			/*
			 * We got our interrupt, so init code should be
			 * happy and not try alternatives.  Now squelch
			 * other "chatter" from link-negotiation (pre Init).
			 */
1963 ppd->cpspec->ibcctrl_a |=
1964 SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
1965 qib_write_kreg_port(ppd, krp_ibcctrl_a,
1966 ppd->cpspec->ibcctrl_a);
1967 }

		/* Update our picture of width and speed from the chip */
1970 ppd->link_width_active =
1971 (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
1972 IB_WIDTH_4X : IB_WIDTH_1X;
1973 ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
1974 LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
1975 SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
1976 QIB_IB_DDR : QIB_IB_SDR;
1977
1978 if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
1979 IB_PHYSPORTSTATE_DISABLED)
1980 qib_set_ib_7322_lstate(ppd, 0,
1981 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1982 else
			/*
			 * Since going into a recovery state causes the link
			 * state to go down and since recovery is transitory,
			 * it is better if we "miss" ever seeing the link
			 * training state go into recovery (i.e., ignore this
			 * transition for link state special handling
			 * purposes) without updating lastibcstat.
			 */
1991 if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1992 ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1993 ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1994 ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1995 qib_handle_e_ibstatuschanged(ppd, ibcs);
1996 }
1997 if (*msg && iserr)
1998 qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
1999
2000 if (ppd->state_wanted & ppd->lflags)
2001 wake_up_interruptible(&ppd->state_wait);
2002done:
2003 return;
2004}
2005
/* enable/disable chip from delivering interrupts */
2007static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
2008{
2009 if (enable) {
2010 if (dd->flags & QIB_BADINTR)
2011 return;
2012 qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
		/* cause any pending enabled interrupts to be re-delivered */
2014 qib_write_kreg(dd, kr_intclear, 0ULL);
2015 if (dd->cspec->num_msix_entries) {
			/* and same for MSIx */
2017 u64 val = qib_read_kreg64(dd, kr_intgranted);
2018
2019 if (val)
2020 qib_write_kreg(dd, kr_intgranted, val);
2021 }
2022 } else
2023 qib_write_kreg(dd, kr_intmask, 0ULL);
2024}
2025
/*
 * Attempt to recover after the chip enters freeze mode: disable all
 * error and general interrupts, take the chip out of freeze (write
 * kr_control and flush with a scratch read), clear the latched
 * hardware-error and "safe" send-packet error status, then re-enable
 * the device and per-port error masks and interrupt delivery.
 */
2041static void qib_7322_clear_freeze(struct qib_devdata *dd)
2042{
2043 int pidx;
2044
	/* disable error interrupts, to avoid confusion */
2046 qib_write_kreg(dd, kr_errmask, 0ULL);
2047
2048 for (pidx = 0; pidx < dd->num_pports; ++pidx)
2049 if (dd->pport[pidx].link_speed_supported)
2050 qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2051 0ULL);

	/* also disable interrupts; errormask is sometimes overwritten */
2054 qib_7322_set_intr_state(dd, 0);

	/* clear the freeze, and be sure chip saw it */
2057 qib_write_kreg(dd, kr_control, dd->control);
2058 qib_read_kreg32(dd, kr_scratch);
	/*
	 * Force new interrupt if any hwerr, error or interrupt bits are
	 * still set, and clear "safe" send packet errors related to freeze
	 * and cancelling sends.  Re-enable error interrupts before possible
	 * force of re-interrupt on pending interrupts.
	 */
2066 qib_write_kreg(dd, kr_hwerrclear, 0ULL);
2067 qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
2068 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2069
2070 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
2071 if (!dd->pport[pidx].link_speed_supported)
2072 continue;
2073 qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0Ull);
2074 qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0Ull);
2075 }
2076 qib_7322_set_intr_state(dd, 1);
2077}
2078
/**
 * qib_7322_handle_hwerrors - display hardware errors.
 * @dd: the qlogic_ib device
 * @msg: the output buffer
 * @msgl: the size of the output buffer
 *
 * Use same msg buffer as regular errors to avoid excessive stack
 * use.  Most hardware errors are catastrophic, but for right now,
 * we'll print them and continue.
 */
2091static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
2092 size_t msgl)
2093{
2094 u64 hwerrs;
2095 u32 ctrl;
2096 int isfatal = 0;
2097
2098 hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
2099 if (!hwerrs)
2100 goto bail;
2101 if (hwerrs == ~0ULL) {
2102 qib_dev_err(dd,
2103 "Read of hardware error status failed (all bits set); ignoring\n");
2104 goto bail;
2105 }
2106 qib_stats.sps_hwerrs++;

	/* always clear the error status, except the BIST-fail latch */
2109 qib_write_kreg(dd, kr_hwerrclear, hwerrs &
2110 ~HWE_MASK(PowerOnBISTFailed));
2111
2112 hwerrs &= dd->cspec->hwerrmask;
2113
2114
2115
2116 if (hwerrs)
2117 qib_devinfo(dd->pcidev,
2118 "Hardware error: hwerr=0x%llx (cleared)\n",
2119 (unsigned long long) hwerrs);
2120
2121 ctrl = qib_read_kreg32(dd, kr_control);
2122 if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
2123
2124
2125
2126 if ((hwerrs & ~HWE_MASK(LATriggered)) ||
2127 dd->cspec->stay_in_freeze) {
			/*
			 * If any bits we aren't ignoring are set, only
			 * make the complaint once, in case it's stuck
			 * or recurring, and we get here multiple times.
			 * Force link down, so switch knows, and LEDs
			 * are turned off.
			 */
2135 if (dd->flags & QIB_INITTED)
2136 isfatal = 1;
2137 } else
2138 qib_7322_clear_freeze(dd);
2139 }
2140
2141 if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
2142 isfatal = 1;
2143 strlcpy(msg,
2144 "[Memory BIST test failed, InfiniPath hardware unusable]",
2145 msgl);
		/* ignore from now on, so disable until driver reloaded */
2147 dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
2148 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2149 }
2150
2151 err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);
2152
2155 qib_dev_err(dd, "%s hardware error\n", msg);
2156
2157 if (hwerrs &
2158 (SYM_MASK(HwErrMask, SDmaMemReadErrMask_0) |
2159 SYM_MASK(HwErrMask, SDmaMemReadErrMask_1))) {
2160 int pidx = 0;
2161 int err;
2162 unsigned long flags;
2163 struct qib_pportdata *ppd = dd->pport;
2164
2165 for (; pidx < dd->num_pports; ++pidx, ppd++) {
2166 err = 0;
2167 if (pidx == 0 && (hwerrs &
2168 SYM_MASK(HwErrMask, SDmaMemReadErrMask_0)))
2169 err++;
2170 if (pidx == 1 && (hwerrs &
2171 SYM_MASK(HwErrMask, SDmaMemReadErrMask_1)))
2172 err++;
2173 if (err) {
2174 spin_lock_irqsave(&ppd->sdma_lock, flags);
2175 dump_sdma_7322_state(ppd);
2176 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
2177 }
2178 }
2179 }
2180
2181 if (isfatal && !dd->diag_client) {
2182 qib_dev_err(dd,
2183 "Fatal Hardware Error, no longer usable, SN %.16s\n",
2184 dd->serial);

		/*
		 * For /sys status file and user programs to print; if no
		 * trailing brace is copied, we'll know it was truncated.
		 */
2189 if (dd->freezemsg)
2190 snprintf(dd->freezemsg, dd->freezelen,
2191 "{%s}", msg);
2192 qib_disable_after_error(dd);
2193 }
2194bail:;
2195}
2196
/**
 * qib_7322_init_hwerrors - enable hardware errors
 * @dd: the qlogic_ib device
 *
 * Now that we have finished initializing everything that might reasonably
 * cause a hardware error, and cleared those errors bits as they occur,
 * we can enable hardware errors in the mask (potentially enabling
 * freeze mode), and enable hardware errors as errors (along with
 * everything else) in errormask.
 */
2207static void qib_7322_init_hwerrors(struct qib_devdata *dd)
2208{
2209 int pidx;
2210 u64 extsval;
2211
2212 extsval = qib_read_kreg64(dd, kr_extstatus);
2213 if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
2214 QIB_EXTS_MEMBIST_ENDTEST)))
2215 qib_dev_err(dd, "MemBIST did not complete!\n");

	/* clear all latched errors, except the BIST-fail bit */
2218 qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
2219 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);

	/* clear all */
2222 qib_write_kreg(dd, kr_errclear, ~0ULL);
	/* enable errors that are masked, at least this first time. */
2224 qib_write_kreg(dd, kr_errmask, ~0ULL);
2225 dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
2226 for (pidx = 0; pidx < dd->num_pports; ++pidx)
2227 if (dd->pport[pidx].link_speed_supported)
2228 qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2229 ~0ULL);
2230}
2231
/*
 * Enable or disable reporting of the SPIOARMLAUNCH error, which some
 * diagnostics deliberately provoke; clear any already-latched instance
 * when re-enabling.
 */
2238static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
2239{
2240 if (enable) {
2241 qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
2242 dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
2243 } else
2244 dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
2245 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2246}
2247
/*
 * Formerly took parameter <which> in pre-shifted,
 * pre-merged form with LinkCmd and LinkInitCmd
 * together, and assuming the zero was NOP.
 */
2253static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
2254 u16 linitcmd)
2255{
2256 u64 mod_wd;
2257 struct qib_devdata *dd = ppd->dd;
2258 unsigned long flags;
2259
2260 if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
		/*
		 * If we are told to disable, note that so link-recovery
		 * code does not attempt to bring us back up.
		 * Also reset everything that we can, so we start
		 * completely clean when re-enabled (before we
		 * actually issue the disable to the IBC).
		 */
2268 qib_7322_mini_pcs_reset(ppd);
2269 spin_lock_irqsave(&ppd->lflags_lock, flags);
2270 ppd->lflags |= QIBL_IB_LINK_DISABLED;
2271 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2272 } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
		/*
		 * Any other linkinitcmd will lead to LINKDOWN and then
		 * to INIT (if all is well), so clear flag to let
		 * link-recovery code attempt to bring us back up.
		 */
2278 spin_lock_irqsave(&ppd->lflags_lock, flags);
2279 ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
2280 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		/*
		 * Clear status change interrupt reduction so the
		 * new state is seen.
		 */
2285 ppd->cpspec->ibcctrl_a &=
2286 ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
2287 }
2288
2289 mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
2290 (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2291
2292 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
2293 mod_wd);
	/* write to chip to prevent back-to-back writes of ibc reg */
2295 qib_write_kreg(dd, kr_scratch, 0);
2296
2297}
2298
/*
 * The total RCV buffer memory is 64KB, used for both ports, and is
 * in units of 64 bytes (same as IB; we could hide the use of
 * 64 below as a constant, but for informational purposes, leave it).
 */
2307#define RCV_BUF_UNITSZ 64
2308#define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
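
/*
 * Worked example of the macro above: with 64KB of shared RCV buffer in
 * 64-byte units, a dual-port board gets 65536 / (64 * 2) = 512 units
 * per port; a single-port configuration gets all 1024 units.
 */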
2309
2310static void set_vls(struct qib_pportdata *ppd)
2311{
2312 int i, numvls, totcred, cred_vl, vl0extra;
2313 struct qib_devdata *dd = ppd->dd;
2314 u64 val;
2315
2316 numvls = qib_num_vls(ppd->vls_operational);
	/*
	 * Set up per-VL credits.  Below is kluge based on these assumptions:
	 * 1) port is disabled at the time change is being done.
	 * 2) give VL15 credits for two max-plausible packets.
	 * 3) give VL0-N the rest, with any rounding excess used for VL0.
	 */
	/* 2 VL15 packets @ 288 bytes each (including IB headers) */
2325 totcred = NUM_RCV_BUF_UNITS(dd);
2326 cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
2327 totcred -= cred_vl;
2328 qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
2329 cred_vl = totcred / numvls;
2330 vl0extra = totcred - cred_vl * numvls;
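	/*
	 * Worked example of the split above (dual-port board, numvls == 4):
	 * totcred starts at 512 units; VL15 takes (2*288 + 63)/64 = 9 units,
	 * leaving 503.  cred_vl = 503/4 = 125 and vl0extra = 3, so VL0 gets
	 * 128 units, VL1-VL3 get 125 each, and VL4-VL7 are zeroed below.
	 */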
2331 qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
2332 for (i = 1; i < numvls; i++)
2333 qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
2334 for (; i < 8; i++)
2335 qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);

	/* Notify IBC that credits need to be recomputed */
2338 val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
2339 val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2340 qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2341 qib_write_kreg(dd, kr_scratch, 0ULL);
2342 val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2343 qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
	/* read back so the credit writes are known to have reached the chip */
2345 for (i = 0; i < numvls; i++)
2346 val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
2347 val = qib_read_kreg_port(ppd, krp_rxcreditvl15);

	/* Change the number of operational VLs */
2350 ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
2351 ~SYM_MASK(IBCCtrlA_0, NumVLane)) |
2352 ((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
2353 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2354 qib_write_kreg(dd, kr_scratch, 0ULL);
2355}
2356
2357
2358
2359
2360
2361static int serdes_7322_init(struct qib_pportdata *ppd);
/**
 * qib_7322_bringup_serdes - bring up the serdes
 * @ppd: physical port on the qlogic_ib device
 */
2367static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2368{
2369 struct qib_devdata *dd = ppd->dd;
2370 u64 val, guid, ibc;
2371 unsigned long flags;
2372 int ret = 0;
2373
	/*
	 * SerDes model not in Pd, but still need to
	 * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
	 * eventually.
	 */
	/* Put IBC in reset, sends disabled (should be in reset already) */
2380 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2381 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2382 qib_write_kreg(dd, kr_scratch, 0ULL);

	/* ensure previous Tx parameters are not still forced */
2385 qib_write_kreg_port(ppd, krp_tx_deemph_override,
2386 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
2387 reset_tx_deemphasis_override));
2388
2389 if (qib_compat_ddr_negotiate) {
2390 ppd->cpspec->ibdeltainprog = 1;
2391 ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
2392 crp_ibsymbolerr);
2393 ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
2394 crp_iblinkerrrecov);
2395 }

	/* flowcontrolwatermark is in units of KBytes */
2398 ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
	/*
	 * Flow control is sent this often, even if no changes in
	 * buffer space occur.  Units are 128ns for this chip.
	 * Set to 3usec.
	 */
2404 ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
	/* max error tolerance */
2406 ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
	/* IB credit flow control. */
2408 ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
	/*
	 * set initial max size pkt IBC will send, including ICRC;
	 * also see qib_set_mtu()
	 */
2413 ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
2414 SYM_LSB(IBCCtrlA_0, MaxPktLen);
2415 ppd->cpspec->ibcctrl_a = ibc;
2416
	/*
	 * Reset the PCS interface to the serdes (and also ibc, which is
	 * still in reset from above).  Writes new value of ibcctrl_a as
	 * last step.
	 */
2421 qib_7322_mini_pcs_reset(ppd);
2422
2423 if (!ppd->cpspec->ibcctrl_b) {
2424 unsigned lse = ppd->link_speed_enabled;

		/*
		 * Not on re-init after reset, establish shadow
		 * and force initial config.
		 */
2430 ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
2431 krp_ibcctrl_b);
2432 ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
2433 IBA7322_IBC_SPEED_DDR |
2434 IBA7322_IBC_SPEED_SDR |
2435 IBA7322_IBC_WIDTH_AUTONEG |
2436 SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
2437 if (lse & (lse - 1))
2438 ppd->cpspec->ibcctrl_b |=
2439 (lse << IBA7322_IBC_SPEED_LSB) |
2440 IBA7322_IBC_IBTA_1_2_MASK |
2441 IBA7322_IBC_MAX_SPEED_MASK;
2442 else
2443 ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
2444 IBA7322_IBC_SPEED_QDR |
2445 IBA7322_IBC_IBTA_1_2_MASK :
2446 (lse == QIB_IB_DDR) ?
2447 IBA7322_IBC_SPEED_DDR :
2448 IBA7322_IBC_SPEED_SDR;
2449 if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
2450 (IB_WIDTH_1X | IB_WIDTH_4X))
2451 ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
2452 else
2453 ppd->cpspec->ibcctrl_b |=
2454 ppd->link_width_enabled == IB_WIDTH_4X ?
2455 IBA7322_IBC_WIDTH_4X_ONLY :
2456 IBA7322_IBC_WIDTH_1X_ONLY;

		/* always enable these on driver reload, not sticky */
2459 ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
2460 IBA7322_IBC_HRTBT_MASK);
2461 }
2462 qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);

	/* setup so we have more time at CFGTEST to change H1 */
2465 val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
2466 val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
2467 val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
2468 qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
2469
2470 serdes_7322_init(ppd);
2471
2472 guid = be64_to_cpu(ppd->guid);
2473 if (!guid) {
2474 if (dd->base_guid)
2475 guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
2476 ppd->guid = cpu_to_be64(guid);
2477 }
2478
2479 qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
	/* write to chip to prevent back-to-back writes of ibc reg */
2481 qib_write_kreg(dd, kr_scratch, 0);

	/* Enable port */
2484 ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
2485 set_vls(ppd);

	/* initially come up DISABLED, without sending anything. */
2488 val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
2489 QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2490 qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
2491 qib_write_kreg(dd, kr_scratch, 0ULL);
	/* clear the linkinit cmds */
2493 ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);

	/* be paranoid against later code motion, etc. */
2496 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2497 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
2498 qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
2499 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);

	/* Also enable IBSTATUSCHG interrupt.  */
2502 val = qib_read_kreg_port(ppd, krp_errmask);
2503 qib_write_kreg_port(ppd, krp_errmask,
2504 val | ERR_MASK_N(IBStatusChanged));

	/* Always zero until we start messing with SerDes for real */
2507 return ret;
2508}
2509
/**
 * qib_7322_mini_quiet_serdes - set serdes to txidle
 * @ppd: the qlogic_ib device
 * Called when driver is being unloaded
 */
2515static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
2516{
2517 u64 val;
2518 unsigned long flags;
2519
2520 qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
2521
2522 spin_lock_irqsave(&ppd->lflags_lock, flags);
2523 ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
2524 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2525 wake_up(&ppd->cpspec->autoneg_wait);
2526 cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
2527 if (ppd->dd->cspec->r1)
2528 cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
2529
2530 ppd->cpspec->chase_end = 0;
2531 if (ppd->cpspec->chase_timer.function)
2532 del_timer_sync(&ppd->cpspec->chase_timer);

	/*
	 * Despite the name, actually disables IBC as well.  Do it when
	 * we are as sure as possible that no more packets can be
	 * received, following the down and the PCS reset.
	 */
2541 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2542 qib_7322_mini_pcs_reset(ppd);
2543
	/*
	 * If there are accumulated counter deltas (from compensating for
	 * IBTA 1.2 speed negotiation events), push the adjusted values
	 * back into the chip counters before unload.
	 */
2548 if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
2549 ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
2550 struct qib_devdata *dd = ppd->dd;
2551 u64 diagc;

		/* enable counter writes */
2554 diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
2555 qib_write_kreg(dd, kr_hwdiagctrl,
2556 diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
2557
2558 if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
2559 val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
2560 if (ppd->cpspec->ibdeltainprog)
2561 val -= val - ppd->cpspec->ibsymsnap;
2562 val -= ppd->cpspec->ibsymdelta;
2563 write_7322_creg_port(ppd, crp_ibsymbolerr, val);
2564 }
2565 if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
2566 val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
2567 if (ppd->cpspec->ibdeltainprog)
2568 val -= val - ppd->cpspec->iblnkerrsnap;
2569 val -= ppd->cpspec->iblnkerrdelta;
2570 write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
2571 }
2572 if (ppd->cpspec->iblnkdowndelta) {
2573 val = read_7322_creg32_port(ppd, crp_iblinkdown);
2574 val += ppd->cpspec->iblnkdowndelta;
2575 write_7322_creg_port(ppd, crp_iblinkdown, val);
2576 }

		/* and disable counter writes */
2583 qib_write_kreg(dd, kr_hwdiagctrl, diagc);
2584 }
2585}
2586
/**
 * qib_setup_7322_setextled - set the state of the two external LEDs
 * @ppd: physical port on the qlogic_ib device
 * @on: whether the link is up or not
 *
 * The exact combo of LEDs if on is true is determined by looking
 * at the ibcstatus.
 *
 * These LEDs indicate the physical and logical state of IB link.
 * For this chip (at least with recommended board pinouts), LED1
 * is Yellow (logical state) and LED2 is Green (physical state).
 *
 * Note:  We try to match the Mellanox HCA LED behavior as best
 * we can.  Green indicates physical link state is "up", and yellow
 * indicates logical link state is Active (i.e. ARMED or ACTIVE).
 */
2609static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
2610{
2611 struct qib_devdata *dd = ppd->dd;
2612 u64 extctl, ledblink = 0, val;
2613 unsigned long flags;
2614 int yel, grn;
2615
	/*
	 * The diags use the LED to indicate diag info, so we leave
	 * the external LED alone when the diags are running.
	 */
2620 if (dd->diag_client)
2621 return;
2622
	/* Allow override of LED display for, e.g. Locating system in rack */
2624 if (ppd->led_override) {
2625 grn = (ppd->led_override & QIB_LED_PHYS);
2626 yel = (ppd->led_override & QIB_LED_LOG);
2627 } else if (on) {
2628 val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
2629 grn = qib_7322_phys_portstate(val) ==
2630 IB_PHYSPORTSTATE_LINKUP;
2631 yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
2632 } else {
2633 grn = 0;
2634 yel = 0;
2635 }
2636
2637 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2638 extctl = dd->cspec->extctrl & (ppd->port == 1 ?
2639 ~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
2640 if (grn) {
2641 extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
		/*
		 * Counts are in chip clock (4ns) periods.
		 * This is 1/16 sec (66.6ms) on,
		 * 3/16 sec (187.5 ms) off, with packets rcvd.
		 */
2647 ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
2648 ((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
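		/*
		 * Checking the arithmetic above: 66600 * 1000ns / 4ns per
		 * clock = 16,650,000 clocks (66.6 ms on), and
		 * 187500 * 1000 / 4 = 46,875,000 clocks (187.5 ms off).
		 */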
2649 }
2650 if (yel)
2651 extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
2652 dd->cspec->extctrl = extctl;
2653 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
2654 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2655
2656 if (ledblink)
2657 qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
2658}
2659
2660#ifdef CONFIG_INFINIBAND_QIB_DCA
2661
2662static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event)
2663{
2664 switch (event) {
2665 case DCA_PROVIDER_ADD:
2666 if (dd->flags & QIB_DCA_ENABLED)
2667 break;
2668 if (!dca_add_requester(&dd->pcidev->dev)) {
2669 qib_devinfo(dd->pcidev, "DCA enabled\n");
2670 dd->flags |= QIB_DCA_ENABLED;
2671 qib_setup_dca(dd);
2672 }
2673 break;
2674 case DCA_PROVIDER_REMOVE:
2675 if (dd->flags & QIB_DCA_ENABLED) {
2676 dca_remove_requester(&dd->pcidev->dev);
2677 dd->flags &= ~QIB_DCA_ENABLED;
2678 dd->cspec->dca_ctrl = 0;
2679 qib_write_kreg(dd, KREG_IDX(DCACtrlA),
2680 dd->cspec->dca_ctrl);
2681 }
2682 break;
2683 }
2684 return 0;
2685}
2686
2687static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd, int cpu)
2688{
2689 struct qib_devdata *dd = rcd->dd;
2690 struct qib_chip_specific *cspec = dd->cspec;
2691
2692 if (!(dd->flags & QIB_DCA_ENABLED))
2693 return;
2694 if (cspec->rhdr_cpu[rcd->ctxt] != cpu) {
2695 const struct dca_reg_map *rmp;
2696
2697 cspec->rhdr_cpu[rcd->ctxt] = cpu;
2698 rmp = &dca_rcvhdr_reg_map[rcd->ctxt];
2699 cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask;
2700 cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |=
2701 (u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb;
2702 qib_devinfo(dd->pcidev,
2703 "Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu,
2704 (long long) cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2705 qib_write_kreg(dd, rmp->regno,
2706 cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2707 cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable);
2708 qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2709 }
2710}
2711
2712static void qib_update_sdma_dca(struct qib_pportdata *ppd, int cpu)
2713{
2714 struct qib_devdata *dd = ppd->dd;
2715 struct qib_chip_specific *cspec = dd->cspec;
2716 unsigned pidx = ppd->port - 1;
2717
2718 if (!(dd->flags & QIB_DCA_ENABLED))
2719 return;
2720 if (cspec->sdma_cpu[pidx] != cpu) {
2721 cspec->sdma_cpu[pidx] = cpu;
2722 cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ?
2723 SYM_MASK(DCACtrlF, SendDma1DCAOPH) :
2724 SYM_MASK(DCACtrlF, SendDma0DCAOPH));
2725 cspec->dca_rcvhdr_ctrl[4] |=
2726 (u64) dca3_get_tag(&dd->pcidev->dev, cpu) <<
2727 (ppd->hw_pidx ?
2728 SYM_LSB(DCACtrlF, SendDma1DCAOPH) :
2729 SYM_LSB(DCACtrlF, SendDma0DCAOPH));
2730 qib_devinfo(dd->pcidev,
2731 "sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu,
2732 (long long) cspec->dca_rcvhdr_ctrl[4]);
2733 qib_write_kreg(dd, KREG_IDX(DCACtrlF),
2734 cspec->dca_rcvhdr_ctrl[4]);
2735 cspec->dca_ctrl |= ppd->hw_pidx ?
2736 SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) :
2737 SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable);
2738 qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2739 }
2740}
2741
2742static void qib_setup_dca(struct qib_devdata *dd)
2743{
2744 struct qib_chip_specific *cspec = dd->cspec;
2745 int i;
2746
2747 for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++)
2748 cspec->rhdr_cpu[i] = -1;
2749 for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2750 cspec->sdma_cpu[i] = -1;
2751 cspec->dca_rcvhdr_ctrl[0] =
2752 (1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) |
2753 (1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) |
2754 (1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) |
2755 (1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt));
2756 cspec->dca_rcvhdr_ctrl[1] =
2757 (1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) |
2758 (1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) |
2759 (1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) |
2760 (1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt));
2761 cspec->dca_rcvhdr_ctrl[2] =
2762 (1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) |
2763 (1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) |
2764 (1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) |
2765 (1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt));
2766 cspec->dca_rcvhdr_ctrl[3] =
2767 (1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) |
2768 (1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) |
2769 (1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) |
2770 (1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt));
2771 cspec->dca_rcvhdr_ctrl[4] =
2772 (1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) |
2773 (1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt));
2774 for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2775 qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
2776 cspec->dca_rcvhdr_ctrl[i]);
2777 for (i = 0; i < cspec->num_msix_entries; i++)
2778 setup_dca_notifier(dd, &cspec->msix_entries[i]);
2779}
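
/*
 * Layout note for the shadow array initialized above: each of DCACtrlB
 * through DCACtrlE carries the RcvHdrq DCA transfer-count fields for four
 * contexts (B: 0-3, C: 4-7, D: 8-11, E: 12-15), while DCACtrlF holds
 * contexts 16-17 plus the two SendDma DCA fields that are updated in
 * qib_update_sdma_dca().
 */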
2780
2781static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
2782 const cpumask_t *mask)
2783{
2784 struct qib_irq_notify *n =
2785 container_of(notify, struct qib_irq_notify, notify);
2786 int cpu = cpumask_first(mask);
2787
2788 if (n->rcv) {
2789 struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2790
2791 qib_update_rhdrq_dca(rcd, cpu);
2792 } else {
2793 struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2794
2795 qib_update_sdma_dca(ppd, cpu);
2796 }
2797}
2798
2799static void qib_irq_notifier_release(struct kref *ref)
2800{
2801 struct qib_irq_notify *n =
2802 container_of(ref, struct qib_irq_notify, notify.kref);
2803 struct qib_devdata *dd;
2804
2805 if (n->rcv) {
2806 struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2807
2808 dd = rcd->dd;
2809 } else {
2810 struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2811
2812 dd = ppd->dd;
2813 }
2814 qib_devinfo(dd->pcidev,
2815 "release on HCA notify 0x%p n 0x%p\n", ref, n);
2816 kfree(n);
2817}
2818#endif
2819
/*
 * Disable MSIx interrupt if enabled, call generic MSIx code
 * to cleanup, and clear pending MSIx interrupts.
 * Used for fallback to INTx, after reset, and when MSIx setup fails.
 */
2825static void qib_7322_nomsix(struct qib_devdata *dd)
2826{
2827 u64 intgranted;
2828 int n;
2829
2830 dd->cspec->main_int_mask = ~0ULL;
2831 n = dd->cspec->num_msix_entries;
2832 if (n) {
2833 int i;
2834
2835 dd->cspec->num_msix_entries = 0;
2836 for (i = 0; i < n; i++) {
2837#ifdef CONFIG_INFINIBAND_QIB_DCA
2838 reset_dca_notifier(dd, &dd->cspec->msix_entries[i]);
2839#endif
2840 irq_set_affinity_hint(
2841 dd->cspec->msix_entries[i].irq, NULL);
2842 free_cpumask_var(dd->cspec->msix_entries[i].mask);
2843 free_irq(dd->cspec->msix_entries[i].irq,
2844 dd->cspec->msix_entries[i].arg);
2845 }
2846 qib_nomsix(dd);
2847 }

	/* make sure no MSIx interrupts are left pending */
2849 intgranted = qib_read_kreg64(dd, kr_intgranted);
2850 if (intgranted)
2851 qib_write_kreg(dd, kr_intgranted, intgranted);
2852}
2853
2854static void qib_7322_free_irq(struct qib_devdata *dd)
2855{
2856 if (dd->cspec->irq) {
2857 free_irq(dd->cspec->irq, dd);
2858 dd->cspec->irq = 0;
2859 }
2860 qib_7322_nomsix(dd);
2861}
2862
2863static void qib_setup_7322_cleanup(struct qib_devdata *dd)
2864{
2865 int i;
2866
2867#ifdef CONFIG_INFINIBAND_QIB_DCA
2868 if (dd->flags & QIB_DCA_ENABLED) {
2869 dca_remove_requester(&dd->pcidev->dev);
2870 dd->flags &= ~QIB_DCA_ENABLED;
2871 dd->cspec->dca_ctrl = 0;
2872 qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl);
2873 }
2874#endif
2875
2876 qib_7322_free_irq(dd);
2877 kfree(dd->cspec->cntrs);
2878 kfree(dd->cspec->sendchkenable);
2879 kfree(dd->cspec->sendgrhchk);
2880 kfree(dd->cspec->sendibchk);
2881 kfree(dd->cspec->msix_entries);
2882 for (i = 0; i < dd->num_pports; i++) {
2883 unsigned long flags;
2884 u32 mask = QSFP_GPIO_MOD_PRS_N |
2885 (QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);
2886
2887 kfree(dd->pport[i].cpspec->portcntrs);
2888 if (dd->flags & QIB_HAS_QSFP) {
2889 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2890 dd->cspec->gpio_mask &= ~mask;
2891 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2892 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2893 }
2894 }
2895}
2896
/* Handle the SDMA-related bits found in a combined interrupt status */
2898static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
2899{
2900 struct qib_pportdata *ppd0 = &dd->pport[0];
2901 struct qib_pportdata *ppd1 = &dd->pport[1];
2902 u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
2903 INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
2904 u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
2905 INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));
2906
2907 if (intr0)
2908 qib_sdma_intr(ppd0);
2909 if (intr1)
2910 qib_sdma_intr(ppd1);
2911
2912 if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
2913 qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
2914 if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
2915 qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
2916}
2917
/*
 * Set or clear the Send buffer available interrupt enable bit.
 */
2921static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
2922{
2923 unsigned long flags;
2924
2925 spin_lock_irqsave(&dd->sendctrl_lock, flags);
2926 if (needint)
2927 dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
2928 else
2929 dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
2930 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
2931 qib_write_kreg(dd, kr_scratch, 0ULL);
2932 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2933}
2934
/*
 * Somehow got an interrupt with reserved bits set in interrupt status.
 * Print a message so we know it happened, then clear them.
 */
2940static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
2941{
	u64 kills;

	kills = istat & ~QIB_I_BITSEXTANT;
	qib_dev_err(dd,
		"Clearing reserved interrupt(s) 0x%016llx\n",
		(unsigned long long) kills);
2949 qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
2950}
2951
/* keep mainline interrupt handler cache-efficient */
2953static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
2954{
2955 u32 gpiostatus;
2956 int handled = 0;
2957 int pidx;
2958
	/*
	 * Boards for this chip currently don't use GPIO interrupts,
	 * so clear by writing GPIOstatus to GPIOclear, and complain
	 * to developer.  To avoid endless repeats, clear
	 * the bits in the mask, since there is some kind of
	 * programming error or chip problem.
	 */
2966 gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
	/*
	 * In theory, writing GPIOstatus to GPIOclear could
	 * have a bad side-effect on some diagnostic that wanted
	 * to poll for a status-change, but the various shadows
	 * make that problematic at best.  Diags will just suppress
	 * all GPIO interrupts during such tests.
	 */
2974 qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
2975
	/*
	 * Check for QSFP MOD_PRS changes;
	 * only works for single port if IB1 != pidx1
	 */
2979 for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
2980 ++pidx) {
2981 struct qib_pportdata *ppd;
2982 struct qib_qsfp_data *qd;
2983 u32 mask;
2984
2985 if (!dd->pport[pidx].link_speed_supported)
2986 continue;
2987 mask = QSFP_GPIO_MOD_PRS_N;
2988 ppd = dd->pport + pidx;
2989 mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
2990 if (gpiostatus & dd->cspec->gpio_mask & mask) {
2991 u64 pins;
2992
2993 qd = &ppd->cpspec->qsfp_data;
2994 gpiostatus &= ~mask;
2995 pins = qib_read_kreg64(dd, kr_extstatus);
2996 pins >>= SYM_LSB(EXTStatus, GPIOIn);
2997 if (!(pins & mask)) {
2998 ++handled;
2999 qd->t_insert = jiffies;
3000 queue_work(ib_wq, &qd->work);
3001 }
3002 }
3003 }
3004
3005 if (gpiostatus && !handled) {
3006 const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
3007 u32 gpio_irq = mask & gpiostatus;
3008
		/*
		 * Clear any troublemakers, and update chip from shadow
		 */
3012 dd->cspec->gpio_mask &= ~gpio_irq;
3013 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
3014 }
3015}
3016
/*
 * Handle errors and unusual events first, separate function
 * to improve cache hits for fast path interrupt handling.
 */
3021static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
3022{
3023 if (istat & ~QIB_I_BITSEXTANT)
3024 unknown_7322_ibits(dd, istat);
3025 if (istat & QIB_I_GPIO)
3026 unknown_7322_gpio_intr(dd);
3027 if (istat & QIB_I_C_ERROR) {
3028 qib_write_kreg(dd, kr_errmask, 0ULL);
3029 tasklet_schedule(&dd->error_tasklet);
3030 }
3031 if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
3032 handle_7322_p_errors(dd->rcd[0]->ppd);
3033 if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
3034 handle_7322_p_errors(dd->rcd[1]->ppd);
3035}
3036
/*
 * Dynamically adjust the rcv int timeout for a context based on incoming
 * packet rate.
 */
3041static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
3042{
3043 struct qib_devdata *dd = rcd->dd;
3044 u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
3045
	/*
	 * Dynamically adjust idle timeout on chip
	 * based on number of packets processed.
	 */
3050 if (npkts < rcv_int_count && timeout > 2)
3051 timeout >>= 1;
3052 else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
3053 timeout = min(timeout << 1, rcv_int_timeout);
3054 else
3055 return;
3056
3057 dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
3058 qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
3059}
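
/*
 * Example of the adaptation above: a context whose interrupt covered fewer
 * than rcv_int_count packets has its timeout halved while it is above 2,
 * while one that drained rcv_int_count or more has it doubled, capped at
 * rcv_int_timeout; a busy context thus converges to longer coalescing
 * intervals and a lightly loaded one to shorter, lower-latency ones.
 */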
3060
/*
 * This is the main interrupt handler.
 * It will normally only be used for low frequency interrupts but may
 * have to handle all interrupts if INTx is enabled or fewer than normal
 * MSIx interrupts were allocated.
 * This routine should ignore the interrupt bits for any of the
 * dedicated MSIx handlers.
 */
3069static irqreturn_t qib_7322intr(int irq, void *data)
3070{
3071 struct qib_devdata *dd = data;
3072 irqreturn_t ret;
3073 u64 istat;
3074 u64 ctxtrbits;
3075 u64 rmask;
3076 unsigned i;
3077 u32 npkts;
3078
3079 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
3086 ret = IRQ_HANDLED;
3087 goto bail;
3088 }
3089
3090 istat = qib_read_kreg64(dd, kr_intstatus);
3091
3092 if (unlikely(istat == ~0ULL)) {
3093 qib_bad_intrstatus(dd);
3094 qib_dev_err(dd, "Interrupt status all f's, skipping\n");
		/* don't know if it was our interrupt or not */
3096 ret = IRQ_NONE;
3097 goto bail;
3098 }
3099
3100 istat &= dd->cspec->main_int_mask;
3101 if (unlikely(!istat)) {
		/* already handled, or shared and not us */
3103 ret = IRQ_NONE;
3104 goto bail;
3105 }
3106
3107 this_cpu_inc(*dd->int_counter);

	/* handle "errors" of various kinds first, device ahead of port */
3110 if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
3111 QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
3112 INT_MASK_P(Err, 1))))
3113 unlikely_7322_intr(dd, istat);
3114
	/*
	 * Clear the interrupt bits we found set, relatively early, so we
	 * "know" the chip will have seen this by the time we process
	 * the queue, and will re-interrupt if necessary.  The processor
	 * itself won't take the interrupt again until we return.
	 */
3121 qib_write_kreg(dd, kr_intclear, istat);
3122
	/*
	 * Handle kernel receive queues before checking for pio buffers
	 * available since receives can overflow; piobuf waiters can afford
	 * a few extra cycles, since they were waiting anyway.
	 */
3128 ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
3129 if (ctxtrbits) {
3130 rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
3131 (1ULL << QIB_I_RCVURG_LSB);
3132 for (i = 0; i < dd->first_user_ctxt; i++) {
3133 if (ctxtrbits & rmask) {
3134 ctxtrbits &= ~rmask;
3135 if (dd->rcd[i])
3136 qib_kreceive(dd->rcd[i], NULL, &npkts);
3137 }
3138 rmask <<= 1;
3139 }
3140 if (ctxtrbits) {
3141 ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
3142 (ctxtrbits >> QIB_I_RCVURG_LSB);
3143 qib_handle_urcv(dd, ctxtrbits);
3144 }
3145 }
3146
3147 if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
3148 sdma_7322_intr(dd, istat);
3149
3150 if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
3151 qib_ib_piobufavail(dd);
3152
3153 ret = IRQ_HANDLED;
3154bail:
3155 return ret;
3156}
3157
/*
 * Dedicated receive packet available interrupt handler.
 */
3161static irqreturn_t qib_7322pintr(int irq, void *data)
3162{
3163 struct qib_ctxtdata *rcd = data;
3164 struct qib_devdata *dd = rcd->dd;
3165 u32 npkts;
3166
3167 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
3174 return IRQ_HANDLED;
3175
3176 this_cpu_inc(*dd->int_counter);

	/* Clear the interrupt bit we expect to be set. */
3179 qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
3180 (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
3181
3182 qib_kreceive(rcd, NULL, &npkts);
3183
3184 return IRQ_HANDLED;
3185}
3186
/*
 * Dedicated Send buffer available interrupt handler.
 */
3190static irqreturn_t qib_7322bufavail(int irq, void *data)
3191{
3192 struct qib_devdata *dd = data;
3193
3194 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
3201 return IRQ_HANDLED;
3202
3203 this_cpu_inc(*dd->int_counter);

	/* Clear the interrupt bit we expect to be set. */
3206 qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);

	/* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
3209 if (dd->flags & QIB_INITTED)
3210 qib_ib_piobufavail(dd);
3211 else
3212 qib_wantpiobuf_7322_intr(dd, 0);
3213
3214 return IRQ_HANDLED;
3215}
3216
/*
 * Dedicated Send DMA interrupt handler.
 */
3220static irqreturn_t sdma_intr(int irq, void *data)
3221{
3222 struct qib_pportdata *ppd = data;
3223 struct qib_devdata *dd = ppd->dd;
3224
3225 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3226
3227
3228
3229
3230
3231
3232 return IRQ_HANDLED;
3233
3234 this_cpu_inc(*dd->int_counter);
3235
3236
3237 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3238 INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
3239 qib_sdma_intr(ppd);
3240
3241 return IRQ_HANDLED;
3242}
3243
/*
 * Dedicated Send DMA idle interrupt handler.
 */
3247static irqreturn_t sdma_idle_intr(int irq, void *data)
3248{
3249 struct qib_pportdata *ppd = data;
3250 struct qib_devdata *dd = ppd->dd;
3251
3252 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3253
3254
3255
3256
3257
3258
3259 return IRQ_HANDLED;
3260
3261 this_cpu_inc(*dd->int_counter);
3262
3263
3264 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3265 INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
3266 qib_sdma_intr(ppd);
3267
3268 return IRQ_HANDLED;
3269}
3270
/*
 * Dedicated Send DMA progress interrupt handler.
 */
3274static irqreturn_t sdma_progress_intr(int irq, void *data)
3275{
3276 struct qib_pportdata *ppd = data;
3277 struct qib_devdata *dd = ppd->dd;
3278
3279 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3280
3281
3282
3283
3284
3285
3286 return IRQ_HANDLED;
3287
3288 this_cpu_inc(*dd->int_counter);
3289
3290
3291 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3292 INT_MASK_P(SDmaProgress, 1) :
3293 INT_MASK_P(SDmaProgress, 0));
3294 qib_sdma_intr(ppd);
3295
3296 return IRQ_HANDLED;
3297}
3298
/*
 * Dedicated Send DMA cleanup interrupt handler.
 */
3302static irqreturn_t sdma_cleanup_intr(int irq, void *data)
3303{
3304 struct qib_pportdata *ppd = data;
3305 struct qib_devdata *dd = ppd->dd;
3306
3307 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3308
3309
3310
3311
3312
3313
3314 return IRQ_HANDLED;
3315
3316 this_cpu_inc(*dd->int_counter);
3317
3318
3319 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3320 INT_MASK_PM(SDmaCleanupDone, 1) :
3321 INT_MASK_PM(SDmaCleanupDone, 0));
3322 qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
3323
3324 return IRQ_HANDLED;
3325}
3326
3327#ifdef CONFIG_INFINIBAND_QIB_DCA
3328
3329static void reset_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
3330{
3331 if (!m->dca)
3332 return;
3333 qib_devinfo(dd->pcidev,
3334 "Disabling notifier on HCA %d irq %d\n",
3335 dd->unit,
3336 m->irq);
3337 irq_set_affinity_notifier(
3338 m->irq,
3339 NULL);
3340 m->notifier = NULL;
3341}
3342
3343static void setup_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
3344{
3345 struct qib_irq_notify *n;
3346
3347 if (!m->dca)
3348 return;
3349 n = kzalloc(sizeof(*n), GFP_KERNEL);
3350 if (n) {
3351 int ret;
3352
3353 m->notifier = n;
3354 n->notify.irq = m->irq;
3355 n->notify.notify = qib_irq_notifier_notify;
3356 n->notify.release = qib_irq_notifier_release;
3357 n->arg = m->arg;
3358 n->rcv = m->rcv;
3359 qib_devinfo(dd->pcidev,
3360 "set notifier irq %d rcv %d notify %p\n",
3361 n->notify.irq, n->rcv, &n->notify);
3362 ret = irq_set_affinity_notifier(
3363 n->notify.irq,
3364 &n->notify);
3365 if (ret) {
3366 m->notifier = NULL;
3367 kfree(n);
3368 }
3369 }
3370}
3371
3372#endif
3373
/*
 * Set up our chip-specific interrupt handler.
 * The interrupt type has already been setup, so
 * we just need to do the registration and error checking.
 * If we are using MSIx interrupts, we may fall back to
 * INTx later, if the interrupt handler doesn't get called
 * within 1/2 second (see verify_interrupt()).
 */
3382static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
3383{
3384 int ret, i, msixnum;
3385 u64 redirect[6];
3386 u64 mask;
3387 const struct cpumask *local_mask;
3388 int firstcpu, secondcpu = 0, currrcvcpu = 0;
3389
3390 if (!dd->num_pports)
3391 return;
3392
3393 if (clearpend) {
		/*
		 * If not switching interrupt types, be sure interrupts are
		 * disabled, and then clear anything pending at this point,
		 * because we are starting clean.
		 */
3399 qib_7322_set_intr_state(dd, 0);

		/* clear the reset error, init error/hwerror mask */
3402 qib_7322_init_hwerrors(dd);

		/* clear any interrupt bits that might be set */
3405 qib_write_kreg(dd, kr_intclear, ~0ULL);

		/* make sure no pending MSIx intr, and clear diag reg */
3408 qib_write_kreg(dd, kr_intgranted, ~0ULL);
3409 qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
3410 }
3411
3412 if (!dd->cspec->num_msix_entries) {
		/* Try to get INTx interrupt */
3414try_intx:
3415 if (!dd->pcidev->irq) {
3416 qib_dev_err(dd,
3417 "irq is 0, BIOS error? Interrupts won't work\n");
3418 goto bail;
3419 }
3420 ret = request_irq(dd->pcidev->irq, qib_7322intr,
3421 IRQF_SHARED, QIB_DRV_NAME, dd);
3422 if (ret) {
3423 qib_dev_err(dd,
3424 "Couldn't setup INTx interrupt (irq=%d): %d\n",
3425 dd->pcidev->irq, ret);
3426 goto bail;
3427 }
3428 dd->cspec->irq = dd->pcidev->irq;
3429 dd->cspec->main_int_mask = ~0ULL;
3430 goto bail;
3431 }
3432
	/* Try to get MSIx interrupts */
3434 memset(redirect, 0, sizeof(redirect));
3435 mask = ~0ULL;
3436 msixnum = 0;
3437 local_mask = cpumask_of_pcibus(dd->pcidev->bus);
3438 firstcpu = cpumask_first(local_mask);
3439 if (firstcpu >= nr_cpu_ids ||
3440 cpumask_weight(local_mask) == num_online_cpus()) {
3441 local_mask = topology_core_cpumask(0);
3442 firstcpu = cpumask_first(local_mask);
3443 }
3444 if (firstcpu < nr_cpu_ids) {
3445 secondcpu = cpumask_next(firstcpu, local_mask);
3446 if (secondcpu >= nr_cpu_ids)
3447 secondcpu = firstcpu;
3448 currrcvcpu = secondcpu;
3449 }
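
	/*
	 * Affinity policy sketch: firstcpu (first CPU local to the device's
	 * PCI bus, or on core 0 if that information is unusable) hosts the
	 * non-receive vectors; receive-context vectors are handed out
	 * round-robin starting at secondcpu, wrapping back to secondcpu
	 * when the local mask is exhausted (see the loop below).
	 */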
3450 for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
3451 irq_handler_t handler;
3452 void *arg;
3453 u64 val;
3454 int lsb, reg, sh;
3455#ifdef CONFIG_INFINIBAND_QIB_DCA
3456 int dca = 0;
3457#endif
3458
3459 dd->cspec->msix_entries[msixnum].
3460 name[sizeof(dd->cspec->msix_entries[msixnum].name) - 1]
3461 = '\0';
3462 if (i < ARRAY_SIZE(irq_table)) {
3463 if (irq_table[i].port) {
				/* skip if for a non-configured port */
3465 if (irq_table[i].port > dd->num_pports)
3466 continue;
3467 arg = dd->pport + irq_table[i].port - 1;
3468 } else
3469 arg = dd;
3470#ifdef CONFIG_INFINIBAND_QIB_DCA
3471 dca = irq_table[i].dca;
3472#endif
3473 lsb = irq_table[i].lsb;
3474 handler = irq_table[i].handler;
3475 snprintf(dd->cspec->msix_entries[msixnum].name,
3476 sizeof(dd->cspec->msix_entries[msixnum].name)
3477 - 1,
3478 QIB_DRV_NAME "%d%s", dd->unit,
3479 irq_table[i].name);
3480 } else {
3481 unsigned ctxt;
3482
3483 ctxt = i - ARRAY_SIZE(irq_table);
			/* per-context receive interrupt */
3485 arg = dd->rcd[ctxt];
3486 if (!arg)
3487 continue;
3488 if (qib_krcvq01_no_msi && ctxt < 2)
3489 continue;
3490#ifdef CONFIG_INFINIBAND_QIB_DCA
3491 dca = 1;
3492#endif
3493 lsb = QIB_I_RCVAVAIL_LSB + ctxt;
3494 handler = qib_7322pintr;
3495 snprintf(dd->cspec->msix_entries[msixnum].name,
3496 sizeof(dd->cspec->msix_entries[msixnum].name)
3497 - 1,
3498 QIB_DRV_NAME "%d (kctx)", dd->unit);
3499 }
3500
3501 dd->cspec->msix_entries[msixnum].irq = pci_irq_vector(
3502 dd->pcidev, msixnum);
3503 if (dd->cspec->msix_entries[msixnum].irq < 0) {
3504 qib_dev_err(dd,
3505 "Couldn't get MSIx irq (vec=%d): %d\n",
3506 msixnum,
3507 dd->cspec->msix_entries[msixnum].irq);
3508 qib_7322_nomsix(dd);
3509 goto try_intx;
3510 }
3511 ret = request_irq(dd->cspec->msix_entries[msixnum].irq,
3512 handler, 0,
3513 dd->cspec->msix_entries[msixnum].name,
3514 arg);
3515 if (ret) {
			/*
			 * Shouldn't happen since the enable said we could
			 * have as many as we are trying to set up here.
			 */
3520 qib_dev_err(dd,
3521 "Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
3522 msixnum,
3523 dd->cspec->msix_entries[msixnum].irq,
3524 ret);
3525 qib_7322_nomsix(dd);
3526 goto try_intx;
3527 }
3528 dd->cspec->msix_entries[msixnum].arg = arg;
3529#ifdef CONFIG_INFINIBAND_QIB_DCA
3530 dd->cspec->msix_entries[msixnum].dca = dca;
3531 dd->cspec->msix_entries[msixnum].rcv =
3532 handler == qib_7322pintr;
3533#endif
3534 if (lsb >= 0) {
3535 reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
3536 sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
3537 SYM_LSB(IntRedirect0, vec1);
3538 mask &= ~(1ULL << lsb);
3539 redirect[reg] |= ((u64) msixnum) << sh;
3540 }
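		/*
		 * Sketch of the packing just above: each IntRedirect
		 * register holds IBA7322_REDIRECT_VEC_PER_REG small fields,
		 * one per interrupt-source bit (lsb), so reg = lsb / per_reg
		 * and sh selects the field within it; the MSIx vector number
		 * written there steers that source to this vector.
		 */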
3541 val = qib_read_kreg64(dd, 2 * msixnum + 1 +
3542 (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3543 if (firstcpu < nr_cpu_ids &&
3544 zalloc_cpumask_var(
3545 &dd->cspec->msix_entries[msixnum].mask,
3546 GFP_KERNEL)) {
3547 if (handler == qib_7322pintr) {
3548 cpumask_set_cpu(currrcvcpu,
3549 dd->cspec->msix_entries[msixnum].mask);
3550 currrcvcpu = cpumask_next(currrcvcpu,
3551 local_mask);
3552 if (currrcvcpu >= nr_cpu_ids)
3553 currrcvcpu = secondcpu;
3554 } else {
3555 cpumask_set_cpu(firstcpu,
3556 dd->cspec->msix_entries[msixnum].mask);
3557 }
3558 irq_set_affinity_hint(
3559 dd->cspec->msix_entries[msixnum].irq,
3560 dd->cspec->msix_entries[msixnum].mask);
3561 }
3562 msixnum++;
3563 }

	/* Initialize the vector mapping */
3565 for (i = 0; i < ARRAY_SIZE(redirect); i++)
3566 qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
3567 dd->cspec->main_int_mask = mask;
3568 tasklet_init(&dd->error_tasklet, qib_error_tasklet,
3569 (unsigned long)dd);
3570bail:;
3571}
3572
/**
 * qib_7322_boardname - fill in the board name and note features
 * @dd: the qlogic_ib device
 *
 * info will be based on the board revision register
 */
3579static unsigned qib_7322_boardname(struct qib_devdata *dd)
3580{
3581
3582 u32 boardid;
3583 unsigned int features = DUAL_PORT_CAP;
3584
3585 boardid = SYM_FIELD(dd->revision, Revision, BoardID);
3586
3587 switch (boardid) {
3588 case 0:
3589 dd->boardname = "InfiniPath_QLE7342_Emulation";
3590 break;
3591 case 1:
3592 dd->boardname = "InfiniPath_QLE7340";
3593 dd->flags |= QIB_HAS_QSFP;
3594 features = PORT_SPD_CAP;
3595 break;
3596 case 2:
3597 dd->boardname = "InfiniPath_QLE7342";
3598 dd->flags |= QIB_HAS_QSFP;
3599 break;
3600 case 3:
3601 dd->boardname = "InfiniPath_QMI7342";
3602 break;
3603 case 4:
3604 dd->boardname = "InfiniPath_Unsupported7342";
3605 qib_dev_err(dd, "Unsupported version of QMH7342\n");
3606 features = 0;
3607 break;
3608 case BOARD_QMH7342:
3609 dd->boardname = "InfiniPath_QMH7342";
3610 features = 0x24;
3611 break;
3612 case BOARD_QME7342:
3613 dd->boardname = "InfiniPath_QME7342";
3614 break;
3615 case 8:
3616 dd->boardname = "InfiniPath_QME7362";
3617 dd->flags |= QIB_HAS_QSFP;
3618 break;
3619 case BOARD_QMH7360:
3620 dd->boardname = "Intel IB QDR 1P FLR-QSFP Adptr";
3621 dd->flags |= QIB_HAS_QSFP;
3622 break;
3623 case 15:
3624 dd->boardname = "InfiniPath_QLE7342_TEST";
3625 dd->flags |= QIB_HAS_QSFP;
3626 break;
3627 default:
3628 dd->boardname = "InfiniPath_QLE73xy_UNKNOWN";
3629 qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
3630 break;
3631 }
3632 dd->board_atten = 1;
3633
3634 snprintf(dd->boardversion, sizeof(dd->boardversion),
3635 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
3636 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
3637 (unsigned int)SYM_FIELD(dd->revision, Revision_R, Arch),
3638 dd->majrev, dd->minrev,
3639 (unsigned int)SYM_FIELD(dd->revision, Revision_R, SW));
3640
3641 if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
3642 qib_devinfo(dd->pcidev,
3643 "IB%u: Forced to single port mode by module parameter\n",
3644 dd->unit);
3645 features &= PORT_SPD_CAP;
3646 }
3647
3648 return features;
3649}
3650
/*
 * This routine sleeps, so it can only be called from user context, not
 * from interrupt context.
 */
3655static int qib_do_7322_reset(struct qib_devdata *dd)
3656{
3657 u64 val;
3658 u64 *msix_vecsave = NULL;
3659 int i, msix_entries, ret = 1;
3660 u16 cmdval;
3661 u8 int_line, clinesz;
3662 unsigned long flags;

	/* Use dev_err so it shows up in logs, etc. */
3665 qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
3666
3667 qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
3668
3669 msix_entries = dd->cspec->num_msix_entries;
3670
	/* no interrupts till re-initted */
3672 qib_7322_set_intr_state(dd, 0);
3673
3674 if (msix_entries) {
3675 qib_7322_nomsix(dd);
		/* can be up to 512 bytes, too big for stack */
3677 msix_vecsave = kmalloc_array(2 * dd->cspec->num_msix_entries,
3678 sizeof(u64),
3679 GFP_KERNEL);
3680 }
3681

	/*
	 * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
	 * info that is set up by the BIOS, so we have to save it ourselves
	 * and rewrite it after the chip reset.
	 */
3689 for (i = 0; i < msix_entries; i++) {
3690 u64 vecaddr, vecdata;
3691
3692 vecaddr = qib_read_kreg64(dd, 2 * i +
3693 (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3694 vecdata = qib_read_kreg64(dd, 1 + 2 * i +
3695 (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3696 if (msix_vecsave) {
3697 msix_vecsave[2 * i] = vecaddr;
			/* save it without the masked bit set */
3699 msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
3700 }
3701 }
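
	/*
	 * A note on the save above: each MSI-X table entry is 16 bytes
	 * (address, data, vector control), read here as two u64s.  In the
	 * second u64 the low word is the message data and bit 32 maps to
	 * the per-vector Mask bit of the vector control dword, which is
	 * why it is stripped before saving.
	 */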
3702
3703 dd->pport->cpspec->ibdeltainprog = 0;
3704 dd->pport->cpspec->ibsymdelta = 0;
3705 dd->pport->cpspec->iblnkerrdelta = 0;
3706 dd->pport->cpspec->ibmalfdelta = 0;
	/* so we check interrupts work again */
3708 dd->z_int_counter = qib_int_counter(dd);

	/*
	 * Keep chip from being accessed until we are ready.  Use
	 * writeq() directly, to allow the write even though QIB_PRESENT
	 * isn't set.
	 */
3715 dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
3716 dd->flags |= QIB_DOING_RESET;
3717 val = dd->control | QLOGIC_IB_C_RESET;
3718 writeq(val, &dd->kregbase[kr_control]);
3719
3720 for (i = 1; i <= 5; i++) {
		/*
		 * Allow MBIST, etc. to complete; longer on each retry.
		 * We sometimes get machine checks from bus timeout if no
		 * response, so for now, make it *really* long.
		 */
3726 msleep(1000 + (1 + i) * 3000);
3727
3728 qib_pcie_reenable(dd, cmdval, int_line, clinesz);
3729
		/*
		 * Use readq directly, so we don't need to mark it as PRESENT
		 * until we get a successful indication that all is well.
		 */
3734 val = readq(&dd->kregbase[kr_revision]);
3735 if (val == dd->revision)
3736 break;
3737 if (i == 5) {
3738 qib_dev_err(dd,
3739 "Failed to initialize after reset, unusable\n");
3740 ret = 0;
3741 goto bail;
3742 }
3743 }
3744
	dd->flags |= QIB_PRESENT; /* it's back */
3746
3747 if (msix_entries) {
		/* restore the MSIx vector address and data if saved above */
3749 for (i = 0; i < msix_entries; i++) {
3750 if (!msix_vecsave || !msix_vecsave[2 * i])
3751 continue;
3752 qib_write_kreg(dd, 2 * i +
3753 (QIB_7322_MsixTable_OFFS / sizeof(u64)),
3754 msix_vecsave[2 * i]);
3755 qib_write_kreg(dd, 1 + 2 * i +
3756 (QIB_7322_MsixTable_OFFS / sizeof(u64)),
3757 msix_vecsave[1 + 2 * i]);
3758 }
3759 }
3760
	/* initialize the remaining registers. */
3762 for (i = 0; i < dd->num_pports; ++i)
3763 write_7322_init_portregs(&dd->pport[i]);
3764 write_7322_initregs(dd);
3765
3766 if (qib_pcie_params(dd, dd->lbus_width,
3767 &dd->cspec->num_msix_entries))
3768 qib_dev_err(dd,
3769 "Reset failed to setup PCIe or interrupts; continuing anyway\n");
3770
3771 qib_setup_7322_interrupt(dd, 1);
3772
3773 for (i = 0; i < dd->num_pports; ++i) {
3774 struct qib_pportdata *ppd = &dd->pport[i];
3775
3776 spin_lock_irqsave(&ppd->lflags_lock, flags);
3777 ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
3778 ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3779 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3780 }
3781
3782bail:
3783 dd->flags &= ~QIB_DOING_RESET;
3784 kfree(msix_vecsave);
3785 return ret;
3786}
3787
/**
 * qib_7322_put_tid - write a TID to the chip
 * @dd: the qlogic_ib device
 * @tidptr: pointer to the expected TID (in chip) to update
 * @type: 0 for eager, 1 for expected
 * @pa: physical address of in memory buffer; tidinvalid if freeing
 */
3795static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
3796 u32 type, unsigned long pa)
3797{
3798 if (!(dd->flags & QIB_PRESENT))
3799 return;
3800 if (pa != dd->tidinvalid) {
3801 u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
3802
		/* paranoia checks */
3804 if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
3805 qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
3806 pa);
3807 return;
3808 }
3809 if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
3810 qib_dev_err(dd,
3811 "Physical page address 0x%lx larger than supported\n",
3812 pa);
3813 return;
3814 }
3815
3816 if (type == RCVHQ_RCV_TYPE_EAGER)
3817 chippa |= dd->tidtemplate;
		else /* for now, always full 4KB page */
3819 chippa |= IBA7322_TID_SZ_4K;
3820 pa = chippa;
3821 }
3822 writeq(pa, tidptr);
3823 mmiowb();
3824}
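
/*
 * Alignment check example for the above, assuming IBA7322_TID_PA_SHIFT
 * is 11 (2KB pages, per the error message): pa = 0x12345800 gives
 * chippa = 0x2468b, and shifting back up reproduces pa, so it passes;
 * pa = 0x12345900 would not round-trip and is rejected.
 */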
3825
/**
 * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
 * @dd: the qlogic_ib device
 * @rcd: the ctxt
 *
 * clear all TID entries for a ctxt, expected and eager.
 * Used from qib_close().
 */
3834static void qib_7322_clear_tids(struct qib_devdata *dd,
3835 struct qib_ctxtdata *rcd)
3836{
3837 u64 __iomem *tidbase;
3838 unsigned long tidinv;
3839 u32 ctxt;
3840 int i;
3841
3842 if (!dd->kregbase || !rcd)
3843 return;
3844
3845 ctxt = rcd->ctxt;
3846
3847 tidinv = dd->tidinvalid;
3848 tidbase = (u64 __iomem *)
3849 ((char __iomem *) dd->kregbase +
3850 dd->rcvtidbase +
3851 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
3852
3853 for (i = 0; i < dd->rcvtidcnt; i++)
3854 qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
3855 tidinv);
3856
3857 tidbase = (u64 __iomem *)
3858 ((char __iomem *) dd->kregbase +
3859 dd->rcvegrbase +
3860 rcd->rcvegr_tid_base * sizeof(*tidbase));
3861
3862 for (i = 0; i < rcd->rcvegrcnt; i++)
3863 qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
3864 tidinv);
3865}
3866
/**
 * qib_7322_tidtemplate - setup constants for TID updates
 * @dd: the qlogic_ib device
 *
 * We setup stuff that we use a lot, to avoid calculating each time
 */
3873static void qib_7322_tidtemplate(struct qib_devdata *dd)
3874{
	/*
	 * For now, we always allocate 4KB buffers (at init) so we can
	 * receive max size packets.  We may want a module parameter to
	 * specify 2KB or 4KB and/or make it per port instead of per device
	 * for those who want to reduce memory footprint.  Note that the
	 * rcvhdrentsize size must be large enough to hold the largest
	 * IB header (currently 96 bytes) that we expect to handle (plus of
	 * course the 2 dwords of RHF).
	 */
3884 if (dd->rcvegrbufsize == 2048)
3885 dd->tidtemplate = IBA7322_TID_SZ_2K;
3886 else if (dd->rcvegrbufsize == 4096)
3887 dd->tidtemplate = IBA7322_TID_SZ_4K;
3888 dd->tidinvalid = 0;
3889}
3890
/**
 * qib_7322_get_base_info - set chip-specific flags for user code
 * @rcd: the qlogic_ib ctxt
 * @kinfo: qib_portdata pointer
 *
 * We set the PCIE flag because the lower bandwidth on PCIe vs
 * HyperTransport can affect some user packet algorithms.
 */
3900static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
3901 struct qib_base_info *kinfo)
3902{
3903 kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
3904 QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
3905 QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
3906 if (rcd->dd->cspec->r1)
3907 kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
3908 if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
3909 kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
3910
3911 return 0;
3912}
3913
3914static struct qib_message_header *
3915qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
3916{
3917 u32 offset = qib_hdrget_offset(rhf_addr);
3918
3919 return (struct qib_message_header *)
3920 (rhf_addr - dd->rhf_offset + offset);
3921}
3922
/*
 * Configure number of contexts.
 */
3926static void qib_7322_config_ctxts(struct qib_devdata *dd)
3927{
3928 unsigned long flags;
3929 u32 nchipctxts;
3930
3931 nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
3932 dd->cspec->numctxts = nchipctxts;
3933 if (qib_n_krcv_queues > 1 && dd->num_pports) {
3934 dd->first_user_ctxt = NUM_IB_PORTS +
3935 (qib_n_krcv_queues - 1) * dd->num_pports;
3936 if (dd->first_user_ctxt > nchipctxts)
3937 dd->first_user_ctxt = nchipctxts;
3938 dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
3939 } else {
3940 dd->first_user_ctxt = NUM_IB_PORTS;
3941 dd->n_krcv_queues = 1;
3942 }
3943
3944 if (!qib_cfgctxts) {
3945 int nctxts = dd->first_user_ctxt + num_online_cpus();
3946
3947 if (nctxts <= 6)
3948 dd->ctxtcnt = 6;
3949 else if (nctxts <= 10)
3950 dd->ctxtcnt = 10;
3951 else if (nctxts <= nchipctxts)
3952 dd->ctxtcnt = nchipctxts;
3953 } else if (qib_cfgctxts < dd->num_pports)
3954 dd->ctxtcnt = dd->num_pports;
3955 else if (qib_cfgctxts <= nchipctxts)
3956 dd->ctxtcnt = qib_cfgctxts;
3957 if (!dd->ctxtcnt)
3958 dd->ctxtcnt = nchipctxts;

	/*
	 * Chip can be configured for 6, 10, or 18 ctxts, and choice
	 * affects number of eager TIDs per ctxt (1K, 2K, 4K).
	 * Lock to be paranoid about later motion, etc.
	 */
3965 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
3966 if (dd->ctxtcnt > 10)
3967 dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
3968 else if (dd->ctxtcnt > 6)
3969 dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
	/* else configure for default 6 receive ctxts */

	/* The XRC opcode is 5. */
3973 dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
3974
	/*
	 * RcvCtrl *must* be written here so that the
	 * chip understands how to change rcvegrcnt below.
	 */
3979 qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
3980 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
3981
	/* kr_rcvegrcnt changes based on the number of contexts enabled */
3983 dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
3984 if (qib_rcvhdrcnt)
3985 dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
3986 else
3987 dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt,
3988 dd->num_pports > 1 ? 1024U : 2048U);
3989}
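
/*
 * Worked example for the sizing above (assuming NUM_IB_PORTS == 2 for
 * this dual-port chip): with qib_n_krcv_queues == 4 on a dual-port board,
 * first_user_ctxt = 2 + 3 * 2 = 8 and n_krcv_queues = 8 / 2 = 4.  With
 * qib_cfgctxts unset, 18 chip contexts, and 8 online CPUs, nctxts =
 * 8 + 8 = 16, which exceeds 10, so all 18 chip contexts are configured.
 */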
3990
3991static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
3992{
3993
3994 int lsb, ret = 0;
3995 u64 maskr;
3996
3997 switch (which) {
3998
3999 case QIB_IB_CFG_LWID_ENB:
4000 ret = ppd->link_width_enabled;
4001 goto done;
4002
4003 case QIB_IB_CFG_LWID:
4004 ret = ppd->link_width_active;
4005 goto done;
4006
4007 case QIB_IB_CFG_SPD_ENB:
4008 ret = ppd->link_speed_enabled;
4009 goto done;
4010
4011 case QIB_IB_CFG_SPD:
4012 ret = ppd->link_speed_active;
4013 goto done;
4014
4015 case QIB_IB_CFG_RXPOL_ENB:
4016 lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4017 maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4018 break;
4019
4020 case QIB_IB_CFG_LREV_ENB:
4021 lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4022 maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4023 break;
4024
4025 case QIB_IB_CFG_LINKLATENCY:
4026 ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
4027 SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
4028 goto done;
4029
4030 case QIB_IB_CFG_OP_VLS:
4031 ret = ppd->vls_operational;
4032 goto done;
4033
4034 case QIB_IB_CFG_VL_HIGH_CAP:
4035 ret = 16;
4036 goto done;
4037
4038 case QIB_IB_CFG_VL_LOW_CAP:
4039 ret = 16;
4040 goto done;
4041
4042 case QIB_IB_CFG_OVERRUN_THRESH:
4043 ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4044 OverrunThreshold);
4045 goto done;
4046
4047 case QIB_IB_CFG_PHYERR_THRESH:
4048 ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4049 PhyerrThreshold);
4050 goto done;
4051
4052 case QIB_IB_CFG_LINKDEFAULT:
4053
4054 ret = (ppd->cpspec->ibcctrl_a &
4055 SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
4056 IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
4057 goto done;
4058
4059 case QIB_IB_CFG_HRTBT:
4060 lsb = IBA7322_IBC_HRTBT_LSB;
4061 maskr = IBA7322_IBC_HRTBT_RMASK;
4062 break;
4063
4064 case QIB_IB_CFG_PMA_TICKS:
		/*
		 * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs
		 * Since the clock is always 250MHz, the value is 3, 1 or 0.
		 */
4069 if (ppd->link_speed_active == QIB_IB_QDR)
4070 ret = 3;
4071 else if (ppd->link_speed_active == QIB_IB_DDR)
4072 ret = 1;
4073 else
4074 ret = 0;
4075 goto done;
4076
4077 default:
4078 ret = -EINVAL;
4079 goto done;
4080 }
4081 ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
4082done:
4083 return ret;
4084}
4085
/*
 * Below again cribbed liberally from older version. Do not lean
 * heavily on it.
 */
4090#define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
4091#define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
4092 | (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
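
/*
 * Packing example for QIB_IB_CFG_LIDLMC below: the caller is assumed to
 * put the LMC-derived mask in the upper 16 bits and the LID in the lower
 * 16, e.g. LID 0x1234 with LMC 2 arrives as (0xfffc << 16) | 0x1234, so
 * krp_sendslid gets 0x1234 & 0xfffc and krp_sendslidmask gets 0xfffc
 * (both further limited by the chip's field masks).
 */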
4093
4094static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
4095{
4096 struct qib_devdata *dd = ppd->dd;
4097 u64 maskr;
4098 int lsb, ret = 0;
4099 u16 lcmd, licmd;
4100 unsigned long flags;
4101
4102 switch (which) {
4103 case QIB_IB_CFG_LIDLMC:
		/*
		 * Set LID and LMC.  Combined to avoid possible hazard;
		 * caller puts the LMC-derived mask in the 16 MSbits,
		 * DLID in the 16 LSbits of val.
		 */
4108 lsb = IBA7322_IBC_DLIDLMC_SHIFT;
4109 maskr = IBA7322_IBC_DLIDLMC_MASK;

		/*
		 * For header-checking, the SLID in the packet will
		 * be masked with SendIBSLMCMask, and compared
		 * with SendIBSLIDAssignMask.  Make sure we do not
		 * set any bits not covered by the mask, or we get
		 * false-positives.
		 */
4117 qib_write_kreg_port(ppd, krp_sendslid,
4118 val & (val >> 16) & SendIBSLIDAssignMask);
4119 qib_write_kreg_port(ppd, krp_sendslidmask,
4120 (val >> 16) & SendIBSLMCMask);
4121 break;
4122
4123 case QIB_IB_CFG_LWID_ENB:
4124 ppd->link_width_enabled = val;
4125
4126 if (val == IB_WIDTH_1X)
4127 val = 0;
4128 else if (val == IB_WIDTH_4X)
4129 val = 1;
4130 else
4131 val = 3;
4132 maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
4133 lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
4134 break;
4135
4136 case QIB_IB_CFG_SPD_ENB:
		/*
		 * As with width, only write the actual register if the
		 * link is currently down, otherwise takes effect on next
		 * link change.  Since setting is being explicitly requested
		 * (via MAD or sysfs), clear autoneg failure status if speed
		 * autoneg is enabled.
		 */
4144 ppd->link_speed_enabled = val;
4145 val <<= IBA7322_IBC_SPEED_LSB;
4146 maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
4147 IBA7322_IBC_MAX_SPEED_MASK;
4148 if (val & (val - 1)) {
			/* Multiple speeds enabled */
4150 val |= IBA7322_IBC_IBTA_1_2_MASK |
4151 IBA7322_IBC_MAX_SPEED_MASK;
4152 spin_lock_irqsave(&ppd->lflags_lock, flags);
4153 ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
4154 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
4155 } else if (val & IBA7322_IBC_SPEED_QDR)
4156 val |= IBA7322_IBC_IBTA_1_2_MASK;
		/* IBTA 1.2 mode + speed bits are contiguous */
4158 lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
4159 break;
4160
4161 case QIB_IB_CFG_RXPOL_ENB:
4162 lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4163 maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4164 break;
4165
4166 case QIB_IB_CFG_LREV_ENB:
4167 lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4168 maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4169 break;
4170
4171 case QIB_IB_CFG_OVERRUN_THRESH:
4172 maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4173 OverrunThreshold);
4174 if (maskr != val) {
4175 ppd->cpspec->ibcctrl_a &=
4176 ~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
4177 ppd->cpspec->ibcctrl_a |= (u64) val <<
4178 SYM_LSB(IBCCtrlA_0, OverrunThreshold);
4179 qib_write_kreg_port(ppd, krp_ibcctrl_a,
4180 ppd->cpspec->ibcctrl_a);
4181 qib_write_kreg(dd, kr_scratch, 0ULL);
4182 }
4183 goto bail;
4184
4185 case QIB_IB_CFG_PHYERR_THRESH:
4186 maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4187 PhyerrThreshold);
4188 if (maskr != val) {
4189 ppd->cpspec->ibcctrl_a &=
4190 ~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
4191 ppd->cpspec->ibcctrl_a |= (u64) val <<
4192 SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
4193 qib_write_kreg_port(ppd, krp_ibcctrl_a,
4194 ppd->cpspec->ibcctrl_a);
4195 qib_write_kreg(dd, kr_scratch, 0ULL);
4196 }
4197 goto bail;
4198
4199 case QIB_IB_CFG_PKEYS:
4200 maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
4201 ((u64) ppd->pkeys[2] << 32) |
4202 ((u64) ppd->pkeys[3] << 48);
4203 qib_write_kreg_port(ppd, krp_partitionkey, maskr);
4204 goto bail;
4205
4206 case QIB_IB_CFG_LINKDEFAULT:
/* set the IBC link-down default state (poll vs. sleep) */
4208 if (val == IB_LINKINITCMD_POLL)
4209 ppd->cpspec->ibcctrl_a &=
4210 ~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
4211 else
4212 ppd->cpspec->ibcctrl_a |=
4213 SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
4214 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
4215 qib_write_kreg(dd, kr_scratch, 0ULL);
4216 goto bail;
4217
4218 case QIB_IB_CFG_MTU:
/*
 * Update the IBC maximum packet length; ibmaxlen is in bytes,
 * the chip field is in dwords, plus 1 for the ICRC.  Set it
 * even if unchanged, since the write is cheap.
 */
4226 val = (ppd->ibmaxlen >> 2) + 1;
4227 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
4228 ppd->cpspec->ibcctrl_a |= (u64)val <<
4229 SYM_LSB(IBCCtrlA_0, MaxPktLen);
4230 qib_write_kreg_port(ppd, krp_ibcctrl_a,
4231 ppd->cpspec->ibcctrl_a);
4232 qib_write_kreg(dd, kr_scratch, 0ULL);
4233 goto bail;
4234
4235 case QIB_IB_CFG_LSTATE:
4236 switch (val & 0xffff0000) {
4237 case IB_LINKCMD_DOWN:
4238 lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
4239 ppd->cpspec->ibmalfusesnap = 1;
4240 ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
4241 crp_errlink);
4242 if (!ppd->cpspec->ibdeltainprog &&
4243 qib_compat_ddr_negotiate) {
4244 ppd->cpspec->ibdeltainprog = 1;
4245 ppd->cpspec->ibsymsnap =
4246 read_7322_creg32_port(ppd,
4247 crp_ibsymbolerr);
4248 ppd->cpspec->iblnkerrsnap =
4249 read_7322_creg32_port(ppd,
4250 crp_iblinkerrrecov);
4251 }
4252 break;
4253
4254 case IB_LINKCMD_ARMED:
4255 lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
4256 if (ppd->cpspec->ibmalfusesnap) {
4257 ppd->cpspec->ibmalfusesnap = 0;
4258 ppd->cpspec->ibmalfdelta +=
4259 read_7322_creg32_port(ppd,
4260 crp_errlink) -
4261 ppd->cpspec->ibmalfsnap;
4262 }
4263 break;
4264
4265 case IB_LINKCMD_ACTIVE:
4266 lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
4267 break;
4268
4269 default:
4270 ret = -EINVAL;
4271 qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
4272 goto bail;
4273 }
4274 switch (val & 0xffff) {
4275 case IB_LINKINITCMD_NOP:
4276 licmd = 0;
4277 break;
4278
4279 case IB_LINKINITCMD_POLL:
4280 licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
4281 break;
4282
4283 case IB_LINKINITCMD_SLEEP:
4284 licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
4285 break;
4286
4287 case IB_LINKINITCMD_DISABLE:
4288 licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
4289 ppd->cpspec->chase_end = 0;
/*
 * Stop the state-chase counter and timer, if running;
 * wait for any pending timer, but don't clear the timer
 * data, since it is used later.
 */
4294 if (ppd->cpspec->chase_timer.expires) {
4295 del_timer_sync(&ppd->cpspec->chase_timer);
4296 ppd->cpspec->chase_timer.expires = 0;
4297 }
4298 break;
4299
4300 default:
4301 ret = -EINVAL;
4302 qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
4303 val & 0xffff);
4304 goto bail;
4305 }
4306 qib_set_ib_7322_lstate(ppd, lcmd, licmd);
4307 goto bail;
4308
4309 case QIB_IB_CFG_OP_VLS:
4310 if (ppd->vls_operational != val) {
4311 ppd->vls_operational = val;
4312 set_vls(ppd);
4313 }
4314 goto bail;
4315
4316 case QIB_IB_CFG_VL_HIGH_LIMIT:
4317 qib_write_kreg_port(ppd, krp_highprio_limit, val);
4318 goto bail;
4319
4320 case QIB_IB_CFG_HRTBT:
4321 if (val > 3) {
4322 ret = -EINVAL;
4323 goto bail;
4324 }
4325 lsb = IBA7322_IBC_HRTBT_LSB;
4326 maskr = IBA7322_IBC_HRTBT_RMASK;
4327 break;
4328
4329 case QIB_IB_CFG_PORT:
/* rev1 chips: cancel any in-progress IPG (inter-packet gap) work */
4331 if (ppd->dd->cspec->r1) {
4332 cancel_delayed_work(&ppd->cpspec->ipg_work);
4333 ppd->cpspec->ipg_tries = 0;
4334 }
4335 goto bail;
4336
4337 default:
4338 ret = -EINVAL;
4339 goto bail;
4340 }
4341 ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
4342 ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
4343 qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
4344 qib_write_kreg(dd, kr_scratch, 0);
4345bail:
4346 return ret;
4347}
4348
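/*
 * Enable or disable IBC loopback for the port, selected by the "what"
 * string ("ibc" or "off"); heartbeat is disabled while in loopback and
 * re-enabled when loopback is turned off.
 */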
4349static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
4350{
4351 int ret = 0;
4352 u64 val, ctrlb;
4353
/* only IBC loopback, may add serdes and xgxs loopbacks later */
4355 if (!strncmp(what, "ibc", 3)) {
4356 ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
4357 Loopback);
val = 0; /* disable heart beat, so link will come up */
4359 qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
4360 ppd->dd->unit, ppd->port);
4361 } else if (!strncmp(what, "off", 3)) {
4362 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
4363 Loopback);
/* enable heart beat again */
4365 val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
4366 qib_devinfo(ppd->dd->pcidev,
4367 "Disabling IB%u:%u IBC loopback (normal)\n",
4368 ppd->dd->unit, ppd->port);
4369 } else
4370 ret = -EINVAL;
4371 if (!ret) {
4372 qib_write_kreg_port(ppd, krp_ibcctrl_a,
4373 ppd->cpspec->ibcctrl_a);
4374 ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
4375 << IBA7322_IBC_HRTBT_LSB);
4376 ppd->cpspec->ibcctrl_b = ctrlb | val;
4377 qib_write_kreg_port(ppd, krp_ibcctrl_b,
4378 ppd->cpspec->ibcctrl_b);
4379 qib_write_kreg(ppd->dd, kr_scratch, 0);
4380 }
4381 return ret;
4382}
4383
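/* read the 16 VL-arbitration table entries starting at register regno */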
4384static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4385 struct ib_vl_weight_elem *vl)
4386{
4387 unsigned i;
4388
4389 for (i = 0; i < 16; i++, regno++, vl++) {
4390 u32 val = qib_read_kreg_port(ppd, regno);
4391
4392 vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
4393 SYM_RMASK(LowPriority0_0, VirtualLane);
4394 vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
4395 SYM_RMASK(LowPriority0_0, Weight);
4396 }
4397}
4398
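/*
 * Write the 16 VL-arbitration table entries starting at register regno,
 * and make sure the VL arbiter is enabled afterwards.
 */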
4399static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4400 struct ib_vl_weight_elem *vl)
4401{
4402 unsigned i;
4403
4404 for (i = 0; i < 16; i++, regno++, vl++) {
4405 u64 val;
4406
4407 val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
4408 SYM_LSB(LowPriority0_0, VirtualLane)) |
4409 ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
4410 SYM_LSB(LowPriority0_0, Weight));
4411 qib_write_kreg_port(ppd, regno, val);
4412 }
4413 if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
4414 struct qib_devdata *dd = ppd->dd;
4415 unsigned long flags;
4416
4417 spin_lock_irqsave(&dd->sendctrl_lock, flags);
4418 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
4419 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4420 qib_write_kreg(dd, kr_scratch, 0);
4421 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4422 }
4423}
4424
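/* fetch the high- or low-priority VL arbitration table for the port */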
4425static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
4426{
4427 switch (which) {
4428 case QIB_IB_TBL_VL_HIGH_ARB:
4429 get_vl_weights(ppd, krp_highprio_0, t);
4430 break;
4431
4432 case QIB_IB_TBL_VL_LOW_ARB:
4433 get_vl_weights(ppd, krp_lowprio_0, t);
4434 break;
4435
4436 default:
4437 return -EINVAL;
4438 }
4439 return 0;
4440}
4441
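/* store the high- or low-priority VL arbitration table for the port */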
4442static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
4443{
4444 switch (which) {
4445 case QIB_IB_TBL_VL_HIGH_ARB:
4446 set_vl_weights(ppd, krp_highprio_0, t);
4447 break;
4448
4449 case QIB_IB_TBL_VL_LOW_ARB:
4450 set_vl_weights(ppd, krp_lowprio_0, t);
4451 break;
4452
4453 default:
4454 return -EINVAL;
4455 }
4456 return 0;
4457}
4458
4459static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
4460 u32 updegr, u32 egrhd, u32 npkts)
4461{
/*
 * Need to write timeout register before updating rcvhdrhead to
 * ensure that the timer is enabled on reception of a packet.
 */
4466 if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
4467 adjust_rcv_timeout(rcd, npkts);
4468 if (updegr)
4469 qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
4470 mmiowb();
4471 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4472 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4473 mmiowb();
4474}
4475
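/* return non-zero if the receive header queue for this context is empty */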
4476static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
4477{
4478 u32 head, tail;
4479
4480 head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
4481 if (rcd->rcvhdrtail_kvaddr)
4482 tail = qib_get_rcvhdrtail(rcd);
4483 else
4484 tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
4485 return head == tail;
4486}
4487
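/* which rcvctrl operations touch the chip-wide register, vs. per-port */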
4488#define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
4489 QIB_RCVCTRL_CTXT_DIS | \
4490 QIB_RCVCTRL_TIDFLOW_ENB | \
4491 QIB_RCVCTRL_TIDFLOW_DIS | \
4492 QIB_RCVCTRL_TAILUPD_ENB | \
4493 QIB_RCVCTRL_TAILUPD_DIS | \
4494 QIB_RCVCTRL_INTRAVAIL_ENB | \
4495 QIB_RCVCTRL_INTRAVAIL_DIS | \
4496 QIB_RCVCTRL_BP_ENB | \
4497 QIB_RCVCTRL_BP_DIS)
4498
4499#define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
4500 QIB_RCVCTRL_CTXT_DIS | \
4501 QIB_RCVCTRL_PKEY_DIS | \
4502 QIB_RCVCTRL_PKEY_ENB)
4503
/*
 * Modify the RCVCTRL register in chip-specific way. This
 * is a function because bit positions and (future) register
 * location is chip-specific, and the functions are called
 * for the port being operated on, as well as for the device.
 */
4511static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
4512 int ctxt)
4513{
4514 struct qib_devdata *dd = ppd->dd;
4515 struct qib_ctxtdata *rcd;
4516 u64 mask, val;
4517 unsigned long flags;
4518
4519 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
4520
4521 if (op & QIB_RCVCTRL_TIDFLOW_ENB)
4522 dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
4523 if (op & QIB_RCVCTRL_TIDFLOW_DIS)
4524 dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
4525 if (op & QIB_RCVCTRL_TAILUPD_ENB)
4526 dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4527 if (op & QIB_RCVCTRL_TAILUPD_DIS)
4528 dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
4529 if (op & QIB_RCVCTRL_PKEY_ENB)
4530 ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4531 if (op & QIB_RCVCTRL_PKEY_DIS)
4532 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4533 if (ctxt < 0) {
4534 mask = (1ULL << dd->ctxtcnt) - 1;
4535 rcd = NULL;
4536 } else {
4537 mask = (1ULL << ctxt);
4538 rcd = dd->rcd[ctxt];
4539 }
4540 if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
4541 ppd->p_rcvctrl |=
4542 (mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4543 if (!(dd->flags & QIB_NODMA_RTAIL)) {
4544 op |= QIB_RCVCTRL_TAILUPD_ENB;
4545 dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4546 }
/* Write these registers before the context is enabled. */
4548 qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
4549 rcd->rcvhdrqtailaddr_phys);
4550 qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
4551 rcd->rcvhdrq_phys);
4552 rcd->seq_cnt = 1;
4553 }
4554 if (op & QIB_RCVCTRL_CTXT_DIS)
4555 ppd->p_rcvctrl &=
4556 ~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4557 if (op & QIB_RCVCTRL_BP_ENB)
4558 dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
4559 if (op & QIB_RCVCTRL_BP_DIS)
4560 dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
4561 if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
4562 dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
4563 if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
4564 dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));

/*
 * Decide which registers to write depending on the ops enabled.
 * Special case is "flush" (no bits set at all), which needs to
 * write both.
 */
4570 if (op == 0 || (op & RCVCTRL_COMMON_MODS))
4571 qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
4572 if (op == 0 || (op & RCVCTRL_PORT_MODS))
4573 qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
4574 if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
/*
 * Init the context registers also; if we were
 * disabled, tail and head should both be zero
 * already from the enable, but since we don't
 * know, we have to do it explicitly.
 */
4581 val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
4582 qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
/* be sure enabling write seen; hd/tl should be 0 */
4585 (void) qib_read_kreg32(dd, kr_scratch);
4586 val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
4587 dd->rcd[ctxt]->head = val;
/* If a kernel context, interrupt on next receive */
4589 if (ctxt < dd->first_user_ctxt)
4590 val |= dd->rhdrhead_intr_off;
4591 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4592 } else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
4593 dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
/* arm rcv interrupt */
4595 val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
4596 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4597 }
4598 if (op & QIB_RCVCTRL_CTXT_DIS) {
4599 unsigned f;

/* Now that the context is disabled, clear these registers. */
4602 if (ctxt >= 0) {
4603 qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
4604 qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
4605 for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4606 qib_write_ureg(dd, ur_rcvflowtable + f,
4607 TIDFLOW_ERRBITS, ctxt);
4608 } else {
4609 unsigned i;
4610
4611 for (i = 0; i < dd->cfgctxts; i++) {
4612 qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
4613 i, 0);
4614 qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
4615 for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4616 qib_write_ureg(dd, ur_rcvflowtable + f,
4617 TIDFLOW_ERRBITS, i);
4618 }
4619 }
4620 }
4621 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
4622}
4623
/*
 * Modify the SENDCTRL register in chip-specific way. This
 * is a function where there are multiple such registers with
 * slightly different layouts.
 * The chip doesn't allow back-to-back sendctrl writes, so write
 * the scratch register after writing sendctrl.
 *
 * Which register is written depends on the operation.
 * Most operations are port-specific, but some operate
 * on the whole chip, including send-buffer availability.
 */
4636#define SENDCTRL_COMMON_MODS (\
4637 QIB_SENDCTRL_CLEAR | \
4638 QIB_SENDCTRL_AVAIL_DIS | \
4639 QIB_SENDCTRL_AVAIL_ENB | \
4640 QIB_SENDCTRL_AVAIL_BLIP | \
4641 QIB_SENDCTRL_DISARM | \
4642 QIB_SENDCTRL_DISARM_ALL | \
4643 QIB_SENDCTRL_SEND_ENB)
4644
4645#define SENDCTRL_PORT_MODS (\
4646 QIB_SENDCTRL_CLEAR | \
4647 QIB_SENDCTRL_SEND_ENB | \
4648 QIB_SENDCTRL_SEND_DIS | \
4649 QIB_SENDCTRL_FLUSH)
4650
4651static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
4652{
4653 struct qib_devdata *dd = ppd->dd;
4654 u64 tmp_dd_sendctrl;
4655 unsigned long flags;
4656
4657 spin_lock_irqsave(&dd->sendctrl_lock, flags);

/* First the dd ones that are "sticky", saved in shadow */
4660 if (op & QIB_SENDCTRL_CLEAR)
4661 dd->sendctrl = 0;
4662 if (op & QIB_SENDCTRL_AVAIL_DIS)
4663 dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4664 else if (op & QIB_SENDCTRL_AVAIL_ENB) {
4665 dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
4666 if (dd->flags & QIB_USE_SPCL_TRIG)
4667 dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
4668 }

/* Then the ppd ones that are "sticky", saved in shadow */
4671 if (op & QIB_SENDCTRL_SEND_DIS)
4672 ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
4673 else if (op & QIB_SENDCTRL_SEND_ENB)
4674 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
4675
4676 if (op & QIB_SENDCTRL_DISARM_ALL) {
4677 u32 i, last;
4678
4679 tmp_dd_sendctrl = dd->sendctrl;
4680 last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
/*
 * Disarm any buffers that are not yet launched,
 * disabling updates until done.
 */
4685 tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4686 for (i = 0; i < last; i++) {
4687 qib_write_kreg(dd, kr_sendctrl,
4688 tmp_dd_sendctrl |
4689 SYM_MASK(SendCtrl, Disarm) | i);
4690 qib_write_kreg(dd, kr_scratch, 0);
4691 }
4692 }
4693
4694 if (op & QIB_SENDCTRL_FLUSH) {
4695 u64 tmp_ppd_sendctrl = ppd->p_sendctrl;

/*
 * Now drain all the fifos.  The Abort bit should never be
 * needed, so for now, ignore it.
 */
4701 tmp_ppd_sendctrl |=
4702 SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
4703 SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
4704 SYM_MASK(SendCtrl_0, TxeBypassIbc);
4705 qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
4706 qib_write_kreg(dd, kr_scratch, 0);
4707 }
4708
4709 tmp_dd_sendctrl = dd->sendctrl;
4710
4711 if (op & QIB_SENDCTRL_DISARM)
4712 tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
4713 ((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
4714 SYM_LSB(SendCtrl, DisarmSendBuf));
4715 if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
4716 (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
4717 tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4718
4719 if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
4720 qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
4721 qib_write_kreg(dd, kr_scratch, 0);
4722 }
4723
4724 if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
4725 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4726 qib_write_kreg(dd, kr_scratch, 0);
4727 }
4728
4729 if (op & QIB_SENDCTRL_AVAIL_BLIP) {
4730 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
4731 qib_write_kreg(dd, kr_scratch, 0);
4732 }
4733
4734 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4735
4736 if (op & QIB_SENDCTRL_FLUSH) {
4737 u32 v;
/*
 * Read the scratch register to be sure the writes have hit
 * the chip, then do a couple more write/read cycles so the
 * flush has time to complete.
 */
4744 v = qib_read_kreg32(dd, kr_scratch);
4745 qib_write_kreg(dd, kr_scratch, v);
4746 v = qib_read_kreg32(dd, kr_scratch);
4747 qib_write_kreg(dd, kr_scratch, v);
4748 qib_read_kreg32(dd, kr_scratch);
4749 }
4750}
4751
4752#define _PORT_VIRT_FLAG 0x8000U
4753#define _PORT_64BIT_FLAG 0x10000U
4754#define _PORT_CNTR_IDXMASK 0x7fffU
4755
/**
 * qib_portcntr_7322 - read a per-port chip counter
 * @ppd: the qlogic_ib pport
 * @reg: the counter to read (not a chip offset)
 */
4761static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
4762{
4763 struct qib_devdata *dd = ppd->dd;
4764 u64 ret = 0ULL;
4765 u16 creg;
4766
4767 static const u32 xlator[] = {
4768 [QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
4769 [QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
4770 [QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
4771 [QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
4772 [QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
4773 [QIBPORTCNTR_SENDSTALL] = crp_sendstall,
4774 [QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
4775 [QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
4776 [QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
4777 [QIBPORTCNTR_RCVEBP] = crp_rcvebp,
4778 [QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
4779 [QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
4780 [QIBPORTCNTR_RXDROPPKT] = 0xffff,
4781 [QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
4782 [QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
4783 [QIBPORTCNTR_ERRICRC] = crp_erricrc,
4784 [QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
4785 [QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
4786 [QIBPORTCNTR_BADFORMAT] = crp_badformat,
4787 [QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
4788 [QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
4789 [QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
4790 [QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
4791 [QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
4792 [QIBPORTCNTR_ERRLINK] = crp_errlink,
4793 [QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
4794 [QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
4795 [QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
4796 [QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
4797 [QIBPORTCNTR_ERRPKEY] = crp_errpkey,
/*
 * The next 3 aren't really counters, but were implemented
 * as counters in older chips, so still get accessed as
 * though they were counters.
 */
4803 [QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
4804 [QIBPORTCNTR_PSSTART] = krp_psstart,
4805 [QIBPORTCNTR_PSSTAT] = krp_psstat,
/* pseudo-counter, summed for all contexts of this port */
4807 [QIBPORTCNTR_KHDROVFL] = 0xffff,
4808 };
4809
4810 if (reg >= ARRAY_SIZE(xlator)) {
4811 qib_devinfo(ppd->dd->pcidev,
4812 "Unimplemented portcounter %u\n", reg);
4813 goto done;
4814 }
4815 creg = xlator[reg] & _PORT_CNTR_IDXMASK;

/* handle non-counters and special cases first */
4818 if (reg == QIBPORTCNTR_KHDROVFL) {
4819 int i;

/* sum over all kernel contexts that belong to this port */
4822 for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
4823 struct qib_ctxtdata *rcd = dd->rcd[i];
4824
4825 if (!rcd || rcd->ppd != ppd)
4826 continue;
4827 ret += read_7322_creg32(dd, cr_base_egrovfl + i);
4828 }
4829 goto done;
4830 } else if (reg == QIBPORTCNTR_RXDROPPKT) {
/*
 * Used as part of the synthesis of port_rcv_errors
 * in the verbs code for IBTA counters.  Not needed for 7322,
 * because all the errors are already counted by other cntrs.
 */
4836 goto done;
4837 } else if (reg == QIBPORTCNTR_PSINTERVAL ||
4838 reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
/* these are per-port kernel registers, not counter registers */
4840 ret = qib_read_kreg_port(ppd, creg);
4841 goto done;
4842 }
4843
/*
 * Only fast increment counters are 64 bits; use 32 bit reads to
 * avoid two independent reads when on Opteron.
 */
4848 if (xlator[reg] & _PORT_64BIT_FLAG)
4849 ret = read_7322_creg_port(ppd, creg);
4850 else
4851 ret = read_7322_creg32_port(ppd, creg);
4852 if (creg == crp_ibsymbolerr) {
4853 if (ppd->cpspec->ibdeltainprog)
4854 ret -= ret - ppd->cpspec->ibsymsnap;
4855 ret -= ppd->cpspec->ibsymdelta;
4856 } else if (creg == crp_iblinkerrrecov) {
4857 if (ppd->cpspec->ibdeltainprog)
4858 ret -= ret - ppd->cpspec->iblnkerrsnap;
4859 ret -= ppd->cpspec->iblnkerrdelta;
4860 } else if (creg == crp_errlink)
4861 ret -= ppd->cpspec->ibmalfdelta;
4862 else if (creg == crp_iblinkdown)
4863 ret += ppd->cpspec->iblnkdowndelta;
4864done:
4865 return ret;
4866}
4867
/*
 * Device-wide counter names, newline-separated so they can be returned
 * one name per line; the order must match cntr7322indices[] below.
 * The trailing per-context EgrOvfl entries are trimmed at init time to
 * the number of configured contexts.
 */
4881static const char cntr7322names[] =
4882 "Interrupts\n"
4883 "HostBusStall\n"
4884 "E RxTIDFull\n"
4885 "RxTIDInvalid\n"
4886 "RxTIDFloDrop\n"
4887 "Ctxt0EgrOvfl\n"
4888 "Ctxt1EgrOvfl\n"
4889 "Ctxt2EgrOvfl\n"
4890 "Ctxt3EgrOvfl\n"
4891 "Ctxt4EgrOvfl\n"
4892 "Ctxt5EgrOvfl\n"
4893 "Ctxt6EgrOvfl\n"
4894 "Ctxt7EgrOvfl\n"
4895 "Ctxt8EgrOvfl\n"
4896 "Ctxt9EgrOvfl\n"
4897 "Ctx10EgrOvfl\n"
4898 "Ctx11EgrOvfl\n"
4899 "Ctx12EgrOvfl\n"
4900 "Ctx13EgrOvfl\n"
4901 "Ctx14EgrOvfl\n"
4902 "Ctx15EgrOvfl\n"
4903 "Ctx16EgrOvfl\n"
4904 "Ctx17EgrOvfl\n"
4905 ;
4906
4907static const u32 cntr7322indices[] = {
4908 cr_lbint | _PORT_64BIT_FLAG,
4909 cr_lbstall | _PORT_64BIT_FLAG,
4910 cr_tidfull,
4911 cr_tidinvalid,
4912 cr_rxtidflowdrop,
4913 cr_base_egrovfl + 0,
4914 cr_base_egrovfl + 1,
4915 cr_base_egrovfl + 2,
4916 cr_base_egrovfl + 3,
4917 cr_base_egrovfl + 4,
4918 cr_base_egrovfl + 5,
4919 cr_base_egrovfl + 6,
4920 cr_base_egrovfl + 7,
4921 cr_base_egrovfl + 8,
4922 cr_base_egrovfl + 9,
4923 cr_base_egrovfl + 10,
4924 cr_base_egrovfl + 11,
4925 cr_base_egrovfl + 12,
4926 cr_base_egrovfl + 13,
4927 cr_base_egrovfl + 14,
4928 cr_base_egrovfl + 15,
4929 cr_base_egrovfl + 16,
4930 cr_base_egrovfl + 17,
4931};
4932
/*
 * Same as cntr7322names and cntr7322indices, but for port-specific
 * counters.  Entries that need translation through qib_portcntr_7322()
 * are ORed with _PORT_VIRT_FLAG in portcntr7322indices[].
 */
4938static const char portcntr7322names[] =
4939 "TxPkt\n"
4940 "TxFlowPkt\n"
4941 "TxWords\n"
4942 "RxPkt\n"
4943 "RxFlowPkt\n"
4944 "RxWords\n"
4945 "TxFlowStall\n"
4946 "TxDmaDesc\n"
4947 "E RxDlidFltr\n"
4948 "IBStatusChng\n"
4949 "IBLinkDown\n"
4950 "IBLnkRecov\n"
4951 "IBRxLinkErr\n"
4952 "IBSymbolErr\n"
4953 "RxLLIErr\n"
4954 "RxBadFormat\n"
4955 "RxBadLen\n"
4956 "RxBufOvrfl\n"
4957 "RxEBP\n"
4958 "RxFlowCtlErr\n"
4959 "RxICRCerr\n"
4960 "RxLPCRCerr\n"
4961 "RxVCRCerr\n"
4962 "RxInvalLen\n"
4963 "RxInvalPKey\n"
4964 "RxPktDropped\n"
4965 "TxBadLength\n"
4966 "TxDropped\n"
4967 "TxInvalLen\n"
4968 "TxUnderrun\n"
4969 "TxUnsupVL\n"
4970 "RxLclPhyErr\n"
4971 "RxVL15Drop\n"
4972 "RxVlErr\n"
4973 "XcessBufOvfl\n"
4974 "RxQPBadCtxt\n"
4975 "TXBadHeader\n"
4976 ;
4977
4978static const u32 portcntr7322indices[] = {
4979 QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
4980 crp_pktsendflow,
4981 QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
4982 QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
4983 crp_pktrcvflowctrl,
4984 QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
4985 QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
4986 crp_txsdmadesc | _PORT_64BIT_FLAG,
4987 crp_rxdlidfltr,
4988 crp_ibstatuschange,
4989 QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
4990 QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
4991 QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
4992 QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
4993 QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
4994 QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
4995 QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
4996 QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
4997 QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
4998 crp_rcvflowctrlviol,
4999 QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
5000 QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
5001 QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
5002 QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
5003 QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
5004 QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
5005 crp_txminmaxlenerr,
5006 crp_txdroppedpkt,
5007 crp_txlenerr,
5008 crp_txunderrun,
5009 crp_txunsupvl,
5010 QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
5011 QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
5012 QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
5013 QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
5014 crp_rxqpinvalidctxt,
5015 crp_txhdrerr,
5016};
5017
/* do all the setup to make the counter reads efficient later */
5019static void init_7322_cntrnames(struct qib_devdata *dd)
5020{
5021 int i, j = 0;
5022 char *s;
5023
5024 for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
5025 i++) {
/* start counting contexts once the first EgrOvfl name is seen */
5027 if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
5028 j = 1;
5029 s = strchr(s + 1, '\n');
5030 if (s && j)
5031 j++;
5032 }
5033 dd->cspec->ncntrs = i;
5034 if (!s)
/* every name fit; use the full string length (less the NUL) */
5036 dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
5037 else
5038 dd->cspec->cntrnamelen = 1 + s - cntr7322names;
5039 dd->cspec->cntrs = kmalloc_array(dd->cspec->ncntrs, sizeof(u64),
5040 GFP_KERNEL);
5041
5042 for (i = 0, s = (char *)portcntr7322names; s; i++)
5043 s = strchr(s + 1, '\n');
5044 dd->cspec->nportcntrs = i - 1;
5045 dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
5046 for (i = 0; i < dd->num_pports; ++i) {
5047 dd->pport[i].cpspec->portcntrs =
5048 kmalloc_array(dd->cspec->nportcntrs, sizeof(u64),
5049 GFP_KERNEL);
5050 }
5051}
5052
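/*
 * Return the counter names (if namep) or the current counter values
 * (if cntrp) for the device-wide counters; returns the byte count,
 * or 0 when pos is already past the end.
 */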
5053static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
5054 u64 **cntrp)
5055{
5056 u32 ret;
5057
5058 if (namep) {
5059 ret = dd->cspec->cntrnamelen;
5060 if (pos >= ret)
5061 ret = 0;
5062 else
5063 *namep = (char *) cntr7322names;
5064 } else {
5065 u64 *cntr = dd->cspec->cntrs;
5066 int i;
5067
5068 ret = dd->cspec->ncntrs * sizeof(u64);
5069 if (!cntr || pos >= ret) {
/* everything read, or couldn't get memory */
5071 ret = 0;
5072 goto done;
5073 }
5074 *cntrp = cntr;
5075 for (i = 0; i < dd->cspec->ncntrs; i++)
5076 if (cntr7322indices[i] & _PORT_64BIT_FLAG)
5077 *cntr++ = read_7322_creg(dd,
5078 cntr7322indices[i] &
5079 _PORT_CNTR_IDXMASK);
5080 else
5081 *cntr++ = read_7322_creg32(dd,
5082 cntr7322indices[i]);
5083 }
5084done:
5085 return ret;
5086}
5087
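/* same as qib_read_7322cntrs(), but for the per-port counters */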
5088static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
5089 char **namep, u64 **cntrp)
5090{
5091 u32 ret;
5092
5093 if (namep) {
5094 ret = dd->cspec->portcntrnamelen;
5095 if (pos >= ret)
5096 ret = 0;
5097 else
5098 *namep = (char *)portcntr7322names;
5099 } else {
5100 struct qib_pportdata *ppd = &dd->pport[port];
5101 u64 *cntr = ppd->cpspec->portcntrs;
5102 int i;
5103
5104 ret = dd->cspec->nportcntrs * sizeof(u64);
5105 if (!cntr || pos >= ret) {
/* everything read, or couldn't get memory */
5107 ret = 0;
5108 goto done;
5109 }
5110 *cntrp = cntr;
5111 for (i = 0; i < dd->cspec->nportcntrs; i++) {
5112 if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
5113 *cntr++ = qib_portcntr_7322(ppd,
5114 portcntr7322indices[i] &
5115 _PORT_CNTR_IDXMASK);
5116 else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
5117 *cntr++ = read_7322_creg_port(ppd,
5118 portcntr7322indices[i] &
5119 _PORT_CNTR_IDXMASK);
5120 else
5121 *cntr++ = read_7322_creg32_port(ppd,
5122 portcntr7322indices[i]);
5123 }
5124 }
5125done:
5126 return ret;
5127}
5128
/**
 * qib_get_7322_faststats - get word counters from chip before they overflow
 * @t: the stats timer
 *
 * The 7322 has no "small fast counters", so the main job here is to
 * maintain the activity timer from the send/receive word counts, and
 * to end the QDR DFE adaptation window when its time expires.
 */
5140static void qib_get_7322_faststats(struct timer_list *t)
5141{
5142 struct qib_devdata *dd = from_timer(dd, t, stats_timer);
5143 struct qib_pportdata *ppd;
5144 unsigned long flags;
5145 u64 traffic_wds;
5146 int pidx;
5147
5148 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5149 ppd = dd->pport + pidx;

/*
 * If port isn't enabled or not operational ports, or
 * diags is running (can cause memory diags to fail)
 * skip this port this time.
 */
5156 if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
5157 || dd->diag_client)
5158 continue;
5159
/*
 * Maintain an activity timer, based on traffic
 * exceeding a threshold, so we need to check the word-counts
 * even if they are 64-bit.
 */
5165 traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
5166 qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
5167 spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
5168 traffic_wds -= ppd->dd->traffic_wds;
5169 ppd->dd->traffic_wds += traffic_wds;
5170 spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
5171 if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
5172 QIB_IB_QDR) &&
5173 (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
5174 QIBL_LINKACTIVE)) &&
5175 ppd->cpspec->qdr_dfe_time &&
5176 time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) {
5177 ppd->cpspec->qdr_dfe_on = 0;
/* window expired; revert to the static QDR adaptation settings */
5179 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
5180 ppd->dd->cspec->r1 ?
5181 QDR_STATIC_ADAPT_INIT_R1 :
5182 QDR_STATIC_ADAPT_INIT);
5183 force_h1(ppd);
5184 }
5185 }
5186 mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
5187}
5188
/*
 * If we were using MSIx, try to fallback to INTx.
 */
5192static int qib_7322_intr_fallback(struct qib_devdata *dd)
5193{
5194 if (!dd->cspec->num_msix_entries)
5195 return 0;
5196
5197 qib_devinfo(dd->pcidev,
5198 "MSIx interrupt not detected, trying INTx interrupts\n");
5199 qib_7322_nomsix(dd);
5200 qib_enable_intx(dd);
5201 qib_setup_7322_interrupt(dd, 0);
5202 return 1;
5203}
5204
/*
 * Reset the XGXS (between serdes and IBC).  Slightly less intrusive
 * than resetting the IBC or external link state, and useful in some
 * cases to cause some retraining.  To do this right, we reset IBC
 * as well, then return to previous state (which may be still in reset)
 * NOTE: some callers of this "know" this writes the current value
 * of cpspec->ibcctrl_a as part of its operation, so if that changes,
 * check both.
 */
5214static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
5215{
5216 u64 val;
5217 struct qib_devdata *dd = ppd->dd;
5218 const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
5219 SYM_MASK(IBPCSConfig_0, xcv_treset) |
5220 SYM_MASK(IBPCSConfig_0, tx_rx_reset);
5221
5222 val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
5223 qib_write_kreg(dd, kr_hwerrmask,
5224 dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
5225 qib_write_kreg_port(ppd, krp_ibcctrl_a,
5226 ppd->cpspec->ibcctrl_a &
5227 ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
5228
5229 qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
5230 qib_read_kreg32(dd, kr_scratch);
5231 qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
5232 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
5233 qib_write_kreg(dd, kr_scratch, 0ULL);
5234 qib_write_kreg(dd, kr_hwerrclear,
5235 SYM_MASK(HwErrClear, statusValidNoEopClear));
5236 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
5237}
5238
/*
 * This code for non-IBTA-compliant IB speed negotiation is only known to
 * work for the SDR to DDR transition, and only between an HCA and a switch
 * with recent firmware.  It is based on observed heuristics, rather than
 * actual knowledge of the non-compliant speed negotiation.
 * It has a number of hard-coded fields, since the hope is to rewrite this
 * when a spec is available on how the negotiation is intended to work.
 */
5247static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
5248 u32 dcnt, u32 *data)
5249{
5250 int i;
5251 u64 pbc;
5252 u32 __iomem *piobuf;
5253 u32 pnum, control, len;
5254 struct qib_devdata *dd = ppd->dd;
5255
5256 i = 0;
5257 len = 7 + dcnt + 1;
5258 control = qib_7322_setpbc_control(ppd, len, 0, 15);
5259 pbc = ((u64) control << 32) | len;
5260 while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
5261 if (i++ > 15)
5262 return;
5263 udelay(2);
5264 }
/* disable header check on this packet, since it can't be valid */
5266 dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
5267 writeq(pbc, piobuf);
5268 qib_flush_wc();
5269 qib_pio_copy(piobuf + 2, hdr, 7);
5270 qib_pio_copy(piobuf + 9, data, dcnt);
5271 if (dd->flags & QIB_USE_SPCL_TRIG) {
5272 u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
5273
5274 qib_flush_wc();
5275 __raw_writel(0xaebecede, piobuf + spcl_off);
5276 }
5277 qib_flush_wc();
5278 qib_sendbuf_done(dd, pnum);
/* and re-enable hdr check */
5280 dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
5281}
5282
/*
 * _start packet gets sent twice at start, _done gets sent twice at end
 */
5286static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
5287{
5288 struct qib_devdata *dd = ppd->dd;
5289 static u32 swapped;
5290 u32 dw, i, hcnt, dcnt, *data;
5291 static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
5292 static u32 madpayload_start[0x40] = {
5293 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5294 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5295 0x1, 0x1388, 0x15e, 0x1,
5296 };
5297 static u32 madpayload_done[0x40] = {
5298 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5299 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5300 0x40000001, 0x1388, 0x15e,
5301 };
5302
5303 dcnt = ARRAY_SIZE(madpayload_start);
5304 hcnt = ARRAY_SIZE(hdr);
5305 if (!swapped) {
/* byte-swap to wire order once, at runtime, for maintainability */
5307 for (i = 0; i < hcnt; i++) {
5308 dw = (__force u32) cpu_to_be32(hdr[i]);
5309 hdr[i] = dw;
5310 }
5311 for (i = 0; i < dcnt; i++) {
5312 dw = (__force u32) cpu_to_be32(madpayload_start[i]);
5313 madpayload_start[i] = dw;
5314 dw = (__force u32) cpu_to_be32(madpayload_done[i]);
5315 madpayload_done[i] = dw;
5316 }
5317 swapped = 1;
5318 }
5319
5320 data = which ? madpayload_done : madpayload_start;
5321
5322 autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5323 qib_read_kreg64(dd, kr_scratch);
5324 udelay(2);
5325 autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5326 qib_read_kreg64(dd, kr_scratch);
5327 udelay(2);
5328}
5329
/*
 * Do the absolute minimum to cause an IB speed change, and make it
 * ready, but don't actually trigger the change.  The caller will
 * do that when ready (if link is in Polling training state, it will
 * happen immediately, otherwise when link next goes down).
 *
 * This routine should only be used as part of the DDR autonegotiation
 * code for devices that are not compliant with IB 1.2 (or code that
 * fixes things up for same).
 */
5344static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
5345{
5346 u64 newctrlb;
5347
5348 newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
5349 IBA7322_IBC_IBTA_1_2_MASK |
5350 IBA7322_IBC_MAX_SPEED_MASK);
5351
5352 if (speed & (speed - 1))
5353 newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
5354 IBA7322_IBC_IBTA_1_2_MASK |
5355 IBA7322_IBC_MAX_SPEED_MASK;
5356 else
5357 newctrlb |= speed == QIB_IB_QDR ?
5358 IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
5359 ((speed == QIB_IB_DDR ?
5360 IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));
5361
5362 if (newctrlb == ppd->cpspec->ibcctrl_b)
5363 return;
5364
5365 ppd->cpspec->ibcctrl_b = newctrlb;
5366 qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
5367 qib_write_kreg(ppd->dd, kr_scratch, 0);
5368}
5369
/*
 * This routine is only used when we are not talking to another
 * IB 1.2-compliant device that we think can do DDR.
 * (This includes all existing switch chips as of Oct 2007.)
 * 1.2-compliant devices go directly to DDR prior to reaching INIT.
 */
5376static void try_7322_autoneg(struct qib_pportdata *ppd)
5377{
5378 unsigned long flags;
5379
5380 spin_lock_irqsave(&ppd->lflags_lock, flags);
5381 ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
5382 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5383 qib_autoneg_7322_send(ppd, 0);
5384 set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5385 qib_7322_mini_pcs_reset(ppd);
/* 2 msec is minimum length of a poll cycle */
5387 queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
5388 msecs_to_jiffies(2));
5389}
5390
/*
 * Handle the empirically determined mechanism for auto-negotiation
 * of DDR speed with switches.
 */
5395static void autoneg_7322_work(struct work_struct *work)
5396{
5397 struct qib_pportdata *ppd;
5398 struct qib_devdata *dd;
5399 u64 startms;
5400 u32 i;
5401 unsigned long flags;
5402
5403 ppd = container_of(work, struct qib_chippport_specific,
5404 autoneg_work.work)->ppd;
5405 dd = ppd->dd;
5406
5407 startms = jiffies_to_msecs(jiffies);

/*
 * Busy-wait for the first part; it should take at most a few
 * hundred usec for the link to reach the POLL quiet state.
 */
5413 for (i = 0; i < 25; i++) {
5414 if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
5415 == IB_7322_LT_STATE_POLLQUIET) {
5416 qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
5417 break;
5418 }
5419 udelay(100);
5420 }
5421
5422 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
5423 goto done;

/* we expect this to timeout */
5426 if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5427 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5428 msecs_to_jiffies(90)))
5429 goto done;
5430 qib_7322_mini_pcs_reset(ppd);

/* we expect this to timeout */
5433 if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5434 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5435 msecs_to_jiffies(1700)))
5436 goto done;
5437 qib_7322_mini_pcs_reset(ppd);
5438
5439 set_7322_ibspeed_fast(ppd, QIB_IB_SDR);

/*
 * Wait up to 250 msec for link to train and get to INIT at DDR;
 * this should terminate early.
 */
5445 wait_event_timeout(ppd->cpspec->autoneg_wait,
5446 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5447 msecs_to_jiffies(250));
5448done:
5449 if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
5450 spin_lock_irqsave(&ppd->lflags_lock, flags);
5451 ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
5452 if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
5453 ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
5454 ppd->cpspec->autoneg_tries = 0;
5455 }
5456 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5457 set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5458 }
5459}
5460
/*
 * This routine is used to request IPG set in the QLogic switch.
 * Only called if r1.
 */
5465static void try_7322_ipg(struct qib_pportdata *ppd)
5466{
5467 struct qib_ibport *ibp = &ppd->ibport_data;
5468 struct ib_mad_send_buf *send_buf;
5469 struct ib_mad_agent *agent;
5470 struct ib_smp *smp;
5471 unsigned delay;
5472 int ret;
5473
5474 agent = ibp->rvp.send_agent;
5475 if (!agent)
5476 goto retry;
5477
5478 send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
5479 IB_MGMT_MAD_DATA, GFP_ATOMIC,
5480 IB_MGMT_BASE_VERSION);
5481 if (IS_ERR(send_buf))
5482 goto retry;
5483
5484 if (!ibp->smi_ah) {
5485 struct ib_ah *ah;
5486
5487 ah = qib_create_qp0_ah(ibp, be16_to_cpu(IB_LID_PERMISSIVE));
5488 if (IS_ERR(ah))
5489 ret = PTR_ERR(ah);
5490 else {
5491 send_buf->ah = ah;
5492 ibp->smi_ah = ibah_to_rvtah(ah);
5493 ret = 0;
5494 }
5495 } else {
5496 send_buf->ah = &ibp->smi_ah->ibah;
5497 ret = 0;
5498 }
5499
5500 smp = send_buf->mad;
5501 smp->base_version = IB_MGMT_BASE_VERSION;
5502 smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
5503 smp->class_version = 1;
5504 smp->method = IB_MGMT_METHOD_SEND;
5505 smp->hop_cnt = 1;
5506 smp->attr_id = QIB_VENDOR_IPG;
5507 smp->attr_mod = 0;
5508
5509 if (!ret)
5510 ret = ib_post_send_mad(send_buf, NULL);
5511 if (ret)
5512 ib_free_send_mad(send_buf);
5513retry:
5514 delay = 2 << ppd->cpspec->ipg_tries;
5515 queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
5516 msecs_to_jiffies(delay));
5517}
5518
/*
 * Re-issue the IPG request as long as the link is up and we have
 * tries left.  Only called if r1.
 */
5523static void ipg_7322_work(struct work_struct *work)
5524{
5525 struct qib_pportdata *ppd;
5526
5527 ppd = container_of(work, struct qib_chippport_specific,
5528 ipg_work.work)->ppd;
5529 if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
5530 && ++ppd->cpspec->ipg_tries <= 10)
5531 try_7322_ipg(ppd);
5532}
5533
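/* convert the chip's IBC link state into the IBTA port state */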
5534static u32 qib_7322_iblink_state(u64 ibcs)
5535{
5536 u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);
5537
5538 switch (state) {
5539 case IB_7322_L_STATE_INIT:
5540 state = IB_PORT_INIT;
5541 break;
5542 case IB_7322_L_STATE_ARM:
5543 state = IB_PORT_ARMED;
5544 break;
5545 case IB_7322_L_STATE_ACTIVE:
/* fall through */
5547 case IB_7322_L_STATE_ACT_DEFER:
5548 state = IB_PORT_ACTIVE;
5549 break;
5550 default:
5551 case IB_7322_L_STATE_DOWN:
5552 state = IB_PORT_DOWN;
5553 break;
5554 }
5555 return state;
5556}
5557
/* returns the IBTA port state, rather than the IBC link training state */
5559static u8 qib_7322_phys_portstate(u64 ibcs)
5560{
5561 u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
5562 return qib_7322_physportstate[state];
5563}
5564
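/*
 * Chip-specific handling of IB link up/down transitions: record the new
 * width and speed, run the DDR autonegotiation heuristics, and maintain
 * the symbol-error and link-error-recovery counter snapshots.
 */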
5565static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
5566{
5567 int ret = 0, symadj = 0;
5568 unsigned long flags;
5569 int mult;
5570
5571 spin_lock_irqsave(&ppd->lflags_lock, flags);
5572 ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
5573 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5574
/* Update our picture of width and speed from chip */
5576 if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
5577 ppd->link_speed_active = QIB_IB_QDR;
5578 mult = 4;
5579 } else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
5580 ppd->link_speed_active = QIB_IB_DDR;
5581 mult = 2;
5582 } else {
5583 ppd->link_speed_active = QIB_IB_SDR;
5584 mult = 1;
5585 }
5586 if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
5587 ppd->link_width_active = IB_WIDTH_4X;
5588 mult *= 4;
5589 } else
5590 ppd->link_width_active = IB_WIDTH_1X;
5591 ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
5592
5593 if (!ibup) {
5594 u64 clr;
5595
/* Link went down. */
/* do IPG MAD again after linkdown, even if last time failed */
5598 ppd->cpspec->ipg_tries = 0;
5599 clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
5600 (SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
5601 SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
5602 if (clr)
5603 qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
5604 if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5605 QIBL_IB_AUTONEG_INPROG)))
5606 set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5607 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5608 struct qib_qsfp_data *qd =
5609 &ppd->cpspec->qsfp_data;
5610
5611 qib_write_kreg_port(ppd, krp_tx_deemph_override,
5612 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
5613 reset_tx_deemphasis_override));
5614 qib_cancel_sends(ppd);
/* on link down, ensure sane pcs state */
5616 qib_7322_mini_pcs_reset(ppd);
/* schedule the qsfp refresh which should turn the link off */
5619 if (ppd->dd->flags & QIB_HAS_QSFP) {
5620 qd->t_insert = jiffies;
5621 queue_work(ib_wq, &qd->work);
5622 }
5623 spin_lock_irqsave(&ppd->sdma_lock, flags);
5624 if (__qib_sdma_running(ppd))
5625 __qib_sdma_process_event(ppd,
5626 qib_sdma_event_e70_go_idle);
5627 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
5628 }
5629 clr = read_7322_creg32_port(ppd, crp_iblinkdown);
5630 if (clr == ppd->cpspec->iblnkdownsnap)
5631 ppd->cpspec->iblnkdowndelta++;
5632 } else {
5633 if (qib_compat_ddr_negotiate &&
5634 !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5635 QIBL_IB_AUTONEG_INPROG)) &&
5636 ppd->link_speed_active == QIB_IB_SDR &&
5637 (ppd->link_speed_enabled & QIB_IB_DDR)
5638 && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
/* we are SDR, and auto-negotiation enabled */
5640 ++ppd->cpspec->autoneg_tries;
5641 if (!ppd->cpspec->ibdeltainprog) {
5642 ppd->cpspec->ibdeltainprog = 1;
5643 ppd->cpspec->ibsymdelta +=
5644 read_7322_creg32_port(ppd,
5645 crp_ibsymbolerr) -
5646 ppd->cpspec->ibsymsnap;
5647 ppd->cpspec->iblnkerrdelta +=
5648 read_7322_creg32_port(ppd,
5649 crp_iblinkerrrecov) -
5650 ppd->cpspec->iblnkerrsnap;
5651 }
5652 try_7322_autoneg(ppd);
5653 ret = 1;
5654 } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5655 ppd->link_speed_active == QIB_IB_SDR) {
5656 qib_autoneg_7322_send(ppd, 1);
5657 set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5658 qib_7322_mini_pcs_reset(ppd);
5659 udelay(2);
5660 ret = 1;
5661 } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5662 (ppd->link_speed_active & QIB_IB_DDR)) {
5663 spin_lock_irqsave(&ppd->lflags_lock, flags);
5664 ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
5665 QIBL_IB_AUTONEG_FAILED);
5666 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5667 ppd->cpspec->autoneg_tries = 0;
/* re-enable SDR, for next link down */
5669 set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5670 wake_up(&ppd->cpspec->autoneg_wait);
5671 symadj = 1;
5672 } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
/*
 * Clear autoneg failure flag, and do setup
 * so we'll try next time link goes down and
 * back to INIT (possibly connected to a
 * different device).
 */
5679 spin_lock_irqsave(&ppd->lflags_lock, flags);
5680 ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
5681 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5682 ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
5683 symadj = 1;
5684 }
5685 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5686 symadj = 1;
5687 if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
5688 try_7322_ipg(ppd);
5689 if (!ppd->cpspec->recovery_init)
5690 setup_7322_link_recovery(ppd, 0);
5691 ppd->cpspec->qdr_dfe_time = jiffies +
5692 msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
5693 }
5694 ppd->cpspec->ibmalfusesnap = 0;
5695 ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
5696 crp_errlink);
5697 }
5698 if (symadj) {
5699 ppd->cpspec->iblnkdownsnap =
5700 read_7322_creg32_port(ppd, crp_iblinkdown);
5701 if (ppd->cpspec->ibdeltainprog) {
5702 ppd->cpspec->ibdeltainprog = 0;
5703 ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
5704 crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
5705 ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
5706 crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
5707 }
5708 } else if (!ibup && qib_compat_ddr_negotiate &&
5709 !ppd->cpspec->ibdeltainprog &&
5710 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5711 ppd->cpspec->ibdeltainprog = 1;
5712 ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
5713 crp_ibsymbolerr);
5714 ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
5715 crp_iblinkerrrecov);
5716 }
5717
5718 if (!ret)
5719 qib_setup_7322_setextled(ppd, ibup);
5720 return ret;
5721}
5722
/*
 * Does read/modify/write to appropriate registers to
 * set output and direction bits selected by mask.
 * these are in their canonical positions (e.g. lsb of
 * dir will end up in D48 of extctrl on existing chips).
 * returns contents of GP Inputs.
 */
5730static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
5731{
5732 u64 read_val, new_out;
5733 unsigned long flags;
5734
5735 if (mask) {
/* some bits being written, lock access to GPIO */
5737 dir &= mask;
5738 out &= mask;
5739 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
5740 dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
5741 dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
5742 new_out = (dd->cspec->gpio_out & ~mask) | out;
5743
5744 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
5745 qib_write_kreg(dd, kr_gpio_out, new_out);
5746 dd->cspec->gpio_out = new_out;
5747 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
5748 }
5749
/*
 * It is unlikely that a read at this time would get valid
 * data on a pin whose direction line was set in the same
 * call to this function. We include the read here because
 * that allows us to potentially combine a change on one pin with
 * a read on another, and because the old code did something like
 * this.
 */
5757 read_val = qib_read_kreg64(dd, kr_extstatus);
5758 return SYM_FIELD(read_val, EXTStatus, GPIOIn);
5759}
5760
/* Enable writes to config EEPROM, if possible. Returns previous state */
5762static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
5763{
5764 int prev_wen;
5765 u32 mask;
5766
5767 mask = 1 << QIB_EEPROM_WEN_NUM;
5768 prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
5769 gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
5770
5771 return prev_wen & 1;
5772}
5773
/*
 * Read fundamental info we need to use the chip.  These are
 * the registers that describe chip capabilities, and are
 * saved in shadow registers.
 */
5779static void get_7322_chip_params(struct qib_devdata *dd)
5780{
5781 u64 val;
5782 u32 piobufs;
5783 int mtu;
5784
5785 dd->palign = qib_read_kreg32(dd, kr_pagealign);
5786
5787 dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
5788
5789 dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
5790 dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
5791 dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
5792 dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
5793 dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
5794
5795 val = qib_read_kreg64(dd, kr_sendpiobufcnt);
5796 dd->piobcnt2k = val & ~0U;
5797 dd->piobcnt4k = val >> 32;
5798 val = qib_read_kreg64(dd, kr_sendpiosize);
5799 dd->piosize2k = val & ~0U;
5800 dd->piosize4k = val >> 32;
5801
5802 mtu = ib_mtu_enum_to_int(qib_ibmtu);
5803 if (mtu == -1)
5804 mtu = QIB_DEFAULT_MTU;
5805 dd->pport[0].ibmtu = (u32)mtu;
5806 dd->pport[1].ibmtu = (u32)mtu;

/* these may be adjusted in init_chip_wc_pat() */
5809 dd->pio2kbase = (u32 __iomem *)
5810 ((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
5811 dd->pio4kbase = (u32 __iomem *)
5812 ((char __iomem *) dd->kregbase +
5813 (dd->piobufbase >> 32));
/*
 * 4K buffers take 2 pages; we use roundup just to be
 * paranoid; we calculate it once here, rather than on
 * each buffer allocate.
 */
5819 dd->align4k = ALIGN(dd->piosize4k, dd->palign);
5820
5821 piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;
5822
5823 dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
5824 (sizeof(u64) * BITS_PER_BYTE / 2);
5825}
5826
/*
 * The chip base addresses in cspec and cpspec have to be set
 * after possible init_chip_wc_pat(), rather than in
 * get_7322_chip_params(), so split out as separate function.
 */
5832static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
5833{
5834 u32 cregbase;
5835
5836 cregbase = qib_read_kreg32(dd, kr_counterregbase);
5837
5838 dd->cspec->cregbase = (u64 __iomem *)(cregbase +
5839 (char __iomem *)dd->kregbase);
5840
5841 dd->egrtidbase = (u64 __iomem *)
5842 ((char __iomem *) dd->kregbase + dd->rcvegrbase);

/* port registers are defined as relative to base of chip */
5845 dd->pport[0].cpspec->kpregbase =
5846 (u64 __iomem *)((char __iomem *)dd->kregbase);
5847 dd->pport[1].cpspec->kpregbase =
5848 (u64 __iomem *)(dd->palign +
5849 (char __iomem *)dd->kregbase);
5850 dd->pport[0].cpspec->cpregbase =
5851 (u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
5852 kr_counterregbase) + (char __iomem *)dd->kregbase);
5853 dd->pport[1].cpspec->cpregbase =
5854 (u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
5855 kr_counterregbase) + (char __iomem *)dd->kregbase);
5856}
5857
/*
 * This is a fairly special-purpose observer, so we only support
 * the port-specific parts of SendCtrl.
 */
5863#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) | \
5864 SYM_MASK(SendCtrl_0, SDmaEnable) | \
5865 SYM_MASK(SendCtrl_0, SDmaIntEnable) | \
5866 SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
5867 SYM_MASK(SendCtrl_0, SDmaHalt) | \
5868 SYM_MASK(SendCtrl_0, IBVLArbiterEn) | \
5869 SYM_MASK(SendCtrl_0, ForceCreditUpToDate))
5870
5871static int sendctrl_hook(struct qib_devdata *dd,
5872 const struct diag_observer *op, u32 offs,
5873 u64 *data, u64 mask, int only_32)
5874{
5875 unsigned long flags;
5876 unsigned idx;
5877 unsigned pidx;
5878 struct qib_pportdata *ppd = NULL;
5879 u64 local_data, all_bits;

/*
 * The fixed correspondence between Physical ports and pports is
 * severed. We need to hunt for the ppd that corresponds
 * to the offset we got. And we have to do that without admitting
 * we know the stride, apart from the base offsets of SendCtrl.
 */
5887 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5888 u64 __iomem *psptr;
5889 u32 psoffs;
5890
5891 ppd = dd->pport + pidx;
5892 if (!ppd->cpspec->kpregbase)
5893 continue;
5894
5895 psptr = ppd->cpspec->kpregbase + krp_sendctrl;
5896 psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
5897 if (psoffs == offs)
5898 break;
5899 }

/* If pport is not being managed by driver, just avoid shadows. */
5902 if (pidx >= dd->num_pports)
5903 ppd = NULL;

/* In any case, "idx" is flat index in kreg space */
5906 idx = offs / sizeof(u64);
5907
5908 all_bits = ~0ULL;
5909 if (only_32)
5910 all_bits >>= 32;
5911
5912 spin_lock_irqsave(&dd->sendctrl_lock, flags);
5913 if (!ppd || (mask & all_bits) != all_bits) {
/*
 * At least some mask bits are zero, so we need
 * to read. The judgement call is whether from
 * reg or shadow. First-cut: read reg, and complain
 * if any bits which should be shadowed are different
 * from their shadowed value.
 */
5921 if (only_32)
5922 local_data = (u64)qib_read_kreg32(dd, idx);
5923 else
5924 local_data = qib_read_kreg64(dd, idx);
5925 *data = (local_data & ~mask) | (*data & mask);
5926 }
5927 if (mask) {
/*
 * At least some mask bits are one, so we need
 * to write, but only shadow some bits.
 */
u64 sval, tval; /* Shadowed, transient */

/*
 * New shadow val is bits we don't want to touch,
 * ORed with bits we do, that are intended for shadow.
 */
5938 if (ppd) {
5939 sval = ppd->p_sendctrl & ~mask;
5940 sval |= *data & SENDCTRL_SHADOWED & mask;
5941 ppd->p_sendctrl = sval;
5942 } else
5943 sval = *data & SENDCTRL_SHADOWED & mask;
5944 tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
5945 qib_write_kreg(dd, idx, tval);
qib_write_kreg(dd, kr_scratch, 0ULL);
5947 }
5948 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
5949 return only_32 ? 4 : 8;
5950}
5951
5952static const struct diag_observer sendctrl_0_observer = {
5953 sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
5954 KREG_IDX(SendCtrl_0) * sizeof(u64)
5955};
5956
5957static const struct diag_observer sendctrl_1_observer = {
5958 sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
5959 KREG_IDX(SendCtrl_1) * sizeof(u64)
5960};
5961
5962static ushort sdma_fetch_prio = 8;
5963module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
5964MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");
5965
/* Besides logging QSFP events, we set appropriate TxDDS values */
5967static void init_txdds_table(struct qib_pportdata *ppd, int override);
5968
5969static void qsfp_7322_event(struct work_struct *work)
5970{
5971 struct qib_qsfp_data *qd;
5972 struct qib_pportdata *ppd;
5973 unsigned long pwrup;
5974 unsigned long flags;
5975 int ret;
5976 u32 le2;
5977
5978 qd = container_of(work, struct qib_qsfp_data, work);
5979 ppd = qd->ppd;
5980 pwrup = qd->t_insert +
5981 msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC);

/* Delay for 20 msecs to allow ModPrs resistor to setup */
5984 mdelay(QSFP_MODPRS_LAG_MSEC);
5985
5986 if (!qib_qsfp_mod_present(ppd)) {
5987 ppd->cpspec->qsfp_data.modpresent = 0;
/* module removed: force the physical link down */
5989 qib_set_ib_7322_lstate(ppd, 0,
5990 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
5991 spin_lock_irqsave(&ppd->lflags_lock, flags);
5992 ppd->lflags &= ~QIBL_LINKV;
5993 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5994 } else {
/*
 * Some QSFPs not only do not respond until the full power-up
 * time, but may behave badly if we try. So hold off responding
 * to insertion.
 */
6000 while (1) {
6001 if (time_is_before_jiffies(pwrup))
6002 break;
6003 msleep(20);
6004 }
6005
6006 ret = qib_refresh_qsfp_cache(ppd, &qd->cache);

/*
 * Need to change LE2 back to defaults if we couldn't
 * read the cable type (to handle cable swaps), so do this
 * even on failure to read cable information.  We don't
 * get here for QME, so IS_QME check not needed here.
 */
6014 if (!ret && !ppd->dd->cspec->r1) {
6015 if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
6016 le2 = LE2_QME;
6017 else if (qd->cache.atten[1] >= qib_long_atten &&
6018 QSFP_IS_CU(qd->cache.tech))
6019 le2 = LE2_5m;
6020 else
6021 le2 = LE2_DEFAULT;
6022 } else
6023 le2 = LE2_DEFAULT;
6024 ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));

/*
 * We always change parameters, since we can choose
 * values for cables without eeproms, and the cable may have
 * changed from a cable with full or partial eeprom content
 * to one with partial or empty content.
 */
6031 init_txdds_table(ppd, 0);

/* The physical link is being re-enabled only when the
 * previous state was DISABLED and the VALID bit is not
 * set. This should only happen when the cable has been
 * physically pulled. */
6036 if (!ppd->cpspec->qsfp_data.modpresent &&
6037 (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) {
6038 ppd->cpspec->qsfp_data.modpresent = 1;
6039 qib_set_ib_7322_lstate(ppd, 0,
6040 QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
6041 spin_lock_irqsave(&ppd->lflags_lock, flags);
6042 ppd->lflags |= QIBL_LINKV;
6043 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
6044 }
6045 }
6046}
6047
/*
 * There is little we can do but complain to the user if QSFP cable
 * detection fails.
 */
6052static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
6053{
6054 unsigned long flags;
6055 struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
6056 struct qib_devdata *dd = ppd->dd;
6057 u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;
6058
6059 mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
6060 qd->ppd = ppd;
6061 qib_qsfp_init(qd, qsfp_7322_event);
6062 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
6063 dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
6064 dd->cspec->gpio_mask |= mod_prs_bit;
6065 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
6066 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
6067 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
6068}
6069
/*
 * Called at device initialization time, and also if the txselect
 * module parameter is changed.  This is used for cables that don't
 * have valid QSFP EEPROMs (not present, or attenuation is zero).
 * We initialize to the default, then if there is a specific
 * unit,port match, we use that (and set it immediately, for the
 * current speed, if the link is at INIT or better).
 * String format is "default# unit#,port#=# ... u,p=#", separators must
 * be a SPACE character.  A newline terminates.  An optional ,h1=# on a
 * unit,port entry sets the H1 value for that port as well.
 */
6083static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
6084{
6085 char *nxt, *str;
6086 u32 pidx, unit, port, deflt, h1;
6087 unsigned long val;
6088 int any = 0, seth1;
6089 int txdds_size;
6090
6091 str = txselect_list;

/* default number is validated in setup_txselect() */
6094 deflt = simple_strtoul(str, &nxt, 0);
6095 for (pidx = 0; pidx < dd->num_pports; ++pidx)
6096 dd->pport[pidx].cpspec->no_eep = deflt;
6097
6098 txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
6099 if (IS_QME(dd) || IS_QMH(dd))
6100 txdds_size += TXDDS_MFG_SZ;
6101
6102 while (*nxt && nxt[1]) {
6103 str = ++nxt;
6104 unit = simple_strtoul(str, &nxt, 0);
6105 if (nxt == str || !*nxt || *nxt != ',') {
6106 while (*nxt && *nxt++ != ' ')
6107 ;
6108 continue;
6109 }
6110 str = ++nxt;
6111 port = simple_strtoul(str, &nxt, 0);
6112 if (nxt == str || *nxt != '=') {
6113 while (*nxt && *nxt++ != ' ')
6114 ;
6115 continue;
6116 }
6117 str = ++nxt;
6118 val = simple_strtoul(str, &nxt, 0);
6119 if (nxt == str) {
6120 while (*nxt && *nxt++ != ' ')
6121 ;
6122 continue;
6123 }
6124 if (val >= txdds_size)
6125 continue;
6126 seth1 = 0;
6127 h1 = 0;
6128 if (*nxt == ',' && nxt[1]) {
6129 str = ++nxt;
6130 h1 = (u32)simple_strtoul(str, &nxt, 0);
6131 if (nxt == str)
6132 while (*nxt && *nxt++ != ' ')
6133 ;
6134 else
6135 seth1 = 1;
6136 }
6137 for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
6138 ++pidx) {
6139 struct qib_pportdata *ppd = &dd->pport[pidx];
6140
6141 if (ppd->port != port || !ppd->link_speed_supported)
6142 continue;
6143 ppd->cpspec->no_eep = val;
6144 if (seth1)
6145 ppd->cpspec->h1_val = h1;
/* now change the IBC and serdes, overriding generic */
6147 init_txdds_table(ppd, 1);

/* Re-enable the physical state machine on mezz boards
 * now that the correct settings have been set.
 * QSFP boards are handled by the QSFP event handler */
6151 if (IS_QMH(dd) || IS_QME(dd))
6152 qib_set_ib_7322_lstate(ppd, 0,
6153 QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
6154 any++;
6155 }
6156 if (*nxt == '\n')
6157 break;
6158 }
6159 if (change && !any) {
/*
 * Didn't find a specific setting for any port; (re)apply the
 * default txdds values to all ports that support a link.
 */
6164 for (pidx = 0; pidx < dd->num_pports; ++pidx)
6165 if (dd->pport[pidx].link_speed_supported)
6166 init_txdds_table(&dd->pport[pidx], 0);
6167 }
6168}
6169
/* handle the txselect parameter changing */
6171static int setup_txselect(const char *str, struct kernel_param *kp)
6172{
6173 struct qib_devdata *dd;
6174 unsigned long val;
6175 char *n;
6176
6177 if (strlen(str) >= ARRAY_SIZE(txselect_list)) {
6178 pr_info("txselect_values string too long\n");
6179 return -ENOSPC;
6180 }
6181 val = simple_strtoul(str, &n, 0);
6182 if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
6183 TXDDS_MFG_SZ)) {
6184 pr_info("txselect_values must start with a number < %d\n",
6185 TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
6186 return -EINVAL;
6187 }
6188 strncpy(txselect_list, str, ARRAY_SIZE(txselect_list) - 1);
6189
6190 list_for_each_entry(dd, &qib_dev_list, list)
6191 if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
6192 set_no_qsfp_atten(dd, 1);
6193 return 0;
6194}
6195
/*
 * Write the final few registers that depend on some of the
 * init setup.  Done late in init, just before bringing up
 * the serdes.
 */
6201static int qib_late_7322_initreg(struct qib_devdata *dd)
6202{
6203 int ret = 0, n;
6204 u64 val;
6205
6206 qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
6207 qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
6208 qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
6209 qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
6210 val = qib_read_kreg64(dd, kr_sendpioavailaddr);
6211 if (val != dd->pioavailregs_phys) {
6212 qib_dev_err(dd,
6213 "Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
6214 (unsigned long) dd->pioavailregs_phys,
6215 (unsigned long long) val);
6216 ret = -EINVAL;
6217 }
6218
6219 n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
6220 qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
/* driver sends get pkey, lid, etc. checking also, to catch bugs */
6222 qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);
6223
6224 qib_register_observer(dd, &sendctrl_0_observer);
6225 qib_register_observer(dd, &sendctrl_1_observer);
6226
6227 dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
6228 qib_write_kreg(dd, kr_control, dd->control);

/*
 * Set SendDmaFetchPriority and init Tx params, including
 * QSFP handler on boards that have QSFP.
 * First set our default attenuation entry for cables that
 * don't have valid attenuation.
 */
6235 set_no_qsfp_atten(dd, 0);
6236 for (n = 0; n < dd->num_pports; ++n) {
6237 struct qib_pportdata *ppd = dd->pport + n;
6238
6239 qib_write_kreg_port(ppd, krp_senddmaprioritythld,
6240 sdma_fetch_prio & 0xf);
/* Initialize QSFP if present on board. */
6242 if (dd->flags & QIB_HAS_QSFP)
6243 qib_init_7322_qsfp(ppd);
6244 }
6245 dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
6246 qib_write_kreg(dd, kr_control, dd->control);
6247
6248 return ret;
6249}
6250
/* per-IB-port register field groups, built with MASK_ACROSS */
6252#define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
6253 MASK_ACROSS(8, 15))
6254#define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
6255#define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
6256 MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
6257 MASK_ACROSS(0, 11))
6258
/*
 * Write the initialization per-port registers that need to be done at
 * driver load and after reset completes (i.e., that aren't done as part
 * of other init procedures called from qib_init.c).
 * Some of these should be redundant on reset, but play safe.
 */
6265static void write_7322_init_portregs(struct qib_pportdata *ppd)
6266{
6267 u64 val;
6268 int i;
6269
6270 if (!ppd->link_speed_supported) {
/* port not in use: clear the credit and IBC control registers */
6272 for (i = 1; i < 8; i++)
6273 qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
6274 qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
6275 qib_write_kreg(ppd->dd, kr_scratch, 0);
6276 return;
6277 }
6278
/*
 * Set the number of supported virtual lanes in IBC,
 * for flow control packet handling on unsupported VLs
 */
6283 val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
6284 val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
6285 val |= (u64)(ppd->vls_supported - 1) <<
6286 SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
6287 qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
6288
6289 qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);

/* enable tx header checking */
6292 qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
6293 IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
6294 IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);
6295
6296 qib_write_kreg_port(ppd, krp_ncmodectrl,
6297 SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));

/*
 * Unconditionally clear the bufmask bits.  If SDMA is
 * enabled, we'll set them appropriately later.
 */
6303 qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
6304 qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
6305 qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
6306 if (ppd->dd->cspec->r1)
6307 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
6308}
6309
/*
 * Write the initialization per-device registers that need to be done at
 * driver load and after reset completes (i.e., that aren't done as part
 * of other init procedures called from qib_init.c).  Also write per-port
 * registers that are affected by overall device config, such as QP mapping.
 * Some of these should be redundant on reset, but play safe.
 */
6317static void write_7322_initregs(struct qib_devdata *dd)
6318{
6319 struct qib_pportdata *ppd;
6320 int i, pidx;
6321 u64 val;
6322
/* Set Multicast QPs received by port 2 (map_index 1) to context one. */
6324 qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);
6325
6326 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
6327 unsigned n, regno;
6328 unsigned long flags;
6329
6330 if (dd->n_krcv_queues < 2 ||
6331 !dd->pport[pidx].link_speed_supported)
6332 continue;
6333
6334 ppd = &dd->pport[pidx];
6335
/* be paranoid against later code motion, etc. */
6337 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
6338 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
6339 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);

/* Initialize QP to context mapping */
6342 regno = krp_rcvqpmaptable;
6343 val = 0;
6344 if (dd->num_pports > 1)
6345 n = dd->first_user_ctxt / dd->num_pports;
6346 else
6347 n = dd->first_user_ctxt - 1;
6348 for (i = 0; i < 32; ) {
6349 unsigned ctxt;
6350
6351 if (dd->num_pports > 1)
6352 ctxt = (i % n) * dd->num_pports + pidx;
6353 else if (i % n)
6354 ctxt = (i % n) + 1;
6355 else
6356 ctxt = ppd->hw_pidx;
6357 val |= ctxt << (5 * (i % 6));
6358 i++;
6359 if (i % 6 == 0) {
6360 qib_write_kreg_port(ppd, regno, val);
6361 val = 0;
6362 regno++;
6363 }
6364 }
6365 qib_write_kreg_port(ppd, regno, val);
6366 }
6367
6373
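	/*
	 * Set up interrupt mitigation for the kernel contexts only; user
	 * contexts want an interrupt as soon as any packet arrives, so
	 * they get no mitigation.
	 */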
6374 for (i = 0; i < dd->first_user_ctxt; i++) {
6375 dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
6376 qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
6377 }
6378
6379
6380
6381
6382
6383
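	/*
	 * Initialize all TID-flow table entries with the error bits set,
	 * so a stale entry can never falsely match an incoming packet.
	 */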
6384 val = TIDFLOW_ERRBITS;
6385 for (i = 0; i < dd->cfgctxts; i++) {
6386 int flow;
6387
6388 for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
6389 qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
6390 }
6391
6392
6393
6394
6395
6396
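	/*
	 * Initialize link recovery: dual-port cards start in dual-port
	 * recovery mode, single-port cards cover just the one port.
	 */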
6397 if (dd->num_pports)
6398 setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
6399}
6400
6401static int qib_init_7322_variables(struct qib_devdata *dd)
6402{
6403 struct qib_pportdata *ppd;
6404 unsigned features, pidx, sbufcnt;
6405 int ret, mtu;
6406 u32 sbufs, updthresh;
6407
6408
6409 ppd = (struct qib_pportdata *)(dd + 1);
6410 dd->pport = ppd;
6411 ppd[0].dd = dd;
6412 ppd[1].dd = dd;
6413
6414 dd->cspec = (struct qib_chip_specific *)(ppd + 2);
6415
6416 ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
6417 ppd[1].cpspec = &ppd[0].cpspec[1];
6418 ppd[0].cpspec->ppd = &ppd[0];
6419 ppd[1].cpspec->ppd = &ppd[1];
6420
6421 spin_lock_init(&dd->cspec->rcvmod_lock);
6422 spin_lock_init(&dd->cspec->gpio_lock);
6423
6424
6425 dd->revision = readq(&dd->kregbase[kr_revision]);
6426
6427 if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
6428 qib_dev_err(dd,
6429 "Revision register read failure, giving up initialization\n");
6430 ret = -ENODEV;
6431 goto bail;
6432 }
6433 dd->flags |= QIB_PRESENT;
6434
6435 dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
6436 dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
6437 dd->cspec->r1 = dd->minrev == 1;
6438
6439 get_7322_chip_params(dd);
6440 features = qib_7322_boardname(dd);
6441
6442
6443 sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
6444 NUM_VL15_BUFS + BITS_PER_LONG - 1;
6445 sbufcnt /= BITS_PER_LONG;
6446 dd->cspec->sendchkenable =
6447 kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendchkenable),
6448 GFP_KERNEL);
6449 dd->cspec->sendgrhchk =
6450 kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendgrhchk),
6451 GFP_KERNEL);
6452 dd->cspec->sendibchk =
6453 kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendibchk),
6454 GFP_KERNEL);
6455 if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
6456 !dd->cspec->sendibchk) {
6457 ret = -ENOMEM;
6458 goto bail;
6459 }
6460
6461 ppd = dd->pport;
6462
6463
6464
6465
6466
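	/*
	 * GPIO bits for TWSI data and clock,
	 * used for serial EEPROM.
	 */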
6467 dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
6468 dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
6469 dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
6470
6471 dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
6472 QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
6473 QIB_HAS_THRESH_UPDATE |
6474 (sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
6475 dd->flags |= qib_special_trigger ?
6476 QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
6477
6478
6479
6480
6481
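	/*
	 * Set initial base-address values; these may be redone later
	 * (set_baseaddrs is called again after the write-combining
	 * mapping is set up), but are needed for early register access.
	 */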
6482 qib_7322_set_baseaddrs(dd);
6483
6484 mtu = ib_mtu_enum_to_int(qib_ibmtu);
6485 if (mtu == -1)
6486 mtu = QIB_DEFAULT_MTU;
6487
6488 dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
6489
6490 dd->cspec->hwerrmask = ~0ULL;
6491
6492
6493 dd->cspec->hwerrmask &=
6494 ~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
6495 SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
6496 HWE_MASK(LATriggered));
6497
6498 for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
6499 struct qib_chippport_specific *cp = ppd->cpspec;
6500
6501 ppd->link_speed_supported = features & PORT_SPD_CAP;
6502 features >>= PORT_SPD_CAP_SHIFT;
6503 if (!ppd->link_speed_supported) {
6504
6505 dd->skip_kctxt_mask |= 1 << pidx;
6506 if (pidx == 0) {
6507
6508 qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6509 qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6510 ppd[0] = ppd[1];
6511 dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6512 IBSerdesPClkNotDetectMask_0)
6513 | SYM_MASK(HwErrMask,
6514 SDmaMemReadErrMask_0));
6515 dd->cspec->int_enable_mask &= ~(
6516 SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
6517 SYM_MASK(IntMask, SDmaIdleIntMask_0) |
6518 SYM_MASK(IntMask, SDmaProgressIntMask_0) |
6519 SYM_MASK(IntMask, SDmaIntMask_0) |
6520 SYM_MASK(IntMask, ErrIntMask_0) |
6521 SYM_MASK(IntMask, SendDoneIntMask_0));
6522 } else {
6523
6524 qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6525 qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6526 dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6527 IBSerdesPClkNotDetectMask_1)
6528 | SYM_MASK(HwErrMask,
6529 SDmaMemReadErrMask_1));
6530 dd->cspec->int_enable_mask &= ~(
6531 SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
6532 SYM_MASK(IntMask, SDmaIdleIntMask_1) |
6533 SYM_MASK(IntMask, SDmaProgressIntMask_1) |
6534 SYM_MASK(IntMask, SDmaIntMask_1) |
6535 SYM_MASK(IntMask, ErrIntMask_1) |
6536 SYM_MASK(IntMask, SendDoneIntMask_1));
6537 }
6538 continue;
6539 }
6540
6541 dd->num_pports++;
6542 ret = qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
6543 if (ret) {
6544 dd->num_pports--;
6545 goto bail;
6546 }
6547
6548 ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
6549 ppd->link_width_enabled = IB_WIDTH_4X;
6550 ppd->link_speed_enabled = ppd->link_speed_supported;
6551
6552
6553
6554
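		/*
		 * Set the initial values to reasonable defaults; they'll be
		 * set for real when the link comes up.
		 */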
6555 ppd->link_width_active = IB_WIDTH_4X;
6556 ppd->link_speed_active = QIB_IB_SDR;
6557 ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
6558 switch (qib_num_cfg_vls) {
6559 case 1:
6560 ppd->vls_supported = IB_VL_VL0;
6561 break;
6562 case 2:
6563 ppd->vls_supported = IB_VL_VL0_1;
6564 break;
6565 default:
6566 qib_devinfo(dd->pcidev,
6567 "Invalid num_vls %u, using 4 VLs\n",
6568 qib_num_cfg_vls);
6569 qib_num_cfg_vls = 4;
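			/* fall through */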
6570
6571 case 4:
6572 ppd->vls_supported = IB_VL_VL0_3;
6573 break;
6574 case 8:
6575 if (mtu <= 2048)
6576 ppd->vls_supported = IB_VL_VL0_7;
6577 else {
6578 qib_devinfo(dd->pcidev,
6579 "Invalid num_vls %u for MTU %d , using 4 VLs\n",
6580 qib_num_cfg_vls, mtu);
6581 ppd->vls_supported = IB_VL_VL0_3;
6582 qib_num_cfg_vls = 4;
6583 }
6584 break;
6585 }
6586 ppd->vls_operational = ppd->vls_supported;
6587
6588 init_waitqueue_head(&cp->autoneg_wait);
6589 INIT_DELAYED_WORK(&cp->autoneg_work,
6590 autoneg_7322_work);
6591 if (ppd->dd->cspec->r1)
6592 INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);
6593
6594
6595
6596
6597
6598
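		/*
		 * No QSFP on these (e.g. mezzanine) cards, so cable-info
		 * setup is done here rather than from the QSFP EEPROM.
		 */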
6599 if (!(dd->flags & QIB_HAS_QSFP)) {
6600 if (!IS_QMH(dd) && !IS_QME(dd))
6601 qib_devinfo(dd->pcidev,
6602 "IB%u:%u: Unknown mezzanine card type\n",
6603 dd->unit, ppd->port);
6604 cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
6605
6606
6607
6608
6609 ppd->cpspec->no_eep = IS_QMH(dd) ?
6610 TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
6611 } else
6612 cp->h1_val = H1_FORCE_VAL;
6613
6614
6615 if (!qib_mini_init)
6616 write_7322_init_portregs(ppd);
6617
6618 timer_setup(&cp->chase_timer, reenable_chase, 0);
6619
6620 ppd++;
6621 }
6622
6623 dd->rcvhdrentsize = qib_rcvhdrentsize ?
6624 qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
6625 dd->rcvhdrsize = qib_rcvhdrsize ?
6626 qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
6627 dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
6628
6629
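	/* eager buffers are a power of 2, at least 2048 bytes or the MTU */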
6630 dd->rcvegrbufsize = max(mtu, 2048);
6631 BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
6632 dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
6633
6634 qib_7322_tidtemplate(dd);
6635
6636
6637
6638
6639
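	/*
	 * We can request a receive interrupt for 1 or
	 * more packets from current offset.
	 */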
6640 dd->rhdrhead_intr_off =
6641 (u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
6642
6643
6644 timer_setup(&dd->stats_timer, qib_get_7322_faststats, 0);
6645
6646 dd->ureg_align = 0x10000;
6647
6648 dd->piosize2kmax_dwords = dd->piosize2k >> 2;
6649
6650 qib_7322_config_ctxts(dd);
6651 qib_set_ctxtcnt(dd);
6652
6653 if (qib_wc_pat) {
6654 resource_size_t vl15off;
6655
6661
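		/*
		 * We do not set write-combining on the VL15 buffers, to
		 * avoid a rare problem with unaligned writes from
		 * interrupt-flushed store buffers, so we need to map
		 * those buffers separately here.
		 */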
6662 ret = init_chip_wc_pat(dd, 0);
6663 if (ret)
6664 goto bail;
6665
6666
6667 vl15off = dd->physaddr + (dd->piobufbase >> 32) +
6668 dd->piobcnt4k * dd->align4k;
6669 dd->piovl15base = ioremap_nocache(vl15off,
6670 NUM_VL15_BUFS * dd->align4k);
6671 if (!dd->piovl15base) {
6672 ret = -ENOMEM;
6673 goto bail;
6674 }
6675 }
6676 qib_7322_set_baseaddrs(dd);
6677
6678 ret = 0;
6679 if (qib_mini_init)
6680 goto bail;
6681 if (!dd->num_pports) {
6682 qib_dev_err(dd, "No ports enabled, giving up initialization\n");
6683 goto bail;
6684 }
6685
6686 write_7322_initregs(dd);
6687 ret = qib_create_ctxts(dd);
6688 init_7322_cntrnames(dd);
6689
6690 updthresh = 8U;
6691
6701
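	/*
	 * With SDMA, all the 4KB buffers go to SDMA and we reserve at
	 * least updthresh (or 3) more for other kernel sends such as
	 * SMI, MAD, and ACK packets; without SDMA, the 4KB buffers
	 * themselves serve as the kernel reserve.
	 */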
6702 if (dd->flags & QIB_HAS_SEND_DMA) {
6703 dd->cspec->sdmabufcnt = dd->piobcnt4k;
6704 sbufs = updthresh > 3 ? updthresh : 3;
6705 } else {
6706 dd->cspec->sdmabufcnt = 0;
6707 sbufs = dd->piobcnt4k;
6708 }
6709 dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
6710 dd->cspec->sdmabufcnt;
6711 dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
6712 dd->cspec->lastbuf_for_pio--;
6713 dd->last_pio = dd->cspec->lastbuf_for_pio;
6714 dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
6715 dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
6716
6722
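	/*
	 * If user contexts get fewer buffers than the update threshold,
	 * lower the threshold so updates arrive before a context can
	 * actually run out of buffers.
	 */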
6723 if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
6724 updthresh = dd->pbufsctxt - 2;
6725 dd->cspec->updthresh_dflt = updthresh;
6726 dd->cspec->updthresh = updthresh;
6727
6728
6729 dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
6730 << SYM_LSB(SendCtrl, AvailUpdThld)) |
6731 SYM_MASK(SendCtrl, SendBufAvailPad64Byte);
6732
6733 dd->psxmitwait_supported = 1;
6734 dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
6735bail:
6736 if (!dd->ctxtcnt)
6737 dd->ctxtcnt = 1;
6738
6739 return ret;
6740}
6741
6742static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
6743 u32 *pbufnum)
6744{
6745 u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
6746 struct qib_devdata *dd = ppd->dd;
6747
6748
6749 if (pbc & PBC_7322_VL15_SEND) {
6750 first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
6751 last = first;
6752 } else {
6753 if ((plen + 1) > dd->piosize2kmax_dwords)
6754 first = dd->piobcnt2k;
6755 else
6756 first = 0;
6757 last = dd->cspec->lastbuf_for_pio;
6758 }
6759 return qib_getsendbuf_range(dd, pbufnum, first, last);
6760}
6761
6762static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
6763 u32 start)
6764{
6765 qib_write_kreg_port(ppd, krp_psinterval, intv);
6766 qib_write_kreg_port(ppd, krp_psstart, start);
6767}
6768
6769
6770
6771
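/*
 * sdma_lock should be locked before calling this.
 */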
6772static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
6773{
6774 qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
6775}
6776
6777
6778
6779
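/*
 * Dump the SDMA engine's per-port registers, for debugging.
 */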
6780static void dump_sdma_7322_state(struct qib_pportdata *ppd)
6781{
6782 u64 reg, reg1, reg2;
6783
6784 reg = qib_read_kreg_port(ppd, krp_senddmastatus);
6785 qib_dev_porterr(ppd->dd, ppd->port,
6786 "SDMA senddmastatus: 0x%016llx\n", reg);
6787
6788 reg = qib_read_kreg_port(ppd, krp_sendctrl);
6789 qib_dev_porterr(ppd->dd, ppd->port,
6790 "SDMA sendctrl: 0x%016llx\n", reg);
6791
6792 reg = qib_read_kreg_port(ppd, krp_senddmabase);
6793 qib_dev_porterr(ppd->dd, ppd->port,
6794 "SDMA senddmabase: 0x%016llx\n", reg);
6795
6796 reg = qib_read_kreg_port(ppd, krp_senddmabufmask0);
6797 reg1 = qib_read_kreg_port(ppd, krp_senddmabufmask1);
6798 reg2 = qib_read_kreg_port(ppd, krp_senddmabufmask2);
6799 qib_dev_porterr(ppd->dd, ppd->port,
6800 "SDMA senddmabufmask 0:%llx 1:%llx 2:%llx\n",
6801 reg, reg1, reg2);
6802
6803
	/* read the bufuse bits, then write each value back to clear them */
	reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
	qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg);
	reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
	qib_write_kreg_port(ppd, krp_senddmabuf_use1, reg1);
	reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
	qib_write_kreg_port(ppd, krp_senddmabuf_use2, reg2);
6810
6811 qib_dev_porterr(ppd->dd, ppd->port,
6812 "SDMA current senddmabuf_use 0:%llx 1:%llx 2:%llx\n",
6813 reg, reg1, reg2);
6814 reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
6815 reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
6816 reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
6817
6818 qib_dev_porterr(ppd->dd, ppd->port,
6819 "SDMA cleared senddmabuf_use 0:%llx 1:%llx 2:%llx\n",
6820 reg, reg1, reg2);
6821
6822 reg = qib_read_kreg_port(ppd, krp_senddmatail);
6823 qib_dev_porterr(ppd->dd, ppd->port,
6824 "SDMA senddmatail: 0x%016llx\n", reg);
6825
6826 reg = qib_read_kreg_port(ppd, krp_senddmahead);
6827 qib_dev_porterr(ppd->dd, ppd->port,
6828 "SDMA senddmahead: 0x%016llx\n", reg);
6829
6830 reg = qib_read_kreg_port(ppd, krp_senddmaheadaddr);
6831 qib_dev_porterr(ppd->dd, ppd->port,
6832 "SDMA senddmaheadaddr: 0x%016llx\n", reg);
6833
6834 reg = qib_read_kreg_port(ppd, krp_senddmalengen);
6835 qib_dev_porterr(ppd->dd, ppd->port,
6836 "SDMA senddmalengen: 0x%016llx\n", reg);
6837
6838 reg = qib_read_kreg_port(ppd, krp_senddmadesccnt);
6839 qib_dev_porterr(ppd->dd, ppd->port,
6840 "SDMA senddmadesccnt: 0x%016llx\n", reg);
6841
6842 reg = qib_read_kreg_port(ppd, krp_senddmaidlecnt);
6843 qib_dev_porterr(ppd->dd, ppd->port,
6844 "SDMA senddmaidlecnt: 0x%016llx\n", reg);
6845
6846 reg = qib_read_kreg_port(ppd, krp_senddmaprioritythld);
6847 qib_dev_porterr(ppd->dd, ppd->port,
6848 "SDMA senddmapriorityhld: 0x%016llx\n", reg);
6849
6850 reg = qib_read_kreg_port(ppd, krp_senddmareloadcnt);
6851 qib_dev_porterr(ppd->dd, ppd->port,
6852 "SDMA senddmareloadcnt: 0x%016llx\n", reg);
6853
6854 dump_sdma_state(ppd);
6855}
6856
6857static struct sdma_set_state_action sdma_7322_action_table[] = {
6858 [qib_sdma_state_s00_hw_down] = {
6859 .go_s99_running_tofalse = 1,
6860 .op_enable = 0,
6861 .op_intenable = 0,
6862 .op_halt = 0,
6863 .op_drain = 0,
6864 },
6865 [qib_sdma_state_s10_hw_start_up_wait] = {
6866 .op_enable = 0,
6867 .op_intenable = 1,
6868 .op_halt = 1,
6869 .op_drain = 0,
6870 },
6871 [qib_sdma_state_s20_idle] = {
6872 .op_enable = 1,
6873 .op_intenable = 1,
6874 .op_halt = 1,
6875 .op_drain = 0,
6876 },
6877 [qib_sdma_state_s30_sw_clean_up_wait] = {
6878 .op_enable = 0,
6879 .op_intenable = 1,
6880 .op_halt = 1,
6881 .op_drain = 0,
6882 },
6883 [qib_sdma_state_s40_hw_clean_up_wait] = {
6884 .op_enable = 1,
6885 .op_intenable = 1,
6886 .op_halt = 1,
6887 .op_drain = 0,
6888 },
6889 [qib_sdma_state_s50_hw_halt_wait] = {
6890 .op_enable = 1,
6891 .op_intenable = 1,
6892 .op_halt = 1,
6893 .op_drain = 1,
6894 },
6895 [qib_sdma_state_s99_running] = {
6896 .op_enable = 1,
6897 .op_intenable = 1,
6898 .op_halt = 0,
6899 .op_drain = 0,
6900 .go_s99_running_totrue = 1,
6901 },
6902};
6903
6904static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
6905{
6906 ppd->sdma_state.set_state_action = sdma_7322_action_table;
6907}
6908
6909static int init_sdma_7322_regs(struct qib_pportdata *ppd)
6910{
6911 struct qib_devdata *dd = ppd->dd;
6912 unsigned lastbuf, erstbuf;
6913 u64 senddmabufmask[3] = { 0 };
6914 int n, ret = 0;
6915
6916 qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
6917 qib_sdma_7322_setlengen(ppd);
6918 qib_sdma_update_7322_tail(ppd, 0);
6919 qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
6920 qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
6921 qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
6922
6923 if (dd->num_pports)
6924 n = dd->cspec->sdmabufcnt / dd->num_pports;
6925 else
6926 n = dd->cspec->sdmabufcnt;
6927 erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
6928 ((dd->num_pports == 1 || ppd->port == 2) ? n :
6929 dd->cspec->sdmabufcnt);
6930 lastbuf = erstbuf + n;
6931
6932 ppd->sdma_state.first_sendbuf = erstbuf;
6933 ppd->sdma_state.last_sendbuf = lastbuf;
6934 for (; erstbuf < lastbuf; ++erstbuf) {
6935 unsigned word = erstbuf / BITS_PER_LONG;
6936 unsigned bit = erstbuf & (BITS_PER_LONG - 1);
6937
6938 BUG_ON(word >= 3);
6939 senddmabufmask[word] |= 1ULL << bit;
6940 }
6941 qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
6942 qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
6943 qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
6944 return ret;
6945}
6946
6947
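/*
 * Return the current SDMA hardware head, sanity-checked against the
 * software head and tail; fall back to the chip register (and then to
 * the software head) if the DMA'ed value looks implausible.
 */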
6948static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
6949{
6950 struct qib_devdata *dd = ppd->dd;
6951 int sane;
6952 int use_dmahead;
6953 u16 swhead;
6954 u16 swtail;
6955 u16 cnt;
6956 u16 hwhead;
6957
6958 use_dmahead = __qib_sdma_running(ppd) &&
6959 (dd->flags & QIB_HAS_SDMA_TIMEOUT);
6960retry:
6961 hwhead = use_dmahead ?
6962 (u16) le64_to_cpu(*ppd->sdma_head_dma) :
6963 (u16) qib_read_kreg_port(ppd, krp_senddmahead);
6964
6965 swhead = ppd->sdma_descq_head;
6966 swtail = ppd->sdma_descq_tail;
6967 cnt = ppd->sdma_descq_cnt;
6968
	if (swhead < swtail)
		/* not wrapped */
		sane = (hwhead >= swhead) && (hwhead <= swtail);
	else if (swhead > swtail)
		/* wrapped around */
		sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
			(hwhead <= swtail);
	else
		/* empty */
		sane = (hwhead == swhead);

	if (unlikely(!sane)) {
		if (use_dmahead) {
			/* try one more time, directly from the register */
			use_dmahead = 0;
			goto retry;
		}
		/* not sane; proceed as if no progress was made */
		hwhead = swhead;
	}
6988 }
6989
6990 return hwhead;
6991}
6992
6993static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
6994{
6995 u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
6996
6997 return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
6998 (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
6999 !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
7000 !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
7001}
7002
7008
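/*
 * Build the PBC control bits: a static-rate delay when the destination
 * rate (srate) is slower than this port's current rate, plus the
 * VL15/VL and port-select fields.
 */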
7009static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
7010 u8 srate, u8 vl)
7011{
7012 u8 snd_mult = ppd->delay_mult;
7013 u8 rcv_mult = ib_rate_to_delay[srate];
7014 u32 ret;
7015
7016 ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
7017
7018
7019 if (vl == 15)
7020 ret |= PBC_7322_VL15_SEND_CTRL;
7021 else
7022 ret |= vl << PBC_VL_NUM_LSB;
7023 ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
7024
7025 return ret;
7026}
7027
7034
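/*
 * Enable the per-port VL15 send buffers for use; they follow
 * immediately after the regular 2K and 4K buffers.
 */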
7035static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
7036{
7037 unsigned vl15bufs;
7038
7039 vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
7040 qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
7041 TXCHK_CHG_TYPE_KERN, NULL);
7042}
7043
7044static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
7045{
7046 if (rcd->ctxt < NUM_IB_PORTS) {
7047 if (rcd->dd->num_pports > 1) {
7048 rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
7049 rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
7050 } else {
7051 rcd->rcvegrcnt = KCTXT0_EGRCNT;
7052 rcd->rcvegr_tid_base = 0;
7053 }
7054 } else {
7055 rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
7056 rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
7057 (rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
7058 }
7059}
7060
7061#define QTXSLEEPS 5000
7062static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
7063 u32 len, u32 which, struct qib_ctxtdata *rcd)
7064{
7065 int i;
7066 const int last = start + len - 1;
7067 const int lastr = last / BITS_PER_LONG;
7068 u32 sleeps = 0;
7069 int wait = rcd != NULL;
7070 unsigned long flags;
7071
7072 while (wait) {
7073 unsigned long shadow = 0;
7074 int cstart, previ = -1;
7075
7083
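		/*
		 * If any buffer in the range is still marked busy in the
		 * DMA'ed shadow of the PIO-avail registers, blip the avail
		 * update and sleep (bounded by QTXSLEEPS) before changing
		 * its checking.
		 */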
7084 for (cstart = start; cstart <= last; cstart++) {
7085 i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
7086 / BITS_PER_LONG;
7087 if (i != previ) {
7088 shadow = (unsigned long)
7089 le64_to_cpu(dd->pioavailregs_dma[i]);
7090 previ = i;
7091 }
7092 if (test_bit(((2 * cstart) +
7093 QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
7094 % BITS_PER_LONG, &shadow))
7095 break;
7096 }
7097
7098 if (cstart > last)
7099 break;
7100
7101 if (sleeps == QTXSLEEPS)
7102 break;
7103
7104 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7105 sleeps++;
7106 msleep(20);
7107 }
7108
7109 switch (which) {
7110 case TXCHK_CHG_TYPE_DIS1:
7111
7112
7113
7114
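		/* disable checking on a range; used by diags */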
7115 for (i = start; i <= last; i++)
7116 clear_bit(i, dd->cspec->sendchkenable);
7117 break;
7118
7119 case TXCHK_CHG_TYPE_ENAB1:
7120
7121
7122
7123
7124
7125
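		/*
		 * (re)enable checking on a range; read scratch first so we
		 * know prior writes actually reached the chip, rather than
		 * just being flushed from the processor.
		 */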
7126 qib_read_kreg32(dd, kr_scratch);
7127 for (i = start; i <= last; i++)
7128 set_bit(i, dd->cspec->sendchkenable);
7129 break;
7130
7131 case TXCHK_CHG_TYPE_KERN:
7132
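		/* buffers now kernel-owned: IB checking on, GRH checking off */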
7133 for (i = start; i <= last; i++) {
7134 set_bit(i, dd->cspec->sendibchk);
7135 clear_bit(i, dd->cspec->sendgrhchk);
7136 }
7137 spin_lock_irqsave(&dd->uctxt_lock, flags);
7138
7139 for (i = dd->first_user_ctxt;
7140 dd->cspec->updthresh != dd->cspec->updthresh_dflt
7141 && i < dd->cfgctxts; i++)
7142 if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
7143 ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
7144 < dd->cspec->updthresh_dflt)
7145 break;
7146 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
7147 if (i == dd->cfgctxts) {
7148 spin_lock_irqsave(&dd->sendctrl_lock, flags);
7149 dd->cspec->updthresh = dd->cspec->updthresh_dflt;
7150 dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
7151 dd->sendctrl |= (dd->cspec->updthresh &
7152 SYM_RMASK(SendCtrl, AvailUpdThld)) <<
7153 SYM_LSB(SendCtrl, AvailUpdThld);
7154 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7155 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7156 }
7157 break;
7158
7159 case TXCHK_CHG_TYPE_USER:
7160
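		/* buffers handed to a user process: GRH checking on, IB off */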
7161 for (i = start; i <= last; i++) {
7162 clear_bit(i, dd->cspec->sendibchk);
7163 set_bit(i, dd->cspec->sendgrhchk);
7164 }
7165 spin_lock_irqsave(&dd->sendctrl_lock, flags);
7166 if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
7167 / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
7168 dd->cspec->updthresh = (rcd->piocnt /
7169 rcd->subctxt_cnt) - 1;
7170 dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
7171 dd->sendctrl |= (dd->cspec->updthresh &
7172 SYM_RMASK(SendCtrl, AvailUpdThld))
7173 << SYM_LSB(SendCtrl, AvailUpdThld);
7174 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7175 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7176 } else
7177 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7178 break;
7179
7180 default:
7181 break;
7182 }
7183
7184 for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
7185 qib_write_kreg(dd, kr_sendcheckmask + i,
7186 dd->cspec->sendchkenable[i]);
7187
7188 for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
7189 qib_write_kreg(dd, kr_sendgrhcheckmask + i,
7190 dd->cspec->sendgrhchk[i]);
7191 qib_write_kreg(dd, kr_sendibpktmask + i,
7192 dd->cspec->sendibchk[i]);
7193 }
7194
7195
7196
7197
7198
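	/*
	 * Be sure whatever we did was seen by the chip and acted upon,
	 * before we return.
	 */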
7199 qib_read_kreg32(dd, kr_scratch);
7200}
7201
7202
7203
7204static void writescratch(struct qib_devdata *dd, u32 val)
7205{
7206 qib_write_kreg(dd, kr_scratch, val);
7207}
7208
7209
7210static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
7211{
7212 return -ENXIO;
7213}
7214
7225
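/**
 * qib_init_iba7322_funcs - set up the chip-specific function pointers
 * @pdev: the pci_dev for this qlogic_ib device
 * @ent: pci_device_id struct for this device
 *
 * Allocates, initializes, and returns the devdata struct for this
 * device instance.  Called directly at init to set up the
 * chip-specific function pointers for later use.
 */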
7226struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
7227 const struct pci_device_id *ent)
7228{
7229 struct qib_devdata *dd;
7230 int ret, i;
7231 u32 tabsize, actual_cnt = 0;
7232
7233 dd = qib_alloc_devdata(pdev,
7234 NUM_IB_PORTS * sizeof(struct qib_pportdata) +
7235 sizeof(struct qib_chip_specific) +
7236 NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
7237 if (IS_ERR(dd))
7238 goto bail;
7239
7240 dd->f_bringup_serdes = qib_7322_bringup_serdes;
7241 dd->f_cleanup = qib_setup_7322_cleanup;
7242 dd->f_clear_tids = qib_7322_clear_tids;
7243 dd->f_free_irq = qib_7322_free_irq;
7244 dd->f_get_base_info = qib_7322_get_base_info;
7245 dd->f_get_msgheader = qib_7322_get_msgheader;
7246 dd->f_getsendbuf = qib_7322_getsendbuf;
7247 dd->f_gpio_mod = gpio_7322_mod;
7248 dd->f_eeprom_wen = qib_7322_eeprom_wen;
7249 dd->f_hdrqempty = qib_7322_hdrqempty;
7250 dd->f_ib_updown = qib_7322_ib_updown;
7251 dd->f_init_ctxt = qib_7322_init_ctxt;
7252 dd->f_initvl15_bufs = qib_7322_initvl15_bufs;
7253 dd->f_intr_fallback = qib_7322_intr_fallback;
7254 dd->f_late_initreg = qib_late_7322_initreg;
7255 dd->f_setpbc_control = qib_7322_setpbc_control;
7256 dd->f_portcntr = qib_portcntr_7322;
7257 dd->f_put_tid = qib_7322_put_tid;
7258 dd->f_quiet_serdes = qib_7322_mini_quiet_serdes;
7259 dd->f_rcvctrl = rcvctrl_7322_mod;
7260 dd->f_read_cntrs = qib_read_7322cntrs;
7261 dd->f_read_portcntrs = qib_read_7322portcntrs;
7262 dd->f_reset = qib_do_7322_reset;
7263 dd->f_init_sdma_regs = init_sdma_7322_regs;
7264 dd->f_sdma_busy = qib_sdma_7322_busy;
7265 dd->f_sdma_gethead = qib_sdma_7322_gethead;
7266 dd->f_sdma_sendctrl = qib_7322_sdma_sendctrl;
7267 dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
7268 dd->f_sdma_update_tail = qib_sdma_update_7322_tail;
7269 dd->f_sendctrl = sendctrl_7322_mod;
7270 dd->f_set_armlaunch = qib_set_7322_armlaunch;
7271 dd->f_set_cntr_sample = qib_set_cntr_7322_sample;
7272 dd->f_iblink_state = qib_7322_iblink_state;
7273 dd->f_ibphys_portstate = qib_7322_phys_portstate;
7274 dd->f_get_ib_cfg = qib_7322_get_ib_cfg;
7275 dd->f_set_ib_cfg = qib_7322_set_ib_cfg;
7276 dd->f_set_ib_loopback = qib_7322_set_loopback;
7277 dd->f_get_ib_table = qib_7322_get_ib_table;
7278 dd->f_set_ib_table = qib_7322_set_ib_table;
7279 dd->f_set_intr_state = qib_7322_set_intr_state;
7280 dd->f_setextled = qib_setup_7322_setextled;
7281 dd->f_txchk_change = qib_7322_txchk_change;
7282 dd->f_update_usrhead = qib_update_7322_usrhead;
7283 dd->f_wantpiobuf_intr = qib_wantpiobuf_7322_intr;
7284 dd->f_xgxs_reset = qib_7322_mini_pcs_reset;
7285 dd->f_sdma_hw_clean_up = qib_7322_sdma_hw_clean_up;
7286 dd->f_sdma_hw_start_up = qib_7322_sdma_hw_start_up;
7287 dd->f_sdma_init_early = qib_7322_sdma_init_early;
7288 dd->f_writescratch = writescratch;
7289 dd->f_tempsense_rd = qib_7322_tempsense_rd;
7290#ifdef CONFIG_INFINIBAND_QIB_DCA
7291 dd->f_notify_dca = qib_7322_notify_dca;
7292#endif
7298
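	/*
	 * Do remaining PCIe setup and save PCIe values in dd.
	 * Any error printing is already done by the init code.
	 * On return, we have the chip mapped, but chip registers
	 * are not set up until the start of qib_init_7322_variables.
	 */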
7299 ret = qib_pcie_ddinit(dd, pdev, ent);
7300 if (ret < 0)
7301 goto bail_free;
7302
7303
7304 ret = qib_init_7322_variables(dd);
7305 if (ret)
7306 goto bail_cleanup;
7307
7308 if (qib_mini_init || !dd->num_pports)
7309 goto bail;
7310
7316
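	/*
	 * Determine how many interrupt vectors we want: one per entry in
	 * irq_table that applies to our port count, plus one per kernel
	 * receive context.
	 */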
7317 tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
7318 for (i = 0; i < tabsize; i++)
7319 if ((i < ARRAY_SIZE(irq_table) &&
7320 irq_table[i].port <= dd->num_pports) ||
7321 (i >= ARRAY_SIZE(irq_table) &&
7322 dd->rcd[i - ARRAY_SIZE(irq_table)]))
7323 actual_cnt++;
7324
7325 if (qib_krcvq01_no_msi)
7326 actual_cnt -= dd->num_pports;
7327
7328 tabsize = actual_cnt;
7329 dd->cspec->msix_entries = kcalloc(tabsize,
7330 sizeof(struct qib_msix_entry),
7331 GFP_KERNEL);
7332 if (!dd->cspec->msix_entries)
7333 tabsize = 0;
7334
7335 if (qib_pcie_params(dd, 8, &tabsize))
7336 qib_dev_err(dd,
7337 "Failed to setup PCIe or interrupts; continuing anyway\n");
7338
7339 dd->cspec->num_msix_entries = tabsize;
7340
7341
7342 qib_setup_7322_interrupt(dd, 1);
7343
7344
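	/* clear the diag-control register, in case diags had crashed */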
7345 qib_write_kreg(dd, kr_hwdiagctrl, 0);
7346#ifdef CONFIG_INFINIBAND_QIB_DCA
7347 if (!dca_add_requester(&pdev->dev)) {
7348 qib_devinfo(dd->pcidev, "DCA enabled\n");
7349 dd->flags |= QIB_DCA_ENABLED;
7350 qib_setup_dca(dd);
7351 }
7352#endif
7353 goto bail;
7354
7355bail_cleanup:
7356 qib_pcie_ddcleanup(dd);
7357bail_free:
7358 qib_free_devdata(dd);
7359 dd = ERR_PTR(ret);
7360bail:
7361 return dd;
7362}
7363
7370
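/*
 * Bit positions of the amp, main, post, and pre fields packed into
 * each entry of the TxDDS (Tx de-emphasis) tables below.
 */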
7371#define DDS_ENT_AMP_LSB 14
7372#define DDS_ENT_MAIN_LSB 9
7373#define DDS_ENT_POST_LSB 5
7374#define DDS_ENT_PRE_XTRA_LSB 3
7375#define DDS_ENT_PRE_LSB 0
7376
7377
7378
7379
7380
7381
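/*
 * Write one entry of the per-port TxDDS table: ridx selects the entry,
 * tp supplies the amp/main/pre/post values.
 */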
7382static void set_txdds(struct qib_pportdata *ppd, int ridx,
7383 const struct txdds_ent *tp)
7384{
7385 struct qib_devdata *dd = ppd->dd;
7386 u32 pack_ent;
7387 int regidx;
7388
7389
7390 regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
7391
7392
7393
7394
7395
7396 if (ppd->hw_pidx)
7397 regidx += (dd->palign / sizeof(u64));
7398
7399 pack_ent = tp->amp << DDS_ENT_AMP_LSB;
7400 pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
7401 pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
7402 pack_ent |= tp->post << DDS_ENT_POST_LSB;
7403 qib_write_kreg(dd, regidx, pack_ent);
7404
7405 qib_write_kreg(ppd->dd, kr_scratch, 0);
7406}
7407
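/*
 * Known cables, keyed by QSFP vendor OUI and (for some) part number;
 * each entry gives the SDR, DDR, and QDR Tx serdes settings to use.
 */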
7408static const struct vendor_txdds_ent vendor_txdds[] = {
7409 {
7410 { 0x41, 0x50, 0x48 }, "584470002 ",
7411 { 10, 0, 0, 5 }, { 10, 0, 0, 9 }, { 7, 1, 0, 13 },
7412 },
7413 {
7414 { 0x41, 0x50, 0x48 }, "584470004 ",
7415 { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 7, 15 },
7416 },
7417 {
7418 { 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
7419 { 0, 0, 0, 3 }, { 0, 0, 0, 4 }, { 0, 0, 0, 13 },
7420 },
7421 {
7422 { 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
7423 { 0, 0, 0, 1 }, { 0, 0, 0, 5 }, { 0, 0, 0, 11 },
7424 },
7425 {
7426 { 0x00, 0x90, 0x65 }, NULL,
7427 { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 0, 0, 12 },
7428 },
7429 {
7430 { 0x00, 0x21, 0x77 }, "QSN3300-1 ",
7431 { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 0, 15 },
7432 },
7433 {
7434 { 0x00, 0x21, 0x77 }, "QSN3300-2 ",
7435 { 0, 0, 0, 8 }, { 0, 0, 0, 10 }, { 0, 1, 7, 15 },
7436 },
7437 {
7438 { 0x00, 0x21, 0x77 }, "QSN3800-1 ",
7439 { 0, 0, 0, 6 }, { 0, 0, 0, 8 }, { 0, 1, 0, 15 },
7440 },
7441 {
7442 { 0x00, 0x21, 0x77 }, "QSN3800-3 ",
7443 { 0, 0, 0, 9 }, { 0, 0, 0, 13 }, { 0, 1, 7, 15 },
7444 },
7445 {
7446 { 0x00, 0x21, 0x77 }, "QSN7000-5 ",
7447 { 0, 0, 0, 7 }, { 0, 0, 0, 9 }, { 0, 1, 3, 15 },
7448 },
7449 {
7450 { 0x00, 0x21, 0x77 }, "QSN7000-7 ",
7451 { 0, 0, 0, 9 }, { 0, 0, 0, 11 }, { 0, 2, 6, 15 },
7452 },
7453 {
7454 { 0x00, 0x21, 0x77 }, "QSN7600-5 ",
7455 { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 9, 13 },
7456 },
7457 {
7458 { 0x00, 0x21, 0x77 }, "QSN7600-7 ",
7459 { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 10, 1, 8, 15 },
7460 },
7461 {
7462 { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
7463 { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 3, 0, 9 },
7464 },
7465 {
7466 { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
7467 { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 2, 0, 2 },
7468 },
7469 {
7470 { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
7471 { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 1, 0, 3 },
7472 },
7473 {
7474 { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
7475 { 0, 0, 0, 6 }, { 0, 0, 0, 6 }, { 0, 2, 0, 8 },
7476 },
7477 {
7478 { 0x00, 0x30, 0xB4 }, NULL,
7479 { 0, 0, 0, 6 }, { 0, 0, 0, 5 }, { 0, 2, 0, 5 },
7480 },
7481 {
7482 { 0x00, 0x25, 0x63 }, NULL,
7483 { 0, 0, 0, 5 }, { 0, 0, 0, 8 }, { 0, 2, 0, 12 },
7484 },
7485 {
7486 { 0x00, 0x09, 0x3A }, "74763-0025 ",
7487 { 2, 2, 6, 15 }, { 2, 2, 6, 15 }, { 2, 2, 6, 15 },
7488 },
7489 {
7490 { 0x00, 0x09, 0x3A }, "74757-2201 ",
7491 { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 1, 15 },
7492 },
7493};
7494
7495static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
7496
7497 { 2, 2, 15, 6 },
7498 { 0, 0, 0, 1 },
7499 { 0, 0, 0, 2 },
7500 { 0, 0, 0, 3 },
7501 { 0, 0, 0, 4 },
7502 { 0, 0, 0, 5 },
7503 { 0, 0, 0, 6 },
7504 { 0, 0, 0, 7 },
7505 { 0, 0, 0, 8 },
7506 { 0, 0, 0, 9 },
7507 { 0, 0, 0, 10 },
7508 { 0, 0, 0, 11 },
7509 { 0, 0, 0, 12 },
7510 { 0, 0, 0, 13 },
7511 { 0, 0, 0, 14 },
7512 { 0, 0, 0, 15 },
7513};
7514
7515static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
7516
7517 { 2, 2, 15, 6 },
7518 { 0, 0, 0, 8 },
7519 { 0, 0, 0, 8 },
7520 { 0, 0, 0, 9 },
7521 { 0, 0, 0, 9 },
7522 { 0, 0, 0, 10 },
7523 { 0, 0, 0, 10 },
7524 { 0, 0, 0, 11 },
7525 { 0, 0, 0, 11 },
7526 { 0, 0, 0, 12 },
7527 { 0, 0, 0, 12 },
7528 { 0, 0, 0, 13 },
7529 { 0, 0, 0, 13 },
7530 { 0, 0, 0, 14 },
7531 { 0, 0, 0, 14 },
7532 { 0, 0, 0, 15 },
7533};
7534
7535static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
7536
7537 { 2, 2, 15, 6 },
7538 { 0, 1, 0, 7 },
7539 { 0, 1, 0, 9 },
7540 { 0, 1, 0, 11 },
7541 { 0, 1, 0, 13 },
7542 { 0, 1, 0, 15 },
7543 { 0, 1, 3, 15 },
7544 { 0, 1, 7, 15 },
7545 { 0, 1, 7, 15 },
7546 { 0, 1, 8, 15 },
7547 { 0, 1, 9, 15 },
7548 { 0, 1, 10, 15 },
7549 { 0, 2, 6, 15 },
7550 { 0, 2, 7, 15 },
7551 { 0, 2, 8, 15 },
7552 { 0, 2, 9, 15 },
7553};
7554
7560
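/*
 * Extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
 */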
7561static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
7562
7563 { 0, 0, 0, 1 },
7564 { 0, 0, 0, 1 },
7565 { 0, 0, 0, 2 },
7566 { 0, 0, 0, 2 },
7567 { 0, 0, 0, 3 },
7568 { 0, 0, 0, 4 },
7569 { 0, 1, 4, 15 },
7570 { 0, 1, 3, 15 },
7571 { 0, 1, 0, 12 },
7572 { 0, 1, 0, 11 },
7573 { 0, 1, 0, 9 },
7574 { 0, 1, 0, 14 },
7575 { 0, 1, 2, 15 },
7576 { 0, 1, 0, 11 },
7577 { 0, 1, 0, 7 },
7578 { 0, 1, 0, 9 },
7579 { 0, 1, 0, 6 },
7580 { 0, 1, 0, 8 },
7581};
7582
7583static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
7584
7585 { 0, 0, 0, 7 },
7586 { 0, 0, 0, 7 },
7587 { 0, 0, 0, 8 },
7588 { 0, 0, 0, 8 },
7589 { 0, 0, 0, 9 },
7590 { 0, 0, 0, 10 },
7591 { 0, 1, 4, 15 },
7592 { 0, 1, 3, 15 },
7593 { 0, 1, 0, 12 },
7594 { 0, 1, 0, 11 },
7595 { 0, 1, 0, 9 },
7596 { 0, 1, 0, 14 },
7597 { 0, 1, 2, 15 },
7598 { 0, 1, 0, 11 },
7599 { 0, 1, 0, 7 },
7600 { 0, 1, 0, 9 },
7601 { 0, 1, 0, 6 },
7602 { 0, 1, 0, 8 },
7603};
7604
7605static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
7606
7607 { 0, 1, 0, 4 },
7608 { 0, 1, 0, 5 },
7609 { 0, 1, 0, 6 },
7610 { 0, 1, 0, 8 },
7611 { 0, 1, 0, 10 },
7612 { 0, 1, 0, 12 },
7613 { 0, 1, 4, 15 },
7614 { 0, 1, 3, 15 },
7615 { 0, 1, 0, 12 },
7616 { 0, 1, 0, 11 },
7617 { 0, 1, 0, 9 },
7618 { 0, 1, 0, 14 },
7619 { 0, 1, 2, 15 },
7620 { 0, 1, 0, 11 },
7621 { 0, 1, 0, 7 },
7622 { 0, 1, 0, 9 },
7623 { 0, 1, 0, 6 },
7624 { 0, 1, 0, 8 },
7625};
7626
7627static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
7628
7629 { 0, 0, 0, 0 },
7630 { 0, 0, 0, 6 },
7631};
7632
7633static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
7634 unsigned atten)
7635{
7636
7637
7638
7639
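	/*
	 * The attenuation table starts at 2dB for entry 1,
	 * with entry 0 being the loopback entry.
	 */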
7640 if (atten <= 2)
7641 atten = 1;
7642 else if (atten > TXDDS_TABLE_SZ)
7643 atten = TXDDS_TABLE_SZ - 1;
7644 else
7645 atten--;
7646 return txdds + atten;
7647}
7648
7649
7650
7651
7652
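/*
 * Choose the best Tx serdes settings for the current cable/board: the
 * vendor table first, then active-cable or attenuation-based defaults,
 * then any module-parameter override, with a long-cable fallback.
 */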
7653static void find_best_ent(struct qib_pportdata *ppd,
7654 const struct txdds_ent **sdr_dds,
7655 const struct txdds_ent **ddr_dds,
7656 const struct txdds_ent **qdr_dds, int override)
7657{
7658 struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
7659 int idx;
7660
7661
7662 for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
7663 const struct vendor_txdds_ent *v = vendor_txdds + idx;
7664
7665 if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
7666 (!v->partnum ||
7667 !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
7668 *sdr_dds = &v->sdr;
7669 *ddr_dds = &v->ddr;
7670 *qdr_dds = &v->qdr;
7671 return;
7672 }
7673 }
7674
7675
7676
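	/*
	 * Active cables don't have attenuation, so we only set serdes
	 * settings to account for the attenuation of the board traces.
	 */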
7677 if (!override && QSFP_IS_ACTIVE(qd->tech)) {
7678 *sdr_dds = txdds_sdr + ppd->dd->board_atten;
7679 *ddr_dds = txdds_ddr + ppd->dd->board_atten;
7680 *qdr_dds = txdds_qdr + ppd->dd->board_atten;
7681 return;
7682 }
7683
7684 if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
7685 qd->atten[1])) {
7686 *sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
7687 *ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
7688 *qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
7689 return;
7690 } else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
7691
7692
7693
7694
7695
7696
7697 idx = ppd->cpspec->no_eep;
7698 *sdr_dds = &txdds_sdr[idx];
7699 *ddr_dds = &txdds_ddr[idx];
7700 *qdr_dds = &txdds_qdr[idx];
7701 } else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
7702
7703 idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
7704 *sdr_dds = &txdds_extra_sdr[idx];
7705 *ddr_dds = &txdds_extra_ddr[idx];
7706 *qdr_dds = &txdds_extra_qdr[idx];
7707 } else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
7708 ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
7709 TXDDS_MFG_SZ)) {
7710 idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
7711 pr_info("IB%u:%u use idx %u into txdds_mfg\n",
7712 ppd->dd->unit, ppd->port, idx);
7713 *sdr_dds = &txdds_extra_mfg[idx];
7714 *ddr_dds = &txdds_extra_mfg[idx];
7715 *qdr_dds = &txdds_extra_mfg[idx];
7716 } else {
7717
7718 *sdr_dds = txdds_sdr + qib_long_atten;
7719 *ddr_dds = txdds_ddr + qib_long_atten;
7720 *qdr_dds = txdds_qdr + qib_long_atten;
7721 }
7722}
7723
7724static void init_txdds_table(struct qib_pportdata *ppd, int override)
7725{
7726 const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
7727 struct txdds_ent *dds;
7728 int idx;
7729 int single_ent = 0;
7730
7731 find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
7732
7733
7734 if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
7735 single_ent = 1;
7736
7737
7738 set_txdds(ppd, 0, sdr_dds);
7739 set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
7740 set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
7741 if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
7742 QIBL_LINKACTIVE)) {
7743 dds = (struct txdds_ent *)(ppd->link_speed_active ==
7744 QIB_IB_QDR ? qdr_dds :
7745 (ppd->link_speed_active ==
7746 QIB_IB_DDR ? ddr_dds : sdr_dds));
7747 write_tx_serdes_param(ppd, dds);
7748 }
7749
7750
7751 for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
7752 set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
7753 set_txdds(ppd, idx + TXDDS_TABLE_SZ,
7754 single_ent ? ddr_dds : txdds_ddr + idx);
7755 set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
7756 single_ent ? qdr_dds : txdds_qdr + idx);
7757 }
7758}
7759
7760#define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
7761#define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
7762#define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
7763#define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
7764#define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
7765#define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
7766#define AHB_TRANS_TRIES 10
7767
7768
7769
7770
7771
7772
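/*
 * Read-modify-write a register in SerDes management (AHB) space: quad
 * selects the target, mask selects which bits to change.  Returns the
 * value written, or 0xBAD0BAD if the interface never becomes ready.
 */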
7773static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
7774 u32 data, u32 mask)
7775{
7776 u32 rd_data, wr_data, sz_mask;
7777 u64 trans, acc, prev_acc;
7778 u32 ret = 0xBAD0BAD;
7779 int tries;
7780
7781 prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
7782
7783 acc = (quad << 1) | 1;
7784 qib_write_kreg(dd, KR_AHB_ACC, acc);
7785
7786 for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7787 trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7788 if (trans & AHB_TRANS_RDY)
7789 break;
7790 }
7791 if (tries >= AHB_TRANS_TRIES) {
7792 qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
7793 goto bail;
7794 }
7795
7796
7797
7798
7799 sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
7800 wr_data = data & mask & sz_mask;
7801 if ((~mask & sz_mask) != 0) {
7802 trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7803 qib_write_kreg(dd, KR_AHB_TRANS, trans);
7804
7805 for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7806 trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7807 if (trans & AHB_TRANS_RDY)
7808 break;
7809 }
7810 if (tries >= AHB_TRANS_TRIES) {
7811 qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
7812 AHB_TRANS_TRIES);
7813 goto bail;
7814 }
7815
7816 trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7817 rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
7818 wr_data |= (rd_data & ~mask & sz_mask);
7819 }
7820
7821
7822 if (mask & sz_mask) {
7823 trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7824 trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
7825 trans |= AHB_WR;
7826 qib_write_kreg(dd, KR_AHB_TRANS, trans);
7827
7828 for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7829 trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7830 if (trans & AHB_TRANS_RDY)
7831 break;
7832 }
7833 if (tries >= AHB_TRANS_TRIES) {
7834 qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
7835 AHB_TRANS_TRIES);
7836 goto bail;
7837 }
7838 }
7839 ret = wr_data;
7840bail:
7841 qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
7842 return ret;
7843}
7844
7845static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
7846 unsigned mask)
7847{
7848 struct qib_devdata *dd = ppd->dd;
7849 int chan;
7850 u32 rbc;
7851
7852 for (chan = 0; chan < SERDES_CHANS; ++chan) {
7853 ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7854 data, mask);
7855 rbc = ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7856 addr, 0, 0);
7857 }
7858}
7859
7860static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
7861{
7862 u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
7863 u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
7864
7865 if (enable && !state) {
7866 pr_info("IB%u:%u Turning LOS on\n",
7867 ppd->dd->unit, ppd->port);
7868 data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7869 } else if (!enable && state) {
7870 pr_info("IB%u:%u Turning LOS off\n",
7871 ppd->dd->unit, ppd->port);
7872 data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7873 }
7874 qib_write_kreg_port(ppd, krp_serdesctrl, data);
7875}
7876
7877static int serdes_7322_init(struct qib_pportdata *ppd)
7878{
7879 int ret = 0;
7880
7881 if (ppd->dd->cspec->r1)
7882 ret = serdes_7322_init_old(ppd);
7883 else
7884 ret = serdes_7322_init_new(ppd);
7885 return ret;
7886}
7887
7888static int serdes_7322_init_old(struct qib_pportdata *ppd)
7889{
7890 u32 le_val;
7891
7892
7893
7894
7895
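	/*
	 * Initialize the Tx DDS tables.  Also done in
	 * qib_init_7322_variables for adapters with QSFP.
	 */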
7896 init_txdds_table(ppd, 0);
7897
7898
7899 qib_write_kreg_port(ppd, krp_tx_deemph_override,
7900 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7901 reset_tx_deemphasis_override));
7902
7903
7904
7905 ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
7906
7907
7908 ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
7909
7910 ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));
7911
7912
7913 le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
7914 ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
7915
7916
7917 le_val = IS_QME(ppd->dd) ? 0 : 1;
7918 ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));
7919
7920
7921 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7922
7923
7924 ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));
7925
7926
7927
7928 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7929 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7930 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7931 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7932
7933
7934 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
7935 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
7936 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
7937 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
7938
7939
7940 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
7941
7942
7943 ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3));
7944 ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10));
7945 ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13));
7946
7947 serdes_7322_los_enable(ppd, 1);
7948
7949
7950 ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
7951
7952
7953 ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));
7954
7955
7956 le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
7957 ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
7958
7959
7960
7961
7962
7963 qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
7964 qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
7965 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
7966 ppd->dd->cspec->r1 ?
7967 QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
7968 ppd->cpspec->qdr_dfe_on = 1;
7969
7970
7971 ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7972
7973
7974 ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);
7975
7976 if (!ppd->dd->cspec->r1) {
7977 ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
7978 ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
7979 }
7980
7981
7982 ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));
7983
7984 return 0;
7985}
7986
7987static int serdes_7322_init_new(struct qib_pportdata *ppd)
7988{
7989 unsigned long tend;
7990 u32 le_val, rxcaldone;
7991 int chan, chan_done = (1 << SERDES_CHANS) - 1;
7992
7993
7994 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7995
7996
7997 qib_write_kreg_port(ppd, krp_tx_deemph_override,
7998 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7999 reset_tx_deemphasis_override));
8000
8001
8002
8003
8004 ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
8005
8006 ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
8007
8008 ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
8009
8010 ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
8011
8012 ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
8013
8014 ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
8015
8016 ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
8017
8018 ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
8019
8020 ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
8021
8022 ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
8023
8024 ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
8025
8026 ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
8027
8028 ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
8029
8030 ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
8031
8032 ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
8033
8034 ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
8035
8036 if (!ppd->dd->cspec->r1) {
8037 ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
8038 ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
8039 } else {
8040 ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
8041 }
8042
8050
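	/*
	 * Take the serdes out of reset in stages, then start the Rx
	 * calibration and poll up to 500 msec for every channel to finish.
	 */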
8051 ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
8052 msleep(20);
8053
8054 ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
8055 msleep(20);
8056
8057 ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
8058 msleep(20);
8059
8060
8061
8062 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
8063 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
8064 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
8065 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
8066
8067
8068 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
8069 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
8070 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
8071 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
8072
8073
8074 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
8075
8076
8077 ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3));
8078 ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10));
8079 ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13));
8080
8081
8082 serdes_7322_los_enable(ppd, 1);
8083
8084 ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
8085
8086
8087
8088 ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
8089
8090 ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
8091 msleep(20);
8092
8093 ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
8094 tend = jiffies + msecs_to_jiffies(500);
8095 while (chan_done && !time_is_before_jiffies(tend)) {
8096 msleep(20);
8097 for (chan = 0; chan < SERDES_CHANS; ++chan) {
8098 rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
8099 (chan + (chan >> 1)),
8100 25, 0, 0);
8101 if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
8102 (~chan_done & (1 << chan)) == 0)
8103 chan_done &= ~(1 << chan);
8104 }
8105 }
8106 if (chan_done) {
8107 pr_info("Serdes %d calibration not done after .5 sec: 0x%x\n",
8108 IBSD(ppd->hw_pidx), chan_done);
8109 } else {
8110 for (chan = 0; chan < SERDES_CHANS; ++chan) {
8111 rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
8112 (chan + (chan >> 1)),
8113 25, 0, 0);
8114 if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
8115 pr_info("Serdes %d chan %d calibration failed\n",
8116 IBSD(ppd->hw_pidx), chan);
8117 }
8118 }
8119
8120
8121 ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
8122 msleep(20);
8123
8124
8125
8126 le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
8127 ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
8128
8129 ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
8130
8131 ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
8132 msleep(20);
8133
8134 ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
8135
8136 le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
8137 ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
8138
8139 ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
8140 msleep(20);
8141
8142 ibsd_wr_allchans(ppd, 2, (15 << 5), BMASK(8, 5));
8143
8144 ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
8145
8146 ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
8147
8148 ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
8149 msleep(50);
8150
8151
8152
8153
8154 qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
8155 qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
8156 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
8157 ppd->dd->cspec->r1 ?
8158 QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
8159 ppd->cpspec->qdr_dfe_on = 1;
8160
8161 ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
8162
8163 ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
8164 msleep(20);
8165
8166 ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
8167
8168 ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
8169
8170 ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
8171
8172 ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
8173
8174
8175
8176
8177
8178 init_txdds_table(ppd, 0);
8179
8180 return 0;
8181}
8182
8183
8184
8185static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
8186{
8187 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8188 9, code << 9, 0x3f << 9);
8189}
8190
8191static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
8192 int enable, u32 tapenable)
8193{
8194 if (enable)
8195 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8196 1, 3 << 10, 0x1f << 10);
8197 else
8198 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8199 1, 0, 0x1f << 10);
8200}
8201
8202
8203static void clock_man(struct qib_pportdata *ppd, int chan)
8204{
8205 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8206 4, 0x4000, 0x4000);
8207 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8208 4, 0, 0x4000);
8209 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8210 4, 0x4000, 0x4000);
8211 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8212 4, 0, 0x4000);
8213}
8214
8215
8216
8217
8218
8219
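/*
 * Force the Tx de-emphasis overrides (amp, main, post, pre) from the
 * given DDS table entry.
 */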
8220static void write_tx_serdes_param(struct qib_pportdata *ppd,
8221 struct txdds_ent *txdds)
8222{
8223 u64 deemph;
8224
8225 deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
8226
8227 deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
8228 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
8229 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
8230 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));
8231
8232 deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8233 tx_override_deemphasis_select);
8234 deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8235 txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8236 txampcntl_d2a);
8237 deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8238 txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8239 txc0_ena);
8240 deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8241 txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8242 txcp1_ena);
8243 deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8244 txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8245 txcn1_ena);
8246 qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
8247}
8248
8249
8250
8251
8252
8253
8254static void adj_tx_serdes(struct qib_pportdata *ppd)
8255{
8256 const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
8257 struct txdds_ent *dds;
8258
8259 find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
8260 dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
8261 qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
8262 ddr_dds : sdr_dds));
8263 write_tx_serdes_param(ppd, dds);
8264}
8265
8266
8267static void force_h1(struct qib_pportdata *ppd)
8268{
8269 int chan;
8270
8271 ppd->cpspec->qdr_reforce = 0;
8272 if (!ppd->dd->cspec->r1)
8273 return;
8274
8275 for (chan = 0; chan < SERDES_CHANS; chan++) {
8276 set_man_mode_h1(ppd, chan, 1, 0);
8277 set_man_code(ppd, chan, ppd->cpspec->h1_val);
8278 clock_man(ppd, chan);
8279 set_man_mode_h1(ppd, chan, 0, 0);
8280 }
8281}
8282
8283#define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
8284#define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
8285
8286#define R_OPCODE_LSB 3
8287#define R_OP_NOP 0
8288#define R_OP_SHIFT 2
8289#define R_OP_UPDATE 3
8290#define R_TDI_LSB 2
8291#define R_TDO_LSB 1
8292#define R_RDY 1
8293
8294static int qib_r_grab(struct qib_devdata *dd)
8295{
8296 u64 val = SJA_EN;
8297
8298 qib_write_kreg(dd, kr_r_access, val);
8299 qib_read_kreg32(dd, kr_scratch);
8300 return 0;
8301}
8302
8303
8304
8305
8306static int qib_r_wait_for_rdy(struct qib_devdata *dd)
8307{
8308 u64 val;
8309 int timeout;
8310
8311 for (timeout = 0; timeout < 100 ; ++timeout) {
8312 val = qib_read_kreg32(dd, kr_r_access);
8313 if (val & R_RDY)
8314 return (val >> R_TDO_LSB) & 1;
8315 }
8316 return -1;
8317}
8318
8319static int qib_r_shift(struct qib_devdata *dd, int bisten,
8320 int len, u8 *inp, u8 *outp)
8321{
8322 u64 valbase, val;
8323 int ret, pos;
8324
8325 valbase = SJA_EN | (bisten << BISTEN_LSB) |
8326 (R_OP_SHIFT << R_OPCODE_LSB);
8327 ret = qib_r_wait_for_rdy(dd);
8328 if (ret < 0)
8329 goto bail;
8330 for (pos = 0; pos < len; ++pos) {
8331 val = valbase;
8332 if (outp) {
8333 outp[pos >> 3] &= ~(1 << (pos & 7));
8334 outp[pos >> 3] |= (ret << (pos & 7));
8335 }
8336 if (inp) {
8337 int tdi = inp[pos >> 3] >> (pos & 7);
8338
8339 val |= ((tdi & 1) << R_TDI_LSB);
8340 }
8341 qib_write_kreg(dd, kr_r_access, val);
8342 qib_read_kreg32(dd, kr_scratch);
8343 ret = qib_r_wait_for_rdy(dd);
8344 if (ret < 0)
8345 break;
8346 }
8347
8348 val = SJA_EN | (bisten << BISTEN_LSB);
8349 qib_write_kreg(dd, kr_r_access, val);
8350 qib_read_kreg32(dd, kr_scratch);
8351 ret = qib_r_wait_for_rdy(dd);
8352
8353 if (ret >= 0)
8354 ret = pos;
8355bail:
8356 return ret;
8357}
8358
8359static int qib_r_update(struct qib_devdata *dd, int bisten)
8360{
8361 u64 val;
8362 int ret;
8363
8364 val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
8365 ret = qib_r_wait_for_rdy(dd);
8366 if (ret >= 0) {
8367 qib_write_kreg(dd, kr_r_access, val);
8368 qib_read_kreg32(dd, kr_scratch);
8369 }
8370 return ret;
8371}
8372
8373#define BISTEN_PORT_SEL 15
8374#define LEN_PORT_SEL 625
8375#define BISTEN_AT 17
8376#define LEN_AT 156
8377#define BISTEN_ETM 16
8378#define LEN_ETM 632
8379
8380#define BIT2BYTE(x) (((x) + BITS_PER_BYTE - 1) / BITS_PER_BYTE)
8381
8382
8383static u8 reset_at[BIT2BYTE(LEN_AT)] = {
8384 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8385 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
8386};
8387static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
8388 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8389 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8390 0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
8391 0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
8392 0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
8393 0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
8394 0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8395 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
8396};
8397static u8 at[BIT2BYTE(LEN_AT)] = {
8398 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
8399 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
8400};
8401
8402
8403static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
8404 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8405 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8406 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8407 0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
8408 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8409 0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
8410 0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
8411 0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
8412};
8413
8414
8415static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
8416 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8417 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
8418 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8419 0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
8420 0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
8421 0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
8422 0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
8423 0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
8424};
8425
8426
8427static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
8428 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8429 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8430 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8431 0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8432 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8433 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8434 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8435 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8436};
8437
8438
8439static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
8440 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
8441 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
8442 0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8443 0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8444 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
8445 0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8446 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8447 0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
8448};
8449
8450
8451static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
8452 0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8453 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8454 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8455 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8456 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8457 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
8458 0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8459 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8460};
8461
8467
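/*
 * Load the JTAG scan chains with the IB link-recovery patterns; r1
 * hardware only.  The first single port initialized gets a per-port
 * pattern; otherwise the dual-port patterns are used.
 */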
8468static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
8469{
8470 u8 *portsel, *etm;
8471 struct qib_devdata *dd = ppd->dd;
8472
8473 if (!ppd->dd->cspec->r1)
8474 return;
8475 if (!both) {
8476 dd->cspec->recovery_ports_initted++;
8477 ppd->cpspec->recovery_init = 1;
8478 }
8479 if (!both && dd->cspec->recovery_ports_initted == 1) {
8480 portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
8481 etm = atetm_1port;
8482 } else {
8483 portsel = portsel_2port;
8484 etm = atetm_2port;
8485 }
8486
8487 if (qib_r_grab(dd) < 0 ||
8488 qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
8489 qib_r_update(dd, BISTEN_ETM) < 0 ||
8490 qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
8491 qib_r_update(dd, BISTEN_AT) < 0 ||
8492 qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
8493 portsel, NULL) < 0 ||
8494 qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
8495 qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
8496 qib_r_update(dd, BISTEN_AT) < 0 ||
8497 qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
8498 qib_r_update(dd, BISTEN_ETM) < 0)
8499 qib_dev_err(dd, "Failed IB link recovery setup\n");
8500}
8501
8502static void check_7322_rxe_status(struct qib_pportdata *ppd)
8503{
8504 struct qib_devdata *dd = ppd->dd;
8505 u64 fmask;
8506
8507 if (dd->cspec->recovery_ports_initted != 1)
8508 return;
8509 qib_write_kreg(dd, kr_control, dd->control |
8510 SYM_MASK(Control, FreezeMode));
8511 (void)qib_read_kreg64(dd, kr_scratch);
8512 udelay(3);
8513 fmask = qib_read_kreg64(dd, kr_act_fmask);
8514 if (!fmask) {
8515
8516
8517
8518
8519
8520 ppd->dd->cspec->stay_in_freeze = 1;
8521 qib_7322_set_intr_state(ppd->dd, 0);
8522 qib_write_kreg(dd, kr_fmask, 0ULL);
8523 qib_dev_err(dd, "HCA unusable until powercycled\n");
8524 return;
8525 }
8526
8527 qib_write_kreg(ppd->dd, kr_hwerrclear,
8528 SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));
8529
8530
8531 qib_write_kreg(dd, kr_control, dd->control);
8532 qib_read_kreg32(dd, kr_scratch);
8533
8534 if (ppd->link_speed_supported) {
8535 ppd->cpspec->ibcctrl_a &=
8536 ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
8537 qib_write_kreg_port(ppd, krp_ibcctrl_a,
8538 ppd->cpspec->ibcctrl_a);
8539 qib_read_kreg32(dd, kr_scratch);
8540 if (ppd->lflags & QIBL_IB_LINK_DISABLED)
8541 qib_set_ib_7322_lstate(ppd, 0,
8542 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
8543 }
8544}
8545