1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39#include <linux/interrupt.h>
40#include <linux/pci.h>
41#include <linux/delay.h>
42#include <rdma/ib_verbs.h>
43
44#include "qib.h"
45#include "qib_6120_regs.h"
46
47static void qib_6120_setup_setextled(struct qib_pportdata *, u32);
48static void sendctrl_6120_mod(struct qib_pportdata *ppd, u32 op);
49static u8 qib_6120_phys_portstate(u64);
50static u32 qib_6120_iblink_state(u64);
51
52
53
54
55
56
57
58
59#define KREG_IDX(regname) (QIB_6120_##regname##_OFFS / sizeof(u64))
60
61
62#define kr_extctrl KREG_IDX(EXTCtrl)
63#define kr_extstatus KREG_IDX(EXTStatus)
64#define kr_gpio_clear KREG_IDX(GPIOClear)
65#define kr_gpio_mask KREG_IDX(GPIOMask)
66#define kr_gpio_out KREG_IDX(GPIOOut)
67#define kr_gpio_status KREG_IDX(GPIOStatus)
68#define kr_rcvctrl KREG_IDX(RcvCtrl)
69#define kr_sendctrl KREG_IDX(SendCtrl)
70#define kr_partitionkey KREG_IDX(RcvPartitionKey)
71#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
72#define kr_ibcstatus KREG_IDX(IBCStatus)
73#define kr_ibcctrl KREG_IDX(IBCCtrl)
74#define kr_sendbuffererror KREG_IDX(SendBufErr0)
75#define kr_rcvbthqp KREG_IDX(RcvBTHQP)
76#define kr_counterregbase KREG_IDX(CntrRegBase)
77#define kr_palign KREG_IDX(PageAlign)
78#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
79#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
80#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
81#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
82#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
83#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
84#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
85#define kr_scratch KREG_IDX(Scratch)
86#define kr_sendctrl KREG_IDX(SendCtrl)
87#define kr_sendpioavailaddr KREG_IDX(SendPIOAvailAddr)
88#define kr_sendpiobufbase KREG_IDX(SendPIOBufBase)
89#define kr_sendpiobufcnt KREG_IDX(SendPIOBufCnt)
90#define kr_sendpiosize KREG_IDX(SendPIOSize)
91#define kr_sendregbase KREG_IDX(SendRegBase)
92#define kr_userregbase KREG_IDX(UserRegBase)
93#define kr_control KREG_IDX(Control)
94#define kr_intclear KREG_IDX(IntClear)
95#define kr_intmask KREG_IDX(IntMask)
96#define kr_intstatus KREG_IDX(IntStatus)
97#define kr_errclear KREG_IDX(ErrClear)
98#define kr_errmask KREG_IDX(ErrMask)
99#define kr_errstatus KREG_IDX(ErrStatus)
100#define kr_hwerrclear KREG_IDX(HwErrClear)
101#define kr_hwerrmask KREG_IDX(HwErrMask)
102#define kr_hwerrstatus KREG_IDX(HwErrStatus)
103#define kr_revision KREG_IDX(Revision)
104#define kr_portcnt KREG_IDX(PortCnt)
105#define kr_serdes_cfg0 KREG_IDX(SerdesCfg0)
106#define kr_serdes_cfg1 (kr_serdes_cfg0 + 1)
107#define kr_serdes_stat KREG_IDX(SerdesStat)
108#define kr_xgxs_cfg KREG_IDX(XGXSCfg)
109
110
111#define kr_rcvhdraddr KREG_IDX(RcvHdrAddr0)
112#define kr_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
113
114#define CREG_IDX(regname) ((QIB_6120_##regname##_OFFS - \
115 QIB_6120_LBIntCnt_OFFS) / sizeof(u64))
116
117#define cr_badformat CREG_IDX(RxBadFormatCnt)
118#define cr_erricrc CREG_IDX(RxICRCErrCnt)
119#define cr_errlink CREG_IDX(RxLinkProblemCnt)
120#define cr_errlpcrc CREG_IDX(RxLPCRCErrCnt)
121#define cr_errpkey CREG_IDX(RxPKeyMismatchCnt)
122#define cr_rcvflowctrl_err CREG_IDX(RxFlowCtrlErrCnt)
123#define cr_err_rlen CREG_IDX(RxLenErrCnt)
124#define cr_errslen CREG_IDX(TxLenErrCnt)
125#define cr_errtidfull CREG_IDX(RxTIDFullErrCnt)
126#define cr_errtidvalid CREG_IDX(RxTIDValidErrCnt)
127#define cr_errvcrc CREG_IDX(RxVCRCErrCnt)
128#define cr_ibstatuschange CREG_IDX(IBStatusChangeCnt)
129#define cr_lbint CREG_IDX(LBIntCnt)
130#define cr_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
131#define cr_invalidslen CREG_IDX(TxMaxMinLenErrCnt)
132#define cr_lbflowstall CREG_IDX(LBFlowStallCnt)
133#define cr_pktrcv CREG_IDX(RxDataPktCnt)
134#define cr_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
135#define cr_pktsend CREG_IDX(TxDataPktCnt)
136#define cr_pktsendflow CREG_IDX(TxFlowPktCnt)
137#define cr_portovfl CREG_IDX(RxP0HdrEgrOvflCnt)
138#define cr_rcvebp CREG_IDX(RxEBPCnt)
139#define cr_rcvovfl CREG_IDX(RxBufOvflCnt)
140#define cr_senddropped CREG_IDX(TxDroppedPktCnt)
141#define cr_sendstall CREG_IDX(TxFlowStallCnt)
142#define cr_sendunderrun CREG_IDX(TxUnderrunCnt)
143#define cr_wordrcv CREG_IDX(RxDwordCnt)
144#define cr_wordsend CREG_IDX(TxDwordCnt)
145#define cr_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
146#define cr_rxdroppkt CREG_IDX(RxDroppedPktCnt)
147#define cr_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
148#define cr_iblinkdown CREG_IDX(IBLinkDownedCnt)
149#define cr_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
150
151#define SYM_RMASK(regname, fldname) ((u64) \
152 QIB_6120_##regname##_##fldname##_RMASK)
153#define SYM_MASK(regname, fldname) ((u64) \
154 QIB_6120_##regname##_##fldname##_RMASK << \
155 QIB_6120_##regname##_##fldname##_LSB)
156#define SYM_LSB(regname, fldname) (QIB_6120_##regname##_##fldname##_LSB)
157
158#define SYM_FIELD(value, regname, fldname) ((u64) \
159 (((value) >> SYM_LSB(regname, fldname)) & \
160 SYM_RMASK(regname, fldname)))
161#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
162#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
163
164
165#define IB_6120_LT_STATE_DISABLED 0x00
166#define IB_6120_LT_STATE_LINKUP 0x01
167#define IB_6120_LT_STATE_POLLACTIVE 0x02
168#define IB_6120_LT_STATE_POLLQUIET 0x03
169#define IB_6120_LT_STATE_SLEEPDELAY 0x04
170#define IB_6120_LT_STATE_SLEEPQUIET 0x05
171#define IB_6120_LT_STATE_CFGDEBOUNCE 0x08
172#define IB_6120_LT_STATE_CFGRCVFCFG 0x09
173#define IB_6120_LT_STATE_CFGWAITRMT 0x0a
174#define IB_6120_LT_STATE_CFGIDLE 0x0b
175#define IB_6120_LT_STATE_RECOVERRETRAIN 0x0c
176#define IB_6120_LT_STATE_RECOVERWAITRMT 0x0e
177#define IB_6120_LT_STATE_RECOVERIDLE 0x0f
178
179
180#define IB_6120_L_STATE_DOWN 0x0
181#define IB_6120_L_STATE_INIT 0x1
182#define IB_6120_L_STATE_ARM 0x2
183#define IB_6120_L_STATE_ACTIVE 0x3
184#define IB_6120_L_STATE_ACT_DEFER 0x4
185
/*
 * Translation table from chip link-training-state codes (the
 * IB_6120_LT_STATE_* values above; presumably the link training state
 * field read from IBCStatus — confirm against the register spec) to
 * the generic IB_PHYSPORTSTATE_* values reported to upper layers.
 * Unlisted indices default to 0; codes 0x10-0x17 are unnamed and are
 * all reported as config/training.
 */
static const u8 qib_6120_physportstate[0x20] = {
	[IB_6120_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
	[IB_6120_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
	[IB_6120_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
	[IB_6120_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
	[IB_6120_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
	[IB_6120_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
	[IB_6120_LT_STATE_CFGDEBOUNCE] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_6120_LT_STATE_CFGRCVFCFG] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_6120_LT_STATE_CFGWAITRMT] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_6120_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_6120_LT_STATE_RECOVERRETRAIN] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_6120_LT_STATE_RECOVERWAITRMT] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_6120_LT_STATE_RECOVERIDLE] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
};
215
216
/*
 * Chip-specific state for the IBA6120, hung off the generic device
 * data (dd->cspec).  Mostly software shadows of chip registers (so
 * read-modify-write sequences don't need PIO reads) plus error and
 * counter bookkeeping.
 */
struct qib_chip_specific {
	u64 __iomem *cregbase;	/* counter register base; see read_6120_creg*() */
	u64 *cntrs;		/* device counter buffer */
	u64 *portcntrs;		/* per-port counter buffer */
	void *dummy_hdrq;	/* dummy receive header queue buffer */
	dma_addr_t dummy_hdrq_phys;	/* DMA address of dummy_hdrq */
	spinlock_t kernel_tid_lock;
	spinlock_t user_tid_lock;
	spinlock_t rcvmod_lock;
	spinlock_t gpio_lock;
	u64 hwerrmask;		/* shadow of kr_hwerrmask */
	u64 errormask;		/* shadow of kr_errmask */
	u64 gpio_out;		/* shadow of kr_gpio_out */
	u64 gpio_mask;		/* shadow of kr_gpio_mask */
	u64 extctrl;		/* shadow of kr_extctrl */
	/*
	 * Snapshots/deltas of the IB symbol-error and link-error-recovery
	 * counters, taken around SerDes bringup (see
	 * qib_6120_bringup_serdes()) so training noise can be subtracted
	 * out of reported counters.
	 */
	u64 ibdeltainprog;
	u64 ibsymdelta;
	u64 ibsymsnap;
	u64 iblnkerrdelta;
	u64 iblnkerrsnap;
	u64 ibcctrl;		/* shadow of kr_ibcctrl */
	u32 lastlinkrecov;	/* cr_iblinkerrrecov value at last ACTIVE; 0 forces link down */
	int irq;
	u32 cntrnamelen;
	u32 portcntrnamelen;
	u32 ncntrs;
	u32 nportcntrs;
	/* tallies of errors signalled via GPIO pins (see GPIO_*_BIT above) */
	u32 rxfc_unsupvl_errs;
	u32 overrun_thresh_errs;
	u32 lli_errs;		/* local link integrity error count */
	u32 lli_counter;
	u64 lli_thresh;		/* PhyerrThreshold programmed into IBCCtrl */
	u64 sword;		/* PMA sampling snapshots */
	u64 rword;
	u64 spkts;
	u64 rpkts;
	u64 xmit_wait;
	struct timer_list pma_timer;
	char emsgbuf[128];	/* scratch for decoded error messages */
	char bitsmsgbuf[64];	/* scratch for per-category sub-messages */
	u8 pma_sample_status;
};
272
273
274#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
275
276#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
277
278#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
279#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
280
281#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1
282#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2
283#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3
284#define QLOGIC_IB_IBCC_LINKCMD_SHIFT 18
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
306 enum qib_ureg regno, int ctxt)
307{
308 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
309 return 0;
310
311 if (dd->userbase)
312 return readl(regno + (u64 __iomem *)
313 ((char __iomem *)dd->userbase +
314 dd->ureg_align * ctxt));
315 else
316 return readl(regno + (u64 __iomem *)
317 (dd->uregbase +
318 (char __iomem *)dd->kregbase +
319 dd->ureg_align * ctxt));
320}
321
322
323
324
325
326
327
328
329
330
331static inline void qib_write_ureg(const struct qib_devdata *dd,
332 enum qib_ureg regno, u64 value, int ctxt)
333{
334 u64 __iomem *ubase;
335 if (dd->userbase)
336 ubase = (u64 __iomem *)
337 ((char __iomem *) dd->userbase +
338 dd->ureg_align * ctxt);
339 else
340 ubase = (u64 __iomem *)
341 (dd->uregbase +
342 (char __iomem *) dd->kregbase +
343 dd->ureg_align * ctxt);
344
345 if (dd->kregbase && (dd->flags & QIB_PRESENT))
346 writeq(value, &ubase[regno]);
347}
348
349static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
350 const u16 regno)
351{
352 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
353 return -1;
354 return readl((u32 __iomem *)&dd->kregbase[regno]);
355}
356
357static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
358 const u16 regno)
359{
360 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
361 return -1;
362
363 return readq(&dd->kregbase[regno]);
364}
365
366static inline void qib_write_kreg(const struct qib_devdata *dd,
367 const u16 regno, u64 value)
368{
369 if (dd->kregbase && (dd->flags & QIB_PRESENT))
370 writeq(value, &dd->kregbase[regno]);
371}
372
373
374
375
376
377
378
379
/*
 * Write a per-context kernel register.  On this chip the per-context
 * registers are laid out consecutively, so the context number is
 * simply added to the base register index.
 */
static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
				       const u16 regno, unsigned ctxt,
				       u64 value)
{
	qib_write_kreg(dd, regno + ctxt, value);
}
386
387static inline void write_6120_creg(const struct qib_devdata *dd,
388 u16 regno, u64 value)
389{
390 if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT))
391 writeq(value, &dd->cspec->cregbase[regno]);
392}
393
394static inline u64 read_6120_creg(const struct qib_devdata *dd, u16 regno)
395{
396 if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
397 return 0;
398 return readq(&dd->cspec->cregbase[regno]);
399}
400
401static inline u32 read_6120_creg32(const struct qib_devdata *dd, u16 regno)
402{
403 if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
404 return 0;
405 return readl(&dd->cspec->cregbase[regno]);
406}
407
408
409#define QLOGIC_IB_C_RESET 1U
410
411
412#define QLOGIC_IB_I_RCVURG_MASK ((1U << 5) - 1)
413#define QLOGIC_IB_I_RCVURG_SHIFT 0
414#define QLOGIC_IB_I_RCVAVAIL_MASK ((1U << 5) - 1)
415#define QLOGIC_IB_I_RCVAVAIL_SHIFT 12
416
417#define QLOGIC_IB_C_FREEZEMODE 0x00000002
418#define QLOGIC_IB_C_LINKENABLE 0x00000004
419#define QLOGIC_IB_I_ERROR 0x0000000080000000ULL
420#define QLOGIC_IB_I_SPIOSENT 0x0000000040000000ULL
421#define QLOGIC_IB_I_SPIOBUFAVAIL 0x0000000020000000ULL
422#define QLOGIC_IB_I_GPIO 0x0000000010000000ULL
423#define QLOGIC_IB_I_BITSEXTANT \
424 ((QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT) | \
425 (QLOGIC_IB_I_RCVAVAIL_MASK << \
426 QLOGIC_IB_I_RCVAVAIL_SHIFT) | \
427 QLOGIC_IB_I_ERROR | QLOGIC_IB_I_SPIOSENT | \
428 QLOGIC_IB_I_SPIOBUFAVAIL | QLOGIC_IB_I_GPIO)
429
430
431#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK 0x000000000000003fULL
432#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT 0
433#define QLOGIC_IB_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL
434#define QLOGIC_IB_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL
435#define QLOGIC_IB_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL
436#define QLOGIC_IB_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL
437#define QLOGIC_IB_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL
438#define QLOGIC_IB_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
439#define QLOGIC_IB_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
440#define QLOGIC_IB_HWE_PCIE1PLLFAILED 0x0400000000000000ULL
441#define QLOGIC_IB_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
442#define QLOGIC_IB_HWE_SERDESPLLFAILED 0x1000000000000000ULL
443
444
445
446#define QLOGIC_IB_EXTS_FREQSEL 0x2
447#define QLOGIC_IB_EXTS_SERDESSEL 0x4
448#define QLOGIC_IB_EXTS_MEMBIST_ENDTEST 0x0000000000004000
449#define QLOGIC_IB_EXTS_MEMBIST_FOUND 0x0000000000008000
450
451
452#define QLOGIC_IB_XGXS_RESET 0x5ULL
453
454#define _QIB_GPIO_SDA_NUM 1
455#define _QIB_GPIO_SCL_NUM 0
456
457
458#define GPIO_RXUVL_BIT 3
459#define GPIO_OVRUN_BIT 4
460#define GPIO_LLI_BIT 5
461#define GPIO_ERRINTR_MASK 0x38
462
463
464#define QLOGIC_IB_RT_BUFSIZE_MASK 0xe0000000ULL
465#define QLOGIC_IB_RT_BUFSIZE_SHIFTVAL(tid) \
466 ((((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) >> 29) + 11 - 1)
467#define QLOGIC_IB_RT_BUFSIZE(tid) (1 << QLOGIC_IB_RT_BUFSIZE_SHIFTVAL(tid))
468#define QLOGIC_IB_RT_IS_VALID(tid) \
469 (((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) && \
470 ((((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) != QLOGIC_IB_RT_BUFSIZE_MASK)))
471#define QLOGIC_IB_RT_ADDR_MASK 0x1FFFFFFFULL
472#define QLOGIC_IB_RT_ADDR_SHIFT 10
473
474#define QLOGIC_IB_R_INTRAVAIL_SHIFT 16
475#define QLOGIC_IB_R_TAILUPD_SHIFT 31
476#define IBA6120_R_PKEY_DIS_SHIFT 30
477
478#define PBC_6120_VL15_SEND_CTRL (1ULL << 31)
479
480#define IBCBUSFRSPCPARITYERR HWE_MASK(IBCBusFromSPCParityErr)
481#define IBCBUSTOSPCPARITYERR HWE_MASK(IBCBusToSPCParityErr)
482
483#define SYM_MASK_BIT(regname, fldname, bit) ((u64) \
484 ((1ULL << (SYM_LSB(regname, fldname) + (bit)))))
485
486#define TXEMEMPARITYERR_PIOBUF \
487 SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 0)
488#define TXEMEMPARITYERR_PIOPBC \
489 SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 1)
490#define TXEMEMPARITYERR_PIOLAUNCHFIFO \
491 SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 2)
492
493#define RXEMEMPARITYERR_RCVBUF \
494 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 0)
495#define RXEMEMPARITYERR_LOOKUPQ \
496 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 1)
497#define RXEMEMPARITYERR_EXPTID \
498 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 2)
499#define RXEMEMPARITYERR_EAGERTID \
500 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 3)
501#define RXEMEMPARITYERR_FLAGBUF \
502 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 4)
503#define RXEMEMPARITYERR_DATAINFO \
504 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 5)
505#define RXEMEMPARITYERR_HDRINFO \
506 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 6)
507
508
/*
 * Table mapping hardware-error status bits to human-readable strings;
 * consumed by qib_format_hwerrors() from qib_handle_6120_hwerrors().
 */
static const struct qib_hwerror_msgs qib_6120_hwerror_msgs[] = {
	/* parity on the bus between IB link logic and the rest of chip */
	QLOGIC_IB_HWE_MSG(IBCBUSFRSPCPARITYERR, "QIB2IB Parity"),
	QLOGIC_IB_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2QIB Parity"),
	/* TXE internal memory parity errors */
	QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOBUF,
			  "TXE PIOBUF Memory Parity"),
	QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOPBC,
			  "TXE PIOPBC Memory Parity"),
	QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOLAUNCHFIFO,
			  "TXE PIOLAUNCHFIFO Memory Parity"),
	/* RXE internal memory parity errors */
	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_RCVBUF,
			  "RXE RCVBUF Memory Parity"),
	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_LOOKUPQ,
			  "RXE LOOKUPQ Memory Parity"),
	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EAGERTID,
			  "RXE EAGERTID Memory Parity"),
	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EXPTID,
			  "RXE EXPTID Memory Parity"),
	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_FLAGBUF,
			  "RXE FLAGBUF Memory Parity"),
	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_DATAINFO,
			  "RXE DATAINFO Memory Parity"),
	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_HDRINFO,
			  "RXE HDRINFO Memory Parity"),
	/* PCIe-level errors */
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEPOISONEDTLP,
			  "PCIe Poisoned TLP"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT,
			  "PCIe completion timeout"),
	/* PLL failures */
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE1PLLFAILED,
			  "PCIePLL1"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE0PLLFAILED,
			  "PCIePLL0"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXTLH,
			  "PCIe XTLH core parity"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXADM,
			  "PCIe ADM TX core parity"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYRADM,
			  "PCIe ADM RX core parity"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SERDESPLLFAILED,
			  "SerDes PLL"),
};
561
562#define TXE_PIO_PARITY (TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC)
563#define _QIB_PLL_FAIL (QLOGIC_IB_HWE_COREPLL_FBSLIP | \
564 QLOGIC_IB_HWE_COREPLL_RFSLIP)
565
566
567#define IB_HWE_BITSEXTANT \
568 (HWE_MASK(RXEMemParityErr) | \
569 HWE_MASK(TXEMemParityErr) | \
570 (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK << \
571 QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) | \
572 QLOGIC_IB_HWE_PCIE1PLLFAILED | \
573 QLOGIC_IB_HWE_PCIE0PLLFAILED | \
574 QLOGIC_IB_HWE_PCIEPOISONEDTLP | \
575 QLOGIC_IB_HWE_PCIECPLTIMEOUT | \
576 QLOGIC_IB_HWE_PCIEBUSPARITYXTLH | \
577 QLOGIC_IB_HWE_PCIEBUSPARITYXADM | \
578 QLOGIC_IB_HWE_PCIEBUSPARITYRADM | \
579 HWE_MASK(PowerOnBISTFailed) | \
580 QLOGIC_IB_HWE_COREPLL_FBSLIP | \
581 QLOGIC_IB_HWE_COREPLL_RFSLIP | \
582 QLOGIC_IB_HWE_SERDESPLLFAILED | \
583 HWE_MASK(IBCBusToSPCParityErr) | \
584 HWE_MASK(IBCBusFromSPCParityErr))
585
586#define IB_E_BITSEXTANT \
587 (ERR_MASK(RcvFormatErr) | ERR_MASK(RcvVCRCErr) | \
588 ERR_MASK(RcvICRCErr) | ERR_MASK(RcvMinPktLenErr) | \
589 ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvLongPktLenErr) | \
590 ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvUnexpectedCharErr) | \
591 ERR_MASK(RcvUnsupportedVLErr) | ERR_MASK(RcvEBPErr) | \
592 ERR_MASK(RcvIBFlowErr) | ERR_MASK(RcvBadVersionErr) | \
593 ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | \
594 ERR_MASK(RcvBadTidErr) | ERR_MASK(RcvHdrLenErr) | \
595 ERR_MASK(RcvHdrErr) | ERR_MASK(RcvIBLostLinkErr) | \
596 ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendMaxPktLenErr) | \
597 ERR_MASK(SendUnderRunErr) | ERR_MASK(SendPktLenErr) | \
598 ERR_MASK(SendDroppedSmpPktErr) | \
599 ERR_MASK(SendDroppedDataPktErr) | \
600 ERR_MASK(SendPioArmLaunchErr) | \
601 ERR_MASK(SendUnexpectedPktNumErr) | \
602 ERR_MASK(SendUnsupportedVLErr) | ERR_MASK(IBStatusChanged) | \
603 ERR_MASK(InvalidAddrErr) | ERR_MASK(ResetNegated) | \
604 ERR_MASK(HardwareErr))
605
606#define QLOGIC_IB_E_PKTERRS ( \
607 ERR_MASK(SendPktLenErr) | \
608 ERR_MASK(SendDroppedDataPktErr) | \
609 ERR_MASK(RcvVCRCErr) | \
610 ERR_MASK(RcvICRCErr) | \
611 ERR_MASK(RcvShortPktLenErr) | \
612 ERR_MASK(RcvEBPErr))
613
614
615#define E_SUM_PKTERRS \
616 (ERR_MASK(RcvHdrLenErr) | ERR_MASK(RcvBadTidErr) | \
617 ERR_MASK(RcvBadVersionErr) | ERR_MASK(RcvHdrErr) | \
618 ERR_MASK(RcvLongPktLenErr) | ERR_MASK(RcvShortPktLenErr) | \
619 ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
620 ERR_MASK(RcvFormatErr) | ERR_MASK(RcvUnsupportedVLErr) | \
621 ERR_MASK(RcvUnexpectedCharErr) | ERR_MASK(RcvEBPErr))
622
623
624#define E_SUM_ERRS \
625 (ERR_MASK(SendPioArmLaunchErr) | \
626 ERR_MASK(SendUnexpectedPktNumErr) | \
627 ERR_MASK(SendDroppedDataPktErr) | \
628 ERR_MASK(SendDroppedSmpPktErr) | \
629 ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnsupportedVLErr) | \
630 ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
631 ERR_MASK(InvalidAddrErr))
632
633
634
635
636
637
638
639#define E_SPKT_ERRS_IGNORE \
640 (ERR_MASK(SendDroppedDataPktErr) | \
641 ERR_MASK(SendDroppedSmpPktErr) | \
642 ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendMinPktLenErr) | \
643 ERR_MASK(SendPktLenErr))
644
645
646
647
648
649
650
651#define E_SUM_LINK_PKTERRS \
652 (ERR_MASK(SendDroppedDataPktErr) | \
653 ERR_MASK(SendDroppedSmpPktErr) | \
654 ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
655 ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
656 ERR_MASK(RcvUnexpectedCharErr))
657
658static void qib_6120_put_tid_2(struct qib_devdata *, u64 __iomem *,
659 u32, unsigned long);
660
661
662
663
664
665
666
667
668static void qib_6120_txe_recover(struct qib_devdata *dd)
669{
670 if (!qib_unordered_wc())
671 qib_devinfo(dd->pcidev,
672 "Recovering from TXE PIO parity error\n");
673}
674
675
676static void qib_6120_set_intr_state(struct qib_devdata *dd, u32 enable)
677{
678 if (enable) {
679 if (dd->flags & QIB_BADINTR)
680 return;
681 qib_write_kreg(dd, kr_intmask, ~0ULL);
682
683 qib_write_kreg(dd, kr_intclear, 0ULL);
684 } else
685 qib_write_kreg(dd, kr_intmask, 0ULL);
686}
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
/*
 * qib_6120_clear_freeze - take the chip out of "freeze" mode after a
 * recoverable hardware error.
 *
 * The ordering here matters: errors and interrupts are masked first so
 * nothing fires while state is inconsistent, sends are cancelled, the
 * Control register shadow is rewritten (presumably with the freeze bit
 * clear — dd->control is maintained elsewhere; confirm), then pending
 * error/hwerror state is cleared and interrupts are re-enabled.
 */
static void qib_6120_clear_freeze(struct qib_devdata *dd)
{
	/* disable error interrupts, to avoid confusion during cleanup */
	qib_write_kreg(dd, kr_errmask, 0ULL);

	/* also disable all interrupts */
	qib_6120_set_intr_state(dd, 0);

	qib_cancel_sends(dd->pport);

	/* write current control shadow, then make sure the chip saw it */
	qib_write_kreg(dd, kr_control, dd->control);
	qib_read_kreg32(dd, kr_scratch);	/* read flushes the write */

	/* force in-memory PIO-avail update now that we're out of freeze */
	qib_force_pio_avail_update(dd);

	/*
	 * Clear any leftover hwerror state and the "safe to ignore" send
	 * packet errors caused by cancelling sends, restore the error
	 * mask shadow, and re-enable interrupts.
	 */
	qib_write_kreg(dd, kr_hwerrclear, 0ULL);
	qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
	qib_6120_set_intr_state(dd, 1);
}
731
732
733
734
735
736
737
738
739
740
741
742
/*
 * qib_handle_6120_hwerrors - decode and handle hardware error interrupts
 * @dd: the device
 * @msg: output buffer for the decoded message (caller-supplied)
 * @msgl: size of @msg
 *
 * Reads and clears the hardware error status, updates EEPROM error
 * logs, attempts recovery from freeze mode where possible, and builds
 * a human-readable description of whatever remains.  Errors deemed
 * fatal (BIST failure, PLL failure, unrecovered freeze) disable the
 * device via qib_disable_after_error().
 */
static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
				     size_t msgl)
{
	u64 hwerrs;
	u32 bits, ctrl;
	int isfatal = 0;
	char *bitsmsg;
	int log_idx;

	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
	if (!hwerrs)
		return;
	/* all-ones means the register read itself failed (chip gone) */
	if (hwerrs == ~0ULL) {
		qib_dev_err(dd, "Read of hardware error status failed "
			    "(all bits set); ignoring\n");
		return;
	}
	qib_stats.sps_hwerrs++;

	/*
	 * Always clear the error status register, except for the
	 * sticky BIST-failed bit, so a new interrupt can be raised if
	 * the error recurs.
	 */
	qib_write_kreg(dd, kr_hwerrclear,
		       hwerrs & ~HWE_MASK(PowerOnBISTFailed));

	/* only process errors we have enabled */
	hwerrs &= dd->cspec->hwerrmask;

	/* bump EEPROM error-log counters for logged categories */
	for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
		if (hwerrs & dd->eep_st_masks[log_idx].hwerrs_to_log)
			qib_inc_eeprom_err(dd, log_idx, 1);

	/*
	 * TXE PIO parity and eager-TID parity are handled/recovered
	 * quietly below; anything else gets an informational message.
	 */
	if (hwerrs & ~(TXE_PIO_PARITY | RXEMEMPARITYERR_EAGERTID))
		qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
			    "(cleared)\n", (unsigned long long) hwerrs);

	if (hwerrs & ~IB_HWE_BITSEXTANT)
		qib_dev_err(dd, "hwerror interrupt with unknown errors "
			    "%llx set\n", (unsigned long long)
			    (hwerrs & ~IB_HWE_BITSEXTANT));

	ctrl = qib_read_kreg32(dd, kr_control);
	if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
		/*
		 * Chip froze itself.  TXE PIO parity is recoverable, so
		 * strip it; if nothing else remains, clear the freeze,
		 * otherwise treat it as fatal.
		 */
		if (hwerrs & TXE_PIO_PARITY) {
			qib_6120_txe_recover(dd);
			hwerrs &= ~TXE_PIO_PARITY;
		}

		if (!hwerrs) {
			static u32 freeze_cnt;

			freeze_cnt++;
			qib_6120_clear_freeze(dd);
		} else
			isfatal = 1;
	}

	*msg = '\0';

	if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
		isfatal = 1;
		strlcat(msg, "[Memory BIST test failed, InfiniPath hardware"
			" unusable]", msgl);
		/* mask it off so it can't keep firing */
		dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
	}

	qib_format_hwerrors(hwerrs, qib_6120_hwerror_msgs,
			    ARRAY_SIZE(qib_6120_hwerror_msgs), msg, msgl);

	bitsmsg = dd->cspec->bitsmsgbuf;
	if (hwerrs & (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<
		      QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT)) {
		bits = (u32) ((hwerrs >>
			       QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
			      QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
		snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
			 "[PCIe Mem Parity Errs %x] ", bits);
		strlcat(msg, bitsmsg, msgl);
	}

	if (hwerrs & _QIB_PLL_FAIL) {
		isfatal = 1;
		snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
			 "[PLL failed (%llx), InfiniPath hardware unusable]",
			 (unsigned long long) hwerrs & _QIB_PLL_FAIL);
		strlcat(msg, bitsmsg, msgl);
		/* mask it off so it can't keep firing */
		dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL);
		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
	}

	if (hwerrs & QLOGIC_IB_HWE_SERDESPLLFAILED) {
		/* mask off; SerDes PLL is re-handled during bringup */
		dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED;
		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
	}

	if (hwerrs)
		/* anything still set gets reported */
		qib_dev_err(dd, "%s hardware error\n", msg);
	else
		*msg = 0;	/* nothing left; hand back an empty message */

	if (isfatal && !dd->diag_client) {
		qib_dev_err(dd, "Fatal Hardware Error, no longer"
			    " usable, SN %.16s\n", dd->serial);
		/* record the message for user-space readers of freezemsg */
		if (dd->freezemsg)
			snprintf(dd->freezemsg, dd->freezelen,
				 "{%s}", msg);
		qib_disable_after_error(dd);
	}
}
883
884
885
886
887
888
889
/*
 * qib_decode_6120_err - turn an error-status bitmask into a string
 * @dd: the device (unused here beyond signature consistency)
 * @buf: output buffer
 * @blen: size of @buf
 * @err: the error bits to decode
 *
 * Returns 1 if the errors are "real" (worth reporting loudly), 0 if
 * they are only routine packet errors (QLOGIC_IB_E_PKTERRS), in which
 * case at most a "CRC " note is emitted.
 */
static int qib_decode_6120_err(struct qib_devdata *dd, char *buf, size_t blen,
			       u64 err)
{
	int iserr = 1;

	*buf = '\0';
	if (err & QLOGIC_IB_E_PKTERRS) {
		/* only packet errors -> not a "serious" error */
		if (!(err & ~QLOGIC_IB_E_PKTERRS))
			iserr = 0;
		/* report CRC only when it isn't accompanied by VCRC/EBP */
		if ((err & ERR_MASK(RcvICRCErr)) &&
		    !(err&(ERR_MASK(RcvVCRCErr)|ERR_MASK(RcvEBPErr))))
			strlcat(buf, "CRC ", blen);
		if (!iserr)
			goto done;
	}
	if (err & ERR_MASK(RcvHdrLenErr))
		strlcat(buf, "rhdrlen ", blen);
	if (err & ERR_MASK(RcvBadTidErr))
		strlcat(buf, "rbadtid ", blen);
	if (err & ERR_MASK(RcvBadVersionErr))
		strlcat(buf, "rbadversion ", blen);
	if (err & ERR_MASK(RcvHdrErr))
		strlcat(buf, "rhdr ", blen);
	if (err & ERR_MASK(RcvLongPktLenErr))
		strlcat(buf, "rlongpktlen ", blen);
	if (err & ERR_MASK(RcvMaxPktLenErr))
		strlcat(buf, "rmaxpktlen ", blen);
	if (err & ERR_MASK(RcvMinPktLenErr))
		strlcat(buf, "rminpktlen ", blen);
	if (err & ERR_MASK(SendMinPktLenErr))
		strlcat(buf, "sminpktlen ", blen);
	if (err & ERR_MASK(RcvFormatErr))
		strlcat(buf, "rformaterr ", blen);
	if (err & ERR_MASK(RcvUnsupportedVLErr))
		strlcat(buf, "runsupvl ", blen);
	if (err & ERR_MASK(RcvUnexpectedCharErr))
		strlcat(buf, "runexpchar ", blen);
	if (err & ERR_MASK(RcvIBFlowErr))
		strlcat(buf, "ribflow ", blen);
	if (err & ERR_MASK(SendUnderRunErr))
		strlcat(buf, "sunderrun ", blen);
	if (err & ERR_MASK(SendPioArmLaunchErr))
		strlcat(buf, "spioarmlaunch ", blen);
	if (err & ERR_MASK(SendUnexpectedPktNumErr))
		strlcat(buf, "sunexperrpktnum ", blen);
	if (err & ERR_MASK(SendDroppedSmpPktErr))
		strlcat(buf, "sdroppedsmppkt ", blen);
	if (err & ERR_MASK(SendMaxPktLenErr))
		strlcat(buf, "smaxpktlen ", blen);
	if (err & ERR_MASK(SendUnsupportedVLErr))
		strlcat(buf, "sunsupVL ", blen);
	if (err & ERR_MASK(InvalidAddrErr))
		strlcat(buf, "invalidaddr ", blen);
	if (err & ERR_MASK(RcvEgrFullErr))
		strlcat(buf, "rcvegrfull ", blen);
	if (err & ERR_MASK(RcvHdrFullErr))
		strlcat(buf, "rcvhdrfull ", blen);
	if (err & ERR_MASK(IBStatusChanged))
		strlcat(buf, "ibcstatuschg ", blen);
	if (err & ERR_MASK(RcvIBLostLinkErr))
		strlcat(buf, "riblostlink ", blen);
	if (err & ERR_MASK(HardwareErr))
		strlcat(buf, "hardware ", blen);
	if (err & ERR_MASK(ResetNegated))
		strlcat(buf, "reset ", blen);
done:
	return iserr;
}
958
959
960
961
962
/*
 * Read the two SendBufErr registers (one bit per PIO buffer) and
 * disarm any send buffers flagged in error.
 *
 * NOTE(review): sbuf[] is unsigned long but qib_read_kreg64() returns
 * u64, so this would truncate on a 32-bit kernel — presumably this
 * driver only builds 64-bit; confirm against Kconfig.
 */
static void qib_disarm_6120_senderrbufs(struct qib_pportdata *ppd)
{
	unsigned long sbuf[2];
	struct qib_devdata *dd = ppd->dd;

	sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
	sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);

	if (sbuf[0] || sbuf[1])
		qib_disarm_piobufs_set(dd, sbuf,
				       dd->piobcnt2k + dd->piobcnt4k);
}
979
/*
 * chk_6120_linkrecovery - detect link error-recovery cycles
 * @dd: the device
 * @ibcs: current IBCStatus value
 *
 * Compares the chip's link-error-recovery counter against the snapshot
 * taken the last time the link was ACTIVE.  If it moved, the link went
 * through an error-recovery cycle: force the link down and return 0 so
 * the caller skips normal status-change handling.  Otherwise returns 1.
 * Refreshes the snapshot whenever the link is ACTIVE.
 */
static int chk_6120_linkrecovery(struct qib_devdata *dd, u64 ibcs)
{
	int ret = 1;
	u32 ibstate = qib_6120_iblink_state(ibcs);
	u32 linkrecov = read_6120_creg32(dd, cr_iblinkerrrecov);

	if (linkrecov != dd->cspec->lastlinkrecov) {
		/* recovery happened; clear snapshot and take link down */
		dd->cspec->lastlinkrecov = 0;
		qib_set_linkstate(dd->pport, QIB_IB_LINKDOWN);
		ret = 0;
	}
	if (ibstate == IB_PORT_ACTIVE)
		dd->cspec->lastlinkrecov =
			read_6120_creg32(dd, cr_iblinkerrrecov);
	return ret;
}
997
/*
 * handle_6120_errors - main (non-hardware) error interrupt handler
 * @dd: the device
 * @errs: the raw ErrStatus bits
 *
 * Dispatches hardware errors to qib_handle_6120_hwerrors(), updates
 * EEPROM error logs, disarms errored send buffers, filters out link
 * packet errors that are expected while the link is not ACTIVE,
 * decodes and reports whatever remains, and kicks receive handling on
 * eager/header-queue-full conditions.
 */
static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
{
	char *msg;
	u64 ignore_this_time = 0;
	u64 iserr = 0;
	int log_idx;
	struct qib_pportdata *ppd = dd->pport;
	u64 mask;

	/* don't report errors that are masked */
	errs &= dd->cspec->errormask;
	msg = dd->cspec->emsgbuf;

	/* hardware errors first — they may be the root cause */
	if (errs & ERR_MASK(HardwareErr))
		qib_handle_6120_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
	else
		for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
			if (errs & dd->eep_st_masks[log_idx].errs_to_log)
				qib_inc_eeprom_err(dd, log_idx, 1);

	if (errs & ~IB_E_BITSEXTANT)
		qib_dev_err(dd, "error interrupt with unknown errors "
			    "%llx set\n",
			    (unsigned long long) (errs & ~IB_E_BITSEXTANT));

	if (errs & E_SUM_ERRS) {
		qib_disarm_6120_senderrbufs(ppd);
		if ((errs & E_SUM_LINK_PKTERRS) &&
		    !(ppd->lflags & QIBL_LINKACTIVE)) {
			/*
			 * Link-transition packet errors are expected
			 * while the link is not yet ACTIVE; don't count
			 * or report them this time around.
			 */
			ignore_this_time = errs & E_SUM_LINK_PKTERRS;
		}
	} else if ((errs & E_SUM_LINK_PKTERRS) &&
		   !(ppd->lflags & QIBL_LINKACTIVE)) {
		/* same as above, but without errored send buffers */
		ignore_this_time = errs & E_SUM_LINK_PKTERRS;
	}

	qib_write_kreg(dd, kr_errclear, errs);

	errs &= ~ignore_this_time;
	if (!errs)
		goto done;

	/*
	 * These four are handled specially below, so keep them out of
	 * the decoded message.
	 */
	mask = ERR_MASK(IBStatusChanged) | ERR_MASK(RcvEgrFullErr) |
		ERR_MASK(RcvHdrFullErr) | ERR_MASK(HardwareErr);
	qib_decode_6120_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask);

	if (errs & E_SUM_PKTERRS)
		qib_stats.sps_rcverrs++;
	if (errs & E_SUM_ERRS)
		qib_stats.sps_txerrs++;

	/* only "serious" (non-packet) errors justify a port error below */
	iserr = errs & ~(E_SUM_PKTERRS | QLOGIC_IB_E_PKTERRS);

	if (errs & ERR_MASK(IBStatusChanged)) {
		u64 ibcs = qib_read_kreg64(dd, kr_ibcstatus);
		u32 ibstate = qib_6120_iblink_state(ibcs);
		int handle = 1;

		if (ibstate != IB_PORT_INIT && dd->cspec->lastlinkrecov)
			handle = chk_6120_linkrecovery(dd, ibcs);
		/*
		 * Don't report a status change while the chip is still
		 * in link-error recovery; we'll see another change when
		 * it settles.
		 */
		if (handle && qib_6120_phys_portstate(ibcs) ==
		    IB_PHYSPORTSTATE_LINK_ERR_RECOVER)
			handle = 0;
		if (handle)
			qib_handle_e_ibstatuschanged(ppd, ibcs);
	}

	if (errs & ERR_MASK(ResetNegated)) {
		qib_dev_err(dd, "Got reset, requires re-init "
			    "(unload and reload driver)\n");
		dd->flags &= ~QIB_INITTED;	/* needs re-init */
		/* mark as having had an error, for user status readers */
		*dd->devstatusp |= QIB_STATUS_HWERROR;
		*dd->pport->statusp &= ~QIB_STATUS_IB_CONF;
	}

	if (*msg && iserr)
		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);

	/* wake anyone waiting on a link-state transition */
	if (ppd->state_wanted & ppd->lflags)
		wake_up_interruptible(&ppd->state_wait);

	/*
	 * Queue-full conditions: let receive processing drain the
	 * queues, and count which kind overflowed.
	 */
	if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
		qib_handle_urcv(dd, ~0U);
		if (errs & ERR_MASK(RcvEgrFullErr))
			qib_stats.sps_buffull++;
		else
			qib_stats.sps_hdrfull++;
	}
done:
	return;
}
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
/*
 * qib_6120_init_hwerrors - enable/initialize error handling at startup
 *
 * Checks that the power-on memory BIST completed, programs the
 * hardware-error mask (all bits on, minus chip-revision exceptions),
 * clears any stale error/interrupt state, and programs the BTH QP
 * register.  Called before interrupts are enabled.
 */
static void qib_6120_init_hwerrors(struct qib_devdata *dd)
{
	u64 val;
	u64 extsval;

	extsval = qib_read_kreg64(dd, kr_extstatus);

	if (!(extsval & QLOGIC_IB_EXTS_MEMBIST_ENDTEST))
		qib_dev_err(dd, "MemBIST did not complete!\n");

	/* enable all hwerrors by default... */
	val = ~0ULL;
	if (dd->minrev < 2) {
		/* ...except RADM parity on rev-1 silicon */
		val &= ~QLOGIC_IB_HWE_PCIEBUSPARITYRADM;
	}
	/*
	 * PIOBUF parity is deliberately left unmasked-out here; the
	 * reason is not visible in this file (handled via the recovery
	 * path in qib_handle_6120_hwerrors) — confirm against chip errata.
	 */
	val &= ~TXEMEMPARITYERR_PIOBUF;

	dd->cspec->hwerrmask = val;

	/* clear everything except the sticky BIST bit, then set mask */
	qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);

	/* clear all software errors */
	qib_write_kreg(dd, kr_errclear, ~0ULL);
	/* enable all errors; snapshot what the chip actually accepted */
	qib_write_kreg(dd, kr_errmask, ~0ULL);
	dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
	/* clear any interrupts up to this point (ints still not enabled) */
	qib_write_kreg(dd, kr_intclear, ~0ULL);

	qib_write_kreg(dd, kr_rcvbthqp,
		       dd->qpn_mask << (QIB_6120_RcvBTHQP_BTHQP_Mask_LSB - 1) |
		       QIB_KD_QP);
}
1173
1174
1175
1176
1177
1178
1179
1180static void qib_set_6120_armlaunch(struct qib_devdata *dd, u32 enable)
1181{
1182 if (enable) {
1183 qib_write_kreg(dd, kr_errclear,
1184 ERR_MASK(SendPioArmLaunchErr));
1185 dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr);
1186 } else
1187 dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr);
1188 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1189}
1190
1191
1192
1193
1194
1195
/*
 * qib_set_ib_6120_lstate - apply an IB link command and/or link init command
 * @ppd: physical port data
 * @linkcmd: LINKCMD value to apply (0 for none)
 * @linitcmd: LINKINITCMD value to apply
 *
 * Maintains the QIBL_IB_LINK_DISABLED software flag to mirror a
 * DISABLE request (set before the register write so observers see it
 * first), then writes IBCCtrl as the cached base value OR'ed with the
 * requested commands, followed by a scratch write to flush.
 */
static void qib_set_ib_6120_lstate(struct qib_pportdata *ppd, u16 linkcmd,
				   u16 linitcmd)
{
	u64 mod_wd;
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;

	if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
		/*
		 * Told to disable: remember it so link-recovery code
		 * does not try to bring the link back up.
		 */
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	} else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
		/*
		 * Any other linkinitcmd (or an explicit DOWN linkcmd)
		 * means the link will be taken down but not disabled;
		 * clear the flag so recovery is allowed.
		 */
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	}

	mod_wd = (linkcmd << QLOGIC_IB_IBCC_LINKCMD_SHIFT) |
		(linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);

	qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl | mod_wd);
	/* scratch write flushes, avoiding back-to-back IBCCtrl writes */
	qib_write_kreg(dd, kr_scratch, 0);
}
1229
1230
1231
1232
1233
/*
 * qib_6120_bringup_serdes - bring up the serdes
 * @ppd: the qlogic_ib device physical port
 *
 * Takes the link through the full serdes/PLL reset sequence (with the
 * required settle delays), programs IBC and XGXS configuration, and
 * finally re-enables the link in the control register.  Register
 * write ordering and the udelay()s implement the hardware bring-up
 * procedure and must not be reordered.  Always returns 0.
 */
static int qib_6120_bringup_serdes(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u64 val, config1, prev_val, hwstat, ibc;

	/* disable link, put IBC into reset */
	dd->control &= ~QLOGIC_IB_C_LINKENABLE;
	qib_write_kreg(dd, kr_control, 0ULL);

	/* snapshot error counters so driver-induced errors can be undone */
	dd->cspec->ibdeltainprog = 1;
	dd->cspec->ibsymsnap = read_6120_creg32(dd, cr_ibsymbolerr);
	dd->cspec->iblnkerrsnap = read_6120_creg32(dd, cr_iblinkerrrecov);

	/* flow-control watermark */
	ibc = 0x5ULL << SYM_LSB(IBCCtrl, FlowCtrlWaterMark);
	/*
	 * How often flow control is sent; balanced against the watermark
	 * so senders get updates in time to keep the link from idling.
	 */
	ibc |= 0x3ULL << SYM_LSB(IBCCtrl, FlowCtrlPeriod);
	/* local link integrity error threshold (also used by the ISR) */
	dd->cspec->lli_thresh = 0xf;
	ibc |= (u64) dd->cspec->lli_thresh << SYM_LSB(IBCCtrl, PhyerrThreshold);
	/* credit scaling for IB credit flow control */
	ibc |= 4ULL << SYM_LSB(IBCCtrl, CreditScale);
	/* overrun threshold */
	ibc |= 0xfULL << SYM_LSB(IBCCtrl, OverrunThreshold);
	/*
	 * Initial max packet size IBC will send: the PIO buffer size in
	 * dwords, plus 1 (derived from ppd->ibmaxlen in bytes).
	 */
	ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << SYM_LSB(IBCCtrl, MaxPktLen);
	/* cache the base value, without linkcmd/linkinitcmd bits */
	dd->cspec->ibcctrl = ibc;

	/* initially come up disabled, waiting without sending anything */
	val = dd->cspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
		QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
	qib_write_kreg(dd, kr_ibcctrl, val);

	val = qib_read_kreg64(dd, kr_serdes_cfg0);
	config1 = qib_read_kreg64(dd, kr_serdes_cfg1);

	/*
	 * Force PLL reset on, also set rx-detect enable and lane power
	 * down bits; must precede the lane reset sequence below.
	 */
	val |= SYM_MASK(SerdesCfg0, ResetPLL) |
		SYM_MASK(SerdesCfg0, RxDetEnX) |
		(SYM_MASK(SerdesCfg0, L1PwrDnA) |
		 SYM_MASK(SerdesCfg0, L1PwrDnB) |
		 SYM_MASK(SerdesCfg0, L1PwrDnC) |
		 SYM_MASK(SerdesCfg0, L1PwrDnD));
	qib_write_kreg(dd, kr_serdes_cfg0, val);
	/* read back to be sure the chip saw the write */
	qib_read_kreg64(dd, kr_scratch);
	udelay(5);		/* PLL reset must be held for a bit */
	/*
	 * After the PLL reset, set the per-lane resets and Tx idle and
	 * clear the PLL reset and rx-detect (to get the falling edge).
	 */
	val &= ~(SYM_MASK(SerdesCfg0, RxDetEnX) |
		 SYM_MASK(SerdesCfg0, ResetPLL) |
		 (SYM_MASK(SerdesCfg0, L1PwrDnA) |
		  SYM_MASK(SerdesCfg0, L1PwrDnB) |
		  SYM_MASK(SerdesCfg0, L1PwrDnC) |
		  SYM_MASK(SerdesCfg0, L1PwrDnD)));
	val |= (SYM_MASK(SerdesCfg0, ResetA) |
		SYM_MASK(SerdesCfg0, ResetB) |
		SYM_MASK(SerdesCfg0, ResetC) |
		SYM_MASK(SerdesCfg0, ResetD)) |
		SYM_MASK(SerdesCfg0, TxIdeEnX);
	qib_write_kreg(dd, kr_serdes_cfg0, val);
	/* flush */
	(void) qib_read_kreg64(dd, kr_scratch);
	/* settle time between PLL reset clear and lane reset clear */
	udelay(15);
	/* now release the per-lane resets and Tx idle */
	val &= ~((SYM_MASK(SerdesCfg0, ResetA) |
		  SYM_MASK(SerdesCfg0, ResetB) |
		  SYM_MASK(SerdesCfg0, ResetC) |
		  SYM_MASK(SerdesCfg0, ResetD)) |
		 SYM_MASK(SerdesCfg0, TxIdeEnX));

	qib_write_kreg(dd, kr_serdes_cfg0, val);
	/* flush */
	(void) qib_read_kreg64(dd, kr_scratch);

	val = qib_read_kreg64(dd, kr_xgxs_cfg);
	prev_val = val;
	if (val & QLOGIC_IB_XGXS_RESET)
		val &= ~QLOGIC_IB_XGXS_RESET;
	if (SYM_FIELD(val, XGXSCfg, polarity_inv) != ppd->rx_pol_inv) {
		/* compensate for Rx polarity inversion */
		val &= ~SYM_MASK(XGXSCfg, polarity_inv);
		val |= (u64)ppd->rx_pol_inv << SYM_LSB(XGXSCfg, polarity_inv);
	}
	if (val != prev_val)
		qib_write_kreg(dd, kr_xgxs_cfg, val);

	val = qib_read_kreg64(dd, kr_serdes_cfg0);

	/* clear the current / de-emphasis field */
	config1 &= ~0x0ffffffff00ULL;
	/* drive current selection (no-op OR kept for documentation value) */
	config1 |= 0x00000000000ULL;
	/* de-emphasis setting */
	config1 |= 0x0cccc000000ULL;
	qib_write_kreg(dd, kr_serdes_cfg1, config1);

	/* base and port guid are the same on this single-port chip */
	ppd->guid = dd->base_guid;

	/*
	 * Setting and un-resetting the serdes normally latches a serdes
	 * PLL hardware error, so clear any hwerror now, and the
	 * HardwareErr bit in errstatus (but no other error bits).
	 */
	hwstat = qib_read_kreg64(dd, kr_hwerrstatus);
	if (hwstat) {
		qib_write_kreg(dd, kr_hwerrclear, hwstat);
		qib_write_kreg(dd, kr_errclear, ERR_MASK(HardwareErr));
	}

	/* re-enable the link, leave freeze mode */
	dd->control |= QLOGIC_IB_C_LINKENABLE;
	dd->control &= ~QLOGIC_IB_C_FREEZEMODE;
	qib_write_kreg(dd, kr_control, dd->control);

	return 0;
}
1368
1369
1370
1371
1372
1373
/*
 * qib_6120_quiet_serdes - set serdes to txidle; disable link
 * @ppd: physical port of the qlogic_ib device
 *
 * Shuts the link down: puts the IBC in disabled state and freezes
 * sends, restores the symbol-error and link-error-recovery counters
 * (undoing errors accumulated while the driver was manipulating the
 * link), then forces the serdes transmitters idle.
 */
static void qib_6120_quiet_serdes(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u64 val;

	qib_set_ib_6120_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);

	/* disable IBC and freeze sends */
	dd->control &= ~QLOGIC_IB_C_LINKENABLE;
	qib_write_kreg(dd, kr_control,
		       dd->control | QLOGIC_IB_C_FREEZEMODE);

	if (dd->cspec->ibsymdelta || dd->cspec->iblnkerrdelta ||
	    dd->cspec->ibdeltainprog) {
		u64 diagc;

		/* enable counter writes so we can adjust them */
		diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
		qib_write_kreg(dd, kr_hwdiagctrl,
			       diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));

		if (dd->cspec->ibsymdelta || dd->cspec->ibdeltainprog) {
			val = read_6120_creg32(dd, cr_ibsymbolerr);
			if (dd->cspec->ibdeltainprog)
				/* val -= (val - snap): roll back to snapshot */
				val -= val - dd->cspec->ibsymsnap;
			val -= dd->cspec->ibsymdelta;
			write_6120_creg(dd, cr_ibsymbolerr, val);
		}
		if (dd->cspec->iblnkerrdelta || dd->cspec->ibdeltainprog) {
			val = read_6120_creg32(dd, cr_iblinkerrrecov);
			if (dd->cspec->ibdeltainprog)
				/* roll back to snapshot, as above */
				val -= val - dd->cspec->iblnkerrsnap;
			val -= dd->cspec->iblnkerrdelta;
			write_6120_creg(dd, cr_iblinkerrrecov, val);
		}

		/* and disable counter writes again */
		qib_write_kreg(dd, kr_hwdiagctrl, diagc);
	}

	/* force the serdes transmitters idle */
	val = qib_read_kreg64(dd, kr_serdes_cfg0);
	val |= SYM_MASK(SerdesCfg0, TxIdeEnX);
	qib_write_kreg(dd, kr_serdes_cfg0, val);
}
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442static void qib_6120_setup_setextled(struct qib_pportdata *ppd, u32 on)
1443{
1444 u64 extctl, val, lst, ltst;
1445 unsigned long flags;
1446 struct qib_devdata *dd = ppd->dd;
1447
1448
1449
1450
1451
1452 if (dd->diag_client)
1453 return;
1454
1455
1456 if (ppd->led_override) {
1457 ltst = (ppd->led_override & QIB_LED_PHYS) ?
1458 IB_PHYSPORTSTATE_LINKUP : IB_PHYSPORTSTATE_DISABLED,
1459 lst = (ppd->led_override & QIB_LED_LOG) ?
1460 IB_PORT_ACTIVE : IB_PORT_DOWN;
1461 } else if (on) {
1462 val = qib_read_kreg64(dd, kr_ibcstatus);
1463 ltst = qib_6120_phys_portstate(val);
1464 lst = qib_6120_iblink_state(val);
1465 } else {
1466 ltst = 0;
1467 lst = 0;
1468 }
1469
1470 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
1471 extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) |
1472 SYM_MASK(EXTCtrl, LEDPriPortYellowOn));
1473
1474 if (ltst == IB_PHYSPORTSTATE_LINKUP)
1475 extctl |= SYM_MASK(EXTCtrl, LEDPriPortYellowOn);
1476 if (lst == IB_PORT_ACTIVE)
1477 extctl |= SYM_MASK(EXTCtrl, LEDPriPortGreenOn);
1478 dd->cspec->extctrl = extctl;
1479 qib_write_kreg(dd, kr_extctrl, extctl);
1480 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
1481}
1482
1483static void qib_6120_free_irq(struct qib_devdata *dd)
1484{
1485 if (dd->cspec->irq) {
1486 free_irq(dd->cspec->irq, dd);
1487 dd->cspec->irq = 0;
1488 }
1489 qib_nomsi(dd);
1490}
1491
1492
1493
1494
1495
1496
1497
1498static void qib_6120_setup_cleanup(struct qib_devdata *dd)
1499{
1500 qib_6120_free_irq(dd);
1501 kfree(dd->cspec->cntrs);
1502 kfree(dd->cspec->portcntrs);
1503 if (dd->cspec->dummy_hdrq) {
1504 dma_free_coherent(&dd->pcidev->dev,
1505 ALIGN(dd->rcvhdrcnt *
1506 dd->rcvhdrentsize *
1507 sizeof(u32), PAGE_SIZE),
1508 dd->cspec->dummy_hdrq,
1509 dd->cspec->dummy_hdrq_phys);
1510 dd->cspec->dummy_hdrq = NULL;
1511 }
1512}
1513
1514static void qib_wantpiobuf_6120_intr(struct qib_devdata *dd, u32 needint)
1515{
1516 unsigned long flags;
1517
1518 spin_lock_irqsave(&dd->sendctrl_lock, flags);
1519 if (needint)
1520 dd->sendctrl |= SYM_MASK(SendCtrl, PIOIntBufAvail);
1521 else
1522 dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOIntBufAvail);
1523 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
1524 qib_write_kreg(dd, kr_scratch, 0ULL);
1525 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
1526}
1527
1528
1529
1530
1531
/*
 * unlikely_6120_intr - handle the rare interrupt causes
 * @dd: the qlogic_ib device
 * @istat: interrupt status word (already known to be non-zero)
 *
 * Handles unknown interrupt bits, chip error interrupts, and GPIO
 * interrupts (both the rev2+ error-report pins and generic GPIO).
 * Kept out of line (noinline) so the common interrupt path stays
 * small and fast.
 */
static noinline void unlikely_6120_intr(struct qib_devdata *dd, u64 istat)
{
	if (unlikely(istat & ~QLOGIC_IB_I_BITSEXTANT))
		qib_dev_err(dd, "interrupt with unknown interrupts %Lx set\n",
			    istat & ~QLOGIC_IB_I_BITSEXTANT);

	if (istat & QLOGIC_IB_I_ERROR) {
		u64 estat = 0;

		qib_stats.sps_errints++;
		estat = qib_read_kreg64(dd, kr_errstatus);
		if (!estat)
			qib_devinfo(dd->pcidev, "error interrupt (%Lx), "
				    "but no error bits set!\n", istat);
		handle_6120_errors(dd, estat);
	}

	if (istat & QLOGIC_IB_I_GPIO) {
		u32 gpiostatus;
		u32 to_clear = 0;

		/*
		 * GPIO interrupts fall in two broad classes: pins that
		 * shadow link error counters (rev2+), and other GPIO
		 * use we know nothing about here.
		 */
		gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
		/* first the error-counter pins */
		if (gpiostatus & GPIO_ERRINTR_MASK) {
			/* clear the bits we see asserted */
			to_clear |= (gpiostatus & GPIO_ERRINTR_MASK);

			/*
			 * Count each error type, then drop those bits
			 * from our local copy as "handled".
			 */
			if (gpiostatus & (1 << GPIO_RXUVL_BIT))
				dd->cspec->rxfc_unsupvl_errs++;
			if (gpiostatus & (1 << GPIO_OVRUN_BIT))
				dd->cspec->overrun_thresh_errs++;
			if (gpiostatus & (1 << GPIO_LLI_BIT))
				dd->cspec->lli_errs++;
			gpiostatus &= ~GPIO_ERRINTR_MASK;
		}
		if (gpiostatus) {
			/*
			 * Unexpected bits remain; if any of them are
			 * enabled in the mask (and so could have caused
			 * this interrupt), clear and disable them so the
			 * interrupt cannot recur.
			 */
			const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);

			/*
			 * NOTE(review): no diagnostic is printed for
			 * these unexpected bits -- they are silently
			 * cleared and masked off.
			 */
			if (mask & gpiostatus) {
				to_clear |= (gpiostatus & mask);
				dd->cspec->gpio_mask &= ~(gpiostatus & mask);
				qib_write_kreg(dd, kr_gpio_mask,
					       dd->cspec->gpio_mask);
			}
		}
		if (to_clear)
			qib_write_kreg(dd, kr_gpio_clear, (u64) to_clear);
	}
}
1600
/*
 * qib_6120intr - the interrupt handler for this chip
 * @irq: irq number (unused)
 * @data: the qib_devdata cookie registered with request_irq()
 *
 * Reads and clears the interrupt status, hands rare causes to
 * unlikely_6120_intr(), services kernel-context receives (folding CRC
 * error counts into the local-link-integrity tracking), forwards
 * user-context receive bits, and finally runs the PIO-buffer-available
 * callback.
 */
static irqreturn_t qib_6120intr(int irq, void *data)
{
	struct qib_devdata *dd = data;
	irqreturn_t ret;
	u32 istat, ctxtrbits, rmask, crcs = 0;
	unsigned i;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
		/*
		 * Chip is being reset or the interrupt is known bad;
		 * claim the interrupt anyway so the irq core does not
		 * disable our handler while we cannot service it.
		 */
		ret = IRQ_HANDLED;
		goto bail;
	}

	istat = qib_read_kreg32(dd, kr_intstatus);

	if (unlikely(!istat)) {
		/* not our interrupt, or already handled */
		ret = IRQ_NONE;
		goto bail;
	}
	if (unlikely(istat == -1)) {
		/* all-ones read: the chip register space is unreadable */
		qib_bad_intrstatus(dd);
		/* don't know if it was our interrupt or not */
		ret = IRQ_NONE;
		goto bail;
	}

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* rare causes first: unknown bits, errors, GPIO */
	if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT |
			      QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR)))
		unlikely_6120_intr(dd, istat);

	/*
	 * Clear the found interrupt bits relatively early, so the chip
	 * will pick up and maintain any interrupt that would have been
	 * taken from this point on.
	 */
	qib_write_kreg(dd, kr_intclear, istat);

	/*
	 * Handle kernel receive queues before checking for PIO buffers
	 * available, since receives can overflow; piobuf waiters can
	 * afford a few extra cycles, since they were waiting anyway.
	 */
	ctxtrbits = istat &
		((QLOGIC_IB_I_RCVAVAIL_MASK << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
		 (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT));
	if (ctxtrbits) {
		rmask = (1U << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
			(1U << QLOGIC_IB_I_RCVURG_SHIFT);
		/* kernel contexts first, one avail+urg bit pair per ctxt */
		for (i = 0; i < dd->first_user_ctxt; i++) {
			if (ctxtrbits & rmask) {
				ctxtrbits &= ~rmask;
				crcs += qib_kreceive(dd->rcd[i],
						     &dd->cspec->lli_counter,
						     NULL);
			}
			rmask <<= 1;
		}
		if (crcs) {
			u32 cntr = dd->cspec->lli_counter;
			cntr += crcs;
			if (cntr) {
				if (cntr > dd->cspec->lli_thresh) {
					/* threshold exceeded: one LLI error */
					dd->cspec->lli_counter = 0;
					dd->cspec->lli_errs++;
				} else
					/*
					 * NOTE(review): cntr already
					 * includes lli_counter, so this adds
					 * it twice; confirm intent before
					 * changing.
					 */
					dd->cspec->lli_counter += cntr;
			}
		}

		/* remaining bits belong to user contexts */
		if (ctxtrbits) {
			ctxtrbits =
				(ctxtrbits >> QLOGIC_IB_I_RCVAVAIL_SHIFT) |
				(ctxtrbits >> QLOGIC_IB_I_RCVURG_SHIFT);
			qib_handle_urcv(dd, ctxtrbits);
		}
	}

	if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
		qib_ib_piobufavail(dd);

	ret = IRQ_HANDLED;
bail:
	return ret;
}
1696
1697
1698
1699
1700
1701
1702static void qib_setup_6120_interrupt(struct qib_devdata *dd)
1703{
1704
1705
1706
1707
1708
1709
1710 if (SYM_FIELD(dd->revision, Revision_R,
1711 ChipRevMinor) > 1) {
1712
1713 dd->cspec->gpio_mask |= GPIO_ERRINTR_MASK;
1714 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
1715 }
1716
1717 if (!dd->cspec->irq)
1718 qib_dev_err(dd, "irq is 0, BIOS error? Interrupts won't "
1719 "work\n");
1720 else {
1721 int ret;
1722 ret = request_irq(dd->cspec->irq, qib_6120intr, 0,
1723 QIB_DRV_NAME, dd);
1724 if (ret)
1725 qib_dev_err(dd, "Couldn't setup interrupt "
1726 "(irq=%d): %d\n", dd->cspec->irq,
1727 ret);
1728 }
1729}
1730
1731
1732
1733
1734
1735
1736
1737static void pe_boardname(struct qib_devdata *dd)
1738{
1739 char *n;
1740 u32 boardid, namelen;
1741
1742 boardid = SYM_FIELD(dd->revision, Revision,
1743 BoardID);
1744
1745 switch (boardid) {
1746 case 2:
1747 n = "InfiniPath_QLE7140";
1748 break;
1749 default:
1750 qib_dev_err(dd, "Unknown 6120 board with ID %u\n", boardid);
1751 n = "Unknown_InfiniPath_6120";
1752 break;
1753 }
1754 namelen = strlen(n) + 1;
1755 dd->boardname = kmalloc(namelen, GFP_KERNEL);
1756 if (!dd->boardname)
1757 qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
1758 else
1759 snprintf(dd->boardname, namelen, "%s", n);
1760
1761 if (dd->majrev != 4 || !dd->minrev || dd->minrev > 2)
1762 qib_dev_err(dd, "Unsupported InfiniPath hardware revision "
1763 "%u.%u!\n", dd->majrev, dd->minrev);
1764
1765 snprintf(dd->boardversion, sizeof(dd->boardversion),
1766 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
1767 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
1768 (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
1769 dd->majrev, dd->minrev,
1770 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
1771
1772}
1773
1774
1775
1776
1777
1778
/*
 * qib_6120_setup_reset - reset the chip
 * @dd: the qlogic_ib device
 *
 * Issues a chip reset, then polls (with increasingly long sleeps)
 * for the revision register to read back its known value, re-enabling
 * PCI config space each attempt.  Returns nonzero when the chip came
 * back (and PCIe/interrupts/error masks were re-established), 0 if it
 * never did.
 */
static int qib_6120_setup_reset(struct qib_devdata *dd)
{
	u64 val;
	int i;
	int ret;
	u16 cmdval;
	u8 int_line, clinesz;

	/* save PCI command/irq-line/cacheline state to restore after reset */
	qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);

	/* use error severity so this always shows up in the logs */
	qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);

	/* no interrupts until re-initialized by init code */
	qib_6120_set_intr_state(dd, 0);

	dd->cspec->ibdeltainprog = 0;
	dd->cspec->ibsymdelta = 0;
	dd->cspec->iblnkerrdelta = 0;

	/*
	 * Keep the chip from being accessed until we are ready.  Use
	 * writeq() directly so the write happens even though QIB_PRESENT
	 * is cleared.
	 */
	dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
	/* so we check interrupts work again after the reset */
	dd->int_counter = 0;
	val = dd->control | QLOGIC_IB_C_RESET;
	writeq(val, &dd->kregbase[kr_control]);
	mb(); /* prevent reordering around the actual reset write */

	for (i = 1; i <= 5; i++) {
		/*
		 * Allow MBIST etc. to complete; wait longer on each
		 * retry (bus timeouts can cause machine checks if we
		 * poke the chip too early).
		 */
		msleep(1000 + (1 + i) * 2000);

		qib_pcie_reenable(dd, cmdval, int_line, clinesz);

		/*
		 * Use readq directly, so we don't need to mark the chip
		 * as PRESENT until we get a successful readback.
		 */
		val = readq(&dd->kregbase[kr_revision]);
		if (val == dd->revision) {
			/* the chip is back */
			dd->flags |= QIB_PRESENT;
			ret = qib_reinit_intr(dd);
			goto bail;
		}
	}
	/* chip never came back */
	ret = 0;

bail:
	if (ret) {
		if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL))
			qib_dev_err(dd, "Reset failed to setup PCIe or "
				    "interrupts; continuing anyway\n");
		/* clear the reset error, init error/hwerror mask */
		qib_6120_init_hwerrors(dd);
		/* for rev2 error interrupts; nop for rev 1 */
		qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
		/*
		 * NOTE(review): this second init_hwerrors() call looks
		 * redundant with the one above -- possibly intended to
		 * clear errors triggered by the gpio_mask write; confirm
		 * before removing.
		 */
		qib_6120_init_hwerrors(dd);
	}
	return ret;
}
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
/*
 * qib_6120_put_tid - write a TID to the chip (rev1 variant)
 * @dd: the qlogic_ib device
 * @tidptr: pointer to the expected TID (in chip memory) to update
 * @type: RCVHQ_RCV_TYPE_EAGER for eager, RCVHQ_RCV_TYPE_EXPECTED for
 *	expected
 * @pa: physical address of the buffer; dd->tidinvalid when freeing
 *
 * Validates and encodes the physical address (2KB-aligned, shifted,
 * with the buffer-size template merged in), then performs the write
 * bracketed by scratch-register writes under a spinlock -- evidently
 * a workaround for a chip erratum triggered by back-to-back TID
 * writes (NOTE(review): exact erratum not documented here; confirm).
 * Compare qib_6120_put_tid_2(), used on later revisions.
 */
static void qib_6120_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
			     u32 type, unsigned long pa)
{
	u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
	unsigned long flags;
	int tidx;
	spinlock_t *tidlockp;

	if (!dd->kregbase)
		return;

	if (pa != dd->tidinvalid) {
		/* chip requires 2KB alignment of physical addresses */
		if (pa & ((1U << 11) - 1)) {
			qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
				    pa);
			return;
		}
		pa >>= 11;
		/* after shifting, address must fit the chip's TID field */
		if (pa & ~QLOGIC_IB_RT_ADDR_MASK) {
			qib_dev_err(dd, "Physical page address 0x%lx "
				    "larger than supported\n", pa);
			return;
		}

		/* merge in the buffer-size encoding bits */
		if (type == RCVHQ_RCV_TYPE_EAGER)
			pa |= dd->tidtemplate;
		else
			pa |= 2 << 29;
	}

	/*
	 * Separate locks for kernel-eager vs user TID writes; can be
	 * called from interrupt level for ctxt 0 eager TIDs, hence the
	 * irqsave variants.
	 */
	tidx = tidptr - dd->egrtidbase;

	tidlockp = (type == RCVHQ_RCV_TYPE_EAGER && tidx < dd->rcvhdrcnt)
		? &dd->cspec->kernel_tid_lock : &dd->cspec->user_tid_lock;
	spin_lock_irqsave(tidlockp, flags);
	qib_write_kreg(dd, kr_scratch, 0xfeeddeaf);
	writel(pa, tidp32);
	qib_write_kreg(dd, kr_scratch, 0xdeadbeef);
	mmiowb();
	spin_unlock_irqrestore(tidlockp, flags);
}
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927static void qib_6120_put_tid_2(struct qib_devdata *dd, u64 __iomem *tidptr,
1928 u32 type, unsigned long pa)
1929{
1930 u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
1931 u32 tidx;
1932
1933 if (!dd->kregbase)
1934 return;
1935
1936 if (pa != dd->tidinvalid) {
1937 if (pa & ((1U << 11) - 1)) {
1938 qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
1939 pa);
1940 return;
1941 }
1942 pa >>= 11;
1943 if (pa & ~QLOGIC_IB_RT_ADDR_MASK) {
1944 qib_dev_err(dd, "Physical page address 0x%lx "
1945 "larger than supported\n", pa);
1946 return;
1947 }
1948
1949 if (type == RCVHQ_RCV_TYPE_EAGER)
1950 pa |= dd->tidtemplate;
1951 else
1952 pa |= 2 << 29;
1953 }
1954 tidx = tidptr - dd->egrtidbase;
1955 writel(pa, tidp32);
1956 mmiowb();
1957}
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970static void qib_6120_clear_tids(struct qib_devdata *dd,
1971 struct qib_ctxtdata *rcd)
1972{
1973 u64 __iomem *tidbase;
1974 unsigned long tidinv;
1975 u32 ctxt;
1976 int i;
1977
1978 if (!dd->kregbase || !rcd)
1979 return;
1980
1981 ctxt = rcd->ctxt;
1982
1983 tidinv = dd->tidinvalid;
1984 tidbase = (u64 __iomem *)
1985 ((char __iomem *)(dd->kregbase) +
1986 dd->rcvtidbase +
1987 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
1988
1989 for (i = 0; i < dd->rcvtidcnt; i++)
1990
1991 dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
1992 tidinv);
1993
1994 tidbase = (u64 __iomem *)
1995 ((char __iomem *)(dd->kregbase) +
1996 dd->rcvegrbase +
1997 rcd->rcvegr_tid_base * sizeof(*tidbase));
1998
1999 for (i = 0; i < rcd->rcvegrcnt; i++)
2000
2001 dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
2002 tidinv);
2003}
2004
2005
2006
2007
2008
2009
2010
2011static void qib_6120_tidtemplate(struct qib_devdata *dd)
2012{
2013 u32 egrsize = dd->rcvegrbufsize;
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024 if (egrsize == 2048)
2025 dd->tidtemplate = 1U << 29;
2026 else if (egrsize == 4096)
2027 dd->tidtemplate = 2U << 29;
2028 dd->tidinvalid = 0;
2029}
2030
/*
 * qib_unordered_wc - report whether write combining is unordered
 *
 * Weak default: returns 0 (write combining is ordered).  Intended to
 * be overridden by an architecture-specific strong definition.
 * NOTE(review): the override location is not visible in this file.
 */
int __attribute__((weak)) qib_unordered_wc(void)
{
	return 0;
}
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044static int qib_6120_get_base_info(struct qib_ctxtdata *rcd,
2045 struct qib_base_info *kinfo)
2046{
2047 if (qib_unordered_wc())
2048 kinfo->spi_runtime_flags |= QIB_RUNTIME_FORCE_WC_ORDER;
2049
2050 kinfo->spi_runtime_flags |= QIB_RUNTIME_PCIE |
2051 QIB_RUNTIME_FORCE_PIOAVAIL | QIB_RUNTIME_PIO_REGSWAPPED;
2052 return 0;
2053}
2054
2055
2056static struct qib_message_header *
2057qib_6120_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
2058{
2059 return (struct qib_message_header *)
2060 &rhf_addr[sizeof(u64) / sizeof(u32)];
2061}
2062
2063static void qib_6120_config_ctxts(struct qib_devdata *dd)
2064{
2065 dd->ctxtcnt = qib_read_kreg32(dd, kr_portcnt);
2066 if (qib_n_krcv_queues > 1) {
2067 dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
2068 if (dd->first_user_ctxt > dd->ctxtcnt)
2069 dd->first_user_ctxt = dd->ctxtcnt;
2070 dd->qpn_mask = dd->first_user_ctxt <= 2 ? 2 : 6;
2071 } else
2072 dd->first_user_ctxt = dd->num_pports;
2073 dd->n_krcv_queues = dd->first_user_ctxt;
2074}
2075
2076static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
2077 u32 updegr, u32 egrhd, u32 npkts)
2078{
2079 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
2080 if (updegr)
2081 qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
2082}
2083
2084static u32 qib_6120_hdrqempty(struct qib_ctxtdata *rcd)
2085{
2086 u32 head, tail;
2087
2088 head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
2089 if (rcd->rcvhdrtail_kvaddr)
2090 tail = qib_get_rcvhdrtail(rcd);
2091 else
2092 tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
2093 return head == tail;
2094}
2095
2096
2097
2098
2099
2100
2101static void alloc_dummy_hdrq(struct qib_devdata *dd)
2102{
2103 dd->cspec->dummy_hdrq = dma_alloc_coherent(&dd->pcidev->dev,
2104 dd->rcd[0]->rcvhdrq_size,
2105 &dd->cspec->dummy_hdrq_phys,
2106 GFP_KERNEL | __GFP_COMP);
2107 if (!dd->cspec->dummy_hdrq) {
2108 qib_devinfo(dd->pcidev, "Couldn't allocate dummy hdrq\n");
2109
2110 dd->cspec->dummy_hdrq_phys = 0UL;
2111 }
2112}
2113
2114
2115
2116
2117
2118
2119
2120
/*
 * rcvctrl_6120_mod - context enable/disable and receive-control changes
 * @ppd: the qlogic_ib port (this chip has only one)
 * @op: bitmask of QIB_RCVCTRL_* operations to perform
 * @ctxt: the context to act on, or negative for all contexts
 *
 * All updates to the RcvCtrl shadow/register happen under rcvmod_lock.
 * Enabling a context also points its header-queue addresses at the
 * real queues, primes its head/tail registers, and (for ctxt 0)
 * lazily allocates the dummy header queue that disabled contexts are
 * pointed at so the chip always has a valid DMA target.
 */
static void rcvctrl_6120_mod(struct qib_pportdata *ppd, unsigned int op,
			     int ctxt)
{
	struct qib_devdata *dd = ppd->dd;
	u64 mask, val;
	unsigned long flags;

	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);

	if (op & QIB_RCVCTRL_TAILUPD_ENB)
		dd->rcvctrl |= (1ULL << QLOGIC_IB_R_TAILUPD_SHIFT);
	if (op & QIB_RCVCTRL_TAILUPD_DIS)
		dd->rcvctrl &= ~(1ULL << QLOGIC_IB_R_TAILUPD_SHIFT);
	if (op & QIB_RCVCTRL_PKEY_ENB)
		dd->rcvctrl &= ~(1ULL << IBA6120_R_PKEY_DIS_SHIFT);
	if (op & QIB_RCVCTRL_PKEY_DIS)
		dd->rcvctrl |= (1ULL << IBA6120_R_PKEY_DIS_SHIFT);
	/* negative ctxt means all contexts */
	if (ctxt < 0)
		mask = (1ULL << dd->ctxtcnt) - 1;
	else
		mask = (1ULL << ctxt);
	if (op & QIB_RCVCTRL_CTXT_ENB) {
		/*
		 * NOTE(review): enable is evidently always called with a
		 * specific (non-negative) ctxt; dd->rcd[ctxt] below would
		 * misbehave otherwise.
		 */
		dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable));
		if (!(dd->flags & QIB_NODMA_RTAIL))
			dd->rcvctrl |= 1ULL << QLOGIC_IB_R_TAILUPD_SHIFT;
		/* point the context at its real header queue */
		qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
			dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
		qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
			dd->rcd[ctxt]->rcvhdrq_phys);
		/* lazily allocate the dummy queue used on disable */
		if (ctxt == 0 && !dd->cspec->dummy_hdrq)
			alloc_dummy_hdrq(dd);
	}
	if (op & QIB_RCVCTRL_CTXT_DIS)
		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable));
	if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
		dd->rcvctrl |= (mask << QLOGIC_IB_R_INTRAVAIL_SHIFT);
	if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
		dd->rcvctrl &= ~(mask << QLOGIC_IB_R_INTRAVAIL_SHIFT);
	qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
	if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) {
		/* arm the receive interrupt via the head register */
		val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
			dd->rhdrhead_intr_off;
		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
	}
	if (op & QIB_RCVCTRL_CTXT_ENB) {
		/*
		 * Prime the context's head registers from the chip's
		 * current tail values, so the queue starts out "empty".
		 */
		val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
		qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);

		val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
		dd->rcd[ctxt]->head = val;
		/* kernel contexts interrupt on the next receive */
		if (ctxt < dd->first_user_ctxt)
			val |= dd->rhdrhead_intr_off;
		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
	}
	if (op & QIB_RCVCTRL_CTXT_DIS) {
		/*
		 * Point disabled contexts at the dummy queue rather than
		 * address 0; packets still in flight after the disable
		 * then land somewhere harmless instead of trashing page 0.
		 */
		if (ctxt >= 0) {
			qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
					    dd->cspec->dummy_hdrq_phys);
			qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
					    dd->cspec->dummy_hdrq_phys);
		} else {
			unsigned i;

			for (i = 0; i < dd->cfgctxts; i++) {
				qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr,
					i, dd->cspec->dummy_hdrq_phys);
				qib_write_kreg_ctxt(dd, kr_rcvhdraddr,
					i, dd->cspec->dummy_hdrq_phys);
			}
		}
	}
	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
}
2214
2215
2216
2217
2218
2219
2220
2221
2222
/*
 * sendctrl_6120_mod - modify the SendCtrl register state
 * @ppd: the qlogic_ib port (this chip has only one)
 * @op: bitmask of QIB_SENDCTRL_* operations
 *
 * All changes to the sendctrl shadow and register happen under
 * sendctrl_lock; each register write is flushed with a scratch write.
 * DISARM_ALL walks every PIO buffer with sends/updates disabled;
 * FLUSH sets the Abort bit and then does scratch read/write pairs
 * (after dropping the lock) to let the pioavail DMA catch up.
 */
static void sendctrl_6120_mod(struct qib_pportdata *ppd, u32 op)
{
	struct qib_devdata *dd = ppd->dd;
	u64 tmp_dd_sendctrl;
	unsigned long flags;

	spin_lock_irqsave(&dd->sendctrl_lock, flags);

	/* first the "sticky" operations, saved in the shadow */
	if (op & QIB_SENDCTRL_CLEAR)
		dd->sendctrl = 0;
	if (op & QIB_SENDCTRL_SEND_DIS)
		dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOEnable);
	else if (op & QIB_SENDCTRL_SEND_ENB)
		dd->sendctrl |= SYM_MASK(SendCtrl, PIOEnable);
	if (op & QIB_SENDCTRL_AVAIL_DIS)
		dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOBufAvailUpd);
	else if (op & QIB_SENDCTRL_AVAIL_ENB)
		dd->sendctrl |= SYM_MASK(SendCtrl, PIOBufAvailUpd);

	if (op & QIB_SENDCTRL_DISARM_ALL) {
		u32 i, last;

		tmp_dd_sendctrl = dd->sendctrl;
		/*
		 * Disarm every buffer, with sends and avail-updates
		 * disabled until we are done.
		 */
		last = dd->piobcnt2k + dd->piobcnt4k;
		tmp_dd_sendctrl &=
			~(SYM_MASK(SendCtrl, PIOEnable) |
			  SYM_MASK(SendCtrl, PIOBufAvailUpd));
		for (i = 0; i < last; i++) {
			qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl |
				       SYM_MASK(SendCtrl, Disarm) | i);
			qib_write_kreg(dd, kr_scratch, 0);
		}
	}

	tmp_dd_sendctrl = dd->sendctrl;

	/* now the transient, non-shadowed operations */
	if (op & QIB_SENDCTRL_FLUSH)
		tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Abort);
	if (op & QIB_SENDCTRL_DISARM)
		tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
			((op & QIB_6120_SendCtrl_DisarmPIOBuf_RMASK) <<
			 SYM_LSB(SendCtrl, DisarmPIOBuf));
	if (op & QIB_SENDCTRL_AVAIL_BLIP)
		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, PIOBufAvailUpd);

	qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
	qib_write_kreg(dd, kr_scratch, 0);

	if (op & QIB_SENDCTRL_AVAIL_BLIP) {
		/* restore the AvailUpd state the blip cleared */
		qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}

	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);

	if (op & QIB_SENDCTRL_FLUSH) {
		u32 v;

		/*
		 * Ensure writes have hit the chip, then do a few more
		 * reads to allow DMA of the pioavail registers to occur,
		 * so the in-memory copy is in sync with the chip.
		 */
		v = qib_read_kreg32(dd, kr_scratch);
		qib_write_kreg(dd, kr_scratch, v);
		v = qib_read_kreg32(dd, kr_scratch);
		qib_write_kreg(dd, kr_scratch, v);
		qib_read_kreg32(dd, kr_scratch);
	}
}
2298
2299
2300
2301
2302
2303
/*
 * qib_portcntr_6120 - read a per-port counter
 * @ppd: the qlogic_ib port
 * @reg: the chip-independent counter number (QIBPORTCNTR_*)
 *
 * Translates @reg into this chip's counter register via xlator[];
 * entries of 0xffff have no hardware counter on this chip and are
 * either synthesized from software counters kept in cspec (LLI,
 * EXCESSBUFOVFL, KHDROVFL, PSSTAT) or read as 0.  Symbol-error and
 * link-error-recovery values are adjusted by the snapshot/delta state
 * maintained around link bring-up/teardown so that driver-induced
 * errors are not reported.
 */
static u64 qib_portcntr_6120(struct qib_pportdata *ppd, u32 reg)
{
	u64 ret = 0ULL;
	struct qib_devdata *dd = ppd->dd;
	u16 creg;
	/* 0xffff marks counters with no hardware register on this chip */
	static const u16 xlator[] = {
		[QIBPORTCNTR_PKTSEND] = cr_pktsend,
		[QIBPORTCNTR_WORDSEND] = cr_wordsend,
		[QIBPORTCNTR_PSXMITDATA] = 0xffff,
		[QIBPORTCNTR_PSXMITPKTS] = 0xffff,
		[QIBPORTCNTR_PSXMITWAIT] = 0xffff,
		[QIBPORTCNTR_SENDSTALL] = cr_sendstall,
		[QIBPORTCNTR_PKTRCV] = cr_pktrcv,
		[QIBPORTCNTR_PSRCVDATA] = 0xffff,
		[QIBPORTCNTR_PSRCVPKTS] = 0xffff,
		[QIBPORTCNTR_RCVEBP] = cr_rcvebp,
		[QIBPORTCNTR_RCVOVFL] = cr_rcvovfl,
		[QIBPORTCNTR_WORDRCV] = cr_wordrcv,
		[QIBPORTCNTR_RXDROPPKT] = cr_rxdroppkt,
		[QIBPORTCNTR_RXLOCALPHYERR] = 0xffff,
		[QIBPORTCNTR_RXVLERR] = 0xffff,
		[QIBPORTCNTR_ERRICRC] = cr_erricrc,
		[QIBPORTCNTR_ERRVCRC] = cr_errvcrc,
		[QIBPORTCNTR_ERRLPCRC] = cr_errlpcrc,
		[QIBPORTCNTR_BADFORMAT] = cr_badformat,
		[QIBPORTCNTR_ERR_RLEN] = cr_err_rlen,
		[QIBPORTCNTR_IBSYMBOLERR] = cr_ibsymbolerr,
		[QIBPORTCNTR_INVALIDRLEN] = cr_invalidrlen,
		[QIBPORTCNTR_UNSUPVL] = cr_txunsupvl,
		[QIBPORTCNTR_EXCESSBUFOVFL] = 0xffff,
		[QIBPORTCNTR_ERRLINK] = cr_errlink,
		[QIBPORTCNTR_IBLINKDOWN] = cr_iblinkdown,
		[QIBPORTCNTR_IBLINKERRRECOV] = cr_iblinkerrrecov,
		[QIBPORTCNTR_LLI] = 0xffff,
		[QIBPORTCNTR_PSINTERVAL] = 0xffff,
		[QIBPORTCNTR_PSSTART] = 0xffff,
		[QIBPORTCNTR_PSSTAT] = 0xffff,
		[QIBPORTCNTR_VL15PKTDROP] = 0xffff,
		[QIBPORTCNTR_ERRPKEY] = cr_errpkey,
		[QIBPORTCNTR_KHDROVFL] = 0xffff,
	};

	if (reg >= ARRAY_SIZE(xlator)) {
		qib_devinfo(ppd->dd->pcidev,
			    "Unimplemented portcounter %u\n", reg);
		goto done;
	}
	creg = xlator[reg];

	/* counters synthesized from software state */
	if (reg == QIBPORTCNTR_LLI)
		ret = dd->cspec->lli_errs;
	else if (reg == QIBPORTCNTR_EXCESSBUFOVFL)
		ret = dd->cspec->overrun_thresh_errs;
	else if (reg == QIBPORTCNTR_KHDROVFL) {
		int i;

		/* sum the per-context overflow counters of kernel ctxts */
		for (i = 0; i < dd->first_user_ctxt; i++)
			ret += read_6120_creg32(dd, cr_portovfl + i);
	} else if (reg == QIBPORTCNTR_PSSTAT)
		ret = dd->cspec->pma_sample_status;
	if (creg == 0xffff)
		goto done;

	/*
	 * Only the word and packet send/receive counters are 64 bits
	 * wide; everything else is read as 32 bits.
	 */
	if (creg == cr_wordsend || creg == cr_wordrcv ||
	    creg == cr_pktsend || creg == cr_pktrcv)
		ret = read_6120_creg(dd, creg);
	else
		ret = read_6120_creg32(dd, creg);
	if (creg == cr_ibsymbolerr) {
		if (dd->cspec->ibdeltainprog)
			/* ret -= (ret - snap): roll back to snapshot */
			ret -= ret - dd->cspec->ibsymsnap;
		ret -= dd->cspec->ibsymdelta;
	} else if (creg == cr_iblinkerrrecov) {
		if (dd->cspec->ibdeltainprog)
			ret -= ret - dd->cspec->iblnkerrsnap;
		ret -= dd->cspec->iblnkerrdelta;
	}
	/* rx-dropped also accounts unsupported-VL flow control errors */
	if (reg == QIBPORTCNTR_RXDROPPKT)
		ret += dd->cspec->rxfc_unsupvl_errs;

done:
	return ret;
}
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
/*
 * Device-wide counter names, one per line; the "E " prefix marks the
 * first error counter for display purposes.  Order must stay in sync
 * with cntr6120indices[], and init_6120_cntrnames() depends on the
 * per-context entries being named "CtxtNEgrOvfl".
 */
static const char cntr6120names[] =
	"Interrupts\n"
	"HostBusStall\n"
	"E RxTIDFull\n"
	"RxTIDInvalid\n"
	"Ctxt0EgrOvfl\n"
	"Ctxt1EgrOvfl\n"
	"Ctxt2EgrOvfl\n"
	"Ctxt3EgrOvfl\n"
	"Ctxt4EgrOvfl\n";
2418
/* counter-register offsets, parallel to cntr6120names above */
static const size_t cntr6120indices[] = {
	cr_lbint,
	cr_lbflowstall,
	cr_errtidfull,
	cr_errtidvalid,
	cr_portovfl + 0,
	cr_portovfl + 1,
	cr_portovfl + 2,
	cr_portovfl + 3,
	cr_portovfl + 4,
};
2430
2431
2432
2433
2434
2435
/*
 * Per-port counter names, one per line; the "E " prefix marks the
 * first error counter.  Order must stay in sync with
 * portcntr6120indices[].
 */
static const char portcntr6120names[] =
	"TxPkt\n"
	"TxFlowPkt\n"
	"TxWords\n"
	"RxPkt\n"
	"RxFlowPkt\n"
	"RxWords\n"
	"TxFlowStall\n"
	"E IBStatusChng\n"
	"IBLinkDown\n"
	"IBLnkRecov\n"
	"IBRxLinkErr\n"
	"IBSymbolErr\n"
	"RxLLIErr\n"
	"RxBadFormat\n"
	"RxBadLen\n"
	"RxBufOvrfl\n"
	"RxEBP\n"
	"RxFlowCtlErr\n"
	"RxICRCerr\n"
	"RxLPCRCerr\n"
	"RxVCRCerr\n"
	"RxInvalLen\n"
	"RxInvalPKey\n"
	"RxPktDropped\n"
	"TxBadLength\n"
	"TxDropped\n"
	"TxInvalLen\n"
	"TxUnderrun\n"
	"TxUnsupVL\n"
	;
2467
/* flags an entry resolved via qib_portcntr_6120() instead of a creg read */
#define _PORT_VIRT_FLAG 0x8000
/* per-port counter sources, parallel to portcntr6120names above */
static const size_t portcntr6120indices[] = {
	QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
	cr_pktsendflow,
	QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
	QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
	cr_pktrcvflowctrl,
	QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
	QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
	cr_ibstatuschange,
	QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
	QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
	QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
	QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
	QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
	cr_rcvflowctrl_err,
	QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
	QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
	cr_invalidslen,
	cr_senddropped,
	cr_errslen,
	cr_sendunderrun,
	cr_txunsupvl,
};
2500
2501
/*
 * init_6120_cntrnames - initialize counter-name lengths and shadows
 * @dd: the qlogic_ib device
 *
 * Counts the lines in the two name strings (limiting the per-context
 * "CtxtNEgrOvfl" entries to the number of configured contexts),
 * records the usable name-buffer lengths, and allocates the shadow
 * arrays filled in by the counter read routines.
 */
static void init_6120_cntrnames(struct qib_devdata *dd)
{
	int i, j = 0;
	char *s;

	/*
	 * Walk cntr6120names line by line (s points at the previous
	 * newline after the first iteration, so s + 1 is the line
	 * start).  Once "Ctxt0EgrOvfl" is seen, count contexts in j so
	 * we stop after dd->cfgctxts of them.
	 */
	for (i = 0, s = (char *)cntr6120names; s && j <= dd->cfgctxts;
	     i++) {
		if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
			j = 1;
		s = strchr(s + 1, '\n');
		if (s && j)
			j++;
	}
	dd->cspec->ncntrs = i;
	if (!s)
		/* ran off the end: full list, minus the terminating NUL */
		dd->cspec->cntrnamelen = sizeof(cntr6120names) - 1;
	else
		/* stopped early: length up to and including current line */
		dd->cspec->cntrnamelen = 1 + s - cntr6120names;
	dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
		* sizeof(u64), GFP_KERNEL);
	if (!dd->cspec->cntrs)
		qib_dev_err(dd, "Failed allocation for counters\n");

	/* port counters: simply count the lines */
	for (i = 0, s = (char *)portcntr6120names; s; i++)
		s = strchr(s + 1, '\n');
	dd->cspec->nportcntrs = i - 1;
	dd->cspec->portcntrnamelen = sizeof(portcntr6120names) - 1;
	dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
		* sizeof(u64), GFP_KERNEL);
	if (!dd->cspec->portcntrs)
		qib_dev_err(dd, "Failed allocation for portcounters\n");
}
2536
2537static u32 qib_read_6120cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
2538 u64 **cntrp)
2539{
2540 u32 ret;
2541
2542 if (namep) {
2543 ret = dd->cspec->cntrnamelen;
2544 if (pos >= ret)
2545 ret = 0;
2546 else
2547 *namep = (char *)cntr6120names;
2548 } else {
2549 u64 *cntr = dd->cspec->cntrs;
2550 int i;
2551
2552 ret = dd->cspec->ncntrs * sizeof(u64);
2553 if (!cntr || pos >= ret) {
2554
2555 ret = 0;
2556 goto done;
2557 }
2558 if (pos >= ret) {
2559 ret = 0;
2560 goto done;
2561 }
2562 *cntrp = cntr;
2563 for (i = 0; i < dd->cspec->ncntrs; i++)
2564 *cntr++ = read_6120_creg32(dd, cntr6120indices[i]);
2565 }
2566done:
2567 return ret;
2568}
2569
2570static u32 qib_read_6120portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
2571 char **namep, u64 **cntrp)
2572{
2573 u32 ret;
2574
2575 if (namep) {
2576 ret = dd->cspec->portcntrnamelen;
2577 if (pos >= ret)
2578 ret = 0;
2579 else
2580 *namep = (char *)portcntr6120names;
2581 } else {
2582 u64 *cntr = dd->cspec->portcntrs;
2583 struct qib_pportdata *ppd = &dd->pport[port];
2584 int i;
2585
2586 ret = dd->cspec->nportcntrs * sizeof(u64);
2587 if (!cntr || pos >= ret) {
2588
2589 ret = 0;
2590 goto done;
2591 }
2592 *cntrp = cntr;
2593 for (i = 0; i < dd->cspec->nportcntrs; i++) {
2594 if (portcntr6120indices[i] & _PORT_VIRT_FLAG)
2595 *cntr++ = qib_portcntr_6120(ppd,
2596 portcntr6120indices[i] &
2597 ~_PORT_VIRT_FLAG);
2598 else
2599 *cntr++ = read_6120_creg32(dd,
2600 portcntr6120indices[i]);
2601 }
2602 }
2603done:
2604 return ret;
2605}
2606
/*
 * Periodic sanity check of the error-mask register.
 *
 * If the chip's ErrMask no longer matches our shadow (dd->cspec->
 * errormask), rewrite it; if hardware errors are pending or the chip
 * is in freeze mode, also clear hwerr/err/int status so processing can
 * restart, and log the repair.  Called from the faststats timer.
 */
static void qib_chk_6120_errormask(struct qib_devdata *dd)
{
	/* count of repairs across all calls, reported in the log message */
	static u32 fixed;
	u32 ctrl;
	unsigned long errormask;
	unsigned long hwerrs;

	/* nothing to compare against until init has set the shadow */
	if (!dd->cspec->errormask || !(dd->flags & QIB_INITTED))
		return;

	errormask = qib_read_kreg64(dd, kr_errmask);

	if (errormask == dd->cspec->errormask)
		return;
	fixed++;

	/* snapshot state before repairing, for the diagnostic message */
	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
	ctrl = qib_read_kreg32(dd, kr_control);

	/* restore the mask the driver intended */
	qib_write_kreg(dd, kr_errmask,
		       dd->cspec->errormask);

	if ((hwerrs & dd->cspec->hwerrmask) ||
	    (ctrl & QLOGIC_IB_C_FREEZEMODE)) {
		/* clear pending status so the chip can make progress */
		qib_write_kreg(dd, kr_hwerrclear, 0ULL);
		qib_write_kreg(dd, kr_errclear, 0ULL);
		/* clear interrupt status as well */
		qib_write_kreg(dd, kr_intclear, 0ULL);
		qib_devinfo(dd->pcidev,
			"errormask fixed(%u) %lx->%lx, ctrl %x hwerr %lx\n",
			fixed, errormask, (unsigned long)dd->cspec->errormask,
			ctrl, hwerrs);
	}
}
2641
2642
2643
2644
2645
2646
2647
2648
2649
/*
 * Timer routine for fast periodic statistics.
 *
 * Accumulates send+receive word counts into dd->traffic_wds to track
 * link activity, re-checks the error mask, and re-arms itself every
 * ACTIVITY_TIMER seconds.  @opaque is the qib_devdata pointer stashed
 * in the timer's data field at init.
 */
static void qib_get_6120_faststats(unsigned long opaque)
{
	struct qib_devdata *dd = (struct qib_devdata *) opaque;
	struct qib_pportdata *ppd = dd->pport;
	unsigned long flags;
	u64 traffic_wds;

	/*
	 * Skip the chip accesses before init completes or while a diag
	 * client owns the device (presumably to avoid interfering with
	 * diagnostics -- NOTE(review): confirm), but still re-arm the
	 * timer below.
	 */
	if (!(dd->flags & QIB_INITTED) || dd->diag_client)
		/* fall through to re-arm only */
		goto done;

	/*
	 * Sum the cumulative word counters, then convert to a delta
	 * against the running total under the eeprom-state lock.
	 */
	traffic_wds = qib_portcntr_6120(ppd, cr_wordsend) +
		qib_portcntr_6120(ppd, cr_wordrcv);
	spin_lock_irqsave(&dd->eep_st_lock, flags);
	traffic_wds -= dd->traffic_wds;
	dd->traffic_wds += traffic_wds;
	/* 5 presumably matches the re-arm period in seconds -- confirm */
	if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
		atomic_add(5, &dd->active_time);
	spin_unlock_irqrestore(&dd->eep_st_lock, flags);

	qib_chk_6120_errormask(dd);
done:
	mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
}
2683
2684
/*
 * Interrupt fallback hook: this chip's setup path uses the plain PCI
 * irq (see qib_init_iba6120_funcs), so there is no alternate interrupt
 * mode to fall back to; always report 0 (no fallback performed).
 */
static int qib_6120_nointr_fallback(struct qib_devdata *dd)
{
	return 0;
}
2689
2690
2691
2692
2693
2694
2695
/*
 * Pulse the XGXS reset bit.
 *
 * The link is disabled around the reset, and the write sequence is
 * order-critical: assert reset, flush via a scratch read, deassert,
 * then re-enable the link from the dd->control shadow.
 */
static void qib_6120_xgxs_reset(struct qib_pportdata *ppd)
{
	u64 val, prev_val;
	struct qib_devdata *dd = ppd->dd;

	prev_val = qib_read_kreg64(dd, kr_xgxs_cfg);
	val = prev_val | QLOGIC_IB_XGXS_RESET;
	/* ensure restore value has the reset bit clear */
	prev_val &= ~QLOGIC_IB_XGXS_RESET;
	/* take the link down while the XGXS is reset */
	qib_write_kreg(dd, kr_control,
		       dd->control & ~QLOGIC_IB_C_LINKENABLE);
	qib_write_kreg(dd, kr_xgxs_cfg, val);	/* assert reset */
	/* read back (scratch) so the write completes before deassert */
	qib_read_kreg32(dd, kr_scratch);
	qib_write_kreg(dd, kr_xgxs_cfg, prev_val); /* deassert reset */
	qib_write_kreg(dd, kr_control, dd->control); /* restore link enable */
}
2711
2712static int qib_6120_get_ib_cfg(struct qib_pportdata *ppd, int which)
2713{
2714 int ret;
2715
2716 switch (which) {
2717 case QIB_IB_CFG_LWID:
2718 ret = ppd->link_width_active;
2719 break;
2720
2721 case QIB_IB_CFG_SPD:
2722 ret = ppd->link_speed_active;
2723 break;
2724
2725 case QIB_IB_CFG_LWID_ENB:
2726 ret = ppd->link_width_enabled;
2727 break;
2728
2729 case QIB_IB_CFG_SPD_ENB:
2730 ret = ppd->link_speed_enabled;
2731 break;
2732
2733 case QIB_IB_CFG_OP_VLS:
2734 ret = ppd->vls_operational;
2735 break;
2736
2737 case QIB_IB_CFG_VL_HIGH_CAP:
2738 ret = 0;
2739 break;
2740
2741 case QIB_IB_CFG_VL_LOW_CAP:
2742 ret = 0;
2743 break;
2744
2745 case QIB_IB_CFG_OVERRUN_THRESH:
2746 ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl,
2747 OverrunThreshold);
2748 break;
2749
2750 case QIB_IB_CFG_PHYERR_THRESH:
2751 ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl,
2752 PhyerrThreshold);
2753 break;
2754
2755 case QIB_IB_CFG_LINKDEFAULT:
2756
2757 ret = (ppd->dd->cspec->ibcctrl &
2758 SYM_MASK(IBCCtrl, LinkDownDefaultState)) ?
2759 IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
2760 break;
2761
2762 case QIB_IB_CFG_HRTBT:
2763 ret = 0;
2764 break;
2765
2766 case QIB_IB_CFG_PMA_TICKS:
2767 ret = 250;
2768 break;
2769
2770 default:
2771 ret = -EINVAL;
2772 break;
2773 }
2774 return ret;
2775}
2776
2777
2778
2779
/*
 * Set an IB port configuration attribute.
 *
 * Attributes living in IBCCtrl update the software shadow
 * (dd->cspec->ibcctrl) and push it to the chip, followed by a scratch
 * write (the flush pattern used throughout this file).  Returns 0 on
 * success or -EINVAL for unknown/unsupported settings or bad values.
 */
static int qib_6120_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
{
	struct qib_devdata *dd = ppd->dd;
	int ret = 0;
	u64 val64;
	u16 lcmd, licmd;

	switch (which) {
	case QIB_IB_CFG_LWID_ENB:
		ppd->link_width_enabled = val;
		break;

	case QIB_IB_CFG_SPD_ENB:
		ppd->link_speed_enabled = val;
		break;

	case QIB_IB_CFG_OVERRUN_THRESH:
		/* only touch the chip if the value actually changes */
		val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl,
				  OverrunThreshold);
		if (val64 != val) {
			dd->cspec->ibcctrl &=
				~SYM_MASK(IBCCtrl, OverrunThreshold);
			dd->cspec->ibcctrl |= (u64) val <<
				SYM_LSB(IBCCtrl, OverrunThreshold);
			qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
			qib_write_kreg(dd, kr_scratch, 0);
		}
		break;

	case QIB_IB_CFG_PHYERR_THRESH:
		val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl,
				  PhyerrThreshold);
		if (val64 != val) {
			dd->cspec->ibcctrl &=
				~SYM_MASK(IBCCtrl, PhyerrThreshold);
			dd->cspec->ibcctrl |= (u64) val <<
				SYM_LSB(IBCCtrl, PhyerrThreshold);
			qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
			qib_write_kreg(dd, kr_scratch, 0);
		}
		break;

	case QIB_IB_CFG_PKEYS:
		/* pack the four 16-bit pkeys into the partition register */
		val64 = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
			((u64) ppd->pkeys[2] << 32) |
			((u64) ppd->pkeys[3] << 48);
		qib_write_kreg(dd, kr_partitionkey, val64);
		break;

	case QIB_IB_CFG_LINKDEFAULT:
		/* POLL clears the link-down-default bit, anything else sets it */
		if (val == IB_LINKINITCMD_POLL)
			dd->cspec->ibcctrl &=
				~SYM_MASK(IBCCtrl, LinkDownDefaultState);
		else
			dd->cspec->ibcctrl |=
				SYM_MASK(IBCCtrl, LinkDownDefaultState);
		qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
		qib_write_kreg(dd, kr_scratch, 0);
		break;

	case QIB_IB_CFG_MTU:
		/*
		 * The max packet length is derived from ppd->ibmaxlen
		 * (in dwords, plus one -- presumably for the PBC/ICRC
		 * overhead; NOTE(review): confirm), not from the caller's
		 * @val, which is overwritten here.
		 */
		val = (ppd->ibmaxlen >> 2) + 1;
		dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen);
		dd->cspec->ibcctrl |= (u64)val <<
			SYM_LSB(IBCCtrl, MaxPktLen);
		qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
		qib_write_kreg(dd, kr_scratch, 0);
		break;

	case QIB_IB_CFG_LSTATE:
		/* high 16 bits: link command; low 16 bits: link-init command */
		switch (val & 0xffff0000) {
		case IB_LINKCMD_DOWN:
			lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
			/*
			 * Snapshot error counters at down; the deltas are
			 * folded in when the link comes back up (see
			 * qib_6120_ib_updown).
			 */
			if (!dd->cspec->ibdeltainprog) {
				dd->cspec->ibdeltainprog = 1;
				dd->cspec->ibsymsnap =
					read_6120_creg32(dd, cr_ibsymbolerr);
				dd->cspec->iblnkerrsnap =
					read_6120_creg32(dd, cr_iblinkerrrecov);
			}
			break;

		case IB_LINKCMD_ARMED:
			lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
			break;

		case IB_LINKCMD_ACTIVE:
			lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
			break;

		default:
			ret = -EINVAL;
			qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
			goto bail;
		}
		switch (val & 0xffff) {
		case IB_LINKINITCMD_NOP:
			licmd = 0;
			break;

		case IB_LINKINITCMD_POLL:
			licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
			break;

		case IB_LINKINITCMD_SLEEP:
			licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
			break;

		case IB_LINKINITCMD_DISABLE:
			licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
			break;

		default:
			ret = -EINVAL;
			qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
				    val & 0xffff);
			goto bail;
		}
		qib_set_ib_6120_lstate(ppd, lcmd, licmd);
		goto bail;

	case QIB_IB_CFG_HRTBT:
		/* heartbeat is not settable on this chip */
		ret = -EINVAL;
		break;

	default:
		ret = -EINVAL;
	}
bail:
	return ret;
}
2919
2920static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what)
2921{
2922 int ret = 0;
2923 if (!strncmp(what, "ibc", 3)) {
2924 ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
2925 qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
2926 ppd->dd->unit, ppd->port);
2927 } else if (!strncmp(what, "off", 3)) {
2928 ppd->dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
2929 qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
2930 "(normal)\n", ppd->dd->unit, ppd->port);
2931 } else
2932 ret = -EINVAL;
2933 if (!ret) {
2934 qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->dd->cspec->ibcctrl);
2935 qib_write_kreg(ppd->dd, kr_scratch, 0);
2936 }
2937 return ret;
2938}
2939
/*
 * PMA sampling timer.
 *
 * Two-phase state machine: the first expiry (STARTED) takes the
 * starting counter snapshot, marks RUNNING, and re-arms for the
 * sample interval; the second expiry (RUNNING) re-reads the counters,
 * converts the stored snapshots to deltas over the window, and marks
 * DONE.  @data is the qib_pportdata pointer set at timer init.
 */
static void pma_6120_timer(unsigned long data)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *)data;
	struct qib_chip_specific *cs = ppd->dd->cspec;
	struct qib_ibport *ibp = &ppd->ibport_data;
	unsigned long flags;

	/* state and snapshots are protected by the ibport lock */
	spin_lock_irqsave(&ibp->lock, flags);
	if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED) {
		/* window opens: record the starting counter values */
		cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
		qib_snapshot_counters(ppd, &cs->sword, &cs->rword,
				      &cs->spkts, &cs->rpkts, &cs->xmit_wait);
		mod_timer(&cs->pma_timer,
		      jiffies + usecs_to_jiffies(ibp->pma_sample_interval));
	} else if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
		u64 ta, tb, tc, td, te;

		/* window closes: replace snapshots with deltas */
		cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
		qib_snapshot_counters(ppd, &ta, &tb, &tc, &td, &te);

		cs->sword = ta - cs->sword;
		cs->rword = tb - cs->rword;
		cs->spkts = tc - cs->spkts;
		cs->rpkts = td - cs->rpkts;
		cs->xmit_wait = te - cs->xmit_wait;
	}
	spin_unlock_irqrestore(&ibp->lock, flags);
}
2968
2969
2970
2971
2972static void qib_set_cntr_6120_sample(struct qib_pportdata *ppd, u32 intv,
2973 u32 start)
2974{
2975 struct qib_chip_specific *cs = ppd->dd->cspec;
2976
2977 if (start && intv) {
2978 cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_STARTED;
2979 mod_timer(&cs->pma_timer, jiffies + usecs_to_jiffies(start));
2980 } else if (intv) {
2981 cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
2982 qib_snapshot_counters(ppd, &cs->sword, &cs->rword,
2983 &cs->spkts, &cs->rpkts, &cs->xmit_wait);
2984 mod_timer(&cs->pma_timer, jiffies + usecs_to_jiffies(intv));
2985 } else {
2986 cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
2987 cs->sword = 0;
2988 cs->rword = 0;
2989 cs->spkts = 0;
2990 cs->rpkts = 0;
2991 cs->xmit_wait = 0;
2992 }
2993}
2994
2995static u32 qib_6120_iblink_state(u64 ibcs)
2996{
2997 u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState);
2998
2999 switch (state) {
3000 case IB_6120_L_STATE_INIT:
3001 state = IB_PORT_INIT;
3002 break;
3003 case IB_6120_L_STATE_ARM:
3004 state = IB_PORT_ARMED;
3005 break;
3006 case IB_6120_L_STATE_ACTIVE:
3007
3008 case IB_6120_L_STATE_ACT_DEFER:
3009 state = IB_PORT_ACTIVE;
3010 break;
3011 default:
3012 case IB_6120_L_STATE_DOWN:
3013 state = IB_PORT_DOWN;
3014 break;
3015 }
3016 return state;
3017}
3018
3019
3020static u8 qib_6120_phys_portstate(u64 ibcs)
3021{
3022 u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState);
3023 return qib_6120_physportstate[state];
3024}
3025
3026static int qib_6120_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
3027{
3028 unsigned long flags;
3029
3030 spin_lock_irqsave(&ppd->lflags_lock, flags);
3031 ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
3032 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3033
3034 if (ibup) {
3035 if (ppd->dd->cspec->ibdeltainprog) {
3036 ppd->dd->cspec->ibdeltainprog = 0;
3037 ppd->dd->cspec->ibsymdelta +=
3038 read_6120_creg32(ppd->dd, cr_ibsymbolerr) -
3039 ppd->dd->cspec->ibsymsnap;
3040 ppd->dd->cspec->iblnkerrdelta +=
3041 read_6120_creg32(ppd->dd, cr_iblinkerrrecov) -
3042 ppd->dd->cspec->iblnkerrsnap;
3043 }
3044 qib_hol_init(ppd);
3045 } else {
3046 ppd->dd->cspec->lli_counter = 0;
3047 if (!ppd->dd->cspec->ibdeltainprog) {
3048 ppd->dd->cspec->ibdeltainprog = 1;
3049 ppd->dd->cspec->ibsymsnap =
3050 read_6120_creg32(ppd->dd, cr_ibsymbolerr);
3051 ppd->dd->cspec->iblnkerrsnap =
3052 read_6120_creg32(ppd->dd, cr_iblinkerrrecov);
3053 }
3054 qib_hol_down(ppd);
3055 }
3056
3057 qib_6120_setup_setextled(ppd, ibup);
3058
3059 return 0;
3060}
3061
3062
3063
3064
3065
3066
3067
/*
 * Modify GPIO output-enable and output bits, and read the inputs.
 *
 * @out:  new output values for the bits selected by @mask
 * @dir:  new output-enable (direction) bits for the bits in @mask
 * @mask: which GPIO bits to change; 0 makes this a pure read
 *
 * Returns the GPIOIn field of EXTStatus (current pin levels).
 */
static int gpio_6120_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
{
	u64 read_val, new_out;
	unsigned long flags;

	if (mask) {
		/* some bits change; serialize shadow updates via gpio_lock */
		dir &= mask;
		out &= mask;
		spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
		dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
		dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
		new_out = (dd->cspec->gpio_out & ~mask) | out;

		/* write direction first, then the new output values */
		qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
		qib_write_kreg(dd, kr_gpio_out, new_out);
		dd->cspec->gpio_out = new_out;
		spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
	}

	/*
	 * The read is done unconditionally; note a pin whose direction
	 * was just changed in this same call may not yet read back a
	 * meaningful level.
	 */
	read_val = qib_read_kreg64(dd, kr_extstatus);
	return SYM_FIELD(read_val, EXTStatus, GPIOIn);
}
3098
3099
3100
3101
3102
3103
/*
 * Read chip configuration (register bases, buffer counts/sizes) into
 * the devdata.  Called early in init, before the rest of setup.
 */
static void get_6120_chip_params(struct qib_devdata *dd)
{
	u64 val;
	u32 piobufs;
	int mtu;

	dd->uregbase = qib_read_kreg32(dd, kr_userregbase);

	dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
	dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
	dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
	dd->palign = qib_read_kreg32(dd, kr_palign);
	dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
	dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;

	/*
	 * NOTE(review): rcvhdrcnt is loaded from the eager-count
	 * register (kr_rcvegrcnt) -- presumably the two counts are the
	 * same on this chip; confirm against the 6120 register docs.
	 */
	dd->rcvhdrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);

	/* low 32 bits: 2K PIO buffer size; high 32 bits: 4K size */
	val = qib_read_kreg64(dd, kr_sendpiosize);
	dd->piosize2k = val & ~0U;
	dd->piosize4k = val >> 32;

	mtu = ib_mtu_enum_to_int(qib_ibmtu);
	if (mtu == -1)
		mtu = QIB_DEFAULT_MTU;	/* module param unset or invalid */
	dd->pport->ibmtu = (u32)mtu;

	/* low 32 bits: 2K buffer count; high 32 bits: 4K count */
	val = qib_read_kreg64(dd, kr_sendpiobufcnt);
	dd->piobcnt2k = val & ~0U;
	dd->piobcnt4k = val >> 32;

	/* PIO buffers are mapped inside the same BAR as the kregs */
	dd->pio2kbase = (u32 __iomem *)
		(((char __iomem *)dd->kregbase) + dd->pio2k_bufbase);
	if (dd->piobcnt4k) {
		dd->pio4kbase = (u32 __iomem *)
			(((char __iomem *) dd->kregbase) +
			 (dd->piobufbase >> 32));
		/*
		 * Round the 4K buffer stride up to the chip's page
		 * alignment, computed once here rather than per send.
		 */
		dd->align4k = ALIGN(dd->piosize4k, dd->palign);
	}

	piobufs = dd->piobcnt4k + dd->piobcnt2k;

	/* one 64-bit pioavail register covers 32 buffers (2 bits each) */
	dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
		(sizeof(u64) * BITS_PER_BYTE / 2);
}
3153
3154
3155
3156
3157
3158
3159static void set_6120_baseaddrs(struct qib_devdata *dd)
3160{
3161 u32 cregbase;
3162 cregbase = qib_read_kreg32(dd, kr_counterregbase);
3163 dd->cspec->cregbase = (u64 __iomem *)
3164 ((char __iomem *) dd->kregbase + cregbase);
3165
3166 dd->egrtidbase = (u64 __iomem *)
3167 ((char __iomem *) dd->kregbase + dd->rcvegrbase);
3168}
3169
3170
3171
3172
3173
3174
3175static int qib_late_6120_initreg(struct qib_devdata *dd)
3176{
3177 int ret = 0;
3178 u64 val;
3179
3180 qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
3181 qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
3182 qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
3183 qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
3184 val = qib_read_kreg64(dd, kr_sendpioavailaddr);
3185 if (val != dd->pioavailregs_phys) {
3186 qib_dev_err(dd, "Catastrophic software error, "
3187 "SendPIOAvailAddr written as %lx, "
3188 "read back as %llx\n",
3189 (unsigned long) dd->pioavailregs_phys,
3190 (unsigned long long) val);
3191 ret = -EINVAL;
3192 }
3193 return ret;
3194}
3195
/*
 * Initialize chip-specific and per-port software state.
 *
 * The pport and chip-specific structures were allocated contiguously
 * after the devdata (see qib_init_iba6120_funcs); this carves them
 * out, validates the chip is present via the revision register, reads
 * chip parameters, and sets up timers, contexts, and buffer
 * accounting.  Returns 0 on success or a negative errno.
 */
static int init_6120_variables(struct qib_devdata *dd)
{
	int ret = 0;
	struct qib_pportdata *ppd;
	u32 sbufs;

	/* pportdata immediately follows dd; cspec follows the pports */
	ppd = (struct qib_pportdata *)(dd + 1);
	dd->pport = ppd;
	dd->num_pports = 1;

	dd->cspec = (struct qib_chip_specific *)(ppd + dd->num_pports);
	ppd->cpspec = NULL; /* all chip state lives in dd->cspec here */

	spin_lock_init(&dd->cspec->kernel_tid_lock);
	spin_lock_init(&dd->cspec->user_tid_lock);
	spin_lock_init(&dd->cspec->rcvmod_lock);
	spin_lock_init(&dd->cspec->gpio_lock);

	/* direct readq: kreg helpers need dd->revision/QIB_PRESENT first */
	dd->revision = readq(&dd->kregbase[kr_revision]);

	/* all-ones low word means the register read failed (no chip) */
	if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
		qib_dev_err(dd, "Revision register read failure, "
			    "giving up initialization\n");
		ret = -ENODEV;
		goto bail;
	}
	dd->flags |= QIB_PRESENT;	/* chip registers are now usable */

	dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R,
				    ChipRevMajor);
	dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R,
				    ChipRevMinor);

	get_6120_chip_params(dd);
	pe_boardname(dd); /* fill in boardname */

	/*
	 * GPIO pin assignments for TWSI (EEPROM) access.
	 */
	dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
	dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
	dd->twsi_eeprom_dev = QIB_TWSI_NO_DEV;

	if (qib_unordered_wc())
		dd->flags |= QIB_PIO_FLUSH_WC;

	/*
	 * EEPROM error-log masks: which hardware/chip errors are worth
	 * recording per category.
	 */
	dd->eep_st_masks[0].hwerrs_to_log = HWE_MASK(TXEMemParityErr);

	/* TX PIO parity is not logged when write-combining is unordered */
	if (qib_unordered_wc())
		dd->eep_st_masks[0].hwerrs_to_log &= ~TXE_PIO_PARITY;

	dd->eep_st_masks[1].hwerrs_to_log = HWE_MASK(RXEMemParityErr);

	dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated);

	qib_init_pportdata(ppd, dd, 0, 1);
	ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
	ppd->link_speed_supported = QIB_IB_SDR;
	ppd->link_width_enabled = IB_WIDTH_4X;
	ppd->link_speed_enabled = ppd->link_speed_supported;
	/* active values start equal to the enabled settings */
	ppd->link_width_active = ppd->link_width_enabled;
	ppd->link_speed_active = ppd->link_speed_enabled;
	ppd->vls_supported = IB_VL_VL0;
	ppd->vls_operational = ppd->vls_supported;

	dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
	dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
	dd->rhf_offset = 0;

	/* eager buffers hold at least the MTU, with a 2048-byte floor */
	ret = ib_mtu_enum_to_int(qib_ibmtu);
	dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;

	qib_6120_tidtemplate(dd);

	/*
	 * NOTE(review): bit 32 ORed into rcvhdrhead writes presumably
	 * requests an interrupt on head update -- confirm against the
	 * chip documentation.
	 */
	dd->rhdrhead_intr_off = 1ULL << 32;

	/* stats timer is armed later; just initialize it here */
	init_timer(&dd->stats_timer);
	dd->stats_timer.function = qib_get_6120_faststats;
	dd->stats_timer.data = (unsigned long) dd;

	init_timer(&dd->cspec->pma_timer);
	dd->cspec->pma_timer.function = pma_6120_timer;
	dd->cspec->pma_timer.data = (unsigned long) ppd;

	dd->ureg_align = qib_read_kreg32(dd, kr_palign);

	dd->piosize2kmax_dwords = dd->piosize2k >> 2;
	qib_6120_config_ctxts(dd);
	qib_set_ctxtcnt(dd);

	if (qib_wc_pat) {
		ret = init_chip_wc_pat(dd, 0);
		if (ret)
			goto bail;
	}
	set_6120_baseaddrs(dd); /* set chip access pointers now usable */

	ret = 0;
	if (qib_mini_init)
		goto bail;

	qib_num_cfg_vls = 1; /* this chip supports only VL0 */

	ret = qib_create_ctxts(dd);
	init_6120_cntrnames(dd);

	/* reserve the 4K buffers for the kernel, or 16 if there are none */
	sbufs = dd->piobcnt4k ?  dd->piobcnt4k : 16;

	dd->lastctxt_piobuf = dd->piobcnt2k + dd->piobcnt4k - sbufs;
	/* remaining buffers are divided among the user contexts */
	dd->pbufsctxt = dd->lastctxt_piobuf /
		(dd->cfgctxts - dd->first_user_ctxt);

	if (ret)
		goto bail;
bail:
	return ret;
}
3329
3330
3331
3332
3333
3334
3335
3336
3337
3338
3339
3340
3341
3342
3343
3344
/*
 * Get the dedicated PIO buffer used for link/VL15 packets (the single
 * last buffer past the 2K and 4K pools).
 *
 * If it is not immediately available, blip the availability update,
 * and as a last resort disarm and flush all send buffers before one
 * final retry.  Returns the buffer address or NULL, with the buffer
 * number in *bnum.
 */
static u32 __iomem *get_6120_link_buf(struct qib_pportdata *ppd, u32 *bnum)
{
	u32 __iomem *buf;
	/* index of the single reserved buffer, after both pools */
	u32 lbuf = ppd->dd->piobcnt2k + ppd->dd->piobcnt4k - 1;

	/*
	 * Refresh the available-buffer shadow, then try the reserved
	 * buffer.
	 */
	sendctrl_6120_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
	qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
	buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
	if (buf)
		goto done;

	/* last resort: disarm and flush everything, then retry once */
	sendctrl_6120_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH |
			  QIB_SENDCTRL_AVAIL_BLIP);
	ppd->dd->upd_pio_shadow  = 1; /* force an update of our shadow */
	qib_read_kreg64(ppd->dd, kr_scratch);
	buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
done:
	return buf;
}
3368
3369static u32 __iomem *qib_6120_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
3370 u32 *pbufnum)
3371{
3372 u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
3373 struct qib_devdata *dd = ppd->dd;
3374 u32 __iomem *buf;
3375
3376 if (((pbc >> 32) & PBC_6120_VL15_SEND_CTRL) &&
3377 !(ppd->lflags & (QIBL_IB_AUTONEG_INPROG | QIBL_LINKACTIVE)))
3378 buf = get_6120_link_buf(ppd, pbufnum);
3379 else {
3380
3381 if ((plen + 1) > dd->piosize2kmax_dwords)
3382 first = dd->piobcnt2k;
3383 else
3384 first = 0;
3385
3386 last = dd->piobcnt2k + dd->piobcnt4k - 1;
3387 buf = qib_getsendbuf_range(dd, pbufnum, first, last);
3388 }
3389 return buf;
3390}
3391
/* The 6120 has no send DMA engine; report that it does not exist. */
static int init_sdma_6120_regs(struct qib_pportdata *ppd)
{
	return -ENODEV;
}
3396
/* No SDMA on this chip: the head is always 0. */
static u16 qib_sdma_6120_gethead(struct qib_pportdata *ppd)
{
	return 0;
}
3401
/* No SDMA on this chip: never busy. */
static int qib_sdma_6120_busy(struct qib_pportdata *ppd)
{
	return 0;
}
3406
/* No SDMA on this chip: tail updates are a no-op. */
static void qib_sdma_update_6120_tail(struct qib_pportdata *ppd, u16 tail)
{
}
3410
/* No SDMA on this chip: sendctrl operations are a no-op. */
static void qib_6120_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
{
}
3414
/* No SDMA on this chip: descriptor count is a no-op. */
static void qib_sdma_set_6120_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
{
}
3418
3419
3420
3421
3422
3423static u32 qib_6120_setpbc_control(struct qib_pportdata *ppd, u32 plen,
3424 u8 srate, u8 vl)
3425{
3426 return vl == 15 ? PBC_6120_VL15_SEND_CTRL : 0;
3427}
3428
/* No dedicated VL15 buffer setup is needed on this chip. */
static void qib_6120_initvl15_bufs(struct qib_devdata *dd)
{
}
3432
3433static void qib_6120_init_ctxt(struct qib_ctxtdata *rcd)
3434{
3435 rcd->rcvegrcnt = rcd->dd->rcvhdrcnt;
3436 rcd->rcvegr_tid_base = rcd->ctxt * rcd->rcvegrcnt;
3437}
3438
/* No per-context transmit-buffer checking to update on this chip. */
static void qib_6120_txchk_change(struct qib_devdata *dd, u32 start,
	u32 len, u32 avail, struct qib_ctxtdata *rcd)
{
}
3443
/*
 * Write the scratch register.  Used elsewhere in this file as a way
 * to flush preceding chip writes; the value itself is don't-care.
 */
static void writescratch(struct qib_devdata *dd, u32 val)
{
	(void) qib_write_kreg(dd, kr_scratch, val);
}
3448
/* No temperature sensor is readable on this chip. */
static int qib_6120_tempsense_rd(struct qib_devdata *dd, int regnum)
{
	return -ENXIO;
}
3453
3454
/*
 * EEPROM write-enable hook: nothing to toggle on this chip, so it
 * always returns 1 (presumably "previously enabled" -- NOTE(review):
 * confirm the meaning against the callers of f_eeprom_wen).
 */
static int qib_6120_eeprom_wen(struct qib_devdata *dd, int wen)
{
	return 1;
}
3459
3460
3461
3462
3463
3464
3465
3466
3467
3468
3469
3470
/*
 * Allocate and initialize the device data for an IBA6120.
 *
 * Allocates the devdata with room for one pportdata and the
 * chip-specific struct appended (carved out in init_6120_variables),
 * installs the chip-specific function pointers, then performs PCIe
 * and chip initialization.  Returns the devdata on success or an
 * ERR_PTR on failure.
 */
struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev,
					   const struct pci_device_id *ent)
{
	struct qib_devdata *dd;
	int ret;

	dd = qib_alloc_devdata(pdev, sizeof(struct qib_pportdata) +
			       sizeof(struct qib_chip_specific));
	if (IS_ERR(dd))
		goto bail;

	/* chip-specific entry points used by the generic qib code */
	dd->f_bringup_serdes = qib_6120_bringup_serdes;
	dd->f_cleanup = qib_6120_setup_cleanup;
	dd->f_clear_tids = qib_6120_clear_tids;
	dd->f_free_irq = qib_6120_free_irq;
	dd->f_get_base_info = qib_6120_get_base_info;
	dd->f_get_msgheader = qib_6120_get_msgheader;
	dd->f_getsendbuf = qib_6120_getsendbuf;
	dd->f_gpio_mod = gpio_6120_mod;
	dd->f_eeprom_wen = qib_6120_eeprom_wen;
	dd->f_hdrqempty = qib_6120_hdrqempty;
	dd->f_ib_updown = qib_6120_ib_updown;
	dd->f_init_ctxt = qib_6120_init_ctxt;
	dd->f_initvl15_bufs = qib_6120_initvl15_bufs;
	dd->f_intr_fallback = qib_6120_nointr_fallback;
	dd->f_late_initreg = qib_late_6120_initreg;
	dd->f_setpbc_control = qib_6120_setpbc_control;
	dd->f_portcntr = qib_portcntr_6120;
	/* later chip revisions need the alternate put_tid */
	dd->f_put_tid = (dd->minrev >= 2) ?
		qib_6120_put_tid_2 :
		qib_6120_put_tid;
	dd->f_quiet_serdes = qib_6120_quiet_serdes;
	dd->f_rcvctrl = rcvctrl_6120_mod;
	dd->f_read_cntrs = qib_read_6120cntrs;
	dd->f_read_portcntrs = qib_read_6120portcntrs;
	dd->f_reset = qib_6120_setup_reset;
	dd->f_init_sdma_regs = init_sdma_6120_regs;
	dd->f_sdma_busy = qib_sdma_6120_busy;
	dd->f_sdma_gethead = qib_sdma_6120_gethead;
	dd->f_sdma_sendctrl = qib_6120_sdma_sendctrl;
	dd->f_sdma_set_desc_cnt = qib_sdma_set_6120_desc_cnt;
	dd->f_sdma_update_tail = qib_sdma_update_6120_tail;
	dd->f_sendctrl = sendctrl_6120_mod;
	dd->f_set_armlaunch = qib_set_6120_armlaunch;
	dd->f_set_cntr_sample = qib_set_cntr_6120_sample;
	dd->f_iblink_state = qib_6120_iblink_state;
	dd->f_ibphys_portstate = qib_6120_phys_portstate;
	dd->f_get_ib_cfg = qib_6120_get_ib_cfg;
	dd->f_set_ib_cfg = qib_6120_set_ib_cfg;
	dd->f_set_ib_loopback = qib_6120_set_loopback;
	dd->f_set_intr_state = qib_6120_set_intr_state;
	dd->f_setextled = qib_6120_setup_setextled;
	dd->f_txchk_change = qib_6120_txchk_change;
	dd->f_update_usrhead = qib_update_6120_usrhead;
	dd->f_wantpiobuf_intr = qib_wantpiobuf_6120_intr;
	dd->f_xgxs_reset = qib_6120_xgxs_reset;
	dd->f_writescratch = writescratch;
	dd->f_tempsense_rd = qib_6120_tempsense_rd;

	/*
	 * Map the registers (qib_pcie_ddinit) before the chip-specific
	 * init can read them.
	 */
	ret = qib_pcie_ddinit(dd, pdev, ent);
	if (ret < 0)
		goto bail_free;

	/* initialize chip-specific variables */
	ret = init_6120_variables(dd);
	if (ret)
		goto bail_cleanup;

	/* mini-init mode skips interrupt/PCIe setup entirely */
	if (qib_mini_init)
		goto bail;

	/* interrupt setup failure is reported but not fatal */
	if (qib_pcie_params(dd, 8, NULL, NULL))
		qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
			    "continuing anyway\n");
	dd->cspec->irq = pdev->irq; /* save the plain PCI IRQ */

	/* clear diag control, in case diags were running and crashed */
	qib_write_kreg(dd, kr_hwdiagctrl, 0);

	/* a SerDes PLL failure left over from reset is expected; clear it */
	if (qib_read_kreg64(dd, kr_hwerrstatus) &
	    QLOGIC_IB_HWE_SERDESPLLFAILED)
		qib_write_kreg(dd, kr_hwerrclear,
			       QLOGIC_IB_HWE_SERDESPLLFAILED);

	/* setup interrupt handler, then enable hwerror interrupts */
	qib_setup_6120_interrupt(dd);

	qib_6120_init_hwerrors(dd);

	goto bail;

bail_cleanup:
	qib_pcie_ddcleanup(dd);
bail_free:
	qib_free_devdata(dd);
	dd = ERR_PTR(ret);
bail:
	return dd;
}
3576