#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include "hfi.h"
#include "trace.h"
#include "mad.h"
#include "pio.h"
#include "sdma.h"
#include "eprom.h"
#include "efivar.h"
#include "platform.h"
#include "aspm.h"
#include "affinity.h"

#define NUM_IB_PORTS 1

uint kdeth_qp;
module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");

uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");

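/*
 * Receive interrupt mitigation defaults; see the module parameter
 * descriptions below for the meaning of each knob.
 */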
uint rcv_intr_timeout = (824 + 16);
module_param(rcv_intr_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");

uint rcv_intr_count = 16;
module_param(rcv_intr_count, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");

ushort link_crc_mask = SUPPORTED_CRCS;
module_param(link_crc_mask, ushort, S_IRUGO);
MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");

uint loopback;
module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");

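/* fixed (non-module-parameter) configuration */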
uint rcv_intr_dynamic = 1;
static ushort crc_14b_sideband = 1;
static uint use_flr = 1;
uint quick_linkup;

struct flag_table {
	u64 flag;	/* the flag bit(s) */
	char *str;	/* description string */
	u16 extra;	/* extra information (e.g. SEC_* consequences) */
	u16 unused0;
	u32 unused1;
};

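/* helpers for building flag_table entries, with and without extra bits */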
#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
#define FLAG_ENTRY0(str, flag) {flag, str, 0}

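/* consequences of a send error, used in flag_table.extra */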
#define SEC_WRITE_DROPPED	0x1
#define SEC_PACKET_DROPPED	0x2
#define SEC_SC_HALTED		0x4
#define SEC_SPC_FREEZE		0x8

#define DEFAULT_KRCVQS 2
#define MIN_KERNEL_KCTXTS 2
#define FIRST_KERNEL_KCTXT 1

#define NUM_MAP_ENTRIES 256
#define NUM_MAP_REGS 32

#define GUID_HFI_INDEX_SHIFT 39

#define emulator_rev(dd) ((dd)->irev >> 8)
#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)

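/*
 * Receive-side packet matching fields. Bit offsets are encoded as
 * (quad word << QW_SHIFT) | (bit within the quad word).
 */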
#define IB_PACKET_TYPE 2ull
#define QW_SHIFT 6ull

#define QPN_WIDTH 7ull

/* LRH.BTH: QW 0, bit offset 48 - for match */
#define LRH_BTH_QW 0ull
#define LRH_BTH_BIT_OFFSET 48ull
#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_SELECT_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_MASK 3ull
#define LRH_BTH_VALUE 2ull

/* LRH.SC: QW 0, bit offset 56 - for match */
#define LRH_SC_QW 0ull
#define LRH_SC_BIT_OFFSET 56ull
#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
#define LRH_SC_MASK 128ull
#define LRH_SC_VALUE 0ull

/* SC select: QW 0, bit offset 60 */
#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))

/* QPN select: QW 1, bit offset 1 */
#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))

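/*
 * Helpers to build SC-to-VL table register values: SC2VL_VAL packs
 * eight SC assignments into one SEND_SC2VLT* CSR, DC_SC_VL_VAL packs
 * sixteen entries into one DCC_CFG_SC_VL_TABLE_* CSR.
 */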
#define SC2VL_VAL( \
	num, \
	sc0, sc0val, \
	sc1, sc1val, \
	sc2, sc2val, \
	sc3, sc3val, \
	sc4, sc4val, \
	sc5, sc5val, \
	sc6, sc6val, \
	sc7, sc7val) \
( \
	((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
	((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
	((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
	((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
	((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
	((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
	((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
	((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
)

#define DC_SC_VL_VAL( \
	range, \
	e0, e0val, \
	e1, e1val, \
	e2, e2val, \
	e3, e3val, \
	e4, e4val, \
	e5, e5val, \
	e6, e6val, \
	e7, e7val, \
	e8, e8val, \
	e9, e9val, \
	e10, e10val, \
	e11, e11val, \
	e12, e12val, \
	e13, e13val, \
	e14, e14val, \
	e15, e15val) \
( \
	((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
	((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
	((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
	((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
	((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
	((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
	((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
	((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
	((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
	((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
	((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
	((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
	((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
	((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
	((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
	((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
)

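/* CCE_STATUS freeze and pause bit groupings */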
#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
		| CCE_STATUS_RXE_FROZE_SMASK \
		| CCE_STATUS_TXE_FROZE_SMASK \
		| CCE_STATUS_TXE_PIO_FROZE_SMASK)

#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
		| CCE_STATUS_TXE_PAUSED_SMASK \
		| CCE_STATUS_SDMA_PAUSED_SMASK)

#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK

#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
#define CNTR_32BIT_MAX 0x00000000FFFFFFFF

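/*
 * CCE Error flags.
 */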
static struct flag_table cce_err_status_flags[] = {
	FLAG_ENTRY0("CceCsrParityErr",
		    CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
	FLAG_ENTRY0("CceCsrReadBadAddrErr",
		    CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
	FLAG_ENTRY0("CceCsrWriteBadAddrErr",
		    CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
	FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
		    CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
	FLAG_ENTRY0("CceTrgtAccessErr",
		    CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
	FLAG_ENTRY0("CceRspdDataParityErr",
		    CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
	FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
		    CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
	FLAG_ENTRY0("CceCsrCfgBusParityErr",
		    CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
	FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
		    CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
	FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
		    CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
	FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
		    CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
	FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
		    CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
	FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
		    CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
	FLAG_ENTRY0("PcicRetryMemCorErr",
		    CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
	FLAG_ENTRY0("PcicRetrySotMemCorErr",
		    CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
	FLAG_ENTRY0("PcicPostHdQCorErr",
		    CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
	FLAG_ENTRY0("PcicPostDatQCorErr",
		    CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
	FLAG_ENTRY0("PcicCplHdQCorErr",
		    CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
	FLAG_ENTRY0("PcicCplDatQCorErr",
		    CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
	FLAG_ENTRY0("PcicNPostHQParityErr",
		    CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
	FLAG_ENTRY0("PcicNPostDatQParityErr",
		    CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
	FLAG_ENTRY0("PcicRetryMemUncErr",
		    CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
	FLAG_ENTRY0("PcicRetrySotMemUncErr",
		    CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
	FLAG_ENTRY0("PcicPostHdQUncErr",
		    CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
	FLAG_ENTRY0("PcicPostDatQUncErr",
		    CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
	FLAG_ENTRY0("PcicCplHdQUncErr",
		    CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
	FLAG_ENTRY0("PcicCplDatQUncErr",
		    CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
	FLAG_ENTRY0("PcicTransmitFrontParityErr",
		    CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
	FLAG_ENTRY0("PcicTransmitBackParityErr",
		    CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
	FLAG_ENTRY0("PcicReceiveParityErr",
		    CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
	FLAG_ENTRY0("CceTrgtCplTimeoutErr",
		    CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
	FLAG_ENTRY0("LATriggered",
		    CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
	FLAG_ENTRY0("CceSegReadBadAddrErr",
		    CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
	FLAG_ENTRY0("CceSegWriteBadAddrErr",
		    CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
	FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
		    CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
	FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
		    CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
	FLAG_ENTRY0("CceMsixTableCorErr",
		    CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
	FLAG_ENTRY0("CceMsixTableUncErr",
		    CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
	FLAG_ENTRY0("CceIntMapCorErr",
		    CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
	FLAG_ENTRY0("CceIntMapUncErr",
		    CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
	FLAG_ENTRY0("CceMsixCsrParityErr",
		    CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
};

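/*
 * Misc Error flags.
 */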
#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
static struct flag_table misc_err_status_flags[] = {
	FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
	FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
	FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
	FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
	FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
	FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
	FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
	FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
	FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
	FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
	FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
	FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
	FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
};

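/*
 * Send PIO Error flags and consequences.
 */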
static struct flag_table pio_err_status_flags[] = {
	FLAG_ENTRY("PioWriteBadCtxt", SEC_WRITE_DROPPED,
		   SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
	FLAG_ENTRY("PioWriteAddrParity", SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioCsrParity", SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioSbMemFifo0", SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
	FLAG_ENTRY("PioSbMemFifo1", SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
	FLAG_ENTRY("PioPccFifoParity", SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioPecFifoParity", SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioSbrdctlCrrelParity", SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioSbrdctrlCrrelFifoParity", SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioPktEvictFifoParityErr", SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioSmPktResetParity", SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioVlLenMemBank0Unc", SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
	FLAG_ENTRY("PioVlLenMemBank1Unc", SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
	FLAG_ENTRY("PioVlLenMemBank0Cor", 0,
		   SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
	FLAG_ENTRY("PioVlLenMemBank1Cor", 0,
		   SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
	FLAG_ENTRY("PioCreditRetFifoParity", SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioPpmcPblFifo", SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
	FLAG_ENTRY("PioInitSmIn", 0,
		   SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
	FLAG_ENTRY("PioPktEvictSmOrArbSm", SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
	FLAG_ENTRY("PioHostAddrMemUnc", SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
	FLAG_ENTRY("PioHostAddrMemCor", 0,
		   SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
	FLAG_ENTRY("PioWriteDataParity", SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioStateMachine", SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
	FLAG_ENTRY("PioWriteQwValidParity", SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioBlockQwCountParity", SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioVlfVlLenParity", SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioVlfSopParity", SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioVlFifoParity", SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioPpmcBqcMemParity", SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioPpmcSopLen", SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
	FLAG_ENTRY("PioCurrentFreeCntParity", SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioLastReturnedCntParity", SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioPccSopHeadParity", SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioPecSopHeadParityErr", SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
};

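/* PIO errors that cause an SPC freeze */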
#define ALL_PIO_FREEZE_ERR \
	(SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)

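/*
 * Send DMA Error flags.
 */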
static struct flag_table sdma_err_status_flags[] = {
	FLAG_ENTRY0("SDmaRpyTagErr",
		    SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
	FLAG_ENTRY0("SDmaCsrParityErr",
		    SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
	FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
		    SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
	FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
		    SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
};

#define ALL_SDMA_FREEZE_ERR \
	(SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)

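/* SendEgressErrInfo bits that count toward the port xmit discards counter */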
#define PORT_DISCARD_EGRESS_ERRS \
	(SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)

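/*
 * Send Egress Error flags.
 */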
#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
static struct flag_table egress_err_status_flags[] = {
	FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
	FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),

	FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
		    SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
	FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
	FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),

	FLAG_ENTRY0("TxPioLaunchIntfParityErr",
		    SEES(TX_PIO_LAUNCH_INTF_PARITY)),
	FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
		    SEES(TX_SDMA_LAUNCH_INTF_PARITY)),

	FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
		    SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
	FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
	FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
	FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
	FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
	FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
		    SEES(TX_SDMA0_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
		    SEES(TX_SDMA1_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
		    SEES(TX_SDMA2_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
		    SEES(TX_SDMA3_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
		    SEES(TX_SDMA4_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
		    SEES(TX_SDMA5_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
		    SEES(TX_SDMA6_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
		    SEES(TX_SDMA7_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
		    SEES(TX_SDMA8_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
		    SEES(TX_SDMA9_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
		    SEES(TX_SDMA10_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
		    SEES(TX_SDMA11_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
		    SEES(TX_SDMA12_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
		    SEES(TX_SDMA13_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
		    SEES(TX_SDMA14_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
		    SEES(TX_SDMA15_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
		    SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
	FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
		    SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
	FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
		    SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
	FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
		    SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
	FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
		    SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
	FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
		    SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
	FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
		    SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
	FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
		    SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
	FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
		    SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
	FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
	FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
	FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
	FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
	FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
	FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
	FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
	FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
	FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
	FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
	FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
	FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
	FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
	FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
	FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
	FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
	FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
	FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
	FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
	FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
	FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
	FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
		    SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
	FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
		    SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
};

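/*
 * Send Egress Error Info flags.
 */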
#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
static struct flag_table egress_err_info_flags[] = {
	FLAG_ENTRY0("Reserved", 0ull),
	FLAG_ENTRY0("VLErr", SEEI(VL)),
	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
	FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
	FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
	FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
	FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
	FLAG_ENTRY0("RawErr", SEEI(RAW)),
	FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
	FLAG_ENTRY0("GRHErr", SEEI(GRH)),
	FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
	FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
	FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
	FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
	FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
	FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
	FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
	FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
	FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
	FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
	FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
};

#define ALL_TXE_EGRESS_FREEZE_ERR \
	(SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
	| SEES(TX_PIO_LAUNCH_INTF_PARITY) \
	| SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
	| SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
	| SEES(TX_LAUNCH_CSR_PARITY) \
	| SEES(TX_SBRD_CTL_CSR_PARITY) \
	| SEES(TX_CONFIG_PARITY) \
	| SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
	| SEES(TX_CREDIT_RETURN_PARITY))

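/*
 * Send Error flags.
 */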
#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
static struct flag_table send_err_status_flags[] = {
	FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
	FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
	FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
};

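/*
 * Send Context Error flags and consequences.
 */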
static struct flag_table sc_err_status_flags[] = {
	FLAG_ENTRY("InconsistentSop", SEC_PACKET_DROPPED | SEC_SC_HALTED,
		   SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
	FLAG_ENTRY("DisallowedPacket", SEC_PACKET_DROPPED | SEC_SC_HALTED,
		   SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
	FLAG_ENTRY("WriteCrossesBoundary", SEC_WRITE_DROPPED | SEC_SC_HALTED,
		   SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
	FLAG_ENTRY("WriteOverflow", SEC_WRITE_DROPPED | SEC_SC_HALTED,
		   SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
	FLAG_ENTRY("WriteOutOfBounds", SEC_WRITE_DROPPED | SEC_SC_HALTED,
		   SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
};

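/*
 * RXE Error flags.
 */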
#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
static struct flag_table rxe_err_status_flags[] = {
	FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
	FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
	FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
	FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
	FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
	FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
	FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
	FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
	FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
	FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
	FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
	FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
	FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
	FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
	FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
	FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
	FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
		    RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
	FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
	FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
	FLAG_ENTRY0("RxRbufBlockListReadUncErr",
		    RXES(RBUF_BLOCK_LIST_READ_UNC)),
	FLAG_ENTRY0("RxRbufBlockListReadCorErr",
		    RXES(RBUF_BLOCK_LIST_READ_COR)),
	FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
		    RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
	FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
		    RXES(RBUF_CSR_QENT_CNT_PARITY)),
	FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
		    RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
	FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
		    RXES(RBUF_CSR_QVLD_BIT_PARITY)),
	FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
	FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
	FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
		    RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
	FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
	FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
	FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
	FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
	FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
	FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
	FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
	FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
		    RXES(RBUF_FL_INITDONE_PARITY)),
	FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
		    RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
	FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
	FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
	FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
	FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
		    RXES(LOOKUP_DES_PART1_UNC_COR)),
	FLAG_ENTRY0("RxLookupDesPart2ParityErr",
		    RXES(LOOKUP_DES_PART2_PARITY)),
	FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
	FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
	FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
	FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
	FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
	FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
	FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
	FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
	FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
	FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
	FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
	FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
	FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
	FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
	FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
	FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
	FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
	FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
	FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
	FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
	FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
	FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
};

#define ALL_RXE_FREEZE_ERR \
	(RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)

#define RXE_FREEZE_ABORT_MASK \
	(RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)

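/*
 * DCC Error flags.
 */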
#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
static struct flag_table dcc_err_flags[] = {
	FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
	FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
	FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
	FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
	FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
	FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
	FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
	FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
	FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
	FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
	FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
	FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
	FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
	FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
	FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
	FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
	FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
	FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
	FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
	FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
	FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
	FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
	FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
	FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
	FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
	FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
	FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
	FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
	FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
	FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
	FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
	FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
	FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
	FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
	FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
	FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
	FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
	FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
};

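/*
 * LCB Error flags.
 */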
#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
static struct flag_table lcb_err_flags[] = {
	FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
	FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
	FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
	FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
		    LCBE(ALL_LNS_FAILED_REINIT_TEST)),
	FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
	FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
	FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
	FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
	FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
	FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
	FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
	FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
	FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
	FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
		    LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
	FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
	FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
	FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
	FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
	FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
	FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
		    LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
	FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
	FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
	FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
	FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
	FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
	FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
	FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
		    LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
	FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
	FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
		    LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
	FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
		    LCBE(REDUNDANT_FLIT_PARITY_ERR))
};

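/*
 * DC8051 Error flags.
 */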
#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
static struct flag_table dc8051_err_flags[] = {
	FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
	FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
	FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
	FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
	FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
	FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
	FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
	FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
	FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
		    D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
	FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
};

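/*
 * DC8051 Information Error flags, as reported by the 8051 firmware.
 */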
static struct flag_table dc8051_info_err_flags[] = {
	FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
	FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
	FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
	FLAG_ENTRY0("Serdes internal loopback failure",
		    FAILED_SERDES_INTERNAL_LOOPBACK),
	FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
	FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
	FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
	FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
	FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
	FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
	FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
	FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
	FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT),
	FLAG_ENTRY0("External Device Request Timeout",
		    EXTERNAL_DEVICE_REQ_TIMEOUT),
};

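/*
 * DC8051 Information Host Message flags.
 */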
static struct flag_table dc8051_info_host_msg_flags[] = {
	FLAG_ENTRY0("Host request done", 0x0001),
	FLAG_ENTRY0("BC SMA message", 0x0002),
	FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
	FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
	FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
	FLAG_ENTRY0("External device config request", 0x0020),
	FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
	FLAG_ENTRY0("LinkUp achieved", 0x0080),
	FLAG_ENTRY0("Link going down", 0x0100),
};

static u32 encoded_size(u32 size);
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
			       u8 *continuous);
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
				      u8 *remote_tx_rate, u16 *link_widths);
static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
				     u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
				  u8 *device_rev);
static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
			    u8 *tx_polarity_inversion,
			    u8 *rx_polarity_inversion, u8 *max_rate);
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
				unsigned int context, u64 err_status);
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
static void handle_dcc_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_lcb_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
					  u32 state);
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
			   u64 *out_data);
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);

static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				  int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *);
static void dc_shutdown(struct hfi1_devdata *);
static void dc_start(struct hfi1_devdata *);
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
			   unsigned int *np);
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);

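/*
 * Error interrupt register descriptor: CSR addresses of the status,
 * clear, and mask registers, plus the handler invoked for that source.
 */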
struct err_reg_info {
	u32 status;
	u32 clear;
	u32 mask;
	void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
	const char *desc;
};

#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)

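/* helpers for building err_reg_info entries from a CSR name stem */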
#define EE(reg, handler, desc) \
	{ reg##_STATUS, reg##_CLEAR, reg##_MASK, \
	  handler, desc }
#define DC_EE1(reg, handler, desc) \
	{ reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
#define DC_EE2(reg, handler, desc) \
	{ reg##_FLG, reg##_CLR, reg##_EN, handler, desc }

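/* general error interrupt sources, indexed by interrupt source bit */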
static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
	EE(CCE_ERR, handle_cce_err, "CceErr"),
	EE(RCV_ERR, handle_rxe_err, "RxeErr"),
	EE(MISC_ERR, handle_misc_err, "MiscErr"),
	{ 0, 0, 0, NULL }, /* reserved */
	EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
	EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
	EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
	EE(SEND_ERR, handle_txe_err, "TxeErr")
};

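/*
 * Index of the critical temperature interrupt within the "various"
 * interrupt sources; it is not dispatched through various_err[].
 */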
#define TCRIT_INT_SOURCE 4

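/* all the per-engine SDMA error registers share one descriptor */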
static const struct err_reg_info sdma_eng_err =
	EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");

static const struct err_reg_info various_err[NUM_VARIOUS] = {
	{ 0, 0, 0, NULL },
	{ 0, 0, 0, NULL },
	EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
	EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
	{ 0, 0, 0, NULL }, /* TCritInt, handled separately */
};

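/* DCC_CFG_PORT_CONFIG MTU capability encoding for a 10240-byte MTU */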
#define DCC_CFG_PORT_MTU_CAP_10240 7

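/* DC error interrupt sources */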
static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
	DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
	DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
	DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
};

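/*
 * A counter descriptor: name, backing CSR (if any), offset into the
 * software counter area, behavior flags, and the accessor that reads
 * or writes the counter.
 */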
struct cntr_entry {
	char *name;
	u64 csr;
	int offset;
	u8 flags;
	u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
		       int mode, u64 data);
};

#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159

#define CNTR_ELEM(name, csr, offset, flags, accessor) \
{ \
	name, \
	csr, \
	offset, \
	flags, \
	accessor \
}

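/*
 * Helpers to declare CSR-backed counters: entries in the 32- and
 * 64-bit receive/send counter arrays are spaced 8 bytes apart; 32-bit
 * counters are flagged CNTR_32BIT.
 */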
#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  dev_access_u64_csr)

#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
#define OVR_ELM(ctx) \
CNTR_ELEM("RcvHdrOvr" #ctx, \
	  (RCV_HDR_OVFL_CNT + ctx * 0x100), \
	  0, CNTR_NORMAL, port_access_u64_csr)

#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY64), \
	  0, \
	  flags, \
	  dev_access_u64_csr)

#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

#define DC_PERF_CNTR(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dev_access_u64_csr)

#define DC_PERF_CNTR_LCB(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dc_access_lcb_cntr)

#define SW_IBP_CNTR(name, cntr) \
CNTR_ELEM(#name, \
	  0, \
	  0, \
	  CNTR_SYNTH, \
	  access_ibp_##cntr)

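/*
 * CSR accessors: reads return -1 (all ones) and writes are dropped
 * when the device is not present.
 */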
u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
{
	if (dd->flags & HFI1_PRESENT)
		return readq((void __iomem *)dd->kregbase + offset);
	return -1;
}

void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
{
	if (dd->flags & HFI1_PRESENT)
		writeq(value, (void __iomem *)dd->kregbase + offset);
}

void __iomem *get_csr_addr(struct hfi1_devdata *dd, u32 offset)
{
	return (void __iomem *)dd->kregbase + offset;
}

static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
				 int mode, u64 value)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = read_csr(dd, csr);
	} else if (mode == CNTR_MODE_W) {
		write_csr(dd, csr, value);
		ret = value;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
	return ret;
}

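/*
 * Counter accessors. mode is CNTR_MODE_R or CNTR_MODE_W; vl selects a
 * per-VL counter and must be CNTR_INVALID_VL for counters that are
 * not per-VL.
 */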
static u64 dev_access_u32_csr(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_SDMA) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 0x100 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}
	return read_write_csr(dd, csr, mode, data);
}

static u64 access_sde_err_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].err_cnt;
	return 0;
}

static u64 access_sde_int_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].sdma_int_cnt;
	return 0;
}

static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
				   void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].idle_int_cnt;
	return 0;
}

static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
				       void *context, int idx, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].progress_int_cnt;
	return 0;
}

static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
			      int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u64 val = 0;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}

	val = read_write_csr(dd, csr, mode, data);
	return val;
}

static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
			      int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u32 csr = entry->csr;
	int ret = 0;

	if (vl != CNTR_INVALID_VL)
		return 0;
	if (mode == CNTR_MODE_R)
		ret = read_lcb_csr(dd, csr, &data);
	else if (mode == CNTR_MODE_W)
		ret = write_lcb_csr(dd, csr, data);

	if (ret) {
		dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
	return data;
}

static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
			       int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_csr(ppd->dd, entry->csr, mode, data);
}

static u64 port_access_u64_csr(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;
	u64 val;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}
	val = read_write_csr(ppd->dd, csr, mode, data);
	return val;
}

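/* software counter accessors, backed by fields in the driver data */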
1440static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1441 u64 data)
1442{
1443 u64 ret;
1444
1445 if (mode == CNTR_MODE_R) {
1446 ret = *cntr;
1447 } else if (mode == CNTR_MODE_W) {
1448 *cntr = data;
1449 ret = data;
1450 } else {
1451 dd_dev_err(dd, "Invalid cntr sw access mode");
1452 return 0;
1453 }
1454
1455 hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1456
1457 return ret;
1458}
1459
1460static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1461 int vl, int mode, u64 data)
1462{
1463 struct hfi1_pportdata *ppd = context;
1464
1465 if (vl != CNTR_INVALID_VL)
1466 return 0;
1467 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1468}
1469
1470static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1471 int vl, int mode, u64 data)
1472{
1473 struct hfi1_pportdata *ppd = context;
1474
1475 if (vl != CNTR_INVALID_VL)
1476 return 0;
1477 return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1478}
1479
1480static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1481 void *context, int vl, int mode,
1482 u64 data)
1483{
1484 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1485
1486 if (vl != CNTR_INVALID_VL)
1487 return 0;
1488 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1489}
1490
1491static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1492 void *context, int vl, int mode, u64 data)
1493{
1494 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1495 u64 zero = 0;
1496 u64 *counter;
1497
1498 if (vl == CNTR_INVALID_VL)
1499 counter = &ppd->port_xmit_discards;
1500 else if (vl >= 0 && vl < C_VL_COUNT)
1501 counter = &ppd->port_xmit_discards_vl[vl];
1502 else
1503 counter = &zero;
1504
1505 return read_write_sw(ppd->dd, counter, mode, data);
1506}
1507
1508static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1509 void *context, int vl, int mode,
1510 u64 data)
1511{
1512 struct hfi1_pportdata *ppd = context;
1513
1514 if (vl != CNTR_INVALID_VL)
1515 return 0;
1516
1517 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1518 mode, data);
1519}
1520
1521static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1522 void *context, int vl, int mode, u64 data)
1523{
1524 struct hfi1_pportdata *ppd = context;
1525
1526 if (vl != CNTR_INVALID_VL)
1527 return 0;
1528
1529 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1530 mode, data);
1531}
1532
1533u64 get_all_cpu_total(u64 __percpu *cntr)
1534{
1535 int cpu;
1536 u64 counter = 0;
1537
1538 for_each_possible_cpu(cpu)
1539 counter += *per_cpu_ptr(cntr, cpu);
1540 return counter;
1541}
1542
1543static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1544 u64 __percpu *cntr,
1545 int vl, int mode, u64 data)
1546{
1547 u64 ret = 0;
1548
1549 if (vl != CNTR_INVALID_VL)
1550 return 0;
1551
1552 if (mode == CNTR_MODE_R) {
1553 ret = get_all_cpu_total(cntr) - *z_val;
1554 } else if (mode == CNTR_MODE_W) {
1555
		if (data == 0)
			*z_val = get_all_cpu_total(cntr);
		else
			dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
	} else {
		dd_dev_err(dd, "Invalid cntr sw cpu access mode");
		return 0;
	}

	return ret;
}

static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
			mode, data);
}

static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
			mode, data);
}

static u64 access_sw_pio_wait(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->verbs_dev.n_piowait;
}

static u64 access_sw_pio_drain(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->verbs_dev.n_piodrain;
}

static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->verbs_dev.n_txwait;
}

static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->verbs_dev.n_kmem_wait;
}

static u64 access_sw_send_schedule(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
			mode, data);
}
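/*
 * Software counters corresponding to each of the
 * error status bits within MiscErrStatus
 */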
static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[12];
}

static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[11];
}

static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[10];
}

static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[9];
}

static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[8];
}

static u64 access_misc_efuse_read_bad_addr_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[7];
}

static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[6];
}

static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[5];
}

static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[4];
}

static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[3];
}

static u64 access_misc_csr_write_bad_addr_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[2];
}

static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[1];
}

static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[0];
}

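/*
 * Software counter corresponding to aggregate of all CceErrStatus bits
 */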
static u64 access_sw_cce_err_status_aggregated_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_cce_err_status_aggregate;
}

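/*
 * Software counters corresponding to each of the
 * error status bits within CceErrStatus
 */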
static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[40];
}

static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[39];
}

static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[38];
}

static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[37];
}

static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[36];
}

static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[35];
}

static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[34];
}

static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[33];
}

static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[32];
}

static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[31];
}

static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[30];
}

static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[29];
}

static u64 access_pcic_transmit_back_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[28];
}

static u64 access_pcic_transmit_front_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[27];
}

static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[26];
}

static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[25];
}

static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[24];
}

static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[23];
}

static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[22];
}

static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[21];
}

static u64 access_pcic_n_post_dat_q_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[20];
}

static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[19];
}

static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[18];
}

static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[17];
}

static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[16];
}

static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[15];
}

static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[14];
}

static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[13];
}

static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[12];
}

static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[11];
}

static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[10];
}

static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[9];
}

static u64 access_cce_cli2_async_fifo_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[8];
}

static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[7];
}

static u64 access_cce_cli0_async_fifo_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[6];
}

static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[5];
}

static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[4];
}

static u64 access_cce_trgt_async_fifo_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[3];
}

static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[2];
}

static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[1];
}

static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[0];
}

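/*
 * Software counters corresponding to each of the
 * error status bits within RcvErrStatus
 */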
static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[63];
}

static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[62];
}

static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[61];
}

static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[60];
}

static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[59];
}

static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[58];
}

static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[57];
}

static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[56];
}

static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[55];
}

static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[54];
}

static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[53];
}

static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[52];
}

static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[51];
}

static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[50];
}

static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[49];
}

static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[48];
}

static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[47];
}

static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[46];
}

static u64 access_rx_hq_intr_csr_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[45];
}

static u64 access_rx_lookup_csr_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[44];
}

static u64 access_rx_lookup_rcv_array_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[43];
}

static u64 access_rx_lookup_rcv_array_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[42];
}

static u64 access_rx_lookup_des_part2_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[41];
}

static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[40];
}

static u64 access_rx_lookup_des_part1_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[39];
}

static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[38];
}

static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[37];
}

static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[36];
}

static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[35];
}

static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[34];
}

static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[33];
}

static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[32];
}

static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[31];
}

static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[30];
}

static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[29];
}

static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[28];
}

static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[27];
}

static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[26];
}

static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[25];
}

static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[24];
}

static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[23];
}

static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[22];
}

static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[21];
}

static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[20];
}

static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[19];
}

static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[18];
}

static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[17];
}

static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[16];
}

static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[15];
}

static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[14];
}

static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[13];
}

static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[12];
}

static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[11];
}

static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[10];
}

static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[9];
}

static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[8];
}

static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[7];
}

static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[6];
}

static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[5];
}

static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[4];
}

static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[3];
}

static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[2];
}

static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[1];
}

static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[0];
}

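/*
 * Software counters corresponding to each of the
 * error status bits within SendPioErrStatus
 */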
static u64 access_pio_pec_sop_head_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[35];
}

static u64 access_pio_pcc_sop_head_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[34];
}

static u64 access_pio_last_returned_cnt_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[33];
}

static u64 access_pio_current_free_cnt_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[32];
}

static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[31];
}

static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[30];
}

static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[29];
}

static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[28];
}

static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[27];
}

static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[26];
}

static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[25];
}

static u64 access_pio_block_qw_count_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[24];
}

static u64 access_pio_write_qw_valid_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[23];
}

static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[22];
}

static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[21];
}

static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[20];
}

static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[19];
}

static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[18];
}

static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[17];
}

static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[16];
}

static u64 access_pio_credit_ret_fifo_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[15];
}

static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[14];
}

static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[13];
}

static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[12];
}

static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[11];
}

static u64 access_pio_sm_pkt_reset_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[10];
}

static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[9];
}

static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[8];
}

static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[7];
}

static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[6];
}

static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[5];
}

static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[4];
}

static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[3];
}

static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[2];
}

static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[1];
}

static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[0];
}

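/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaErrStatus
 */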
static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[3];
}

static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
		const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[2];
}

static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[1];
}

static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
		void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[0];
}

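/*
 * Software counters corresponding to each of the
 * error status bits within SendEgressErrStatus
 */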
3082static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3083 const struct cntr_entry *entry,
3084 void *context, int vl, int mode, u64 data)
3085{
3086 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3087
3088 return dd->send_egress_err_status_cnt[63];
3089}
3090
3091static u64 access_tx_read_sdma_memory_csr_err_cnt(
3092 const struct cntr_entry *entry,
3093 void *context, int vl, int mode, u64 data)
3094{
3095 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3096
3097 return dd->send_egress_err_status_cnt[62];
3098}
3099
3100static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3101 void *context, int vl, int mode,
3102 u64 data)
3103{
3104 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3105
3106 return dd->send_egress_err_status_cnt[61];
3107}
3108
3109static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3110 void *context, int vl,
3111 int mode, u64 data)
3112{
3113 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3114
3115 return dd->send_egress_err_status_cnt[60];
3116}
3117
3118static u64 access_tx_read_sdma_memory_cor_err_cnt(
3119 const struct cntr_entry *entry,
3120 void *context, int vl, int mode, u64 data)
3121{
3122 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3123
3124 return dd->send_egress_err_status_cnt[59];
3125}
3126
3127static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3128 void *context, int vl, int mode,
3129 u64 data)
3130{
3131 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3132
3133 return dd->send_egress_err_status_cnt[58];
3134}
3135
3136static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3137 void *context, int vl, int mode,
3138 u64 data)
3139{
3140 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3141
3142 return dd->send_egress_err_status_cnt[57];
3143}
3144
3145static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3146 void *context, int vl, int mode,
3147 u64 data)
3148{
3149 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3150
3151 return dd->send_egress_err_status_cnt[56];
3152}
3153
3154static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3155 void *context, int vl, int mode,
3156 u64 data)
3157{
3158 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3159
3160 return dd->send_egress_err_status_cnt[55];
3161}
3162
3163static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3164 void *context, int vl, int mode,
3165 u64 data)
3166{
3167 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3168
3169 return dd->send_egress_err_status_cnt[54];
3170}
3171
3172static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3173 void *context, int vl, int mode,
3174 u64 data)
3175{
3176 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3177
3178 return dd->send_egress_err_status_cnt[53];
3179}
3180
3181static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3182 void *context, int vl, int mode,
3183 u64 data)
3184{
3185 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3186
3187 return dd->send_egress_err_status_cnt[52];
3188}
3189
3190static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3191 void *context, int vl, int mode,
3192 u64 data)
3193{
3194 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3195
3196 return dd->send_egress_err_status_cnt[51];
3197}
3198
3199static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3200 void *context, int vl, int mode,
3201 u64 data)
3202{
3203 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3204
3205 return dd->send_egress_err_status_cnt[50];
3206}
3207
3208static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3209 void *context, int vl, int mode,
3210 u64 data)
3211{
3212 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3213
3214 return dd->send_egress_err_status_cnt[49];
3215}
3216
3217static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3218 void *context, int vl, int mode,
3219 u64 data)
3220{
3221 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3222
3223 return dd->send_egress_err_status_cnt[48];
3224}
3225
3226static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3227 void *context, int vl, int mode,
3228 u64 data)
3229{
3230 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3231
3232 return dd->send_egress_err_status_cnt[47];
3233}
3234
3235static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3236 void *context, int vl, int mode,
3237 u64 data)
3238{
3239 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3240
3241 return dd->send_egress_err_status_cnt[46];
3242}
3243
3244static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3245 void *context, int vl, int mode,
3246 u64 data)
3247{
3248 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3249
3250 return dd->send_egress_err_status_cnt[45];
3251}
3252
3253static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3254 void *context, int vl,
3255 int mode, u64 data)
3256{
3257 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3258
3259 return dd->send_egress_err_status_cnt[44];
3260}
3261
3262static u64 access_tx_read_sdma_memory_unc_err_cnt(
3263 const struct cntr_entry *entry,
3264 void *context, int vl, int mode, u64 data)
3265{
3266 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3267
3268 return dd->send_egress_err_status_cnt[43];
3269}
3270
3271static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3272 void *context, int vl, int mode,
3273 u64 data)
3274{
3275 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3276
3277 return dd->send_egress_err_status_cnt[42];
3278}
3279
3280static u64 access_tx_credit_return_partiy_err_cnt(
3281 const struct cntr_entry *entry,
3282 void *context, int vl, int mode, u64 data)
3283{
3284 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3285
3286 return dd->send_egress_err_status_cnt[41];
3287}
3288
3289static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3290 const struct cntr_entry *entry,
3291 void *context, int vl, int mode, u64 data)
3292{
3293 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3294
3295 return dd->send_egress_err_status_cnt[40];
3296}
3297
3298static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3299 const struct cntr_entry *entry,
3300 void *context, int vl, int mode, u64 data)
3301{
3302 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3303
3304 return dd->send_egress_err_status_cnt[39];
3305}
3306
3307static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3308 const struct cntr_entry *entry,
3309 void *context, int vl, int mode, u64 data)
3310{
3311 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3312
3313 return dd->send_egress_err_status_cnt[38];
3314}
3315
3316static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3317 const struct cntr_entry *entry,
3318 void *context, int vl, int mode, u64 data)
3319{
3320 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3321
3322 return dd->send_egress_err_status_cnt[37];
3323}
3324
3325static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3326 const struct cntr_entry *entry,
3327 void *context, int vl, int mode, u64 data)
3328{
3329 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3330
3331 return dd->send_egress_err_status_cnt[36];
3332}
3333
3334static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3335 const struct cntr_entry *entry,
3336 void *context, int vl, int mode, u64 data)
3337{
3338 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3339
3340 return dd->send_egress_err_status_cnt[35];
3341}
3342
3343static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3344 const struct cntr_entry *entry,
3345 void *context, int vl, int mode, u64 data)
3346{
3347 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3348
3349 return dd->send_egress_err_status_cnt[34];
3350}
3351
3352static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3353 const struct cntr_entry *entry,
3354 void *context, int vl, int mode, u64 data)
3355{
3356 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3357
3358 return dd->send_egress_err_status_cnt[33];
3359}
3360
3361static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3362 const struct cntr_entry *entry,
3363 void *context, int vl, int mode, u64 data)
3364{
3365 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3366
3367 return dd->send_egress_err_status_cnt[32];
3368}
3369
3370static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3371 const struct cntr_entry *entry,
3372 void *context, int vl, int mode, u64 data)
3373{
3374 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3375
3376 return dd->send_egress_err_status_cnt[31];
3377}
3378
3379static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3380 const struct cntr_entry *entry,
3381 void *context, int vl, int mode, u64 data)
3382{
3383 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3384
3385 return dd->send_egress_err_status_cnt[30];
3386}
3387
3388static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3389 const struct cntr_entry *entry,
3390 void *context, int vl, int mode, u64 data)
3391{
3392 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3393
3394 return dd->send_egress_err_status_cnt[29];
3395}
3396
3397static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3398 const struct cntr_entry *entry,
3399 void *context, int vl, int mode, u64 data)
3400{
3401 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3402
3403 return dd->send_egress_err_status_cnt[28];
3404}
3405
3406static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3407 const struct cntr_entry *entry,
3408 void *context, int vl, int mode, u64 data)
3409{
3410 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3411
3412 return dd->send_egress_err_status_cnt[27];
3413}
3414
3415static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3416 const struct cntr_entry *entry,
3417 void *context, int vl, int mode, u64 data)
3418{
3419 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3420
3421 return dd->send_egress_err_status_cnt[26];
3422}
3423
3424static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3425 const struct cntr_entry *entry,
3426 void *context, int vl, int mode, u64 data)
3427{
3428 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3429
3430 return dd->send_egress_err_status_cnt[25];
3431}
3432
3433static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3434 const struct cntr_entry *entry,
3435 void *context, int vl, int mode, u64 data)
3436{
3437 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3438
3439 return dd->send_egress_err_status_cnt[24];
3440}
3441
3442static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3443 const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[23];
}

static u64 access_tx_sdma6_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[22];
}

static u64 access_tx_sdma5_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[21];
}

static u64 access_tx_sdma4_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[20];
}

static u64 access_tx_sdma3_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[19];
}

static u64 access_tx_sdma2_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[18];
}

static u64 access_tx_sdma1_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[17];
}

static u64 access_tx_sdma0_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[16];
}

static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[15];
}

static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[14];
}

static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[13];
}

static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[12];
}

static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[11];
}

static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[10];
}

static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[9];
}

static u64 access_tx_sdma_launch_intf_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[8];
}

static u64 access_tx_pio_launch_intf_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[7];
}

static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[6];
}

static u64 access_tx_incorrect_link_state_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[5];
}

static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
				      void *context, int vl, int mode,
				      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[4];
}

static u64 access_tx_egress_fifo_underrun_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[3];
}

static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[2];
}

static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[1];
}

static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[0];
}
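/*
 * Software counters corresponding to each of the
 * error status bits within SendErrStatus
 */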
static u64 access_send_csr_write_bad_addr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[2];
}

static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[1];
}

static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
				      void *context, int vl, int mode,
				      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[0];
}
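/*
 * Software counters corresponding to each of the
 * error status bits within SendCtxtErrStatus
 */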
static u64 access_pio_write_out_of_bounds_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[4];
}

static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[3];
}

static u64 access_pio_write_crosses_boundary_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[2];
}

static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[1];
}

static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[0];
}
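/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaEngErrStatus
 */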
static u64 access_sdma_header_request_fifo_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[23];
}

static u64 access_sdma_header_storage_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[22];
}

static u64 access_sdma_packet_tracking_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[21];
}

static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[20];
}

static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[19];
}

static u64 access_sdma_header_request_fifo_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[18];
}

static u64 access_sdma_header_storage_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[17];
}

static u64 access_sdma_packet_tracking_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[16];
}

static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[15];
}

static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[14];
}

static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[13];
}

static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[12];
}

static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[11];
}

static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[10];
}

static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[9];
}

static u64 access_sdma_packet_desc_overflow_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[8];
}

static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl,
					       int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[7];
}

static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
				    void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[6];
}

static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[5];
}

static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[4];
}

static u64 access_sdma_tail_out_of_bounds_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[3];
}

static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[2];
}

static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[1];
}

static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[0];
}

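/*
 * The DC receive error count reported here is the hardware
 * DCC_ERR_PORTRCV_ERR_CNT CSR value plus the software count of bypass
 * packet errors, saturated at CNTR_MAX.  A write clears the software
 * portion.
 */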
static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
				 void *context, int vl, int mode,
				 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
	u64 csr = entry->csr;
	u64 val;

	val = read_write_csr(dd, csr, mode, data);
	if (mode == CNTR_MODE_R) {
		val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
			CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
	} else if (mode == CNTR_MODE_W) {
		dd->sw_rcv_bypass_packet_errors = 0;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode\n");
		return 0;
	}
	return val;
}
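/*
 * Generate an accessor for a per-CPU software counter kept in the port's
 * ibport data.  As a rough sketch, def_access_sw_cpu(rc_acks) expands to:
 *
 *	static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *					 void *context, int vl, int mode,
 *					 u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *
 *		return read_write_cpu(ppd->dd,
 *				      &ppd->ibport_data.rvp.z_rc_acks,
 *				      ppd->ibport_data.rvp.rc_acks,
 *				      vl, mode, data);
 *	}
 */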
#define def_access_sw_cpu(cntr) \
static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
				void *context, int vl, int mode, u64 data) \
{ \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
			      ppd->ibport_data.rvp.cntr, vl, \
			      mode, data); \
}

def_access_sw_cpu(rc_acks);
def_access_sw_cpu(rc_qacks);
def_access_sw_cpu(rc_delayed_comp);

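/*
 * Generate an accessor for a software IB protocol counter in the port's
 * ibport data.  These counters are port-wide, so any per-VL query
 * (vl != CNTR_INVALID_VL) simply reads back 0.
 */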
#define def_access_ibp_counter(cntr) \
static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
			     void *context, int vl, int mode, u64 data) \
{ \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
 \
	if (vl != CNTR_INVALID_VL) \
		return 0; \
 \
	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
			     mode, data); \
}

def_access_ibp_counter(loop_pkts);
def_access_ibp_counter(rc_resends);
def_access_ibp_counter(rnr_naks);
def_access_ibp_counter(other_naks);
def_access_ibp_counter(rc_timeouts);
def_access_ibp_counter(pkt_drops);
def_access_ibp_counter(dmawait);
def_access_ibp_counter(rc_seqnak);
def_access_ibp_counter(rc_dupreq);
def_access_ibp_counter(rdma_seq);
def_access_ibp_counter(unaligned);
def_access_ibp_counter(seq_naks);

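/*
 * Device-level counter table, indexed by the C_* device counter enums.
 * CNTR_NORMAL entries read their CSR directly, CNTR_SYNTH entries are
 * widened to a full 64 bits in software, and CNTR_VL entries keep one
 * value per virtual lane.  A minimal read sketch, assuming the driver's
 * read_dev_cntr() helper:
 *
 *	u64 overflows = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
 */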
static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
			CNTR_NORMAL),
[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
			CNTR_NORMAL),
[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
			RCV_TID_FLOW_GEN_MISMATCH_CNT,
			CNTR_NORMAL),
[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
			CNTR_NORMAL),
[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
			RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
			CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
			CNTR_NORMAL),
[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
			CNTR_NORMAL),
[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
			CNTR_NORMAL),
[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
			CNTR_NORMAL),
[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
			CNTR_NORMAL),
[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
			CNTR_NORMAL),
[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
			CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
			CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
			access_dc_rcv_err_cnt),
[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
			CNTR_SYNTH),
[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
			CNTR_SYNTH),
[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
			CNTR_SYNTH),
[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
			DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
			DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
			DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
			CNTR_SYNTH),
[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
			CNTR_SYNTH),
[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
			CNTR_SYNTH),
[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
			CNTR_SYNTH),
[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_TOTAL_CRC] =
	DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
			 CNTR_SYNTH),
[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
			CNTR_SYNTH),
[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
			CNTR_SYNTH),
[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
			CNTR_SYNTH),
[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
			CNTR_SYNTH),
[C_DC_CRC_MULT_LN] =
	DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
			 CNTR_SYNTH),
[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
			CNTR_SYNTH),
[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
			CNTR_SYNTH),
[C_DC_SEQ_CRC_CNT] =
	DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_ONLY_CNT] =
	DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_PLUS1_CNT] =
	DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_PLUS2_CNT] =
	DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
			 CNTR_SYNTH),
[C_DC_REINIT_FROM_PEER_CNT] =
	DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
			 CNTR_SYNTH),
[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
			CNTR_SYNTH),
[C_DC_MISC_FLG_CNT] =
	DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
			 CNTR_SYNTH),
[C_DC_PRF_GOOD_LTP_CNT] =
	DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
[C_DC_PRF_ACCEPTED_LTP_CNT] =
	DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
			 CNTR_SYNTH),
[C_DC_PRF_RX_FLIT_CNT] =
	DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_TX_FLIT_CNT] =
	DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_CLK_CNTR] =
	DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
	DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
	DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
			 CNTR_SYNTH),
[C_DC_PG_STS_TX_SBE_CNT] =
	DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
[C_DC_PG_STS_TX_MBE_CNT] =
	DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
			 CNTR_SYNTH),
[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
			    access_sw_cpu_intr),
[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
			       access_sw_cpu_rcv_limit),
[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
			    access_sw_vtx_wait),
[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
			    access_sw_pio_wait),
[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
			     access_sw_pio_drain),
[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
			     access_sw_kmem_wait),
[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
			      access_sw_send_schedule),
[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
			SEND_DMA_DESC_FETCHED_CNT, 0,
			CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			dev_access_u32_csr),
[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
			CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			access_sde_int_cnt),
[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
			CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			access_sde_err_cnt),
[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
			CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			access_sde_idle_int_cnt),
[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
			CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			access_sde_progress_int_cnt),

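/* MiscErrStatus error bit counters */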
[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
			CNTR_NORMAL,
			access_misc_pll_lock_fail_err_cnt),
[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
			CNTR_NORMAL,
			access_misc_mbist_fail_err_cnt),
[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
			CNTR_NORMAL,
			access_misc_invalid_eep_cmd_err_cnt),
[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
			CNTR_NORMAL,
			access_misc_efuse_done_parity_err_cnt),
[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
			CNTR_NORMAL,
			access_misc_efuse_write_err_cnt),
[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR",
			0, 0, CNTR_NORMAL,
			access_misc_efuse_read_bad_addr_err_cnt),
[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
			CNTR_NORMAL,
			access_misc_efuse_csr_parity_err_cnt),
[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
			CNTR_NORMAL,
			access_misc_fw_auth_failed_err_cnt),
[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
			CNTR_NORMAL,
			access_misc_key_mismatch_err_cnt),
[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
			CNTR_NORMAL,
			access_misc_sbus_write_failed_err_cnt),
[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR",
			0, 0, CNTR_NORMAL,
			access_misc_csr_write_bad_addr_err_cnt),
[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
			CNTR_NORMAL,
			access_misc_csr_read_bad_addr_err_cnt),
[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
			CNTR_NORMAL,
			access_misc_csr_parity_err_cnt),

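/* CceErrStatus error bit counters */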
[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt",
			0, 0, CNTR_NORMAL,
			access_sw_cce_err_status_aggregated_cnt),
[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
			CNTR_NORMAL,
			access_cce_msix_csr_parity_err_cnt),
[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
			CNTR_NORMAL,
			access_cce_int_map_unc_err_cnt),
[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
			CNTR_NORMAL,
			access_cce_int_map_cor_err_cnt),
[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
			CNTR_NORMAL,
			access_cce_msix_table_unc_err_cnt),
[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
			CNTR_NORMAL,
			access_cce_msix_table_cor_err_cnt),
[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr",
			0, 0, CNTR_NORMAL,
			access_cce_rxdma_conv_fifo_parity_err_cnt),
[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr",
			0, 0, CNTR_NORMAL,
			access_cce_rcpl_async_fifo_parity_err_cnt),
[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
			CNTR_NORMAL,
			access_cce_seg_write_bad_addr_err_cnt),
[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
			CNTR_NORMAL,
			access_cce_seg_read_bad_addr_err_cnt),
[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
			CNTR_NORMAL,
			access_la_triggered_cnt),
[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
			CNTR_NORMAL,
			access_cce_trgt_cpl_timeout_err_cnt),
[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
			CNTR_NORMAL,
			access_pcic_receive_parity_err_cnt),
[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr",
			0, 0, CNTR_NORMAL,
			access_pcic_transmit_back_parity_err_cnt),
[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr",
			0, 0, CNTR_NORMAL,
			access_pcic_transmit_front_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
			CNTR_NORMAL,
			access_pcic_cpl_dat_q_unc_err_cnt),
[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
			CNTR_NORMAL,
			access_pcic_cpl_hd_q_unc_err_cnt),
[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
			CNTR_NORMAL,
			access_pcic_post_dat_q_unc_err_cnt),
[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
			CNTR_NORMAL,
			access_pcic_post_hd_q_unc_err_cnt),
[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
			CNTR_NORMAL,
			access_pcic_retry_sot_mem_unc_err_cnt),
[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
			CNTR_NORMAL,
			access_pcic_retry_mem_unc_err),
[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
			CNTR_NORMAL,
			access_pcic_n_post_dat_q_parity_err_cnt),
[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
			CNTR_NORMAL,
			access_pcic_n_post_h_q_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
			CNTR_NORMAL,
			access_pcic_cpl_dat_q_cor_err_cnt),
[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
			CNTR_NORMAL,
			access_pcic_cpl_hd_q_cor_err_cnt),
[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
			CNTR_NORMAL,
			access_pcic_post_dat_q_cor_err_cnt),
[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
			CNTR_NORMAL,
			access_pcic_post_hd_q_cor_err_cnt),
[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
			CNTR_NORMAL,
			access_pcic_retry_sot_mem_cor_err_cnt),
[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
			CNTR_NORMAL,
			access_pcic_retry_mem_cor_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
			"CceCli1AsyncFifoDbgParityError", 0, 0,
			CNTR_NORMAL,
			access_cce_cli1_async_fifo_dbg_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
			"CceCli1AsyncFifoRxdmaParityError", 0, 0,
			CNTR_NORMAL,
			access_cce_cli1_async_fifo_rxdma_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
			"CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
			CNTR_NORMAL,
			access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
			"CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
			CNTR_NORMAL,
			access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr",
			0, 0, CNTR_NORMAL,
			access_cce_cli2_async_fifo_parity_err_cnt),
[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
			CNTR_NORMAL,
			access_cce_csr_cfg_bus_parity_err_cnt),
[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr",
			0, 0, CNTR_NORMAL,
			access_cce_cli0_async_fifo_parity_err_cnt),
[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
			CNTR_NORMAL,
			access_cce_rspd_data_parity_err_cnt),
[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
			CNTR_NORMAL,
			access_cce_trgt_access_err_cnt),
[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr",
			0, 0, CNTR_NORMAL,
			access_cce_trgt_async_fifo_parity_err_cnt),
[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
			CNTR_NORMAL,
			access_cce_csr_write_bad_addr_err_cnt),
[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
			CNTR_NORMAL,
			access_cce_csr_read_bad_addr_err_cnt),
[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
			CNTR_NORMAL,
			access_ccs_csr_parity_err_cnt),

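/* RcvErrStatus error bit counters */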
[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
			CNTR_NORMAL,
			access_rx_csr_parity_err_cnt),
[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
			CNTR_NORMAL,
			access_rx_csr_write_bad_addr_err_cnt),
[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
			CNTR_NORMAL,
			access_rx_csr_read_bad_addr_err_cnt),
[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
			CNTR_NORMAL,
			access_rx_dma_csr_unc_err_cnt),
[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
			CNTR_NORMAL,
			access_rx_dma_dq_fsm_encoding_err_cnt),
[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
			CNTR_NORMAL,
			access_rx_dma_eq_fsm_encoding_err_cnt),
[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
			CNTR_NORMAL,
			access_rx_dma_csr_parity_err_cnt),
[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_data_cor_err_cnt),
[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_data_unc_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
			CNTR_NORMAL,
			access_rx_dma_data_fifo_rd_cor_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
			CNTR_NORMAL,
			access_rx_dma_data_fifo_rd_unc_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
			CNTR_NORMAL,
			access_rx_dma_hdr_fifo_rd_cor_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
			CNTR_NORMAL,
			access_rx_dma_hdr_fifo_rd_unc_err_cnt),
[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_desc_part2_cor_err_cnt),
[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_desc_part2_unc_err_cnt),
[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_desc_part1_cor_err_cnt),
[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_desc_part1_unc_err_cnt),
[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
			CNTR_NORMAL,
			access_rx_hq_intr_fsm_err_cnt),
[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
			CNTR_NORMAL,
			access_rx_hq_intr_csr_parity_err_cnt),
[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
			CNTR_NORMAL,
			access_rx_lookup_csr_parity_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
			CNTR_NORMAL,
			access_rx_lookup_rcv_array_cor_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
			CNTR_NORMAL,
			access_rx_lookup_rcv_array_unc_err_cnt),
[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr",
			0, 0, CNTR_NORMAL,
			access_rx_lookup_des_part2_parity_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr",
			0, 0, CNTR_NORMAL,
			access_rx_lookup_des_part1_unc_cor_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
			CNTR_NORMAL,
			access_rx_lookup_des_part1_unc_err_cnt),
[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_next_free_buf_cor_err_cnt),
[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_next_free_buf_unc_err_cnt),
[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
			"RxRbufFlInitWrAddrParityErr", 0, 0,
			CNTR_NORMAL,
			access_rbuf_fl_init_wr_addr_parity_err_cnt),
[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr",
			0, 0, CNTR_NORMAL,
			access_rx_rbuf_fl_initdone_parity_err_cnt),
[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr",
			0, 0, CNTR_NORMAL,
			access_rx_rbuf_fl_write_addr_parity_err_cnt),
[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_fl_rd_addr_parity_err_cnt),
[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_empty_err_cnt),
[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_full_err_cnt),
[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
			CNTR_NORMAL,
			access_rbuf_bad_lookup_err_cnt),
[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
			CNTR_NORMAL,
			access_rbuf_ctx_id_parity_err_cnt),
[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
			CNTR_NORMAL,
			access_rbuf_csr_qeopdw_parity_err_cnt),
[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
			"RxRbufCsrQNumOfPktParityErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
			"RxRbufCsrQTlPtrParityErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr",
			0, 0, CNTR_NORMAL,
			access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr",
			0, 0, CNTR_NORMAL,
			access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
			0, 0, CNTR_NORMAL,
			access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr",
			0, 0, CNTR_NORMAL,
			access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
			"RxRbufCsrQHeadBufNumParityErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr",
			0, 0, CNTR_NORMAL,
			access_rx_rbuf_block_list_read_cor_err_cnt),
[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr",
			0, 0, CNTR_NORMAL,
			access_rx_rbuf_block_list_read_unc_err_cnt),
[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_lookup_des_cor_err_cnt),
[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_lookup_des_unc_err_cnt),
[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
			"RxRbufLookupDesRegUncCorErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr",
			0, 0, CNTR_NORMAL,
			access_rx_rbuf_lookup_des_reg_unc_err_cnt),
[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_free_list_cor_err_cnt),
[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_free_list_unc_err_cnt),
[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rcv_fsm_encoding_err_cnt),
[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
			CNTR_NORMAL,
			access_rx_dma_flag_cor_err_cnt),
[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
			CNTR_NORMAL,
			access_rx_dma_flag_unc_err_cnt),
[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
			CNTR_NORMAL,
			access_rx_dc_sop_eop_parity_err_cnt),
[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rcv_csr_parity_err_cnt),
[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rcv_qp_map_table_cor_err_cnt),
[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rcv_qp_map_table_unc_err_cnt),
[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rcv_data_cor_err_cnt),
[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rcv_data_unc_err_cnt),
[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rcv_hdr_cor_err_cnt),
[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rcv_hdr_unc_err_cnt),
[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
			CNTR_NORMAL,
			access_rx_dc_intf_parity_err_cnt),
[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
			CNTR_NORMAL,
			access_rx_dma_csr_cor_err_cnt),

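/* SendPioErrStatus error bit counters */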
[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_pec_sop_head_parity_err_cnt),
[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_pcc_sop_head_parity_err_cnt),
[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
			0, 0, CNTR_NORMAL,
			access_pio_last_returned_cnt_parity_err_cnt),
[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr",
			0, 0, CNTR_NORMAL,
			access_pio_current_free_cnt_parity_err_cnt),
[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
			CNTR_NORMAL,
			access_pio_reserved_31_err_cnt),
[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
			CNTR_NORMAL,
			access_pio_reserved_30_err_cnt),
[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
			CNTR_NORMAL,
			access_pio_ppmc_sop_len_err_cnt),
[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_ppmc_bqc_mem_parity_err_cnt),
[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_vl_fifo_parity_err_cnt),
[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_vlf_sop_parity_err_cnt),
[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_vlf_v1_len_parity_err_cnt),
[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_block_qw_count_parity_err_cnt),
[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_write_qw_valid_parity_err_cnt),
[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
			CNTR_NORMAL,
			access_pio_state_machine_err_cnt),
[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_write_data_parity_err_cnt),
[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
			CNTR_NORMAL,
			access_pio_host_addr_mem_cor_err_cnt),
[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
			CNTR_NORMAL,
			access_pio_host_addr_mem_unc_err_cnt),
[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
			CNTR_NORMAL,
			access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
			CNTR_NORMAL,
			access_pio_init_sm_in_err_cnt),
[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
			CNTR_NORMAL,
			access_pio_ppmc_pbl_fifo_err_cnt),
[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr",
			0, 0, CNTR_NORMAL,
			access_pio_credit_ret_fifo_parity_err_cnt),
[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
			CNTR_NORMAL,
			access_pio_v1_len_mem_bank1_cor_err_cnt),
[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
			CNTR_NORMAL,
			access_pio_v1_len_mem_bank0_cor_err_cnt),
[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
			CNTR_NORMAL,
			access_pio_v1_len_mem_bank1_unc_err_cnt),
[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
			CNTR_NORMAL,
			access_pio_v1_len_mem_bank0_unc_err_cnt),
[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_sm_pkt_reset_parity_err_cnt),
[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_pkt_evict_fifo_parity_err_cnt),
[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
			"PioSbrdctrlCrrelFifoParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_sbrdctl_crrel_parity_err_cnt),
[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_pec_fifo_parity_err_cnt),
[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_pcc_fifo_parity_err_cnt),
[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
			CNTR_NORMAL,
			access_pio_sb_mem_fifo1_err_cnt),
[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
			CNTR_NORMAL,
			access_pio_sb_mem_fifo0_err_cnt),
[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_csr_parity_err_cnt),
[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_write_addr_parity_err_cnt),
[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
			CNTR_NORMAL,
			access_pio_write_bad_ctxt_err_cnt),

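/* SendDmaErrStatus error bit counters */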
[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr",
			0, 0, CNTR_NORMAL,
			access_sdma_pcie_req_tracking_cor_err_cnt),
[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr",
			0, 0, CNTR_NORMAL,
			access_sdma_pcie_req_tracking_unc_err_cnt),
[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
			CNTR_NORMAL,
			access_sdma_csr_parity_err_cnt),
[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
			CNTR_NORMAL,
			access_sdma_rpy_tag_err_cnt),

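/* SendEgressErrStatus error bit counters */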
[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr",
			0, 0, CNTR_NORMAL,
			access_tx_read_pio_memory_csr_unc_err_cnt),
[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr",
			0, 0, CNTR_NORMAL,
			access_tx_read_sdma_memory_csr_err_cnt),
[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
			CNTR_NORMAL,
			access_tx_egress_fifo_cor_err_cnt),
[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
			CNTR_NORMAL,
			access_tx_read_pio_memory_cor_err_cnt),
[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
			CNTR_NORMAL,
			access_tx_read_sdma_memory_cor_err_cnt),
[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
			CNTR_NORMAL,
			access_tx_sb_hdr_cor_err_cnt),
[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
			CNTR_NORMAL,
			access_tx_credit_overrun_err_cnt),
[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
			CNTR_NORMAL,
			access_tx_launch_fifo8_cor_err_cnt),
[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
			CNTR_NORMAL,
			access_tx_launch_fifo7_cor_err_cnt),
[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
			CNTR_NORMAL,
			access_tx_launch_fifo6_cor_err_cnt),
[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
			CNTR_NORMAL,
			access_tx_launch_fifo5_cor_err_cnt),
[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
			CNTR_NORMAL,
			access_tx_launch_fifo4_cor_err_cnt),
[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
			CNTR_NORMAL,
			access_tx_launch_fifo3_cor_err_cnt),
[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
			CNTR_NORMAL,
			access_tx_launch_fifo2_cor_err_cnt),
[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
			CNTR_NORMAL,
			access_tx_launch_fifo1_cor_err_cnt),
[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
			CNTR_NORMAL,
			access_tx_launch_fifo0_cor_err_cnt),
[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
			CNTR_NORMAL,
			access_tx_credit_return_vl_err_cnt),
[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
			CNTR_NORMAL,
			access_tx_hcrc_insertion_err_cnt),
[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
			CNTR_NORMAL,
			access_tx_egress_fifo_unc_err_cnt),
[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
			CNTR_NORMAL,
			access_tx_read_pio_memory_unc_err_cnt),
[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
			CNTR_NORMAL,
			access_tx_read_sdma_memory_unc_err_cnt),
[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
			CNTR_NORMAL,
			access_tx_sb_hdr_unc_err_cnt),
[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
			CNTR_NORMAL,
			access_tx_credit_return_partiy_err_cnt),
[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
			0, 0, CNTR_NORMAL,
			access_tx_launch_fifo8_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
			0, 0, CNTR_NORMAL,
			access_tx_launch_fifo7_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
			0, 0, CNTR_NORMAL,
			access_tx_launch_fifo6_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
			0, 0, CNTR_NORMAL,
			access_tx_launch_fifo5_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
			0, 0, CNTR_NORMAL,
			access_tx_launch_fifo4_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
			0, 0, CNTR_NORMAL,
			access_tx_launch_fifo3_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
			0, 0, CNTR_NORMAL,
			access_tx_launch_fifo2_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
			0, 0, CNTR_NORMAL,
			access_tx_launch_fifo1_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
			0, 0, CNTR_NORMAL,
			access_tx_launch_fifo0_unc_or_parity_err_cnt),
[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma15_disallowed_packet_err_cnt),
[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma14_disallowed_packet_err_cnt),
[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma13_disallowed_packet_err_cnt),
[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma12_disallowed_packet_err_cnt),
[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma11_disallowed_packet_err_cnt),
[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma10_disallowed_packet_err_cnt),
[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma9_disallowed_packet_err_cnt),
[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma8_disallowed_packet_err_cnt),
[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma7_disallowed_packet_err_cnt),
[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma6_disallowed_packet_err_cnt),
[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma5_disallowed_packet_err_cnt),
[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma4_disallowed_packet_err_cnt),
[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma3_disallowed_packet_err_cnt),
[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma2_disallowed_packet_err_cnt),
[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma1_disallowed_packet_err_cnt),
[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma0_disallowed_packet_err_cnt),
[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
			CNTR_NORMAL,
			access_tx_config_parity_err_cnt),
[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
			CNTR_NORMAL,
			access_tx_sbrd_ctl_csr_parity_err_cnt),
[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
			CNTR_NORMAL,
			access_tx_launch_csr_parity_err_cnt),
[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
			CNTR_NORMAL,
			access_tx_illegal_vl_err_cnt),
[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
			"TxSbrdCtlStateMachineParityErr", 0, 0,
			CNTR_NORMAL,
			access_tx_sbrd_ctl_state_machine_parity_err_cnt),
[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
			CNTR_NORMAL,
			access_egress_reserved_10_err_cnt),
[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
			CNTR_NORMAL,
			access_egress_reserved_9_err_cnt),
[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma_launch_intf_parity_err_cnt),
[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
			CNTR_NORMAL,
			access_tx_pio_launch_intf_parity_err_cnt),
[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
			CNTR_NORMAL,
			access_egress_reserved_6_err_cnt),
[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
			CNTR_NORMAL,
			access_tx_incorrect_link_state_err_cnt),
[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
			CNTR_NORMAL,
			access_tx_linkdown_err_cnt),
[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
			"EgressFifoUnderrunOrParityErr", 0, 0,
			CNTR_NORMAL,
			access_tx_egress_fifo_underrun_or_parity_err_cnt),
[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
			CNTR_NORMAL,
			access_egress_reserved_2_err_cnt),
[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
			CNTR_NORMAL,
			access_tx_pkt_integrity_mem_unc_err_cnt),
[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
			CNTR_NORMAL,
			access_tx_pkt_integrity_mem_cor_err_cnt),

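/* SendErrStatus error bit counters */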
[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
			CNTR_NORMAL,
			access_send_csr_write_bad_addr_err_cnt),
[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
			CNTR_NORMAL,
			access_send_csr_read_bad_addr_err_cnt),
[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
			CNTR_NORMAL,
			access_send_csr_parity_cnt),

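/* SendCtxtErrStatus error bit counters */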
[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
			CNTR_NORMAL,
			access_pio_write_out_of_bounds_err_cnt),
[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
			CNTR_NORMAL,
			access_pio_write_overflow_err_cnt),
[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
			0, 0, CNTR_NORMAL,
			access_pio_write_crosses_boundary_err_cnt),
[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
			CNTR_NORMAL,
			access_pio_disallowed_packet_err_cnt),
[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
			CNTR_NORMAL,
			access_pio_inconsistent_sop_err_cnt),

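/* SendDmaEngErrStatus error bit counters */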
4904[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4905 0, 0, CNTR_NORMAL,
4906 access_sdma_header_request_fifo_cor_err_cnt),
4907[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4908 CNTR_NORMAL,
4909 access_sdma_header_storage_cor_err_cnt),
4910[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4911 CNTR_NORMAL,
4912 access_sdma_packet_tracking_cor_err_cnt),
4913[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4914 CNTR_NORMAL,
4915 access_sdma_assembly_cor_err_cnt),
4916[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4917 CNTR_NORMAL,
4918 access_sdma_desc_table_cor_err_cnt),
4919[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
4920 0, 0, CNTR_NORMAL,
4921 access_sdma_header_request_fifo_unc_err_cnt),
4922[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
4923 CNTR_NORMAL,
4924 access_sdma_header_storage_unc_err_cnt),
4925[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
4926 CNTR_NORMAL,
4927 access_sdma_packet_tracking_unc_err_cnt),
4928[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
4929 CNTR_NORMAL,
4930 access_sdma_assembly_unc_err_cnt),
4931[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
4932 CNTR_NORMAL,
4933 access_sdma_desc_table_unc_err_cnt),
4934[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
4935 CNTR_NORMAL,
4936 access_sdma_timeout_err_cnt),
4937[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
4938 CNTR_NORMAL,
4939 access_sdma_header_length_err_cnt),
4940[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
4941 CNTR_NORMAL,
4942 access_sdma_header_address_err_cnt),
4943[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
4944 CNTR_NORMAL,
4945 access_sdma_header_select_err_cnt),
4946[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
4947 CNTR_NORMAL,
4948 access_sdma_reserved_9_err_cnt),
4949[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
4950 CNTR_NORMAL,
4951 access_sdma_packet_desc_overflow_err_cnt),
4952[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
4953 CNTR_NORMAL,
4954 access_sdma_length_mismatch_err_cnt),
4955[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
4956 CNTR_NORMAL,
4957 access_sdma_halt_err_cnt),
4958[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
4959 CNTR_NORMAL,
4960 access_sdma_mem_read_err_cnt),
4961[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
4962 CNTR_NORMAL,
4963 access_sdma_first_desc_err_cnt),
4964[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
4965 CNTR_NORMAL,
4966 access_sdma_tail_out_of_bounds_err_cnt),
4967[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
4968 CNTR_NORMAL,
4969 access_sdma_too_long_err_cnt),
4970[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
4971 CNTR_NORMAL,
4972 access_sdma_gen_mismatch_err_cnt),
4973[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
4974 CNTR_NORMAL,
4975 access_sdma_wrong_dw_err_cnt),
4976};

static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
			CNTR_NORMAL),
[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
			CNTR_NORMAL),
[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
			CNTR_NORMAL),
[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
			CNTR_NORMAL),
[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
			CNTR_NORMAL),
[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
			CNTR_NORMAL),
[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
			CNTR_NORMAL),
[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_link_dn_cnt),
[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_link_up_cnt),
[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
			access_sw_unknown_frame_cnt),
[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_xmit_discards),
[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
			CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
			access_sw_xmit_discards),
[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
			access_xmit_constraint_errs),
[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
			access_rcv_constraint_errs),
[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupReq, rc_dupreq),
[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
			access_sw_cpu_rc_acks),
[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
			access_sw_cpu_rc_qacks),
[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
			access_sw_cpu_rc_delayed_comp),
[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
};

/* return true if this is chip revision A (minor revision high nibble 0) */
int is_ax(struct hfi1_devdata *dd)
{
	u8 chip_rev_minor =
		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
			& CCE_REVISION_CHIP_REV_MINOR_MASK;
	return (chip_rev_minor & 0xf0) == 0;
}

/* return true if this is chip revision B (minor revision high nibble 1) */
int is_bx(struct hfi1_devdata *dd)
{
	u8 chip_rev_minor =
		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
			& CCE_REVISION_CHIP_REV_MINOR_MASK;
	return (chip_rev_minor & 0xf0) == 0x10;
}

/*
 * Append string s to buffer buf.  Arguments curp and lenp are the
 * current position and remaining length, respectively.
 *
 * Return 0 on success, 1 on out of room.
 */
static int append_str(char *buf, char **curp, int *lenp, const char *s)
{
	char *p = *curp;
	int len = *lenp;
	int result = 0;
	char c;

	/* add a comma, if not first in the buffer */
	if (p != buf) {
		if (len == 0) {
			result = 1;	/* out of room */
			goto done;
		}
		*p++ = ',';
		len--;
	}

	/* copy the string */
	while ((c = *s++) != 0) {
		if (len == 0) {
			result = 1;	/* out of room */
			goto done;
		}
		*p++ = c;
		len--;
	}

done:
	/* write return values */
	*curp = p;
	*lenp = len;

	return result;
}

/*
 * Using the given flag table, print a comma-separated string into
 * the buffer.  End in '*' if the buffer is too short.
 */
static char *flag_string(char *buf, int buf_len, u64 flags,
			 struct flag_table *table, int table_size)
{
	char extra[32];
	char *p = buf;
	int len = buf_len;
	int no_room = 0;
	int i;

	/* make sure there is at least 2 so we can form "*" */
	if (len < 2)
		return "";

	len--;	/* leave room for a nul */
	for (i = 0; i < table_size; i++) {
		if (flags & table[i].flag) {
			no_room = append_str(buf, &p, &len, table[i].str);
			if (no_room)
				break;
			flags &= ~table[i].flag;
		}
	}

	/* any undocumented bits left over? */
	if (!no_room && flags) {
		snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
		no_room = append_str(buf, &p, &len, extra);
	}

	/* add '*' if we ran out of room */
	if (no_room) {
		/* may need to back up to add space for a '*' */
		if (len == 0)
			--p;
		*p++ = '*';
	}

	/* add final nul - space was reserved above */
	*p = 0;
	return buf;
}
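
/*
 * Illustrative example (not driver code): with a hypothetical table
 * { FLAG_ENTRY0("CsrParity", 0x1), FLAG_ENTRY0("RxeFifo", 0x2) } and
 * flags == 0x7, flag_string() renders "CsrParity,RxeFifo,bits 0x4" -
 * the named bits first, then any bits the table does not describe.
 * If the buffer fills up before everything fits, the string ends
 * in '*'.
 */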

/* first 8 CCE error interrupt source names */
static const char * const cce_misc_names[] = {
	"CceErrInt",		/* 0 */
	"RxeErrInt",		/* 1 */
	"MiscErrInt",		/* 2 */
	"Reserved3",		/* 3 */
	"PioErrInt",		/* 4 */
	"SDmaErrInt",		/* 5 */
	"EgressErrInt",		/* 6 */
	"TxeErrInt"		/* 7 */
};

/*
 * Return the miscellaneous error interrupt name.
 */
static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
{
	if (source < ARRAY_SIZE(cce_misc_names))
		/* snprintf, unlike strncpy, guarantees nul termination */
		snprintf(buf, bsize, "%s", cce_misc_names[source]);
	else
		snprintf(buf, bsize, "Reserved%u",
			 source + IS_GENERAL_ERR_START);

	return buf;
}

/*
 * Return the SDMA engine error interrupt name.
 */
static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "SDmaEngErrInt%u", source);
	return buf;
}

/*
 * Return the send context error interrupt name.
 */
static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "SendCtxtErrInt%u", source);
	return buf;
}

static const char * const various_names[] = {
	"PbcInt",
	"GpioAssertInt",
	"Qsfp1Int",
	"Qsfp2Int",
	"TCritInt"
};

/*
 * Return the various interrupt name.
 */
static char *is_various_name(char *buf, size_t bsize, unsigned int source)
{
	if (source < ARRAY_SIZE(various_names))
		/* snprintf, unlike strncpy, guarantees nul termination */
		snprintf(buf, bsize, "%s", various_names[source]);
	else
		snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
	return buf;
}

/*
 * Return the DC interrupt name.
 */
static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
{
	static const char * const dc_int_names[] = {
		"common",
		"lcb",
		"8051",
		"lbm"	/* local block merge */
	};

	if (source < ARRAY_SIZE(dc_int_names))
		snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
	else
		snprintf(buf, bsize, "DCInt%u", source);
	return buf;
}

static const char * const sdma_int_names[] = {
	"SDmaInt",
	"SdmaIdleInt",
	"SdmaProgressInt",
};

/*
 * Return the SDMA engine interrupt name.
 */
static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
{
	/* what interrupt */
	unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
	/* which engine */
	unsigned int which = source % TXE_NUM_SDMA_ENGINES;

	if (likely(what < ARRAY_SIZE(sdma_int_names)))
		snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
	else
		snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
	return buf;
}
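
/*
 * Worked example (illustrative; assumes TXE_NUM_SDMA_ENGINES == 16):
 * source 17 decomposes to what = 17 / 16 = 1 and which = 17 % 16 = 1,
 * so the name is "SdmaIdleInt1".  Sources at or beyond
 * 3 * TXE_NUM_SDMA_ENGINES have no table entry and are reported as
 * invalid.
 */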

/*
 * Return the receive available interrupt name.
 */
static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "RcvAvailInt%u", source);
	return buf;
}

/*
 * Return the receive urgent interrupt name.
 */
static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "RcvUrgentInt%u", source);
	return buf;
}

/*
 * Return the send credit interrupt name.
 */
static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "SendCreditInt%u", source);
	return buf;
}

/*
 * Return the reserved interrupt name.
 */
static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
	return buf;
}

static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   cce_err_status_flags,
			   ARRAY_SIZE(cce_err_status_flags));
}

static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   rxe_err_status_flags,
			   ARRAY_SIZE(rxe_err_status_flags));
}

static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, misc_err_status_flags,
			   ARRAY_SIZE(misc_err_status_flags));
}

static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   pio_err_status_flags,
			   ARRAY_SIZE(pio_err_status_flags));
}

static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   sdma_err_status_flags,
			   ARRAY_SIZE(sdma_err_status_flags));
}

static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   egress_err_status_flags,
			   ARRAY_SIZE(egress_err_status_flags));
}

static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   egress_err_info_flags,
			   ARRAY_SIZE(egress_err_info_flags));
}

static char *send_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   send_err_status_flags,
			   ARRAY_SIZE(send_err_status_flags));
}

static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	/*
	 * For most of these errors, there is nothing that can be done
	 * except report or record it.
	 */
	dd_dev_info(dd, "CCE Error: %s\n",
		    cce_err_status_string(buf, sizeof(buf), reg));

	if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
	    is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
		/* this error requires a manual drop into SPC freeze mode */
		/* then a fix up */
		start_freeze_handling(dd->pport, FREEZE_SELF);
	}

	for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i)) {
			incr_cntr64(&dd->cce_err_status_cnt[i]);
			/* maintain a counter over all cce_err_status errors */
			incr_cntr64(&dd->sw_cce_err_status_aggregate);
		}
	}
}

/*
 * Check counters for receive errors that do not have an interrupt
 * associated with them.
 */
#define RCVERR_CHECK_TIME 10
static void update_rcverr_timer(unsigned long opaque)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
	struct hfi1_pportdata *ppd = dd->pport;
	u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);

	if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
	    ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
		dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
		set_link_down_reason(
			ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
			OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
		queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
	}
	dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;

	mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
}

static int init_rcverr(struct hfi1_devdata *dd)
{
	setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
	/* Assume the hardware counter has been reset */
	dd->rcv_ovfl_cnt = 0;
	return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
}

static void free_rcverr(struct hfi1_devdata *dd)
{
	if (dd->rcverr_timer.data)
		del_timer_sync(&dd->rcverr_timer);
	dd->rcverr_timer.data = 0;
}
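
/*
 * Note on the scheme above (summary, not new behavior): the timer uses
 * the pre-4.15 setup_timer()/unsigned long API and re-arms itself every
 * RCVERR_CHECK_TIME (10) seconds.  A link bounce is queued only when
 * the RcvOverflow counter advanced since the last check *and* the FM
 * requested it via the PortErrorAction ExcessiveBufferOverrun bit.
 */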

static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "Receive Error: %s\n",
		    rxe_err_status_string(buf, sizeof(buf), reg));

	if (reg & ALL_RXE_FREEZE_ERR) {
		int flags = 0;

		/*
		 * Freeze mode recovery is disabled for the errors
		 * in RXE_FREEZE_ABORT_MASK
		 */
		if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
			flags = FREEZE_ABORT;

		start_freeze_handling(dd->pport, flags);
	}

	for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->rcv_err_status_cnt[i]);
	}
}

static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "Misc Error: %s\n",
		    misc_err_status_string(buf, sizeof(buf), reg));
	for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->misc_err_status_cnt[i]);
	}
}

static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "PIO Error: %s\n",
		    pio_err_status_string(buf, sizeof(buf), reg));

	if (reg & ALL_PIO_FREEZE_ERR)
		start_freeze_handling(dd->pport, 0);

	for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->send_pio_err_status_cnt[i]);
	}
}

static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "SDMA Error: %s\n",
		    sdma_err_status_string(buf, sizeof(buf), reg));

	if (reg & ALL_SDMA_FREEZE_ERR)
		start_freeze_handling(dd->pport, 0);

	for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->send_dma_err_status_cnt[i]);
	}
}

static inline void __count_port_discards(struct hfi1_pportdata *ppd)
{
	incr_cntr64(&ppd->port_xmit_discards);
}

static void count_port_inactive(struct hfi1_devdata *dd)
{
	__count_port_discards(dd->pport);
}

/*
 * We have had a "disallowed packet" error during egress.  Determine the
 * integrity check that failed, and update the port transmit discard
 * counters as needed.
 *
 * Note that the SEND_EGRESS_ERR_INFO register has only a single bit of
 * state per integrity check, so the reason for an egress error can be
 * missed if more than one packet fails the same check before the
 * corresponding bit is cleared.
 */
static void handle_send_egress_err_info(struct hfi1_devdata *dd,
					int vl)
{
	struct hfi1_pportdata *ppd = dd->pport;
	u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
	u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
	char buf[96];

	/* clear down all observed info */
	write_csr(dd, SEND_EGRESS_ERR_INFO, info);

	dd_dev_info(dd,
		    "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
		    info, egress_err_info_string(buf, sizeof(buf), info), src);

	/* update the port transmit discard counters */
	if (info & PORT_DISCARD_EGRESS_ERRS) {
		int weight, i;

		/*
		 * Count all applicable bits as individual errors and
		 * attribute them to the packet that triggered this handler.
		 * This may not be completely accurate due to limitations
		 * on the available hardware error information.  There is
		 * a single information register and any number of error
		 * packets may have occurred and contributed to it before
		 * this routine is called.  This means that:
		 * a) If multiple packets with the same error occur before
		 *    this routine is called, earlier packets are missed.
		 *    There is only a single bit for each error type.
		 * b) Errors may not be attributed to the correct VL.
		 *    The driver attributes all bits in the info register
		 *    to the packet that triggered this call, but the bits
		 *    could be an accumulation of different packets with
		 *    different VLs.
		 * c) A single error packet may have multiple counts
		 *    attached to it.  There is no way for the driver to
		 *    know if multiple bits set in the info register are
		 *    due to a single packet or multiple packets.  The
		 *    driver assumes multiple packets.
		 */
		weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
		for (i = 0; i < weight; i++) {
			__count_port_discards(ppd);
			if (vl >= 0 && vl < TXE_NUM_DATA_VL)
				incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
			else if (vl == 15)
				incr_cntr64(&ppd->port_xmit_discards_vl
					    [C_VL_15]);
		}
	}
}

/*
 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
 * register. Does it represent a 'port inactive' error?
 */
static inline int port_inactive_err(u64 posn)
{
	return (posn >= SEES(TX_LINKDOWN) &&
		posn <= SEES(TX_INCORRECT_LINK_STATE));
}

/*
 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
 * register. Does it represent a 'disallowed packet' error?
 */
static inline int disallowed_pkt_err(int posn)
{
	return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
		posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
}

/*
 * Input value is a bit position of one of the SDMA engine disallowed
 * packet errors.  Return which engine.  Use of this must be guarded by
 * disallowed_pkt_err().
 */
static inline int disallowed_pkt_engine(int posn)
{
	return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
}

/*
 * Translate an SDMA engine to a VL.  Return -1 if the translation cannot
 * be done.
 */
static int engine_to_vl(struct hfi1_devdata *dd, int engine)
{
	struct sdma_vl_map *m;
	int vl;

	/* range check */
	if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
		return -1;

	rcu_read_lock();
	m = rcu_dereference(dd->sdma_map);
	vl = m->engine_to_vl[engine];
	rcu_read_unlock();

	return vl;
}

/*
 * Translate the send context (software index) into a VL.  Return -1 if
 * the translation cannot be done.
 */
static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
{
	struct send_context_info *sci;
	struct send_context *sc;
	int i;

	sci = &dd->send_contexts[sw_index];

	/* there is no information for user (PSM) and ack contexts */
	if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
		return -1;

	sc = sci->sc;
	if (!sc)
		return -1;
	if (dd->vld[15].sc == sc)
		return 15;
	for (i = 0; i < num_vls; i++)
		if (dd->vld[i].sc == sc)
			return i;

	return -1;
}

static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	u64 reg_copy = reg, handled = 0;
	char buf[96];
	int i = 0;

	if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
		start_freeze_handling(dd->pport, 0);
	else if (is_ax(dd) &&
		 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
		 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
		start_freeze_handling(dd->pport, 0);

	while (reg_copy) {
		int posn = fls64(reg_copy);
		/* fls64() returns a 1-based offset, we want it zero based */
		int shift = posn - 1;
		u64 mask = 1ULL << shift;

		if (port_inactive_err(shift)) {
			count_port_inactive(dd);
			handled |= mask;
		} else if (disallowed_pkt_err(shift)) {
			int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));

			handle_send_egress_err_info(dd, vl);
			handled |= mask;
		}
		reg_copy &= ~mask;
	}

	reg &= ~handled;

	/* report any remaining errors */
	if (reg)
		dd_dev_info(dd, "Egress Error: %s\n",
			    egress_err_status_string(buf, sizeof(buf), reg));

	for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->send_egress_err_status_cnt[i]);
	}
}
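
/*
 * Illustrative sketch of the bit walk above (not driver code): for
 * reg_copy == 0x14, fls64() first returns 5 (bit 4), then, once that
 * bit is cleared, 3 (bit 2).  Each visited position is classified as
 * a port-inactive or disallowed-packet error and folded into
 * 'handled' so it is not double-reported afterward.
 */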

static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "Send Error: %s\n",
		    send_err_status_string(buf, sizeof(buf), reg));

	for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->send_err_status_cnt[i]);
	}
}

/*
 * The maximum number of times the error clear down will loop before
 * blocking a repeating error.  This value is arbitrary.
 */
#define MAX_CLEAR_COUNT 20

/*
 * Clear and handle an error register.  All error interrupts are funneled
 * through here to have a central location to correctly handle single-
 * or multi-shot errors.
 *
 * For non per-context registers, call this routine with a context value
 * of 0 so the per-context offset is zero.
 *
 * If the handler loops too many times, assume that something is wrong
 * and can't be fixed, so mask the error bits.
 */
static void interrupt_clear_down(struct hfi1_devdata *dd,
				 u32 context,
				 const struct err_reg_info *eri)
{
	u64 reg;
	u32 count;

	/* read in a loop until no more errors are seen */
	count = 0;
	while (1) {
		reg = read_kctxt_csr(dd, context, eri->status);
		if (reg == 0)
			break;
		write_kctxt_csr(dd, context, eri->clear, reg);
		if (likely(eri->handler))
			eri->handler(dd, context, reg);
		count++;
		if (count > MAX_CLEAR_COUNT) {
			u64 mask;

			dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
				   eri->desc, reg);
			/*
			 * Read-modify-write so any other masks/unmasks
			 * are not affected.
			 */
			mask = read_kctxt_csr(dd, context, eri->mask);
			mask &= ~reg;
			write_kctxt_csr(dd, context, eri->mask, mask);
			break;
		}
	}
}

/*
 * CCE block "misc" interrupt.  Source is < 16.
 */
static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
{
	const struct err_reg_info *eri = &misc_errs[source];

	if (eri->handler) {
		interrupt_clear_down(dd, 0, eri);
	} else {
		dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
			   source);
	}
}

static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   sc_err_status_flags,
			   ARRAY_SIZE(sc_err_status_flags));
}

/*
 * Send context error interrupt.  Source (hw_context) is < 160.
 *
 * All send context errors cause the send context to halt.  The normal
 * clear-down mechanism cannot be used because we cannot clear the
 * error bits until several other long-running items are done first.
 * This is OK because with the context halted, nothing else is going
 * to happen on it anyway.
 */
static void is_sendctxt_err_int(struct hfi1_devdata *dd,
				unsigned int hw_context)
{
	struct send_context_info *sci;
	struct send_context *sc;
	char flags[96];
	u64 status;
	u32 sw_index;
	int i = 0;

	sw_index = dd->hw_to_sw[hw_context];
	if (sw_index >= dd->num_send_contexts) {
		dd_dev_err(dd,
			   "out of range sw index %u for send context %u\n",
			   sw_index, hw_context);
		return;
	}
	sci = &dd->send_contexts[sw_index];
	sc = sci->sc;
	if (!sc) {
		dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
			   sw_index, hw_context);
		return;
	}

	/* tell the software that a halt has begun */
	sc_stop(sc, SCF_HALTED);

	status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);

	dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
		    send_context_err_status_string(flags, sizeof(flags),
						   status));

	if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
		handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));

	/*
	 * Automatically restart halted kernel contexts out of interrupt
	 * context.  User contexts must ask the driver to restart the
	 * context.
	 */
	if (sc->type != SC_USER)
		queue_work(dd->pport->hfi1_wq, &sc->halt_work);

	/*
	 * Update the counters for the corresponding status bits.
	 * Note that these particular counters are aggregated over all
	 * 160 contexts.
	 */
	for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
		if (status & (1ull << i))
			incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
	}
}

static void handle_sdma_eng_err(struct hfi1_devdata *dd,
				unsigned int source, u64 status)
{
	struct sdma_engine *sde;
	int i = 0;

	sde = &dd->per_sdma[source];
#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
		   slashstrip(__FILE__), __LINE__, __func__);
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
		   sde->this_idx, source, (unsigned long long)status);
#endif
	sde->err_cnt++;
	sdma_engine_error(sde, status);

	/*
	 * Update the counters for the corresponding status bits.
	 * Note that these particular counters are aggregated over
	 * all 16 DMA engines.
	 */
	for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
		if (status & (1ull << i))
			incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
	}
}

/*
 * CCE block SDMA error interrupt.  Source is < 16.
 */
static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
{
#ifdef CONFIG_SDMA_VERBOSITY
	struct sdma_engine *sde = &dd->per_sdma[source];

	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
		   slashstrip(__FILE__), __LINE__, __func__);
	dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
		   source);
	sdma_dumpstate(sde);
#endif
	interrupt_clear_down(dd, source, &sdma_eng_err);
}

/*
 * CCE block "various" interrupt.  Source is < 8.
 */
static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
{
	const struct err_reg_info *eri = &various_err[source];

	/*
	 * TCritInt cannot go through interrupt_clear_down()
	 * because it is not a second tier interrupt. The handler
	 * should be called directly.
	 */
	if (source == TCRIT_INT_SOURCE)
		handle_temp_err(dd);
	else if (eri->handler)
		interrupt_clear_down(dd, 0, eri);
	else
		dd_dev_info(dd,
			    "%s: Unimplemented/reserved interrupt %d\n",
			    __func__, source);
}

static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
{
	struct hfi1_pportdata *ppd = dd->pport;
	unsigned long flags;
	u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);

	if (reg & QSFP_HFI0_MODPRST_N) {
		if (!qsfp_mod_present(ppd)) {
			dd_dev_info(dd, "%s: QSFP module removed\n",
				    __func__);

			ppd->driver_link_ready = 0;
			/*
			 * Cable removed, reset all our information about the
			 * cache and cable capabilities
			 */
			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
			/*
			 * We don't set cache_refresh_required here as we
			 * expect an interrupt when a cable is inserted
			 */
			ppd->qsfp_info.cache_valid = 0;
			ppd->qsfp_info.reset_needed = 0;
			ppd->qsfp_info.limiting_active = 0;
			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
					       flags);
			/* invert the ModPresent pin to detect re-insertion */
			write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
				  ASIC_QSFP1_INVERT, qsfp_int_mgmt);

			if ((ppd->offline_disabled_reason >
			  HFI1_ODR_MASK(
			  OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
			  (ppd->offline_disabled_reason ==
			  HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
				ppd->offline_disabled_reason =
				HFI1_ODR_MASK(
				OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);

			if (ppd->host_link_state == HLS_DN_POLL) {
				/*
				 * The link is still in POLL. This means
				 * that the normal link down processing
				 * will not happen. We have to do it here
				 * before turning the DC off.
				 */
				queue_work(ppd->hfi1_wq, &ppd->link_down_work);
			}
		} else {
			dd_dev_info(dd, "%s: QSFP module inserted\n",
				    __func__);

			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
			ppd->qsfp_info.cache_valid = 0;
			ppd->qsfp_info.cache_refresh_required = 1;
			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
					       flags);

			/*
			 * Stop inversion of ModPresent pin to detect
			 * removal of the cable
			 */
			qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
			write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
				  ASIC_QSFP1_INVERT, qsfp_int_mgmt);

			ppd->offline_disabled_reason =
				HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
		}
	}

	if (reg & QSFP_HFI0_INT_N) {
		dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
			    __func__);
		spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
		ppd->qsfp_info.check_interrupt_flags = 1;
		spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
	}

	/* Schedule the QSFP work only if there is a cable attached. */
	if (qsfp_mod_present(ppd))
		queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
}

static int request_host_lcb_access(struct hfi1_devdata *dd)
{
	int ret;

	ret = do_8051_command(dd, HCMD_MISC,
			      (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
			      LOAD_DATA_FIELD_ID_SHIFT, NULL);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd, "%s: command failed with error %d\n",
			   __func__, ret);
	}
	return ret == HCMD_SUCCESS ? 0 : -EBUSY;
}

static int request_8051_lcb_access(struct hfi1_devdata *dd)
{
	int ret;

	ret = do_8051_command(dd, HCMD_MISC,
			      (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
			      LOAD_DATA_FIELD_ID_SHIFT, NULL);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd, "%s: command failed with error %d\n",
			   __func__, ret);
	}
	return ret == HCMD_SUCCESS ? 0 : -EBUSY;
}
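
/*
 * Note on the handshake above: the host asks the 8051 firmware for LCB
 * CSR ownership with HCMD_MISC_REQUEST_LCB_ACCESS and hands it back
 * with HCMD_MISC_GRANT_LCB_ACCESS.  Both are 8051 commands, so either
 * helper returns -EBUSY when the firmware does not answer HCMD_SUCCESS.
 */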

/*
 * Set the LCB selector - allow host access.  The DCC selector always
 * points to the host.
 */
static inline void set_host_lcb_access(struct hfi1_devdata *dd)
{
	write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
		  DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
		  DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
}

/*
 * Clear the LCB selector - allow 8051 access.  The DCC selector always
 * points to the host.
 */
static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
{
	write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
		  DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
}

/*
 * Acquire LCB access from the 8051.  If the host already has access,
 * just increment the counter.
 *
 * Returns:
 *	0 on success
 *	-EBUSY if the link is down or access cannot be acquired
 */
int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
{
	struct hfi1_pportdata *ppd = dd->pport;
	int ret = 0;

	/*
	 * Use the host link state lock so the operation of this routine
	 * (which does a write to a CSR that the 8051 also writes) is
	 * serialized with link state changes, which also use this lock.
	 * The state check below must also be done while holding the lock.
	 */
	if (sleep_ok) {
		mutex_lock(&ppd->hls_lock);
	} else {
		while (!mutex_trylock(&ppd->hls_lock))
			udelay(1);
	}

	/* this access is valid only when the link is up */
	if (ppd->host_link_state & HLS_DOWN) {
		dd_dev_info(dd, "%s: link state %s not up\n",
			    __func__, link_state_name(ppd->host_link_state));
		ret = -EBUSY;
		goto done;
	}

	if (dd->lcb_access_count == 0) {
		ret = request_host_lcb_access(dd);
		if (ret) {
			dd_dev_err(dd,
				   "%s: unable to acquire LCB access, err %d\n",
				   __func__, ret);
			goto done;
		}
		set_host_lcb_access(dd);
	}
	dd->lcb_access_count++;
done:
	mutex_unlock(&ppd->hls_lock);
	return ret;
}

/*
 * Release LCB access by decrementing the use count.  If the count is
 * moving from 1 to 0, inform the 8051 that it has control back.
 *
 * Returns:
 *	0 on success
 *	-errno if unable to release access to the 8051
 */
int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
{
	int ret = 0;

	/*
	 * Use the host link state lock because the acquire needed it.
	 * Here, we only need to keep { selector change, count decrement }
	 * as a unit.
	 */
	if (sleep_ok) {
		mutex_lock(&dd->pport->hls_lock);
	} else {
		while (!mutex_trylock(&dd->pport->hls_lock))
			udelay(1);
	}

	if (dd->lcb_access_count == 0) {
		dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
			   __func__);
		goto done;
	}

	if (dd->lcb_access_count == 1) {
		set_8051_lcb_access(dd);
		ret = request_8051_lcb_access(dd);
		if (ret) {
			dd_dev_err(dd,
				   "%s: unable to release LCB access, err %d\n",
				   __func__, ret);
			/* restore host access if the grant didn't work */
			set_host_lcb_access(dd);
			goto done;
		}
	}
	dd->lcb_access_count--;
done:
	mutex_unlock(&dd->pport->hls_lock);
	return ret;
}
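
/*
 * Usage sketch (illustrative, not driver code): callers bracket direct
 * LCB CSR accesses with the pair above, e.g.
 *
 *	if (!acquire_lcb_access(dd, 1)) {
 *		reg = read_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT);
 *		release_lcb_access(dd, 1);
 *	}
 *
 * dd->lcb_access_count makes nested acquire/release pairs safe; only
 * the outermost pair actually talks to the 8051.
 */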

/*
 * Initialize LCB access variables and state.  Called during driver load,
 * after most of the initialization is finished.
 *
 * The DC default is LCB access on for the host.  The driver defaults to
 * leaving access to the 8051.  Assign access now - this constrains the
 * call to this routine to be after all LCB set-ups are done.
 */
static void init_lcb_access(struct hfi1_devdata *dd)
{
	dd->lcb_access_count = 0;
}

/*
 * Write a response back to an 8051 request.
 */
static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
{
	write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
		  DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
		  (u64)return_code <<
		  DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
		  (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
}

/*
 * Handle host requests from the 8051.
 */
static void handle_8051_request(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 reg;
	u16 data = 0;
	u8 type;

	reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
	if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
		return;	/* no request */

	/* zero out COMPLETED so the response is not considered valid */
	write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);

	/* extract request details */
	type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
		& DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
	data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
		& DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;

	switch (type) {
	case HREQ_LOAD_CONFIG:
	case HREQ_SAVE_CONFIG:
	case HREQ_READ_CONFIG:
	case HREQ_SET_TX_EQ_ABS:
	case HREQ_SET_TX_EQ_REL:
	case HREQ_ENABLE:
		dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
			    type);
		hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
		break;
	case HREQ_CONFIG_DONE:
		hreq_response(dd, HREQ_SUCCESS, 0);
		break;

	case HREQ_INTERFACE_TEST:
		hreq_response(dd, HREQ_SUCCESS, data);
		break;
	default:
		dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
		hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
		break;
	}
}

static void write_global_credit(struct hfi1_devdata *dd,
				u8 vau, u16 total, u16 shared)
{
	write_csr(dd, SEND_CM_GLOBAL_CREDIT,
		  ((u64)total <<
		   SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
		  ((u64)shared <<
		   SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
		  ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
}
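
/*
 * Packing example (illustrative): write_global_credit(dd, 3, 0x1e0, 0)
 * writes SEND_CM_GLOBAL_CREDIT with a total credit limit of 0x1e0, a
 * shared limit of 0, and an AU field of 3, each shifted into place by
 * the CSR shift constants used above.
 */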

/*
 * Set up initial VL15 credits of the remote.  Assumes the rest of
 * the CM credit registers are zero from a previous global or credit reset.
 */
void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
{
	/* leave shared count at zero for both global and VL15 */
	write_global_credit(dd, vau, vl15buf, 0);

	write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
		  << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
}

/*
 * Zero all credit details from the previous connection and
 * reset the CM manager's internal counters.
 */
void reset_link_credits(struct hfi1_devdata *dd)
{
	int i;

	/* remove all previous VL credit limits */
	for (i = 0; i < TXE_NUM_DATA_VL; i++)
		write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
	write_csr(dd, SEND_CM_CREDIT_VL15, 0);
	write_global_credit(dd, 0, 0, 0);
	/* reset the CM block */
	pio_send_control(dd, PSC_CM_RESET);
}

/* convert a vCU to a CU */
static u32 vcu_to_cu(u8 vcu)
{
	return 1 << vcu;
}

/* convert a CU to a vCU */
static u8 cu_to_vcu(u32 cu)
{
	return ilog2(cu);
}

/* convert a vAU to an AU */
static u32 vau_to_au(u8 vau)
{
	return 8 * (1 << vau);
}
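
/*
 * Worked examples for the encodings above: vAU is an allocation-unit
 * exponent with an extra factor of 8, so vau_to_au(0) == 8 bytes,
 * vau_to_au(1) == 16, and vau_to_au(3) == 64.  vCU is a plain power
 * of two: vcu_to_cu(3) == 8 credit units, and cu_to_vcu(8) == 3 by
 * way of ilog2().
 */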

static void set_linkup_defaults(struct hfi1_pportdata *ppd)
{
	ppd->sm_trap_qp = 0x0;
	ppd->sa_qp = 0x1;
}

/*
 * Graceful LCB shutdown.  This leaves the LCB FIFOs in reset.
 */
static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
{
	u64 reg;

	/* clear lcb run: LCB_CFG_RUN.EN = 0 */
	write_csr(dd, DC_LCB_CFG_RUN, 0);
	/* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
		  1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
	/* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
	dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
	reg = read_csr(dd, DCC_CFG_RESET);
	write_csr(dd, DCC_CFG_RESET, reg |
		  (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
		  (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
	(void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
	if (!abort) {
		udelay(1);	/* must hold for the longer of 16 cclks or 20ns */
		write_csr(dd, DCC_CFG_RESET, reg);
		write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
	}
}

/*
 * This routine should be called after the link has been transitioned to
 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
 * reset).
 */
static void dc_shutdown(struct hfi1_devdata *dd)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->dc8051_lock, flags);
	if (dd->dc_shutdown) {
		spin_unlock_irqrestore(&dd->dc8051_lock, flags);
		return;
	}
	dd->dc_shutdown = 1;
	spin_unlock_irqrestore(&dd->dc8051_lock, flags);
	/* Shutdown the LCB */
	lcb_shutdown(dd, 1);
	/*
	 * Going to OFFLINE will have caused the 8051 to put the
	 * SerDes into reset already.  Just need to shut down the
	 * 8051 itself.
	 */
	write_csr(dd, DC_DC8051_CFG_RST, 0x1);
}

/*
 * Calling this after the DC has been brought out of reset should not
 * do any damage.
 */
static void dc_start(struct hfi1_devdata *dd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dd->dc8051_lock, flags);
	if (!dd->dc_shutdown)
		goto done;
	spin_unlock_irqrestore(&dd->dc8051_lock, flags);
	/* Take the 8051 out of reset */
	write_csr(dd, DC_DC8051_CFG_RST, 0ull);
	/* Wait until 8051 is ready */
	ret = wait_fm_ready(dd, TIMEOUT_8051_START);
	if (ret) {
		dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
			   __func__);
	}
	/* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
	write_csr(dd, DCC_CFG_RESET, 0x10);
	/* lcb_shutdown() with abort=1 does not restore these */
	write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
	spin_lock_irqsave(&dd->dc8051_lock, flags);
	dd->dc_shutdown = 0;
done:
	spin_unlock_irqrestore(&dd->dc8051_lock, flags);
}

/*
 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
 */
static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
{
	u64 rx_radr, tx_radr;
	u32 version;

	if (dd->icode != ICODE_FPGA_EMULATION)
		return;

	/* the serdes-based (s) emulator does not need these adjustments */
	if (is_emulator_s(dd))
		return;

	/* the emulator version is in the top byte of irev */
	version = emulator_rev(dd);
	if (!is_ax(dd))
		version = 0x2d;	/* all B0 use 0x2d or higher settings */

	if (version <= 0x12) {
		/* emulator release 0x12 and below */

		/*
		 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
		 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
		 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
		 */
		rx_radr =
		      0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		/*
		 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
		 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
		 */
		tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
	} else if (version <= 0x18) {
		/* emulator releases 0x13 up to 0x18 */
		rx_radr =
		      0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
	} else if (version == 0x19) {
		/* emulator release 0x19 */
		rx_radr =
		      0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
	} else if (version == 0x1a) {
		/* emulator release 0x1a */
		rx_radr =
		      0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
		write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
	} else {
		/* emulator release 0x1b and higher */
		rx_radr =
		      0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
	}

	write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
	/* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
	write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
		  DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
}

/*
 * Handle a SMA idle message
 *
 * This is a work-queue function outside of the interrupt.
 */
void handle_sma_message(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  sma_message_work);
	struct hfi1_devdata *dd = ppd->dd;
	u64 msg;
	int ret;

	/*
	 * msg is bytes 1-4 of the 40-bit idle message - the command code
	 * is stripped off
	 */
	ret = read_idle_sma(dd, &msg);
	if (ret)
		return;
	dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
	/*
	 * React to the SMA message.  Byte[1] (0 for us) is the command.
	 */
	switch (msg & 0xff) {
	case SMA_IDLE_ARM:
		/*
		 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
		 * State Transitions
		 *
		 * Only expected in INIT or ARMED, discard otherwise.
		 */
		if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
			ppd->neighbor_normal = 1;
		break;
	case SMA_IDLE_ACTIVE:
		/*
		 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
		 * State Transitions
		 *
		 * Can activate the node.  Discard otherwise.
		 */
		if (ppd->host_link_state == HLS_UP_ARMED &&
		    ppd->is_active_optimize_enabled) {
			ppd->neighbor_normal = 1;
			ret = set_link_state(ppd, HLS_UP_ACTIVE);
			if (ret)
				dd_dev_err(
					dd,
					"%s: received Active SMA idle message, couldn't set link to Active\n",
					__func__);
		}
		break;
	default:
		dd_dev_err(dd,
			   "%s: received unexpected SMA idle message 0x%llx\n",
			   __func__, msg);
		break;
	}
}

static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
{
	u64 rcvctrl;
	unsigned long flags;

	spin_lock_irqsave(&dd->rcvctrl_lock, flags);
	rcvctrl = read_csr(dd, RCV_CTRL);
	rcvctrl |= add;
	rcvctrl &= ~clear;
	write_csr(dd, RCV_CTRL, rcvctrl);
	spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
}

static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
{
	adjust_rcvctrl(dd, add, 0);
}

static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
{
	adjust_rcvctrl(dd, 0, clear);
}
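
/*
 * Usage note: add_rcvctrl()/clear_rcvctrl() are how RCV_CTRL bits are
 * flipped at runtime - e.g. rxe_freeze() below does
 * clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK) - and the
 * read-modify-write is serialized by dd->rcvctrl_lock.
 */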

/*
 * Called from all error interrupt handlers that need to start SPC
 * freeze mode handling.
 */
void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct send_context *sc;
	int i;

	if (flags & FREEZE_SELF)
		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);

	/* enter frozen mode */
	dd->flags |= HFI1_FROZEN;

	/* notify all SDMA engines that they are going into a freeze */
	sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));

	/* do halt pre-handling on all enabled send contexts */
	for (i = 0; i < dd->num_send_contexts; i++) {
		sc = dd->send_contexts[i].sc;
		if (sc && (sc->flags & SCF_ENABLED))
			sc_stop(sc, SCF_FROZEN | SCF_HALTED);
	}

	/* Send contexts are frozen. Notify user space */
	hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);

	if (flags & FREEZE_ABORT) {
		dd_dev_err(dd,
			   "Aborted freeze recovery. Please REBOOT system\n");
		return;
	}
	/* queue non-interrupt handler */
	queue_work(ppd->hfi1_wq, &ppd->freeze_work);
}

/*
 * Wait until all four sub-blocks indicate that they have frozen or
 * unfrozen, depending on the "freeze" parameter.
 */
static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
{
	unsigned long timeout;
	u64 reg;

	timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
	while (1) {
		reg = read_csr(dd, CCE_STATUS);
		if (freeze) {
			/* waiting until all indicators are set */
			if ((reg & ALL_FROZE) == ALL_FROZE)
				return;	/* all done */
		} else {
			/* waiting until all indicators are clear */
			if ((reg & ALL_FROZE) == 0)
				return;	/* all done */
		}

		if (time_after(jiffies, timeout)) {
			dd_dev_err(dd,
				   "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
				   freeze ? "" : "un", reg & ALL_FROZE,
				   freeze ? ALL_FROZE : 0ull);
			return;
		}
		usleep_range(80, 120);
	}
}
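
/*
 * Polling note: wait_for_freeze_status() samples CCE_STATUS roughly
 * every 100us (usleep_range(80, 120)) until all ALL_FROZE bits match
 * the requested state.  A timeout is logged but not fatal - freeze
 * handling continues regardless.
 */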

/*
 * Do all freeze handling for the RXE block.
 */
static void rxe_freeze(struct hfi1_devdata *dd)
{
	int i;

	/* disable port */
	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);

	/* disable all receive contexts */
	for (i = 0; i < dd->num_rcv_contexts; i++)
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
}

/*
 * Unfreeze handling for the RXE block - kernel contexts only.
 * This will also enable the port.  User contexts will do unfreeze
 * handling on a per-context basis as they call into the driver.
 */
static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
{
	u32 rcvmask;
	int i;

	/* enable all kernel contexts */
	for (i = 0; i < dd->n_krcv_queues; i++) {
		rcvmask = HFI1_RCVCTRL_CTXT_ENB;
		/* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
		rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
		hfi1_rcvctrl(dd, rcvmask, i);
	}

	/* enable port */
	add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
}

/*
 * Non-interrupt SPC freeze handling.
 *
 * This is a work-queue function outside of the triggering interrupt.
 */
void handle_freeze(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  freeze_work);
	struct hfi1_devdata *dd = ppd->dd;

	/* wait for freeze indicators on all affected blocks */
	wait_for_freeze_status(dd, 1);

	/* SPC is now frozen */

	/* do send PIO freeze steps */
	pio_freeze(dd);

	/* do send DMA freeze steps */
	sdma_freeze(dd);

	/* do send egress freeze steps - nothing to do */

	/* do receive freeze steps */
	rxe_freeze(dd);

	/*
	 * Unfreeze the hardware - clear the freeze, wait for each
	 * block's frozen bit to clear, then clear the frozen flag.
	 */
	write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
	wait_for_freeze_status(dd, 0);

	if (is_ax(dd)) {
		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
		wait_for_freeze_status(dd, 1);
		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
		wait_for_freeze_status(dd, 0);
	}

	/* do send PIO unfreeze steps for kernel contexts */
	pio_kernel_unfreeze(dd);

	/* do send DMA unfreeze steps */
	sdma_unfreeze(dd);

	/* do send egress unfreeze steps - nothing to do */

	/* do receive unfreeze steps for kernel contexts */
	rxe_kernel_unfreeze(dd);

	/*
	 * The unfreeze procedure touches global device registers when
	 * it disables and re-enables RXE.  Mark the device unfrozen
	 * after all that is done so other parts of the driver waiting
	 * for the device to unfreeze don't do things out of order.
	 *
	 * The above implies that the meaning of HFI1_FROZEN flag is
	 * "Device has gone into freeze mode and freeze mode handling
	 * is still in progress."
	 *
	 * The flag will be removed when freeze mode processing has
	 * completed.
	 */
	dd->flags &= ~HFI1_FROZEN;
	wake_up(&dd->event_queue);

	/* no longer frozen */
}

/*
 * Handle a link up interrupt from the 8051.
 *
 * This is a work-queue function outside of the interrupt.
 */
void handle_link_up(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  link_up_work);
	set_link_state(ppd, HLS_UP_INIT);

	/* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
	read_ltp_rtt(ppd->dd);
	/*
	 * OPA specifies that certain counters are cleared on a transition
	 * to link up, so do that.
	 */
	clear_linkup_counters(ppd->dd);
	/*
	 * And (re)set link up default values.
	 */
	set_linkup_defaults(ppd);

	/* enforce link speed enabled */
	if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
		/* oops - current speed is not enabled, bounce */
		dd_dev_err(ppd->dd,
			   "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
			   ppd->link_speed_active, ppd->link_speed_enabled);
		set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
				     OPA_LINKDOWN_REASON_SPEED_POLICY);
		set_link_state(ppd, HLS_DN_OFFLINE);
		start_link(ppd);
	}
}

/*
 * Several pieces of LNI information were cached for SMA in ppd.
 * Reset these on link down.
 */
static void reset_neighbor_info(struct hfi1_pportdata *ppd)
{
	ppd->neighbor_guid = 0;
	ppd->neighbor_port_number = 0;
	ppd->neighbor_type = 0;
	ppd->neighbor_fm_security = 0;
}

static const char * const link_down_reason_strs[] = {
	[OPA_LINKDOWN_REASON_NONE] = "None",
	[OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
	[OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
	[OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
	[OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
	[OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
	[OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
	[OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
	[OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
	[OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
	[OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
	[OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
	[OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
	[OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
	[OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
	[OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
	[OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
	[OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
	[OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
	[OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
	[OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
	[OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
	[OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
	[OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
	[OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
	[OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
	[OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
	[OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
	[OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
	[OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
	[OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
	[OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
	[OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
		"Excessive buffer overrun",
	[OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
	[OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
	[OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
	[OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
	[OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
	[OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
	[OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
	[OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
		"Local media not installed",
	[OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
	[OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
	[OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
		"End to end not installed",
	[OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
	[OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
	[OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
	[OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
	[OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
	[OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
};

/* return the neighbor link down reason string */
static const char *link_down_reason_str(u8 reason)
{
	const char *str = NULL;

	if (reason < ARRAY_SIZE(link_down_reason_strs))
		str = link_down_reason_strs[reason];
	if (!str)
		str = "(invalid)";

	return str;
}

/*
 * Handle a link down interrupt from the 8051.
 *
 * This is a work-queue function outside of the interrupt.
 */
void handle_link_down(struct work_struct *work)
{
	u8 lcl_reason, neigh_reason = 0;
	u8 link_down_reason;
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  link_down_work);
	int was_up;
	static const char ldr_str[] = "Link down reason: ";

	if ((ppd->host_link_state &
	     (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
	    ppd->port_type == PORT_TYPE_FIXED)
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);

	/* Go offline first, then deal with reading/writing through 8051 */
	was_up = !!(ppd->host_link_state & HLS_UP);
	set_link_state(ppd, HLS_DN_OFFLINE);

	if (was_up) {
		lcl_reason = 0;
		/* link down reason is only valid if the link was up */
		read_link_down_reason(ppd->dd, &link_down_reason);
		switch (link_down_reason) {
		case LDR_LINK_TRANSFER_ACTIVE_LOW:
			/* the link went down, no idle message reason */
			dd_dev_info(ppd->dd, "%sUnexpected link down\n",
				    ldr_str);
			break;
		case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
			/*
			 * The neighbor reason is only valid if an idle
			 * message was received for it.
			 */
			read_planned_down_reason_code(ppd->dd, &neigh_reason);
			dd_dev_info(ppd->dd,
				    "%sNeighbor link down message %d, %s\n",
				    ldr_str, neigh_reason,
				    link_down_reason_str(neigh_reason));
			break;
		case LDR_RECEIVED_HOST_OFFLINE_REQ:
			dd_dev_info(ppd->dd,
				    "%sHost requested link to go offline\n",
				    ldr_str);
			break;
		default:
			dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
				    ldr_str, link_down_reason);
			break;
		}

		/*
		 * If no reason, assume peer-initiated but missed
		 * LinkGoingDown idle flits.
		 */
		if (neigh_reason == 0)
			lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
	} else {
		/* went down while polling or going up */
		lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
	}

	set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);

	/* inform the SMA when the link transitions from up to down */
	if (was_up && ppd->local_link_down_reason.sma == 0 &&
	    ppd->neigh_link_down_reason.sma == 0) {
		ppd->local_link_down_reason.sma =
			ppd->local_link_down_reason.latest;
		ppd->neigh_link_down_reason.sma =
			ppd->neigh_link_down_reason.latest;
	}

	reset_neighbor_info(ppd);

	/* disable the port */
	clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);

	/*
	 * If there is no cable attached, turn the DC off.  Otherwise,
	 * start the link bring up.
	 */
	if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
		dc_shutdown(ppd->dd);
	else
		start_link(ppd);
}

void handle_link_bounce(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  link_bounce_work);

	/*
	 * Only do something if the link is currently up.
	 */
	if (ppd->host_link_state & HLS_UP) {
		set_link_state(ppd, HLS_DN_OFFLINE);
		start_link(ppd);
	} else {
		dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
			    __func__, link_state_name(ppd->host_link_state));
	}
}

/*
 * Mask conversion: Capability exchange to Port LTP.  The capability
 * exchange always includes the mandatory 16b CRC mode.
 */
static int cap_to_port_ltp(int cap)
{
	int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */

	if (cap & CAP_CRC_14B)
		port_ltp |= PORT_LTP_CRC_MODE_14;
	if (cap & CAP_CRC_48B)
		port_ltp |= PORT_LTP_CRC_MODE_48;
	if (cap & CAP_CRC_12B_16B_PER_LANE)
		port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;

	return port_ltp;
}

/*
 * Convert an OPA Port LTP mask to capability mask
 */
int port_ltp_to_cap(int port_ltp)
{
	int cap_mask = 0;

	if (port_ltp & PORT_LTP_CRC_MODE_14)
		cap_mask |= CAP_CRC_14B;
	if (port_ltp & PORT_LTP_CRC_MODE_48)
		cap_mask |= CAP_CRC_48B;
	if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
		cap_mask |= CAP_CRC_12B_16B_PER_LANE;

	return cap_mask;
}
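
/*
 * Round-trip example: cap_to_port_ltp(CAP_CRC_14B) yields
 * PORT_LTP_CRC_MODE_16 | PORT_LTP_CRC_MODE_14 because the 16b mode is
 * implicit in the capability exchange, while port_ltp_to_cap() of that
 * value returns only CAP_CRC_14B - the mandatory 16b mode has no
 * capability bit.
 */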

/*
 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
 */
static int lcb_to_port_ltp(int lcb_crc)
{
	int port_ltp = 0;

	if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
		port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
	else if (lcb_crc == LCB_CRC_48B)
		port_ltp = PORT_LTP_CRC_MODE_48;
	else if (lcb_crc == LCB_CRC_14B)
		port_ltp = PORT_LTP_CRC_MODE_14;
	else
		port_ltp = PORT_LTP_CRC_MODE_16;

	return port_ltp;
}

/*
 * Our neighbor has indicated that we are allowed to act as a fabric
 * manager, so place the full management partition key in the second
 * (0-based) pkey array position.  Note that we should already have
 * the limited management partition key in array element 1, and also
 * that the port is not yet up when add_full_mgmt_pkey() is invoked.
 */
static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;

	/* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
	if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
		dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
			    __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
	ppd->pkeys[2] = FULL_MGMT_P_KEY;
	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
	hfi1_event_pkey_change(ppd->dd, ppd->port);
}

static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
{
	if (ppd->pkeys[2] != 0) {
		ppd->pkeys[2] = 0;
		(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
		hfi1_event_pkey_change(ppd->dd, ppd->port);
	}
}

/*
 * Convert the given link width to the OPA link width bitmask.
 */
static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
{
	switch (width) {
	case 0:
		/*
		 * Simulator and quick linkup do not set the width.
		 * Just set it to 4x without complaint.
		 */
		if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
			return OPA_LINK_WIDTH_4X;
		return 0; /* no lanes up */
	case 1: return OPA_LINK_WIDTH_1X;
	case 2: return OPA_LINK_WIDTH_2X;
	case 3: return OPA_LINK_WIDTH_3X;
	default:
		dd_dev_info(dd, "%s: invalid width %d, using 4\n",
			    __func__, width);
		/* fall through */
	case 4: return OPA_LINK_WIDTH_4X;
	}
}

/*
 * Do a population count on the bottom nibble.
 */
static const u8 bit_counts[16] = {
	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
};

static inline u8 nibble_to_count(u8 nibble)
{
	return bit_counts[nibble & 0xf];
}
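
/*
 * Examples: nibble_to_count(0x0) == 0, nibble_to_count(0xb) == 3
 * (binary 1011), and nibble_to_count(0xf) == 4.  Bits above the low
 * nibble are masked off first.
 */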

/*
 * Read the active lane information from the 8051 registers and return
 * their widths.
 *
 * Active lane information is found in these 8051 registers:
 *	enable_lane_tx
 *	enable_lane_rx
 */
static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
			    u16 *rx_width)
{
	u16 tx, rx;
	u8 enable_lane_rx;
	u8 enable_lane_tx;
	u8 tx_polarity_inversion;
	u8 rx_polarity_inversion;
	u8 max_rate;

	/* read the active lanes */
	read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
			 &rx_polarity_inversion, &max_rate);
	read_local_lni(dd, &enable_lane_rx);

	/* convert to counts */
	tx = nibble_to_count(enable_lane_tx);
	rx = nibble_to_count(enable_lane_rx);

	/*
	 * Set link_speed_active here, overriding what was set in
	 * handle_verify_cap().  The ASIC 8051 firmware does not correctly
	 * set the max_rate field in handle_verify_cap until v0.19.
	 */
	if ((dd->icode == ICODE_RTL_SILICON) &&
	    (dd->dc8051_ver < dc8051_ver(0, 19))) {
		/* max_rate: 0 = 12.5G, 1 = 25G */
		switch (max_rate) {
		case 0:
			dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
			break;
		default:
			dd_dev_err(dd,
				   "%s: unexpected max rate %d, using 25Gb\n",
				   __func__, (int)max_rate);
			/* fall through */
		case 1:
			dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
			break;
		}
	}

	dd_dev_info(dd,
		    "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
		    enable_lane_tx, tx, enable_lane_rx, rx);
	*tx_width = link_width_to_bits(dd, tx);
	*rx_width = link_width_to_bits(dd, rx);
}

/*
 * Read verify_cap_local_fm_link_width[1] to obtain the link widths
 * negotiated during LNI.  Valid at the end of VerifyCap and afterward.
 */
static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
			      u16 *rx_width)
{
	u16 widths, tx, rx;
	u8 misc_bits, local_flags;
	u16 active_tx, active_rx;

	read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
	tx = widths >> 12;
	rx = (widths >> 8) & 0xf;

	*tx_width = link_width_to_bits(dd, tx);
	*rx_width = link_width_to_bits(dd, rx);

	/* print the active widths */
	get_link_widths(dd, &active_tx, &active_rx);
}

/*
 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
 * hardware information when the link first comes up.
 *
 * The link width is not available until after VerifyCap.AllFramesReceived
 * (the trigger for handle_verify_cap), so this is outside that routine
 * and should be called when the 8051 signals linkup.
 */
void get_linkup_link_widths(struct hfi1_pportdata *ppd)
{
	u16 tx_width, rx_width;

	/* get end-of-LNI link widths */
	get_linkup_widths(ppd->dd, &tx_width, &rx_width);

	/* use tx_width as the link is supposed to be symmetric on link up */
	ppd->link_width_active = tx_width;
	/* link width downgrade active (LWD.A) starts out matching LW.A */
	ppd->link_width_downgrade_tx_active = ppd->link_width_active;
	ppd->link_width_downgrade_rx_active = ppd->link_width_active;
	/* per OPA spec, on link up LWD.E resets to LWD.S */
	ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
	/* cache the active egress rate (units {10^6 bits/sec}) */
	ppd->current_egress_rate = active_egress_rate(ppd);
}
7248
/*
 * Handle a verify capabilities interrupt from the 8051.
 *
 * This is a work-queue function outside of the interrupt.
 */
7254void handle_verify_cap(struct work_struct *work)
7255{
7256 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7257 link_vc_work);
7258 struct hfi1_devdata *dd = ppd->dd;
7259 u64 reg;
7260 u8 power_management;
	u8 continuous;
7262 u8 vcu;
7263 u8 vau;
7264 u8 z;
7265 u16 vl15buf;
7266 u16 link_widths;
7267 u16 crc_mask;
7268 u16 crc_val;
7269 u16 device_id;
7270 u16 active_tx, active_rx;
7271 u8 partner_supported_crc;
7272 u8 remote_tx_rate;
7273 u8 device_rev;
7274
7275 set_link_state(ppd, HLS_VERIFY_CAP);
7276
7277 lcb_shutdown(dd, 0);
7278 adjust_lcb_for_fpga_serdes(dd);
7279
	/*
	 * These are now valid:
	 *	remote VerifyCap fields in the general LNI config
	 *	CSR DC8051_STS_REMOTE_GUID
	 *	CSR DC8051_STS_REMOTE_NODE_TYPE
	 *	CSR DC8051_STS_REMOTE_FM_SECURITY
	 *	CSR DC8051_STS_REMOTE_PORT_NO
	 */
	read_vc_remote_phy(dd, &power_management, &continuous);
7290 read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7291 &partner_supported_crc);
7292 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7293 read_remote_device_id(dd, &device_id, &device_rev);
7294
	/*
	 * The 'MgmtAllowed' information, which is exchanged during LNI,
	 * is also available at this point.
	 */
7298 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7299
7300 get_link_widths(dd, &active_tx, &active_rx);
7301 dd_dev_info(dd,
7302 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
		    (int)power_management, (int)continuous);
7304 dd_dev_info(dd,
7305 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7306 (int)vau, (int)z, (int)vcu, (int)vl15buf,
7307 (int)partner_supported_crc);
7308 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7309 (u32)remote_tx_rate, (u32)link_widths);
7310 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7311 (u32)device_id, (u32)device_rev);
7312
	/*
	 * The peer vAU value just read is the peer receiver value.  HFI does
	 * not support a transmit vAU of 0 (AU == 8).  We advertised that
	 * with Z=1 in the fabric capabilities sent to the peer.  The peer
	 * will see our Z=1, and, if it advertised a vAU of 0, will move its
	 * receive to vAU of 1 (AU == 16).  Do the same here.
	 */
7321 if (vau == 0)
7322 vau = 1;
7323 set_up_vl15(dd, vau, vl15buf);
7324
7325
7326 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7327
7328
7329 if (crc_mask & CAP_CRC_14B)
7330 crc_val = LCB_CRC_14B;
7331 else if (crc_mask & CAP_CRC_48B)
7332 crc_val = LCB_CRC_48B;
7333 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7334 crc_val = LCB_CRC_12B_16B_PER_LANE;
7335 else
7336 crc_val = LCB_CRC_16B;
7337
7338 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7339 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7340 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7341
7342
7343 reg = read_csr(dd, SEND_CM_CTRL);
7344 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7345 write_csr(dd, SEND_CM_CTRL,
7346 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7347 } else {
7348 write_csr(dd, SEND_CM_CTRL,
7349 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7350 }
7351
7352 ppd->link_speed_active = 0;
7353 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
7354
7355 switch (remote_tx_rate) {
7356 case 0:
7357 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7358 break;
7359 case 1:
7360 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7361 break;
7362 }
7363 } else {
7364
7365 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7366
7367 if (rate & 2)
7368 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7369 else if (rate & 1)
7370 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7371 }
7372 if (ppd->link_speed_active == 0) {
7373 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7374 __func__, (int)remote_tx_rate);
7375 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7376 }
7377
	/*
	 * Cache the values of the supported, enabled, and active
	 * LTP CRC modes to return in 'portinfo' queries.  The bit masks
	 * returned in portinfo differ from those kept in link_crc_mask
	 * and crc_val, so convert them here.
	 */
	ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
	/* supported crc modes */
	ppd->port_ltp_crc_mode |=
		cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
	/* enabled crc modes */
	ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
	/* active crc mode */

	/* set up the remote credit return table */
7394 assign_remote_cm_au_table(dd, vcu);
7395
	/*
	 * The LCB is reset on entry to handle_verify_cap(), so this must
	 * be applied on every link up.
	 *
	 * Adjust LCB error kill enable to kill the link if these RBUF
	 * errors are seen:
	 *	REPLAY_BUF_MBE_SMASK
	 *	FLIT_INPUT_BUF_MBE_SMASK
	 */
7405 if (is_ax(dd)) {
7406 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7407 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7408 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7409 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7410 }
7411
	/* pull LCB fifos out of reset - all fifo clocks must be on */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);

	/* give 8051 access to the LCB CSRs */
	write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7417 set_8051_lcb_access(dd);
7418
7419 ppd->neighbor_guid =
7420 read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
7421 ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
7422 DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
7423 ppd->neighbor_type =
7424 read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
7425 DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
7426 ppd->neighbor_fm_security =
7427 read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
7428 DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
7429 dd_dev_info(dd,
7430 "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
7431 ppd->neighbor_guid, ppd->neighbor_type,
7432 ppd->mgmt_allowed, ppd->neighbor_fm_security);
7433 if (ppd->mgmt_allowed)
7434 add_full_mgmt_pkey(ppd);
7435
7436
7437 set_link_state(ppd, HLS_GOING_UP);
7438}
7439
/*
 * Apply the link width downgrade enabled policy against the current active
 * link widths.
 *
 * Called when the enabled policy changes or the active link widths change.
 */
7446void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7447{
7448 int do_bounce = 0;
7449 int tries;
7450 u16 lwde;
7451 u16 tx, rx;
7452
7453
7454 tries = 0;
7455retry:
7456 mutex_lock(&ppd->hls_lock);
7457
7458 if (ppd->host_link_state & HLS_DOWN) {
7459
7460 if (ppd->host_link_state & HLS_GOING_UP) {
7461 if (++tries < 1000) {
7462 mutex_unlock(&ppd->hls_lock);
7463 usleep_range(100, 120);
7464 goto retry;
7465 }
7466 dd_dev_err(ppd->dd,
7467 "%s: giving up waiting for link state change\n",
7468 __func__);
7469 }
7470 goto done;
7471 }
7472
7473 lwde = ppd->link_width_downgrade_enabled;
7474
7475 if (refresh_widths) {
7476 get_link_widths(ppd->dd, &tx, &rx);
7477 ppd->link_width_downgrade_tx_active = tx;
7478 ppd->link_width_downgrade_rx_active = rx;
7479 }
7480
7481 if (ppd->link_width_downgrade_tx_active == 0 ||
7482 ppd->link_width_downgrade_rx_active == 0) {
7483
7484 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7485 } else if (lwde == 0) {
		/*
		 * Downgrade is disabled.  Bounce if the link is not at the
		 * original link width.
		 */
7489 if ((ppd->link_width_active !=
7490 ppd->link_width_downgrade_tx_active) ||
7491 (ppd->link_width_active !=
7492 ppd->link_width_downgrade_rx_active)) {
7493 dd_dev_err(ppd->dd,
7494 "Link downgrade is disabled and link has downgraded, downing link\n");
7495 dd_dev_err(ppd->dd,
7496 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7497 ppd->link_width_active,
7498 ppd->link_width_downgrade_tx_active,
7499 ppd->link_width_downgrade_rx_active);
7500 do_bounce = 1;
7501 }
7502 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7503 (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7504
7505 dd_dev_err(ppd->dd,
7506 "Link is outside of downgrade allowed, downing link\n");
7507 dd_dev_err(ppd->dd,
7508 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7509 lwde, ppd->link_width_downgrade_tx_active,
7510 ppd->link_width_downgrade_rx_active);
7511 do_bounce = 1;
7512 }
7513
7514done:
7515 mutex_unlock(&ppd->hls_lock);
7516
7517 if (do_bounce) {
7518 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7519 OPA_LINKDOWN_REASON_WIDTH_POLICY);
7520 set_link_state(ppd, HLS_DN_OFFLINE);
7521 start_link(ppd);
7522 }
7523}
7524
/*
 * Handle a link downgrade interrupt from the 8051.
 *
 * This is a work-queue function outside of the interrupt.
 */
7530void handle_link_downgrade(struct work_struct *work)
7531{
7532 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7533 link_downgrade_work);
7534
7535 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7536 apply_link_downgrade_policy(ppd, 1);
7537}
7538
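/*
 * Helpers that decode the DCC, LCB, and 8051 error/flag registers into
 * human readable strings for logging.
 */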
7539static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7540{
7541 return flag_string(buf, buf_len, flags, dcc_err_flags,
7542 ARRAY_SIZE(dcc_err_flags));
7543}
7544
7545static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7546{
7547 return flag_string(buf, buf_len, flags, lcb_err_flags,
7548 ARRAY_SIZE(lcb_err_flags));
7549}
7550
7551static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7552{
7553 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7554 ARRAY_SIZE(dc8051_err_flags));
7555}
7556
7557static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7558{
7559 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7560 ARRAY_SIZE(dc8051_info_err_flags));
7561}
7562
7563static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7564{
7565 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7566 ARRAY_SIZE(dc8051_info_host_msg_flags));
7567}
7568
7569static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7570{
7571 struct hfi1_pportdata *ppd = dd->pport;
7572 u64 info, err, host_msg;
7573 int queue_link_down = 0;
7574 char buf[96];
7575
7576
7577 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7578
7579
7580 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7581 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7582 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7583 host_msg = (info >>
7584 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7585 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7586
7587
7588
7589
7590 if (err & FAILED_LNI) {
			/*
			 * LNI error indications are cleared by the 8051
			 * only when starting polling.  Only pay attention
			 * to them when in the states that occur during
			 * link up.
			 */
7597 if (ppd->host_link_state
7598 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7599 queue_link_down = 1;
7600 dd_dev_info(dd, "Link error: %s\n",
7601 dc8051_info_err_string(buf,
7602 sizeof(buf),
7603 err &
7604 FAILED_LNI));
7605 }
7606 err &= ~(u64)FAILED_LNI;
7607 }
7608
7609 if (err & UNKNOWN_FRAME) {
7610 ppd->unknown_frame_count++;
7611 err &= ~(u64)UNKNOWN_FRAME;
7612 }
7613 if (err) {
7614
7615 dd_dev_err(dd, "8051 info error: %s\n",
7616 dc8051_info_err_string(buf, sizeof(buf),
7617 err));
7618 }
7619
7620
7621
7622
7623 if (host_msg & HOST_REQ_DONE) {
			/*
			 * Presently, the driver does a busy wait for
			 * host requests to complete.  This is only an
			 * informational message.
			 *
			 * NOTE: The 8051 clears the host message
			 * information *on the next 8051 command*.
			 * Therefore, when linkup is achieved, this
			 * flag will still be set.
			 */
7633 host_msg &= ~(u64)HOST_REQ_DONE;
7634 }
7635 if (host_msg & BC_SMA_MSG) {
7636 queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7637 host_msg &= ~(u64)BC_SMA_MSG;
7638 }
7639 if (host_msg & LINKUP_ACHIEVED) {
7640 dd_dev_info(dd, "8051: Link up\n");
7641 queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7642 host_msg &= ~(u64)LINKUP_ACHIEVED;
7643 }
7644 if (host_msg & EXT_DEVICE_CFG_REQ) {
7645 handle_8051_request(ppd);
7646 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7647 }
7648 if (host_msg & VERIFY_CAP_FRAME) {
7649 queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7650 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7651 }
7652 if (host_msg & LINK_GOING_DOWN) {
7653 const char *extra = "";
7654
7655 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7656 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7657 extra = " (ignoring downgrade)";
7658 }
7659 dd_dev_info(dd, "8051: Link down%s\n", extra);
7660 queue_link_down = 1;
7661 host_msg &= ~(u64)LINK_GOING_DOWN;
7662 }
7663 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7664 queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7665 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7666 }
7667 if (host_msg) {
7668
7669 dd_dev_info(dd, "8051 info host message: %s\n",
7670 dc8051_info_host_msg_string(buf,
7671 sizeof(buf),
7672 host_msg));
7673 }
7674
7675 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7676 }
7677 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
		/*
		 * Lost the 8051 heartbeat.  If this happens, we
		 * receive constant interrupts about it.  Disable
		 * the interrupt after the first.
		 */
7683 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7684 write_csr(dd, DC_DC8051_ERR_EN,
7685 read_csr(dd, DC_DC8051_ERR_EN) &
7686 ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7687
7688 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7689 }
7690 if (reg) {
7691
7692 dd_dev_err(dd, "8051 error: %s\n",
7693 dc8051_err_string(buf, sizeof(buf), reg));
7694 }
7695
7696 if (queue_link_down) {
		/*
		 * If the link is already going down or disabled, do not
		 * queue another.
		 */
7701 if ((ppd->host_link_state &
7702 (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7703 ppd->link_enabled == 0) {
7704 dd_dev_info(dd, "%s: not queuing link down\n",
7705 __func__);
7706 } else {
7707 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7708 }
7709 }
7710}
7711
7712static const char * const fm_config_txt[] = {
7713[0] =
7714 "BadHeadDist: Distance violation between two head flits",
7715[1] =
7716 "BadTailDist: Distance violation between two tail flits",
7717[2] =
7718 "BadCtrlDist: Distance violation between two credit control flits",
7719[3] =
7720 "BadCrdAck: Credits return for unsupported VL",
7721[4] =
7722 "UnsupportedVLMarker: Received VL Marker",
7723[5] =
7724 "BadPreempt: Exceeded the preemption nesting level",
7725[6] =
7726 "BadControlFlit: Received unsupported control flit",
7727
7728[8] =
7729 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7730};
7731
7732static const char * const port_rcv_txt[] = {
7733[1] =
7734 "BadPktLen: Illegal PktLen",
7735[2] =
7736 "PktLenTooLong: Packet longer than PktLen",
7737[3] =
7738 "PktLenTooShort: Packet shorter than PktLen",
7739[4] =
7740 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7741[5] =
7742 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7743[6] =
7744 "BadL2: Illegal L2 opcode",
7745[7] =
7746 "BadSC: Unsupported SC",
7747[9] =
7748 "BadRC: Illegal RC",
7749[11] =
7750 "PreemptError: Preempting with same VL",
7751[12] =
7752 "PreemptVL15: Preempting a VL15 packet",
7753};
7754
7755#define OPA_LDR_FMCONFIG_OFFSET 16
7756#define OPA_LDR_PORTRCV_OFFSET 0
7757static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7758{
7759 u64 info, hdr0, hdr1;
7760 const char *extra;
7761 char buf[96];
7762 struct hfi1_pportdata *ppd = dd->pport;
7763 u8 lcl_reason = 0;
7764 int do_bounce = 0;
7765
7766 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7767 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7768 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7769 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7770
7771 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7772 }
7773 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7774 }
7775
7776 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7777 struct hfi1_pportdata *ppd = dd->pport;
7778
7779 if (ppd->link_downed < (u32)UINT_MAX)
7780 ppd->link_downed++;
7781 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7782 }
7783
7784 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7785 u8 reason_valid = 1;
7786
7787 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7788 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7789 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7790
7791 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7792 }
7793 switch (info) {
7794 case 0:
7795 case 1:
7796 case 2:
7797 case 3:
7798 case 4:
7799 case 5:
7800 case 6:
7801 extra = fm_config_txt[info];
7802 break;
7803 case 8:
7804 extra = fm_config_txt[info];
7805 if (ppd->port_error_action &
7806 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7807 do_bounce = 1;
7808
7809
7810
7811
7812 lcl_reason =
7813 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7814 }
7815 break;
7816 default:
7817 reason_valid = 0;
7818 snprintf(buf, sizeof(buf), "reserved%lld", info);
7819 extra = buf;
7820 break;
7821 }
7822
7823 if (reason_valid && !do_bounce) {
7824 do_bounce = ppd->port_error_action &
7825 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7826 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7827 }
7828
7829
7830 dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
7831 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7832 }
7833
7834 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7835 u8 reason_valid = 1;
7836
7837 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7838 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7839 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7840 if (!(dd->err_info_rcvport.status_and_code &
7841 OPA_EI_STATUS_SMASK)) {
7842 dd->err_info_rcvport.status_and_code =
7843 info & OPA_EI_CODE_SMASK;
7844
7845 dd->err_info_rcvport.status_and_code |=
7846 OPA_EI_STATUS_SMASK;
			/*
			 * save first 2 flits in the packet that caused
			 * the error
			 */
7851 dd->err_info_rcvport.packet_flit1 = hdr0;
7852 dd->err_info_rcvport.packet_flit2 = hdr1;
7853 }
7854 switch (info) {
7855 case 1:
7856 case 2:
7857 case 3:
7858 case 4:
7859 case 5:
7860 case 6:
7861 case 7:
7862 case 9:
7863 case 11:
7864 case 12:
7865 extra = port_rcv_txt[info];
7866 break;
7867 default:
7868 reason_valid = 0;
7869 snprintf(buf, sizeof(buf), "reserved%lld", info);
7870 extra = buf;
7871 break;
7872 }
7873
7874 if (reason_valid && !do_bounce) {
7875 do_bounce = ppd->port_error_action &
7876 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7877 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7878 }
7879
7880
7881 dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
7882 dd_dev_info(dd, " hdr0 0x%llx, hdr1 0x%llx\n",
7883 hdr0, hdr1);
7884
7885 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7886 }
7887
7888 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7889
7890 dd_dev_info(dd, "8051 access to LCB blocked\n");
7891 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7892 }
7893 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7894
7895 dd_dev_info(dd, "host access to LCB blocked\n");
7896 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7897 }
7898
7899
7900 if (reg)
7901 dd_dev_info(dd, "DCC Error: %s\n",
7902 dcc_err_string(buf, sizeof(buf), reg));
7903
7904 if (lcl_reason == 0)
7905 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7906
7907 if (do_bounce) {
7908 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
7909 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7910 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7911 }
7912}
7913
7914static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7915{
7916 char buf[96];
7917
7918 dd_dev_info(dd, "LCB Error: %s\n",
7919 lcb_err_string(buf, sizeof(buf), reg));
7920}
7921
/*
 * CCE block DC interrupt.  Source is < 8.
 */
7925static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7926{
7927 const struct err_reg_info *eri = &dc_errs[source];
7928
7929 if (eri->handler) {
7930 interrupt_clear_down(dd, 0, eri);
	} else if (source == 3 /* dc_lbm_int */) {
		/*
		 * A parity error has occurred on the address/control lines
		 * presented to the LBM.  The error is a single pulse, there
		 * is no associated error flag, and it is non-maskable.  This
		 * is because if a parity error occurs on the request the
		 * request is dropped.  This should never occur, but it is
		 * nice to know if it ever does.
		 */
7941 dd_dev_err(dd, "Parity error in DC LBM block\n");
7942 } else {
7943 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
7944 }
7945}
7946
/*
 * TX block send credit interrupt.  Source is < 160.
 */
7950static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
7951{
7952 sc_group_release_update(dd, source);
7953}
7954
/*
 * TX block SDMA interrupt.  Source is < 48.
 *
 * SDMA interrupts are grouped by type:
 *
 *	 0 -  N-1 = SDma
 *	 N - 2N-1 = SDmaProgress
 *	2N - 3N-1 = SDmaIdle
 */
7964static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
7965{
7966
7967 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
7968
7969 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
7970
7971#ifdef CONFIG_SDMA_VERBOSITY
7972 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
7973 slashstrip(__FILE__), __LINE__, __func__);
7974 sdma_dumpstate(&dd->per_sdma[which]);
7975#endif
7976
7977 if (likely(what < 3 && which < dd->num_sdma)) {
7978 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
7979 } else {
7980
7981 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
7982 }
7983}
7984
/*
 * RX block receive available interrupt.  Source is < 160.
 */
7988static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
7989{
7990 struct hfi1_ctxtdata *rcd;
7991 char *err_detail;
7992
7993 if (likely(source < dd->num_rcv_contexts)) {
7994 rcd = dd->rcd[source];
7995 if (rcd) {
7996 if (source < dd->first_user_ctxt)
7997 rcd->do_interrupt(rcd, 0);
7998 else
7999 handle_user_interrupt(rcd);
8000 return;
8001 }
8002
8003 err_detail = "dataless";
8004 } else {
8005
8006 err_detail = "out of range";
8007 }
8008 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
8009 err_detail, source);
8010}
8011
/*
 * RX block receive urgent interrupt.  Source is < 160.
 */
8015static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8016{
8017 struct hfi1_ctxtdata *rcd;
8018 char *err_detail;
8019
8020 if (likely(source < dd->num_rcv_contexts)) {
8021 rcd = dd->rcd[source];
8022 if (rcd) {
8023
8024 if (source >= dd->first_user_ctxt)
8025 handle_user_interrupt(rcd);
8026 return;
8027 }
8028
8029 err_detail = "dataless";
8030 } else {
8031
8032 err_detail = "out of range";
8033 }
8034 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
8035 err_detail, source);
8036}
8037
/*
 * Reserved range interrupt.  Should not be called in normal operation.
 */
8041static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8042{
8043 char name[64];
8044
8045 dd_dev_err(dd, "unexpected %s interrupt\n",
8046 is_reserved_name(name, sizeof(name), source));
8047}
8048
8049static const struct is_table is_table[] = {
8050
8051
8052
8053
8054{ IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
8055 is_misc_err_name, is_misc_err_int },
8056{ IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
8057 is_sdma_eng_err_name, is_sdma_eng_err_int },
8058{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8059 is_sendctxt_err_name, is_sendctxt_err_int },
8060{ IS_SDMA_START, IS_SDMA_END,
8061 is_sdma_eng_name, is_sdma_eng_int },
8062{ IS_VARIOUS_START, IS_VARIOUS_END,
8063 is_various_name, is_various_int },
8064{ IS_DC_START, IS_DC_END,
8065 is_dc_name, is_dc_int },
8066{ IS_RCVAVAIL_START, IS_RCVAVAIL_END,
8067 is_rcv_avail_name, is_rcv_avail_int },
8068{ IS_RCVURGENT_START, IS_RCVURGENT_END,
8069 is_rcv_urgent_name, is_rcv_urgent_int },
8070{ IS_SENDCREDIT_START, IS_SENDCREDIT_END,
8071 is_send_credit_name, is_send_credit_int},
8072{ IS_RESERVED_START, IS_RESERVED_END,
8073 is_reserved_name, is_reserved_int},
8074};
8075
/*
 * Interrupt source interrupt - called when the given source has an
 * interrupt.  Source is a bit index into an array of 64-bit integers.
 */
8080static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8081{
8082 const struct is_table *entry;
8083
8084
8085 for (entry = &is_table[0]; entry->is_name; entry++) {
8086 if (source < entry->end) {
8087 trace_hfi1_interrupt(dd, entry, source);
8088 entry->is_int(dd, source - entry->start);
8089 return;
8090 }
8091 }
8092
8093 dd_dev_err(dd, "invalid interrupt source %u\n", source);
8094}
8095
/*
 * General interrupt handler.  This is able to correctly handle
 * all interrupts in case INTx is used.
 */
8100static irqreturn_t general_interrupt(int irq, void *data)
8101{
8102 struct hfi1_devdata *dd = data;
8103 u64 regs[CCE_NUM_INT_CSRS];
8104 u32 bit;
8105 int i;
8106
8107 this_cpu_inc(*dd->int_counter);
8108
8109
8110 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8111 if (dd->gi_mask[i] == 0) {
8112 regs[i] = 0;
8113 continue;
8114 }
8115 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8116 dd->gi_mask[i];
8117
8118 if (regs[i])
8119 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8120 }
8121
8122
	for_each_set_bit(bit, (unsigned long *)&regs[0],
			 CCE_NUM_INT_CSRS * 64) {
8125 is_interrupt(dd, bit);
8126 }
8127
8128 return IRQ_HANDLED;
8129}
8130
8131static irqreturn_t sdma_interrupt(int irq, void *data)
8132{
8133 struct sdma_engine *sde = data;
8134 struct hfi1_devdata *dd = sde->dd;
8135 u64 status;
8136
8137#ifdef CONFIG_SDMA_VERBOSITY
8138 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8139 slashstrip(__FILE__), __LINE__, __func__);
8140 sdma_dumpstate(sde);
8141#endif
8142
8143 this_cpu_inc(*dd->int_counter);
8144
8145
8146 status = read_csr(dd,
8147 CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8148 & sde->imask;
8149 if (likely(status)) {
8150
8151 write_csr(dd,
8152 CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8153 status);
8154
8155
8156 sdma_engine_interrupt(sde, status);
	} else {
		dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
			   sde->this_idx);
	}
8160
8161 return IRQ_HANDLED;
8162}
8163
/*
 * Clear the receive interrupt.  Use a read of the interrupt clear CSR
 * to make sure the write completed.  This does NOT guarantee that
 * queued DMA writes to memory from the chip are pushed.
 */
8169static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8170{
8171 struct hfi1_devdata *dd = rcd->dd;
8172 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8173
	mmiowb();	/* make sure everything before is written */
	write_csr(dd, addr, rcd->imask);
	/* force the above write on the chip and get a value back */
8177 (void)read_csr(dd, addr);
8178}
8179

/* force the receive interrupt */
8181void force_recv_intr(struct hfi1_ctxtdata *rcd)
8182{
8183 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8184}
8185
8186
/*
 * Return non-zero if a packet is present.
 *
 * This routine is called when rechecking for packets after the RcvAvail
 * interrupt has been cleared down.  First, do a quick check of memory for
 * a packet present.  If not found, use an expensive CSR read of the context
 * tail to determine the actual tail.  The CSR read is necessary because
 * there is no method to push pending DMAs to memory other than an interrupt
 * and we are trying to determine if we need to force an interrupt.
 */
8196static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8197{
8198 u32 tail;
8199 int present;
8200
8201 if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
8202 present = (rcd->seq_cnt ==
8203 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8204 else
8205 present = (rcd->head != get_rcvhdrtail(rcd));
8206
8207 if (present)
8208 return 1;
8209
8210
8211 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8212 return rcd->head != tail;
8213}
8214
8215
/*
 * Receive packet IRQ handler.  This routine expects to be on its own IRQ.
 * This routine will try to handle packets immediately (latency), but if
 * it finds too many, it will invoke the thread handler (bandwidth).  The
 * chip receive interrupt is *not* cleared down until this or the thread
 * (if called) finishes.  The intent is to avoid extra interrupts while we
 * are processing packets anyway.
 */
8223static irqreturn_t receive_context_interrupt(int irq, void *data)
8224{
8225 struct hfi1_ctxtdata *rcd = data;
8226 struct hfi1_devdata *dd = rcd->dd;
8227 int disposition;
8228 int present;
8229
8230 trace_hfi1_receive_interrupt(dd, rcd->ctxt);
8231 this_cpu_inc(*dd->int_counter);
8232 aspm_ctx_disable(rcd);
8233
8234
8235 disposition = rcd->do_interrupt(rcd, 0);
8236
	/*
	 * Too many packets were seen while processing packets in this
	 * IRQ handler.  Invoke the handler thread.  The receive interrupt
	 * remains blocked.
	 */
8242 if (disposition == RCV_PKT_LIMIT)
8243 return IRQ_WAKE_THREAD;
8244
	/*
	 * The packet processor detected no more packets.  Clear the receive
	 * interrupt and recheck for a packet that may have arrived after the
	 * previous check and interrupt clear.  If a packet arrived, force
	 * another interrupt.
	 */
8251 clear_recv_intr(rcd);
8252 present = check_packet_present(rcd);
8253 if (present)
8254 force_recv_intr(rcd);
8255
8256 return IRQ_HANDLED;
8257}
8258
/*
 * Receive packet thread handler.  This expects to be invoked with the
 * receive interrupt still blocked.
 */
8263static irqreturn_t receive_context_thread(int irq, void *data)
8264{
8265 struct hfi1_ctxtdata *rcd = data;
8266 int present;
8267
8268
8269 (void)rcd->do_interrupt(rcd, 1);
8270
	/*
	 * The packet processor will only return if it detected no more
	 * packets.  Hold IRQs here so we can safely clear the interrupt and
	 * recheck for a packet that may have arrived after the previous
	 * check and the interrupt clear.  If a packet arrived, force another
	 * interrupt.
	 */
8278 local_irq_disable();
8279 clear_recv_intr(rcd);
8280 present = check_packet_present(rcd);
8281 if (present)
8282 force_recv_intr(rcd);
8283 local_irq_enable();
8284
8285 return IRQ_HANDLED;
8286}
8287
8288
8289
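/* read the 8051's view of the current physical link state */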
8290u32 read_physical_state(struct hfi1_devdata *dd)
8291{
8292 u64 reg;
8293
8294 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8295 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8296 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8297}
8298
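/* read the chip's current logical link state from DCC_CFG_PORT_CONFIG */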
8299u32 read_logical_state(struct hfi1_devdata *dd)
8300{
8301 u64 reg;
8302
8303 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8304 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8305 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8306}
8307
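/* set the chip's logical link state in DCC_CFG_PORT_CONFIG */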
8308static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8309{
8310 u64 reg;
8311
8312 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8313
8314 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8315 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8316 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8317}
8318
/*
 * Use the 8051 to read a LCB CSR.
 */
8322static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8323{
8324 u32 regno;
8325 int ret;
8326
8327 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8328 if (acquire_lcb_access(dd, 0) == 0) {
8329 *data = read_csr(dd, addr);
8330 release_lcb_access(dd, 0);
8331 return 0;
8332 }
8333 return -EBUSY;
8334 }
8335
8336
8337 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8338 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8339 if (ret != HCMD_SUCCESS)
8340 return -EBUSY;
8341 return 0;
8342}
8343
/*
 * Read an LCB CSR.  Access may not be in host control, so check.
 * Return 0 on success, -EBUSY on failure.
 */
8348int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8349{
8350 struct hfi1_pportdata *ppd = dd->pport;
8351
8352
8353 if (ppd->host_link_state & HLS_UP)
8354 return read_lcb_via_8051(dd, addr, data);
8355
8356 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8357 return -EBUSY;
8358
8359 *data = read_csr(dd, addr);
8360 return 0;
8361}
8362
/*
 * Use the 8051 to write a LCB CSR.
 */
8366static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8367{
8368 u32 regno;
8369 int ret;
8370
8371 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8372 (dd->dc8051_ver < dc8051_ver(0, 20))) {
8373 if (acquire_lcb_access(dd, 0) == 0) {
8374 write_csr(dd, addr, data);
8375 release_lcb_access(dd, 0);
8376 return 0;
8377 }
8378 return -EBUSY;
8379 }
8380
8381
8382 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8383 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8384 if (ret != HCMD_SUCCESS)
8385 return -EBUSY;
8386 return 0;
8387}
8388
/*
 * Write an LCB CSR.  Access may not be in host control, so check.
 * Return 0 on success, -EBUSY on failure.
 */
8393int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8394{
8395 struct hfi1_pportdata *ppd = dd->pport;
8396
8397
8398 if (ppd->host_link_state & HLS_UP)
8399 return write_lcb_via_8051(dd, addr, data);
8400
8401 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8402 return -EBUSY;
8403
8404 write_csr(dd, addr, data);
8405 return 0;
8406}
8407
/*
 * Run an 8051 host command.
 *
 * Returns:
 *	< 0 = Linux error, not able to get access
 *	> 0 = 8051 command RETURN_CODE
 */
8413static int do_8051_command(
8414 struct hfi1_devdata *dd,
8415 u32 type,
8416 u64 in_data,
8417 u64 *out_data)
8418{
8419 u64 reg, completed;
8420 int return_code;
8421 unsigned long flags;
8422 unsigned long timeout;
8423
8424 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8425
8426
8427
8428
8429
8430 spin_lock_irqsave(&dd->dc8051_lock, flags);
8431
8432
8433 if (dd->dc_shutdown) {
8434 return_code = -ENODEV;
8435 goto fail;
8436 }
8437
	/*
	 * If an 8051 host command timed out previously, then the 8051 is
	 * stuck.
	 *
	 * On first timeout, attempt to reset and restart the entire DC
	 * block (including 8051).
	 *
	 * If the 8051 times out a second time, the reset did not bring it
	 * back to healthy life.  In that case, fail any subsequent commands.
	 */
8448 if (dd->dc8051_timed_out) {
8449 if (dd->dc8051_timed_out > 1) {
8450 dd_dev_err(dd,
8451 "Previous 8051 host command timed out, skipping command %u\n",
8452 type);
8453 return_code = -ENXIO;
8454 goto fail;
8455 }
8456 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8457 dc_shutdown(dd);
8458 dc_start(dd);
8459 spin_lock_irqsave(&dd->dc8051_lock, flags);
8460 }
8461
	/*
	 * If there is no timeout, then the 8051 command interface is
	 * waiting for a command.
	 */

	/*
	 * When writing a LCB CSR, out_data contains the full value to
	 * be written, while in_data contains the relative LCB
	 * address in 7:0.  Do the work here, rather than the caller,
	 * of distributing the write data to where it needs to go:
	 *
	 *	out_data[39:0]  -> in_data[47:8]
	 *	out_data[47:40] -> DC_DC8051_CFG_EXT_DEV_0.RETURN_CODE
	 *	out_data[63:48] -> DC_DC8051_CFG_EXT_DEV_0.RSP_DATA
	 */
8478 if (type == HCMD_WRITE_LCB_CSR) {
8479 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8480 reg = ((((*out_data) >> 40) & 0xff) <<
8481 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8482 | ((((*out_data) >> 48) & 0xffff) <<
8483 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8484 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8485 }
8486
	/*
	 * Do two writes: the first to stabilize the type and req_data, the
	 * second to activate.
	 */
8491 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8492 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8493 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8494 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8495 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8496 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8497 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8498
8499
8500 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8501 while (1) {
8502 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8503 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8504 if (completed)
8505 break;
8506 if (time_after(jiffies, timeout)) {
8507 dd->dc8051_timed_out++;
8508 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8509 if (out_data)
8510 *out_data = 0;
8511 return_code = -ETIMEDOUT;
8512 goto fail;
8513 }
8514 udelay(2);
8515 }
8516
8517 if (out_data) {
8518 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8519 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8520 if (type == HCMD_READ_LCB_CSR) {
8521
8522 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8523 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8524 << (48
8525 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8526 }
8527 }
8528 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8529 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8530 dd->dc8051_timed_out = 0;
8531
8532
8533
8534 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8535
8536fail:
8537 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8538
8539 return return_code;
8540}
8541
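/* ask the 8051 to change the physical link state */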
8542static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8543{
8544 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8545}
8546
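/*
 * Write a 32-bit config data value to the 8051: field_id selects the
 * frame, lane_id selects GENERAL_CONFIG or a specific lane.
 */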
8547int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8548 u8 lane_id, u32 config_data)
8549{
8550 u64 data;
8551 int ret;
8552
8553 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8554 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8555 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8556 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8557 if (ret != HCMD_SUCCESS) {
8558 dd_dev_err(dd,
8559 "load 8051 config: field id %d, lane %d, err %d\n",
8560 (int)field_id, (int)lane_id, ret);
8561 }
8562 return ret;
8563}
8564
/*
 * Read the 8051 firmware "registers".  Use the RAM directly.  Always
 * set the result, even on error.
 * Return 0 on success, -errno on failure.
 */
8570int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8571 u32 *result)
8572{
8573 u64 big_data;
8574 u32 addr;
8575 int ret;
8576
8577
8578 if (lane_id < 4)
8579 addr = (4 * NUM_GENERAL_FIELDS)
8580 + (lane_id * 4 * NUM_LANE_FIELDS);
8581 else
8582 addr = 0;
8583 addr += field_id * 4;
8584
8585
8586 ret = read_8051_data(dd, addr, 8, &big_data);
8587
8588 if (ret == 0) {
8589
8590 if (addr & 0x4)
8591 *result = (u32)(big_data >> 32);
8592 else
8593 *result = (u32)big_data;
8594 } else {
8595 *result = 0;
8596 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8597 __func__, lane_id, field_id);
8598 }
8599
8600 return ret;
8601}
8602
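/*
 * Write the local verify capability PHY frame: power management and
 * continuous remote update support.
 */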
8603static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8604 u8 continuous)
8605{
8606 u32 frame;
8607
8608 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8609 | power_management << POWER_MANAGEMENT_SHIFT;
8610 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8611 GENERAL_CONFIG, frame);
8612}
8613
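/*
 * Write the local verify capability fabric frame: vAU, Z, vCU, VL15
 * credits, and supported CRC sizes.
 */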
8614static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8615 u16 vl15buf, u8 crc_sizes)
8616{
8617 u32 frame;
8618
8619 frame = (u32)vau << VAU_SHIFT
8620 | (u32)z << Z_SHIFT
8621 | (u32)vcu << VCU_SHIFT
8622 | (u32)vl15buf << VL15BUF_SHIFT
8623 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8624 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8625 GENERAL_CONFIG, frame);
8626}
8627
8628static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8629 u8 *flag_bits, u16 *link_widths)
8630{
8631 u32 frame;
8632
8633 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8634 &frame);
8635 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8636 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8637 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8638}
8639
8640static int write_vc_local_link_width(struct hfi1_devdata *dd,
8641 u8 misc_bits,
8642 u8 flag_bits,
8643 u16 link_widths)
8644{
8645 u32 frame;
8646
8647 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8648 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8649 | (u32)link_widths << LINK_WIDTH_SHIFT;
8650 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8651 frame);
8652}
8653
8654static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8655 u8 device_rev)
8656{
8657 u32 frame;
8658
8659 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8660 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8661 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8662}
8663
8664static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8665 u8 *device_rev)
8666{
8667 u32 frame;
8668
8669 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8670 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8671 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8672 & REMOTE_DEVICE_REV_MASK;
8673}
8674
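/* read the 8051 firmware version out of the MISC_STATUS frame */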
8675void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
8676{
8677 u32 frame;
8678
8679 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8680 *ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
8681 *ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
8682}
8683
8684static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8685 u8 *continuous)
8686{
8687 u32 frame;
8688
8689 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8690 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8691 & POWER_MANAGEMENT_MASK;
8692 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8693 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8694}
8695
8696static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8697 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8698{
8699 u32 frame;
8700
8701 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8702 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8703 *z = (frame >> Z_SHIFT) & Z_MASK;
8704 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8705 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8706 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8707}
8708
8709static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8710 u8 *remote_tx_rate,
8711 u16 *link_widths)
8712{
8713 u32 frame;
8714
8715 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8716 &frame);
8717 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8718 & REMOTE_TX_RATE_MASK;
8719 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8720}
8721
8722static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8723{
8724 u32 frame;
8725
8726 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8727 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8728}
8729
8730static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8731{
8732 u32 frame;
8733
8734 read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8735 *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8736}
8737
8738static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8739{
8740 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8741}
8742
8743static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8744{
8745 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8746}
8747
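/*
 * Read the link quality indicator.  It is only valid while the link is
 * up; otherwise report 0.
 */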
8748void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8749{
8750 u32 frame;
8751 int ret;
8752
8753 *link_quality = 0;
8754 if (dd->pport->host_link_state & HLS_UP) {
8755 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
8756 &frame);
8757 if (ret == 0)
8758 *link_quality = (frame >> LINK_QUALITY_SHIFT)
8759 & LINK_QUALITY_MASK;
8760 }
8761}
8762
8763static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8764{
8765 u32 frame;
8766
8767 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8768 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8769}
8770
8771static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
8772{
8773 u32 frame;
8774
8775 read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
8776 *ldr = (frame & 0xff);
8777}
8778
8779static int read_tx_settings(struct hfi1_devdata *dd,
8780 u8 *enable_lane_tx,
8781 u8 *tx_polarity_inversion,
8782 u8 *rx_polarity_inversion,
8783 u8 *max_rate)
8784{
8785 u32 frame;
8786 int ret;
8787
8788 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8789 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8790 & ENABLE_LANE_TX_MASK;
8791 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8792 & TX_POLARITY_INVERSION_MASK;
8793 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8794 & RX_POLARITY_INVERSION_MASK;
8795 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8796 return ret;
8797}
8798
8799static int write_tx_settings(struct hfi1_devdata *dd,
8800 u8 enable_lane_tx,
8801 u8 tx_polarity_inversion,
8802 u8 rx_polarity_inversion,
8803 u8 max_rate)
8804{
8805 u32 frame;
8806
8807
8808 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8809 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8810 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8811 | max_rate << MAX_RATE_SHIFT;
8812 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8813}
8814
/*
 * Read an idle LCB message.
 *
 * Returns 0 on success, -EINVAL on error.
 */
8820static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8821{
8822 int ret;
8823
8824 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
8825 if (ret != HCMD_SUCCESS) {
8826 dd_dev_err(dd, "read idle message: type %d, err %d\n",
8827 (u32)type, ret);
8828 return -EINVAL;
8829 }
8830 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8831
8832 *data_out >>= IDLE_PAYLOAD_SHIFT;
8833 return 0;
8834}
8835
/*
 * Read an idle SMA message.  To be done in response to a notification
 * from the 8051.
 *
 * Returns 0 on success, -EINVAL on error.
 */
8842static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8843{
8844 return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
8845 data);
8846}
8847
/*
 * Send an idle LCB message.
 *
 * Returns 0 on success, -EINVAL on error.
 */
8853static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8854{
8855 int ret;
8856
8857 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8858 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8859 if (ret != HCMD_SUCCESS) {
8860 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
8861 data, ret);
8862 return -EINVAL;
8863 }
8864 return 0;
8865}
8866
/*
 * Send an idle SMA message.
 *
 * Returns 0 on success, -EINVAL on error.
 */
8872int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8873{
8874 u64 data;
8875
8876 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
8877 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
8878 return send_idle_message(dd, data);
8879}
8880
/*
 * Initialize the LCB then do a quick link up.  This may or may not be
 * in loopback.
 *
 * Returns 0 on success, -errno on error.
 */
8887static int do_quick_linkup(struct hfi1_devdata *dd)
8888{
8889 u64 reg;
8890 unsigned long timeout;
8891 int ret;
8892
8893 lcb_shutdown(dd, 0);
8894
8895 if (loopback) {
8896
8897
8898 write_csr(dd, DC_LCB_CFG_LOOPBACK,
8899 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
8900 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
8901 }
8902
8903
8904
8905 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
8906
8907
8908 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8909
8910 write_csr(dd, DC_LCB_CFG_RUN,
8911 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
8912
8913
8914 timeout = jiffies + msecs_to_jiffies(10);
8915 while (1) {
8916 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
8917 if (reg)
8918 break;
8919 if (time_after(jiffies, timeout)) {
8920 dd_dev_err(dd,
8921 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
8922 return -ETIMEDOUT;
8923 }
8924 udelay(2);
8925 }
8926
8927 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
8928 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
8929 }
8930
8931 if (!loopback) {
		/*
		 * When doing quick linkup and not in loopback, both
		 * sides must be done with LCB set-up before either
		 * starts the quick linkup.  Put a delay here so that
		 * both sides can be started and have a chance to be
		 * done with LCB set up before resuming.
		 */
8939 dd_dev_err(dd,
8940 "Pausing for peer to be finished with LCB set up\n");
8941 msleep(5000);
8942 dd_dev_err(dd, "Continuing with quick linkup\n");
8943 }
8944
	write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
	set_8051_lcb_access(dd);

	/*
	 * State "quick" LinkUp request sets the physical link state to
	 * LinkUp.  Without a "quick" LinkUp request, the 8051 moves to
	 * poll.
	 */
8953 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
8954 if (ret != HCMD_SUCCESS) {
8955 dd_dev_err(dd,
8956 "%s: set physical link state to quick LinkUp failed with return %d\n",
8957 __func__, ret);
8958
8959 set_host_lcb_access(dd);
8960 write_csr(dd, DC_LCB_ERR_EN, ~0ull);
8961
8962 if (ret >= 0)
8963 ret = -EINVAL;
8964 return ret;
8965 }
8966
8967 return 0;
8968}
8969
/*
 * Set the SerDes to internal loopback mode.
 * Returns 0 on success, -errno on error.
 */
8974static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
8975{
8976 int ret;
8977
8978 ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
8979 if (ret == HCMD_SUCCESS)
8980 return 0;
8981 dd_dev_err(dd,
8982 "Set physical link state to SerDes Loopback failed with return %d\n",
8983 ret);
8984 if (ret >= 0)
8985 ret = -EINVAL;
8986 return ret;
8987}
8988
/*
 * Do all special steps to set up loopback.
 */
8992static int init_loopback(struct hfi1_devdata *dd)
8993{
8994 dd_dev_info(dd, "Entering loopback mode\n");
8995
8996
8997 write_csr(dd, DC_DC8051_CFG_MODE,
8998 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
8999
	/*
	 * The simulator has only one loopback option - LCB.  Switch
	 * to that option, which includes quick link up.
	 *
	 * Accept all valid loopback values.
	 */
9006 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9007 (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9008 loopback == LOOPBACK_CABLE)) {
9009 loopback = LOOPBACK_LCB;
9010 quick_linkup = 1;
9011 return 0;
9012 }
9013
9014
9015 if (loopback == LOOPBACK_SERDES) {
9016
9017 if (dd->icode == ICODE_RTL_SILICON)
9018 quick_linkup = 1;
9019 return set_serdes_loopback_mode(dd);
9020 }
9021
9022
9023 if (loopback == LOOPBACK_LCB) {
9024 quick_linkup = 1;
9025
9026
9027 if (dd->icode == ICODE_FPGA_EMULATION) {
9028 dd_dev_err(dd,
9029 "LCB loopback not supported in emulation\n");
9030 return -EINVAL;
9031 }
9032 return 0;
9033 }
9034
9035
9036 if (loopback == LOOPBACK_CABLE)
9037 return 0;
9038
9039 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9040 return -EINVAL;
9041}
9042
/*
 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
 * used in the Verify Capability link width attribute.
 */
9047static u16 opa_to_vc_link_widths(u16 opa_widths)
9048{
9049 int i;
9050 u16 result = 0;
9051
9052 static const struct link_bits {
9053 u16 from;
9054 u16 to;
9055 } opa_link_xlate[] = {
9056 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
9057 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
9058 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
9059 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
9060 };
9061
9062 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9063 if (opa_widths & opa_link_xlate[i].from)
9064 result |= opa_link_xlate[i].to;
9065 }
9066 return result;
9067}
9068
/*
 * Set local link attributes before moving to polling.
 */
9072static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9073{
9074 struct hfi1_devdata *dd = ppd->dd;
9075 u8 enable_lane_tx;
9076 u8 tx_polarity_inversion;
9077 u8 rx_polarity_inversion;
9078 int ret;
9079
9080
9081 fabric_serdes_reset(dd);
9082
9083
9084 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
9085 &rx_polarity_inversion, &ppd->local_tx_rate);
9086 if (ret)
9087 goto set_local_link_attributes_fail;
9088
9089 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
9090
9091 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9092 ppd->local_tx_rate = 1;
9093 else
9094 ppd->local_tx_rate = 0;
9095 } else {
9096
9097 ppd->local_tx_rate = 0;
9098 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9099 ppd->local_tx_rate |= 2;
9100 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9101 ppd->local_tx_rate |= 1;
9102 }
9103
9104 enable_lane_tx = 0xF;
9105 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
9106 rx_polarity_inversion, ppd->local_tx_rate);
9107 if (ret != HCMD_SUCCESS)
9108 goto set_local_link_attributes_fail;
9109
	/*
	 * DC supports continuous updates.
	 */
	ret = write_vc_local_phy(dd,
				 0 /* no power management */,
				 1 /* continuous updates */);
9116 if (ret != HCMD_SUCCESS)
9117 goto set_local_link_attributes_fail;
9118
9119
9120 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9121 ppd->port_crc_mode_enabled);
9122 if (ret != HCMD_SUCCESS)
9123 goto set_local_link_attributes_fail;
9124
9125 ret = write_vc_local_link_width(dd, 0, 0,
9126 opa_to_vc_link_widths(
9127 ppd->link_width_enabled));
9128 if (ret != HCMD_SUCCESS)
9129 goto set_local_link_attributes_fail;
9130
9131
9132 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9133 if (ret == HCMD_SUCCESS)
9134 return 0;
9135
9136set_local_link_attributes_fail:
9137 dd_dev_err(dd,
9138 "Failed to set local link attributes, return 0x%x\n",
9139 ret);
9140 return ret;
9141}
9142
/*
 * Call this to start the link.
 * Do not do anything if the link is disabled.
 * Returns 0 if link is disabled, moved to polling, or the driver is not
 * ready.
 */
9148int start_link(struct hfi1_pportdata *ppd)
9149{
	/*
	 * Tune the SerDes to a ballpark setting for optimal signal and bit
	 * error rate.  Needs to be done before starting the link.
	 */
9154 tune_serdes(ppd);
9155
9156 if (!ppd->link_enabled) {
9157 dd_dev_info(ppd->dd,
9158 "%s: stopping link start because link is disabled\n",
9159 __func__);
9160 return 0;
9161 }
9162 if (!ppd->driver_link_ready) {
9163 dd_dev_info(ppd->dd,
9164 "%s: stopping link start because driver is not ready\n",
9165 __func__);
9166 return 0;
9167 }
9168
	/*
	 * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
	 * pkey table can be configured properly if the HFI unit is connected
	 * to switch port with MgmtAllowed=NO.
	 */
9174 clear_full_mgmt_pkey(ppd);
9175
9176 return set_link_state(ppd, HLS_DN_POLL);
9177}
9178
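/* wait for a freshly reset or powered-up QSFP module to settle */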
9179static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9180{
9181 struct hfi1_devdata *dd = ppd->dd;
9182 u64 mask;
9183 unsigned long timeout;
9184
	/*
	 * Some QSFP cables have a quirk that asserts the IntN line as a side
	 * effect of power up on plug-in.  We ignore this false positive
	 * interrupt until the module has finished powering up by waiting for
	 * a minimum timeout of the module inrush initialization time of
	 * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
	 * module have stabilized.
	 */
9193 msleep(500);
9194
9195
9196
9197
9198 timeout = jiffies + msecs_to_jiffies(2000);
9199 while (1) {
9200 mask = read_csr(dd, dd->hfi1_id ?
9201 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9202 if (!(mask & QSFP_HFI0_INT_N))
9203 break;
9204 if (time_after(jiffies, timeout)) {
9205 dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9206 __func__);
9207 break;
9208 }
9209 udelay(2);
9210 }
9211}
9212
9213static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9214{
9215 struct hfi1_devdata *dd = ppd->dd;
9216 u64 mask;
9217
9218 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9219 if (enable) {
		/*
		 * Clear the status register to avoid an immediate interrupt
		 * when we re-enable the IntN pin
		 */
9224 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9225 QSFP_HFI0_INT_N);
9226 mask |= (u64)QSFP_HFI0_INT_N;
9227 } else {
9228 mask &= ~(u64)QSFP_HFI0_INT_N;
9229 }
9230 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9231}
9232
9233void reset_qsfp(struct hfi1_pportdata *ppd)
9234{
9235 struct hfi1_devdata *dd = ppd->dd;
9236 u64 mask, qsfp_mask;
9237
9238
9239 set_qsfp_int_n(ppd, 0);
9240
9241
9242 mask = (u64)QSFP_HFI0_RESET_N;
9243
9244 qsfp_mask = read_csr(dd,
9245 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
9246 qsfp_mask &= ~mask;
9247 write_csr(dd,
9248 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9249
9250 udelay(10);
9251
9252 qsfp_mask |= mask;
9253 write_csr(dd,
9254 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9255
9256 wait_for_qsfp_init(ppd);
9257
	/*
	 * Allow INT_N to trigger the QSFP interrupt to watch
	 * for alarms and warnings
	 */
9262 set_qsfp_int_n(ppd, 1);
9263}
9264
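/*
 * Decode and log the QSFP alarm/warning status bytes read from the
 * module's interrupt flag area.
 */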
9265static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9266 u8 *qsfp_interrupt_status)
9267{
9268 struct hfi1_devdata *dd = ppd->dd;
9269
9270 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9271 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9272 dd_dev_info(dd, "%s: QSFP cable on fire\n",
9273 __func__);
9274
9275 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9276 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9277 dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
9278 __func__);
9279
9280
9281
9282
9283 if (ppd->host_link_state & HLS_DOWN)
9284 return 0;
9285
9286 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9287 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9288 dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
9289 __func__);
9290
9291 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9292 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9293 dd_dev_info(dd, "%s: QSFP supply voltage too low\n",
9294 __func__);
9295
9296
9297
9298 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9299 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9300 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too high\n",
9301 __func__);
9302
9303 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9304 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9305 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too low\n",
9306 __func__);
9307
9308 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9309 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9310 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too high\n",
9311 __func__);
9312
9313 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9314 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9315 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too low\n",
9316 __func__);
9317
9318 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9319 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9320 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too high\n",
9321 __func__);
9322
9323 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9324 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9325 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too low\n",
9326 __func__);
9327
9328 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9329 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9330 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too high\n",
9331 __func__);
9332
9333 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9334 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9335 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too low\n",
9336 __func__);
9337
9338 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9339 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9340 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too high\n",
9341 __func__);
9342
9343 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9344 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9345 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too low\n",
9346 __func__);
9347
9348 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9349 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9350 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too high\n",
9351 __func__);
9352
9353 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9354 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9355 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too low\n",
9356 __func__);
9357
9358
9359
9360
9361 return 0;
9362}
9363
9364
9365void qsfp_event(struct work_struct *work)
9366{
9367 struct qsfp_data *qd;
9368 struct hfi1_pportdata *ppd;
9369 struct hfi1_devdata *dd;
9370
9371 qd = container_of(work, struct qsfp_data, qsfp_work);
9372 ppd = qd->ppd;
9373 dd = ppd->dd;
9374
9375
9376 if (!qsfp_mod_present(ppd))
9377 return;
9378
	/*
	 * Turn DC back on after cable has been re-inserted.  Up until
	 * now, the DC has been in reset to save power.
	 */
9383 dc_start(dd);
9384
9385 if (qd->cache_refresh_required) {
9386 set_qsfp_int_n(ppd, 0);
9387
9388 wait_for_qsfp_init(ppd);
9389
9390
9391
9392
9393
9394 set_qsfp_int_n(ppd, 1);
9395
9396 start_link(ppd);
9397 }
9398
9399 if (qd->check_interrupt_flags) {
9400 u8 qsfp_interrupt_status[16] = {0,};
9401
9402 if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9403 &qsfp_interrupt_status[0], 16) != 16) {
9404 dd_dev_info(dd,
9405 "%s: Failed to read status of QSFP module\n",
9406 __func__);
9407 } else {
9408 unsigned long flags;
9409
9410 handle_qsfp_error_conditions(
9411 ppd, qsfp_interrupt_status);
9412 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9413 ppd->qsfp_info.check_interrupt_flags = 0;
9414 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9415 flags);
9416 }
9417 }
9418}
9419
9420static void init_qsfp_int(struct hfi1_devdata *dd)
9421{
9422 struct hfi1_pportdata *ppd = dd->pport;
9423 u64 qsfp_mask, cce_int_mask;
9424 const int qsfp1_int_smask = QSFP1_INT % 64;
9425 const int qsfp2_int_smask = QSFP2_INT % 64;
9426
	/*
	 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
	 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
	 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
	 * the index of the appropriate CSR in the CCEIntMask CSR array
	 */
9433 cce_int_mask = read_csr(dd, CCE_INT_MASK +
9434 (8 * (QSFP1_INT / 64)));
9435 if (dd->hfi1_id) {
9436 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9437 write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9438 cce_int_mask);
9439 } else {
9440 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9441 write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9442 cce_int_mask);
9443 }
9444
9445 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9446
9447 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9448 qsfp_mask);
9449 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9450 qsfp_mask);
9451
9452 set_qsfp_int_n(ppd, 0);
9453
9454
9455 if (qsfp_mod_present(ppd))
9456 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9457 write_csr(dd,
9458 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9459 qsfp_mask);
9460}
9461
/*
 * Do a one-time initialize of the LCB block.
 */
9465static void init_lcb(struct hfi1_devdata *dd)
9466{
9467
9468 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9469 return;
9470
9471
9472
9473
9474 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9475 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9476 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9477 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9478 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9479 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9480 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9481}
9482
/*
 * Perform a test read on the QSFP.  Return 0 on success, -ERRNO
 * on error.
 */
9487static int test_qsfp_read(struct hfi1_pportdata *ppd)
9488{
9489 int ret;
9490 u8 status;
9491
9492
9493 if (ppd->port_type != PORT_TYPE_QSFP)
9494 return 0;
9495
9496
9497 ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
9498 if (ret < 0)
9499 return ret;
9500 if (ret != 1)
9501 return -EIO;
9502
9503 return 0;
9504}
9505
/*
 * Values for QSFP retry.
 *
 * Give up after MAX_QSFP_RETRIES attempts, waiting QSFP_RETRY_WAIT
 * milliseconds between each retry.
 */
#define MAX_QSFP_RETRIES 20
#define QSFP_RETRY_WAIT 500 /* msec */
9514
/*
 * Try a QSFP read.  If it fails, schedule a retry for later.
 * Called on first link activation after driver load.
 */
9519static void try_start_link(struct hfi1_pportdata *ppd)
9520{
9521 if (test_qsfp_read(ppd)) {
9522
9523 if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
9524 dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
9525 return;
9526 }
9527 dd_dev_info(ppd->dd,
9528 "QSFP not responding, waiting and retrying %d\n",
9529 (int)ppd->qsfp_retry_count);
9530 ppd->qsfp_retry_count++;
9531 queue_delayed_work(ppd->hfi1_wq, &ppd->start_link_work,
9532 msecs_to_jiffies(QSFP_RETRY_WAIT));
9533 return;
9534 }
9535 ppd->qsfp_retry_count = 0;
9536
9537 start_link(ppd);
9538}
9539
/*
 * Workqueue function to retry starting the link after a delay.
 */
9543void handle_start_link(struct work_struct *work)
9544{
9545 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
9546 start_link_work.work);
9547 try_start_link(ppd);
9548}
9549
9550int bringup_serdes(struct hfi1_pportdata *ppd)
9551{
9552 struct hfi1_devdata *dd = ppd->dd;
9553 u64 guid;
9554 int ret;
9555
9556 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9557 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9558
9559 guid = ppd->guid;
9560 if (!guid) {
9561 if (dd->base_guid)
9562 guid = dd->base_guid + ppd->port - 1;
9563 ppd->guid = guid;
9564 }
9565
9566
9567 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9568
9569
9570 init_lcb(dd);
9571
9572 if (loopback) {
9573 ret = init_loopback(dd);
9574 if (ret < 0)
9575 return ret;
9576 }
9577
9578 get_port_type(ppd);
9579 if (ppd->port_type == PORT_TYPE_QSFP) {
9580 set_qsfp_int_n(ppd, 0);
9581 wait_for_qsfp_init(ppd);
9582 set_qsfp_int_n(ppd, 1);
9583 }
9584
9585 try_start_link(ppd);
9586 return 0;
9587}
9588
9589void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9590{
9591 struct hfi1_devdata *dd = ppd->dd;
9592
	/*
	 * Shut down the link and keep it down.  First turn off that the
	 * driver wants to allow the link to be up (driver_link_ready).
	 * Then make sure the link is not automatically restarted
	 * (link_enabled).  Cancel any pending restart.  And finally
	 * go offline.
	 */
9600 ppd->driver_link_ready = 0;
9601 ppd->link_enabled = 0;
9602
9603 ppd->qsfp_retry_count = MAX_QSFP_RETRIES;
9604 flush_delayed_work(&ppd->start_link_work);
9605 cancel_delayed_work_sync(&ppd->start_link_work);
9606
9607 ppd->offline_disabled_reason =
9608 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
9609 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
9610 OPA_LINKDOWN_REASON_SMA_DISABLED);
9611 set_link_state(ppd, HLS_DN_OFFLINE);
9612
9613
9614 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9615}
9616
9617static inline int init_cpu_counters(struct hfi1_devdata *dd)
9618{
9619 struct hfi1_pportdata *ppd;
9620 int i;
9621
9622 ppd = (struct hfi1_pportdata *)(dd + 1);
9623 for (i = 0; i < dd->num_pports; i++, ppd++) {
9624 ppd->ibport_data.rvp.rc_acks = NULL;
9625 ppd->ibport_data.rvp.rc_qacks = NULL;
9626 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9627 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9628 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9629 if (!ppd->ibport_data.rvp.rc_acks ||
9630 !ppd->ibport_data.rvp.rc_delayed_comp ||
9631 !ppd->ibport_data.rvp.rc_qacks)
9632 return -ENOMEM;
9633 }
9634
9635 return 0;
9636}
9637
9638static const char * const pt_names[] = {
9639 "expected",
9640 "eager",
9641 "invalid"
9642};
9643
9644static const char *pt_name(u32 type)
9645{
9646 return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9647}
9648
/*
 * Write an entry into the RcvArray.  "index" is the index into
 * the array; "type", "pa", and "order" describe the entry.
 */
void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
		  u32 type, unsigned long pa, u16 order)
9654{
9655 u64 reg;
9656 void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9657 (dd->kregbase + RCV_ARRAY));
9658
9659 if (!(dd->flags & HFI1_PRESENT))
9660 goto done;
9661
9662 if (type == PT_INVALID) {
9663 pa = 0;
9664 } else if (type > PT_INVALID) {
9665 dd_dev_err(dd,
9666 "unexpected receive array type %u for index %u, not handled\n",
9667 type, index);
9668 goto done;
9669 }
9670
9671 hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9672 pt_name(type), index, pa, (unsigned long)order);
9673
9674#define RT_ADDR_SHIFT 12
9675 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9676 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9677 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9678 << RCV_ARRAY_RT_ADDR_SHIFT;
9679 writeq(reg, base + (index * 8));
9680
	if (type == PT_EAGER)
		/*
		 * Eager entries are written one-by-one so we have to push
		 * them after we write the entry.
		 */
		flush_wc();
9687done:
9688 return;
9689}
9690
9691void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9692{
9693 struct hfi1_devdata *dd = rcd->dd;
9694 u32 i;
9695
	/* invalidate all eager entries for this context */
	for (i = rcd->eager_base; i < rcd->eager_base +
		     rcd->egrbufs.alloced; i++)
		hfi1_put_tid(dd, i, PT_INVALID, 0, 0);

	/* invalidate all expected (TID) entries */
	for (i = rcd->expected_base;
	     i < rcd->expected_base + rcd->expected_count; i++)
		hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9704}
9705
9706struct ib_header *hfi1_get_msgheader(
9707 struct hfi1_devdata *dd, __le32 *rhf_addr)
9708{
9709 u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
9710
9711 return (struct ib_header *)
9712 (rhf_addr - dd->rhf_offset + offset);
9713}
9714
9715static const char * const ib_cfg_name_strings[] = {
9716 "HFI1_IB_CFG_LIDLMC",
9717 "HFI1_IB_CFG_LWID_DG_ENB",
9718 "HFI1_IB_CFG_LWID_ENB",
9719 "HFI1_IB_CFG_LWID",
9720 "HFI1_IB_CFG_SPD_ENB",
9721 "HFI1_IB_CFG_SPD",
9722 "HFI1_IB_CFG_RXPOL_ENB",
9723 "HFI1_IB_CFG_LREV_ENB",
9724 "HFI1_IB_CFG_LINKLATENCY",
9725 "HFI1_IB_CFG_HRTBT",
9726 "HFI1_IB_CFG_OP_VLS",
9727 "HFI1_IB_CFG_VL_HIGH_CAP",
9728 "HFI1_IB_CFG_VL_LOW_CAP",
9729 "HFI1_IB_CFG_OVERRUN_THRESH",
9730 "HFI1_IB_CFG_PHYERR_THRESH",
9731 "HFI1_IB_CFG_LINKDEFAULT",
9732 "HFI1_IB_CFG_PKEYS",
9733 "HFI1_IB_CFG_MTU",
9734 "HFI1_IB_CFG_LSTATE",
9735 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9736 "HFI1_IB_CFG_PMA_TICKS",
9737 "HFI1_IB_CFG_PORT"
9738};
9739
9740static const char *ib_cfg_name(int which)
9741{
9742 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9743 return "invalid";
9744 return ib_cfg_name_strings[which];
9745}
9746
9747int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9748{
9749 struct hfi1_devdata *dd = ppd->dd;
9750 int val = 0;
9751
9752 switch (which) {
9753 case HFI1_IB_CFG_LWID_ENB:
9754 val = ppd->link_width_enabled;
9755 break;
9756 case HFI1_IB_CFG_LWID:
9757 val = ppd->link_width_active;
9758 break;
9759 case HFI1_IB_CFG_SPD_ENB:
9760 val = ppd->link_speed_enabled;
9761 break;
9762 case HFI1_IB_CFG_SPD:
9763 val = ppd->link_speed_active;
9764 break;
9765
9766 case HFI1_IB_CFG_RXPOL_ENB:
9767 case HFI1_IB_CFG_LREV_ENB:
9768 case HFI1_IB_CFG_LINKLATENCY:
9769 goto unimplemented;
9770
9771 case HFI1_IB_CFG_OP_VLS:
9772 val = ppd->vls_operational;
9773 break;
9774 case HFI1_IB_CFG_VL_HIGH_CAP:
9775 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9776 break;
9777 case HFI1_IB_CFG_VL_LOW_CAP:
9778 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9779 break;
9780 case HFI1_IB_CFG_OVERRUN_THRESH:
9781 val = ppd->overrun_threshold;
9782 break;
9783 case HFI1_IB_CFG_PHYERR_THRESH:
9784 val = ppd->phy_error_threshold;
9785 break;
9786 case HFI1_IB_CFG_LINKDEFAULT:
9787 val = dd->link_default;
9788 break;
9789
9790 case HFI1_IB_CFG_HRTBT:
9791 case HFI1_IB_CFG_PMA_TICKS:
9792 default:
9793unimplemented:
9794 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9795 dd_dev_info(
9796 dd,
9797 "%s: which %s: not implemented\n",
9798 __func__,
9799 ib_cfg_name(which));
9800 break;
9801 }
9802
9803 return val;
9804}
9805
/*
 * The largest MAD packet size.
 */
#define MAX_MAD_PACKET 2048

/*
 * Return the maximum header bytes that can go on the _wire_
 * for this device.  This count includes the ICRC which is
 * not part of an actual LRH, so the driver must not use it.
 */
u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
{
	/*
	 * The maximum non-payload (MTU) bytes in LRH.PktLen are
	 * the Receive Header Entry size minus the PBC (or RHF) size
	 * plus one DW for the ICRC appended by HW.
	 *
	 * dd->rcd[0].rcvhdrqentsize is in DW.
	 * We use rcd[0] as all contexts will have the same value.  Also,
	 * the first kernel context would have been allocated by now so
	 * we are guaranteed a valid value.
	 */
	return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
}
9834
/*
 * Set Send Length
 * @ppd: per port data
 *
 * Set the MTU by limiting how many DWs may be sent.  The SendLenCheck*
 * registers compare against LRH.PktLen, so use the max bytes included
 * in the LRH.
 *
 * This routine changes all VL values except VL15, which it maintains at
 * the same value.
 */
9846static void set_send_length(struct hfi1_pportdata *ppd)
9847{
9848 struct hfi1_devdata *dd = ppd->dd;
9849 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9850 u32 maxvlmtu = dd->vld[15].mtu;
9851 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9852 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9853 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
9854 int i, j;
9855 u32 thres;
9856
9857 for (i = 0; i < ppd->vls_supported; i++) {
9858 if (dd->vld[i].mtu > maxvlmtu)
9859 maxvlmtu = dd->vld[i].mtu;
9860 if (i <= 3)
9861 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9862 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9863 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9864 else
9865 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9866 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9867 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9868 }
9869 write_csr(dd, SEND_LEN_CHECK0, len1);
9870 write_csr(dd, SEND_LEN_CHECK1, len2);

	/* adjust kernel credit return thresholds based on new MTUs */
	/* all kernel receive contexts have the same hdrqentsize */
	for (i = 0; i < ppd->vls_supported; i++) {
9874 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
9875 sc_mtu_to_threshold(dd->vld[i].sc,
9876 dd->vld[i].mtu,
9877 dd->rcd[0]->rcvhdrqentsize));
9878 for (j = 0; j < INIT_SC_PER_VL; j++)
9879 sc_set_cr_threshold(
9880 pio_select_send_context_vl(dd, j, i),
9881 thres);
9882 }
9883 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
9884 sc_mtu_to_threshold(dd->vld[15].sc,
9885 dd->vld[15].mtu,
9886 dd->rcd[0]->rcvhdrqentsize));
9887 sc_set_cr_threshold(dd->vld[15].sc, thres);

	/* Adjust maximum MTU for the port in DC */
	dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
		(ilog2(maxvlmtu >> 8) + 1);
9892 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9893 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9894 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9895 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9896 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
9897}
9898
9899static void set_lidlmc(struct hfi1_pportdata *ppd)
9900{
9901 int i;
9902 u64 sreg = 0;
9903 struct hfi1_devdata *dd = ppd->dd;
9904 u32 mask = ~((1U << ppd->lmc) - 1);
9905 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9906
9907 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9908 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9909 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
9910 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
9911 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
9912 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
9913 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
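
	/*
	 * Example: lmc = 2 gives mask = ~0x3, so the low two LID bits are
	 * ignored and the port matches four consecutive DLIDs, as LMC
	 * semantics require.
	 */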

	/*
	 * Iterate over all the send contexts and set their SLID check
	 */
9918 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
9919 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
9920 (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
9921 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
9922
9923 for (i = 0; i < dd->chip_send_contexts; i++) {
9924 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
9925 i, (u32)sreg);
9926 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
9927 }
9928
9929
9930 sdma_update_lmc(dd, mask, ppd->lid);
9931}
9932
9933static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
9934{
9935 unsigned long timeout;
9936 u32 curr_state;
9937
9938 timeout = jiffies + msecs_to_jiffies(msecs);
9939 while (1) {
9940 curr_state = read_physical_state(dd);
9941 if (curr_state == state)
9942 break;
9943 if (time_after(jiffies, timeout)) {
9944 dd_dev_err(dd,
9945 "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
9946 state, curr_state);
9947 return -ETIMEDOUT;
9948 }
9949 usleep_range(1950, 2050);
9950 }
9951
9952 return 0;
9953}
9954
9955static const char *state_completed_string(u32 completed)
9956{
9957 static const char * const state_completed[] = {
9958 "EstablishComm",
9959 "OptimizeEQ",
9960 "VerifyCap"
9961 };
9962
9963 if (completed < ARRAY_SIZE(state_completed))
9964 return state_completed[completed];
9965
9966 return "unknown";
9967}
9968
static const char all_lanes_dead_timeout_expired[] =
	"All lanes were inactive - was the interconnect media removed?";
9971static const char tx_out_of_policy[] =
9972 "Passing lanes on local port do not meet the local link width policy";
9973static const char no_state_complete[] =
9974 "State timeout occurred before link partner completed the state";
9975static const char * const state_complete_reasons[] = {
9976 [0x00] = "Reason unknown",
9977 [0x01] = "Link was halted by driver, refer to LinkDownReason",
9978 [0x02] = "Link partner reported failure",
9979 [0x10] = "Unable to achieve frame sync on any lane",
9980 [0x11] =
9981 "Unable to find a common bit rate with the link partner",
9982 [0x12] =
9983 "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
9984 [0x13] =
9985 "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
9986 [0x14] = no_state_complete,
9987 [0x15] =
9988 "State timeout occurred before link partner identified equalization presets",
9989 [0x16] =
9990 "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
9991 [0x17] = tx_out_of_policy,
9992 [0x20] = all_lanes_dead_timeout_expired,
9993 [0x21] =
9994 "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
9995 [0x22] = no_state_complete,
9996 [0x23] =
9997 "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
9998 [0x24] = tx_out_of_policy,
9999 [0x30] = all_lanes_dead_timeout_expired,
10000 [0x31] =
10001 "State timeout occurred waiting for host to process received frames",
10002 [0x32] = no_state_complete,
10003 [0x33] =
10004 "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
10005 [0x34] = tx_out_of_policy,
10006};
10007
10008static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
10009 u32 code)
10010{
10011 const char *str = NULL;
10012
10013 if (code < ARRAY_SIZE(state_complete_reasons))
10014 str = state_complete_reasons[code];
10015
10016 if (str)
10017 return str;
10018 return "Reserved";
10019}
10020

/* decode and log an LNI "state complete" frame */
static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
				  const char *prefix)
10024{
10025 struct hfi1_devdata *dd = ppd->dd;
10026 u32 success;
10027 u32 state;
10028 u32 reason;
10029 u32 lanes;
10030

	/*
	 * Decode frame:
	 *	[ 0: 0] - success
	 *	[ 3: 1] - state
	 *	[15: 8] - reason code
	 *	[31:16] - lane mask
	 */
	success = frame & 0x1;
	state = (frame >> 1) & 0x7;
	reason = (frame >> 8) & 0xff;
	lanes = (frame >> 16) & 0xffff;
10043
10044 dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
10045 prefix, frame);
	dd_dev_err(dd, "   last reported state: %s (0x%x)\n",
10047 state_completed_string(state), state);
10048 dd_dev_err(dd, " state successfully completed: %s\n",
10049 success ? "yes" : "no");
10050 dd_dev_err(dd, " fail reason 0x%x: %s\n",
10051 reason, state_complete_reason_code_string(ppd, reason));
10052 dd_dev_err(dd, " passing lane mask: 0x%x", lanes);
10053}
10054
/*
 * Read the last state complete frames and explain them.  This routine
 * expects to be called if the link went down during link negotiation
 * and initialization (LNI).  That is, anywhere between polling and link up.
 */
static void check_lni_states(struct hfi1_pportdata *ppd)
{
	u32 last_local_state;
	u32 last_remote_state;

	read_last_local_state(ppd->dd, &last_local_state);
	read_last_remote_state(ppd->dd, &last_remote_state);

	/*
	 * Don't report anything if there is nothing to report.  A value of
	 * 0 means the link was taken down while in polling and there was
	 * no training in-process.
	 */
	if (last_local_state == 0 && last_remote_state == 0)
		return;

	decode_state_complete(ppd, last_local_state, "transmitted");
	decode_state_complete(ppd, last_remote_state, "received");
}
10079
/*
 * Helper for set_link_state().  Do not call except from that routine.
 * Expects ppd->hls_lock to be held.
 *
 * @rem_reason value to be sent to the neighbor
 *
 * LinkDownReasons only set if transition succeeds.
 */
10088static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10089{
10090 struct hfi1_devdata *dd = ppd->dd;
10091 u32 pstate, previous_state;
10092 int ret;
10093 int do_transition;
10094 int do_wait;
10095
10096 previous_state = ppd->host_link_state;
10097 ppd->host_link_state = HLS_GOING_OFFLINE;
10098 pstate = read_physical_state(dd);
10099 if (pstate == PLS_OFFLINE) {
10100 do_transition = 0;
10101 do_wait = 0;
10102 } else if ((pstate & 0xff) == PLS_OFFLINE) {
10103 do_transition = 0;
10104 do_wait = 1;
10105 } else {
10106 do_transition = 1;
10107 do_wait = 1;
10108 }
10109
10110 if (do_transition) {
10111 ret = set_physical_link_state(dd,
10112 (rem_reason << 8) | PLS_OFFLINE);
10113
10114 if (ret != HCMD_SUCCESS) {
10115 dd_dev_err(dd,
10116 "Failed to transition to Offline link state, return %d\n",
10117 ret);
10118 return -EINVAL;
10119 }
10120 if (ppd->offline_disabled_reason ==
10121 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
10122 ppd->offline_disabled_reason =
10123 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
10124 }
10125
	if (do_wait) {
		/* it can take a while for the link to go down */
		ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
		if (ret < 0)
			return ret;
	}

	/* make sure the logical state is also down */
	wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);

	/*
	 * Now in charge of LCB - must be after the physical state is
	 * offline.quiet and before host_link_state is changed.
	 */
	set_host_lcb_access(dd);
	write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
	ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10143
10144 if (ppd->port_type == PORT_TYPE_QSFP &&
10145 ppd->qsfp_info.limiting_active &&
10146 qsfp_mod_present(ppd)) {
10147 int ret;
10148
10149 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
10150 if (ret == 0) {
10151 set_qsfp_tx(ppd, 0);
10152 release_chip_resource(dd, qsfp_resource(dd));
		} else {
			/* nothing we can do here; warn and carry on */
			dd_dev_err(dd,
				   "Unable to acquire lock to turn off QSFP TX\n");
		}
10158 }
10159

	/*
	 * The LNI has a mandatory wait time after the physical state
	 * moves to Offline.Quiet.  The wait time may be different
	 * depending on how the link went down.  The 8051 firmware
	 * will observe the needed wait time and only move to ready
	 * when that is completed.  The largest of the quiet timeouts
	 * is 6s, so wait that long and then at least 0.5s more for
	 * other transitions, and another 0.5s for a buffer.
	 */
	ret = wait_fm_ready(dd, 7000);
10170 if (ret) {
10171 dd_dev_err(dd,
10172 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
		/* the 8051 never became ready; mark the port offline anyway */
		ppd->host_link_state = HLS_DN_OFFLINE;
10175 return ret;
10176 }

	/*
	 * The state is now offline and the 8051 is ready to accept host
	 * requests.
	 *	- change our state
	 *	- notify others if we were previously in a linkup state
	 */
	ppd->host_link_state = HLS_DN_OFFLINE;
	if (previous_state & HLS_UP) {
		/* went down while link was up */
		handle_linkup_change(dd, 0);
	} else if (previous_state
			& (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
		/* went down while attempting link up */
		check_lni_states(ppd);
	}

	/* the active link width (downgrade) is 0 on link down */
	ppd->link_width_active = 0;
	ppd->link_width_downgrade_tx_active = 0;
	ppd->link_width_downgrade_rx_active = 0;
10198 ppd->current_egress_rate = 0;
10199 return 0;
10200}
10201
/* return the link state name */
static const char *link_state_name(u32 state)
10204{
10205 const char *name;
10206 int n = ilog2(state);
10207 static const char * const names[] = {
10208 [__HLS_UP_INIT_BP] = "INIT",
10209 [__HLS_UP_ARMED_BP] = "ARMED",
10210 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
10211 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
10212 [__HLS_DN_POLL_BP] = "POLL",
10213 [__HLS_DN_DISABLE_BP] = "DISABLE",
10214 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
10215 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
10216 [__HLS_GOING_UP_BP] = "GOING_UP",
10217 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10218 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10219 };
10220
10221 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10222 return name ? name : "unknown";
10223}
10224

/* return the link state reason name */
static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10227{
10228 if (state == HLS_UP_INIT) {
10229 switch (ppd->linkinit_reason) {
10230 case OPA_LINKINIT_REASON_LINKUP:
10231 return "(LINKUP)";
10232 case OPA_LINKINIT_REASON_FLAPPING:
10233 return "(FLAPPING)";
10234 case OPA_LINKINIT_OUTSIDE_POLICY:
10235 return "(OUTSIDE_POLICY)";
10236 case OPA_LINKINIT_QUARANTINED:
10237 return "(QUARANTINED)";
10238 case OPA_LINKINIT_INSUFIC_CAPABILITY:
10239 return "(INSUFIC_CAPABILITY)";
10240 default:
10241 break;
10242 }
10243 }
10244 return "";
10245}
10246
/*
 * driver_physical_state - convert the driver's notion of a port's
 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
 *
 * Return -1 (converted to a u32) to indicate error.
 */
u32 driver_physical_state(struct hfi1_pportdata *ppd)
10253{
10254 switch (ppd->host_link_state) {
10255 case HLS_UP_INIT:
10256 case HLS_UP_ARMED:
10257 case HLS_UP_ACTIVE:
10258 return IB_PORTPHYSSTATE_LINKUP;
10259 case HLS_DN_POLL:
10260 return IB_PORTPHYSSTATE_POLLING;
10261 case HLS_DN_DISABLE:
10262 return IB_PORTPHYSSTATE_DISABLED;
10263 case HLS_DN_OFFLINE:
10264 return OPA_PORTPHYSSTATE_OFFLINE;
10265 case HLS_VERIFY_CAP:
10266 return IB_PORTPHYSSTATE_POLLING;
10267 case HLS_GOING_UP:
10268 return IB_PORTPHYSSTATE_POLLING;
10269 case HLS_GOING_OFFLINE:
10270 return OPA_PORTPHYSSTATE_OFFLINE;
10271 case HLS_LINK_COOLDOWN:
10272 return OPA_PORTPHYSSTATE_OFFLINE;
10273 case HLS_DN_DOWNDEF:
10274 default:
10275 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10276 ppd->host_link_state);
10277 return -1;
10278 }
10279}
10280
/*
 * driver_logical_state - convert the driver's notion of a port's
 * state (an HLS_*) into a logical state (an IB_PORT_*).  Return -1
 * (converted to a u32) to indicate error.
 */
u32 driver_logical_state(struct hfi1_pportdata *ppd)
10287{
10288 if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
10289 return IB_PORT_DOWN;
10290
10291 switch (ppd->host_link_state & HLS_UP) {
10292 case HLS_UP_INIT:
10293 return IB_PORT_INIT;
10294 case HLS_UP_ARMED:
10295 return IB_PORT_ARMED;
10296 case HLS_UP_ACTIVE:
10297 return IB_PORT_ACTIVE;
10298 default:
10299 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10300 ppd->host_link_state);
10301 return -1;
10302 }
10303}
10304
10305void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10306 u8 neigh_reason, u8 rem_reason)
10307{
10308 if (ppd->local_link_down_reason.latest == 0 &&
10309 ppd->neigh_link_down_reason.latest == 0) {
10310 ppd->local_link_down_reason.latest = lcl_reason;
10311 ppd->neigh_link_down_reason.latest = neigh_reason;
10312 ppd->remote_link_down_reason = rem_reason;
10313 }
10314}
10315
/*
 * Change the physical and/or logical link state.
 *
 * Do not call this routine while inside an interrupt.  It contains
 * calls to routines that can take multiple seconds to finish.
 *
 * Returns 0 on success, -errno on failure.
 */
int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10325{
10326 struct hfi1_devdata *dd = ppd->dd;
10327 struct ib_event event = {.device = NULL};
10328 int ret1, ret = 0;
10329 int orig_new_state, poll_bounce;
10330
10331 mutex_lock(&ppd->hls_lock);
10332
10333 orig_new_state = state;
10334 if (state == HLS_DN_DOWNDEF)
10335 state = dd->link_default;

	/* interpret poll -> poll as a link bounce */
	poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
		      state == HLS_DN_POLL;
10340
10341 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10342 link_state_name(ppd->host_link_state),
10343 link_state_name(orig_new_state),
10344 poll_bounce ? "(bounce) " : "",
10345 link_state_reason_name(ppd, state));

	/*
	 * If we're going to a (HLS_*) link state that implies the logical
	 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
	 * reset is_sm_config_started to 0.
	 */
	if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
		ppd->is_sm_config_started = 0;

	/*
	 * Do nothing if the states match.  Let a poll to poll link bounce
	 * go through.
	 */
	if (ppd->host_link_state == state && !poll_bounce)
		goto done;
10361
10362 switch (state) {
10363 case HLS_UP_INIT:
10364 if (ppd->host_link_state == HLS_DN_POLL &&
10365 (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
			/*
			 * Quick link up jumps from polling to here.
			 *
			 * Whether in normal or loopback mode, the
			 * simulator jumps from polling to link up.
			 * Accept that here.
			 */
			/* OK */
10374 } else if (ppd->host_link_state != HLS_GOING_UP) {
10375 goto unexpected;
10376 }
10377
10378 ppd->host_link_state = HLS_UP_INIT;
10379 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
		if (ret) {
			/* logical state didn't change, stay at going_up */
			ppd->host_link_state = HLS_GOING_UP;
			dd_dev_err(dd,
				   "%s: logical state did not change to INIT\n",
				   __func__);
		} else {
			/* clear old transient LINKINIT_REASON code */
			if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
				ppd->linkinit_reason =
					OPA_LINKINIT_REASON_LINKUP;

			/* enable the port */
			add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);

			handle_linkup_change(dd, 1);
		}
10397 break;
10398 case HLS_UP_ARMED:
10399 if (ppd->host_link_state != HLS_UP_INIT)
10400 goto unexpected;
10401
10402 ppd->host_link_state = HLS_UP_ARMED;
10403 set_logical_state(dd, LSTATE_ARMED);
10404 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
		if (ret) {
			/* logical state didn't change, stay at init */
			ppd->host_link_state = HLS_UP_INIT;
			dd_dev_err(dd,
				   "%s: logical state did not change to ARMED\n",
				   __func__);
		}

		/*
		 * The simulator does not currently implement SMA messages,
		 * so neighbor_normal is not set.  Set it here when we first
		 * move to Armed.
		 */
		if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
			ppd->neighbor_normal = 1;
10419 break;
10420 case HLS_UP_ACTIVE:
10421 if (ppd->host_link_state != HLS_UP_ARMED)
10422 goto unexpected;
10423
10424 ppd->host_link_state = HLS_UP_ACTIVE;
10425 set_logical_state(dd, LSTATE_ACTIVE);
10426 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
		if (ret) {
			/* logical state didn't change, stay at armed */
			ppd->host_link_state = HLS_UP_ARMED;
			dd_dev_err(dd,
				   "%s: logical state did not change to ACTIVE\n",
				   __func__);
		} else {
			/* tell all engines to go running */
			sdma_all_running(dd);

			/* signal the IB layer that the port has gone active */
			event.device = &dd->verbs_dev.rdi.ibdev;
			event.element.port_num = ppd->port;
			event.event = IB_EVENT_PORT_ACTIVE;
		}
10442 break;
10443 case HLS_DN_POLL:
10444 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10445 ppd->host_link_state == HLS_DN_OFFLINE) &&
10446 dd->dc_shutdown)
10447 dc_start(dd);

		/* Hand LED control to the DC */
		write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10450
10451 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10452 u8 tmp = ppd->link_enabled;
10453
10454 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10455 if (ret) {
10456 ppd->link_enabled = tmp;
10457 break;
10458 }
10459 ppd->remote_link_down_reason = 0;
10460
10461 if (ppd->driver_link_ready)
10462 ppd->link_enabled = 1;
10463 }
10464
10465 set_all_slowpath(ppd->dd);
10466 ret = set_local_link_attributes(ppd);
10467 if (ret)
10468 break;
10469
10470 ppd->port_error_action = 0;
10471 ppd->host_link_state = HLS_DN_POLL;
10472
		if (quick_linkup) {
			/* quick linkup does not go into polling */
			ret = do_quick_linkup(dd);
10476 } else {
10477 ret1 = set_physical_link_state(dd, PLS_POLLING);
10478 if (ret1 != HCMD_SUCCESS) {
10479 dd_dev_err(dd,
10480 "Failed to transition to Polling link state, return 0x%x\n",
10481 ret1);
10482 ret = -EINVAL;
10483 }
10484 }
10485 ppd->offline_disabled_reason =
10486 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);

		/*
		 * If an error occurred above, go back to offline.  The
		 * caller may reschedule another attempt.
		 */
		if (ret)
			goto_offline(ppd, 0);
10493 break;
	case HLS_DN_DISABLE:
		/* link is disabled */
		ppd->link_enabled = 0;

		/* allow any state to transition to disabled */

		/* must transition to offline first */
		if (ppd->host_link_state != HLS_DN_OFFLINE) {
10502 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10503 if (ret)
10504 break;
10505 ppd->remote_link_down_reason = 0;
10506 }
10507
10508 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10509 if (ret1 != HCMD_SUCCESS) {
10510 dd_dev_err(dd,
10511 "Failed to transition to Disabled link state, return 0x%x\n",
10512 ret1);
10513 ret = -EINVAL;
10514 break;
10515 }
10516 ppd->host_link_state = HLS_DN_DISABLE;
10517 dc_shutdown(dd);
10518 break;
10519 case HLS_DN_OFFLINE:
10520 if (ppd->host_link_state == HLS_DN_DISABLE)
10521 dc_start(dd);

		/* allow any state to transition to offline */
		ret = goto_offline(ppd, ppd->remote_link_down_reason);
10525 if (!ret)
10526 ppd->remote_link_down_reason = 0;
10527 break;
10528 case HLS_VERIFY_CAP:
10529 if (ppd->host_link_state != HLS_DN_POLL)
10530 goto unexpected;
10531 ppd->host_link_state = HLS_VERIFY_CAP;
10532 break;
10533 case HLS_GOING_UP:
10534 if (ppd->host_link_state != HLS_VERIFY_CAP)
10535 goto unexpected;
10536
10537 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10538 if (ret1 != HCMD_SUCCESS) {
10539 dd_dev_err(dd,
10540 "Failed to transition to link up state, return 0x%x\n",
10541 ret1);
10542 ret = -EINVAL;
10543 break;
10544 }
10545 ppd->host_link_state = HLS_GOING_UP;
10546 break;
10547
10548 case HLS_GOING_OFFLINE:
10549 case HLS_LINK_COOLDOWN:
10550 default:
10551 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10552 __func__, state);
10553 ret = -EINVAL;
10554 break;
10555 }
10556
10557 goto done;
10558
10559unexpected:
10560 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10561 __func__, link_state_name(ppd->host_link_state),
10562 link_state_name(state));
10563 ret = -EINVAL;
10564
10565done:
10566 mutex_unlock(&ppd->hls_lock);
10567
10568 if (event.device)
10569 ib_dispatch_event(&event);
10570
10571 return ret;
10572}
10573
10574int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10575{
10576 u64 reg;
10577 int ret = 0;
10578
10579 switch (which) {
10580 case HFI1_IB_CFG_LIDLMC:
10581 set_lidlmc(ppd);
10582 break;
10583 case HFI1_IB_CFG_VL_HIGH_LIMIT:
		/*
		 * The VL Arbitrator high limit is sent in units of 4k
		 * bytes, while HFI stores it in units of 64 bytes.
		 * For example, a limit of 2 (8 KB) is stored as 128.
		 */
		val *= 4096 / 64;
10589 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10590 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10591 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10592 break;
10593 case HFI1_IB_CFG_LINKDEFAULT:
		/* HFI only supports POLL as the default link down state */
		if (val != HLS_DN_POLL)
10596 ret = -EINVAL;
10597 break;
10598 case HFI1_IB_CFG_OP_VLS:
10599 if (ppd->vls_operational != val) {
10600 ppd->vls_operational = val;
10601 if (!ppd->port)
10602 ret = -EINVAL;
10603 }
10604 break;

	/*
	 * For link width, link width downgrade, and speed enable, always AND
	 * the setting with what is actually supported.  This way, enabled
	 * can never contain unsupported values, no matter what the SM or FM
	 * might request.
	 */
10613 case HFI1_IB_CFG_LWID_ENB:
10614 ppd->link_width_enabled = val & ppd->link_width_supported;
10615 break;
10616 case HFI1_IB_CFG_LWID_DG_ENB:
10617 ppd->link_width_downgrade_enabled =
10618 val & ppd->link_width_downgrade_supported;
10619 break;
10620 case HFI1_IB_CFG_SPD_ENB:
10621 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10622 break;
	case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
		/*
		 * HFI does not follow IB specs, save this value
		 * so we can report it, if asked.
		 */
		ppd->overrun_threshold = val;
		break;
	case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
		/*
		 * HFI does not follow IB specs, save this value
		 * so we can report it, if asked.
		 */
		ppd->phy_error_threshold = val;
		break;
10637
10638 case HFI1_IB_CFG_MTU:
10639 set_send_length(ppd);
10640 break;
10641
10642 case HFI1_IB_CFG_PKEYS:
10643 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10644 set_partition_keys(ppd);
10645 break;
10646
10647 default:
10648 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10649 dd_dev_info(ppd->dd,
10650 "%s: which %s, val 0x%x: not implemented\n",
10651 __func__, ib_cfg_name(which), val);
10652 break;
10653 }
10654 return ret;
10655}
10656
/* initialize the per-port VL arbitration caches and their locks */
static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10659{
10660 int i;
10661
10662 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10663 VL_ARB_LOW_PRIO_TABLE_SIZE);
10664 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10665 VL_ARB_HIGH_PRIO_TABLE_SIZE);

	/*
	 * Note that we always return values directly from the
	 * 'vl_arb_cache' (and do no CSR reads) in response to a
	 * 'Get(VLArbTable)'.  This is obviously correct after a
	 * 'Set(VLArbTable)', since the cache will then be up to
	 * date.  But it's also correct prior to any 'Set(VLArbTable)'
	 * since then both the cache, and the relevant h/w registers
	 * will be zeroed.
	 */
	for (i = 0; i < MAX_PRIO_TABLE; i++)
		spin_lock_init(&ppd->vl_arb_cache[i].lock);
10679}
10680

/*
 * vl_arb_lock_cache
 *
 * All other vl_arb_* functions should be called only after locking
 * the cache.
 */
static inline struct vl_arb_cache *
10688vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10689{
10690 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10691 return NULL;
10692 spin_lock(&ppd->vl_arb_cache[idx].lock);
10693 return &ppd->vl_arb_cache[idx];
10694}
10695
10696static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10697{
10698 spin_unlock(&ppd->vl_arb_cache[idx].lock);
10699}
10700
10701static void vl_arb_get_cache(struct vl_arb_cache *cache,
10702 struct ib_vl_weight_elem *vl)
10703{
10704 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10705}
10706
10707static void vl_arb_set_cache(struct vl_arb_cache *cache,
10708 struct ib_vl_weight_elem *vl)
10709{
10710 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10711}
10712
10713static int vl_arb_match_cache(struct vl_arb_cache *cache,
10714 struct ib_vl_weight_elem *vl)
10715{
10716 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10717}
10718

/* write the given VL arbitration weights to the hardware */
static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10722 u32 size, struct ib_vl_weight_elem *vl)
10723{
10724 struct hfi1_devdata *dd = ppd->dd;
10725 u64 reg;
10726 unsigned int i, is_up = 0;
10727 int drain, ret = 0;
10728
10729 mutex_lock(&ppd->hls_lock);
10730
10731 if (ppd->host_link_state & HLS_UP)
10732 is_up = 1;
10733
10734 drain = !is_ax(dd) && is_up;
10735
	if (drain)
		/*
		 * Before adjusting VL arbitration weights, empty per-VL
		 * FIFOs, otherwise a packet whose VL weight is being
		 * set to 0 could get stuck in a FIFO with no chance to
		 * egress.
		 */
		ret = stop_drain_data_vls(dd);
10744
10745 if (ret) {
10746 dd_dev_err(
10747 dd,
10748 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10749 __func__);
10750 goto err;
10751 }
10752
	for (i = 0; i < size; i++, vl++) {
		/*
		 * NOTE: The low priority shift and mask are used here, but
		 * they are the same for both the low and high registers.
		 */
		reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10759 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10760 | (((u64)vl->weight
10761 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10762 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10763 write_csr(dd, target + (i * 8), reg);
10764 }
10765 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10766
10767 if (drain)
10768 open_fill_data_vls(dd);
10769
10770err:
10771 mutex_unlock(&ppd->hls_lock);
10772
10773 return ret;
10774}
10775
/*
 * Read one credit merge VL register.
 */
static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10780 struct vl_limit *vll)
10781{
10782 u64 reg = read_csr(dd, csr);
10783
10784 vll->dedicated = cpu_to_be16(
10785 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10786 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10787 vll->shared = cpu_to_be16(
10788 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10789 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10790}
10791
/*
 * Read the current credit merge limits.
 */
static int get_buffer_control(struct hfi1_devdata *dd,
10796 struct buffer_control *bc, u16 *overall_limit)
10797{
10798 u64 reg;
10799 int i;

	/* not all entries are filled in */
	memset(bc, 0, sizeof(*bc));

	/* OPA and HFI have a 1-1 mapping */
	for (i = 0; i < TXE_NUM_DATA_VL; i++)
		read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);

	/* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
	read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10810
10811 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10812 bc->overall_shared_limit = cpu_to_be16(
10813 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10814 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10815 if (overall_limit)
10816 *overall_limit = (reg
10817 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10818 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10819 return sizeof(struct buffer_control);
10820}
10821
10822static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10823{
10824 u64 reg;
10825 int i;

	/* each register contains 16 SC->VLnt mappings, 4 bits each */
	reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
	for (i = 0; i < sizeof(u64); i++) {
		u8 byte = *(((u8 *)&reg) + i);

		dp->vlnt[2 * i] = byte & 0xf;
		dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
	}

	reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
	for (i = 0; i < sizeof(u64); i++) {
		u8 byte = *(((u8 *)&reg) + i);

		dp->vlnt[16 + (2 * i)] = byte & 0xf;
		dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
	}
10843 return sizeof(struct sc2vlnt);
10844}
10845
10846static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
10847 struct ib_vl_weight_elem *vl)
10848{
10849 unsigned int i;
10850
10851 for (i = 0; i < nelems; i++, vl++) {
10852 vl->vl = 0xf;
10853 vl->weight = 0;
10854 }
10855}
10856
10857static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10858{
10859 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
10860 DC_SC_VL_VAL(15_0,
10861 0, dp->vlnt[0] & 0xf,
10862 1, dp->vlnt[1] & 0xf,
10863 2, dp->vlnt[2] & 0xf,
10864 3, dp->vlnt[3] & 0xf,
10865 4, dp->vlnt[4] & 0xf,
10866 5, dp->vlnt[5] & 0xf,
10867 6, dp->vlnt[6] & 0xf,
10868 7, dp->vlnt[7] & 0xf,
10869 8, dp->vlnt[8] & 0xf,
10870 9, dp->vlnt[9] & 0xf,
10871 10, dp->vlnt[10] & 0xf,
10872 11, dp->vlnt[11] & 0xf,
10873 12, dp->vlnt[12] & 0xf,
10874 13, dp->vlnt[13] & 0xf,
10875 14, dp->vlnt[14] & 0xf,
10876 15, dp->vlnt[15] & 0xf));
10877 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
10878 DC_SC_VL_VAL(31_16,
10879 16, dp->vlnt[16] & 0xf,
10880 17, dp->vlnt[17] & 0xf,
10881 18, dp->vlnt[18] & 0xf,
10882 19, dp->vlnt[19] & 0xf,
10883 20, dp->vlnt[20] & 0xf,
10884 21, dp->vlnt[21] & 0xf,
10885 22, dp->vlnt[22] & 0xf,
10886 23, dp->vlnt[23] & 0xf,
10887 24, dp->vlnt[24] & 0xf,
10888 25, dp->vlnt[25] & 0xf,
10889 26, dp->vlnt[26] & 0xf,
10890 27, dp->vlnt[27] & 0xf,
10891 28, dp->vlnt[28] & 0xf,
10892 29, dp->vlnt[29] & 0xf,
10893 30, dp->vlnt[30] & 0xf,
10894 31, dp->vlnt[31] & 0xf));
10895}
10896
10897static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
10898 u16 limit)
10899{
10900 if (limit != 0)
10901 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
10902 what, (int)limit, idx);
10903}
10904
/* change only the shared limit portion of SendCmGlobalCredit */
static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
10907{
10908 u64 reg;
10909
10910 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10911 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
10912 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
10913 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10914}
10915
/* change only the total credit limit portion of SendCmGlobalCredit */
static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
10918{
10919 u64 reg;
10920
10921 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10922 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
10923 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
10924 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10925}
10926
/* set the given per-VL shared limit */
static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
10929{
10930 u64 reg;
10931 u32 addr;
10932
10933 if (vl < TXE_NUM_DATA_VL)
10934 addr = SEND_CM_CREDIT_VL + (8 * vl);
10935 else
10936 addr = SEND_CM_CREDIT_VL15;
10937
10938 reg = read_csr(dd, addr);
10939 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
10940 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
10941 write_csr(dd, addr, reg);
10942}
10943
/* set the given per-VL dedicated limit */
static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
10946{
10947 u64 reg;
10948 u32 addr;
10949
10950 if (vl < TXE_NUM_DATA_VL)
10951 addr = SEND_CM_CREDIT_VL + (8 * vl);
10952 else
10953 addr = SEND_CM_CREDIT_VL15;
10954
10955 reg = read_csr(dd, addr);
10956 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
10957 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
10958 write_csr(dd, addr, reg);
10959}
10960
/* spin until the given per-VL status mask bits clear */
static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
10963 const char *which)
10964{
10965 unsigned long timeout;
10966 u64 reg;
10967
10968 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
10969 while (1) {
10970 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
10971
10972 if (reg == 0)
10973 return;
10974 if (time_after(jiffies, timeout))
10975 break;
10976 udelay(1);
10977 }
10978
10979 dd_dev_err(dd,
10980 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
10981 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);

	/*
	 * If this occurs, it is likely there was a credit loss on the
	 * port.  The only recovery from that is a link bounce.
	 */
	dd_dev_err(dd,
		   "Continuing anyway.  A credit loss may occur.  Suggest a link bounce\n");
10988}
10989
/*
 * The number of credits on the VLs may be changed while everything
 * is "live", but the following algorithm must be followed due to
 * how the hardware is actually implemented.  In particular,
 * Return_Credit_Status[] is the only correct status check.
 *
 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
 *     set Global_Shared_Credit_Limit = 0
 *     use_all_mask = 1
 * mask0 = all VLs that are changing either dedicated or shared limits
 * set Shared_Limit[mask0] = 0
 * spin until Return_Credit_Status[use_all_mask ? all : mask0] == 0
 * if (changing any dedicated limit)
 *     mask1 = all VLs that are lowering dedicated limits
 *     lower Dedicated_Limit[mask1]
 *     spin until Return_Credit_Status[mask1] == 0
 *     raise Dedicated_Limits
 * raise Shared_Limits
 * raise Global_Shared_Credit_Limit
 *
 * lower = if the new limit is lower, set the limit to the new value
 * raise = if the new limit is higher than the current value (may be changed
 *	earlier in the algorithm), set the new limit to the new value
 */
11014int set_buffer_control(struct hfi1_pportdata *ppd,
11015 struct buffer_control *new_bc)
11016{
11017 struct hfi1_devdata *dd = ppd->dd;
11018 u64 changing_mask, ld_mask, stat_mask;
11019 int change_count;
11020 int i, use_all_mask;
11021 int this_shared_changing;
11022 int vl_count = 0, ret;

	/* used by the is_ax() (A0 hardware) work-around below */
	int any_shared_limit_changing;
11028 struct buffer_control cur_bc;
11029 u8 changing[OPA_MAX_VLS];
11030 u8 lowering_dedicated[OPA_MAX_VLS];
11031 u16 cur_total;
11032 u32 new_total = 0;
11033 const u64 all_mask =
11034 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
11035 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
11036 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
11037 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
11038 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
11039 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
11040 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
11041 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
11042 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
11043
11044#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
11045#define NUM_USABLE_VLS 16

	/* find the new total credits, do sanity check on unused VLs */
	for (i = 0; i < OPA_MAX_VLS; i++) {
11049 if (valid_vl(i)) {
11050 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
11051 continue;
11052 }
11053 nonzero_msg(dd, i, "dedicated",
11054 be16_to_cpu(new_bc->vl[i].dedicated));
11055 nonzero_msg(dd, i, "shared",
11056 be16_to_cpu(new_bc->vl[i].shared));
11057 new_bc->vl[i].dedicated = 0;
11058 new_bc->vl[i].shared = 0;
11059 }
11060 new_total += be16_to_cpu(new_bc->overall_shared_limit);

	/* fetch the current values */
	get_buffer_control(dd, &cur_bc, &cur_total);

	/*
	 * Create the masks we will use.
	 */
	memset(changing, 0, sizeof(changing));
	memset(lowering_dedicated, 0, sizeof(lowering_dedicated));

	/*
	 * NOTE: Assumes that the individual VL bits are adjacent and in
	 * increasing order.
	 */
	stat_mask =
		SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
11076 changing_mask = 0;
11077 ld_mask = 0;
11078 change_count = 0;
11079 any_shared_limit_changing = 0;
11080 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
11081 if (!valid_vl(i))
11082 continue;
11083 this_shared_changing = new_bc->vl[i].shared
11084 != cur_bc.vl[i].shared;
11085 if (this_shared_changing)
11086 any_shared_limit_changing = 1;
11087 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
11088 this_shared_changing) {
11089 changing[i] = 1;
11090 changing_mask |= stat_mask;
11091 change_count++;
11092 }
11093 if (be16_to_cpu(new_bc->vl[i].dedicated) <
11094 be16_to_cpu(cur_bc.vl[i].dedicated)) {
11095 lowering_dedicated[i] = 1;
11096 ld_mask |= stat_mask;
11097 }
11098 }

	/* bracket the credit change with a total adjustment */
	if (new_total > cur_total)
11102 set_global_limit(dd, new_total);
11103

	/*
	 * Start the credit change algorithm.
	 */
	use_all_mask = 0;
11108 if ((be16_to_cpu(new_bc->overall_shared_limit) <
11109 be16_to_cpu(cur_bc.overall_shared_limit)) ||
11110 (is_ax(dd) && any_shared_limit_changing)) {
11111 set_global_shared(dd, 0);
11112 cur_bc.overall_shared_limit = 0;
11113 use_all_mask = 1;
11114 }
11115
11116 for (i = 0; i < NUM_USABLE_VLS; i++) {
11117 if (!valid_vl(i))
11118 continue;
11119
11120 if (changing[i]) {
11121 set_vl_shared(dd, i, 0);
11122 cur_bc.vl[i].shared = 0;
11123 }
11124 }
11125
11126 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
11127 "shared");
11128
11129 if (change_count > 0) {
11130 for (i = 0; i < NUM_USABLE_VLS; i++) {
11131 if (!valid_vl(i))
11132 continue;
11133
11134 if (lowering_dedicated[i]) {
11135 set_vl_dedicated(dd, i,
11136 be16_to_cpu(new_bc->
11137 vl[i].dedicated));
11138 cur_bc.vl[i].dedicated =
11139 new_bc->vl[i].dedicated;
11140 }
11141 }
11142
11143 wait_for_vl_status_clear(dd, ld_mask, "dedicated");

		/* now raise all dedicated that are going up */
		for (i = 0; i < NUM_USABLE_VLS; i++) {
11147 if (!valid_vl(i))
11148 continue;
11149
11150 if (be16_to_cpu(new_bc->vl[i].dedicated) >
11151 be16_to_cpu(cur_bc.vl[i].dedicated))
11152 set_vl_dedicated(dd, i,
11153 be16_to_cpu(new_bc->
11154 vl[i].dedicated));
11155 }
11156 }
11157
11158
11159 for (i = 0; i < NUM_USABLE_VLS; i++) {
11160 if (!valid_vl(i))
11161 continue;
11162
11163 if (be16_to_cpu(new_bc->vl[i].shared) >
11164 be16_to_cpu(cur_bc.vl[i].shared))
11165 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11166 }

	/* finally raise the global shared */
	if (be16_to_cpu(new_bc->overall_shared_limit) >
11170 be16_to_cpu(cur_bc.overall_shared_limit))
11171 set_global_shared(dd,
11172 be16_to_cpu(new_bc->overall_shared_limit));

	/* bracket the credit change with a total adjustment */
	if (new_total < cur_total)
		set_global_limit(dd, new_total);

	/*
	 * Determine the actual number of operational VLS using the number of
	 * dedicated and shared credits for each VL.
	 */
	if (change_count > 0) {
11183 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11184 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11185 be16_to_cpu(new_bc->vl[i].shared) > 0)
11186 vl_count++;
11187 ppd->actual_vls_operational = vl_count;
11188 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11189 ppd->actual_vls_operational :
11190 ppd->vls_operational,
11191 NULL);
11192 if (ret == 0)
11193 ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11194 ppd->actual_vls_operational :
11195 ppd->vls_operational, NULL);
11196 if (ret)
11197 return ret;
11198 }
11199 return 0;
11200}
11201
/*
 * Read the given fabric manager table.  Return the size of the
 * table (in bytes) on success, and a negative error code on
 * failure.
 */
int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
{
11210 int size;
11211 struct vl_arb_cache *vlc;
11212
11213 switch (which) {
	case FM_TBL_VL_HIGH_ARB:
		size = 256;
		/*
		 * OPA specifies 128 elements (of 2 bytes each), though
		 * HFI supports only 16 elements in h/w.
		 */
		vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
		vl_arb_get_cache(vlc, t);
		vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
		break;
11223 break;
	case FM_TBL_VL_LOW_ARB:
		size = 256;
		/*
		 * OPA specifies 128 elements (of 2 bytes each), though
		 * HFI supports only 16 elements in h/w.
		 */
		vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
		vl_arb_get_cache(vlc, t);
		vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
		break;
11233 break;
11234 case FM_TBL_BUFFER_CONTROL:
11235 size = get_buffer_control(ppd->dd, t, NULL);
11236 break;
11237 case FM_TBL_SC2VLNT:
11238 size = get_sc2vlnt(ppd->dd, t);
11239 break;
	case FM_TBL_VL_PREEMPT_ELEMS:
		size = 256;
		/* OPA specifies 128 elements, of 2 bytes each */
		get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
		break;
	case FM_TBL_VL_PREEMPT_MATRIX:
		size = 256;
		/*
		 * OPA specifies that this be a 256 byte table; VL
		 * preemption is not implemented, so nothing is filled in.
		 */
		break;
11252 default:
11253 return -EINVAL;
11254 }
11255 return size;
11256}
11257
/*
 * Write the given fabric manager table.
 */
int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11262{
11263 int ret = 0;
11264 struct vl_arb_cache *vlc;
11265
11266 switch (which) {
11267 case FM_TBL_VL_HIGH_ARB:
11268 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11269 if (vl_arb_match_cache(vlc, t)) {
11270 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11271 break;
11272 }
11273 vl_arb_set_cache(vlc, t);
11274 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11275 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11276 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11277 break;
11278 case FM_TBL_VL_LOW_ARB:
11279 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11280 if (vl_arb_match_cache(vlc, t)) {
11281 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11282 break;
11283 }
11284 vl_arb_set_cache(vlc, t);
11285 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11286 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11287 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11288 break;
11289 case FM_TBL_BUFFER_CONTROL:
11290 ret = set_buffer_control(ppd, t);
11291 break;
11292 case FM_TBL_SC2VLNT:
11293 set_sc2vlnt(ppd->dd, t);
11294 break;
11295 default:
11296 ret = -EINVAL;
11297 }
11298 return ret;
11299}
11300
/*
 * Disable all data VLs.
 *
 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
 */
static int disable_data_vls(struct hfi1_devdata *dd)
11307{
11308 if (is_ax(dd))
11309 return 1;
11310
11311 pio_send_control(dd, PSC_DATA_VL_DISABLE);
11312
11313 return 0;
11314}
11315
/*
 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
 * Just re-enables all data VLs (the "fill" part happens
 * automatically - the name was chosen for symmetry with
 * stop_drain_data_vls()).
 *
 * Return 0 if successful, non-zero if the VLs cannot be enabled.
 */
int open_fill_data_vls(struct hfi1_devdata *dd)
11325{
11326 if (is_ax(dd))
11327 return 1;
11328
11329 pio_send_control(dd, PSC_DATA_VL_ENABLE);
11330
11331 return 0;
11332}
11333
/*
 * drain_data_vls() - assumes that disable_data_vls() has been called,
 * waits for occupancy (of per-VL FIFOs) for all contexts, and SDMA
 * engines to drop to 0.
 */
static void drain_data_vls(struct hfi1_devdata *dd)
11340{
11341 sc_wait(dd);
11342 sdma_wait(dd);
11343 pause_for_credit_return(dd);
11344}
11345
/*
 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
 *
 * Use open_fill_data_vls() to resume using data VLs.  This pair is
 * meant to be used like this:
 *
 * stop_drain_data_vls(dd);
 * // do things with per-VL resources
 * open_fill_data_vls(dd);
 */
int stop_drain_data_vls(struct hfi1_devdata *dd)
11357{
11358 int ret;
11359
11360 ret = disable_data_vls(dd);
11361 if (ret == 0)
11362 drain_data_vls(dd);
11363
11364 return ret;
11365}
11366
/*
 * Convert a nanosecond time to a cclock count.  No matter how slow
 * the cclock, a non-zero ns will always have a non-zero result.
 */
u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11372{
11373 u32 cclocks;
11374
11375 if (dd->icode == ICODE_FPGA_EMULATION)
11376 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11377 else
11378 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11379 if (ns && !cclocks)
11380 cclocks = 1;
11381 return cclocks;
11382}
11383
/*
 * Convert a cclock count to nanoseconds.  No matter how slow
 * the cclock, a non-zero cclocks will always have a non-zero result.
 */
u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11389{
11390 u32 ns;
11391
11392 if (dd->icode == ICODE_FPGA_EMULATION)
11393 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11394 else
11395 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11396 if (cclocks && !ns)
11397 ns = 1;
11398 return ns;
11399}
11400
/*
 * Dynamically adjust the receive interrupt timeout for a context based on
 * incoming packet rate.
 *
 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
 */
static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 timeout = rcd->rcvavail_timeout;

	/*
	 * This algorithm doubles or halves the timeout depending on whether
	 * the number of packets received in this interrupt were less than or
	 * greater equal the interrupt count.
	 *
	 * The calculations below do not allow a steady state to be achieved.
	 * Only at the endpoints is it possible to have an unchanging
	 * timeout.
	 */
	if (npkts < rcv_intr_count) {
		/*
		 * Not enough packets arrived before the timeout, adjust
		 * timeout downward.
		 */
		if (timeout < 2) /* already at minimum */
			return;
		timeout >>= 1;
	} else {
		/*
		 * More than enough packets arrived before the timeout, adjust
		 * timeout upward.
		 */
		if (timeout >= dd->rcv_intr_timeout_csr) /* already at max */
			return;
		timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
	}

	rcd->rcvavail_timeout = timeout;

	/*
	 * timeout cannot be larger than rcv_intr_timeout_csr which has
	 * already been verified to be in range
	 */
	write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
			(u64)timeout <<
			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11447}
11448
11449void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11450 u32 intr_adjust, u32 npkts)
11451{
11452 struct hfi1_devdata *dd = rcd->dd;
11453 u64 reg;
11454 u32 ctxt = rcd->ctxt;
11455

	/*
	 * Need to write timeout register before updating RcvHdrHead to ensure
	 * that a new value is used when the HW decides to restart counting.
	 */
	if (intr_adjust)
		adjust_rcv_timeout(rcd, npkts);
11462 if (updegr) {
11463 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11464 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11465 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11466 }
11467 mmiowb();
11468 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11469 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11470 << RCV_HDR_HEAD_HEAD_SHIFT);
11471 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11472 mmiowb();
11473}
11474
11475u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11476{
11477 u32 head, tail;
11478
11479 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11480 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11481
11482 if (rcd->rcvhdrtail_kvaddr)
11483 tail = get_rcvhdrtail(rcd);
11484 else
11485 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11486
11487 return head == tail;
11488}
11489
/*
 * Context Control and Receive Array encoding for buffer size:
 *	0x0 invalid
 *	0x1   4 KB
 *	0x2   8 KB
 *	0x3  16 KB
 *	0x4  32 KB
 *	0x5  64 KB
 *	0x6 128 KB
 *	0x7 256 KB
 *	0x8 512 KB
 *	0x9   1 MB
 *	0xa   2 MB
 *	0xb-0xf reserved
 *
 * This routine assumes that the value has already been sanity checked.
 */
static u32 encoded_size(u32 size)
11510{
11511 switch (size) {
11512 case 4 * 1024: return 0x1;
11513 case 8 * 1024: return 0x2;
11514 case 16 * 1024: return 0x3;
11515 case 32 * 1024: return 0x4;
11516 case 64 * 1024: return 0x5;
11517 case 128 * 1024: return 0x6;
11518 case 256 * 1024: return 0x7;
11519 case 512 * 1024: return 0x8;
11520 case 1 * 1024 * 1024: return 0x9;
11521 case 2 * 1024 * 1024: return 0xa;
11522 }
11523 return 0x1;
11524}
11525
11526void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
11527{
11528 struct hfi1_ctxtdata *rcd;
11529 u64 rcvctrl, reg;
11530 int did_enable = 0;
11531
11532 rcd = dd->rcd[ctxt];
11533 if (!rcd)
11534 return;
11535
11536 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11537
11538 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11539
	if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
	    !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
		/* reset the tail and hdr addresses, and sequence count */
		write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
				rcd->rcvhdrq_dma);
		if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
			write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
					rcd->rcvhdrqtailaddr_dma);
		rcd->seq_cnt = 1;

		/* reset the cached receive header queue head value */
		rcd->head = 0;

		/*
		 * Zero the receive header queue so we don't get false
		 * positives when checking the sequence number.  The
		 * sequence numbers could land exactly on the same spot.
		 * E.g. a rcd restart before the receive header wrapped.
		 */
		memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);

		/* starting timeout */
		rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;

		/* enable the context */
		rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;

		/* clean the egr buffer size first */
		rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
		rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
				& RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
			<< RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;

		/* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
		write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
		did_enable = 1;

		/* zero RcvEgrIndexHead */
		write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);

		/* set eager count and base index */
11581 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11582 & RCV_EGR_CTRL_EGR_CNT_MASK)
11583 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11584 (((rcd->eager_base >> RCV_SHIFT)
11585 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11586 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11587 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11588

		/*
		 * Set TID (expected) count and base index.
		 * rcd->expected_count counts individual RcvArray entries;
		 * the CSR takes the count scaled down by RCV_SHIFT, as
		 * does the base index.
		 */
		reg = (((rcd->expected_count >> RCV_SHIFT)
11596 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11597 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11598 (((rcd->expected_base >> RCV_SHIFT)
11599 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11600 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11601 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
11602 if (ctxt == HFI1_CTRL_CTXT)
11603 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11604 }
11605 if (op & HFI1_RCVCTRL_CTXT_DIS) {
		write_csr(dd, RCV_VL15, 0);
		/*
		 * When a receive context is being disabled, turn on tail
		 * update with a dummy tail address so any DMA of the tail
		 * goes to a safe location, then disable the context.
		 */
		if (dd->rcvhdrtail_dummy_dma) {
			write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
					dd->rcvhdrtail_dummy_dma);
			/* enabling TailUpd here is intentional */
			rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
		}
11618
11619 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11620 }
11621 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11622 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11623 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11624 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11625 if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_dma)
11626 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
	if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
		/* see comment on RcvCtxtCtrl.TailUpd above */
		if (!(op & HFI1_RCVCTRL_CTXT_DIS))
			rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
	}
11632 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11633 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11634 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11635 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
	if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
		/*
		 * In one-packet-per-eager mode, the size comes from
		 * the RcvArray entry.
		 */
		rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11642 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11643 }
11644 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11645 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11646 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11647 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11648 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11649 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11650 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11651 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11652 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11653 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11654 rcd->rcvctrl = rcvctrl;
11655 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11656 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);

	/* work around sticky RcvCtxtStatus.BlockedRHQFull */
	if (did_enable &&
	    (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
11661 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11662 if (reg != 0) {
11663 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
11664 ctxt, reg);
11665 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11666 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11667 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11668 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11669 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11670 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
11671 ctxt, reg, reg == 0 ? "not" : "still");
11672 }
11673 }
11674
	if (did_enable) {
		/*
		 * The interrupt timeout and count must be set after
		 * the context is enabled to take effect.
		 */
		/* set interrupt timeout */
		write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
				(u64)rcd->rcvavail_timeout <<
				RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);

		/* set RcvHdrHead.Counter, clear RcvHdrHead.Head (again) */
		reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
		write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
	}
11689
	if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
		/*
		 * If the context has been disabled and the Tail Update has
		 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy
		 * address so it doesn't contain an address that is invalid.
		 */
		write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
				dd->rcvhdrtail_dummy_dma);
11698}
11699
11700u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
11701{
11702 int ret;
11703 u64 val = 0;
11704
11705 if (namep) {
11706 ret = dd->cntrnameslen;
11707 *namep = dd->cntrnames;
11708 } else {
11709 const struct cntr_entry *entry;
11710 int i, j;
11711
		ret = (dd->ndevcntrs) * sizeof(u64);

		/* Get the start of the block of counters */
		*cntrp = dd->cntrs;

		/*
		 * Now go and fill in each counter in the block.
		 */
		for (i = 0; i < DEV_CNTR_LAST; i++) {
11721 entry = &dev_cntrs[i];
11722 hfi1_cdbg(CNTR, "reading %s", entry->name);
			if (entry->flags & CNTR_DISABLED) {
				/* Nothing */
				hfi1_cdbg(CNTR, "\tDisabled\n");
11726 } else {
11727 if (entry->flags & CNTR_VL) {
11728 hfi1_cdbg(CNTR, "\tPer VL\n");
11729 for (j = 0; j < C_VL_COUNT; j++) {
11730 val = entry->rw_cntr(entry,
11731 dd, j,
11732 CNTR_MODE_R,
11733 0);
11734 hfi1_cdbg(
11735 CNTR,
11736 "\t\tRead 0x%llx for %d\n",
11737 val, j);
11738 dd->cntrs[entry->offset + j] =
11739 val;
11740 }
11741 } else if (entry->flags & CNTR_SDMA) {
11742 hfi1_cdbg(CNTR,
11743 "\t Per SDMA Engine\n");
11744 for (j = 0; j < dd->chip_sdma_engines;
11745 j++) {
11746 val =
11747 entry->rw_cntr(entry, dd, j,
11748 CNTR_MODE_R, 0);
11749 hfi1_cdbg(CNTR,
11750 "\t\tRead 0x%llx for %d\n",
11751 val, j);
11752 dd->cntrs[entry->offset + j] =
11753 val;
11754 }
11755 } else {
11756 val = entry->rw_cntr(entry, dd,
11757 CNTR_INVALID_VL,
11758 CNTR_MODE_R, 0);
11759 dd->cntrs[entry->offset] = val;
11760 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11761 }
11762 }
11763 }
11764 }
11765 return ret;
11766}
11767
/*
 * Used by sysfs to create files for hfi stats to read
 */
11771u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
11772{
11773 int ret;
11774 u64 val = 0;
11775
11776 if (namep) {
11777 ret = ppd->dd->portcntrnameslen;
11778 *namep = ppd->dd->portcntrnames;
11779 } else {
11780 const struct cntr_entry *entry;
11781 int i, j;
11782
11783 ret = ppd->dd->nportcntrs * sizeof(u64);
11784 *cntrp = ppd->cntrs;
11785
11786 for (i = 0; i < PORT_CNTR_LAST; i++) {
11787 entry = &port_cntrs[i];
11788 hfi1_cdbg(CNTR, "reading %s", entry->name);
			if (entry->flags & CNTR_DISABLED) {
				/* Nothing */
				hfi1_cdbg(CNTR, "\tDisabled\n");
11792 continue;
11793 }
11794
11795 if (entry->flags & CNTR_VL) {
11796 hfi1_cdbg(CNTR, "\tPer VL");
11797 for (j = 0; j < C_VL_COUNT; j++) {
11798 val = entry->rw_cntr(entry, ppd, j,
11799 CNTR_MODE_R,
11800 0);
11801 hfi1_cdbg(
11802 CNTR,
11803 "\t\tRead 0x%llx for %d",
11804 val, j);
11805 ppd->cntrs[entry->offset + j] = val;
11806 }
11807 } else {
11808 val = entry->rw_cntr(entry, ppd,
11809 CNTR_INVALID_VL,
11810 CNTR_MODE_R,
11811 0);
11812 ppd->cntrs[entry->offset] = val;
11813 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11814 }
11815 }
11816 }
11817 return ret;
11818}
11819
11820static void free_cntrs(struct hfi1_devdata *dd)
11821{
11822 struct hfi1_pportdata *ppd;
11823 int i;
11824
11825 if (dd->synth_stats_timer.data)
11826 del_timer_sync(&dd->synth_stats_timer);
11827 dd->synth_stats_timer.data = 0;
11828 ppd = (struct hfi1_pportdata *)(dd + 1);
11829 for (i = 0; i < dd->num_pports; i++, ppd++) {
11830 kfree(ppd->cntrs);
11831 kfree(ppd->scntrs);
11832 free_percpu(ppd->ibport_data.rvp.rc_acks);
11833 free_percpu(ppd->ibport_data.rvp.rc_qacks);
11834 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
11835 ppd->cntrs = NULL;
11836 ppd->scntrs = NULL;
11837 ppd->ibport_data.rvp.rc_acks = NULL;
11838 ppd->ibport_data.rvp.rc_qacks = NULL;
11839 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
11840 }
11841 kfree(dd->portcntrnames);
11842 dd->portcntrnames = NULL;
11843 kfree(dd->cntrs);
11844 dd->cntrs = NULL;
11845 kfree(dd->scntrs);
11846 dd->scntrs = NULL;
11847 kfree(dd->cntrnames);
11848 dd->cntrnames = NULL;
11849}
11850
11851static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
11852 u64 *psval, void *context, int vl)
11853{
11854 u64 val;
11855 u64 sval = *psval;
11856
11857 if (entry->flags & CNTR_DISABLED) {
11858 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11859 return 0;
11860 }
11861
11862 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11863
11864 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);

	/* If it's a synthetic counter there is more work we need to do */
	if (entry->flags & CNTR_SYNTH) {
		if (sval == CNTR_MAX) {
			/* No need to read already saturated counter */
			return CNTR_MAX;
		}

		if (entry->flags & CNTR_32BIT) {
			/* 32bit counters can wrap multiple times */
			u64 upper = sval >> 32;
			u64 lower = (sval << 32) >> 32;

			if (lower > val) { /* hw counter wrapped */
				if (upper == CNTR_32BIT_MAX)
					val = CNTR_MAX;
				else
					upper++;
			}

			if (val != CNTR_MAX)
				val = (upper << 32) | val;

		} else {
			/* If we rolled we are saturated */
			if ((val < sval) || (val > CNTR_MAX))
				val = CNTR_MAX;
		}
	}
11893 }
11894
11895 *psval = val;
11896
11897 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11898
11899 return val;
11900}
11901
11902static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
11903 struct cntr_entry *entry,
11904 u64 *psval, void *context, int vl, u64 data)
11905{
11906 u64 val;
11907
11908 if (entry->flags & CNTR_DISABLED) {
11909 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11910 return 0;
11911 }
11912
11913 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11914
11915 if (entry->flags & CNTR_SYNTH) {
11916 *psval = data;
11917 if (entry->flags & CNTR_32BIT) {
11918 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11919 (data << 32) >> 32);
11920 val = data;
11921 } else {
11922 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11923 data);
11924 }
11925 } else {
11926 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
11927 }
11928
11929 *psval = val;
11930
11931 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11932
11933 return val;
11934}
11935
11936u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
11937{
11938 struct cntr_entry *entry;
11939 u64 *sval;
11940
11941 entry = &dev_cntrs[index];
11942 sval = dd->scntrs + entry->offset;
11943
11944 if (vl != CNTR_INVALID_VL)
11945 sval += vl;
11946
11947 return read_dev_port_cntr(dd, entry, sval, dd, vl);
11948}
11949
11950u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
11951{
11952 struct cntr_entry *entry;
11953 u64 *sval;
11954
11955 entry = &dev_cntrs[index];
11956 sval = dd->scntrs + entry->offset;
11957
11958 if (vl != CNTR_INVALID_VL)
11959 sval += vl;
11960
11961 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
11962}
11963
11964u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
11965{
11966 struct cntr_entry *entry;
11967 u64 *sval;
11968
11969 entry = &port_cntrs[index];
11970 sval = ppd->scntrs + entry->offset;
11971
11972 if (vl != CNTR_INVALID_VL)
11973 sval += vl;
11974
11975 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11976 (index <= C_RCV_HDR_OVF_LAST)) {
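		/*
		 * RcvHdrOvfl counters for receive contexts beyond those
		 * in use always read as 0.
		 */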
11977
11978 return 0;
11979 }
11980
11981 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
11982}
11983
11984u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
11985{
11986 struct cntr_entry *entry;
11987 u64 *sval;
11988
11989 entry = &port_cntrs[index];
11990 sval = ppd->scntrs + entry->offset;
11991
11992 if (vl != CNTR_INVALID_VL)
11993 sval += vl;
11994
11995 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11996 (index <= C_RCV_HDR_OVF_LAST)) {
11997
11998 return 0;
11999 }
12000
12001 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
12002}
12003
12004static void update_synth_timer(unsigned long opaque)
12005{
12006 u64 cur_tx;
12007 u64 cur_rx;
12008 u64 total_flits;
12009 u8 update = 0;
12010 int i, j, vl;
12011 struct hfi1_pportdata *ppd;
12012 struct cntr_entry *entry;
12013
12014 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
12015
12016
12017
12018
12019
12020
12021
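	/*
	 * Rather than keeping a timer per counter, sample the DC receive
	 * and transmit flit counts.  If either wrapped, or enough flits
	 * moved that a 32-bit counter could have wrapped since the last
	 * tick, refresh every synthesized counter.
	 */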
12022 entry = &dev_cntrs[C_DC_RCV_FLITS];
12023 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12024
12025 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12026 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12027
12028 hfi1_cdbg(
12029 CNTR,
12030 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
12031 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
12032
12033 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
12034
12035
12036
12037
12038 update = 1;
12039 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
12040 dd->unit);
12041 } else {
12042 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
12043 hfi1_cdbg(CNTR,
12044 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
12045 total_flits, (u64)CNTR_32BIT_MAX);
12046 if (total_flits >= CNTR_32BIT_MAX) {
12047 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
12048 dd->unit);
12049 update = 1;
12050 }
12051 }
12052
12053 if (update) {
12054 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
12055 for (i = 0; i < DEV_CNTR_LAST; i++) {
12056 entry = &dev_cntrs[i];
12057 if (entry->flags & CNTR_VL) {
12058 for (vl = 0; vl < C_VL_COUNT; vl++)
12059 read_dev_cntr(dd, i, vl);
12060 } else {
12061 read_dev_cntr(dd, i, CNTR_INVALID_VL);
12062 }
12063 }
12064 ppd = (struct hfi1_pportdata *)(dd + 1);
12065 for (i = 0; i < dd->num_pports; i++, ppd++) {
12066 for (j = 0; j < PORT_CNTR_LAST; j++) {
12067 entry = &port_cntrs[j];
12068 if (entry->flags & CNTR_VL) {
12069 for (vl = 0; vl < C_VL_COUNT; vl++)
12070 read_port_cntr(ppd, j, vl);
12071 } else {
12072 read_port_cntr(ppd, j, CNTR_INVALID_VL);
12073 }
12074 }
12075 }
12076
12077
12078
12079
12080
12081
12082
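		/*
		 * Re-sample the flit counters after the update so traffic
		 * arriving during the update is charged to the next
		 * interval rather than lost.
		 */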
12083 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12084 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12085 CNTR_MODE_R, 0);
12086
12087 entry = &dev_cntrs[C_DC_RCV_FLITS];
12088 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12089 CNTR_MODE_R, 0);
12090
12091 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
12092 dd->unit, dd->last_tx, dd->last_rx);
12093
12094 } else {
12095 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
12096 }
12097
12098 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12099}
12100
12101#define C_MAX_NAME 16
12102static int init_cntrs(struct hfi1_devdata *dd)
12103{
12104 int i, rcv_ctxts, j;
12105 size_t sz;
12106 char *p;
12107 char name[C_MAX_NAME];
12108 struct hfi1_pportdata *ppd;
12109 const char *bit_type_32 = ",32";
12110 const int bit_type_32_sz = strlen(bit_type_32);
12111
12112
12113 setup_timer(&dd->synth_stats_timer, update_synth_timer,
12114 (unsigned long)dd);
12115
12116
12117
12118
12119
12120
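	/*
	 * First pass over the device counters: assign offsets, count the
	 * enabled entries, and total the name-string bytes (one name per
	 * VL or SDMA engine for per-VL/per-engine counters, plus a ",32"
	 * suffix for 32-bit ones and a newline for each).
	 */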
12121 dd->ndevcntrs = 0;
12122 sz = 0;
12123
12124 for (i = 0; i < DEV_CNTR_LAST; i++) {
12125 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12126 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
12127 continue;
12128 }
12129
12130 if (dev_cntrs[i].flags & CNTR_VL) {
12131 dev_cntrs[i].offset = dd->ndevcntrs;
12132 for (j = 0; j < C_VL_COUNT; j++) {
12133 snprintf(name, C_MAX_NAME, "%s%d",
12134 dev_cntrs[i].name, vl_from_idx(j));
12135 sz += strlen(name);
12136
12137 if (dev_cntrs[i].flags & CNTR_32BIT)
12138 sz += bit_type_32_sz;
12139 sz++;
12140 dd->ndevcntrs++;
12141 }
12142 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12143 dev_cntrs[i].offset = dd->ndevcntrs;
12144 for (j = 0; j < dd->chip_sdma_engines; j++) {
12145 snprintf(name, C_MAX_NAME, "%s%d",
12146 dev_cntrs[i].name, j);
12147 sz += strlen(name);
12148
12149 if (dev_cntrs[i].flags & CNTR_32BIT)
12150 sz += bit_type_32_sz;
12151 sz++;
12152 dd->ndevcntrs++;
12153 }
12154 } else {
12155
12156 sz += strlen(dev_cntrs[i].name) + 1;
12157
12158 if (dev_cntrs[i].flags & CNTR_32BIT)
12159 sz += bit_type_32_sz;
12160 dev_cntrs[i].offset = dd->ndevcntrs;
12161 dd->ndevcntrs++;
12162 }
12163 }
12164
12165
12166 dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12167 if (!dd->cntrs)
12168 goto bail;
12169
12170 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12171 if (!dd->scntrs)
12172 goto bail;
12173
12174
12175 dd->cntrnameslen = sz;
12176 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12177 if (!dd->cntrnames)
12178 goto bail;
12179
12180
12181 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
12182 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12183
12184 } else if (dev_cntrs[i].flags & CNTR_VL) {
12185 for (j = 0; j < C_VL_COUNT; j++) {
12186 snprintf(name, C_MAX_NAME, "%s%d",
12187 dev_cntrs[i].name,
12188 vl_from_idx(j));
12189 memcpy(p, name, strlen(name));
12190 p += strlen(name);
12191
12192
12193 if (dev_cntrs[i].flags & CNTR_32BIT) {
12194 memcpy(p, bit_type_32, bit_type_32_sz);
12195 p += bit_type_32_sz;
12196 }
12197
12198 *p++ = '\n';
12199 }
12200 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12201 for (j = 0; j < dd->chip_sdma_engines; j++) {
12202 snprintf(name, C_MAX_NAME, "%s%d",
12203 dev_cntrs[i].name, j);
12204 memcpy(p, name, strlen(name));
12205 p += strlen(name);
12206
12207
12208 if (dev_cntrs[i].flags & CNTR_32BIT) {
12209 memcpy(p, bit_type_32, bit_type_32_sz);
12210 p += bit_type_32_sz;
12211 }
12212
12213 *p++ = '\n';
12214 }
12215 } else {
12216 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12217 p += strlen(dev_cntrs[i].name);
12218
12219
12220 if (dev_cntrs[i].flags & CNTR_32BIT) {
12221 memcpy(p, bit_type_32, bit_type_32_sz);
12222 p += bit_type_32_sz;
12223 }
12224
12225 *p++ = '\n';
12226 }
12227 }
12228
12229
12230
12231
12232
12233
12234
12235
12236
12237
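	/*
	 * The chip has an RcvHdrOvfl counter for every possible receive
	 * context; disable the ones beyond the contexts actually in use.
	 */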
12238 rcv_ctxts = dd->num_rcv_contexts;
12239 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12240 i <= C_RCV_HDR_OVF_LAST; i++) {
12241 port_cntrs[i].flags |= CNTR_DISABLED;
12242 }
12243
12244
12245 sz = 0;
12246 dd->nportcntrs = 0;
12247 for (i = 0; i < PORT_CNTR_LAST; i++) {
12248 if (port_cntrs[i].flags & CNTR_DISABLED) {
12249 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12250 continue;
12251 }
12252
12253 if (port_cntrs[i].flags & CNTR_VL) {
12254 port_cntrs[i].offset = dd->nportcntrs;
12255 for (j = 0; j < C_VL_COUNT; j++) {
12256 snprintf(name, C_MAX_NAME, "%s%d",
12257 port_cntrs[i].name, vl_from_idx(j));
12258 sz += strlen(name);
12259
12260 if (port_cntrs[i].flags & CNTR_32BIT)
12261 sz += bit_type_32_sz;
12262 sz++;
12263 dd->nportcntrs++;
12264 }
12265 } else {
12266
12267 sz += strlen(port_cntrs[i].name) + 1;
12268
12269 if (port_cntrs[i].flags & CNTR_32BIT)
12270 sz += bit_type_32_sz;
12271 port_cntrs[i].offset = dd->nportcntrs;
12272 dd->nportcntrs++;
12273 }
12274 }
12275
12276
12277 dd->portcntrnameslen = sz;
12278 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12279 if (!dd->portcntrnames)
12280 goto bail;
12281
12282
12283 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12284 if (port_cntrs[i].flags & CNTR_DISABLED)
12285 continue;
12286
12287 if (port_cntrs[i].flags & CNTR_VL) {
12288 for (j = 0; j < C_VL_COUNT; j++) {
12289 snprintf(name, C_MAX_NAME, "%s%d",
12290 port_cntrs[i].name, vl_from_idx(j));
12291 memcpy(p, name, strlen(name));
12292 p += strlen(name);
12293
12294
12295 if (port_cntrs[i].flags & CNTR_32BIT) {
12296 memcpy(p, bit_type_32, bit_type_32_sz);
12297 p += bit_type_32_sz;
12298 }
12299
12300 *p++ = '\n';
12301 }
12302 } else {
12303 memcpy(p, port_cntrs[i].name,
12304 strlen(port_cntrs[i].name));
12305 p += strlen(port_cntrs[i].name);
12306
12307
12308 if (port_cntrs[i].flags & CNTR_32BIT) {
12309 memcpy(p, bit_type_32, bit_type_32_sz);
12310 p += bit_type_32_sz;
12311 }
12312
12313 *p++ = '\n';
12314 }
12315 }
12316
12317
12318 ppd = (struct hfi1_pportdata *)(dd + 1);
12319 for (i = 0; i < dd->num_pports; i++, ppd++) {
12320 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12321 if (!ppd->cntrs)
12322 goto bail;
12323
12324 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12325 if (!ppd->scntrs)
12326 goto bail;
12327 }
12328
12329
12330 if (init_cpu_counters(dd))
12331 goto bail;
12332
12333 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12334 return 0;
12335bail:
12336 free_cntrs(dd);
12337 return -ENOMEM;
12338}
12339
12340static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12341{
12342 switch (chip_lstate) {
12343 default:
12344 dd_dev_err(dd,
12345 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12346 chip_lstate);
12347
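		/* fall through */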
12348 case LSTATE_DOWN:
12349 return IB_PORT_DOWN;
12350 case LSTATE_INIT:
12351 return IB_PORT_INIT;
12352 case LSTATE_ARMED:
12353 return IB_PORT_ARMED;
12354 case LSTATE_ACTIVE:
12355 return IB_PORT_ACTIVE;
12356 }
12357}
12358
12359u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12360{
12361
12362 switch (chip_pstate & 0xf0) {
12363 default:
12364 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12365 chip_pstate);
12366
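		/* fall through */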
12367 case PLS_DISABLED:
12368 return IB_PORTPHYSSTATE_DISABLED;
12369 case PLS_OFFLINE:
12370 return OPA_PORTPHYSSTATE_OFFLINE;
12371 case PLS_POLLING:
12372 return IB_PORTPHYSSTATE_POLLING;
12373 case PLS_CONFIGPHY:
12374 return IB_PORTPHYSSTATE_TRAINING;
12375 case PLS_LINKUP:
12376 return IB_PORTPHYSSTATE_LINKUP;
12377 case PLS_PHYTEST:
12378 return IB_PORTPHYSSTATE_PHY_TEST;
12379 }
12380}
12381
12382
12383const char *opa_lstate_name(u32 lstate)
12384{
12385 static const char * const port_logical_names[] = {
12386 "PORT_NOP",
12387 "PORT_DOWN",
12388 "PORT_INIT",
12389 "PORT_ARMED",
12390 "PORT_ACTIVE",
12391 "PORT_ACTIVE_DEFER",
12392 };
12393 if (lstate < ARRAY_SIZE(port_logical_names))
12394 return port_logical_names[lstate];
12395 return "unknown";
12396}
12397
12398
12399const char *opa_pstate_name(u32 pstate)
12400{
12401 static const char * const port_physical_names[] = {
12402 "PHYS_NOP",
12403 "reserved1",
12404 "PHYS_POLL",
12405 "PHYS_DISABLED",
12406 "PHYS_TRAINING",
12407 "PHYS_LINKUP",
12408 "PHYS_LINK_ERR_RECOVER",
12409 "PHYS_PHY_TEST",
12410 "reserved8",
12411 "PHYS_OFFLINE",
12412 "PHYS_GANGED",
12413 "PHYS_TEST",
12414 };
12415 if (pstate < ARRAY_SIZE(port_physical_names))
12416 return port_physical_names[pstate];
12417 return "unknown";
12418}
12419
12420
12421
12422
12423
12424u32 get_logical_state(struct hfi1_pportdata *ppd)
12425{
12426 u32 new_state;
12427
12428 new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
12429 if (new_state != ppd->lstate) {
12430 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
12431 opa_lstate_name(new_state), new_state);
12432 ppd->lstate = new_state;
12433 }
12434
12435
12436
12437
12438
12439
12440
12441
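	/*
	 * Mirror the logical state into the shared status page so user
	 * level code can see link readiness without querying the chip.
	 */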
12442 if (ppd->statusp) {
12443 switch (ppd->lstate) {
12444 case IB_PORT_DOWN:
12445 case IB_PORT_INIT:
12446 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12447 HFI1_STATUS_IB_READY);
12448 break;
12449 case IB_PORT_ARMED:
12450 *ppd->statusp |= HFI1_STATUS_IB_CONF;
12451 break;
12452 case IB_PORT_ACTIVE:
12453 *ppd->statusp |= HFI1_STATUS_IB_READY;
12454 break;
12455 }
12456 }
12457 return ppd->lstate;
12458}
12459
12460
12461
12462
12463
12464
12465
12466
12467
12468
12469
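/*
 * wait_logical_linkstate - poll (every 20 ms) until the logical link
 * state matches @state or @msecs milliseconds have passed
 *
 * Returns 0 when the state is reached, -ETIMEDOUT otherwise.
 */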
12470static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12471 int msecs)
12472{
12473 unsigned long timeout;
12474
12475 timeout = jiffies + msecs_to_jiffies(msecs);
12476 while (1) {
12477 if (get_logical_state(ppd) == state)
12478 return 0;
12479 if (time_after(jiffies, timeout))
12480 break;
12481 msleep(20);
12482 }
12483 dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
12484
12485 return -ETIMEDOUT;
12486}
12487
12488u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
12489{
12490 u32 pstate;
12491 u32 ib_pstate;
12492
12493 pstate = read_physical_state(ppd->dd);
12494 ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
12495 if (ppd->last_pstate != ib_pstate) {
12496 dd_dev_info(ppd->dd,
12497 "%s: physical state changed to %s (0x%x), phy 0x%x\n",
12498 __func__, opa_pstate_name(ib_pstate), ib_pstate,
12499 pstate);
12500 ppd->last_pstate = ib_pstate;
12501 }
12502 return ib_pstate;
12503}
12504
12505#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12506(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12507
12508#define SET_STATIC_RATE_CONTROL_SMASK(r) \
12509(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12510
12511int hfi1_init_ctxt(struct send_context *sc)
12512{
12513 if (sc) {
12514 struct hfi1_devdata *dd = sc->dd;
12515 u64 reg;
12516 u8 set = (sc->type == SC_USER ?
12517 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12518 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12519 reg = read_kctxt_csr(dd, sc->hw_context,
12520 SEND_CTXT_CHECK_ENABLE);
12521 if (set)
12522 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12523 else
12524 SET_STATIC_RATE_CONTROL_SMASK(reg);
12525 write_kctxt_csr(dd, sc->hw_context,
12526 SEND_CTXT_CHECK_ENABLE, reg);
12527 }
12528 return 0;
12529}
12530
12531int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12532{
12533 int ret = 0;
12534 u64 reg;
12535
12536 if (dd->icode != ICODE_RTL_SILICON) {
12537 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12538 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12539 __func__);
12540 return -EINVAL;
12541 }
12542 reg = read_csr(dd, ASIC_STS_THERM);
12543 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12544 ASIC_STS_THERM_CURR_TEMP_MASK);
12545 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12546 ASIC_STS_THERM_LO_TEMP_MASK);
12547 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12548 ASIC_STS_THERM_HI_TEMP_MASK);
12549 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12550 ASIC_STS_THERM_CRIT_TEMP_MASK);
12551
12552 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12553
12554 return ret;
12555}
12556
12557
12558
12559
12560
12561
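/*
 * set_intr_state - enable (@enable != 0) or disable all chip interrupt
 * sources via the CCE interrupt mask CSRs; enabling also re-arms the
 * QSFP interrupt.
 */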
12562void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12563{
12564 int i;
12565
12566
12567
12568
12569 if (enable) {
12570
12571 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12572 write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
12573
12574 init_qsfp_int(dd);
12575 } else {
12576 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12577 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
12578 }
12579}
12580
12581
12582
12583
12584static void clear_all_interrupts(struct hfi1_devdata *dd)
12585{
12586 int i;
12587
12588 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12589 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
12590
12591 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12592 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12593 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12594 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12595 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12596 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12597 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12598 for (i = 0; i < dd->chip_send_contexts; i++)
12599 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12600 for (i = 0; i < dd->chip_sdma_engines; i++)
12601 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12602
12603 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12604 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12605 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12606}
12607
12608
12609static void disable_intx(struct pci_dev *pdev)
12610{
12611 pci_intx(pdev, 0);
12612}
12613
12614static void clean_up_interrupts(struct hfi1_devdata *dd)
12615{
12616 int i;
12617
12618
12619 if (dd->num_msix_entries) {
12620
12621 struct hfi1_msix_entry *me = dd->msix_entries;
12622
12623 for (i = 0; i < dd->num_msix_entries; i++, me++) {
12624 if (!me->arg)
12625 continue;
12626 hfi1_put_irq_affinity(dd, &dd->msix_entries[i]);
12627 free_irq(me->msix.vector, me->arg);
12628 }
12629 } else {
12630
12631 if (dd->requested_intx_irq) {
12632 free_irq(dd->pcidev->irq, dd);
12633 dd->requested_intx_irq = 0;
12634 }
12635 }
12636
12637
12638 if (dd->num_msix_entries) {
12639
12640 pci_disable_msix(dd->pcidev);
12641 } else {
12642
12643 disable_intx(dd->pcidev);
12644 }
12645
12646
12647 kfree(dd->msix_entries);
12648 dd->msix_entries = NULL;
12649 dd->num_msix_entries = 0;
12650}
12651
12652
12653
12654
12655
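/*
 * remap_intr - direct chip interrupt source @isrc at MSI-X vector
 * @msix_intr
 *
 * Clears the source from the general-interrupt shadow mask, then
 * patches its 8-bit entry in the CCE_INT_MAP CSRs (eight entries per
 * 64-bit CSR).  For example, source 13 lives in map CSR 1, byte 5.
 */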
12656static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12657{
12658 u64 reg;
12659 int m, n;
12660
12661
12662 m = isrc / 64;
12663 n = isrc % 64;
12664 dd->gi_mask[m] &= ~((u64)1 << n);
12665
12666
12667 m = isrc / 8;
12668 n = isrc % 8;
12669 reg = read_csr(dd, CCE_INT_MAP + (8 * m));
12670 reg &= ~((u64)0xff << (8 * n));
12671 reg |= ((u64)msix_intr & 0xff) << (8 * n);
12672 write_csr(dd, CCE_INT_MAP + (8 * m), reg);
12673}
12674
12675static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12676 int engine, int msix_intr)
12677{
12678
12679
12680
12681
12682
12683
12684
12685 remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
12686 msix_intr);
12687 remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
12688 msix_intr);
12689 remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
12690 msix_intr);
12691}
12692
12693static int request_intx_irq(struct hfi1_devdata *dd)
12694{
12695 int ret;
12696
12697 snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12698 dd->unit);
12699 ret = request_irq(dd->pcidev->irq, general_interrupt,
12700 IRQF_SHARED, dd->intx_name, dd);
12701 if (ret)
12702 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
12703 ret);
12704 else
12705 dd->requested_intx_irq = 1;
12706 return ret;
12707}
12708
12709static int request_msix_irqs(struct hfi1_devdata *dd)
12710{
12711 int first_general, last_general;
12712 int first_sdma, last_sdma;
12713 int first_rx, last_rx;
12714 int i, ret = 0;
12715
12716
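	/*
	 * MSI-X vector layout: one general interrupt, then one per SDMA
	 * engine, then one per kernel receive context.
	 */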
12717 first_general = 0;
12718 last_general = first_general + 1;
12719 first_sdma = last_general;
12720 last_sdma = first_sdma + dd->num_sdma;
12721 first_rx = last_sdma;
12722 last_rx = first_rx + dd->n_krcv_queues;
12723
12724
12725
12726
12727
12728
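	/*
	 * Sanity check: the remap code expects the SDMA chip interrupt
	 * sources to start on a 64-bit CSR boundary.
	 */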
12729 BUILD_BUG_ON(IS_SDMA_START % 64);
12730
12731 for (i = 0; i < dd->num_msix_entries; i++) {
12732 struct hfi1_msix_entry *me = &dd->msix_entries[i];
12733 const char *err_info;
12734 irq_handler_t handler;
12735 irq_handler_t thread = NULL;
12736 void *arg;
12737 int idx;
12738 struct hfi1_ctxtdata *rcd = NULL;
12739 struct sdma_engine *sde = NULL;
12740
12741
12742 if (first_general <= i && i < last_general) {
12743 idx = i - first_general;
12744 handler = general_interrupt;
12745 arg = dd;
12746 snprintf(me->name, sizeof(me->name),
12747 DRIVER_NAME "_%d", dd->unit);
12748 err_info = "general";
12749 me->type = IRQ_GENERAL;
12750 } else if (first_sdma <= i && i < last_sdma) {
12751 idx = i - first_sdma;
12752 sde = &dd->per_sdma[idx];
12753 handler = sdma_interrupt;
12754 arg = sde;
12755 snprintf(me->name, sizeof(me->name),
12756 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
12757 err_info = "sdma";
12758 remap_sdma_interrupts(dd, idx, i);
12759 me->type = IRQ_SDMA;
12760 } else if (first_rx <= i && i < last_rx) {
12761 idx = i - first_rx;
12762 rcd = dd->rcd[idx];
12763
12764 if (!rcd)
12765 continue;
12766
12767
12768
12769
12770 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
12771 rcd->imask = ((u64)1) <<
12772 ((IS_RCVAVAIL_START + idx) % 64);
12773 handler = receive_context_interrupt;
12774 thread = receive_context_thread;
12775 arg = rcd;
12776 snprintf(me->name, sizeof(me->name),
12777 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
12778 err_info = "receive context";
12779 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
12780 me->type = IRQ_RCVCTXT;
12781 } else {
12782
12783
12784
12785 dd_dev_err(dd,
12786 "Unexpected extra MSI-X interrupt %d\n", i);
12787 continue;
12788 }
12789
12790 if (!arg)
12791 continue;
12792
12793 me->name[sizeof(me->name) - 1] = 0;
12794
12795 ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
12796 me->name, arg);
12797 if (ret) {
12798 dd_dev_err(dd,
12799 "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
12800 err_info, me->msix.vector, idx, ret);
12801 return ret;
12802 }
12803
12804
12805
12806
12807 me->arg = arg;
12808
12809 ret = hfi1_get_irq_affinity(dd, me);
12810 if (ret)
			dd_dev_err(dd,
				   "unable to pin IRQ, err %d\n", ret);
12813 }
12814
12815 return ret;
12816}
12817
12818
12819
12820
12821
12822static void reset_interrupts(struct hfi1_devdata *dd)
12823{
12824 int i;
12825
12826
12827 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12828 dd->gi_mask[i] = ~(u64)0;
12829
12830
12831 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12832 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
12833}
12834
12835static int set_up_interrupts(struct hfi1_devdata *dd)
12836{
12837 struct hfi1_msix_entry *entries;
12838 u32 total, request;
12839 int i, ret;
12840 int single_interrupt = 0;
12841
12842
12843
12844
12845
12846
12847
12848
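	/*
	 * Interrupt count: one general, one per SDMA engine, and one per
	 * kernel receive context.
	 */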
12849 total = 1 + dd->num_sdma + dd->n_krcv_queues;
12850
12851 entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
12852 if (!entries) {
12853 ret = -ENOMEM;
12854 goto fail;
12855 }
12856
12857 for (i = 0; i < total; i++)
12858 entries[i].msix.entry = i;
12859
12860
12861 request = total;
12862 request_msix(dd, &request, entries);
12863
12864 if (request == 0) {
12865
12866
12867 kfree(entries);
12868 single_interrupt = 1;
12869 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
12870 } else {
12871
12872 dd->num_msix_entries = request;
12873 dd->msix_entries = entries;
12874
12875 if (request != total) {
12876
12877 dd_dev_err(
12878 dd,
12879 "cannot handle reduced interrupt case, want %u, got %u\n",
12880 total, request);
12881 ret = -EINVAL;
12882 goto fail;
12883 }
12884 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
12885 }
12886
12887
12888 set_intr_state(dd, 0);
12889
12890 clear_all_interrupts(dd);
12891
12892
12893 reset_interrupts(dd);
12894
12895 if (single_interrupt)
12896 ret = request_intx_irq(dd);
12897 else
12898 ret = request_msix_irqs(dd);
12899 if (ret)
12900 goto fail;
12901
12902 return 0;
12903
12904fail:
12905 clean_up_interrupts(dd);
12906 return ret;
12907}
12908
12909
12910
12911
12912
12913
12914
12915
12916
12917
12918static int set_up_context_variables(struct hfi1_devdata *dd)
12919{
12920 unsigned long num_kernel_contexts;
12921 int total_contexts;
12922 int ret;
12923 unsigned ngroups;
12924 int qos_rmt_count;
12925 int user_rmt_reduced;
12926
12927
12928
12929
12930
12931
12932
12933
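	/*
	 * Kernel receive contexts: the control context plus the data
	 * contexts.  n_krcvqs does not include the control context,
	 * hence the "+ 1" below.
	 */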
12934 if (n_krcvqs)
12935
12936
12937
12938
12939
12940 num_kernel_contexts = n_krcvqs + 1;
12941 else
12942 num_kernel_contexts = DEFAULT_KRCVQS + 1;
12943
12944
12945
12946
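	/*
	 * Every kernel receive context needs an ACK send context, and a
	 * send context is reserved for each data VL plus one for VL15,
	 * so cap the kernel receive contexts accordingly.
	 */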
12947 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
12948 dd_dev_err(dd,
12949 "Reducing # kernel rcv contexts to: %d, from %lu\n",
12950 (int)(dd->chip_send_contexts - num_vls - 1),
12951 num_kernel_contexts);
12952 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
12953 }
12954
12955
12956
12957
12958
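	/*
	 * A negative num_user_contexts means one user context per
	 * physical CPU (node_affinity's real_cpu_mask).
	 */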
12959 if (num_user_contexts < 0)
12960 num_user_contexts =
12961 cpumask_weight(&node_affinity.real_cpu_mask);
12962
12963 total_contexts = num_kernel_contexts + num_user_contexts;
12964
12965
12966
12967
12968 if (total_contexts > dd->chip_rcv_contexts) {
12969 dd_dev_err(dd,
12970 "Reducing # user receive contexts to: %d, from %d\n",
12971 (int)(dd->chip_rcv_contexts - num_kernel_contexts),
12972 (int)num_user_contexts);
12973 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
12974
12975 total_contexts = num_kernel_contexts + num_user_contexts;
12976 }
12977
12978
12979 qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
12980 if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
12981 user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
12982 dd_dev_err(dd,
12983 "RMT size is reducing the number of user receive contexts from %d to %d\n",
12984 (int)num_user_contexts,
12985 user_rmt_reduced);
12986
12987 num_user_contexts = user_rmt_reduced;
12988 total_contexts = num_kernel_contexts + num_user_contexts;
12989 }
12990
12991
12992 dd->num_rcv_contexts = total_contexts;
12993 dd->n_krcv_queues = num_kernel_contexts;
12994 dd->first_user_ctxt = num_kernel_contexts;
12995 dd->num_user_contexts = num_user_contexts;
12996 dd->freectxts = num_user_contexts;
12997 dd_dev_info(dd,
12998 "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
12999 (int)dd->chip_rcv_contexts,
13000 (int)dd->num_rcv_contexts,
13001 (int)dd->n_krcv_queues,
13002 (int)dd->num_rcv_contexts - dd->n_krcv_queues);
13003
13004
13005
13006
13007
13008
13009
13010
13011
13012
13013
13014
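	/*
	 * Receive array entries are handed out in groups of
	 * RCV_INCREMENT.  Split whole groups evenly across the receive
	 * contexts and record how many contexts get one extra group
	 * (nctxt_extra).
	 */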
13015 dd->rcv_entries.group_size = RCV_INCREMENT;
13016 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
13017 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
13018 dd->rcv_entries.nctxt_extra = ngroups -
13019 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
13020 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
13021 dd->rcv_entries.ngroups,
13022 dd->rcv_entries.nctxt_extra);
13023 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
13024 MAX_EAGER_ENTRIES * 2) {
13025 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
13026 dd->rcv_entries.group_size;
13027 dd_dev_info(dd,
13028 "RcvArray group count too high, change to %u\n",
13029 dd->rcv_entries.ngroups);
13030 dd->rcv_entries.nctxt_extra = 0;
13031 }
13032
13033
13034
13035 ret = init_sc_pools_and_sizes(dd);
13036 if (ret >= 0) {
13037 dd->num_send_contexts = ret;
13038 dd_dev_info(
13039 dd,
13040 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
13041 dd->chip_send_contexts,
13042 dd->num_send_contexts,
13043 dd->sc_sizes[SC_KERNEL].count,
13044 dd->sc_sizes[SC_ACK].count,
13045 dd->sc_sizes[SC_USER].count,
13046 dd->sc_sizes[SC_VL15].count);
13047 ret = 0;
13048 }
13049
13050 return ret;
13051}
13052
13053
13054
13055
13056
13057
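/*
 * set_partition_keys - write this port's pkey table into the chip,
 * four 16-bit keys per RCV_PARTITION_KEY CSR, then enable receive
 * partition key checking.
 */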
13058static void set_partition_keys(struct hfi1_pportdata *ppd)
13059{
13060 struct hfi1_devdata *dd = ppd->dd;
13061 u64 reg = 0;
13062 int i;
13063
13064 dd_dev_info(dd, "Setting partition keys\n");
13065 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
13066 reg |= (ppd->pkeys[i] &
13067 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
13068 ((i % 4) *
13069 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
13070
13071 if ((i % 4) == 3) {
13072 write_csr(dd, RCV_PARTITION_KEY +
13073 ((i - 3) * 2), reg);
13074 reg = 0;
13075 }
13076 }
13077
13078
13079 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
13080}
13081
13082
13083
13084
13085
13086
13087
13088
13089
13090static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
13091{
13092 int i, j;
13093
13094
13095 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13096 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13097
13098
13099 for (i = 0; i < dd->chip_send_contexts; i++)
13100 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13101
13102
13103
13104
13105
13106
13107
13108
13109
13110
13111
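	/*
	 * The receive context CSRs and the RcvArray below power up with
	 * unspecified contents; give everything a defined value so later
	 * reads and error checking are clean.
	 */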
13112 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13113 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13114 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13115 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
13116 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
13117 }
13118
13119
13120 for (i = 0; i < dd->chip_rcv_array_count; i++)
13121 write_csr(dd, RCV_ARRAY + (8 * i),
13122 RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
13123
13124
13125 for (i = 0; i < 32; i++)
13126 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13127}
13128
13129
13130
13131
13132static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
13133 u64 ctrl_bits)
13134{
13135 unsigned long timeout;
13136 u64 reg;
13137
13138
13139 reg = read_csr(dd, CCE_STATUS);
13140 if ((reg & status_bits) == 0)
13141 return;
13142
13143
13144 write_csr(dd, CCE_CTRL, ctrl_bits);
13145
13146
13147 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13148 while (1) {
13149 reg = read_csr(dd, CCE_STATUS);
13150 if ((reg & status_bits) == 0)
13151 return;
13152 if (time_after(jiffies, timeout)) {
13153 dd_dev_err(dd,
13154 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13155 status_bits, reg & status_bits);
13156 return;
13157 }
13158 udelay(1);
13159 }
13160}
13161
13162
13163static void reset_cce_csrs(struct hfi1_devdata *dd)
13164{
13165 int i;
13166
13167
13168
13169
13170
13171 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13172 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13173 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13174 for (i = 0; i < CCE_NUM_SCRATCH; i++)
13175 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13176
13177 write_csr(dd, CCE_ERR_MASK, 0);
13178 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13179
13180 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13181 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13182 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13183
13184 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13185 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13186 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
13187 CCE_MSIX_TABLE_UPPER_RESETCSR);
13188 }
13189 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13190
13191 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13192 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13193 }
13194 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
		write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13196 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13197
13198 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13199 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13200
13201
13202 }
13203 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13204 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13205}
13206
13207
13208static void reset_misc_csrs(struct hfi1_devdata *dd)
13209{
13210 int i;
13211
13212 for (i = 0; i < 32; i++) {
13213 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13214 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13215 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13216 }
13217
13218
13219
13220
13221
13222 write_csr(dd, MISC_CFG_RSA_CMD, 1);
13223 write_csr(dd, MISC_CFG_RSA_MU, 0);
13224 write_csr(dd, MISC_CFG_FW_CTRL, 0);
13225
13226
13227
13228
13229
13230 write_csr(dd, MISC_ERR_MASK, 0);
13231 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13232
13233}
13234
13235
13236static void reset_txe_csrs(struct hfi1_devdata *dd)
13237{
13238 int i;
13239
13240
13241
13242
13243 write_csr(dd, SEND_CTRL, 0);
13244 __cm_reset(dd, 0);
13245
13246
13247
13248
13249 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13250 pio_reset_all(dd);
13251
13252 write_csr(dd, SEND_PIO_ERR_MASK, 0);
13253 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13254
13255
13256 write_csr(dd, SEND_DMA_ERR_MASK, 0);
13257 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13258
13259
13260 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13261 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13262
13263 write_csr(dd, SEND_BTH_QP, 0);
13264 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13265 write_csr(dd, SEND_SC2VLT0, 0);
13266 write_csr(dd, SEND_SC2VLT1, 0);
13267 write_csr(dd, SEND_SC2VLT2, 0);
13268 write_csr(dd, SEND_SC2VLT3, 0);
13269 write_csr(dd, SEND_LEN_CHECK0, 0);
13270 write_csr(dd, SEND_LEN_CHECK1, 0);
13271
13272 write_csr(dd, SEND_ERR_MASK, 0);
13273 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13274
13275 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13276 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
13277 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13278 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13279 for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
13280 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
13281 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13282 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
13283 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13284 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
13285 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13286 write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
13287
13288 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13289 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13290 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13291 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13292 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13293 for (i = 0; i < TXE_NUM_DATA_VL; i++)
13294 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
13295 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13296
13297
13298
13299
13300 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13301
13302
13303
13304
13305
13306
13307 for (i = 0; i < dd->chip_send_contexts; i++) {
13308 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13309 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13310 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13311 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13312 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13313 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13314 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13315 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13316 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13317 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13318 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13319 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13320 }
13321
13322
13323
13324
13325 for (i = 0; i < dd->chip_sdma_engines; i++) {
13326 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13327
13328 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13329 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13330 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13331
13332 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13333 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13334
13335 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13336 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13337
13338
13339 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13340 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13341
13342 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13343 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13344 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13345 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13346 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13347 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13348 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13349 }
13350}
13351
13352
13353
13354
13355
13356static void init_rbufs(struct hfi1_devdata *dd)
13357{
13358 u64 reg;
13359 int count;
13360
13361
13362
13363
13364
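	/*
	 * Wait for any in-progress packet DMA to drain before resetting
	 * the receive buffers.
	 */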
13365 count = 0;
13366 while (1) {
13367 reg = read_csr(dd, RCV_STATUS);
13368 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13369 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13370 break;
13371
13372
13373
13374
13375
13376
13377
13378 if (count++ > 500) {
13379 dd_dev_err(dd,
13380 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13381 __func__, reg);
13382 break;
13383 }
13384 udelay(2);
13385 }
13386
13387
13388 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13389
13390
13391
13392
13393
13394
13395
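	/*
	 * Read back RCV_CTRL to ensure the init-start write has reached
	 * the chip before polling for completion.
	 */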
13396 read_csr(dd, RCV_CTRL);
13397
13398
13399 count = 0;
13400 while (1) {
13401
13402 udelay(2);
13403 reg = read_csr(dd, RCV_STATUS);
13404 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13405 break;
13406
13407
13408 if (count++ > 50) {
13409 dd_dev_err(dd,
13410 "%s: RcvStatus.RxRbufInit not set, continuing\n",
13411 __func__);
13412 break;
13413 }
13414 }
13415}
13416
13417
13418static void reset_rxe_csrs(struct hfi1_devdata *dd)
13419{
13420 int i, j;
13421
13422
13423
13424
13425 write_csr(dd, RCV_CTRL, 0);
13426 init_rbufs(dd);
13427
13428
13429
13430
13431 write_csr(dd, RCV_BTH_QP, 0);
13432 write_csr(dd, RCV_MULTICAST, 0);
13433 write_csr(dd, RCV_BYPASS, 0);
13434 write_csr(dd, RCV_VL15, 0);
13435
13436 write_csr(dd, RCV_ERR_INFO,
13437 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13438
13439 write_csr(dd, RCV_ERR_MASK, 0);
13440 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13441
13442 for (i = 0; i < 32; i++)
13443 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13444 for (i = 0; i < 4; i++)
13445 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13446 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13447 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13448 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13449 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13450 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
13451 write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
13452 write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
13453 write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
13454 }
13455 for (i = 0; i < 32; i++)
13456 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13457
13458
13459
13460
13461 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13462
13463 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13464
13465 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13466 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13467 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13468 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13469 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13470 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13471 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13472 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13473 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13474 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13475
13476
13477
13478 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13479
13480 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13481
13482 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13483 write_uctxt_csr(dd, i,
13484 RCV_TID_FLOW_TABLE + (8 * j), 0);
13485 }
13486 }
13487}
13488
13489
13490
13491
13492
13493
13494
13495
13496
13497
13498
13499
13500static void init_sc2vl_tables(struct hfi1_devdata *dd)
13501{
13502 int i;
13503
13504
13505
13506 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13507 0,
13508 0, 0, 1, 1,
13509 2, 2, 3, 3,
13510 4, 4, 5, 5,
13511 6, 6, 7, 7));
13512 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13513 1,
13514 8, 0, 9, 0,
13515 10, 0, 11, 0,
13516 12, 0, 13, 0,
13517 14, 0, 15, 15));
13518 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13519 2,
13520 16, 0, 17, 0,
13521 18, 0, 19, 0,
13522 20, 0, 21, 0,
13523 22, 0, 23, 0));
13524 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13525 3,
13526 24, 0, 25, 0,
13527 26, 0, 27, 0,
13528 28, 0, 29, 0,
13529 30, 0, 31, 0));
13530
13531
13532 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13533 15_0,
13534 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13535 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13536 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13537 31_16,
13538 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13539 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13540
13541
13542 for (i = 0; i < 32; i++) {
13543 if (i < 8 || i == 15)
13544 *((u8 *)(dd->sc2vl) + i) = (u8)i;
13545 else
13546 *((u8 *)(dd->sc2vl) + i) = 0;
13547 }
13548}
13549
13550
13551
13552
13553
13554
13555
13556
13557
13558
13559static void init_chip(struct hfi1_devdata *dd)
13560{
13561 int i;
13562
13563
13564
13565
13566
13567
13568
13569
13570
13571
13572
13573
13574
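	/*
	 * Quiesce the device: stop all send contexts, SDMA engines and
	 * inbound traffic and mask every interrupt before resetting the
	 * CSRs, either by FLR or by hand.
	 */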
13575 write_csr(dd, SEND_CTRL, 0);
13576 for (i = 0; i < dd->chip_send_contexts; i++)
13577 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13578 for (i = 0; i < dd->chip_sdma_engines; i++)
13579 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13580
13581 write_csr(dd, RCV_CTRL, 0);
13582 for (i = 0; i < dd->chip_rcv_contexts; i++)
		write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13584
13585 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13586 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
13587
13588
13589
13590
13591
13592
13593
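	/*
	 * Hold the DC in reset across the CSR reset; it is released by
	 * the write of 0 to CCE_DC_CTRL below.
	 */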
13594 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
13595 (void)read_csr(dd, CCE_DC_CTRL);
13596
13597 if (use_flr) {
13598
13599
13600
13601
13602
13603 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13604
13605
13606 hfi1_pcie_flr(dd);
13607
13608
13609 restore_pci_variables(dd);
13610
13611 if (is_ax(dd)) {
13612 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13613 hfi1_pcie_flr(dd);
13614 restore_pci_variables(dd);
13615 }
13616 } else {
13617 dd_dev_info(dd, "Resetting CSRs with writes\n");
13618 reset_cce_csrs(dd);
13619 reset_txe_csrs(dd);
13620 reset_rxe_csrs(dd);
13621 reset_misc_csrs(dd);
13622 }
13623
13624 write_csr(dd, CCE_DC_CTRL, 0);
13625
13626
13627 setextled(dd, 0);
13628
13629
13630
13631
13632
13633
13634
13635
13636
13637
13638
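	/*
	 * Drive both QSFP control outputs to their inactive defaults
	 * (all five output bits set), presumably deasserting module
	 * reset and related pins.
	 */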
13639 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13640 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
13641 init_chip_resources(dd);
13642}
13643
13644static void init_early_variables(struct hfi1_devdata *dd)
13645{
13646 int i;
13647
13648
13649 dd->vau = CM_VAU;
13650 dd->link_credits = CM_GLOBAL_CREDITS;
13651 if (is_ax(dd))
13652 dd->link_credits--;
13653 dd->vcu = cu_to_vcu(hfi1_cu);
13654
13655 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13656 if (dd->vl15_init > dd->link_credits)
13657 dd->vl15_init = dd->link_credits;
13658
13659 write_uninitialized_csrs_and_memories(dd);
13660
13661 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13662 for (i = 0; i < dd->num_pports; i++) {
13663 struct hfi1_pportdata *ppd = &dd->pport[i];
13664
13665 set_partition_keys(ppd);
13666 }
13667 init_sc2vl_tables(dd);
13668}
13669
13670static void init_kdeth_qp(struct hfi1_devdata *dd)
13671{
13672
13673 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13674
13675 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13676 kdeth_qp = 0;
13677 }
13678 if (kdeth_qp == 0)
13679 kdeth_qp = DEFAULT_KDETH_QP;
13680
13681 write_csr(dd, SEND_BTH_QP,
13682 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
13683 SEND_BTH_QP_KDETH_QP_SHIFT);
13684
13685 write_csr(dd, RCV_BTH_QP,
13686 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
13687 RCV_BTH_QP_KDETH_QP_SHIFT);
13688}
13689
13690
13691
13692
13693
13694
13695
13696
13697
13698
13699
13700
13701
13702
13703
13704
13705
13706
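/*
 * init_qpmap_table - spread QPNs across receive contexts
 *
 * Fill the 256-entry RCV_QP_MAP_TABLE (eight 8-bit entries packed per
 * 64-bit CSR) with contexts first_ctxt..last_ctxt repeating, then
 * enable QP and bypass mapping.  With contexts 1..3, for example, the
 * entries read 1, 2, 3, 1, 2, 3, ...
 */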
13707static void init_qpmap_table(struct hfi1_devdata *dd,
13708 u32 first_ctxt,
13709 u32 last_ctxt)
13710{
13711 u64 reg = 0;
13712 u64 regno = RCV_QP_MAP_TABLE;
13713 int i;
13714 u64 ctxt = first_ctxt;
13715
13716 for (i = 0; i < 256; i++) {
13717 reg |= ctxt << (8 * (i % 8));
13718 ctxt++;
13719 if (ctxt > last_ctxt)
13720 ctxt = first_ctxt;
13721 if (i % 8 == 7) {
13722 write_csr(dd, regno, reg);
13723 reg = 0;
13724 regno += 8;
13725 }
13726 }
13727
13728 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
13729 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
13730}
13731
13732struct rsm_map_table {
13733 u64 map[NUM_MAP_REGS];
13734 unsigned int used;
13735};
13736
13737struct rsm_rule_data {
13738 u8 offset;
13739 u8 pkt_type;
13740 u32 field1_off;
13741 u32 field2_off;
13742 u32 index1_off;
13743 u32 index1_width;
13744 u32 index2_off;
13745 u32 index2_width;
13746 u32 mask1;
13747 u32 value1;
13748 u32 mask2;
13749 u32 value2;
13750};
13751
13752
13753
13754
13755
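/*
 * Allocate and initialize the shadow RSM map table.  Unused entries
 * point at receive context 0 on A-step silicon and at 0xff otherwise.
 */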
13756static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
13757{
13758 struct rsm_map_table *rmt;
13759 u8 rxcontext = is_ax(dd) ? 0 : 0xff;
13760
13761 rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
13762 if (rmt) {
13763 memset(rmt->map, rxcontext, sizeof(rmt->map));
13764 rmt->used = 0;
13765 }
13766
13767 return rmt;
13768}
13769
13770
13771
13772
13773
13774static void complete_rsm_map_table(struct hfi1_devdata *dd,
13775 struct rsm_map_table *rmt)
13776{
13777 int i;
13778
13779 if (rmt) {
13780
13781 for (i = 0; i < NUM_MAP_REGS; i++)
13782 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
13783
13784
13785 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
13786 }
13787}
13788
13789
13790
13791
13792static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
13793 struct rsm_rule_data *rrd)
13794{
13795 write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
13796 (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
13797 1ull << rule_index |
13798 (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
13799 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
13800 (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
13801 (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
13802 (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
13803 (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
13804 (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
13805 (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
13806 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
13807 (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
13808 (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
13809 (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
13810 (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
13811}
13812
13813
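/*
 * qos_rmt_entries - number of RSM map table entries QoS needs
 *
 * Returns 2^(m + n), where 2^m covers the largest per-VL kernel queue
 * count and 2^n covers num_vls, optionally reporting m and n; returns
 * 0 (and m = n = 0) when QoS is not in use.
 */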
13814static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
13815 unsigned int *np)
13816{
13817 int i;
13818 unsigned int m, n;
13819 u8 max_by_vl = 0;
13820
13821
13822 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
13823 num_vls == 1 ||
13824 krcvqsset <= 1)
13825 goto no_qos;
13826
13827
13828 for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
13829 if (krcvqs[i] > max_by_vl)
13830 max_by_vl = krcvqs[i];
13831 if (max_by_vl > 32)
13832 goto no_qos;
13833 m = ilog2(__roundup_pow_of_two(max_by_vl));
13834
13835
13836 n = ilog2(__roundup_pow_of_two(num_vls));
13837
13838
13839 if ((m + n) > 7)
13840 goto no_qos;
13841
13842 if (mp)
13843 *mp = m;
13844 if (np)
13845 *np = n;
13846
13847 return 1 << (m + n);
13848
13849no_qos:
13850 if (mp)
13851 *mp = 0;
13852 if (np)
13853 *np = 0;
13854 return 0;
13855}
13856
13857
13858
13859
13860
13861
13862
13863
13864
13865
13866
13867
13868
13869
13870
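/*
 * init_qos - set up RSM rule 0 so that verbs traffic is spread across
 * the kernel receive contexts using the SC field and the low bits of
 * the QPN.  Falls back to a plain round-robin QP map when QoS is not
 * in effect.
 */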
13871static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
13872{
13873 struct rsm_rule_data rrd;
13874 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
13875 unsigned int rmt_entries;
13876 u64 reg;
13877
13878 if (!rmt)
13879 goto bail;
13880 rmt_entries = qos_rmt_entries(dd, &m, &n);
13881 if (rmt_entries == 0)
13882 goto bail;
13883 qpns_per_vl = 1 << m;
13884
13885
13886 rmt_entries = 1 << (m + n);
13887 if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
13888 goto bail;
13889
13890
13891 for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
13892 unsigned tctxt;
13893
13894 for (qpn = 0, tctxt = ctxt;
13895 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
13896 unsigned idx, regoff, regidx;
13897
13898
13899 idx = rmt->used + ((qpn << n) ^ i);
13900 regoff = (idx % 8) * 8;
13901 regidx = idx / 8;
13902
13903 reg = rmt->map[regidx];
13904 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
13905 << regoff);
13906 reg |= (u64)(tctxt++) << regoff;
13907 rmt->map[regidx] = reg;
13908 if (tctxt == ctxt + krcvqs[i])
13909 tctxt = ctxt;
13910 }
13911 ctxt += krcvqs[i];
13912 }
13913
13914 rrd.offset = rmt->used;
	rrd.pkt_type = IB_PACKET_TYPE;
13916 rrd.field1_off = LRH_BTH_MATCH_OFFSET;
13917 rrd.field2_off = LRH_SC_MATCH_OFFSET;
13918 rrd.index1_off = LRH_SC_SELECT_OFFSET;
13919 rrd.index1_width = n;
13920 rrd.index2_off = QPN_SELECT_OFFSET;
13921 rrd.index2_width = m + n;
13922 rrd.mask1 = LRH_BTH_MASK;
13923 rrd.value1 = LRH_BTH_VALUE;
13924 rrd.mask2 = LRH_SC_MASK;
13925 rrd.value2 = LRH_SC_VALUE;
13926
13927
13928 add_rsm_rule(dd, 0, &rrd);
13929
13930
13931 rmt->used += rmt_entries;
13932
13933 init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
13934 dd->qos_shift = n + 1;
13935 return;
13936bail:
13937 dd->qos_shift = 1;
13938 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
13939}
13940
13941static void init_user_fecn_handling(struct hfi1_devdata *dd,
13942 struct rsm_map_table *rmt)
13943{
13944 struct rsm_rule_data rrd;
13945 u64 reg;
13946 int i, idx, regoff, regidx;
13947 u8 offset;
13948
13949
13950 if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
13951 dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
13952 return;
13953 }
13954
13955
13956
13957
13958
13959
13960
13961
13962
13963
13964
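	/*
	 * The RSM map index is the rule offset plus the selected field
	 * value, and the field selected below identifies the destination
	 * context, so bias the offset such that context first_user_ctxt
	 * lands on map entry rmt->used.  offset is a u8, so the
	 * arithmetic wraps mod NUM_MAP_ENTRIES (256) as intended.
	 */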
13965 offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
13966 (int)dd->first_user_ctxt);
13967
13968 for (i = dd->first_user_ctxt, idx = rmt->used;
13969 i < dd->num_rcv_contexts; i++, idx++) {
13970
13971 regoff = (idx % 8) * 8;
13972 regidx = idx / 8;
13973 reg = rmt->map[regidx];
13974 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
13975 reg |= (u64)i << regoff;
13976 rmt->map[regidx] = reg;
13977 }
13978
13979
13980
13981
13982
13983
13984
13985
13986
13987
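	/*
	 * Rule 1: match packets with the FECN or BECN bit set (the
	 * single-bit fields at offsets 95 and 133) and steer them
	 * through the map entries programmed above.
	 */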
13988 rrd.offset = offset;
13989 rrd.pkt_type = 0;
13990 rrd.field1_off = 95;
13991 rrd.field2_off = 133;
13992 rrd.index1_off = 64;
13993 rrd.index1_width = 8;
13994 rrd.index2_off = 0;
13995 rrd.index2_width = 0;
13996 rrd.mask1 = 1;
13997 rrd.value1 = 1;
13998 rrd.mask2 = 1;
13999 rrd.value2 = 1;
14000
14001
14002 add_rsm_rule(dd, 1, &rrd);
14003
14004 rmt->used += dd->num_user_contexts;
14005}
14006
14007static void init_rxe(struct hfi1_devdata *dd)
14008{
14009 struct rsm_map_table *rmt;
14010
14011
14012 write_csr(dd, RCV_ERR_MASK, ~0ull);
14013
14014 rmt = alloc_rsm_map_table(dd);
14015
14016 init_qos(dd, rmt);
14017 init_user_fecn_handling(dd, rmt);
14018 complete_rsm_map_table(dd, rmt);
14019 kfree(rmt);
14020
14021
14022
14023
14024
14025
14026
14027
14028
14029
14030
14031
14032}
14033
14034static void init_other(struct hfi1_devdata *dd)
14035{
14036
14037 write_csr(dd, CCE_ERR_MASK, ~0ull);
14038
14039 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
14040
14041 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
14042 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
14043}
14044
14045
14046
14047
14048
14049
14050
14051
14052
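/*
 * assign_cm_au_table - program a credit-manager AU decode table
 *
 * Entries 0 and 1 hold the fixed values 0 and 1; entries 2-7 scale as
 * 2, 4, 8, 16, 32 and 64 times the credit unit cu.
 */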
14053static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
14054 u32 csr0to3, u32 csr4to7)
14055{
14056 write_csr(dd, csr0to3,
14057 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
14058 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
14059 2ull * cu <<
14060 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
14061 4ull * cu <<
14062 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
14063 write_csr(dd, csr4to7,
14064 8ull * cu <<
14065 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
14066 16ull * cu <<
14067 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
14068 32ull * cu <<
14069 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
14070 64ull * cu <<
14071 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
14072}
14073
14074static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14075{
14076 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
14077 SEND_CM_LOCAL_AU_TABLE4_TO7);
14078}
14079
14080void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14081{
14082 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
14083 SEND_CM_REMOTE_AU_TABLE4_TO7);
14084}
14085
14086static void init_txe(struct hfi1_devdata *dd)
14087{
14088 int i;
14089
14090
14091 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
14092 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
14093 write_csr(dd, SEND_ERR_MASK, ~0ull);
14094 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
14095
14096
14097 for (i = 0; i < dd->chip_send_contexts; i++)
14098 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
14099 for (i = 0; i < dd->chip_sdma_engines; i++)
14100 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
14101
14102
14103 assign_local_cm_au_table(dd, dd->vcu);
14104
14105
14106
14107
14108
14109 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
14110 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
14111}
14112
14113int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
14114{
14115 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
14116 unsigned sctxt;
14117 int ret = 0;
14118 u64 reg;
14119
14120 if (!rcd || !rcd->sc) {
14121 ret = -EINVAL;
14122 goto done;
14123 }
14124 sctxt = rcd->sc->hw_context;
14125 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK |
14126 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
14127 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
14128
14129 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
14130 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
14131 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
14132
14133
14134
14135 if (!is_ax(dd)) {
14136 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14137 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14138 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14139 }
14140
14141
14142 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
14143 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
14144 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
14145 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
14146done:
14147 return ret;
14148}
14149
14150int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
14151{
14152 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
14153 unsigned sctxt;
14154 int ret = 0;
14155 u64 reg;
14156
14157 if (!rcd || !rcd->sc) {
14158 ret = -EINVAL;
14159 goto done;
14160 }
14161 sctxt = rcd->sc->hw_context;
14162 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
14163
14164
14165
14166
14167
14168 if (!is_ax(dd)) {
14169 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14170 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14171 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14172 }
14173
14174 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
14175done:
14176 return ret;
14177}

int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
{
	struct hfi1_ctxtdata *rcd;
	unsigned sctxt;
	int ret = 0;
	u64 reg;

	if (ctxt < dd->num_rcv_contexts) {
		rcd = dd->rcd[ctxt];
	} else {
		ret = -EINVAL;
		goto done;
	}
	if (!rcd || !rcd->sc) {
		ret = -EINVAL;
		goto done;
	}
	sctxt = rcd->sc->hw_context;
	reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
		SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
	write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
	/* enable the P_KEY check and allow KDETH packets through */
	reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
	reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
	reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
	write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
done:
	return ret;
}

int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
{
	struct hfi1_ctxtdata *rcd;
	unsigned sctxt;
	int ret = 0;
	u64 reg;

	if (ctxt < dd->num_rcv_contexts) {
		rcd = dd->rcd[ctxt];
	} else {
		ret = -EINVAL;
		goto done;
	}
	if (!rcd || !rcd->sc) {
		ret = -EINVAL;
		goto done;
	}
	sctxt = rcd->sc->hw_context;
	/* disable the P_KEY check, then clear the key itself */
	reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
	reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
	write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
	write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
done:
	return ret;
}
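
/*
 * Editor's note: the set/clear pairs above read-modify-write
 * SEND_CTXT_CHECK_ENABLE rather than writing it outright, so the other
 * check bits already configured for the context are preserved:
 *
 *	reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
 *	reg |= ...one check smask...;   (or: reg &= ~...one check smask...;)
 *	write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
 *
 * A blind write of just the desired bit would clobber every other check.
 */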

/*
 * Start the clean up of the chip.  Clean up happens in multiple stages;
 * this is just the first.
 */
void hfi1_start_cleanup(struct hfi1_devdata *dd)
{
	aspm_exit(dd);
	free_cntrs(dd);
	free_rcverr(dd);
	clean_up_interrupts(dd);
	finish_chip_resources(dd);
}

#define HFI_BASE_GUID(dev) \
	((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
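
/*
 * Editor's worked example: the two HFIs on one ASIC differ only in the
 * GUID index bit (bit 39, per GUID_HFI_INDEX_SHIFT), so masking it out
 * yields a per-ASIC identity.  With hypothetical GUIDs:
 *
 *	hfi0 guid: 0x0011750101234567   (bit 39 clear)
 *	hfi1 guid: 0x0011758101234567   (bit 39 set)
 *
 * both evaluate to HFI_BASE_GUID() == 0x0011750101234567, which is how
 * init_asic_data() below recognizes its peer.
 */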

/*
 * Information can be shared between the two HFIs on the same ASIC in the
 * same OS.  This function finds the peer device, if present, and sets up
 * a shared structure.
 */
static int init_asic_data(struct hfi1_devdata *dd)
{
	unsigned long flags;
	struct hfi1_devdata *tmp, *peer = NULL;
	struct hfi1_asic_data *asic_data;
	int ret = 0;

	/* pre-allocate the asic structure in case we are the first device */
	asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
	if (!asic_data)
		return -ENOMEM;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	/* find our peer device, if it is already present */
	list_for_each_entry(tmp, &hfi1_dev_list, list) {
		if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
		    dd->unit != tmp->unit) {
			peer = tmp;
			break;
		}
	}

	if (peer) {
		/* use the already allocated structure */
		dd->asic_data = peer->asic_data;
		kfree(asic_data);
	} else {
		dd->asic_data = asic_data;
		mutex_init(&dd->asic_data->asic_resource_mutex);
	}
	dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);

	/* first one through - set up i2c devices */
	if (!peer)
		ret = set_up_i2c(dd, dd->asic_data);

	return ret;
}
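
/*
 * Editor's note on the pattern above: asic_data is pre-allocated with
 * GFP_KERNEL *before* hfi1_devs_lock is taken, because a sleeping
 * allocation is not allowed under a spinlock with interrupts disabled.
 * In the abstract:
 *
 *	new = kzalloc(sizeof(*new), GFP_KERNEL);    may sleep; no lock held
 *	spin_lock_irqsave(&lock, flags);
 *	shared = find_peer() ? peer->shared : new;  decide under the lock
 *	spin_unlock_irqrestore(&lock, flags);
 *	if (shared != new)
 *		kfree(new);                         peer won; drop ours
 */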

/*
 * Set dd->boardname.  Use a generic name if a name is not returned from
 * the EFI variable space.
 *
 * Return 0 on success, -ENOMEM if space could not be allocated.
 */
static int obtain_boardname(struct hfi1_devdata *dd)
{
	/* generic board description */
	const char generic[] =
		"Intel Omni-Path Host Fabric Interface Adapter 100 Series";
	unsigned long size;
	int ret;

	ret = read_hfi1_efi_var(dd, "description", &size,
				(void **)&dd->boardname);
	if (ret) {
		dd_dev_info(dd, "Board description not found\n");
		/* use the generic description */
		dd->boardname = kstrdup(generic, GFP_KERNEL);
		if (!dd->boardname)
			return -ENOMEM;
	}
	return 0;
}

/*
 * Check the interrupt registers to make sure they are mapped correctly.
 * This is intended to help a user identify any mis-mapping by the VMM when
 * the driver is running in a virtual machine.  Call it only before
 * interrupts are set up.
 *
 * Return 0 on success, -EINVAL on failure.
 */
static int check_int_registers(struct hfi1_devdata *dd)
{
	u64 reg;
	u64 all_bits = ~(u64)0;
	u64 mask;

	/* Clear CceIntMask[0] to avoid raising any interrupts */
	mask = read_csr(dd, CCE_INT_MASK);
	write_csr(dd, CCE_INT_MASK, 0ull);
	reg = read_csr(dd, CCE_INT_MASK);
	if (reg)
		goto err_exit;

	/* Clear all interrupt statuses */
	write_csr(dd, CCE_INT_CLEAR, all_bits);
	reg = read_csr(dd, CCE_INT_STATUS);
	if (reg)
		goto err_exit;

	/* Set all interrupt statuses */
	write_csr(dd, CCE_INT_FORCE, all_bits);
	reg = read_csr(dd, CCE_INT_STATUS);
	if (reg != all_bits)
		goto err_exit;

	/* Restore the interrupt mask */
	write_csr(dd, CCE_INT_CLEAR, all_bits);
	write_csr(dd, CCE_INT_MASK, mask);

	return 0;
err_exit:
	write_csr(dd, CCE_INT_MASK, mask);
	dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
	return -EINVAL;
}
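
/*
 * Editor's note: the probe above relies on CCE_INT_CLEAR and CCE_INT_FORCE
 * being write-side registers that clear and set bits in CCE_INT_STATUS.
 * Reading the status back after each write proves the CSRs are really
 * reaching hardware; an incorrectly mapped BAR (e.g. by a VMM) typically
 * reads back zeros or stale data, tripping one of the err_exit checks.
 */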

/**
 * hfi1_init_dd() - allocate and initialize the device structure
 * @pdev: the pci_dev for this device
 * @ent: pci_device_id struct for this device
 *
 * Also allocates, initializes, and returns the devdata struct for this
 * device instance.
 *
 * This is called directly at init to set up the chip-specific state for
 * later use.
 */
struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	u64 reg;
	int i, ret;
	static const char * const inames[] = {
		"RTL silicon",
		"RTL VCS simulation",
		"RTL FPGA emulation",
		"Functional simulator"
	};
	struct pci_dev *parent = pdev->bus->self;

	dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
				sizeof(struct hfi1_pportdata));
	if (IS_ERR(dd))
		goto bail;
	ppd = dd->pport;
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		int vl;

		hfi1_init_pportdata(pdev, ppd, dd, 0, 1);

		ppd->link_width_supported =
			OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
			OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
		ppd->link_width_downgrade_supported =
			ppd->link_width_supported;
		/* start out enabling only 4X */
		ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
		ppd->link_width_downgrade_enabled =
			ppd->link_width_downgrade_supported;
		/* link width active is 0 when link is down */
		/* link width downgrade active is 0 when link is down */

		if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
		    num_vls > HFI1_MAX_VLS_SUPPORTED) {
			hfi1_early_err(&pdev->dev,
				       "Invalid num_vls %u, using %u VLs\n",
				       num_vls, HFI1_MAX_VLS_SUPPORTED);
			num_vls = HFI1_MAX_VLS_SUPPORTED;
		}
		ppd->vls_supported = num_vls;
		ppd->vls_operational = ppd->vls_supported;
		ppd->actual_vls_operational = ppd->vls_supported;
		/* set default MTUs; VL15 is reserved for MAD traffic */
		for (vl = 0; vl < num_vls; vl++)
			dd->vld[vl].mtu = hfi1_max_mtu;
		dd->vld[15].mtu = MAX_MAD_PACKET;
		/*
		 * Set the initial values to reasonable defaults; they will
		 * be set for real when the link comes up.
		 */
		ppd->lstate = IB_PORT_DOWN;
		ppd->overrun_threshold = 0x4;
		ppd->phy_error_threshold = 0xf;
		ppd->port_crc_mode_enabled = link_crc_mask;
		/* initialize supported LTP CRC mode */
		ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
		/* initialize enabled LTP CRC mode */
		ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
		/* start in offline */
		ppd->host_link_state = HLS_DN_OFFLINE;
		init_vl_arb_caches(ppd);
		ppd->last_pstate = 0xff; /* invalid value */
	}

	dd->link_default = HLS_DN_POLL;

	/*
	 * Do remaining PCIe setup and save PCIe values in dd.
	 * Any error printing is already done by the init code.
	 * On return, we have the chip mapped.
	 */
	ret = hfi1_pcie_ddinit(dd, pdev);
	if (ret < 0)
		goto bail_free;

	/* verify that reads actually work, save revision for reset check */
	dd->revision = read_csr(dd, CCE_REVISION);
	if (dd->revision == ~(u64)0) {
		dd_dev_err(dd, "cannot read chip CSRs\n");
		ret = -EINVAL;
		goto bail_cleanup;
	}
	dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
			& CCE_REVISION_CHIP_REV_MAJOR_MASK;
	dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
			& CCE_REVISION_CHIP_REV_MINOR_MASK;

	/*
	 * Check interrupt register mapping if the driver has no access to
	 * the upstream component.  In this case, it is likely that the
	 * driver is running in a VM.
	 */
	if (!parent) {
		ret = check_int_registers(dd);
		if (ret)
			goto bail_cleanup;
	}

	/*
	 * Obtain the hardware ID - NOT related to unit, which is a
	 * software enumeration.
	 */
	reg = read_csr(dd, CCE_REVISION2);
	dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
					& CCE_REVISION2_HFI_ID_MASK;
	/* implementation code and revision */
	dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
	dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
	dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
		    dd->icode < ARRAY_SIZE(inames) ?
		    inames[dd->icode] : "unknown", (int)dd->irev);

	/* speeds the hardware can support */
	dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
	/* speeds allowed to run at */
	dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
	/* give a reasonable active value, will be set on link up */
	dd->pport->link_speed_active = OPA_LINK_SPEED_25G;

	dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
	dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
	dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
	dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
	dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);

	/* fix up link widths for emulation _p */
	ppd = dd->pport;
	if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
		ppd->link_width_supported =
			ppd->link_width_enabled =
			ppd->link_width_downgrade_supported =
			ppd->link_width_downgrade_enabled =
				OPA_LINK_WIDTH_1X;
	}
	/* ensure num_vls isn't larger than the number of SDMA engines */
	if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
		dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
			   num_vls, dd->chip_sdma_engines);
		num_vls = dd->chip_sdma_engines;
		ppd->vls_supported = dd->chip_sdma_engines;
		ppd->vls_operational = ppd->vls_supported;
	}

	/*
	 * Convert the ns parameter to the 64 * cclocks used in the CSR.
	 * Limit the max if larger than the field holds.  If the timeout is
	 * non-zero, then the calculated field will be at least 1.
	 *
	 * Must be after icode is set up - the cclock rate depends
	 * on knowing the hardware being used.
	 */
	dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
	if (dd->rcv_intr_timeout_csr >
			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
		dd->rcv_intr_timeout_csr =
			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
	else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
		dd->rcv_intr_timeout_csr = 1;
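
	/*
	 * Editor's worked example (clock rate hypothetical): with
	 * rcv_intr_timeout = 840 ns and a cclock period of, say, 1.25 ns,
	 * ns_to_cclock() yields 672 cclocks, and 672 / 64 = 10 (integer
	 * division), so the CSR field holds 10 - i.e. the timeout is
	 * programmed in units of 64 cclocks, rounded down, then clamped
	 * to the field width above.
	 */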

	/* needs to be done before we look for the peer device */
	read_guid(dd);

	/* set up shared ASIC data with peer device */
	ret = init_asic_data(dd);
	if (ret)
		goto bail_cleanup;

	/* obtain chip sizes, reset chip CSRs */
	init_chip(dd);

	/* read in the PCIe link speed information */
	ret = pcie_speeds(dd);
	if (ret)
		goto bail_cleanup;

	/* needs to be done before get_platform_config() */
	ret = eprom_init(dd);
	if (ret)
		goto bail_free_rcverr;

	/* needs to be called before hfi1_firmware_init() */
	get_platform_config(dd);

	/* read in firmware */
	ret = hfi1_firmware_init(dd);
	if (ret)
		goto bail_cleanup;

	/*
	 * In general, the PCIe Gen3 transition must occur after the
	 * chip has been idled (so it won't initiate any PCIe transactions,
	 * e.g. an interrupt) and before the driver changes any registers
	 * (the transition will reset the registers).
	 *
	 * In particular, place this call after:
	 * - init_chip()           - the chip will not initiate any PCIe
	 *                           transactions
	 * - pcie_speeds()         - reads the current link speed
	 * - hfi1_firmware_init()  - the needed firmware is ready to be
	 *                           downloaded
	 */
	ret = do_pcie_gen3_transition(dd);
	if (ret)
		goto bail_cleanup;

	/* start setting dd values and adjusting CSRs */
	init_early_variables(dd);

	parse_platform_config(dd);

	ret = obtain_boardname(dd);
	if (ret)
		goto bail_cleanup;

	snprintf(dd->boardversion, BOARD_VERS_MAX,
		 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
		 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
		 (u32)dd->majrev,
		 (u32)dd->minrev,
		 (dd->revision >> CCE_REVISION_SW_SHIFT)
			& CCE_REVISION_SW_MASK);

	ret = set_up_context_variables(dd);
	if (ret)
		goto bail_cleanup;

	/* set initial RXE CSRs */
	init_rxe(dd);
	/* set initial TXE CSRs */
	init_txe(dd);
	/* set initial non-RXE, non-TXE CSRs */
	init_other(dd);
	/* set up KDETH QP prefix in both RX and TX CSRs */
	init_kdeth_qp(dd);

	ret = hfi1_dev_affinity_init(dd);
	if (ret)
		goto bail_cleanup;

	/* send contexts must be set up before receive contexts */
	ret = init_send_contexts(dd);
	if (ret)
		goto bail_cleanup;

	ret = hfi1_create_ctxts(dd);
	if (ret)
		goto bail_cleanup;

	dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
	/*
	 * rcd[0] is guaranteed to be valid by this point.  Also, all
	 * contexts use the same value, per the module parameter.
	 */
	dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);

	ret = init_pervl_scs(dd);
	if (ret)
		goto bail_cleanup;

	/* sdma init */
	for (i = 0; i < dd->num_pports; ++i) {
		ret = sdma_init(dd, i);
		if (ret)
			goto bail_cleanup;
	}

	/* use contexts created by hfi1_create_ctxts */
	ret = set_up_interrupts(dd);
	if (ret)
		goto bail_cleanup;

	/* set up LCB access - must be after set_up_interrupts() */
	init_lcb_access(dd);

	/*
	 * Serial number is created from the base guid:
	 * [27:24] = base guid [38:35]
	 * [23: 0] = base guid [23: 0]
	 */
	snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
		 (dd->base_guid & 0xFFFFFF) |
		     ((dd->base_guid >> 11) & 0xF000000));
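
	/*
	 * Editor's worked example with a hypothetical base guid of
	 * 0x0000005000123456:
	 *   guid[23:0]  = 0x123456 -> serial[23:0]
	 *   guid[38:35] = 0xA      -> serial[27:24] (via the >> 11 splice)
	 * so the serial prints as "0x0a123456".  The OUI bytes below come
	 * from guid[63:40]: here oui1/oui2/oui3 = 0x00/0x00/0x50.
	 */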

	dd->oui1 = dd->base_guid >> 56 & 0xFF;
	dd->oui2 = dd->base_guid >> 48 & 0xFF;
	dd->oui3 = dd->base_guid >> 40 & 0xFF;

	ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
	if (ret)
		goto bail_clear_intr;

	thermal_init(dd);

	ret = init_cntrs(dd);
	if (ret)
		goto bail_clear_intr;

	ret = init_rcverr(dd);
	if (ret)
		goto bail_free_cntrs;

	init_completion(&dd->user_comp);

	/* The user refcount starts with one to indicate an active device */
	atomic_set(&dd->user_refcount, 1);

	goto bail;

bail_free_rcverr:
	free_rcverr(dd);
bail_free_cntrs:
	free_cntrs(dd);
bail_clear_intr:
	clean_up_interrupts(dd);
bail_cleanup:
	hfi1_pcie_ddcleanup(dd);
bail_free:
	hfi1_free_devdata(dd);
	dd = ERR_PTR(ret);
bail:
	return dd;
}
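
/*
 * Editor's note on the error handling above: hfi1_init_dd() uses the
 * standard kernel "goto ladder" - each label undoes what was initialized
 * before the failure point, and the labels fall through from the most- to
 * the least-recently acquired resource.  In miniature:
 *
 *	ret = step_a(); if (ret) goto bail;
 *	ret = step_b(); if (ret) goto undo_a;
 *	ret = step_c(); if (ret) goto undo_b;
 *	return 0;
 * undo_b:
 *	undo_step_b();
 * undo_a:
 *	undo_step_a();
 * bail:
 *	return ret;
 */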

static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
			u32 dw_len)
{
	u32 delta_cycles;
	u32 current_egress_rate = ppd->current_egress_rate;
	/* rates here are in units of 10^6 bits/sec */

	if (desired_egress_rate == -1)
		return 0; /* shouldn't happen */

	if (desired_egress_rate >= current_egress_rate)
		return 0; /* we can't help go faster, only slower */

	delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
		       egress_cycles(dw_len * 4, current_egress_rate);

	return (u16)delta_cycles;
}
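
/*
 * Editor's note (a sketch of the intent, not a spec): egress_cycles()
 * converts a byte length and a rate into fabric clock cycles.  If, for a
 * given length, it returned e.g. 100 cycles at the slower desired rate and
 * 60 cycles at the faster current rate, delay_cycles() would report the
 * 40-cycle difference, which create_pbc() programs as the PBC static-rate
 * delay so the packet egresses no faster than the desired rate.
 */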

/**
 * create_pbc - build a pbc for transmission
 * @ppd: the port data
 * @flags: special case flags or-ed into the built pbc
 * @srate_mbs: static rate, in Mb/s
 * @vl: virtual lane
 * @dw_len: dword length (header words + data words + pbc words)
 *
 * Create a PBC with the given flags, rate, VL, and length.
 *
 * NOTE: The PBC created will not insert any HCRC - all callers but one are
 * for verbs, which does not use this PSM feature.  The lone other caller
 * is for the diagnostic interface, which only calls this if the user does
 * not pass in a source, thereby not using this feature.
 */
u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
	       u32 dw_len)
{
	u64 pbc, delay = 0;

	if (unlikely(srate_mbs))
		delay = delay_cycles(ppd, srate_mbs, dw_len);

	pbc = flags
		| (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
		| ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
		| (vl & PBC_VL_MASK) << PBC_VL_SHIFT
		| (dw_len & PBC_LENGTH_DWS_MASK)
			<< PBC_LENGTH_DWS_SHIFT;

	return pbc;
}
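
/*
 * Editor's sketch of a typical call: for a 40-dword packet on VL 0 with
 * static rate control disabled (srate_mbs == 0 skips the delay math):
 *
 *	pbc = create_pbc(ppd, 0, 0, 0, 40);
 *
 * The result has a length field of 40 dwords, VL 0, a zero static-rate
 * count, and PBC_IHCRC_NONE, i.e. no HCRC insertion.
 */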

#define SBUS_THERMAL		0x4f
#define SBUS_THERM_MONITOR_MODE	0x1

#define THERM_FAILURE(dev, ret, reason) \
	dd_dev_err((dev), \
		   "Thermal sensor initialization failed: %s (%d)\n", \
		   (reason), (ret))

/*
 * Initialize the thermal sensor.
 *
 * After initialization, enable polling of the thermal sensor through the
 * SBus interface.  For this to work, the SBus Master firmware has to be
 * loaded, because the HW polling logic uses SBus interrupts, which are
 * not supported with the default firmware.  Otherwise, no data will be
 * returned through the ASIC_STS_THERM CSR.
 */
static int thermal_init(struct hfi1_devdata *dd)
{
	int ret = 0;

	if (dd->icode != ICODE_RTL_SILICON ||
	    check_chip_resource(dd, CR_THERM_INIT, NULL))
		return ret;

	ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
	if (ret) {
		THERM_FAILURE(dd, ret, "Acquire SBus");
		return ret;
	}

	dd_dev_info(dd, "Initializing thermal sensor\n");
	/* Disable polling of thermal readings */
	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
	msleep(100);
	/* Thermal Sensor Initialization */
	/*    Step 1: Reset the Thermal SBus Receiver */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				RESET_SBUS_RECEIVER, 0);
	if (ret) {
		THERM_FAILURE(dd, ret, "Bus Reset");
		goto done;
	}
	/*    Step 2: Set the Reset bit in the Thermal block */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				WRITE_SBUS_RECEIVER, 0x1);
	if (ret) {
		THERM_FAILURE(dd, ret, "Therm Block Reset");
		goto done;
	}
	/*    Step 3: Write the clock divider value (0x32) */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
				WRITE_SBUS_RECEIVER, 0x32);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Clock Div");
		goto done;
	}
	/*    Step 4: Select temperature monitor mode */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
				WRITE_SBUS_RECEIVER,
				SBUS_THERM_MONITOR_MODE);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Mode Sel");
		goto done;
	}
	/*    Step 5: De-assert block reset and start conversion */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				WRITE_SBUS_RECEIVER, 0x2);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Reset Deassert");
		goto done;
	}
	/* wait for the first conversion to complete */
	msleep(22);

	/* Enable polling of thermal readings */
	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);

	/* Set the initialized flag so the other HFI skips this sequence */
	ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
	if (ret)
		THERM_FAILURE(dd, ret, "Unable to set thermal init flag");

done:
	release_chip_resource(dd, CR_SBUS);
	return ret;
}

static void handle_temp_err(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd = &dd->pport[0];
	/*
	 * Thermal Critical Interrupt.
	 * Put the device into forced freeze mode, take the link down to
	 * offline, and put the DC into reset.
	 */
	dd_dev_emerg(dd,
		     "Critical temperature reached! Forcing device into freeze mode!\n");
	dd->flags |= HFI1_FORCED_FREEZE;
	start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
	/*
	 * Shut the DC down as much and as quickly as possible.
	 *
	 * Step 1: Take the link down to OFFLINE.  This will cause the
	 *         8051 to put the Serdes in reset.  However, we don't want
	 *         to go through the entire link state machine since we want
	 *         to shut down ASAP.  Furthermore, this is not a graceful
	 *         shutdown but rather an attempt to save the chip.
	 *         The code below is almost the same as quiet_serdes() but
	 *         avoids all the extra work and the sleeps.
	 */
	ppd->driver_link_ready = 0;
	ppd->link_enabled = 0;
	set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
				PLS_OFFLINE);
	/*
	 * Step 2: Shut down the DC by writing the DMC reset CSR, which
	 *         resets the 8051 and the DC hardware.
	 */
	dc_shutdown(dd);
}