1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52#include <linux/pci.h>
53#include <linux/delay.h>
54#include <linux/interrupt.h>
55#include <linux/module.h>
56
57#include "hfi.h"
58#include "trace.h"
59#include "mad.h"
60#include "pio.h"
61#include "sdma.h"
62#include "eprom.h"
63#include "efivar.h"
64#include "platform.h"
65#include "aspm.h"
66#include "affinity.h"
67#include "debugfs.h"
68#include "fault.h"
69
70uint kdeth_qp;
71module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
72MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
73
74uint num_vls = HFI1_MAX_VLS_SUPPORTED;
75module_param(num_vls, uint, S_IRUGO);
76MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
77
78
79
80
81
82
83
84
85uint rcv_intr_timeout = (824 + 16);
86module_param(rcv_intr_timeout, uint, S_IRUGO);
87MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
88
89uint rcv_intr_count = 16;
90module_param(rcv_intr_count, uint, S_IRUGO);
91MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
92
93ushort link_crc_mask = SUPPORTED_CRCS;
94module_param(link_crc_mask, ushort, S_IRUGO);
95MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
96
97uint loopback;
98module_param_named(loopback, loopback, uint, S_IRUGO);
99MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable");
100
101
102uint rcv_intr_dynamic = 1;
103static ushort crc_14b_sideband = 1;
104static uint use_flr = 1;
105uint quick_linkup;
106
/*
 * Generic bit -> description mapping used by the error-reporting tables
 * in this file.
 */
struct flag_table {
	u64 flag;	/* the flag (mask bit) */
	char *str;	/* description string for the flag */
	u16 extra;	/* extra information (e.g. SEC_* consequence bits) */
	u16 unused0;	/* padding */
	u32 unused1;	/* padding */
};
114
115
/* build a struct flag_table entry; str must be a string constant */
#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
/* same, with no extra (consequence) information */
#define FLAG_ENTRY0(str, flag) {flag, str, 0}

/* Send (error) Consequence bits, carried in flag_table.extra */
#define SEC_WRITE_DROPPED 0x1
#define SEC_PACKET_DROPPED 0x2
#define SEC_SC_HALTED 0x4
#define SEC_SPC_FREEZE 0x8

/* kernel receive queue / context sizing */
#define DEFAULT_KRCVQS 2
#define MIN_KERNEL_KCTXTS 2
#define FIRST_KERNEL_KCTXT 1
128
129
130
131
132
133
134
/* RSM instance numbers: one each for verbs, FECN, and VNIC */
#define RSM_INS_VERBS 0
#define RSM_INS_FECN 1
#define RSM_INS_VNIC 2

/* bit position of the HFI index field within the GUID */
#define GUID_HFI_INDEX_SHIFT 39

/* extract the emulation revision from dd->irev */
#define emulator_rev(dd) ((dd)->irev >> 8)
/* low nibble of irev: 3 and 4 distinguish the two emulator variants */
#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)

/*
 * RSM packet-match field descriptors.  Each *_OFFSET encodes
 * (quadword index << QW_SHIFT) | bit offset within that quadword.
 */
#define IB_PACKET_TYPE 2ull
#define QW_SHIFT 6ull
/* QPN field width, in bits */
#define QPN_WIDTH 7ull

/* LRH.BTH: quadword 0, bit offset 48 - for match */
#define LRH_BTH_QW 0ull
#define LRH_BTH_BIT_OFFSET 48ull
#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
/* NOTE(review): LRH_BTH_SELECT is defined empty and appears unused here - confirm */
#define LRH_BTH_SELECT
#define LRH_BTH_MASK 3ull
#define LRH_BTH_VALUE 2ull

/* LRH.SC[3..0]: quadword 0, bit offset 56 - for match */
#define LRH_SC_QW 0ull
#define LRH_SC_BIT_OFFSET 56ull
#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
#define LRH_SC_MASK 128ull
#define LRH_SC_VALUE 0ull

/* SC[n..0]: quadword 0, bit offset 60 - for select */
#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))

/* QPN[m+n:1]: quadword 1, bit offset 1 - for select */
#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))

/* L2 type: quadword 0, bit offset 61 - for match */
#define L2_TYPE_QW 0ull
#define L2_TYPE_BIT_OFFSET 61ull
#define L2_TYPE_OFFSET(off) ((L2_TYPE_QW << QW_SHIFT) | (off))
#define L2_TYPE_MATCH_OFFSET L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
#define L2_TYPE_MASK 3ull
#define L2_16B_VALUE 2ull

/* L4 type: quadword 1, bit offset 0 - for match */
#define L4_TYPE_QW 1ull
#define L4_TYPE_BIT_OFFSET 0ull
#define L4_TYPE_OFFSET(off) ((L4_TYPE_QW << QW_SHIFT) | (off))
#define L4_TYPE_MATCH_OFFSET L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
#define L4_16B_TYPE_MASK 0xFFull
#define L4_16B_ETH_VALUE 0x78ull

/* 16B VESWID: quadword 2, bit offset 16 - for select */
#define L4_16B_HDR_VESWID_OFFSET ((2 << QW_SHIFT) | (16ull))
/* 16B entropy: quadword 1, bit offset 32 - for select */
#define L2_16B_ENTROPY_OFFSET ((1 << QW_SHIFT) | (32ull))
199
200
/*
 * Build the power-on value of one SendSC2VLT CSR: each (scN, scNval)
 * pair places VL value scNval into the shift slot for service class scN
 * of table 'num'.
 */
#define SC2VL_VAL( \
	num, \
	sc0, sc0val, \
	sc1, sc1val, \
	sc2, sc2val, \
	sc3, sc3val, \
	sc4, sc4val, \
	sc5, sc5val, \
	sc6, sc6val, \
	sc7, sc7val) \
( \
	((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
	((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
	((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
	((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
	((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
	((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
	((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
	((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
)
221
/*
 * Build the value of one DCC SC-to-VL table CSR for the given entry
 * 'range': each (eN, eNval) pair places eNval into the shift slot for
 * entry eN.
 */
#define DC_SC_VL_VAL( \
	range, \
	e0, e0val, \
	e1, e1val, \
	e2, e2val, \
	e3, e3val, \
	e4, e4val, \
	e5, e5val, \
	e6, e6val, \
	e7, e7val, \
	e8, e8val, \
	e9, e9val, \
	e10, e10val, \
	e11, e11val, \
	e12, e12val, \
	e13, e13val, \
	e14, e14val, \
	e15, e15val) \
( \
	((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
	((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
	((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
	((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
	((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
	((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
	((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
	((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
	((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
	((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
	((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
	((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
	((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
	((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
	((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
	((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
)
258
259
/* all CceStatus sub-block "froze" bits */
#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
		| CCE_STATUS_RXE_FROZE_SMASK \
		| CCE_STATUS_TXE_FROZE_SMASK \
		| CCE_STATUS_TXE_PIO_FROZE_SMASK)
/* all CceStatus sub-block TXE pause bits */
#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
		| CCE_STATUS_TXE_PAUSED_SMASK \
		| CCE_STATUS_SDMA_PAUSED_SMASK)
/* all CceStatus sub-block RXE pause bits */
#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK

/* saturation values for counters */
#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
273
274
275
276
277static struct flag_table cce_err_status_flags[] = {
278 FLAG_ENTRY0("CceCsrParityErr",
279 CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
280 FLAG_ENTRY0("CceCsrReadBadAddrErr",
281 CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
282 FLAG_ENTRY0("CceCsrWriteBadAddrErr",
283 CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
284 FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
285 CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
286 FLAG_ENTRY0("CceTrgtAccessErr",
287 CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
288 FLAG_ENTRY0("CceRspdDataParityErr",
289 CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
290 FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
291 CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
292 FLAG_ENTRY0("CceCsrCfgBusParityErr",
293 CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
294 FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
295 CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
296 FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
297 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
298 FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
299 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
300 FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
301 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
302 FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
303 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
304 FLAG_ENTRY0("PcicRetryMemCorErr",
305 CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
306 FLAG_ENTRY0("PcicRetryMemCorErr",
307 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
308 FLAG_ENTRY0("PcicPostHdQCorErr",
309 CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
310 FLAG_ENTRY0("PcicPostHdQCorErr",
311 CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
312 FLAG_ENTRY0("PcicPostHdQCorErr",
313 CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
314 FLAG_ENTRY0("PcicCplDatQCorErr",
315 CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
316 FLAG_ENTRY0("PcicNPostHQParityErr",
317 CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
318 FLAG_ENTRY0("PcicNPostDatQParityErr",
319 CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
320 FLAG_ENTRY0("PcicRetryMemUncErr",
321 CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
322 FLAG_ENTRY0("PcicRetrySotMemUncErr",
323 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
324 FLAG_ENTRY0("PcicPostHdQUncErr",
325 CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
326 FLAG_ENTRY0("PcicPostDatQUncErr",
327 CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
328 FLAG_ENTRY0("PcicCplHdQUncErr",
329 CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
330 FLAG_ENTRY0("PcicCplDatQUncErr",
331 CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
332 FLAG_ENTRY0("PcicTransmitFrontParityErr",
333 CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
334 FLAG_ENTRY0("PcicTransmitBackParityErr",
335 CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
336 FLAG_ENTRY0("PcicReceiveParityErr",
337 CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
338 FLAG_ENTRY0("CceTrgtCplTimeoutErr",
339 CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
340 FLAG_ENTRY0("LATriggered",
341 CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
342 FLAG_ENTRY0("CceSegReadBadAddrErr",
343 CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
344 FLAG_ENTRY0("CceSegWriteBadAddrErr",
345 CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
346 FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
347 CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
348 FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
349 CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
350 FLAG_ENTRY0("CceMsixTableCorErr",
351 CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
352 FLAG_ENTRY0("CceMsixTableUncErr",
353 CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
354 FLAG_ENTRY0("CceIntMapCorErr",
355 CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
356 FLAG_ENTRY0("CceIntMapUncErr",
357 CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
358 FLAG_ENTRY0("CceMsixCsrParityErr",
359 CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
360
361};
362
363
364
365
/* Misc error status bit -> description table */
#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
static struct flag_table misc_err_status_flags[] = {
	FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
	FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
	FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
	FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
	FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
	FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
	FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
	FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
	FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
	FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
	FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
	FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
	FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
};
382
383
384
385
/*
 * SendPioErrStatus bit -> description table.  The middle (extra) field
 * carries the SEC_* consequence bits applied when the error fires.
 */
static struct flag_table pio_err_status_flags[] = {
	FLAG_ENTRY("PioWriteBadCtxt",
		SEC_WRITE_DROPPED,
		SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
	FLAG_ENTRY("PioWriteAddrParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioCsrParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioSbMemFifo0",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
	FLAG_ENTRY("PioSbMemFifo1",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
	FLAG_ENTRY("PioPccFifoParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioPecFifoParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioSbrdctlCrrelParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioPktEvictFifoParityErr",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioSmPktResetParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioVlLenMemBank0Unc",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
	FLAG_ENTRY("PioVlLenMemBank1Unc",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
	/* correctable errors: no consequence bits */
	FLAG_ENTRY("PioVlLenMemBank0Cor",
		0,
		SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
	FLAG_ENTRY("PioVlLenMemBank1Cor",
		0,
		SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
	FLAG_ENTRY("PioCreditRetFifoParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioPpmcPblFifo",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
	FLAG_ENTRY("PioInitSmIn",
		0,
		SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
	FLAG_ENTRY("PioPktEvictSmOrArbSm",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
	FLAG_ENTRY("PioHostAddrMemUnc",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
	FLAG_ENTRY("PioHostAddrMemCor",
		0,
		SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
	FLAG_ENTRY("PioWriteDataParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioStateMachine",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
	FLAG_ENTRY("PioWriteQwValidParity",
		SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioBlockQwCountParity",
		SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioVlfVlLenParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioVlfSopParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioVlFifoParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioPpmcBqcMemParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioPpmcSopLen",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),

	FLAG_ENTRY("PioCurrentFreeCntParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioLastReturnedCntParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioPccSopHeadParity",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioPecSopHeadParityErr",
		SEC_SPC_FREEZE,
		SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),

};
492
493
/*
 * PIO errors that cause an SPC freeze; matches the entries above that
 * carry SEC_SPC_FREEZE.
 */
#define ALL_PIO_FREEZE_ERR \
	(SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
524
525
526
527
/* SendDmaErrStatus bit -> description table */
static struct flag_table sdma_err_status_flags[] = {
	FLAG_ENTRY0("SDmaRpyTagErr",
		SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
	FLAG_ENTRY0("SDmaCsrParityErr",
		SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
	FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
	FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),

};
539
540
/* SDMA errors that cause an SPC freeze (the uncorrectable subset above) */
#define ALL_SDMA_FREEZE_ERR  \
		(SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
		| SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
		| SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)

/* SendEgressErrInfo bits counted as per-port packet discards */
#define PORT_DISCARD_EGRESS_ERRS \
	(SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
551
552
553
554
/* SendEgressErrStatus bit -> description table */
#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
static struct flag_table egress_err_status_flags[] = {
	FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
	FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),

	FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
		SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
	FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
	FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),

	FLAG_ENTRY0("TxPioLaunchIntfParityErr",
		SEES(TX_PIO_LAUNCH_INTF_PARITY)),
	FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
		SEES(TX_SDMA_LAUNCH_INTF_PARITY)),

	FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
		SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
	FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
	FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
	FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
	FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
	/* one "disallowed packet" bit per SDMA engine (0-15) */
	FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
		SEES(TX_SDMA0_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
		SEES(TX_SDMA1_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
		SEES(TX_SDMA2_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
		SEES(TX_SDMA3_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
		SEES(TX_SDMA4_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
		SEES(TX_SDMA5_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
		SEES(TX_SDMA6_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
		SEES(TX_SDMA7_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
		SEES(TX_SDMA8_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
		SEES(TX_SDMA9_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
		SEES(TX_SDMA10_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
		SEES(TX_SDMA11_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
		SEES(TX_SDMA12_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
		SEES(TX_SDMA13_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
		SEES(TX_SDMA14_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
		SEES(TX_SDMA15_DISALLOWED_PACKET)),
	/* one "unc or parity" bit per launch FIFO (0-8) */
	FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
		SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
	FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
		SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
	FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
		SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
	FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
		SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
	FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
		SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
	FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
		SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
	FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
		SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
	FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
		SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
	FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
		SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
	FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
	FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
	FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
	FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
	FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
	FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
	FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
	FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
	FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
	FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
	FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
	FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
	FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
	FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
	FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
	FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
	FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
	FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
	FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
	FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
	FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
	FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
		SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
	FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
		SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
};
652
653
654
655
/*
 * SendEgressErrInfo bit -> description table.
 * NOTE(review): "JobKeyErr" appears twice, both mapped to SEEI(JOB_KEY),
 * and "BypassBadPktLenErr" reuses SEEI(BAD_PKT_LEN) - verify these against
 * the hardware error-info register layout before changing.
 */
#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
static struct flag_table egress_err_info_flags[] = {
	FLAG_ENTRY0("Reserved", 0ull),
	FLAG_ENTRY0("VLErr", SEEI(VL)),
	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
	FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
	FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
	FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
	FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
	FLAG_ENTRY0("RawErr", SEEI(RAW)),
	FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
	FLAG_ENTRY0("GRHErr", SEEI(GRH)),
	FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
	FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
	FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
	FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
	FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
	FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
	FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
	FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
	FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
	FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
	FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
};
681
682
/* TXE egress errors that cause an SPC freeze */
#define ALL_TXE_EGRESS_FREEZE_ERR \
	(SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
	| SEES(TX_PIO_LAUNCH_INTF_PARITY) \
	| SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
	| SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
	| SEES(TX_LAUNCH_CSR_PARITY) \
	| SEES(TX_SBRD_CTL_CSR_PARITY) \
	| SEES(TX_CONFIG_PARITY) \
	| SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
	| SEES(TX_CREDIT_RETURN_PARITY))
701
702
703
704
/* SendErrStatus bit -> description table */
#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
static struct flag_table send_err_status_flags[] = {
	FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
	FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
	FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
};
711
712
713
714
/*
 * SendCtxtErrStatus bit -> description table; the extra field carries the
 * SEC_* consequence bits applied to the send context.
 */
static struct flag_table sc_err_status_flags[] = {
	FLAG_ENTRY("InconsistentSop",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
	FLAG_ENTRY("DisallowedPacket",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
	FLAG_ENTRY("WriteCrossesBoundary",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
	FLAG_ENTRY("WriteOverflow",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
	FLAG_ENTRY("WriteOutOfBounds",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),

};
733
734
735
736
/* RcvErrStatus (receive) bit -> description table */
#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
static struct flag_table rxe_err_status_flags[] = {
	FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
	FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
	FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
	FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
	FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
	FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
	FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
	FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
	FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
	FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
	FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
	FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
	FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
	FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
	FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
	FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
	FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
		RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
	FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
	FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
	FLAG_ENTRY0("RxRbufBlockListReadUncErr",
		RXES(RBUF_BLOCK_LIST_READ_UNC)),
	FLAG_ENTRY0("RxRbufBlockListReadCorErr",
		RXES(RBUF_BLOCK_LIST_READ_COR)),
	FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
		RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
	FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
		RXES(RBUF_CSR_QENT_CNT_PARITY)),
	FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
		RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
	FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
		RXES(RBUF_CSR_QVLD_BIT_PARITY)),
	FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
	FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
	FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
		RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
	FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
	FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
	FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
	FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
	FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
	FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
	FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
	FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
		RXES(RBUF_FL_INITDONE_PARITY)),
	FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
		RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
	FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
	FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
	FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
	FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
		RXES(LOOKUP_DES_PART1_UNC_COR)),
	FLAG_ENTRY0("RxLookupDesPart2ParityErr",
		RXES(LOOKUP_DES_PART2_PARITY)),
	FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
	FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
	FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
	FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
	FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
	FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
	FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
	FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
	FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
	FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
	FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
	FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
	FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
	FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
	FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
	FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
	FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
	FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
	FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
	FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
	FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
	FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
};
816
817
/* RXE errors that will trigger an SPC freeze */
#define ALL_RXE_FREEZE_ERR  \
	(RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)

/* freeze-mode RXE errors that abort the freeze sequence */
#define RXE_FREEZE_ABORT_MASK \
	(RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
868
869
870
871
/*
 * DCC (Data Control Channel) error flags: one human-readable entry per
 * bit in the DCC_ERR_FLG register, for decoding error interrupts.
 */
#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
static struct flag_table dcc_err_flags[] = {
	FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
	FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
	FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
	FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
	FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
	FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
	FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
	FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
	FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
	FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
	FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
	FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
	FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
	FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
	FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
	FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
	FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
	FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
	FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
	FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
	FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
	FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
	FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
	FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
	FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
	FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
	FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
	FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
	FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
	FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
	FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
	FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
	FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
	FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
	FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
	FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
	FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
	FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
};
921
922
923
924
/*
 * LCB (Link Control Block) error flags: one entry per bit in the
 * DC_LCB_ERR_FLG register.
 */
#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
static struct flag_table lcb_err_flags[] = {
	FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
	FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
	FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
	FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
		    LCBE(ALL_LNS_FAILED_REINIT_TEST)),
	FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
	FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
	FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
	FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
	FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
	FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
	FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
	FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
	FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
	FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
		    LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
	FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
	FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
	FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
	FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
	FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
	FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
		    LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
	FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
	FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
	FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
	FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
	FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
	FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
	FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
		    LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
	FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
	FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
		    LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
	FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
		    LCBE(REDUNDANT_FLIT_PARITY_ERR))
};
964
965
966
967
/*
 * DC8051 (the DC block's embedded 8051 microcontroller) error flags:
 * one entry per bit in the DC_DC8051_ERR_FLG register.
 */
#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
static struct flag_table dc8051_err_flags[] = {
	FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
	FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
	FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
	FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
	FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
	FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
	FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
	FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
	FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
		    D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
	FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
};
982
983
984
985
986
987
/*
 * 8051 information error flags.  The values are software-defined bit
 * codes (SPICO_ROM_FAILED, FAILED_LNI_* etc., defined elsewhere) that
 * the 8051 firmware reports; each entry maps one code to a message.
 */
static struct flag_table dc8051_info_err_flags[] = {
	FLAG_ENTRY0("Spico ROM check failed",  SPICO_ROM_FAILED),
	FLAG_ENTRY0("Unknown frame received",  UNKNOWN_FRAME),
	FLAG_ENTRY0("Target BER not met",      TARGET_BER_NOT_MET),
	FLAG_ENTRY0("Serdes internal loopback failure",
		    FAILED_SERDES_INTERNAL_LOOPBACK),
	FLAG_ENTRY0("Failed SerDes init",      FAILED_SERDES_INIT),
	FLAG_ENTRY0("Failed LNI(Polling)",     FAILED_LNI_POLLING),
	FLAG_ENTRY0("Failed LNI(Debounce)",    FAILED_LNI_DEBOUNCE),
	FLAG_ENTRY0("Failed LNI(EstbComm)",    FAILED_LNI_ESTBCOMM),
	FLAG_ENTRY0("Failed LNI(OptEq)",       FAILED_LNI_OPTEQ),
	FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
	FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
	FLAG_ENTRY0("Failed LNI(ConfigLT)",    FAILED_LNI_CONFIGLT),
	FLAG_ENTRY0("Host Handshake Timeout",  HOST_HANDSHAKE_TIMEOUT),
	FLAG_ENTRY0("External Device Request Timeout",
		    EXTERNAL_DEVICE_REQ_TIMEOUT),
};
1006
1007
1008
1009
1010
1011
/*
 * 8051 host-message flags.  The literal bit values are the message
 * codes the 8051 firmware delivers in its host-message register; each
 * entry maps one code to a printable description.
 */
static struct flag_table dc8051_info_host_msg_flags[] = {
	FLAG_ENTRY0("Host request done", 0x0001),
	FLAG_ENTRY0("BC PWR_MGM message", 0x0002),
	FLAG_ENTRY0("BC SMA message", 0x0004),
	FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
	FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
	FLAG_ENTRY0("External device config request", 0x0020),
	FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
	FLAG_ENTRY0("LinkUp achieved", 0x0080),
	FLAG_ENTRY0("Link going down", 0x0100),
	FLAG_ENTRY0("Link width downgraded", 0x0200),
};
1024
1025static u32 encoded_size(u32 size);
1026static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
1027static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
1028static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
1029 u8 *continuous);
1030static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
1031 u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
1032static void read_vc_remote_link_width(struct hfi1_devdata *dd,
1033 u8 *remote_tx_rate, u16 *link_widths);
1034static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
1035 u8 *flag_bits, u16 *link_widths);
1036static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
1037 u8 *device_rev);
1038static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
1039static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
1040 u8 *tx_polarity_inversion,
1041 u8 *rx_polarity_inversion, u8 *max_rate);
1042static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1043 unsigned int context, u64 err_status);
1044static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1045static void handle_dcc_err(struct hfi1_devdata *dd,
1046 unsigned int context, u64 err_status);
1047static void handle_lcb_err(struct hfi1_devdata *dd,
1048 unsigned int context, u64 err_status);
1049static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1050static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1051static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1052static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1053static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1054static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1055static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1056static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1057static void set_partition_keys(struct hfi1_pportdata *ppd);
1058static const char *link_state_name(u32 state);
1059static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1060 u32 state);
1061static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1062 u64 *out_data);
1063static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1064static int thermal_init(struct hfi1_devdata *dd);
1065
1066static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
1067static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
1068 int msecs);
1069static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1070 int msecs);
1071static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
1072static void log_physical_state(struct hfi1_pportdata *ppd, u32 state);
1073static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1074 int msecs);
1075static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
1076 int msecs);
1077static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
1078static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
1079static void handle_temp_err(struct hfi1_devdata *dd);
1080static void dc_shutdown(struct hfi1_devdata *dd);
1081static void dc_start(struct hfi1_devdata *dd);
1082static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
1083 unsigned int *np);
1084static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
1085static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
1086static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);
1087static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width);
1088
1089
1090
1091
1092
1093
1094
/*
 * Describes one error-interrupt source: the CSR offsets of its status,
 * clear, and mask registers, plus the handler called with the raw
 * status value.  Built with the EE()/DC_EE1()/DC_EE2() macros below.
 */
struct err_reg_info {
	u32 status;	/* status CSR offset */
	u32 clear;	/* clear CSR offset */
	u32 mask;	/* mask CSR offset */
	/* handler(dd, interrupt source, status register value) */
	void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
	const char *desc;	/* name used in log messages */
};
1102
/* number of interrupt sources in each IS_* interrupt-source range */
#define NUM_MISC_ERRS (IS_GENERAL_ERR_END + 1 - IS_GENERAL_ERR_START)
#define NUM_DC_ERRS (IS_DC_END + 1 - IS_DC_START)
#define NUM_VARIOUS (IS_VARIOUS_END + 1 - IS_VARIOUS_START)

/*
 * Build an err_reg_info from a register-name prefix.  EE() is for CSRs
 * with _STATUS/_CLEAR/_MASK suffixes; DC_EE1() and DC_EE2() cover the
 * two flag-register suffix styles (_FLG_CLR/_FLG_EN vs _CLR/_EN) used
 * within the DC block.
 */
#define EE(reg, handler, desc) \
	{ reg##_STATUS, reg##_CLEAR, reg##_MASK, \
	  handler, desc }
#define DC_EE1(reg, handler, desc) \
	{ reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
#define DC_EE2(reg, handler, desc) \
	{ reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
1118
1119
1120
1121
1122
/*
 * Table of the "general error" interrupt sources, indexed by source
 * number within the IS_GENERAL_ERR_* range.  Entry order is
 * significant; index 3 is an empty placeholder (no handler).
 */
static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
/* 0 */	EE(CCE_ERR, handle_cce_err, "CceErr"),
/* 1 */	EE(RCV_ERR, handle_rxe_err, "RxeErr"),
/* 2 */	EE(MISC_ERR, handle_misc_err, "MiscErr"),
/* 3 */	{ 0, 0, 0, NULL }, /* reserved / unused slot */
/* 4 */	EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
/* 5 */	EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
/* 6 */	EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
/* 7 */	EE(SEND_ERR, handle_txe_err, "TxeErr")

};
1134
1135
1136
1137
1138
/* index of the thermal-critical interrupt within the "various" range */
#define TCRIT_INT_SOURCE 4

/* error-register description for the per-engine SDMA error interrupt */
static const struct err_reg_info sdma_eng_err =
	EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");

/*
 * Table of "various" interrupt sources, indexed within the
 * IS_VARIOUS_* range.  Only the two QSFP sources have handlers here;
 * the remaining slots (including TCRIT_INT_SOURCE) are empty and are
 * presumably handled by other paths — confirm at the dispatch site.
 */
static const struct err_reg_info various_err[NUM_VARIOUS] = {
/* 0 */	{ 0, 0, 0, NULL },
/* 1 */	{ 0, 0, 0, NULL },
/* 2 */	EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
/* 3 */	EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
/* 4 */	{ 0, 0, 0, NULL },

};
1156
1157
1158
1159
1160
1161
1162
/* DCC_CFG_PORT_CONFIG MTU-capability encoding for a 10240-byte MTU */
#define DCC_CFG_PORT_MTU_CAP_10240 7

/*
 * Table of the DC interrupt sources, indexed within the IS_DC_* range.
 * Entry order is significant.
 */
static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
/* 0 */	DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
/* 1 */	DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
/* 2 */	DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
	/* remaining slots are zero-initialized (no handler) */

};
1176
/*
 * Descriptor for one device/port counter.  Instances are built with
 * the CNTR_ELEM() family of macros below and are accessed through
 * rw_cntr() in either read (CNTR_MODE_R) or write (CNTR_MODE_W) mode.
 */
struct cntr_entry {
	/* counter name */
	char *name;

	/*
	 * CSR address/offset for hardware-backed counters; 0 for pure
	 * software counters (see SW_IBP_CNTR below)
	 */
	u64 csr;

	/*
	 * offset field; not used by the accessors visible in this part
	 * of the file — presumably an offset into per-device/per-port
	 * storage, confirm at the counter read/write sites
	 */
	int offset;

	/* CNTR_* flags (e.g. CNTR_32BIT, CNTR_VL, CNTR_SDMA, CNTR_SYNTH) */
	u8 flags;

	/*
	 * accessor: context is a dd or ppd pointer depending on the
	 * counter; vl is a VL or engine index (or CNTR_INVALID_VL);
	 * mode selects read or write; data is the value on writes
	 */
	u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
		       int mode, u64 data);
};
1204
/* range of the per-context receive header overflow counters */
#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159

/* base initializer for a struct cntr_entry */
#define CNTR_ELEM(name, csr, offset, flags, accessor) \
{ \
	name, \
	csr, \
	offset, \
	flags, \
	accessor \
}

/* 32-bit RXE counters: 8 bytes per slot in the RCV 32-bit counter array */
#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* 64-bit RXE counters in the RCV 64-bit counter array */
#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  dev_access_u64_csr)

/* per-context receive header overflow counter, 0x100 bytes per context */
#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
#define OVR_ELM(ctx) \
CNTR_ELEM("RcvHdrOvr" #ctx, \
	  (RCV_HDR_OVFL_CNT + ctx * 0x100), \
	  0, CNTR_NORMAL, port_access_u64_csr)

/* 32-bit TXE counters in the SEND 32-bit counter array */
#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

/* 64-bit TXE counters in the SEND 64-bit counter array */
#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

# define TX64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name,\
	  counter * 8 + SEND_COUNTER_ARRAY64, \
	  0, \
	  flags, \
	  dev_access_u64_csr)

/* CCE performance and interrupt counters (32-bit arrays) */
#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* DC counters: the csr field holds the full CSR address directly */
#define DC_PERF_CNTR(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dev_access_u64_csr)

/* DC LCB counters, accessed through the LCB-aware accessor */
#define DC_PERF_CNTR_LCB(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dc_access_lcb_cntr)

/* pure software counters backed by an access_ibp_* accessor */
#define SW_IBP_CNTR(name, cntr) \
CNTR_ELEM(#name, \
	  0, \
	  0, \
	  CNTR_SYNTH, \
	  access_ibp_##cntr)
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314static inline void __iomem *hfi1_addr_from_offset(
1315 const struct hfi1_devdata *dd,
1316 u32 offset)
1317{
1318 if (offset >= dd->base2_start)
1319 return dd->kregbase2 + (offset - dd->base2_start);
1320 return dd->kregbase1 + offset;
1321}
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1332{
1333 if (dd->flags & HFI1_PRESENT)
1334 return readq(hfi1_addr_from_offset(dd, offset));
1335 return -1;
1336}
1337
1338
1339
1340
1341
1342
1343
1344void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1345{
1346 if (dd->flags & HFI1_PRESENT) {
1347 void __iomem *base = hfi1_addr_from_offset(dd, offset);
1348
1349
1350 if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start))
1351 return;
1352 writeq(value, base);
1353 }
1354}
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364void __iomem *get_csr_addr(
1365 const struct hfi1_devdata *dd,
1366 u32 offset)
1367{
1368 if (dd->flags & HFI1_PRESENT)
1369 return hfi1_addr_from_offset(dd, offset);
1370 return NULL;
1371}
1372
1373static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1374 int mode, u64 value)
1375{
1376 u64 ret;
1377
1378 if (mode == CNTR_MODE_R) {
1379 ret = read_csr(dd, csr);
1380 } else if (mode == CNTR_MODE_W) {
1381 write_csr(dd, csr, value);
1382 ret = value;
1383 } else {
1384 dd_dev_err(dd, "Invalid cntr register access mode");
1385 return 0;
1386 }
1387
1388 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1389 return ret;
1390}
1391
1392
1393static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1394 void *context, int vl, int mode, u64 data)
1395{
1396 struct hfi1_devdata *dd = context;
1397 u64 csr = entry->csr;
1398
1399 if (entry->flags & CNTR_SDMA) {
1400 if (vl == CNTR_INVALID_VL)
1401 return 0;
1402 csr += 0x100 * vl;
1403 } else {
1404 if (vl != CNTR_INVALID_VL)
1405 return 0;
1406 }
1407 return read_write_csr(dd, csr, mode, data);
1408}
1409
1410static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1411 void *context, int idx, int mode, u64 data)
1412{
1413 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1414
1415 if (dd->per_sdma && idx < dd->num_sdma)
1416 return dd->per_sdma[idx].err_cnt;
1417 return 0;
1418}
1419
1420static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1421 void *context, int idx, int mode, u64 data)
1422{
1423 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1424
1425 if (dd->per_sdma && idx < dd->num_sdma)
1426 return dd->per_sdma[idx].sdma_int_cnt;
1427 return 0;
1428}
1429
1430static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1431 void *context, int idx, int mode, u64 data)
1432{
1433 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1434
1435 if (dd->per_sdma && idx < dd->num_sdma)
1436 return dd->per_sdma[idx].idle_int_cnt;
1437 return 0;
1438}
1439
1440static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1441 void *context, int idx, int mode,
1442 u64 data)
1443{
1444 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1445
1446 if (dd->per_sdma && idx < dd->num_sdma)
1447 return dd->per_sdma[idx].progress_int_cnt;
1448 return 0;
1449}
1450
1451static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1452 int vl, int mode, u64 data)
1453{
1454 struct hfi1_devdata *dd = context;
1455
1456 u64 val = 0;
1457 u64 csr = entry->csr;
1458
1459 if (entry->flags & CNTR_VL) {
1460 if (vl == CNTR_INVALID_VL)
1461 return 0;
1462 csr += 8 * vl;
1463 } else {
1464 if (vl != CNTR_INVALID_VL)
1465 return 0;
1466 }
1467
1468 val = read_write_csr(dd, csr, mode, data);
1469 return val;
1470}
1471
1472static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1473 int vl, int mode, u64 data)
1474{
1475 struct hfi1_devdata *dd = context;
1476 u32 csr = entry->csr;
1477 int ret = 0;
1478
1479 if (vl != CNTR_INVALID_VL)
1480 return 0;
1481 if (mode == CNTR_MODE_R)
1482 ret = read_lcb_csr(dd, csr, &data);
1483 else if (mode == CNTR_MODE_W)
1484 ret = write_lcb_csr(dd, csr, data);
1485
1486 if (ret) {
1487 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1488 return 0;
1489 }
1490
1491 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1492 return data;
1493}
1494
1495
1496static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1497 int vl, int mode, u64 data)
1498{
1499 struct hfi1_pportdata *ppd = context;
1500
1501 if (vl != CNTR_INVALID_VL)
1502 return 0;
1503 return read_write_csr(ppd->dd, entry->csr, mode, data);
1504}
1505
1506static u64 port_access_u64_csr(const struct cntr_entry *entry,
1507 void *context, int vl, int mode, u64 data)
1508{
1509 struct hfi1_pportdata *ppd = context;
1510 u64 val;
1511 u64 csr = entry->csr;
1512
1513 if (entry->flags & CNTR_VL) {
1514 if (vl == CNTR_INVALID_VL)
1515 return 0;
1516 csr += 8 * vl;
1517 } else {
1518 if (vl != CNTR_INVALID_VL)
1519 return 0;
1520 }
1521 val = read_write_csr(ppd->dd, csr, mode, data);
1522 return val;
1523}
1524
1525
1526static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1527 u64 data)
1528{
1529 u64 ret;
1530
1531 if (mode == CNTR_MODE_R) {
1532 ret = *cntr;
1533 } else if (mode == CNTR_MODE_W) {
1534 *cntr = data;
1535 ret = data;
1536 } else {
1537 dd_dev_err(dd, "Invalid cntr sw access mode");
1538 return 0;
1539 }
1540
1541 hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1542
1543 return ret;
1544}
1545
1546static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1547 int vl, int mode, u64 data)
1548{
1549 struct hfi1_pportdata *ppd = context;
1550
1551 if (vl != CNTR_INVALID_VL)
1552 return 0;
1553 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1554}
1555
1556static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1557 int vl, int mode, u64 data)
1558{
1559 struct hfi1_pportdata *ppd = context;
1560
1561 if (vl != CNTR_INVALID_VL)
1562 return 0;
1563 return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1564}
1565
1566static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1567 void *context, int vl, int mode,
1568 u64 data)
1569{
1570 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1571
1572 if (vl != CNTR_INVALID_VL)
1573 return 0;
1574 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1575}
1576
1577static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1578 void *context, int vl, int mode, u64 data)
1579{
1580 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1581 u64 zero = 0;
1582 u64 *counter;
1583
1584 if (vl == CNTR_INVALID_VL)
1585 counter = &ppd->port_xmit_discards;
1586 else if (vl >= 0 && vl < C_VL_COUNT)
1587 counter = &ppd->port_xmit_discards_vl[vl];
1588 else
1589 counter = &zero;
1590
1591 return read_write_sw(ppd->dd, counter, mode, data);
1592}
1593
1594static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1595 void *context, int vl, int mode,
1596 u64 data)
1597{
1598 struct hfi1_pportdata *ppd = context;
1599
1600 if (vl != CNTR_INVALID_VL)
1601 return 0;
1602
1603 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1604 mode, data);
1605}
1606
1607static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1608 void *context, int vl, int mode, u64 data)
1609{
1610 struct hfi1_pportdata *ppd = context;
1611
1612 if (vl != CNTR_INVALID_VL)
1613 return 0;
1614
1615 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1616 mode, data);
1617}
1618
1619u64 get_all_cpu_total(u64 __percpu *cntr)
1620{
1621 int cpu;
1622 u64 counter = 0;
1623
1624 for_each_possible_cpu(cpu)
1625 counter += *per_cpu_ptr(cntr, cpu);
1626 return counter;
1627}
1628
1629static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1630 u64 __percpu *cntr,
1631 int vl, int mode, u64 data)
1632{
1633 u64 ret = 0;
1634
1635 if (vl != CNTR_INVALID_VL)
1636 return 0;
1637
1638 if (mode == CNTR_MODE_R) {
1639 ret = get_all_cpu_total(cntr) - *z_val;
1640 } else if (mode == CNTR_MODE_W) {
1641
1642 if (data == 0)
1643 *z_val = get_all_cpu_total(cntr);
1644 else
1645 dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1646 } else {
1647 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1648 return 0;
1649 }
1650
1651 return ret;
1652}
1653
1654static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1655 void *context, int vl, int mode, u64 data)
1656{
1657 struct hfi1_devdata *dd = context;
1658
1659 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1660 mode, data);
1661}
1662
1663static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1664 void *context, int vl, int mode, u64 data)
1665{
1666 struct hfi1_devdata *dd = context;
1667
1668 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1669 mode, data);
1670}
1671
1672static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1673 void *context, int vl, int mode, u64 data)
1674{
1675 struct hfi1_devdata *dd = context;
1676
1677 return dd->verbs_dev.n_piowait;
1678}
1679
1680static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1681 void *context, int vl, int mode, u64 data)
1682{
1683 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1684
1685 return dd->verbs_dev.n_piodrain;
1686}
1687
1688static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1689 void *context, int vl, int mode, u64 data)
1690{
1691 struct hfi1_devdata *dd = context;
1692
1693 return dd->verbs_dev.n_txwait;
1694}
1695
1696static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1697 void *context, int vl, int mode, u64 data)
1698{
1699 struct hfi1_devdata *dd = context;
1700
1701 return dd->verbs_dev.n_kmem_wait;
1702}
1703
1704static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1705 void *context, int vl, int mode, u64 data)
1706{
1707 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1708
1709 return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1710 mode, data);
1711}
1712
1713
/*
 * Software counters for MISC error status bits: each accessor returns
 * one slot of dd->misc_err_status_cnt[].  The index encodes which
 * error the slot tracks (the functions are declared in descending
 * index order, 12 down to 0, and the mapping is position-sensitive).
 * NOTE(review): the slots are presumably incremented by the MISC
 * error handler (handle_misc_err) — confirm at the increment site.
 */
static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[12];
}

static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[11];
}

static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[10];
}

static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[9];
}

static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[8];
}

static u64 access_misc_efuse_read_bad_addr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[7];
}

static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[6];
}

static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[5];
}

static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[4];
}

static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[3];
}

static u64 access_misc_csr_write_bad_addr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[2];
}

static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[1];
}

static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[0];
}
1830
1831
1832
1833
1834
/*
 * Software counters for CCE error status bits: the aggregate of all
 * CCE errors (dd->sw_cce_err_status_aggregate) followed by per-bit
 * accessors returning slots of dd->cce_err_status_cnt[] in descending
 * index order (40 downward).  The index↔bit mapping is
 * position-sensitive; do not reorder these functions.
 */
static u64 access_sw_cce_err_status_aggregated_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_cce_err_status_aggregate;
}

/*
 * Per-bit CCE error counters.  NOTE(review): slots are presumably
 * incremented by the CCE error handler (handle_cce_err) — confirm at
 * the increment site.
 */
static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[40];
}

static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[39];
}

static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[38];
}

static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[37];
}

static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[36];
}

static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[35];
}

static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[34];
}

static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[33];
}

static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
						void *context, int vl, int mode,
						u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[32];
}

static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[31];
}

static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[30];
}
1945
1946static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1947 void *context, int vl, int mode,
1948 u64 data)
1949{
1950 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1951
1952 return dd->cce_err_status_cnt[29];
1953}
1954
1955static u64 access_pcic_transmit_back_parity_err_cnt(
1956 const struct cntr_entry *entry,
1957 void *context, int vl, int mode, u64 data)
1958{
1959 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1960
1961 return dd->cce_err_status_cnt[28];
1962}
1963
1964static u64 access_pcic_transmit_front_parity_err_cnt(
1965 const struct cntr_entry *entry,
1966 void *context, int vl, int mode, u64 data)
1967{
1968 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1969
1970 return dd->cce_err_status_cnt[27];
1971}
1972
1973static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1974 void *context, int vl, int mode,
1975 u64 data)
1976{
1977 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1978
1979 return dd->cce_err_status_cnt[26];
1980}
1981
1982static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1983 void *context, int vl, int mode,
1984 u64 data)
1985{
1986 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1987
1988 return dd->cce_err_status_cnt[25];
1989}
1990
1991static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1992 void *context, int vl, int mode,
1993 u64 data)
1994{
1995 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1996
1997 return dd->cce_err_status_cnt[24];
1998}
1999
2000static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
2001 void *context, int vl, int mode,
2002 u64 data)
2003{
2004 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2005
2006 return dd->cce_err_status_cnt[23];
2007}
2008
2009static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
2010 void *context, int vl,
2011 int mode, u64 data)
2012{
2013 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2014
2015 return dd->cce_err_status_cnt[22];
2016}
2017
2018static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
2019 void *context, int vl, int mode,
2020 u64 data)
2021{
2022 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2023
2024 return dd->cce_err_status_cnt[21];
2025}
2026
2027static u64 access_pcic_n_post_dat_q_parity_err_cnt(
2028 const struct cntr_entry *entry,
2029 void *context, int vl, int mode, u64 data)
2030{
2031 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2032
2033 return dd->cce_err_status_cnt[20];
2034}
2035
2036static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
2037 void *context, int vl,
2038 int mode, u64 data)
2039{
2040 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2041
2042 return dd->cce_err_status_cnt[19];
2043}
2044
2045static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2046 void *context, int vl, int mode,
2047 u64 data)
2048{
2049 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2050
2051 return dd->cce_err_status_cnt[18];
2052}
2053
2054static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2055 void *context, int vl, int mode,
2056 u64 data)
2057{
2058 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2059
2060 return dd->cce_err_status_cnt[17];
2061}
2062
2063static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2064 void *context, int vl, int mode,
2065 u64 data)
2066{
2067 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2068
2069 return dd->cce_err_status_cnt[16];
2070}
2071
2072static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2073 void *context, int vl, int mode,
2074 u64 data)
2075{
2076 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2077
2078 return dd->cce_err_status_cnt[15];
2079}
2080
2081static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
2082 void *context, int vl,
2083 int mode, u64 data)
2084{
2085 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2086
2087 return dd->cce_err_status_cnt[14];
2088}
2089
2090static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
2091 void *context, int vl, int mode,
2092 u64 data)
2093{
2094 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2095
2096 return dd->cce_err_status_cnt[13];
2097}
2098
2099static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2100 const struct cntr_entry *entry,
2101 void *context, int vl, int mode, u64 data)
2102{
2103 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2104
2105 return dd->cce_err_status_cnt[12];
2106}
2107
2108static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2109 const struct cntr_entry *entry,
2110 void *context, int vl, int mode, u64 data)
2111{
2112 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2113
2114 return dd->cce_err_status_cnt[11];
2115}
2116
2117static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2118 const struct cntr_entry *entry,
2119 void *context, int vl, int mode, u64 data)
2120{
2121 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2122
2123 return dd->cce_err_status_cnt[10];
2124}
2125
2126static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2127 const struct cntr_entry *entry,
2128 void *context, int vl, int mode, u64 data)
2129{
2130 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2131
2132 return dd->cce_err_status_cnt[9];
2133}
2134
2135static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2136 const struct cntr_entry *entry,
2137 void *context, int vl, int mode, u64 data)
2138{
2139 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2140
2141 return dd->cce_err_status_cnt[8];
2142}
2143
2144static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2145 void *context, int vl,
2146 int mode, u64 data)
2147{
2148 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2149
2150 return dd->cce_err_status_cnt[7];
2151}
2152
2153static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2154 const struct cntr_entry *entry,
2155 void *context, int vl, int mode, u64 data)
2156{
2157 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2158
2159 return dd->cce_err_status_cnt[6];
2160}
2161
2162static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2163 void *context, int vl, int mode,
2164 u64 data)
2165{
2166 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2167
2168 return dd->cce_err_status_cnt[5];
2169}
2170
2171static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2172 void *context, int vl, int mode,
2173 u64 data)
2174{
2175 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2176
2177 return dd->cce_err_status_cnt[4];
2178}
2179
2180static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2181 const struct cntr_entry *entry,
2182 void *context, int vl, int mode, u64 data)
2183{
2184 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2185
2186 return dd->cce_err_status_cnt[3];
2187}
2188
2189static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2190 void *context, int vl,
2191 int mode, u64 data)
2192{
2193 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2194
2195 return dd->cce_err_status_cnt[2];
2196}
2197
2198static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2199 void *context, int vl,
2200 int mode, u64 data)
2201{
2202 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2203
2204 return dd->cce_err_status_cnt[1];
2205}
2206
2207static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2208 void *context, int vl, int mode,
2209 u64 data)
2210{
2211 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2212
2213 return dd->cce_err_status_cnt[0];
2214}
2215
2216
2217
2218
2219
2220static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2221 void *context, int vl, int mode,
2222 u64 data)
2223{
2224 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2225
2226 return dd->rcv_err_status_cnt[63];
2227}
2228
2229static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2230 void *context, int vl,
2231 int mode, u64 data)
2232{
2233 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2234
2235 return dd->rcv_err_status_cnt[62];
2236}
2237
2238static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2239 void *context, int vl, int mode,
2240 u64 data)
2241{
2242 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2243
2244 return dd->rcv_err_status_cnt[61];
2245}
2246
2247static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2248 void *context, int vl, int mode,
2249 u64 data)
2250{
2251 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2252
2253 return dd->rcv_err_status_cnt[60];
2254}
2255
2256static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2257 void *context, int vl,
2258 int mode, u64 data)
2259{
2260 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2261
2262 return dd->rcv_err_status_cnt[59];
2263}
2264
2265static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2266 void *context, int vl,
2267 int mode, u64 data)
2268{
2269 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2270
2271 return dd->rcv_err_status_cnt[58];
2272}
2273
2274static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2275 void *context, int vl, int mode,
2276 u64 data)
2277{
2278 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2279
2280 return dd->rcv_err_status_cnt[57];
2281}
2282
2283static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2284 void *context, int vl, int mode,
2285 u64 data)
2286{
2287 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2288
2289 return dd->rcv_err_status_cnt[56];
2290}
2291
2292static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2293 void *context, int vl, int mode,
2294 u64 data)
2295{
2296 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2297
2298 return dd->rcv_err_status_cnt[55];
2299}
2300
2301static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2302 const struct cntr_entry *entry,
2303 void *context, int vl, int mode, u64 data)
2304{
2305 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2306
2307 return dd->rcv_err_status_cnt[54];
2308}
2309
2310static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2311 const struct cntr_entry *entry,
2312 void *context, int vl, int mode, u64 data)
2313{
2314 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2315
2316 return dd->rcv_err_status_cnt[53];
2317}
2318
2319static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2320 void *context, int vl,
2321 int mode, u64 data)
2322{
2323 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2324
2325 return dd->rcv_err_status_cnt[52];
2326}
2327
2328static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2329 void *context, int vl,
2330 int mode, u64 data)
2331{
2332 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2333
2334 return dd->rcv_err_status_cnt[51];
2335}
2336
2337static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2338 void *context, int vl,
2339 int mode, u64 data)
2340{
2341 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2342
2343 return dd->rcv_err_status_cnt[50];
2344}
2345
2346static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2347 void *context, int vl,
2348 int mode, u64 data)
2349{
2350 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2351
2352 return dd->rcv_err_status_cnt[49];
2353}
2354
2355static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2356 void *context, int vl,
2357 int mode, u64 data)
2358{
2359 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2360
2361 return dd->rcv_err_status_cnt[48];
2362}
2363
2364static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2365 void *context, int vl,
2366 int mode, u64 data)
2367{
2368 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2369
2370 return dd->rcv_err_status_cnt[47];
2371}
2372
2373static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2374 void *context, int vl, int mode,
2375 u64 data)
2376{
2377 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2378
2379 return dd->rcv_err_status_cnt[46];
2380}
2381
2382static u64 access_rx_hq_intr_csr_parity_err_cnt(
2383 const struct cntr_entry *entry,
2384 void *context, int vl, int mode, u64 data)
2385{
2386 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2387
2388 return dd->rcv_err_status_cnt[45];
2389}
2390
2391static u64 access_rx_lookup_csr_parity_err_cnt(
2392 const struct cntr_entry *entry,
2393 void *context, int vl, int mode, u64 data)
2394{
2395 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2396
2397 return dd->rcv_err_status_cnt[44];
2398}
2399
2400static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2401 const struct cntr_entry *entry,
2402 void *context, int vl, int mode, u64 data)
2403{
2404 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2405
2406 return dd->rcv_err_status_cnt[43];
2407}
2408
2409static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2410 const struct cntr_entry *entry,
2411 void *context, int vl, int mode, u64 data)
2412{
2413 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2414
2415 return dd->rcv_err_status_cnt[42];
2416}
2417
2418static u64 access_rx_lookup_des_part2_parity_err_cnt(
2419 const struct cntr_entry *entry,
2420 void *context, int vl, int mode, u64 data)
2421{
2422 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2423
2424 return dd->rcv_err_status_cnt[41];
2425}
2426
2427static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2428 const struct cntr_entry *entry,
2429 void *context, int vl, int mode, u64 data)
2430{
2431 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2432
2433 return dd->rcv_err_status_cnt[40];
2434}
2435
2436static u64 access_rx_lookup_des_part1_unc_err_cnt(
2437 const struct cntr_entry *entry,
2438 void *context, int vl, int mode, u64 data)
2439{
2440 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2441
2442 return dd->rcv_err_status_cnt[39];
2443}
2444
2445static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2446 const struct cntr_entry *entry,
2447 void *context, int vl, int mode, u64 data)
2448{
2449 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2450
2451 return dd->rcv_err_status_cnt[38];
2452}
2453
2454static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2455 const struct cntr_entry *entry,
2456 void *context, int vl, int mode, u64 data)
2457{
2458 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2459
2460 return dd->rcv_err_status_cnt[37];
2461}
2462
2463static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2464 const struct cntr_entry *entry,
2465 void *context, int vl, int mode, u64 data)
2466{
2467 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2468
2469 return dd->rcv_err_status_cnt[36];
2470}
2471
2472static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2473 const struct cntr_entry *entry,
2474 void *context, int vl, int mode, u64 data)
2475{
2476 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2477
2478 return dd->rcv_err_status_cnt[35];
2479}
2480
2481static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2482 const struct cntr_entry *entry,
2483 void *context, int vl, int mode, u64 data)
2484{
2485 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2486
2487 return dd->rcv_err_status_cnt[34];
2488}
2489
2490static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2491 const struct cntr_entry *entry,
2492 void *context, int vl, int mode, u64 data)
2493{
2494 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2495
2496 return dd->rcv_err_status_cnt[33];
2497}
2498
2499static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2500 void *context, int vl, int mode,
2501 u64 data)
2502{
2503 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2504
2505 return dd->rcv_err_status_cnt[32];
2506}
2507
2508static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2509 void *context, int vl, int mode,
2510 u64 data)
2511{
2512 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2513
2514 return dd->rcv_err_status_cnt[31];
2515}
2516
2517static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2518 void *context, int vl, int mode,
2519 u64 data)
2520{
2521 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2522
2523 return dd->rcv_err_status_cnt[30];
2524}
2525
2526static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2527 void *context, int vl, int mode,
2528 u64 data)
2529{
2530 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2531
2532 return dd->rcv_err_status_cnt[29];
2533}
2534
2535static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2536 void *context, int vl,
2537 int mode, u64 data)
2538{
2539 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2540
2541 return dd->rcv_err_status_cnt[28];
2542}
2543
2544static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2545 const struct cntr_entry *entry,
2546 void *context, int vl, int mode, u64 data)
2547{
2548 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2549
2550 return dd->rcv_err_status_cnt[27];
2551}
2552
2553static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2554 const struct cntr_entry *entry,
2555 void *context, int vl, int mode, u64 data)
2556{
2557 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2558
2559 return dd->rcv_err_status_cnt[26];
2560}
2561
2562static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2563 const struct cntr_entry *entry,
2564 void *context, int vl, int mode, u64 data)
2565{
2566 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2567
2568 return dd->rcv_err_status_cnt[25];
2569}
2570
2571static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2572 const struct cntr_entry *entry,
2573 void *context, int vl, int mode, u64 data)
2574{
2575 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2576
2577 return dd->rcv_err_status_cnt[24];
2578}
2579
2580static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2581 const struct cntr_entry *entry,
2582 void *context, int vl, int mode, u64 data)
2583{
2584 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2585
2586 return dd->rcv_err_status_cnt[23];
2587}
2588
2589static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2590 const struct cntr_entry *entry,
2591 void *context, int vl, int mode, u64 data)
2592{
2593 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2594
2595 return dd->rcv_err_status_cnt[22];
2596}
2597
2598static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2599 const struct cntr_entry *entry,
2600 void *context, int vl, int mode, u64 data)
2601{
2602 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2603
2604 return dd->rcv_err_status_cnt[21];
2605}
2606
2607static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2608 const struct cntr_entry *entry,
2609 void *context, int vl, int mode, u64 data)
2610{
2611 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2612
2613 return dd->rcv_err_status_cnt[20];
2614}
2615
2616static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2617 const struct cntr_entry *entry,
2618 void *context, int vl, int mode, u64 data)
2619{
2620 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2621
2622 return dd->rcv_err_status_cnt[19];
2623}
2624
2625static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2626 void *context, int vl,
2627 int mode, u64 data)
2628{
2629 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2630
2631 return dd->rcv_err_status_cnt[18];
2632}
2633
2634static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2635 void *context, int vl,
2636 int mode, u64 data)
2637{
2638 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2639
2640 return dd->rcv_err_status_cnt[17];
2641}
2642
2643static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2644 const struct cntr_entry *entry,
2645 void *context, int vl, int mode, u64 data)
2646{
2647 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2648
2649 return dd->rcv_err_status_cnt[16];
2650}
2651
2652static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2653 const struct cntr_entry *entry,
2654 void *context, int vl, int mode, u64 data)
2655{
2656 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2657
2658 return dd->rcv_err_status_cnt[15];
2659}
2660
2661static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2662 void *context, int vl,
2663 int mode, u64 data)
2664{
2665 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2666
2667 return dd->rcv_err_status_cnt[14];
2668}
2669
2670static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2671 void *context, int vl,
2672 int mode, u64 data)
2673{
2674 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2675
2676 return dd->rcv_err_status_cnt[13];
2677}
2678
2679static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2680 void *context, int vl, int mode,
2681 u64 data)
2682{
2683 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2684
2685 return dd->rcv_err_status_cnt[12];
2686}
2687
2688static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2689 void *context, int vl, int mode,
2690 u64 data)
2691{
2692 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2693
2694 return dd->rcv_err_status_cnt[11];
2695}
2696
2697static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2698 void *context, int vl, int mode,
2699 u64 data)
2700{
2701 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2702
2703 return dd->rcv_err_status_cnt[10];
2704}
2705
2706static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2707 void *context, int vl, int mode,
2708 u64 data)
2709{
2710 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2711
2712 return dd->rcv_err_status_cnt[9];
2713}
2714
2715static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2716 void *context, int vl, int mode,
2717 u64 data)
2718{
2719 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2720
2721 return dd->rcv_err_status_cnt[8];
2722}
2723
2724static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2725 const struct cntr_entry *entry,
2726 void *context, int vl, int mode, u64 data)
2727{
2728 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2729
2730 return dd->rcv_err_status_cnt[7];
2731}
2732
2733static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2734 const struct cntr_entry *entry,
2735 void *context, int vl, int mode, u64 data)
2736{
2737 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2738
2739 return dd->rcv_err_status_cnt[6];
2740}
2741
2742static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2743 void *context, int vl, int mode,
2744 u64 data)
2745{
2746 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2747
2748 return dd->rcv_err_status_cnt[5];
2749}
2750
2751static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2752 void *context, int vl, int mode,
2753 u64 data)
2754{
2755 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2756
2757 return dd->rcv_err_status_cnt[4];
2758}
2759
2760static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2761 void *context, int vl, int mode,
2762 u64 data)
2763{
2764 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2765
2766 return dd->rcv_err_status_cnt[3];
2767}
2768
2769static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2770 void *context, int vl, int mode,
2771 u64 data)
2772{
2773 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2774
2775 return dd->rcv_err_status_cnt[2];
2776}
2777
2778static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2779 void *context, int vl, int mode,
2780 u64 data)
2781{
2782 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2783
2784 return dd->rcv_err_status_cnt[1];
2785}
2786
2787static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2788 void *context, int vl, int mode,
2789 u64 data)
2790{
2791 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2792
2793 return dd->rcv_err_status_cnt[0];
2794}
2795
2796
2797
2798
2799
2800static u64 access_pio_pec_sop_head_parity_err_cnt(
2801 const struct cntr_entry *entry,
2802 void *context, int vl, int mode, u64 data)
2803{
2804 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2805
2806 return dd->send_pio_err_status_cnt[35];
2807}
2808
2809static u64 access_pio_pcc_sop_head_parity_err_cnt(
2810 const struct cntr_entry *entry,
2811 void *context, int vl, int mode, u64 data)
2812{
2813 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2814
2815 return dd->send_pio_err_status_cnt[34];
2816}
2817
2818static u64 access_pio_last_returned_cnt_parity_err_cnt(
2819 const struct cntr_entry *entry,
2820 void *context, int vl, int mode, u64 data)
2821{
2822 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2823
2824 return dd->send_pio_err_status_cnt[33];
2825}
2826
2827static u64 access_pio_current_free_cnt_parity_err_cnt(
2828 const struct cntr_entry *entry,
2829 void *context, int vl, int mode, u64 data)
2830{
2831 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2832
2833 return dd->send_pio_err_status_cnt[32];
2834}
2835
2836static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2837 void *context, int vl, int mode,
2838 u64 data)
2839{
2840 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2841
2842 return dd->send_pio_err_status_cnt[31];
2843}
2844
2845static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2846 void *context, int vl, int mode,
2847 u64 data)
2848{
2849 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2850
2851 return dd->send_pio_err_status_cnt[30];
2852}
2853
2854static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2855 void *context, int vl, int mode,
2856 u64 data)
2857{
2858 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2859
2860 return dd->send_pio_err_status_cnt[29];
2861}
2862
2863static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2864 const struct cntr_entry *entry,
2865 void *context, int vl, int mode, u64 data)
2866{
2867 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2868
2869 return dd->send_pio_err_status_cnt[28];
2870}
2871
2872static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2873 void *context, int vl, int mode,
2874 u64 data)
2875{
2876 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2877
2878 return dd->send_pio_err_status_cnt[27];
2879}
2880
2881static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2882 void *context, int vl, int mode,
2883 u64 data)
2884{
2885 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2886
2887 return dd->send_pio_err_status_cnt[26];
2888}
2889
2890static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2891 void *context, int vl,
2892 int mode, u64 data)
2893{
2894 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2895
2896 return dd->send_pio_err_status_cnt[25];
2897}
2898
2899static u64 access_pio_block_qw_count_parity_err_cnt(
2900 const struct cntr_entry *entry,
2901 void *context, int vl, int mode, u64 data)
2902{
2903 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2904
2905 return dd->send_pio_err_status_cnt[24];
2906}
2907
2908static u64 access_pio_write_qw_valid_parity_err_cnt(
2909 const struct cntr_entry *entry,
2910 void *context, int vl, int mode, u64 data)
2911{
2912 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2913
2914 return dd->send_pio_err_status_cnt[23];
2915}
2916
2917static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2918 void *context, int vl, int mode,
2919 u64 data)
2920{
2921 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2922
2923 return dd->send_pio_err_status_cnt[22];
2924}
2925
2926static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2927 void *context, int vl,
2928 int mode, u64 data)
2929{
2930 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2931
2932 return dd->send_pio_err_status_cnt[21];
2933}
2934
2935static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2936 void *context, int vl,
2937 int mode, u64 data)
2938{
2939 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2940
2941 return dd->send_pio_err_status_cnt[20];
2942}
2943
2944static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2945 void *context, int vl,
2946 int mode, u64 data)
2947{
2948 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2949
2950 return dd->send_pio_err_status_cnt[19];
2951}
2952
2953static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2954 const struct cntr_entry *entry,
2955 void *context, int vl, int mode, u64 data)
2956{
2957 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2958
2959 return dd->send_pio_err_status_cnt[18];
2960}
2961
2962static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2963 void *context, int vl, int mode,
2964 u64 data)
2965{
2966 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2967
2968 return dd->send_pio_err_status_cnt[17];
2969}
2970
2971static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2972 void *context, int vl, int mode,
2973 u64 data)
2974{
2975 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2976
2977 return dd->send_pio_err_status_cnt[16];
2978}
2979
2980static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2981 const struct cntr_entry *entry,
2982 void *context, int vl, int mode, u64 data)
2983{
2984 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2985
2986 return dd->send_pio_err_status_cnt[15];
2987}
2988
2989static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2990 const struct cntr_entry *entry,
2991 void *context, int vl, int mode, u64 data)
2992{
2993 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2994
2995 return dd->send_pio_err_status_cnt[14];
2996}
2997
2998static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2999 const struct cntr_entry *entry,
3000 void *context, int vl, int mode, u64 data)
3001{
3002 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3003
3004 return dd->send_pio_err_status_cnt[13];
3005}
3006
3007static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
3008 const struct cntr_entry *entry,
3009 void *context, int vl, int mode, u64 data)
3010{
3011 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3012
3013 return dd->send_pio_err_status_cnt[12];
3014}
3015
3016static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
3017 const struct cntr_entry *entry,
3018 void *context, int vl, int mode, u64 data)
3019{
3020 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3021
3022 return dd->send_pio_err_status_cnt[11];
3023}
3024
3025static u64 access_pio_sm_pkt_reset_parity_err_cnt(
3026 const struct cntr_entry *entry,
3027 void *context, int vl, int mode, u64 data)
3028{
3029 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3030
3031 return dd->send_pio_err_status_cnt[10];
3032}
3033
3034static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
3035 const struct cntr_entry *entry,
3036 void *context, int vl, int mode, u64 data)
3037{
3038 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3039
3040 return dd->send_pio_err_status_cnt[9];
3041}
3042
3043static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
3044 const struct cntr_entry *entry,
3045 void *context, int vl, int mode, u64 data)
3046{
3047 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3048
3049 return dd->send_pio_err_status_cnt[8];
3050}
3051
3052static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
3053 const struct cntr_entry *entry,
3054 void *context, int vl, int mode, u64 data)
3055{
3056 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3057
3058 return dd->send_pio_err_status_cnt[7];
3059}
3060
3061static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
3062 void *context, int vl, int mode,
3063 u64 data)
3064{
3065 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3066
3067 return dd->send_pio_err_status_cnt[6];
3068}
3069
3070static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
3071 void *context, int vl, int mode,
3072 u64 data)
3073{
3074 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3075
3076 return dd->send_pio_err_status_cnt[5];
3077}
3078
3079static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
3080 void *context, int vl, int mode,
3081 u64 data)
3082{
3083 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3084
3085 return dd->send_pio_err_status_cnt[4];
3086}
3087
3088static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
3089 void *context, int vl, int mode,
3090 u64 data)
3091{
3092 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3093
3094 return dd->send_pio_err_status_cnt[3];
3095}
3096
3097static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3098 void *context, int vl, int mode,
3099 u64 data)
3100{
3101 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3102
3103 return dd->send_pio_err_status_cnt[2];
3104}
3105
3106static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3107 void *context, int vl,
3108 int mode, u64 data)
3109{
3110 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3111
3112 return dd->send_pio_err_status_cnt[1];
3113}
3114
3115static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3116 void *context, int vl, int mode,
3117 u64 data)
3118{
3119 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3120
3121 return dd->send_pio_err_status_cnt[0];
3122}
3123
3124
3125
3126
3127
3128static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3129 const struct cntr_entry *entry,
3130 void *context, int vl, int mode, u64 data)
3131{
3132 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3133
3134 return dd->send_dma_err_status_cnt[3];
3135}
3136
3137static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3138 const struct cntr_entry *entry,
3139 void *context, int vl, int mode, u64 data)
3140{
3141 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3142
3143 return dd->send_dma_err_status_cnt[2];
3144}
3145
3146static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3147 void *context, int vl, int mode,
3148 u64 data)
3149{
3150 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3151
3152 return dd->send_dma_err_status_cnt[1];
3153}
3154
3155static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3156 void *context, int vl, int mode,
3157 u64 data)
3158{
3159 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3160
3161 return dd->send_dma_err_status_cnt[0];
3162}
3163
3164
3165
3166
3167
3168static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3169 const struct cntr_entry *entry,
3170 void *context, int vl, int mode, u64 data)
3171{
3172 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3173
3174 return dd->send_egress_err_status_cnt[63];
3175}
3176
3177static u64 access_tx_read_sdma_memory_csr_err_cnt(
3178 const struct cntr_entry *entry,
3179 void *context, int vl, int mode, u64 data)
3180{
3181 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3182
3183 return dd->send_egress_err_status_cnt[62];
3184}
3185
3186static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3187 void *context, int vl, int mode,
3188 u64 data)
3189{
3190 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3191
3192 return dd->send_egress_err_status_cnt[61];
3193}
3194
3195static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3196 void *context, int vl,
3197 int mode, u64 data)
3198{
3199 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3200
3201 return dd->send_egress_err_status_cnt[60];
3202}
3203
3204static u64 access_tx_read_sdma_memory_cor_err_cnt(
3205 const struct cntr_entry *entry,
3206 void *context, int vl, int mode, u64 data)
3207{
3208 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3209
3210 return dd->send_egress_err_status_cnt[59];
3211}
3212
3213static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3214 void *context, int vl, int mode,
3215 u64 data)
3216{
3217 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3218
3219 return dd->send_egress_err_status_cnt[58];
3220}
3221
3222static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3223 void *context, int vl, int mode,
3224 u64 data)
3225{
3226 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3227
3228 return dd->send_egress_err_status_cnt[57];
3229}
3230
3231static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3232 void *context, int vl, int mode,
3233 u64 data)
3234{
3235 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3236
3237 return dd->send_egress_err_status_cnt[56];
3238}
3239
3240static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3241 void *context, int vl, int mode,
3242 u64 data)
3243{
3244 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3245
3246 return dd->send_egress_err_status_cnt[55];
3247}
3248
3249static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3250 void *context, int vl, int mode,
3251 u64 data)
3252{
3253 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3254
3255 return dd->send_egress_err_status_cnt[54];
3256}
3257
3258static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3259 void *context, int vl, int mode,
3260 u64 data)
3261{
3262 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3263
3264 return dd->send_egress_err_status_cnt[53];
3265}
3266
3267static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3268 void *context, int vl, int mode,
3269 u64 data)
3270{
3271 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3272
3273 return dd->send_egress_err_status_cnt[52];
3274}
3275
3276static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3277 void *context, int vl, int mode,
3278 u64 data)
3279{
3280 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3281
3282 return dd->send_egress_err_status_cnt[51];
3283}
3284
3285static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3286 void *context, int vl, int mode,
3287 u64 data)
3288{
3289 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3290
3291 return dd->send_egress_err_status_cnt[50];
3292}
3293
3294static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3295 void *context, int vl, int mode,
3296 u64 data)
3297{
3298 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3299
3300 return dd->send_egress_err_status_cnt[49];
3301}
3302
3303static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3304 void *context, int vl, int mode,
3305 u64 data)
3306{
3307 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3308
3309 return dd->send_egress_err_status_cnt[48];
3310}
3311
3312static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3313 void *context, int vl, int mode,
3314 u64 data)
3315{
3316 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3317
3318 return dd->send_egress_err_status_cnt[47];
3319}
3320
3321static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3322 void *context, int vl, int mode,
3323 u64 data)
3324{
3325 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3326
3327 return dd->send_egress_err_status_cnt[46];
3328}
3329
3330static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3331 void *context, int vl, int mode,
3332 u64 data)
3333{
3334 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3335
3336 return dd->send_egress_err_status_cnt[45];
3337}
3338
3339static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3340 void *context, int vl,
3341 int mode, u64 data)
3342{
3343 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3344
3345 return dd->send_egress_err_status_cnt[44];
3346}
3347
3348static u64 access_tx_read_sdma_memory_unc_err_cnt(
3349 const struct cntr_entry *entry,
3350 void *context, int vl, int mode, u64 data)
3351{
3352 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3353
3354 return dd->send_egress_err_status_cnt[43];
3355}
3356
3357static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3358 void *context, int vl, int mode,
3359 u64 data)
3360{
3361 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3362
3363 return dd->send_egress_err_status_cnt[42];
3364}
3365
3366static u64 access_tx_credit_return_partiy_err_cnt(
3367 const struct cntr_entry *entry,
3368 void *context, int vl, int mode, u64 data)
3369{
3370 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3371
3372 return dd->send_egress_err_status_cnt[41];
3373}
3374
3375static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3376 const struct cntr_entry *entry,
3377 void *context, int vl, int mode, u64 data)
3378{
3379 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3380
3381 return dd->send_egress_err_status_cnt[40];
3382}
3383
3384static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3385 const struct cntr_entry *entry,
3386 void *context, int vl, int mode, u64 data)
3387{
3388 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3389
3390 return dd->send_egress_err_status_cnt[39];
3391}
3392
3393static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3394 const struct cntr_entry *entry,
3395 void *context, int vl, int mode, u64 data)
3396{
3397 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3398
3399 return dd->send_egress_err_status_cnt[38];
3400}
3401
3402static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3403 const struct cntr_entry *entry,
3404 void *context, int vl, int mode, u64 data)
3405{
3406 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3407
3408 return dd->send_egress_err_status_cnt[37];
3409}
3410
3411static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3412 const struct cntr_entry *entry,
3413 void *context, int vl, int mode, u64 data)
3414{
3415 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3416
3417 return dd->send_egress_err_status_cnt[36];
3418}
3419
3420static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3421 const struct cntr_entry *entry,
3422 void *context, int vl, int mode, u64 data)
3423{
3424 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3425
3426 return dd->send_egress_err_status_cnt[35];
3427}
3428
3429static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3430 const struct cntr_entry *entry,
3431 void *context, int vl, int mode, u64 data)
3432{
3433 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3434
3435 return dd->send_egress_err_status_cnt[34];
3436}
3437
3438static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3439 const struct cntr_entry *entry,
3440 void *context, int vl, int mode, u64 data)
3441{
3442 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3443
3444 return dd->send_egress_err_status_cnt[33];
3445}
3446
3447static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3448 const struct cntr_entry *entry,
3449 void *context, int vl, int mode, u64 data)
3450{
3451 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3452
3453 return dd->send_egress_err_status_cnt[32];
3454}
3455
3456static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3457 const struct cntr_entry *entry,
3458 void *context, int vl, int mode, u64 data)
3459{
3460 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3461
3462 return dd->send_egress_err_status_cnt[31];
3463}
3464
3465static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3466 const struct cntr_entry *entry,
3467 void *context, int vl, int mode, u64 data)
3468{
3469 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3470
3471 return dd->send_egress_err_status_cnt[30];
3472}
3473
3474static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3475 const struct cntr_entry *entry,
3476 void *context, int vl, int mode, u64 data)
3477{
3478 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3479
3480 return dd->send_egress_err_status_cnt[29];
3481}
3482
3483static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3484 const struct cntr_entry *entry,
3485 void *context, int vl, int mode, u64 data)
3486{
3487 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3488
3489 return dd->send_egress_err_status_cnt[28];
3490}
3491
3492static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3493 const struct cntr_entry *entry,
3494 void *context, int vl, int mode, u64 data)
3495{
3496 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3497
3498 return dd->send_egress_err_status_cnt[27];
3499}
3500
3501static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3502 const struct cntr_entry *entry,
3503 void *context, int vl, int mode, u64 data)
3504{
3505 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3506
3507 return dd->send_egress_err_status_cnt[26];
3508}
3509
3510static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3511 const struct cntr_entry *entry,
3512 void *context, int vl, int mode, u64 data)
3513{
3514 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3515
3516 return dd->send_egress_err_status_cnt[25];
3517}
3518
3519static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3520 const struct cntr_entry *entry,
3521 void *context, int vl, int mode, u64 data)
3522{
3523 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3524
3525 return dd->send_egress_err_status_cnt[24];
3526}
3527
3528static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3529 const struct cntr_entry *entry,
3530 void *context, int vl, int mode, u64 data)
3531{
3532 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3533
3534 return dd->send_egress_err_status_cnt[23];
3535}
3536
3537static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3538 const struct cntr_entry *entry,
3539 void *context, int vl, int mode, u64 data)
3540{
3541 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3542
3543 return dd->send_egress_err_status_cnt[22];
3544}
3545
3546static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3547 const struct cntr_entry *entry,
3548 void *context, int vl, int mode, u64 data)
3549{
3550 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3551
3552 return dd->send_egress_err_status_cnt[21];
3553}
3554
3555static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3556 const struct cntr_entry *entry,
3557 void *context, int vl, int mode, u64 data)
3558{
3559 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3560
3561 return dd->send_egress_err_status_cnt[20];
3562}
3563
3564static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3565 const struct cntr_entry *entry,
3566 void *context, int vl, int mode, u64 data)
3567{
3568 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3569
3570 return dd->send_egress_err_status_cnt[19];
3571}
3572
3573static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3574 const struct cntr_entry *entry,
3575 void *context, int vl, int mode, u64 data)
3576{
3577 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3578
3579 return dd->send_egress_err_status_cnt[18];
3580}
3581
3582static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3583 const struct cntr_entry *entry,
3584 void *context, int vl, int mode, u64 data)
3585{
3586 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3587
3588 return dd->send_egress_err_status_cnt[17];
3589}
3590
3591static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3592 const struct cntr_entry *entry,
3593 void *context, int vl, int mode, u64 data)
3594{
3595 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3596
3597 return dd->send_egress_err_status_cnt[16];
3598}
3599
3600static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3601 void *context, int vl, int mode,
3602 u64 data)
3603{
3604 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3605
3606 return dd->send_egress_err_status_cnt[15];
3607}
3608
3609static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3610 void *context, int vl,
3611 int mode, u64 data)
3612{
3613 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3614
3615 return dd->send_egress_err_status_cnt[14];
3616}
3617
3618static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3619 void *context, int vl, int mode,
3620 u64 data)
3621{
3622 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3623
3624 return dd->send_egress_err_status_cnt[13];
3625}
3626
3627static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3628 void *context, int vl, int mode,
3629 u64 data)
3630{
3631 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3632
3633 return dd->send_egress_err_status_cnt[12];
3634}
3635
3636static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3637 const struct cntr_entry *entry,
3638 void *context, int vl, int mode, u64 data)
3639{
3640 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3641
3642 return dd->send_egress_err_status_cnt[11];
3643}
3644
3645static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3646 void *context, int vl, int mode,
3647 u64 data)
3648{
3649 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3650
3651 return dd->send_egress_err_status_cnt[10];
3652}
3653
3654static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3655 void *context, int vl, int mode,
3656 u64 data)
3657{
3658 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3659
3660 return dd->send_egress_err_status_cnt[9];
3661}
3662
3663static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3664 const struct cntr_entry *entry,
3665 void *context, int vl, int mode, u64 data)
3666{
3667 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3668
3669 return dd->send_egress_err_status_cnt[8];
3670}
3671
3672static u64 access_tx_pio_launch_intf_parity_err_cnt(
3673 const struct cntr_entry *entry,
3674 void *context, int vl, int mode, u64 data)
3675{
3676 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3677
3678 return dd->send_egress_err_status_cnt[7];
3679}
3680
3681static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3682 void *context, int vl, int mode,
3683 u64 data)
3684{
3685 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3686
3687 return dd->send_egress_err_status_cnt[6];
3688}
3689
3690static u64 access_tx_incorrect_link_state_err_cnt(
3691 const struct cntr_entry *entry,
3692 void *context, int vl, int mode, u64 data)
3693{
3694 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3695
3696 return dd->send_egress_err_status_cnt[5];
3697}
3698
3699static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3700 void *context, int vl, int mode,
3701 u64 data)
3702{
3703 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3704
3705 return dd->send_egress_err_status_cnt[4];
3706}
3707
3708static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3709 const struct cntr_entry *entry,
3710 void *context, int vl, int mode, u64 data)
3711{
3712 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3713
3714 return dd->send_egress_err_status_cnt[3];
3715}
3716
3717static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3718 void *context, int vl, int mode,
3719 u64 data)
3720{
3721 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3722
3723 return dd->send_egress_err_status_cnt[2];
3724}
3725
3726static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3727 const struct cntr_entry *entry,
3728 void *context, int vl, int mode, u64 data)
3729{
3730 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3731
3732 return dd->send_egress_err_status_cnt[1];
3733}
3734
3735static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3736 const struct cntr_entry *entry,
3737 void *context, int vl, int mode, u64 data)
3738{
3739 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3740
3741 return dd->send_egress_err_status_cnt[0];
3742}
3743
3744
3745
3746
3747
3748static u64 access_send_csr_write_bad_addr_err_cnt(
3749 const struct cntr_entry *entry,
3750 void *context, int vl, int mode, u64 data)
3751{
3752 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3753
3754 return dd->send_err_status_cnt[2];
3755}
3756
3757static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3758 void *context, int vl,
3759 int mode, u64 data)
3760{
3761 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3762
3763 return dd->send_err_status_cnt[1];
3764}
3765
3766static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3767 void *context, int vl, int mode,
3768 u64 data)
3769{
3770 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3771
3772 return dd->send_err_status_cnt[0];
3773}
3774
3775
3776
3777
3778
3779static u64 access_pio_write_out_of_bounds_err_cnt(
3780 const struct cntr_entry *entry,
3781 void *context, int vl, int mode, u64 data)
3782{
3783 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3784
3785 return dd->sw_ctxt_err_status_cnt[4];
3786}
3787
3788static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3789 void *context, int vl, int mode,
3790 u64 data)
3791{
3792 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3793
3794 return dd->sw_ctxt_err_status_cnt[3];
3795}
3796
3797static u64 access_pio_write_crosses_boundary_err_cnt(
3798 const struct cntr_entry *entry,
3799 void *context, int vl, int mode, u64 data)
3800{
3801 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3802
3803 return dd->sw_ctxt_err_status_cnt[2];
3804}
3805
3806static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3807 void *context, int vl,
3808 int mode, u64 data)
3809{
3810 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3811
3812 return dd->sw_ctxt_err_status_cnt[1];
3813}
3814
3815static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3816 void *context, int vl, int mode,
3817 u64 data)
3818{
3819 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3820
3821 return dd->sw_ctxt_err_status_cnt[0];
3822}
3823
3824
3825
3826
3827
3828static u64 access_sdma_header_request_fifo_cor_err_cnt(
3829 const struct cntr_entry *entry,
3830 void *context, int vl, int mode, u64 data)
3831{
3832 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3833
3834 return dd->sw_send_dma_eng_err_status_cnt[23];
3835}
3836
3837static u64 access_sdma_header_storage_cor_err_cnt(
3838 const struct cntr_entry *entry,
3839 void *context, int vl, int mode, u64 data)
3840{
3841 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3842
3843 return dd->sw_send_dma_eng_err_status_cnt[22];
3844}
3845
3846static u64 access_sdma_packet_tracking_cor_err_cnt(
3847 const struct cntr_entry *entry,
3848 void *context, int vl, int mode, u64 data)
3849{
3850 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3851
3852 return dd->sw_send_dma_eng_err_status_cnt[21];
3853}
3854
3855static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3856 void *context, int vl, int mode,
3857 u64 data)
3858{
3859 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3860
3861 return dd->sw_send_dma_eng_err_status_cnt[20];
3862}
3863
3864static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3865 void *context, int vl, int mode,
3866 u64 data)
3867{
3868 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3869
3870 return dd->sw_send_dma_eng_err_status_cnt[19];
3871}
3872
3873static u64 access_sdma_header_request_fifo_unc_err_cnt(
3874 const struct cntr_entry *entry,
3875 void *context, int vl, int mode, u64 data)
3876{
3877 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3878
3879 return dd->sw_send_dma_eng_err_status_cnt[18];
3880}
3881
3882static u64 access_sdma_header_storage_unc_err_cnt(
3883 const struct cntr_entry *entry,
3884 void *context, int vl, int mode, u64 data)
3885{
3886 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3887
3888 return dd->sw_send_dma_eng_err_status_cnt[17];
3889}
3890
3891static u64 access_sdma_packet_tracking_unc_err_cnt(
3892 const struct cntr_entry *entry,
3893 void *context, int vl, int mode, u64 data)
3894{
3895 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3896
3897 return dd->sw_send_dma_eng_err_status_cnt[16];
3898}
3899
3900static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3901 void *context, int vl, int mode,
3902 u64 data)
3903{
3904 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3905
3906 return dd->sw_send_dma_eng_err_status_cnt[15];
3907}
3908
3909static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3910 void *context, int vl, int mode,
3911 u64 data)
3912{
3913 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3914
3915 return dd->sw_send_dma_eng_err_status_cnt[14];
3916}
3917
3918static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3919 void *context, int vl, int mode,
3920 u64 data)
3921{
3922 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3923
3924 return dd->sw_send_dma_eng_err_status_cnt[13];
3925}
3926
3927static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3928 void *context, int vl, int mode,
3929 u64 data)
3930{
3931 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3932
3933 return dd->sw_send_dma_eng_err_status_cnt[12];
3934}
3935
3936static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3937 void *context, int vl, int mode,
3938 u64 data)
3939{
3940 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3941
3942 return dd->sw_send_dma_eng_err_status_cnt[11];
3943}
3944
3945static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3946 void *context, int vl, int mode,
3947 u64 data)
3948{
3949 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3950
3951 return dd->sw_send_dma_eng_err_status_cnt[10];
3952}
3953
3954static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3955 void *context, int vl, int mode,
3956 u64 data)
3957{
3958 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3959
3960 return dd->sw_send_dma_eng_err_status_cnt[9];
3961}
3962
3963static u64 access_sdma_packet_desc_overflow_err_cnt(
3964 const struct cntr_entry *entry,
3965 void *context, int vl, int mode, u64 data)
3966{
3967 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3968
3969 return dd->sw_send_dma_eng_err_status_cnt[8];
3970}
3971
3972static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3973 void *context, int vl,
3974 int mode, u64 data)
3975{
3976 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3977
3978 return dd->sw_send_dma_eng_err_status_cnt[7];
3979}
3980
3981static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3982 void *context, int vl, int mode, u64 data)
3983{
3984 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3985
3986 return dd->sw_send_dma_eng_err_status_cnt[6];
3987}
3988
3989static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3990 void *context, int vl, int mode,
3991 u64 data)
3992{
3993 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3994
3995 return dd->sw_send_dma_eng_err_status_cnt[5];
3996}
3997
3998static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3999 void *context, int vl, int mode,
4000 u64 data)
4001{
4002 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4003
4004 return dd->sw_send_dma_eng_err_status_cnt[4];
4005}
4006
4007static u64 access_sdma_tail_out_of_bounds_err_cnt(
4008 const struct cntr_entry *entry,
4009 void *context, int vl, int mode, u64 data)
4010{
4011 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4012
4013 return dd->sw_send_dma_eng_err_status_cnt[3];
4014}
4015
4016static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
4017 void *context, int vl, int mode,
4018 u64 data)
4019{
4020 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4021
4022 return dd->sw_send_dma_eng_err_status_cnt[2];
4023}
4024
4025static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
4026 void *context, int vl, int mode,
4027 u64 data)
4028{
4029 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4030
4031 return dd->sw_send_dma_eng_err_status_cnt[1];
4032}
4033
4034static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
4035 void *context, int vl, int mode,
4036 u64 data)
4037{
4038 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4039
4040 return dd->sw_send_dma_eng_err_status_cnt[0];
4041}
4042
4043static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
4044 void *context, int vl, int mode,
4045 u64 data)
4046{
4047 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4048
4049 u64 val = 0;
4050 u64 csr = entry->csr;
4051
4052 val = read_write_csr(dd, csr, mode, data);
4053 if (mode == CNTR_MODE_R) {
4054 val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
4055 CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
4056 } else if (mode == CNTR_MODE_W) {
4057 dd->sw_rcv_bypass_packet_errors = 0;
4058 } else {
4059 dd_dev_err(dd, "Invalid cntr register access mode");
4060 return 0;
4061 }
4062 return val;
4063}
4064
/*
 * def_access_sw_cpu() - generate a per-CPU software counter accessor.
 *
 * Expands to a cntr_entry access function named access_sw_cpu_<cntr>.
 * The context argument is the port data (hfi1_pportdata); the generated
 * function delegates to read_write_cpu() with the z_<cntr> baseline and
 * the per-CPU <cntr> field from the port's ibport_data.rvp.  The entry
 * argument is part of the common access signature and is unused.
 */
#define def_access_sw_cpu(cntr) \
static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
			      void *context, int vl, int mode, u64 data) \
{ \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
			      ppd->ibport_data.rvp.cntr, vl, \
			      mode, data); \
}
4074
/* Per-CPU software counter accessors for the RC protocol statistics. */
def_access_sw_cpu(rc_acks);
def_access_sw_cpu(rc_qacks);
def_access_sw_cpu(rc_delayed_comp);
4078
/*
 * def_access_ibp_counter() - generate an IB port software counter accessor.
 *
 * Expands to a cntr_entry access function named access_ibp_<cntr>.  The
 * context argument is the port data (hfi1_pportdata).  These counters are
 * not per-VL, so any per-VL query (vl != CNTR_INVALID_VL) returns 0;
 * otherwise the function delegates to read_write_sw() with the n_<cntr>
 * field from the port's ibport_data.rvp.  entry is unused.
 */
#define def_access_ibp_counter(cntr) \
static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
				void *context, int vl, int mode, u64 data) \
{ \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
	\
	if (vl != CNTR_INVALID_VL) \
		return 0; \
	\
	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
			     mode, data); \
}
4091
/* IB port software counter accessors (verbs-layer statistics). */
def_access_ibp_counter(loop_pkts);
def_access_ibp_counter(rc_resends);
def_access_ibp_counter(rnr_naks);
def_access_ibp_counter(other_naks);
def_access_ibp_counter(rc_timeouts);
def_access_ibp_counter(pkt_drops);
def_access_ibp_counter(dmawait);
def_access_ibp_counter(rc_seqnak);
def_access_ibp_counter(rc_dupreq);
def_access_ibp_counter(rdma_seq);
def_access_ibp_counter(unaligned);
def_access_ibp_counter(seq_naks);
def_access_ibp_counter(rc_crwaits);
4105
4106static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
4107[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
4108[C_RX_LEN_ERR] = RXE32_DEV_CNTR_ELEM(RxLenErr, RCV_LENGTH_ERR_CNT, CNTR_SYNTH),
4109[C_RX_ICRC_ERR] = RXE32_DEV_CNTR_ELEM(RxICrcErr, RCV_ICRC_ERR_CNT, CNTR_SYNTH),
4110[C_RX_EBP] = RXE32_DEV_CNTR_ELEM(RxEbpCnt, RCV_EBP_CNT, CNTR_SYNTH),
4111[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
4112 CNTR_NORMAL),
4113[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
4114 CNTR_NORMAL),
4115[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
4116 RCV_TID_FLOW_GEN_MISMATCH_CNT,
4117 CNTR_NORMAL),
4118[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
4119 CNTR_NORMAL),
4120[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4121 RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4122[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4123 CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4124[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4125 CNTR_NORMAL),
4126[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4127 CNTR_NORMAL),
4128[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4129 CNTR_NORMAL),
4130[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4131 CNTR_NORMAL),
4132[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4133 CNTR_NORMAL),
4134[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4135 CNTR_NORMAL),
4136[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4137 CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4138[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4139 CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4140[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4141 CNTR_SYNTH),
4142[C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
4143 access_dc_rcv_err_cnt),
4144[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4145 CNTR_SYNTH),
4146[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4147 CNTR_SYNTH),
4148[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4149 CNTR_SYNTH),
4150[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4151 DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4152[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4153 DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4154 CNTR_SYNTH),
4155[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4156 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4157[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4158 CNTR_SYNTH),
4159[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4160 CNTR_SYNTH),
4161[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4162 CNTR_SYNTH),
4163[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4164 CNTR_SYNTH),
4165[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4166 CNTR_SYNTH),
4167[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4168 CNTR_SYNTH),
4169[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4170 CNTR_SYNTH),
4171[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4172 CNTR_SYNTH | CNTR_VL),
4173[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4174 CNTR_SYNTH | CNTR_VL),
4175[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4176[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4177 CNTR_SYNTH | CNTR_VL),
4178[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4179[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4180 CNTR_SYNTH | CNTR_VL),
4181[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4182 CNTR_SYNTH),
4183[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4184 CNTR_SYNTH | CNTR_VL),
4185[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4186 CNTR_SYNTH),
4187[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4188 CNTR_SYNTH | CNTR_VL),
4189[C_DC_TOTAL_CRC] =
4190 DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4191 CNTR_SYNTH),
4192[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4193 CNTR_SYNTH),
4194[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4195 CNTR_SYNTH),
4196[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4197 CNTR_SYNTH),
4198[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4199 CNTR_SYNTH),
4200[C_DC_CRC_MULT_LN] =
4201 DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4202 CNTR_SYNTH),
4203[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4204 CNTR_SYNTH),
4205[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4206 CNTR_SYNTH),
4207[C_DC_SEQ_CRC_CNT] =
4208 DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4209 CNTR_SYNTH),
4210[C_DC_ESC0_ONLY_CNT] =
4211 DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4212 CNTR_SYNTH),
4213[C_DC_ESC0_PLUS1_CNT] =
4214 DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4215 CNTR_SYNTH),
4216[C_DC_ESC0_PLUS2_CNT] =
4217 DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4218 CNTR_SYNTH),
4219[C_DC_REINIT_FROM_PEER_CNT] =
4220 DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4221 CNTR_SYNTH),
4222[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4223 CNTR_SYNTH),
4224[C_DC_MISC_FLG_CNT] =
4225 DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4226 CNTR_SYNTH),
4227[C_DC_PRF_GOOD_LTP_CNT] =
4228 DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4229[C_DC_PRF_ACCEPTED_LTP_CNT] =
4230 DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4231 CNTR_SYNTH),
4232[C_DC_PRF_RX_FLIT_CNT] =
4233 DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4234[C_DC_PRF_TX_FLIT_CNT] =
4235 DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4236[C_DC_PRF_CLK_CNTR] =
4237 DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4238[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4239 DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4240[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4241 DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4242 CNTR_SYNTH),
4243[C_DC_PG_STS_TX_SBE_CNT] =
4244 DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4245[C_DC_PG_STS_TX_MBE_CNT] =
4246 DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4247 CNTR_SYNTH),
4248[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4249 access_sw_cpu_intr),
4250[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4251 access_sw_cpu_rcv_limit),
4252[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4253 access_sw_vtx_wait),
4254[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4255 access_sw_pio_wait),
4256[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4257 access_sw_pio_drain),
4258[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4259 access_sw_kmem_wait),
4260[C_SW_TID_WAIT] = CNTR_ELEM("TidWait", 0, 0, CNTR_NORMAL,
4261 hfi1_access_sw_tid_wait),
4262[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4263 access_sw_send_schedule),
4264[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4265 SEND_DMA_DESC_FETCHED_CNT, 0,
4266 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4267 dev_access_u32_csr),
4268[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4269 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4270 access_sde_int_cnt),
4271[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4272 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4273 access_sde_err_cnt),
4274[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4275 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4276 access_sde_idle_int_cnt),
4277[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4278 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4279 access_sde_progress_int_cnt),
4280
4281[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4282 CNTR_NORMAL,
4283 access_misc_pll_lock_fail_err_cnt),
4284[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4285 CNTR_NORMAL,
4286 access_misc_mbist_fail_err_cnt),
4287[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4288 CNTR_NORMAL,
4289 access_misc_invalid_eep_cmd_err_cnt),
4290[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4291 CNTR_NORMAL,
4292 access_misc_efuse_done_parity_err_cnt),
4293[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4294 CNTR_NORMAL,
4295 access_misc_efuse_write_err_cnt),
4296[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4297 0, CNTR_NORMAL,
4298 access_misc_efuse_read_bad_addr_err_cnt),
4299[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4300 CNTR_NORMAL,
4301 access_misc_efuse_csr_parity_err_cnt),
4302[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4303 CNTR_NORMAL,
4304 access_misc_fw_auth_failed_err_cnt),
4305[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4306 CNTR_NORMAL,
4307 access_misc_key_mismatch_err_cnt),
4308[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4309 CNTR_NORMAL,
4310 access_misc_sbus_write_failed_err_cnt),
4311[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4312 CNTR_NORMAL,
4313 access_misc_csr_write_bad_addr_err_cnt),
4314[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4315 CNTR_NORMAL,
4316 access_misc_csr_read_bad_addr_err_cnt),
4317[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4318 CNTR_NORMAL,
4319 access_misc_csr_parity_err_cnt),
4320
4321[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4322 CNTR_NORMAL,
4323 access_sw_cce_err_status_aggregated_cnt),
4324[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4325 CNTR_NORMAL,
4326 access_cce_msix_csr_parity_err_cnt),
4327[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4328 CNTR_NORMAL,
4329 access_cce_int_map_unc_err_cnt),
4330[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4331 CNTR_NORMAL,
4332 access_cce_int_map_cor_err_cnt),
4333[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4334 CNTR_NORMAL,
4335 access_cce_msix_table_unc_err_cnt),
4336[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4337 CNTR_NORMAL,
4338 access_cce_msix_table_cor_err_cnt),
4339[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4340 0, CNTR_NORMAL,
4341 access_cce_rxdma_conv_fifo_parity_err_cnt),
4342[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4343 0, CNTR_NORMAL,
4344 access_cce_rcpl_async_fifo_parity_err_cnt),
4345[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4346 CNTR_NORMAL,
4347 access_cce_seg_write_bad_addr_err_cnt),
4348[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4349 CNTR_NORMAL,
4350 access_cce_seg_read_bad_addr_err_cnt),
4351[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4352 CNTR_NORMAL,
4353 access_la_triggered_cnt),
4354[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4355 CNTR_NORMAL,
4356 access_cce_trgt_cpl_timeout_err_cnt),
4357[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4358 CNTR_NORMAL,
4359 access_pcic_receive_parity_err_cnt),
4360[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4361 CNTR_NORMAL,
4362 access_pcic_transmit_back_parity_err_cnt),
4363[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4364 0, CNTR_NORMAL,
4365 access_pcic_transmit_front_parity_err_cnt),
4366[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4367 CNTR_NORMAL,
4368 access_pcic_cpl_dat_q_unc_err_cnt),
4369[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4370 CNTR_NORMAL,
4371 access_pcic_cpl_hd_q_unc_err_cnt),
4372[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4373 CNTR_NORMAL,
4374 access_pcic_post_dat_q_unc_err_cnt),
4375[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4376 CNTR_NORMAL,
4377 access_pcic_post_hd_q_unc_err_cnt),
4378[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4379 CNTR_NORMAL,
4380 access_pcic_retry_sot_mem_unc_err_cnt),
4381[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4382 CNTR_NORMAL,
4383 access_pcic_retry_mem_unc_err),
4384[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4385 CNTR_NORMAL,
4386 access_pcic_n_post_dat_q_parity_err_cnt),
4387[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4388 CNTR_NORMAL,
4389 access_pcic_n_post_h_q_parity_err_cnt),
4390[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4391 CNTR_NORMAL,
4392 access_pcic_cpl_dat_q_cor_err_cnt),
4393[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4394 CNTR_NORMAL,
4395 access_pcic_cpl_hd_q_cor_err_cnt),
4396[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4397 CNTR_NORMAL,
4398 access_pcic_post_dat_q_cor_err_cnt),
4399[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4400 CNTR_NORMAL,
4401 access_pcic_post_hd_q_cor_err_cnt),
4402[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4403 CNTR_NORMAL,
4404 access_pcic_retry_sot_mem_cor_err_cnt),
4405[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4406 CNTR_NORMAL,
4407 access_pcic_retry_mem_cor_err_cnt),
4408[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4409 "CceCli1AsyncFifoDbgParityError", 0, 0,
4410 CNTR_NORMAL,
4411 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4412[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4413 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4414 CNTR_NORMAL,
4415 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4416 ),
4417[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4418 "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4419 CNTR_NORMAL,
4420 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4421[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4422 "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4423 CNTR_NORMAL,
4424 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4425[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4426 0, CNTR_NORMAL,
4427 access_cce_cli2_async_fifo_parity_err_cnt),
4428[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4429 CNTR_NORMAL,
4430 access_cce_csr_cfg_bus_parity_err_cnt),
4431[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4432 0, CNTR_NORMAL,
4433 access_cce_cli0_async_fifo_parity_err_cnt),
4434[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4435 CNTR_NORMAL,
4436 access_cce_rspd_data_parity_err_cnt),
4437[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4438 CNTR_NORMAL,
4439 access_cce_trgt_access_err_cnt),
4440[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4441 0, CNTR_NORMAL,
4442 access_cce_trgt_async_fifo_parity_err_cnt),
4443[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4444 CNTR_NORMAL,
4445 access_cce_csr_write_bad_addr_err_cnt),
4446[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4447 CNTR_NORMAL,
4448 access_cce_csr_read_bad_addr_err_cnt),
4449[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4450 CNTR_NORMAL,
4451 access_ccs_csr_parity_err_cnt),
4452
4453
4454[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4455 CNTR_NORMAL,
4456 access_rx_csr_parity_err_cnt),
4457[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4458 CNTR_NORMAL,
4459 access_rx_csr_write_bad_addr_err_cnt),
4460[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4461 CNTR_NORMAL,
4462 access_rx_csr_read_bad_addr_err_cnt),
4463[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4464 CNTR_NORMAL,
4465 access_rx_dma_csr_unc_err_cnt),
4466[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4467 CNTR_NORMAL,
4468 access_rx_dma_dq_fsm_encoding_err_cnt),
4469[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4470 CNTR_NORMAL,
4471 access_rx_dma_eq_fsm_encoding_err_cnt),
4472[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4473 CNTR_NORMAL,
4474 access_rx_dma_csr_parity_err_cnt),
4475[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4476 CNTR_NORMAL,
4477 access_rx_rbuf_data_cor_err_cnt),
4478[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4479 CNTR_NORMAL,
4480 access_rx_rbuf_data_unc_err_cnt),
4481[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4482 CNTR_NORMAL,
4483 access_rx_dma_data_fifo_rd_cor_err_cnt),
4484[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4485 CNTR_NORMAL,
4486 access_rx_dma_data_fifo_rd_unc_err_cnt),
4487[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4488 CNTR_NORMAL,
4489 access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4490[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4491 CNTR_NORMAL,
4492 access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4493[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4494 CNTR_NORMAL,
4495 access_rx_rbuf_desc_part2_cor_err_cnt),
4496[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4497 CNTR_NORMAL,
4498 access_rx_rbuf_desc_part2_unc_err_cnt),
4499[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4500 CNTR_NORMAL,
4501 access_rx_rbuf_desc_part1_cor_err_cnt),
4502[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4503 CNTR_NORMAL,
4504 access_rx_rbuf_desc_part1_unc_err_cnt),
4505[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4506 CNTR_NORMAL,
4507 access_rx_hq_intr_fsm_err_cnt),
4508[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4509 CNTR_NORMAL,
4510 access_rx_hq_intr_csr_parity_err_cnt),
4511[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4512 CNTR_NORMAL,
4513 access_rx_lookup_csr_parity_err_cnt),
4514[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4515 CNTR_NORMAL,
4516 access_rx_lookup_rcv_array_cor_err_cnt),
4517[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4518 CNTR_NORMAL,
4519 access_rx_lookup_rcv_array_unc_err_cnt),
4520[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4521 0, CNTR_NORMAL,
4522 access_rx_lookup_des_part2_parity_err_cnt),
4523[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4524 0, CNTR_NORMAL,
4525 access_rx_lookup_des_part1_unc_cor_err_cnt),
4526[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4527 CNTR_NORMAL,
4528 access_rx_lookup_des_part1_unc_err_cnt),
4529[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4530 CNTR_NORMAL,
4531 access_rx_rbuf_next_free_buf_cor_err_cnt),
4532[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4533 CNTR_NORMAL,
4534 access_rx_rbuf_next_free_buf_unc_err_cnt),
4535[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4536 "RxRbufFlInitWrAddrParityErr", 0, 0,
4537 CNTR_NORMAL,
4538 access_rbuf_fl_init_wr_addr_parity_err_cnt),
4539[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4540 0, CNTR_NORMAL,
4541 access_rx_rbuf_fl_initdone_parity_err_cnt),
4542[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4543 0, CNTR_NORMAL,
4544 access_rx_rbuf_fl_write_addr_parity_err_cnt),
4545[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4546 CNTR_NORMAL,
4547 access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4548[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4549 CNTR_NORMAL,
4550 access_rx_rbuf_empty_err_cnt),
4551[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4552 CNTR_NORMAL,
4553 access_rx_rbuf_full_err_cnt),
4554[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4555 CNTR_NORMAL,
4556 access_rbuf_bad_lookup_err_cnt),
4557[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4558 CNTR_NORMAL,
4559 access_rbuf_ctx_id_parity_err_cnt),
4560[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4561 CNTR_NORMAL,
4562 access_rbuf_csr_qeopdw_parity_err_cnt),
4563[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4564 "RxRbufCsrQNumOfPktParityErr", 0, 0,
4565 CNTR_NORMAL,
4566 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4567[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4568 "RxRbufCsrQTlPtrParityErr", 0, 0,
4569 CNTR_NORMAL,
4570 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4571[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4572 0, CNTR_NORMAL,
4573 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4574[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4575 0, CNTR_NORMAL,
4576 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4577[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4578 0, 0, CNTR_NORMAL,
4579 access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4580[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4581 0, CNTR_NORMAL,
4582 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4583[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4584 "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4585 CNTR_NORMAL,
4586 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4587[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4588 0, CNTR_NORMAL,
4589 access_rx_rbuf_block_list_read_cor_err_cnt),
4590[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4591 0, CNTR_NORMAL,
4592 access_rx_rbuf_block_list_read_unc_err_cnt),
4593[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4594 CNTR_NORMAL,
4595 access_rx_rbuf_lookup_des_cor_err_cnt),
4596[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4597 CNTR_NORMAL,
4598 access_rx_rbuf_lookup_des_unc_err_cnt),
4599[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4600 "RxRbufLookupDesRegUncCorErr", 0, 0,
4601 CNTR_NORMAL,
4602 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4603[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4604 CNTR_NORMAL,
4605 access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4606[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4607 CNTR_NORMAL,
4608 access_rx_rbuf_free_list_cor_err_cnt),
4609[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4610 CNTR_NORMAL,
4611 access_rx_rbuf_free_list_unc_err_cnt),
4612[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4613 CNTR_NORMAL,
4614 access_rx_rcv_fsm_encoding_err_cnt),
4615[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4616 CNTR_NORMAL,
4617 access_rx_dma_flag_cor_err_cnt),
4618[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4619 CNTR_NORMAL,
4620 access_rx_dma_flag_unc_err_cnt),
4621[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4622 CNTR_NORMAL,
4623 access_rx_dc_sop_eop_parity_err_cnt),
4624[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4625 CNTR_NORMAL,
4626 access_rx_rcv_csr_parity_err_cnt),
4627[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4628 CNTR_NORMAL,
4629 access_rx_rcv_qp_map_table_cor_err_cnt),
4630[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4631 CNTR_NORMAL,
4632 access_rx_rcv_qp_map_table_unc_err_cnt),
4633[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4634 CNTR_NORMAL,
4635 access_rx_rcv_data_cor_err_cnt),
4636[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4637 CNTR_NORMAL,
4638 access_rx_rcv_data_unc_err_cnt),
4639[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4640 CNTR_NORMAL,
4641 access_rx_rcv_hdr_cor_err_cnt),
4642[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4643 CNTR_NORMAL,
4644 access_rx_rcv_hdr_unc_err_cnt),
4645[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4646 CNTR_NORMAL,
4647 access_rx_dc_intf_parity_err_cnt),
4648[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4649 CNTR_NORMAL,
4650 access_rx_dma_csr_cor_err_cnt),
4651
4652[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4653 CNTR_NORMAL,
4654 access_pio_pec_sop_head_parity_err_cnt),
4655[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4656 CNTR_NORMAL,
4657 access_pio_pcc_sop_head_parity_err_cnt),
4658[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4659 0, 0, CNTR_NORMAL,
4660 access_pio_last_returned_cnt_parity_err_cnt),
4661[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4662 0, CNTR_NORMAL,
4663 access_pio_current_free_cnt_parity_err_cnt),
4664[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4665 CNTR_NORMAL,
4666 access_pio_reserved_31_err_cnt),
4667[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4668 CNTR_NORMAL,
4669 access_pio_reserved_30_err_cnt),
4670[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4671 CNTR_NORMAL,
4672 access_pio_ppmc_sop_len_err_cnt),
4673[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4674 CNTR_NORMAL,
4675 access_pio_ppmc_bqc_mem_parity_err_cnt),
4676[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4677 CNTR_NORMAL,
4678 access_pio_vl_fifo_parity_err_cnt),
4679[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4680 CNTR_NORMAL,
4681 access_pio_vlf_sop_parity_err_cnt),
4682[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4683 CNTR_NORMAL,
4684 access_pio_vlf_v1_len_parity_err_cnt),
4685[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4686 CNTR_NORMAL,
4687 access_pio_block_qw_count_parity_err_cnt),
4688[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4689 CNTR_NORMAL,
4690 access_pio_write_qw_valid_parity_err_cnt),
4691[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4692 CNTR_NORMAL,
4693 access_pio_state_machine_err_cnt),
4694[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4695 CNTR_NORMAL,
4696 access_pio_write_data_parity_err_cnt),
4697[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4698 CNTR_NORMAL,
4699 access_pio_host_addr_mem_cor_err_cnt),
4700[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4701 CNTR_NORMAL,
4702 access_pio_host_addr_mem_unc_err_cnt),
4703[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4704 CNTR_NORMAL,
4705 access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4706[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4707 CNTR_NORMAL,
4708 access_pio_init_sm_in_err_cnt),
4709[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4710 CNTR_NORMAL,
4711 access_pio_ppmc_pbl_fifo_err_cnt),
4712[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4713 0, CNTR_NORMAL,
4714 access_pio_credit_ret_fifo_parity_err_cnt),
4715[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4716 CNTR_NORMAL,
4717 access_pio_v1_len_mem_bank1_cor_err_cnt),
4718[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4719 CNTR_NORMAL,
4720 access_pio_v1_len_mem_bank0_cor_err_cnt),
4721[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4722 CNTR_NORMAL,
4723 access_pio_v1_len_mem_bank1_unc_err_cnt),
4724[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4725 CNTR_NORMAL,
4726 access_pio_v1_len_mem_bank0_unc_err_cnt),
4727[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4728 CNTR_NORMAL,
4729 access_pio_sm_pkt_reset_parity_err_cnt),
4730[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4731 CNTR_NORMAL,
4732 access_pio_pkt_evict_fifo_parity_err_cnt),
4733[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4734 "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4735 CNTR_NORMAL,
4736 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4737[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4738 CNTR_NORMAL,
4739 access_pio_sbrdctl_crrel_parity_err_cnt),
4740[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4741 CNTR_NORMAL,
4742 access_pio_pec_fifo_parity_err_cnt),
4743[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4744 CNTR_NORMAL,
4745 access_pio_pcc_fifo_parity_err_cnt),
4746[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4747 CNTR_NORMAL,
4748 access_pio_sb_mem_fifo1_err_cnt),
4749[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4750 CNTR_NORMAL,
4751 access_pio_sb_mem_fifo0_err_cnt),
4752[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4753 CNTR_NORMAL,
4754 access_pio_csr_parity_err_cnt),
4755[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4756 CNTR_NORMAL,
4757 access_pio_write_addr_parity_err_cnt),
4758[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4759 CNTR_NORMAL,
4760 access_pio_write_bad_ctxt_err_cnt),
4761
4762[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4763 0, CNTR_NORMAL,
4764 access_sdma_pcie_req_tracking_cor_err_cnt),
4765[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4766 0, CNTR_NORMAL,
4767 access_sdma_pcie_req_tracking_unc_err_cnt),
4768[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4769 CNTR_NORMAL,
4770 access_sdma_csr_parity_err_cnt),
4771[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4772 CNTR_NORMAL,
4773 access_sdma_rpy_tag_err_cnt),
4774
4775[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4776 CNTR_NORMAL,
4777 access_tx_read_pio_memory_csr_unc_err_cnt),
4778[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4779 0, CNTR_NORMAL,
4780 access_tx_read_sdma_memory_csr_err_cnt),
4781[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4782 CNTR_NORMAL,
4783 access_tx_egress_fifo_cor_err_cnt),
4784[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4785 CNTR_NORMAL,
4786 access_tx_read_pio_memory_cor_err_cnt),
4787[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4788 CNTR_NORMAL,
4789 access_tx_read_sdma_memory_cor_err_cnt),
4790[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4791 CNTR_NORMAL,
4792 access_tx_sb_hdr_cor_err_cnt),
4793[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4794 CNTR_NORMAL,
4795 access_tx_credit_overrun_err_cnt),
4796[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4797 CNTR_NORMAL,
4798 access_tx_launch_fifo8_cor_err_cnt),
4799[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4800 CNTR_NORMAL,
4801 access_tx_launch_fifo7_cor_err_cnt),
4802[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4803 CNTR_NORMAL,
4804 access_tx_launch_fifo6_cor_err_cnt),
4805[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4806 CNTR_NORMAL,
4807 access_tx_launch_fifo5_cor_err_cnt),
4808[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4809 CNTR_NORMAL,
4810 access_tx_launch_fifo4_cor_err_cnt),
4811[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4812 CNTR_NORMAL,
4813 access_tx_launch_fifo3_cor_err_cnt),
4814[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4815 CNTR_NORMAL,
4816 access_tx_launch_fifo2_cor_err_cnt),
4817[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4818 CNTR_NORMAL,
4819 access_tx_launch_fifo1_cor_err_cnt),
4820[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4821 CNTR_NORMAL,
4822 access_tx_launch_fifo0_cor_err_cnt),
4823[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4824 CNTR_NORMAL,
4825 access_tx_credit_return_vl_err_cnt),
4826[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4827 CNTR_NORMAL,
4828 access_tx_hcrc_insertion_err_cnt),
4829[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4830 CNTR_NORMAL,
4831 access_tx_egress_fifo_unc_err_cnt),
4832[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4833 CNTR_NORMAL,
4834 access_tx_read_pio_memory_unc_err_cnt),
4835[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4836 CNTR_NORMAL,
4837 access_tx_read_sdma_memory_unc_err_cnt),
4838[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4839 CNTR_NORMAL,
4840 access_tx_sb_hdr_unc_err_cnt),
4841[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4842 CNTR_NORMAL,
4843 access_tx_credit_return_partiy_err_cnt),
4844[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4845 0, 0, CNTR_NORMAL,
4846 access_tx_launch_fifo8_unc_or_parity_err_cnt),
4847[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4848 0, 0, CNTR_NORMAL,
4849 access_tx_launch_fifo7_unc_or_parity_err_cnt),
4850[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4851 0, 0, CNTR_NORMAL,
4852 access_tx_launch_fifo6_unc_or_parity_err_cnt),
4853[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4854 0, 0, CNTR_NORMAL,
4855 access_tx_launch_fifo5_unc_or_parity_err_cnt),
4856[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4857 0, 0, CNTR_NORMAL,
4858 access_tx_launch_fifo4_unc_or_parity_err_cnt),
4859[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4860 0, 0, CNTR_NORMAL,
4861 access_tx_launch_fifo3_unc_or_parity_err_cnt),
4862[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4863 0, 0, CNTR_NORMAL,
4864 access_tx_launch_fifo2_unc_or_parity_err_cnt),
4865[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4866 0, 0, CNTR_NORMAL,
4867 access_tx_launch_fifo1_unc_or_parity_err_cnt),
4868[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4869 0, 0, CNTR_NORMAL,
4870 access_tx_launch_fifo0_unc_or_parity_err_cnt),
4871[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4872 0, 0, CNTR_NORMAL,
4873 access_tx_sdma15_disallowed_packet_err_cnt),
4874[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4875 0, 0, CNTR_NORMAL,
4876 access_tx_sdma14_disallowed_packet_err_cnt),
4877[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4878 0, 0, CNTR_NORMAL,
4879 access_tx_sdma13_disallowed_packet_err_cnt),
4880[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4881 0, 0, CNTR_NORMAL,
4882 access_tx_sdma12_disallowed_packet_err_cnt),
4883[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4884 0, 0, CNTR_NORMAL,
4885 access_tx_sdma11_disallowed_packet_err_cnt),
4886[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4887 0, 0, CNTR_NORMAL,
4888 access_tx_sdma10_disallowed_packet_err_cnt),
4889[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4890 0, 0, CNTR_NORMAL,
4891 access_tx_sdma9_disallowed_packet_err_cnt),
4892[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4893 0, 0, CNTR_NORMAL,
4894 access_tx_sdma8_disallowed_packet_err_cnt),
4895[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4896 0, 0, CNTR_NORMAL,
4897 access_tx_sdma7_disallowed_packet_err_cnt),
4898[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4899 0, 0, CNTR_NORMAL,
4900 access_tx_sdma6_disallowed_packet_err_cnt),
4901[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4902 0, 0, CNTR_NORMAL,
4903 access_tx_sdma5_disallowed_packet_err_cnt),
4904[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4905 0, 0, CNTR_NORMAL,
4906 access_tx_sdma4_disallowed_packet_err_cnt),
4907[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4908 0, 0, CNTR_NORMAL,
4909 access_tx_sdma3_disallowed_packet_err_cnt),
4910[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4911 0, 0, CNTR_NORMAL,
4912 access_tx_sdma2_disallowed_packet_err_cnt),
4913[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4914 0, 0, CNTR_NORMAL,
4915 access_tx_sdma1_disallowed_packet_err_cnt),
4916[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4917 0, 0, CNTR_NORMAL,
4918 access_tx_sdma0_disallowed_packet_err_cnt),
4919[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4920 CNTR_NORMAL,
4921 access_tx_config_parity_err_cnt),
4922[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4923 CNTR_NORMAL,
4924 access_tx_sbrd_ctl_csr_parity_err_cnt),
4925[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4926 CNTR_NORMAL,
4927 access_tx_launch_csr_parity_err_cnt),
4928[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4929 CNTR_NORMAL,
4930 access_tx_illegal_vl_err_cnt),
4931[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4932 "TxSbrdCtlStateMachineParityErr", 0, 0,
4933 CNTR_NORMAL,
4934 access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4935[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4936 CNTR_NORMAL,
4937 access_egress_reserved_10_err_cnt),
4938[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4939 CNTR_NORMAL,
4940 access_egress_reserved_9_err_cnt),
4941[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4942 0, 0, CNTR_NORMAL,
4943 access_tx_sdma_launch_intf_parity_err_cnt),
4944[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4945 CNTR_NORMAL,
4946 access_tx_pio_launch_intf_parity_err_cnt),
4947[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4948 CNTR_NORMAL,
4949 access_egress_reserved_6_err_cnt),
4950[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4951 CNTR_NORMAL,
4952 access_tx_incorrect_link_state_err_cnt),
4953[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4954 CNTR_NORMAL,
4955 access_tx_linkdown_err_cnt),
4956[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4957 "EgressFifoUnderrunOrParityErr", 0, 0,
4958 CNTR_NORMAL,
4959 access_tx_egress_fifi_underrun_or_parity_err_cnt),
4960[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4961 CNTR_NORMAL,
4962 access_egress_reserved_2_err_cnt),
4963[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4964 CNTR_NORMAL,
4965 access_tx_pkt_integrity_mem_unc_err_cnt),
4966[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4967 CNTR_NORMAL,
4968 access_tx_pkt_integrity_mem_cor_err_cnt),
4969
4970[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4971 CNTR_NORMAL,
4972 access_send_csr_write_bad_addr_err_cnt),
4973[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4974 CNTR_NORMAL,
4975 access_send_csr_read_bad_addr_err_cnt),
4976[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4977 CNTR_NORMAL,
4978 access_send_csr_parity_cnt),
4979
4980[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4981 CNTR_NORMAL,
4982 access_pio_write_out_of_bounds_err_cnt),
4983[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4984 CNTR_NORMAL,
4985 access_pio_write_overflow_err_cnt),
4986[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4987 0, 0, CNTR_NORMAL,
4988 access_pio_write_crosses_boundary_err_cnt),
4989[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4990 CNTR_NORMAL,
4991 access_pio_disallowed_packet_err_cnt),
4992[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4993 CNTR_NORMAL,
4994 access_pio_inconsistent_sop_err_cnt),
4995
4996[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4997 0, 0, CNTR_NORMAL,
4998 access_sdma_header_request_fifo_cor_err_cnt),
4999[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
5000 CNTR_NORMAL,
5001 access_sdma_header_storage_cor_err_cnt),
5002[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
5003 CNTR_NORMAL,
5004 access_sdma_packet_tracking_cor_err_cnt),
5005[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
5006 CNTR_NORMAL,
5007 access_sdma_assembly_cor_err_cnt),
5008[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
5009 CNTR_NORMAL,
5010 access_sdma_desc_table_cor_err_cnt),
5011[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
5012 0, 0, CNTR_NORMAL,
5013 access_sdma_header_request_fifo_unc_err_cnt),
5014[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
5015 CNTR_NORMAL,
5016 access_sdma_header_storage_unc_err_cnt),
5017[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
5018 CNTR_NORMAL,
5019 access_sdma_packet_tracking_unc_err_cnt),
5020[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
5021 CNTR_NORMAL,
5022 access_sdma_assembly_unc_err_cnt),
5023[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
5024 CNTR_NORMAL,
5025 access_sdma_desc_table_unc_err_cnt),
5026[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
5027 CNTR_NORMAL,
5028 access_sdma_timeout_err_cnt),
5029[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
5030 CNTR_NORMAL,
5031 access_sdma_header_length_err_cnt),
5032[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
5033 CNTR_NORMAL,
5034 access_sdma_header_address_err_cnt),
5035[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
5036 CNTR_NORMAL,
5037 access_sdma_header_select_err_cnt),
5038[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
5039 CNTR_NORMAL,
5040 access_sdma_reserved_9_err_cnt),
5041[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
5042 CNTR_NORMAL,
5043 access_sdma_packet_desc_overflow_err_cnt),
5044[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
5045 CNTR_NORMAL,
5046 access_sdma_length_mismatch_err_cnt),
5047[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
5048 CNTR_NORMAL,
5049 access_sdma_halt_err_cnt),
5050[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
5051 CNTR_NORMAL,
5052 access_sdma_mem_read_err_cnt),
5053[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
5054 CNTR_NORMAL,
5055 access_sdma_first_desc_err_cnt),
5056[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
5057 CNTR_NORMAL,
5058 access_sdma_tail_out_of_bounds_err_cnt),
5059[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
5060 CNTR_NORMAL,
5061 access_sdma_too_long_err_cnt),
5062[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
5063 CNTR_NORMAL,
5064 access_sdma_gen_mismatch_err_cnt),
5065[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
5066 CNTR_NORMAL,
5067 access_sdma_wrong_dw_err_cnt),
5068};
5069
/*
 * Per-port counter table, indexed by the C_* port counter enum values.
 * Entries mix hardware TXE/RXE CSR-backed counters, software counters
 * with access callbacks, and per-context overflow counters (OVR_*).
 */
static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
			CNTR_NORMAL),
[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
			CNTR_NORMAL),
[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
			CNTR_NORMAL),
[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
			CNTR_NORMAL),
[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
			CNTR_NORMAL),
[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
			CNTR_NORMAL),
[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
			CNTR_NORMAL),
[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
/* CNTR_VL entries fan out into one counter per VL */
[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
				      CNTR_SYNTH | CNTR_VL),
[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
				     CNTR_SYNTH | CNTR_VL),
[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
				      CNTR_SYNTH | CNTR_VL),
[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
/* software link state transition counters */
[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			     access_sw_link_dn_cnt),
[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			   access_sw_link_up_cnt),
[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
				 access_sw_unknown_frame_cnt),
[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			     access_sw_xmit_discards),
[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
				CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
				access_sw_xmit_discards),
[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
				 access_xmit_constraint_errs),
[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
				access_rcv_constraint_errs),
/* software IB-protocol (verbs) counters */
[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
/* NOTE(review): "RcDupRew" looks like a typo for "RcDupReq"; kept as-is
 * because the counter name may be visible to userspace tooling.
 */
[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
[C_SW_IBP_RC_CRWAITS] = SW_IBP_CNTR(RcCrWait, rc_crwaits),
[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
			       access_sw_cpu_rc_acks),
[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
				access_sw_cpu_rc_qacks),
[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
				       access_sw_cpu_rc_delayed_comp),
/* NOTE(review): OVR_LBL/OVR_ELM appear to define one overflow counter per
 * receive context (0..159); confirm against the OVR_ELM macro definition.
 */
[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
};
5211
5212
5213
5214
5215int is_ax(struct hfi1_devdata *dd)
5216{
5217 u8 chip_rev_minor =
5218 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5219 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5220 return (chip_rev_minor & 0xf0) == 0;
5221}
5222
5223
5224int is_bx(struct hfi1_devdata *dd)
5225{
5226 u8 chip_rev_minor =
5227 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5228 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5229 return (chip_rev_minor & 0xF0) == 0x10;
5230}
5231
5232
5233bool is_urg_masked(struct hfi1_ctxtdata *rcd)
5234{
5235 u64 mask;
5236 u32 is = IS_RCVURGENT_START + rcd->ctxt;
5237 u8 bit = is % 64;
5238
5239 mask = read_csr(rcd->dd, CCE_INT_MASK + (8 * (is / 64)));
5240 return !(mask & BIT_ULL(bit));
5241}
5242
5243
5244
5245
5246
5247
5248
/*
 * Append string @s to the buffer tracked by *@curp/*@lenp, inserting a
 * comma separator first if anything has already been written (i.e. the
 * write pointer has advanced past @buf).  No nul terminator is written.
 *
 * On return, *curp and *lenp reflect however much was copied.
 * Return 0 on success, 1 if the buffer ran out of room.
 */
static int append_str(char *buf, char **curp, int *lenp, const char *s)
{
	char *dst = *curp;
	int remaining = *lenp;
	int overflow = 0;

	/* not the first entry - separate it from the previous one */
	if (dst != buf) {
		if (remaining == 0) {
			overflow = 1;
		} else {
			*dst++ = ',';
			remaining--;
		}
	}

	/* copy the string, stopping as soon as we run out of room */
	while (!overflow && *s) {
		if (remaining == 0) {
			overflow = 1;
			break;
		}
		*dst++ = *s++;
		remaining--;
	}

	/* report progress back to the caller, even on overflow */
	*curp = dst;
	*lenp = remaining;

	return overflow;
}
5283
5284
5285
5286
5287
/*
 * Format the set bits of @flags into @buf as a comma-separated list of
 * the names in @table.  Bits not covered by the table are appended as a
 * "bits 0x..." entry.  If the output does not fit, the last character is
 * replaced with '*'.  Returns @buf (always nul-terminated), or "" if the
 * buffer is too small to hold anything.
 */
static char *flag_string(char *buf, int buf_len, u64 flags,
			 struct flag_table *table, int table_size)
{
	char extra[32];
	char *p = buf;
	int len = buf_len;
	int no_room = 0;
	int i;

	/* make sure there is at least one character of room plus the nul */
	if (len < 2)
		return "";

	len--;	/* reserve space for the terminating nul */
	for (i = 0; i < table_size; i++) {
		if (flags & table[i].flag) {
			no_room = append_str(buf, &p, &len, table[i].str);
			if (no_room)
				break;
			/* clear so leftovers can be reported below */
			flags &= ~table[i].flag;
		}
	}

	/* any bits left over that had no table entry? */
	if (!no_room && flags) {
		snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
		no_room = append_str(buf, &p, &len, extra);
	}

	/* mark a truncated result with a trailing '*' */
	if (no_room) {
		/* if completely full, step back so '*' fits before the nul */
		if (len == 0)
			--p;
		*p++ = '*';
	}

	/* terminate */
	*p = 0;
	return buf;
}
5329
5330
/*
 * Names of the general-error interrupt sources, in bit order; indexed
 * by source number in is_misc_err_name() below.
 */
static const char * const cce_misc_names[] = {
	"CceErrInt",		/* 0 */
	"RxeErrInt",		/* 1 */
	"MiscErrInt",		/* 2 */
	"Reserved3",		/* 3 */
	"PioErrInt",		/* 4 */
	"SDmaErrInt",		/* 5 */
	"EgressErrInt",		/* 6 */
	"TxeErrInt"		/* 7 */
};
5341
5342
5343
5344
5345static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5346{
5347 if (source < ARRAY_SIZE(cce_misc_names))
5348 strncpy(buf, cce_misc_names[source], bsize);
5349 else
5350 snprintf(buf, bsize, "Reserved%u",
5351 source + IS_GENERAL_ERR_START);
5352
5353 return buf;
5354}
5355
5356
5357
5358
/* Return the SDMA engine error interrupt name for engine @source. */
static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "SDmaEngErrInt%u", source);

	return buf;
}
5364
5365
5366
5367
/* Return the send context error interrupt name for context @source. */
static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "SendCtxtErrInt%u", source);

	return buf;
}
5373
/*
 * Names of the "various" interrupt sources, in bit order; indexed by
 * source number in is_various_name() below.
 */
static const char * const various_names[] = {
	"PbcInt",		/* 0 */
	"GpioAssertInt",	/* 1 */
	"Qsfp1Int",		/* 2 */
	"Qsfp2Int",		/* 3 */
	"TCritInt"		/* 4 */
};
5381
5382
5383
5384
5385static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5386{
5387 if (source < ARRAY_SIZE(various_names))
5388 strncpy(buf, various_names[source], bsize);
5389 else
5390 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
5391 return buf;
5392}
5393
5394
5395
5396
/*
 * Return the DC interrupt name for @source, formatted into @buf.
 * Known sources map to "dc_<name>_int"; anything else becomes "DCIntN".
 */
static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
{
	static const char * const dc_int_names[] = {
		"common",
		"lcb",
		"8051",
		"lbm"
	};

	if (source >= ARRAY_SIZE(dc_int_names))
		snprintf(buf, bsize, "DCInt%u", source);
	else
		snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
	return buf;
}
5412
/*
 * Names of the three per-engine SDMA interrupt types, indexed by
 * (source / TXE_NUM_SDMA_ENGINES) in is_sdma_eng_name() below.
 */
static const char * const sdma_int_names[] = {
	"SDmaInt",
	"SdmaIdleInt",
	"SdmaProgressInt",
};
5418
5419
5420
5421
/*
 * Return the SDMA engine interrupt name for @source, formatted into
 * @buf.  The interrupt sources are laid out as three consecutive groups
 * of TXE_NUM_SDMA_ENGINES bits, one group per interrupt type.
 */
static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
{
	/* what interrupt type: 0 = SDmaInt, 1 = SdmaIdleInt, 2 = SdmaProgressInt */
	unsigned int what = source / TXE_NUM_SDMA_ENGINES;
	/* which SDMA engine within that type */
	unsigned int which = source % TXE_NUM_SDMA_ENGINES;

	/* 3 == number of entries in sdma_int_names[]; keep them in sync */
	if (likely(what < 3))
		snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
	else
		snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
	return buf;
}
5435
5436
5437
5438
/* Return the receive-available interrupt name for context @source. */
static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "RcvAvailInt%u", source);

	return buf;
}
5444
5445
5446
5447
/* Return the receive-urgent interrupt name for context @source. */
static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "RcvUrgentInt%u", source);

	return buf;
}
5453
5454
5455
5456
/* Return the send-credit interrupt name for context @source. */
static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "SendCreditInt%u", source);

	return buf;
}
5462
5463
5464
5465
5466static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5467{
5468 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5469 return buf;
5470}
5471
5472static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5473{
5474 return flag_string(buf, buf_len, flags,
5475 cce_err_status_flags,
5476 ARRAY_SIZE(cce_err_status_flags));
5477}
5478
5479static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5480{
5481 return flag_string(buf, buf_len, flags,
5482 rxe_err_status_flags,
5483 ARRAY_SIZE(rxe_err_status_flags));
5484}
5485
5486static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5487{
5488 return flag_string(buf, buf_len, flags, misc_err_status_flags,
5489 ARRAY_SIZE(misc_err_status_flags));
5490}
5491
5492static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5493{
5494 return flag_string(buf, buf_len, flags,
5495 pio_err_status_flags,
5496 ARRAY_SIZE(pio_err_status_flags));
5497}
5498
5499static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5500{
5501 return flag_string(buf, buf_len, flags,
5502 sdma_err_status_flags,
5503 ARRAY_SIZE(sdma_err_status_flags));
5504}
5505
5506static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5507{
5508 return flag_string(buf, buf_len, flags,
5509 egress_err_status_flags,
5510 ARRAY_SIZE(egress_err_status_flags));
5511}
5512
5513static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5514{
5515 return flag_string(buf, buf_len, flags,
5516 egress_err_info_flags,
5517 ARRAY_SIZE(egress_err_info_flags));
5518}
5519
5520static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5521{
5522 return flag_string(buf, buf_len, flags,
5523 send_err_status_flags,
5524 ARRAY_SIZE(send_err_status_flags));
5525}
5526
5527static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5528{
5529 char buf[96];
5530 int i = 0;
5531
5532
5533
5534
5535
5536 dd_dev_info(dd, "CCE Error: %s\n",
5537 cce_err_status_string(buf, sizeof(buf), reg));
5538
5539 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5540 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5541
5542
5543 start_freeze_handling(dd->pport, FREEZE_SELF);
5544 }
5545
5546 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5547 if (reg & (1ull << i)) {
5548 incr_cntr64(&dd->cce_err_status_cnt[i]);
5549
5550 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5551 }
5552 }
5553}
5554
5555
5556
5557
5558
5559#define RCVERR_CHECK_TIME 10
5560static void update_rcverr_timer(struct timer_list *t)
5561{
5562 struct hfi1_devdata *dd = from_timer(dd, t, rcverr_timer);
5563 struct hfi1_pportdata *ppd = dd->pport;
5564 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5565
5566 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5567 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5568 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5569 set_link_down_reason(
5570 ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5571 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5572 queue_work(ppd->link_wq, &ppd->link_bounce_work);
5573 }
5574 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
5575
5576 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5577}
5578
5579static int init_rcverr(struct hfi1_devdata *dd)
5580{
5581 timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0);
5582
5583 dd->rcv_ovfl_cnt = 0;
5584 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5585}
5586
5587static void free_rcverr(struct hfi1_devdata *dd)
5588{
5589 if (dd->rcverr_timer.function)
5590 del_timer_sync(&dd->rcverr_timer);
5591}
5592
5593static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5594{
5595 char buf[96];
5596 int i = 0;
5597
5598 dd_dev_info(dd, "Receive Error: %s\n",
5599 rxe_err_status_string(buf, sizeof(buf), reg));
5600
5601 if (reg & ALL_RXE_FREEZE_ERR) {
5602 int flags = 0;
5603
5604
5605
5606
5607
5608 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5609 flags = FREEZE_ABORT;
5610
5611 start_freeze_handling(dd->pport, flags);
5612 }
5613
5614 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5615 if (reg & (1ull << i))
5616 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5617 }
5618}
5619
5620static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5621{
5622 char buf[96];
5623 int i = 0;
5624
5625 dd_dev_info(dd, "Misc Error: %s",
5626 misc_err_status_string(buf, sizeof(buf), reg));
5627 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5628 if (reg & (1ull << i))
5629 incr_cntr64(&dd->misc_err_status_cnt[i]);
5630 }
5631}
5632
5633static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5634{
5635 char buf[96];
5636 int i = 0;
5637
5638 dd_dev_info(dd, "PIO Error: %s\n",
5639 pio_err_status_string(buf, sizeof(buf), reg));
5640
5641 if (reg & ALL_PIO_FREEZE_ERR)
5642 start_freeze_handling(dd->pport, 0);
5643
5644 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5645 if (reg & (1ull << i))
5646 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5647 }
5648}
5649
5650static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5651{
5652 char buf[96];
5653 int i = 0;
5654
5655 dd_dev_info(dd, "SDMA Error: %s\n",
5656 sdma_err_status_string(buf, sizeof(buf), reg));
5657
5658 if (reg & ALL_SDMA_FREEZE_ERR)
5659 start_freeze_handling(dd->pport, 0);
5660
5661 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5662 if (reg & (1ull << i))
5663 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5664 }
5665}
5666
/* Count one transmit discard against the port. */
static inline void __count_port_discards(struct hfi1_pportdata *ppd)
{
	incr_cntr64(&ppd->port_xmit_discards);
}
5671
/* Count a port-inactive egress error as a port transmit discard. */
static void count_port_inactive(struct hfi1_devdata *dd)
{
	__count_port_discards(dd->pport);
}
5676
5677
5678
5679
5680
5681
5682
5683
5684
5685
/*
 * Read, log, and clear the egress error info CSR.  If any of the set
 * bits are port-discard errors, count each of them against the port
 * and, when the VL is known, against that VL's discard counter.
 *
 * @vl: VL the error occurred on: 0..TXE_NUM_DATA_VL-1 for data VLs,
 *      15 for VL15, or a negative value when unknown.
 */
static void handle_send_egress_err_info(struct hfi1_devdata *dd,
					int vl)
{
	struct hfi1_pportdata *ppd = dd->pport;
	u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE);
	u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
	char buf[96];

	/* the info CSR is write-to-clear: write back what we read */
	write_csr(dd, SEND_EGRESS_ERR_INFO, info);

	dd_dev_info(dd,
		    "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
		    info, egress_err_info_string(buf, sizeof(buf), info), src);

	/* count port-discard errors against the port and VL */
	if (info & PORT_DISCARD_EGRESS_ERRS) {
		int weight, i;

		/*
		 * Each set discard bit is counted as a separate discard.
		 * NOTE(review): the info CSR may accumulate bits from more
		 * than one packet between reads, so this per-bit counting
		 * is presumably intentional - confirm against hardware
		 * documentation.
		 */
		weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
		for (i = 0; i < weight; i++) {
			__count_port_discards(ppd);
			if (vl >= 0 && vl < TXE_NUM_DATA_VL)
				incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
			else if (vl == 15)
				incr_cntr64(&ppd->port_xmit_discards_vl
					    [C_VL_15]);
		}
	}
}
5738
5739
5740
5741
5742
5743static inline int port_inactive_err(u64 posn)
5744{
5745 return (posn >= SEES(TX_LINKDOWN) &&
5746 posn <= SEES(TX_INCORRECT_LINK_STATE));
5747}
5748
5749
5750
5751
5752
5753static inline int disallowed_pkt_err(int posn)
5754{
5755 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5756 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5757}
5758
5759
5760
5761
5762
5763
/*
 * Translate a disallowed-packet egress error bit position into the
 * SDMA engine number that raised it.  Only meaningful when
 * disallowed_pkt_err() is true for the same position.
 */
static inline int disallowed_pkt_engine(int posn)
{
	return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
}
5768
5769
5770
5771
5772
/*
 * Map an SDMA engine number to the VL it currently serves.
 * Returns -1 for an out-of-range engine number.
 */
static int engine_to_vl(struct hfi1_devdata *dd, int engine)
{
	struct sdma_vl_map *m;
	int vl;

	/* range check */
	if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
		return -1;

	/* the engine-to-VL map is RCU-managed; read under the RCU lock */
	rcu_read_lock();
	m = rcu_dereference(dd->sdma_map);
	vl = m->engine_to_vl[engine];
	rcu_read_unlock();

	return vl;
}
5789
5790
5791
5792
5793
5794static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5795{
5796 struct send_context_info *sci;
5797 struct send_context *sc;
5798 int i;
5799
5800 sci = &dd->send_contexts[sw_index];
5801
5802
5803 if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
5804 return -1;
5805
5806 sc = sci->sc;
5807 if (!sc)
5808 return -1;
5809 if (dd->vld[15].sc == sc)
5810 return 15;
5811 for (i = 0; i < num_vls; i++)
5812 if (dd->vld[i].sc == sc)
5813 return i;
5814
5815 return -1;
5816}
5817
/*
 * Interrupt handler for send egress errors.  Freeze-class errors kick
 * off SPC freeze handling.  Port-inactive and SDMA disallowed-packet
 * errors are counted specially and suppressed from the generic log;
 * anything remaining is logged, and the set bits are counted.
 */
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	u64 reg_copy = reg, handled = 0;
	char buf[96];
	int i = 0;

	if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
		start_freeze_handling(dd->pport, 0);
	else if (is_ax(dd) &&
		 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
		 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
		/* Ax hardware: this error also requires a freeze */
		start_freeze_handling(dd->pport, 0);

	/* walk the set bits from most significant downward */
	while (reg_copy) {
		int posn = fls64(reg_copy);
		/* fls64() returns a 1-based bit number */
		int shift = posn - 1;
		u64 mask = 1ULL << shift;

		if (port_inactive_err(shift)) {
			count_port_inactive(dd);
			handled |= mask;
		} else if (disallowed_pkt_err(shift)) {
			int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));

			handle_send_egress_err_info(dd, vl);
			handled |= mask;
		}
		reg_copy &= ~mask;
	}

	/* drop the specially-handled bits before the generic report */
	reg &= ~handled;

	if (reg)
		dd_dev_info(dd, "Egress Error: %s\n",
			    egress_err_status_string(buf, sizeof(buf), reg));

	/* note: only unhandled bits are counted here */
	for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->send_egress_err_status_cnt[i]);
	}
}
5860
5861static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5862{
5863 char buf[96];
5864 int i = 0;
5865
5866 dd_dev_info(dd, "Send Error: %s\n",
5867 send_err_status_string(buf, sizeof(buf), reg));
5868
5869 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5870 if (reg & (1ull << i))
5871 incr_cntr64(&dd->send_err_status_cnt[i]);
5872 }
5873}
5874
5875
5876
5877
5878
5879#define MAX_CLEAR_COUNT 20
5880
5881
5882
5883
5884
5885
5886
5887
5888
5889
5890
5891
/*
 * Clear down an error interrupt: repeatedly read the status CSR,
 * write-to-clear it, and invoke the handler, until the status reads
 * zero.  If the error keeps re-asserting (more than MAX_CLEAR_COUNT
 * iterations), mask it off in the mask CSR so it cannot interrupt
 * again.
 *
 * @context: kernel context index used by the kctxt CSR accessors
 * @eri: describes the status/clear/mask CSRs and the handler to call
 */
static void interrupt_clear_down(struct hfi1_devdata *dd,
				 u32 context,
				 const struct err_reg_info *eri)
{
	u64 reg;
	u32 count;

	/* read in a loop until no more errors are seen */
	count = 0;
	while (1) {
		reg = read_kctxt_csr(dd, context, eri->status);
		if (reg == 0)
			break;
		write_kctxt_csr(dd, context, eri->clear, reg);
		if (likely(eri->handler))
			eri->handler(dd, context, reg);
		count++;
		if (count > MAX_CLEAR_COUNT) {
			u64 mask;

			dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
				   eri->desc, reg);
			/*
			 * Read-modify-write the mask CSR so only the
			 * repeating bits are disabled.
			 */
			mask = read_kctxt_csr(dd, context, eri->mask);
			mask &= ~reg;
			write_kctxt_csr(dd, context, eri->mask, mask);
			break;
		}
	}
}
5925
5926
5927
5928
5929static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5930{
5931 const struct err_reg_info *eri = &misc_errs[source];
5932
5933 if (eri->handler) {
5934 interrupt_clear_down(dd, 0, eri);
5935 } else {
5936 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5937 source);
5938 }
5939}
5940
/* Decode send-context error status flags into buf; returns buf. */
static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   sc_err_status_flags,
			   ARRAY_SIZE(sc_err_status_flags));
}
5947
5948
5949
5950
5951
5952
5953
5954
5955
5956
/*
 * Handle a send-context error interrupt for hardware context
 * @hw_context: halt the context, log and count the error status, and
 * queue restart work for non-user contexts.
 */
static void is_sendctxt_err_int(struct hfi1_devdata *dd,
				unsigned int hw_context)
{
	struct send_context_info *sci;
	struct send_context *sc;
	char flags[96];
	u64 status;
	u32 sw_index;
	int i = 0;
	unsigned long irq_flags;

	sw_index = dd->hw_to_sw[hw_context];
	if (sw_index >= dd->num_send_contexts) {
		dd_dev_err(dd,
			   "out of range sw index %u for send context %u\n",
			   sw_index, hw_context);
		return;
	}
	sci = &dd->send_contexts[sw_index];
	/* sc_lock is held while dereferencing sci->sc and using sc */
	spin_lock_irqsave(&dd->sc_lock, irq_flags);
	sc = sci->sc;
	if (!sc) {
		dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
			   sw_index, hw_context);
		spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
		return;
	}

	/* tell the software that a halt has begun */
	sc_stop(sc, SCF_HALTED);

	status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);

	dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
		    send_context_err_status_string(flags, sizeof(flags),
						   status));

	if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
		handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));

	/*
	 * Automatically restart halted kernel contexts out of interrupt
	 * context.  User contexts are not restarted here; presumably
	 * they recover via a driver request - confirm with the halt_work
	 * implementation.
	 */
	if (sc->type != SC_USER)
		queue_work(dd->pport->hfi1_wq, &sc->halt_work);
	spin_unlock_irqrestore(&dd->sc_lock, irq_flags);

	/*
	 * Update the counters for the corresponding status bits.
	 * These counters aggregate across all send contexts.
	 */
	for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
		if (status & (1ull << i))
			incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
	}
}
6015
/*
 * Handle an SDMA engine error: forward the status to the SDMA layer
 * and bump the per-bit software error counters.
 *
 * @source: SDMA engine index into dd->per_sdma
 * @status: SDMA engine error status register value
 */
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
				unsigned int source, u64 status)
{
	struct sdma_engine *sde;
	int i = 0;

	sde = &dd->per_sdma[source];
#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
		   slashstrip(__FILE__), __LINE__, __func__);
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
		   sde->this_idx, source, (unsigned long long)status);
#endif
	sde->err_cnt++;
	sdma_engine_error(sde, status);

	/*
	 * Update the counters for the corresponding status bits.
	 * These counters aggregate across all SDMA engines.
	 */
	for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
		if (status & (1ull << i))
			incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
	}
}
6042
6043
6044
6045
/*
 * Per-engine SDMA error interrupt: clear down the engine's error CSRs
 * using the sdma_eng_err register description.
 */
static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
{
#ifdef CONFIG_SDMA_VERBOSITY
	struct sdma_engine *sde = &dd->per_sdma[source];

	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
		   slashstrip(__FILE__), __LINE__, __func__);
	dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
		   source);
	sdma_dumpstate(sde);
#endif
	interrupt_clear_down(dd, source, &sdma_eng_err);
}
6059
6060
6061
6062
6063static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
6064{
6065 const struct err_reg_info *eri = &various_err[source];
6066
6067
6068
6069
6070
6071
6072 if (source == TCRIT_INT_SOURCE)
6073 handle_temp_err(dd);
6074 else if (eri->handler)
6075 interrupt_clear_down(dd, 0, eri);
6076 else
6077 dd_dev_info(dd,
6078 "%s: Unimplemented/reserved interrupt %d\n",
6079 __func__, source);
6080}
6081
/*
 * Handle a QSFP interrupt: module presence change (insert/remove) and
 * module-raised interrupts.  Updates cached QSFP state under
 * qsfp_lock, adjusts the QSFP invert CSR, and queues follow-up work.
 */
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
{
	/* only one port on the chip; dd->pport is that port */
	struct hfi1_pportdata *ppd = dd->pport;
	unsigned long flags;
	u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);

	if (reg & QSFP_HFI0_MODPRST_N) {
		if (!qsfp_mod_present(ppd)) {
			dd_dev_info(dd, "%s: QSFP module removed\n",
				    __func__);

			ppd->driver_link_ready = 0;
			/*
			 * Cable removed: reset all cached QSFP state
			 * under the lock.
			 */
			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
			/*
			 * ppd variable is set in handle_link_up(), which is
			 * not realistic here; stale cache data is simply
			 * invalidated.
			 */
			ppd->qsfp_info.cache_valid = 0;
			ppd->qsfp_info.reset_needed = 0;
			ppd->qsfp_info.limiting_active = 0;
			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
					       flags);
			/* invert both interrupt sources while removed */
			write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
				  ASIC_QSFP1_INVERT, qsfp_int_mgmt);

			if ((ppd->offline_disabled_reason >
			  HFI1_ODR_MASK(
			  OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
			  (ppd->offline_disabled_reason ==
			  HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
				ppd->offline_disabled_reason =
				HFI1_ODR_MASK(
				OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);

			if (ppd->host_link_state == HLS_DN_POLL) {
				/*
				 * The link is still in POLL; take it down
				 * since there is no cable to train against.
				 */
				queue_work(ppd->link_wq, &ppd->link_down_work);
			}
		} else {
			dd_dev_info(dd, "%s: QSFP module inserted\n",
				    __func__);

			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
			ppd->qsfp_info.cache_valid = 0;
			ppd->qsfp_info.cache_refresh_required = 1;
			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
					       flags);

			/*
			 * Stop inverting MODPRST_N now that the module is
			 * present; keep INT_N inverted.
			 */
			qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
			write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
				  ASIC_QSFP1_INVERT, qsfp_int_mgmt);

			ppd->offline_disabled_reason =
				HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
		}
	}

	if (reg & QSFP_HFI0_INT_N) {
		dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
			    __func__);
		spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
		ppd->qsfp_info.check_interrupt_flags = 1;
		spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
	}

	/* schedule the QSFP work only if there is a cable attached */
	if (qsfp_mod_present(ppd))
		queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work);
}
6167
6168static int request_host_lcb_access(struct hfi1_devdata *dd)
6169{
6170 int ret;
6171
6172 ret = do_8051_command(dd, HCMD_MISC,
6173 (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6174 LOAD_DATA_FIELD_ID_SHIFT, NULL);
6175 if (ret != HCMD_SUCCESS) {
6176 dd_dev_err(dd, "%s: command failed with error %d\n",
6177 __func__, ret);
6178 }
6179 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6180}
6181
6182static int request_8051_lcb_access(struct hfi1_devdata *dd)
6183{
6184 int ret;
6185
6186 ret = do_8051_command(dd, HCMD_MISC,
6187 (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6188 LOAD_DATA_FIELD_ID_SHIFT, NULL);
6189 if (ret != HCMD_SUCCESS) {
6190 dd_dev_err(dd, "%s: command failed with error %d\n",
6191 __func__, ret);
6192 }
6193 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6194}
6195
6196
6197
6198
6199
/*
 * Route DCC and LCB CSR access to the host.  Pair with a successful
 * request_host_lcb_access().
 */
static inline void set_host_lcb_access(struct hfi1_devdata *dd)
{
	write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
		  DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
		  DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
}
6206
6207
6208
6209
6210
/*
 * Return LCB CSR access to the 8051: only the DCC bit remains set in
 * the access-select CSR.
 */
static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
{
	write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
		  DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
}
6216
6217
6218
6219
6220
6221
6222
6223
6224
6225
6226
/*
 * Acquire host access to the LCB CSRs.  Access is requested from the
 * 8051 for the first caller; subsequent callers just bump the
 * reference count (dd->lcb_access_count).
 *
 * @sleep_ok: non-zero means sleeping on hls_lock is allowed; zero
 *	      means busy-wait (caller is in a non-sleepable context).
 *
 * Returns 0 on success, -EBUSY if the link is down or the 8051
 * refused the request.
 */
int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
{
	struct hfi1_pportdata *ppd = dd->pport;
	int ret = 0;

	/*
	 * Serialize against host link state changes: LCB access is
	 * only valid while the link is up, and hls_lock protects both.
	 */
	if (sleep_ok) {
		mutex_lock(&ppd->hls_lock);
	} else {
		while (!mutex_trylock(&ppd->hls_lock))
			udelay(1);
	}

	/* this access is valid only when the link is up */
	if (ppd->host_link_state & HLS_DOWN) {
		dd_dev_info(dd, "%s: link state %s not up\n",
			    __func__, link_state_name(ppd->host_link_state));
		ret = -EBUSY;
		goto done;
	}

	/* first user: actually request access from the 8051 */
	if (dd->lcb_access_count == 0) {
		ret = request_host_lcb_access(dd);
		if (ret) {
			dd_dev_err(dd,
				   "%s: unable to acquire LCB access, err %d\n",
				   __func__, ret);
			goto done;
		}
		set_host_lcb_access(dd);
	}
	dd->lcb_access_count++;
done:
	mutex_unlock(&ppd->hls_lock);
	return ret;
}
6268
6269
6270
6271
6272
6273
6274
6275
6276
/*
 * Release host access to the LCB CSRs.  The last holder hands access
 * back to the 8051.  Mirrors acquire_lcb_access().
 *
 * @sleep_ok: non-zero means sleeping on hls_lock is allowed; zero
 *	      means busy-wait.
 *
 * Returns 0 on success, -EBUSY if the 8051 refused the hand-back.
 */
int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
{
	int ret = 0;

	/* serialize with acquire_lcb_access() and link state changes */
	if (sleep_ok) {
		mutex_lock(&dd->pport->hls_lock);
	} else {
		while (!mutex_trylock(&dd->pport->hls_lock))
			udelay(1);
	}

	/* unbalanced release - complain but do not underflow the count */
	if (dd->lcb_access_count == 0) {
		dd_dev_err(dd, "%s: LCB access count is zero.  Skipping.\n",
			   __func__);
		goto done;
	}

	/* last holder: give access back to the 8051 */
	if (dd->lcb_access_count == 1) {
		set_8051_lcb_access(dd);
		ret = request_8051_lcb_access(dd);
		if (ret) {
			dd_dev_err(dd,
				   "%s: unable to release LCB access, err %d\n",
				   __func__, ret);
			/* restore host access on failure */
			set_host_lcb_access(dd);
			goto done;
		}
	}
	dd->lcb_access_count--;
done:
	mutex_unlock(&dd->pport->hls_lock);
	return ret;
}
6316
6317
6318
6319
6320
6321
6322
6323
6324
6325
/* Initialize LCB access bookkeeping: nobody holds access. */
static void init_lcb_access(struct hfi1_devdata *dd)
{
	dd->lcb_access_count = 0;
}
6330
6331
6332
6333
/*
 * Post a response to an 8051 host request: set COMPLETED plus the
 * return code and response data in DC_DC8051_CFG_EXT_DEV_0.
 */
static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
{
	write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
		  DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
		  (u64)return_code <<
		  DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
		  (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
}
6342
6343
6344
6345
/*
 * Handle a host request raised by the 8051 firmware: decode the
 * request type and data from DC_DC8051_CFG_EXT_DEV_1 and post a
 * response via hreq_response().
 */
static void handle_8051_request(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 reg;
	u16 data = 0;
	u8 type;

	reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
	if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
		return;	/* no request pending */

	/* clear the response register before handling the request */
	write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);

	/* extract request type and data from the request CSR */
	type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
		& DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
	data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
		& DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;

	switch (type) {
	case HREQ_LOAD_CONFIG:
	case HREQ_SAVE_CONFIG:
	case HREQ_READ_CONFIG:
	case HREQ_SET_TX_EQ_ABS:
	case HREQ_SET_TX_EQ_REL:
	case HREQ_ENABLE:
		dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
			    type);
		hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
		break;
	case HREQ_LCB_RESET:
		/* put the LCB, RX FPE and TX FPE into reset */
		write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_INTO_RESET);
		/* read back to force the write before the delay */
		(void)read_csr(dd, DCC_CFG_RESET);
		/* hold the reset briefly before releasing */
		udelay(1);
		/* take the LCB, RX FPE and TX FPE out of reset */
		write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
		hreq_response(dd, HREQ_SUCCESS, 0);

		break;
	case HREQ_CONFIG_DONE:
		hreq_response(dd, HREQ_SUCCESS, 0);
		break;

	case HREQ_INTERFACE_TEST:
		/* echo the request data back */
		hreq_response(dd, HREQ_SUCCESS, data);
		break;
	default:
		dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
		hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
		break;
	}
}
6402
6403
6404
6405
/*
 * Set the vAU (virtual allocation unit) field of the global credit
 * CSR, preserving all other fields.
 */
void set_up_vau(struct hfi1_devdata *dd, u8 vau)
{
	u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);

	/* do not modify other values in the register */
	reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
	reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
}
6415
6416
6417
6418
6419
6420
/*
 * Program the VL15 buffer credits: set the total credit limit in the
 * global credit CSR (shared limit is cleared) and the VL15 dedicated
 * limit CSR to @vl15buf.
 */
void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
{
	u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);

	/* clear the credit limit fields; other fields are preserved */
	reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
		 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);

	/*
	 * NOTE(review): total credit limit is set to vl15buf with the
	 * shared limit left at zero - presumably all credits are
	 * dedicated to VL15 at this point; confirm against the credit
	 * programming sequence.
	 */
	reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);

	write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
		  << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
}
6439
6440
6441
6442
6443
6444void reset_link_credits(struct hfi1_devdata *dd)
6445{
6446 int i;
6447
6448
6449 for (i = 0; i < TXE_NUM_DATA_VL; i++)
6450 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6451 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6452 write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
6453
6454 pio_send_control(dd, PSC_CM_RESET);
6455
6456 dd->vl15buf_cached = 0;
6457}
6458
6459
6460static u32 vcu_to_cu(u8 vcu)
6461{
6462 return 1 << vcu;
6463}
6464
6465
/* Convert credit units to the virtual CU encoding: vCU = log2(CU). */
static u8 cu_to_vcu(u32 cu)
{
	return ilog2(cu);
}
6470
6471
6472static u32 vau_to_au(u8 vau)
6473{
6474 return 8 * (1 << vau);
6475}
6476
/* Reset the SM trap QP and SA QP to their link-up default values. */
static void set_linkup_defaults(struct hfi1_pportdata *ppd)
{
	ppd->sm_trap_qp = 0x0;
	ppd->sa_qp = 0x1;
}
6482
6483
6484
6485
/*
 * Put the LCB into reset, saving the LCB error enables first.  When
 * @abort is zero, bring the LCB back out of reset and restore the
 * error enables; when non-zero, leave the LCB in reset.
 */
static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
{
	u64 reg;

	/* stop the LCB */
	write_csr(dd, DC_LCB_CFG_RUN, 0);
	/* put the TX FIFOs into reset */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
		  1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
	/* save error enables so they can be restored on restart */
	dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
	reg = read_csr(dd, DCC_CFG_RESET);
	write_csr(dd, DCC_CFG_RESET, reg |
		  DCC_CFG_RESET_RESET_LCB | DCC_CFG_RESET_RESET_RX_FPE);
	/* read back to force the write to complete */
	(void)read_csr(dd, DCC_CFG_RESET);
	if (!abort) {
		/* brief hold before releasing the reset */
		udelay(1);
		write_csr(dd, DCC_CFG_RESET, reg);
		write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
	}
}
6507
6508
6509
6510
6511
6512
6513
6514
6515
6516
6517
/*
 * Shut down the DC: stop the LCB (leaving it in reset) and hold the
 * 8051 in reset.  Idempotent; caller must hold dc8051_lock.
 */
static void _dc_shutdown(struct hfi1_devdata *dd)
{
	lockdep_assert_held(&dd->dc8051_lock);

	if (dd->dc_shutdown)
		return;

	dd->dc_shutdown = 1;
	/* shut down the LCB; abort=1 leaves it in reset */
	lcb_shutdown(dd, 1);
	/*
	 * Put the 8051 into reset.  _dc_start() releases it and waits
	 * for the firmware to come back up.
	 */
	write_csr(dd, DC_DC8051_CFG_RST, 0x1);
}
6535
/* Locked wrapper around _dc_shutdown(). */
static void dc_shutdown(struct hfi1_devdata *dd)
{
	mutex_lock(&dd->dc8051_lock);
	_dc_shutdown(dd);
	mutex_unlock(&dd->dc8051_lock);
}
6542
6543
6544
6545
6546
6547
6548
/*
 * Restart the DC after _dc_shutdown(): release the 8051 from reset,
 * wait for the firmware to become ready, take the LCB/FPE blocks out
 * of reset, and restore the LCB error enables.  Caller must hold
 * dc8051_lock.
 */
static void _dc_start(struct hfi1_devdata *dd)
{
	lockdep_assert_held(&dd->dc8051_lock);

	if (!dd->dc_shutdown)
		return;

	/* release the 8051 from reset */
	write_csr(dd, DC_DC8051_CFG_RST, 0ull);
	/* wait until the 8051 firmware reports ready */
	if (wait_fm_ready(dd, TIMEOUT_8051_START))
		dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
			   __func__);

	/* bring the LCB, RX FPE and TX FPE out of reset */
	write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
	/* restore the error enables saved by lcb_shutdown() */
	write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
	dd->dc_shutdown = 0;
}
6569
/* Locked wrapper around _dc_start(). */
static void dc_start(struct hfi1_devdata *dd)
{
	mutex_lock(&dd->dc8051_lock);
	_dc_start(dd);
	mutex_unlock(&dd->dc8051_lock);
}
6576
6577
6578
6579
/*
 * Adjust LCB RX/TX FIFO read-address settings for FPGA emulation
 * platforms.  The values depend on the emulator revision; they are
 * no-ops on real hardware and on serial emulators.
 */
static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
{
	u64 rx_radr, tx_radr;
	u32 version;

	/* only applies to FPGA emulation */
	if (dd->icode != ICODE_FPGA_EMULATION)
		return;

	/*
	 * Serial emulators do not need these adjustments - only the
	 * parallel (non-serial) emulator boards do.
	 */
	if (is_emulator_s(dd))
		return;

	/* the values below are per emulator board revision */
	version = emulator_rev(dd);
	/* non-Ax silicon: force the newest settings regardless of rev */
	if (!is_ax(dd))
		version = 0x2d;

	if (version <= 0x12) {
		/*
		 * NOTE(review): magic FIFO read-address values per
		 * emulator revision; presumably from platform bring-up
		 * data - do not change without hardware guidance.
		 */
		rx_radr =
		      0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		/*
		 * TX only uses the reset value; the jump fields are
		 * left at zero.
		 */
		tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
	} else if (version <= 0x18) {
		rx_radr =
		      0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
	} else if (version == 0x19) {
		rx_radr =
		      0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
	} else if (version == 0x1a) {
		rx_radr =
		      0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
		/* this revision additionally needs the lane dclk set */
		write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
	} else {
		/* newest emulator revisions */
		rx_radr =
		      0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
	}

	write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
	/* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
	write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
		  DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
}
6661
6662
6663
6664
6665
6666
/*
 * Work-queue handler for SMA idle messages received from the link
 * neighbor.  Reads the idle message from the DC and reacts to the
 * ARM/ACTIVE notifications.
 */
void handle_sma_message(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  sma_message_work);
	struct hfi1_devdata *dd = ppd->dd;
	u64 msg;
	int ret;

	/* read the idle message; bail if the read fails */
	ret = read_idle_sma(dd, &msg);
	if (ret)
		return;
	dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
	/* the low byte carries the message type */
	switch (msg & 0xff) {
	case SMA_IDLE_ARM:
		/*
		 * The neighbor has armed; remember that it is in a
		 * normal state.  Only meaningful while we are in
		 * Init or Armed.
		 */
		if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
			ppd->neighbor_normal = 1;
		break;
	case SMA_IDLE_ACTIVE:
		/*
		 * The neighbor went Active; if we are Armed and the
		 * active-optimize feature is on, follow it to Active.
		 */
		if (ppd->host_link_state == HLS_UP_ARMED &&
		    ppd->is_active_optimize_enabled) {
			ppd->neighbor_normal = 1;
			ret = set_link_state(ppd, HLS_UP_ACTIVE);
			if (ret)
				dd_dev_err(
					dd,
					"%s: received Active SMA idle message, couldn't set link to Active\n",
					__func__);
		}
		break;
	default:
		dd_dev_err(dd,
			   "%s: received unexpected SMA idle message 0x%llx\n",
			   __func__, msg);
		break;
	}
}
6722
6723static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6724{
6725 u64 rcvctrl;
6726 unsigned long flags;
6727
6728 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6729 rcvctrl = read_csr(dd, RCV_CTRL);
6730 rcvctrl |= add;
6731 rcvctrl &= ~clear;
6732 write_csr(dd, RCV_CTRL, rcvctrl);
6733 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6734}
6735
/* Set bits in the RCV_CTRL CSR. */
static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
{
	adjust_rcvctrl(dd, add, 0);
}
6740
/* Clear bits in the RCV_CTRL CSR. */
static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
{
	adjust_rcvctrl(dd, 0, clear);
}
6745
6746
6747
6748
/*
 * Start SPC freeze handling.
 *
 * @flags: FREEZE_SELF forces the freeze via CCE_CTRL; FREEZE_ABORT
 *	   logs and skips recovery; FREEZE_LINK_DOWN is propagated to
 *	   the SDMA and send-context shutdown paths.
 */
void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct send_context *sc;
	int i;
	int sc_flags;

	if (flags & FREEZE_SELF)
		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);

	/* inform the rest of the driver that a freeze is in progress */
	dd->flags |= HFI1_FROZEN;

	/* notify all SDMA engines that they are going into a freeze */
	sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));

	sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
					      SCF_LINK_DOWN : 0);
	/* do halt pre-handling on all enabled send contexts */
	for (i = 0; i < dd->num_send_contexts; i++) {
		sc = dd->send_contexts[i].sc;
		if (sc && (sc->flags & SCF_ENABLED))
			sc_stop(sc, sc_flags);
	}

	/* notify user space of the freeze */
	hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);

	/* an aborted freeze cannot be recovered - do not queue work */
	if (flags & FREEZE_ABORT) {
		dd_dev_err(dd,
			   "Aborted freeze recovery. Please REBOOT system\n");
		return;
	}
	/* queue non-interrupt handler */
	queue_work(ppd->hfi1_wq, &ppd->freeze_work);
}
6785
6786
6787
6788
6789
6790
6791
6792
/*
 * Poll CCE_STATUS until all SPC freeze bits are set (@freeze != 0) or
 * all are clear (@freeze == 0).  Logs and gives up after
 * FREEZE_STATUS_TIMEOUT milliseconds.
 */
static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
{
	unsigned long timeout;
	u64 reg;

	timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
	while (1) {
		reg = read_csr(dd, CCE_STATUS);
		if (freeze) {
			/* waiting until all frozen bits are set */
			if ((reg & ALL_FROZE) == ALL_FROZE)
				return;
		} else {
			/* waiting until all frozen bits are clear */
			if ((reg & ALL_FROZE) == 0)
				return;
		}

		if (time_after(jiffies, timeout)) {
			dd_dev_err(dd,
				   "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
				   freeze ? "" : "un", reg & ALL_FROZE,
				   freeze ? ALL_FROZE : 0ull);
			return;
		}
		usleep_range(80, 120);
	}
}
6821
6822
6823
6824
/*
 * Freeze handling for the RXE block: disable the receive port and
 * every receive context.
 */
static void rxe_freeze(struct hfi1_devdata *dd)
{
	int i;
	struct hfi1_ctxtdata *rcd;

	/* disable the receive port */
	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);

	/* disable all receive contexts */
	for (i = 0; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd);
		hfi1_rcd_put(rcd);
	}
}
6840
6841
6842
6843
6844
6845
6846
/*
 * Unfreeze handling for the RXE block: re-enable non-user (kernel and
 * VNIC) receive contexts, then re-enable the receive port.  User
 * contexts are skipped here.
 */
static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
{
	u32 rcvmask;
	u16 i;
	struct hfi1_ctxtdata *rcd;

	/* enable all non-user contexts */
	for (i = 0; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);

		/* skip missing contexts and dynamic non-VNIC contexts */
		if (!rcd ||
		    (i >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic)) {
			hfi1_rcd_put(rcd);
			continue;
		}
		rcvmask = HFI1_RCVCTRL_CTXT_ENB;
		/* tail-update mode must be set explicitly per context */
		rcvmask |= rcd->rcvhdrtail_kvaddr ?
			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
		hfi1_rcvctrl(dd, rcvmask, rcd);
		hfi1_rcd_put(rcd);
	}

	/* enable the receive port */
	add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
}
6874
6875
6876
6877
6878
6879
/*
 * Work-queue handler for SPC freeze recovery: wait for the freeze to
 * complete, run the per-block (PIO, SDMA, RXE) freeze handling,
 * unfreeze the SPC, then run the per-block unfreeze handling and wake
 * anyone waiting on the freeze to clear.
 */
void handle_freeze(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  freeze_work);
	struct hfi1_devdata *dd = ppd->dd;

	/* wait for the hardware freeze to fully take effect */
	wait_for_freeze_status(dd, 1);

	/* SPC is now frozen */

	/* do send PIO freeze steps */
	pio_freeze(dd);

	/* do send DMA freeze steps */
	sdma_freeze(dd);

	/* do receive freeze steps */
	rxe_freeze(dd);

	/* unfreeze the hardware and wait for it to report unfrozen */
	write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
	wait_for_freeze_status(dd, 0);

	/*
	 * Ax hardware: perform an extra freeze/unfreeze cycle.
	 * NOTE(review): presumably an early-silicon workaround -
	 * confirm against the hardware errata.
	 */
	if (is_ax(dd)) {
		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
		wait_for_freeze_status(dd, 1);
		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
		wait_for_freeze_status(dd, 0);
	}

	/* do send PIO unfreeze steps for kernel contexts */
	pio_kernel_unfreeze(dd);

	/* do send DMA unfreeze steps */
	sdma_unfreeze(dd);

	/* do receive unfreeze steps for kernel contexts */
	rxe_kernel_unfreeze(dd);

	/*
	 * Clear the frozen flag last and wake any waiters; user
	 * contexts were notified via the uevent bit set in
	 * start_freeze_handling().
	 */
	dd->flags &= ~HFI1_FROZEN;
	wake_up(&dd->event_queue);

	/* no longer frozen */
}
6945
6946
6947
6948
6949
6950
6951
6952
6953
6954
6955static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width)
6956{
6957 int i;
6958 u16 tx_width;
6959 u16 link_speed;
6960
6961 tx_width = tx_link_width(link_width);
6962 link_speed = get_link_speed(ppd->link_speed_active);
6963
6964
6965
6966
6967
6968 for (i = 0; i < C_VL_COUNT + 1; i++)
6969 get_xmit_wait_counters(ppd, tx_width, link_speed, i);
6970}
6971
6972
6973
6974
6975
6976
/*
 * Work-queue handler run when the link comes up: move the host link
 * state to Init, refresh link-up state (RTT, counters, defaults,
 * VL15 credits), and enforce the link speed policy - a link that came
 * up outside the enabled speeds is taken back down and retrained.
 */
void handle_link_up(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  link_up_work);
	struct hfi1_devdata *dd = ppd->dd;

	set_link_state(ppd, HLS_UP_INIT);

	/* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
	read_ltp_rtt(dd);

	/* clear per-link-up counters */
	clear_linkup_counters(dd);

	/* reset SMA trap/SA QP defaults */
	set_linkup_defaults(ppd);

	/*
	 * Apply the cached VL15 buffer credits, except on quick-linkup
	 * and the functional simulator.
	 * NOTE(review): presumably those paths program credits
	 * elsewhere - confirm.
	 */
	if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
		set_up_vl15(dd, dd->vl15buf_cached);

	/* enforce the link speed policy */
	if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
		/* oops - current speed is not enabled, go down */
		dd_dev_err(dd,
			   "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
			   ppd->link_speed_active, ppd->link_speed_enabled);
		set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
				     OPA_LINKDOWN_REASON_SPEED_POLICY);
		set_link_state(ppd, HLS_DN_OFFLINE);
		start_link(ppd);
	}
}
7018
7019
7020
7021
7022
7023static void reset_neighbor_info(struct hfi1_pportdata *ppd)
7024{
7025 ppd->neighbor_guid = 0;
7026 ppd->neighbor_port_number = 0;
7027 ppd->neighbor_type = 0;
7028 ppd->neighbor_fm_security = 0;
7029}
7030
/*
 * Human-readable names for OPA link-down reason codes, indexed by the
 * OPA_LINKDOWN_REASON_* value.  Unlisted codes are NULL; callers must
 * use link_down_reason_str() for safe lookup.
 */
static const char * const link_down_reason_strs[] = {
	[OPA_LINKDOWN_REASON_NONE] = "None",
	[OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
	[OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
	[OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
	[OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
	[OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
	[OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
	[OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
	[OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
	[OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
	[OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
	[OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
	[OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
	[OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
	[OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
	[OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
	[OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
	[OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
	[OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
	[OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
	[OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
	[OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
	[OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
	[OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
	[OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
	[OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
	[OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
	[OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
	[OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
	[OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
	[OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
	[OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
	[OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
		"Excessive buffer overrun",
	[OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
	[OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
	[OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
	[OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
	[OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
	[OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
	[OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
	[OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
		"Local media not installed",
	[OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
	[OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
	[OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
		"End to end not installed",
	[OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
	[OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
	[OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
	[OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
	[OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
	[OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
};
7086
7087
7088static const char *link_down_reason_str(u8 reason)
7089{
7090 const char *str = NULL;
7091
7092 if (reason < ARRAY_SIZE(link_down_reason_strs))
7093 str = link_down_reason_strs[reason];
7094 if (!str)
7095 str = "(invalid)";
7096
7097 return str;
7098}
7099
7100
7101
7102
7103
7104
/*
 * Handle a link down interrupt from the 8051.
 *
 * This is a work-queue item queued from the interrupt handler.  Takes
 * the link offline, determines and records the best-available link
 * down reason, then restarts the link (or shuts the DC down when no
 * QSFP module is present).
 */
void handle_link_down(struct work_struct *work)
{
	u8 lcl_reason, neigh_reason = 0;
	u8 link_down_reason;
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  link_down_work);
	int was_up;
	static const char ldr_str[] = "Link down reason: ";

	/*
	 * A fixed port that fails while coming up is reported as
	 * "not installed" rather than a transient failure.
	 */
	if ((ppd->host_link_state &
	     (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
	     ppd->port_type == PORT_TYPE_FIXED)
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);

	/* Go offline first, then deal with the reason bookkeeping. */
	was_up = !!(ppd->host_link_state & HLS_UP);
	set_link_state(ppd, HLS_DN_OFFLINE);
	xchg(&ppd->is_link_down_queued, 0);

	if (was_up) {
		lcl_reason = 0;
		/* Ask the 8051 why the link went down. */
		read_link_down_reason(ppd->dd, &link_down_reason);
		switch (link_down_reason) {
		case LDR_LINK_TRANSFER_ACTIVE_LOW:
			/* The link went down unexpectedly. */
			dd_dev_info(ppd->dd, "%sUnexpected link down\n",
				    ldr_str);
			break;
		case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
			/*
			 * The neighbor announced it was going down;
			 * fetch the planned-down reason code it sent.
			 */
			read_planned_down_reason_code(ppd->dd, &neigh_reason);
			dd_dev_info(ppd->dd,
				    "%sNeighbor link down message %d, %s\n",
				    ldr_str, neigh_reason,
				    link_down_reason_str(neigh_reason));
			break;
		case LDR_RECEIVED_HOST_OFFLINE_REQ:
			dd_dev_info(ppd->dd,
				    "%sHost requested link to go offline\n",
				    ldr_str);
			break;
		default:
			dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
				    ldr_str, link_down_reason);
			break;
		}

		/*
		 * If the neighbor supplied no reason code, record
		 * "neighbor unknown" locally.
		 */
		if (neigh_reason == 0)
			lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
	} else {
		/* The link never came up: record a transient failure. */
		lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
	}

	set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);

	/* Latch the first reasons for the SMA, if not already latched. */
	if (was_up && ppd->local_link_down_reason.sma == 0 &&
	    ppd->neigh_link_down_reason.sma == 0) {
		ppd->local_link_down_reason.sma =
			ppd->local_link_down_reason.latest;
		ppd->neigh_link_down_reason.sma =
			ppd->neigh_link_down_reason.latest;
	}

	reset_neighbor_info(ppd);

	/* Disable the port for packet receive. */
	clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);

	/*
	 * If there is no cable attached, turn the DC off.  Otherwise,
	 * start the link bring-up.
	 */
	if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
		dc_shutdown(ppd->dd);
	else
		start_link(ppd);
}
7193
/*
 * Work-queue item: bounce (down then restart) the link.
 * Only acts if the link is currently up; otherwise logs and returns.
 */
void handle_link_bounce(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  link_bounce_work);

	/*
	 * Only do something if the link is currently up.
	 */
	if (ppd->host_link_state & HLS_UP) {
		set_link_state(ppd, HLS_DN_OFFLINE);
		start_link(ppd);
	} else {
		dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
			    __func__, link_state_name(ppd->host_link_state));
	}
}
7210
7211
7212
7213
7214
7215static int cap_to_port_ltp(int cap)
7216{
7217 int port_ltp = PORT_LTP_CRC_MODE_16;
7218
7219 if (cap & CAP_CRC_14B)
7220 port_ltp |= PORT_LTP_CRC_MODE_14;
7221 if (cap & CAP_CRC_48B)
7222 port_ltp |= PORT_LTP_CRC_MODE_48;
7223 if (cap & CAP_CRC_12B_16B_PER_LANE)
7224 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7225
7226 return port_ltp;
7227}
7228
7229
7230
7231
7232int port_ltp_to_cap(int port_ltp)
7233{
7234 int cap_mask = 0;
7235
7236 if (port_ltp & PORT_LTP_CRC_MODE_14)
7237 cap_mask |= CAP_CRC_14B;
7238 if (port_ltp & PORT_LTP_CRC_MODE_48)
7239 cap_mask |= CAP_CRC_48B;
7240 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7241 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7242
7243 return cap_mask;
7244}
7245
7246
7247
7248
7249static int lcb_to_port_ltp(int lcb_crc)
7250{
7251 int port_ltp = 0;
7252
7253 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7254 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7255 else if (lcb_crc == LCB_CRC_48B)
7256 port_ltp = PORT_LTP_CRC_MODE_48;
7257 else if (lcb_crc == LCB_CRC_14B)
7258 port_ltp = PORT_LTP_CRC_MODE_14;
7259 else
7260 port_ltp = PORT_LTP_CRC_MODE_16;
7261
7262 return port_ltp;
7263}
7264
7265static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7266{
7267 if (ppd->pkeys[2] != 0) {
7268 ppd->pkeys[2] = 0;
7269 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7270 hfi1_event_pkey_change(ppd->dd, ppd->port);
7271 }
7272}
7273
7274
7275
7276
/*
 * Convert a lane count (0-4) into an OPA_LINK_WIDTH_* bit value.
 * Invalid widths are reported and treated as 4 lanes.
 */
static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
{
	switch (width) {
	case 0:
		/*
		 * Simulator and quick-linkup do not set the lane
		 * enables, so a width of 0 is expected there and
		 * treated as the full 4X width.
		 */
		if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
			return OPA_LINK_WIDTH_4X;
		return 0;
	case 1: return OPA_LINK_WIDTH_1X;
	case 2: return OPA_LINK_WIDTH_2X;
	case 3: return OPA_LINK_WIDTH_3X;
	default:
		dd_dev_info(dd, "%s: invalid width %d, using 4\n",
			    __func__, width);
		/* fall through */
	case 4: return OPA_LINK_WIDTH_4X;
	}
}
7298
7299
7300
7301
/*
 * Population count for every possible 4-bit value, indexed by the
 * value itself.
 */
static const u8 bit_counts[16] = {
	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
};

/* Return the number of set bits in the low nibble of @nibble. */
static inline u8 nibble_to_count(u8 nibble)
{
	return bit_counts[nibble & 0xf];
}
7310
7311
7312
7313
7314
7315
7316
7317
7318
/*
 * Read the active tx and rx lane enables from the 8051 and convert
 * them to OPA_LINK_WIDTH_* values in *tx_width and *rx_width.
 *
 * Side effect: on older RTL silicon firmware (< 0.19.0) this also
 * derives and stores the active link speed from the max_rate field.
 */
static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
			    u16 *rx_width)
{
	u16 tx, rx;
	u8 enable_lane_rx;
	u8 enable_lane_tx;
	u8 tx_polarity_inversion;
	u8 rx_polarity_inversion;
	u8 max_rate;

	/* read the active lanes */
	read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
			 &rx_polarity_inversion, &max_rate);
	read_local_lni(dd, &enable_lane_rx);

	/* convert to counts */
	tx = nibble_to_count(enable_lane_tx);
	rx = nibble_to_count(enable_lane_rx);

	/*
	 * Firmware before 0.19.0 on RTL silicon does not report the
	 * active speed elsewhere; derive it from max_rate here.
	 */
	if ((dd->icode == ICODE_RTL_SILICON) &&
	    (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
		/* max_rate: 0 = 12.5G, 1 = 25G */
		switch (max_rate) {
		case 0:
			dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
			break;
		default:
			dd_dev_err(dd,
				   "%s: unexpected max rate %d, using 25Gb\n",
				   __func__, (int)max_rate);
			/* fall through */
		case 1:
			dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
			break;
		}
	}

	dd_dev_info(dd,
		    "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
		    enable_lane_tx, tx, enable_lane_rx, rx);
	*tx_width = link_width_to_bits(dd, tx);
	*rx_width = link_width_to_bits(dd, rx);
}
7367
7368
7369
7370
7371
7372
7373
7374
7375
7376
7377
7378
7379
7380
/*
 * Return the tx/rx widths negotiated at link-up time, taken from the
 * verify-cap local link mode frame rather than the currently active
 * lanes.
 *
 * NOTE(review): the trailing get_link_widths() call discards its
 * width results — it appears to be made for its side effect of
 * setting link_speed_active on old firmware (see get_link_widths);
 * confirm before removing.
 */
static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
			      u16 *rx_width)
{
	u16 widths, tx, rx;
	u8 misc_bits, local_flags;
	u16 active_tx, active_rx;

	read_vc_local_link_mode(dd, &misc_bits, &local_flags, &widths);
	tx = widths >> 12;		/* upper nibble: tx lane mask */
	rx = (widths >> 8) & 0xf;	/* next nibble: rx lane mask */

	*tx_width = link_width_to_bits(dd, tx);
	*rx_width = link_width_to_bits(dd, rx);

	/* print the active widths */
	get_link_widths(dd, &active_tx, &active_rx);
}
7398
7399
7400
7401
7402
7403
7404
7405
7406
/*
 * Set ppd->link_width_active and the downgrade-related width fields
 * from the widths negotiated at link up.  Expected to be called at
 * link-up time.
 */
void get_linkup_link_widths(struct hfi1_pportdata *ppd)
{
	u16 tx_width, rx_width;

	/* get end-of-LNI link widths */
	get_linkup_widths(ppd->dd, &tx_width, &rx_width);

	/* use tx_width as the link is supposed to be symmetric on link up */
	ppd->link_width_active = tx_width;
	/* link width downgrade active (LWD.A) starts out matching LW.A */
	ppd->link_width_downgrade_tx_active = ppd->link_width_active;
	ppd->link_width_downgrade_rx_active = ppd->link_width_active;
	/* per OPA spec, on link up LWD.E resets to LWD.S */
	ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
	/* cache the active egress rate (units {10^6 bits/sec}) */
	ppd->current_egress_rate = active_egress_rate(ppd);
}
7424
7425
7426
7427
7428
7429
/*
 * Handle a verify capabilities interrupt from the 8051.
 *
 * This is a work-queue item run outside of interrupt context.  Reads
 * the peer's advertised capabilities, negotiates CRC mode and link
 * speed, programs the LCB accordingly, and moves the link state to
 * GOING_UP.
 */
void handle_verify_cap(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  link_vc_work);
	struct hfi1_devdata *dd = ppd->dd;
	u64 reg;
	u8 power_management;
	u8 continuous;
	u8 vcu;
	u8 vau;
	u8 z;
	u16 vl15buf;
	u16 link_widths;
	u16 crc_mask;
	u16 crc_val;
	u16 device_id;
	u16 active_tx, active_rx;
	u8 partner_supported_crc;
	u8 remote_tx_rate;
	u8 device_rev;

	set_link_state(ppd, HLS_VERIFY_CAP);

	lcb_shutdown(dd, 0);
	adjust_lcb_for_fpga_serdes(dd);

	/* Gather the peer's advertised capabilities from the 8051. */
	read_vc_remote_phy(dd, &power_management, &continuous);
	read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
			      &partner_supported_crc);
	read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
	read_remote_device_id(dd, &device_id, &device_rev);

	/* print the active widths */
	get_link_widths(dd, &active_tx, &active_rx);
	dd_dev_info(dd,
		    "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
		    (int)power_management, (int)continuous);
	dd_dev_info(dd,
		    "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
		    (int)vau, (int)z, (int)vcu, (int)vl15buf,
		    (int)partner_supported_crc);
	dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
		    (u32)remote_tx_rate, (u32)link_widths);
	dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
		    (u32)device_id, (u32)device_rev);

	/* A vAU of 0 is not legal; substitute the minimum legal value. */
	if (vau == 0)
		vau = 1;
	set_up_vau(dd, vau);

	/*
	 * Set VL15 credits to 0 for now; the peer's value is cached and
	 * applied later in the bring-up sequence.
	 */
	set_up_vl15(dd, 0);
	dd->vl15buf_cached = vl15buf;

	/* CRC modes both sides support */
	crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;

	/* Pick the highest-priority mutually supported CRC mode. */
	if (crc_mask & CAP_CRC_14B)
		crc_val = LCB_CRC_14B;
	else if (crc_mask & CAP_CRC_48B)
		crc_val = LCB_CRC_48B;
	else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
		crc_val = LCB_CRC_12B_16B_PER_LANE;
	else
		crc_val = LCB_CRC_16B;

	dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
	write_csr(dd, DC_LCB_CFG_CRC_MODE,
		  (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);

	/* Force credit mode only when 14b CRC with sideband is in use. */
	reg = read_csr(dd, SEND_CM_CTRL);
	if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
		write_csr(dd, SEND_CM_CTRL,
			  reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
	} else {
		write_csr(dd, SEND_CM_CTRL,
			  reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
	}

	ppd->link_speed_active = 0;	/* invalid value */
	if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
		/* remote_tx_rate: 0 = 12.5G, 1 = 25G */
		switch (remote_tx_rate) {
		case 0:
			ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
			break;
		case 1:
			ppd->link_speed_active = OPA_LINK_SPEED_25G;
			break;
		}
	} else {
		/* actual rate is highest bit set in both rate masks */
		u8 rate = remote_tx_rate & ppd->local_tx_rate;

		if (rate & 2)
			ppd->link_speed_active = OPA_LINK_SPEED_25G;
		else if (rate & 1)
			ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
	}
	if (ppd->link_speed_active == 0) {
		dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
			   __func__, (int)remote_tx_rate);
		ppd->link_speed_active = OPA_LINK_SPEED_25G;
	}

	/*
	 * Cache the supported (bits 8+), enabled (bits 4-7), and active
	 * (bits 0-3) LTP CRC modes in one field.
	 */
	ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
	/* enabled modes */
	ppd->port_ltp_crc_mode |=
		cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
	/* active mode is the one just negotiated */
	ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);

	/* set up the remote credit return table */
	assign_remote_cm_au_table(dd, vcu);

	/*
	 * A0 erratum workaround: keep LCB link-kill enabled for replay
	 * buffer and flit input buffer MBEs.
	 */
	if (is_ax(dd)) {
		reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
		reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
			| DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
		write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
	}

	/* pull LCB fifos out of reset */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);

	/* mask LCB errors and give the 8051 access to the LCB CSRs */
	write_csr(dd, DC_LCB_ERR_EN, 0);
	set_8051_lcb_access(dd);

	/* tell the 8051 to go to LinkUp */
	set_link_state(ppd, HLS_GOING_UP);
}
7592
7593
7594
7595
7596
7597
7598
7599
7600
7601
7602
7603
7604
7605
7606
7607
/*
 * Apply the link width downgrade enabled policy against the current
 * active link widths, bouncing the link if the active widths fall
 * outside what the policy allows.
 *
 * @refresh_widths: when true, re-read the active widths from the 8051
 *                  before evaluating the policy.
 *
 * Called when the enabled policy changes or the active link widths
 * change.  Returns true if the device is still considered downgraded
 * after evaluation.
 */
bool apply_link_downgrade_policy(struct hfi1_pportdata *ppd,
				 bool refresh_widths)
{
	int do_bounce = 0;
	int tries;
	u16 lwde;
	u16 tx, rx;
	bool link_downgraded = refresh_widths;

	/* use the hls lock to avoid a race with actual link up */
	tries = 0;
retry:
	mutex_lock(&ppd->hls_lock);
	/* only apply if the link is up */
	if (ppd->host_link_state & HLS_DOWN) {
		/* still going up..wait and retry */
		if (ppd->host_link_state & HLS_GOING_UP) {
			if (++tries < 1000) {
				mutex_unlock(&ppd->hls_lock);
				usleep_range(100, 120);
				goto retry;
			}
			dd_dev_err(ppd->dd,
				   "%s: giving up waiting for link state change\n",
				   __func__);
		}
		goto done;
	}

	lwde = ppd->link_width_downgrade_enabled;

	if (refresh_widths) {
		get_link_widths(ppd->dd, &tx, &rx);
		ppd->link_width_downgrade_tx_active = tx;
		ppd->link_width_downgrade_rx_active = rx;
	}

	if (ppd->link_width_downgrade_tx_active == 0 ||
	    ppd->link_width_downgrade_rx_active == 0) {
		/* the 8051 reported a dead link as a downgrade */
		dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
		link_downgraded = false;
	} else if (lwde == 0) {
		/* downgrade is disabled */

		/* bounce if not at starting active width */
		if ((ppd->link_width_active !=
		     ppd->link_width_downgrade_tx_active) ||
		    (ppd->link_width_active !=
		     ppd->link_width_downgrade_rx_active)) {
			dd_dev_err(ppd->dd,
				   "Link downgrade is disabled and link has downgraded, downing link\n");
			dd_dev_err(ppd->dd,
				   "  original 0x%x, tx active 0x%x, rx active 0x%x\n",
				   ppd->link_width_active,
				   ppd->link_width_downgrade_tx_active,
				   ppd->link_width_downgrade_rx_active);
			do_bounce = 1;
			link_downgraded = false;
		}
	} else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
		   (lwde & ppd->link_width_downgrade_rx_active) == 0) {
		/* Tx or Rx is outside the enabled policy */
		dd_dev_err(ppd->dd,
			   "Link is outside of downgrade allowed, downing link\n");
		dd_dev_err(ppd->dd,
			   "  enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
			   lwde, ppd->link_width_downgrade_tx_active,
			   ppd->link_width_downgrade_rx_active);
		do_bounce = 1;
		link_downgraded = false;
	}

done:
	mutex_unlock(&ppd->hls_lock);

	if (do_bounce) {
		set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
				     OPA_LINKDOWN_REASON_WIDTH_POLICY);
		set_link_state(ppd, HLS_DN_OFFLINE);
		start_link(ppd);
	}

	return link_downgraded;
}
7693
7694
7695
7696
7697
7698
/*
 * Handle a link downgrade interrupt from the 8051.
 *
 * This is a work-queue item outside of the interrupt.
 */
void handle_link_downgrade(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  link_downgrade_work);

	dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
	if (apply_link_downgrade_policy(ppd, true))
		update_xmit_counters(ppd, ppd->link_width_downgrade_tx_active);
}
7708
/* Format DCC error flags into buf; returns buf. */
static char *dcc_err_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, dcc_err_flags,
			   ARRAY_SIZE(dcc_err_flags));
}
7714
/* Format LCB error flags into buf; returns buf. */
static char *lcb_err_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, lcb_err_flags,
			   ARRAY_SIZE(lcb_err_flags));
}
7720
/* Format 8051 error flags into buf; returns buf. */
static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, dc8051_err_flags,
			   ARRAY_SIZE(dc8051_err_flags));
}
7726
/* Format 8051 info error flags into buf; returns buf. */
static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
			   ARRAY_SIZE(dc8051_info_err_flags));
}
7732
/* Format 8051 host message flags into buf; returns buf. */
static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
			   ARRAY_SIZE(dc8051_info_host_msg_flags));
}
7738
/*
 * Handle an 8051 interrupt.  @reg holds the DC_DC8051_ERR_FLG bits.
 *
 * Decodes the "set by 8051" info word into an error portion and a
 * host-message portion, dispatches work items for each message, and
 * reports any flags left over.
 */
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	struct hfi1_pportdata *ppd = dd->pport;
	u64 info, err, host_msg;
	int queue_link_down = 0;
	char buf[96];

	/* look at the flags */
	if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
		/* 8051 information set by firmware */
		/* read and split the info register */
		info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
		err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
			& DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
		host_msg = (info >>
			DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
			& DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;

		/*
		 * Handle error flags.
		 */
		if (err & FAILED_LNI) {
			/*
			 * LNI failure: only queue a link down if the
			 * link was actually coming up, otherwise the
			 * report is stale.
			 */
			if (ppd->host_link_state
			    & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
				queue_link_down = 1;
				dd_dev_info(dd, "Link error: %s\n",
					    dc8051_info_err_string(buf,
								   sizeof(buf),
								   err &
								   FAILED_LNI));
			}
			err &= ~(u64)FAILED_LNI;
		}
		/* unknown frames just get counted */
		if (err & UNKNOWN_FRAME) {
			ppd->unknown_frame_count++;
			err &= ~(u64)UNKNOWN_FRAME;
		}
		if (err) {
			/* report remaining errors, but do not act on them */
			dd_dev_err(dd, "8051 info error: %s\n",
				   dc8051_info_err_string(buf, sizeof(buf),
							  err));
		}

		/*
		 * Handle host message flags.
		 */
		if (host_msg & HOST_REQ_DONE) {
			/*
			 * Nothing to do here on request completion other
			 * than clearing the flag.
			 * NOTE(review): presumably the 8051 command path
			 * polls for completion itself — confirm against
			 * the do_8051_command() implementation.
			 */
			host_msg &= ~(u64)HOST_REQ_DONE;
		}
		if (host_msg & BC_SMA_MSG) {
			queue_work(ppd->link_wq, &ppd->sma_message_work);
			host_msg &= ~(u64)BC_SMA_MSG;
		}
		if (host_msg & LINKUP_ACHIEVED) {
			dd_dev_info(dd, "8051: Link up\n");
			queue_work(ppd->link_wq, &ppd->link_up_work);
			host_msg &= ~(u64)LINKUP_ACHIEVED;
		}
		if (host_msg & EXT_DEVICE_CFG_REQ) {
			handle_8051_request(ppd);
			host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
		}
		if (host_msg & VERIFY_CAP_FRAME) {
			queue_work(ppd->link_wq, &ppd->link_vc_work);
			host_msg &= ~(u64)VERIFY_CAP_FRAME;
		}
		if (host_msg & LINK_GOING_DOWN) {
			const char *extra = "";
			/* no downgrade action needed if going down */
			if (host_msg & LINK_WIDTH_DOWNGRADED) {
				host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
				extra = " (ignoring downgrade)";
			}
			dd_dev_info(dd, "8051: Link down%s\n", extra);
			queue_link_down = 1;
			host_msg &= ~(u64)LINK_GOING_DOWN;
		}
		if (host_msg & LINK_WIDTH_DOWNGRADED) {
			queue_work(ppd->link_wq, &ppd->link_downgrade_work);
			host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
		}
		if (host_msg) {
			/* report remaining messages, but do not act on them */
			dd_dev_info(dd, "8051 info host message: %s\n",
				    dc8051_info_host_msg_string(buf,
								sizeof(buf),
								host_msg));
		}

		reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
	}
	if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
		/*
		 * Lost the 8051 heartbeat.  Report once, then disable
		 * the interrupt so it does not fire repeatedly.
		 */
		dd_dev_err(dd, "Lost 8051 heartbeat\n");
		write_csr(dd, DC_DC8051_ERR_EN,
			  read_csr(dd, DC_DC8051_ERR_EN) &
			  ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);

		reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
	}
	if (reg) {
		/* report the error, but do not act on it */
		dd_dev_err(dd, "8051 error: %s\n",
			   dc8051_err_string(buf, sizeof(buf), reg));
	}

	if (queue_link_down) {
		/*
		 * If the link is already going down or disabled, do not
		 * queue another down request.
		 */
		if ((ppd->host_link_state &
		    (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
		    ppd->link_enabled == 0) {
			dd_dev_info(dd, "%s: not queuing link down. host_link_state %x, link_enabled %x\n",
				    __func__, ppd->host_link_state,
				    ppd->link_enabled);
		} else {
			/* is_link_down_queued de-duplicates requests */
			if (xchg(&ppd->is_link_down_queued, 1) == 1)
				dd_dev_info(dd,
					    "%s: link down request already queued\n",
					    __func__);
			else
				queue_work(ppd->link_wq, &ppd->link_down_work);
		}
	}
}
7888
/*
 * Text for FMConfig error codes (DCC_ERR_INFO_FMCONFIG values).
 * Index 7 is intentionally absent (initializes to NULL).
 */
static const char * const fm_config_txt[] = {
[0] =
	"BadHeadDist: Distance violation between two head flits",
[1] =
	"BadTailDist: Distance violation between two tail flits",
[2] =
	"BadCtrlDist: Distance violation between two credit control flits",
[3] =
	"BadCrdAck: Credits return for unsupported VL",
[4] =
	"UnsupportedVLMarker: Received VL Marker",
[5] =
	"BadPreempt: Exceeded the preemption nesting level",
[6] =
	"BadControlFlit: Received unsupported control flit",
/* no 7 */
[8] =
	"UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
};
7908
/*
 * Text for PortRcv error codes (DCC_ERR_INFO_PORTRCV values).
 * Indices 0, 8, and 10 are intentionally absent (initialize to NULL).
 */
static const char * const port_rcv_txt[] = {
[1] =
	"BadPktLen: Illegal PktLen",
[2] =
	"PktLenTooLong: Packet longer than PktLen",
[3] =
	"PktLenTooShort: Packet shorter than PktLen",
[4] =
	"BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
[5] =
	"BadDLID: Illegal DLID (0, doesn't match HFI)",
[6] =
	"BadL2: Illegal L2 opcode",
[7] =
	"BadSC: Unsupported SC",
[9] =
	"BadRC: Illegal RC",
[11] =
	"PreemptError: Preempting with same VL",
[12] =
	"PreemptVL15: Preempting a VL15 packet",
};
7931
7932#define OPA_LDR_FMCONFIG_OFFSET 16
7933#define OPA_LDR_PORTRCV_OFFSET 0
/*
 * Handle a DCC error interrupt.  @reg holds the DCC_ERR_FLG bits.
 *
 * Latches first-error information for later SMA queries, logs each
 * error class, and bounces the link when the FM's PortErrorAction
 * mask requests it for the observed error code.
 */
static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	u64 info, hdr0, hdr1;
	const char *extra;
	char buf[96];
	struct hfi1_pportdata *ppd = dd->pport;
	u8 lcl_reason = 0;
	int do_bounce = 0;

	if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
		/* latch first uncorrectable error info only */
		if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
			info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
			dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
			/* set status bit */
			dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
		}
		reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
	}

	if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
		/* NOTE(review): this inner ppd shadows the outer one */
		struct hfi1_pportdata *ppd = dd->pport;
		/* this counter saturates at (2^32) - 1 */
		if (ppd->link_downed < (u32)UINT_MAX)
			ppd->link_downed++;
		reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
	}

	if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
		u8 reason_valid = 1;

		info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
		/* latch first fmconfig error info only */
		if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
			dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
			/* set status bit */
			dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
		}
		switch (info) {
		case 0:
		case 1:
		case 2:
		case 3:
		case 4:
		case 5:
		case 6:
			extra = fm_config_txt[info];
			break;
		case 8:
			extra = fm_config_txt[info];
			if (ppd->port_error_action &
			    OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
				do_bounce = 1;
				/*
				 * lcl_reason cannot be derived from info
				 * for this case
				 */
				lcl_reason =
				  OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
			}
			break;
		default:
			reason_valid = 0;
			snprintf(buf, sizeof(buf), "reserved%lld", info);
			extra = buf;
			break;
		}

		if (reason_valid && !do_bounce) {
			do_bounce = ppd->port_error_action &
				(1 << (OPA_LDR_FMCONFIG_OFFSET + info));
			lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
		}

		/* just report this */
		dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
					extra);
		reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
	}

	if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
		u8 reason_valid = 1;

		info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
		hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
		hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
		/* latch first rcvport error info only */
		if (!(dd->err_info_rcvport.status_and_code &
		      OPA_EI_STATUS_SMASK)) {
			dd->err_info_rcvport.status_and_code =
				info & OPA_EI_CODE_SMASK;
			/* set status bit */
			dd->err_info_rcvport.status_and_code |=
				OPA_EI_STATUS_SMASK;
			/*
			 * save first 2 flits in the packet that caused
			 * the error
			 */
			dd->err_info_rcvport.packet_flit1 = hdr0;
			dd->err_info_rcvport.packet_flit2 = hdr1;
		}
		switch (info) {
		case 1:
		case 2:
		case 3:
		case 4:
		case 5:
		case 6:
		case 7:
		case 9:
		case 11:
		case 12:
			extra = port_rcv_txt[info];
			break;
		default:
			reason_valid = 0;
			snprintf(buf, sizeof(buf), "reserved%lld", info);
			extra = buf;
			break;
		}

		if (reason_valid && !do_bounce) {
			do_bounce = ppd->port_error_action &
				(1 << (OPA_LDR_PORTRCV_OFFSET + info));
			lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
		}

		/* just report this */
		dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
					"               hdr0 0x%llx, hdr1 0x%llx\n",
					extra, hdr0, hdr1);

		reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
	}

	if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
		/* informative only */
		dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
		reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
	}
	if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
		/* informative only */
		dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
		reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
	}

	/* suppress late-EBP errors during fault injection testing */
	if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
		reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK;

	/* report any remaining errors */
	if (reg)
		dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
					dcc_err_string(buf, sizeof(buf), reg));

	if (lcl_reason == 0)
		lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;

	if (do_bounce) {
		dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
					__func__);
		set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
		queue_work(ppd->link_wq, &ppd->link_bounce_work);
	}
}
8095
/* Report an LCB error interrupt; no recovery action is taken. */
static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];

	dd_dev_info(dd, "LCB Error: %s\n",
		    lcb_err_string(buf, sizeof(buf), reg));
}
8103
8104
8105
8106
/*
 * Dispatch a DC interrupt.  @source indexes the dc_errs table; entries
 * with a handler are cleared and handled there.
 */
static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
{
	const struct err_reg_info *eri = &dc_errs[source];

	if (eri->handler) {
		interrupt_clear_down(dd, 0, eri);
	} else if (source == 3 /* dc_lbm_int: no handler in dc_errs */) {
		/*
		 * DC LBM parity error: report only.
		 * NOTE(review): presumably this interrupt cannot be
		 * cleared/disabled like the others, hence the special
		 * case — confirm against the dc_errs table definition.
		 */
		dd_dev_err(dd, "Parity error in DC LBM block\n");
	} else {
		dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
	}
}
8128
8129
8130
8131
/* Send-context credit interrupt: update the credit group. */
static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
{
	sc_group_release_update(dd, source);
}
8136
8137
8138
8139
8140
8141
8142
8143
8144
8145
/*
 * SDMA engine interrupt.  @source encodes both the interrupt kind and
 * the engine: kind = source / TXE_NUM_SDMA_ENGINES (0..2), engine =
 * source % TXE_NUM_SDMA_ENGINES.
 */
static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
{
	/* which interrupt kind (0..2) */
	unsigned int what = source / TXE_NUM_SDMA_ENGINES;
	/* which engine */
	unsigned int which = source % TXE_NUM_SDMA_ENGINES;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
		   slashstrip(__FILE__), __LINE__, __func__);
	sdma_dumpstate(&dd->per_sdma[which]);
#endif

	if (likely(what < 3 && which < dd->num_sdma)) {
		sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
	} else {
		/* should not happen */
		dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
	}
}
8166
8167
8168
8169
8170
8171
8172
8173
8174
8175
8176
/*
 * Receive-available interrupt for context @source: hand the context's
 * user interrupt off, or report an unexpected/out-of-range context.
 */
static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
{
	struct hfi1_ctxtdata *rcd;
	char *err_detail;

	if (likely(source < dd->num_rcv_contexts)) {
		rcd = hfi1_rcd_get_by_index(dd, source);
		if (rcd) {
			handle_user_interrupt(rcd);
			hfi1_rcd_put(rcd);
			return;	/* OK */
		}
		/* received an interrupt, but no rcd */
		err_detail = "dataless";
	} else {
		/* received an interrupt, but are not using that context */
		err_detail = "out of range";
	}
	dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
		   err_detail, source);
}
8198
8199
8200
8201
8202
8203
8204
8205
8206
8207
/*
 * Receive-urgent interrupt for context @source: same dispatch as
 * is_rcv_avail_int(), differing only in the message text.
 */
static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
{
	struct hfi1_ctxtdata *rcd;
	char *err_detail;

	if (likely(source < dd->num_rcv_contexts)) {
		rcd = hfi1_rcd_get_by_index(dd, source);
		if (rcd) {
			handle_user_interrupt(rcd);
			hfi1_rcd_put(rcd);
			return;	/* OK */
		}
		/* received an interrupt, but no rcd */
		err_detail = "dataless";
	} else {
		/* received an interrupt, but are not using that context */
		err_detail = "out of range";
	}
	dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
		   err_detail, source);
}
8229
8230
8231
8232
/* Reserved interrupt source fired: report it — should never happen. */
static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
{
	char name[64];

	dd_dev_err(dd, "unexpected %s interrupt\n",
		   is_reserved_name(name, sizeof(name), source));
}
8240
/*
 * Interrupt source dispatch table.  Entries MUST be sorted by
 * ascending range start — is_interrupt() relies on the ordering to
 * find the first entry whose end covers the source.
 */
static const struct is_table is_table[] = {
/*
 * start		     end
 *				name func		interrupt func
 */
{ IS_GENERAL_ERR_START,  IS_GENERAL_ERR_END,
				is_misc_err_name,	is_misc_err_int },
{ IS_SDMAENG_ERR_START,  IS_SDMAENG_ERR_END,
				is_sdma_eng_err_name,	is_sdma_eng_err_int },
{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
				is_sendctxt_err_name,	is_sendctxt_err_int },
{ IS_SDMA_START,	     IS_SDMA_IDLE_END,
				is_sdma_eng_name,	is_sdma_eng_int },
{ IS_VARIOUS_START,	     IS_VARIOUS_END,
				is_various_name,	is_various_int },
{ IS_DC_START,	     IS_DC_END,
				is_dc_name,		is_dc_int },
{ IS_RCVAVAIL_START,     IS_RCVAVAIL_END,
				is_rcv_avail_name,	is_rcv_avail_int },
{ IS_RCVURGENT_START,    IS_RCVURGENT_END,
				is_rcv_urgent_name,	is_rcv_urgent_int },
{ IS_SENDCREDIT_START,   IS_SENDCREDIT_END,
				is_send_credit_name,	is_send_credit_int},
{ IS_RESERVED_START,     IS_RESERVED_END,
				is_reserved_name,	is_reserved_int},
};
8267
8268
8269
8270
8271
/*
 * Dispatch an interrupt source number to its handler via is_table.
 * Avoid snprintf() in here — this runs in interrupt context.
 */
static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
{
	const struct is_table *entry;

	/* is_table is sorted by range start; take the first covering entry */
	for (entry = &is_table[0]; entry->is_name; entry++) {
		if (source <= entry->end) {
			trace_hfi1_interrupt(dd, entry, source);
			entry->is_int(dd, source - entry->start);
			return;
		}
	}
	/* fell off the end */
	dd_dev_err(dd, "invalid interrupt source %u\n", source);
}
8287
8288
8289
8290
8291
8292
8293
8294
8295
8296
8297irqreturn_t general_interrupt(int irq, void *data)
8298{
8299 struct hfi1_devdata *dd = data;
8300 u64 regs[CCE_NUM_INT_CSRS];
8301 u32 bit;
8302 int i;
8303 irqreturn_t handled = IRQ_NONE;
8304
8305 this_cpu_inc(*dd->int_counter);
8306
8307
8308 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8309 if (dd->gi_mask[i] == 0) {
8310 regs[i] = 0;
8311 continue;
8312 }
8313 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8314 dd->gi_mask[i];
8315
8316 if (regs[i])
8317 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8318 }
8319
8320
8321 for_each_set_bit(bit, (unsigned long *)®s[0],
8322 CCE_NUM_INT_CSRS * 64) {
8323 is_interrupt(dd, bit);
8324 handled = IRQ_HANDLED;
8325 }
8326
8327 return handled;
8328}
8329
/*
 * Per-engine SDMA interrupt handler (MSI-X path).  Reads and clears
 * this engine's pending status bits, then dispatches them.
 */
irqreturn_t sdma_interrupt(int irq, void *data)
{
	struct sdma_engine *sde = data;
	struct hfi1_devdata *dd = sde->dd;
	u64 status;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
		   slashstrip(__FILE__), __LINE__, __func__);
	sdma_dumpstate(sde);
#endif

	this_cpu_inc(*dd->int_counter);

	/* this engine's status bits within the shared status CSR */
	status = read_csr(dd,
			  CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
			  & sde->imask;
	if (likely(status)) {
		/* clear the interrupt(s) */
		write_csr(dd,
			  CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
			  status);

		/* handle the interrupt(s) */
		sdma_engine_interrupt(sde, status);
	} else {
		dd_dev_info_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n",
					sde->this_idx);
	}
	return IRQ_HANDLED;
}
8362
8363
8364
8365
8366
8367
/*
 * Clear the receive interrupt for this context and make sure the
 * clear has reached the chip before returning.
 */
static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);

	write_csr(dd, addr, rcd->imask);
	/* force the above write on the chip and get a value back */
	(void)read_csr(dd, addr);
}
8377
8378
/* Force the receive interrupt for this context to fire. */
void force_recv_intr(struct hfi1_ctxtdata *rcd)
{
	write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
}
8383
8384
8385
8386
8387
8388
8389
8390
8391
8392
8393
/*
 * Return non-zero if a packet is present in the context's receive
 * queue.  First checks the cached indication (RHF sequence count or
 * shadow tail), then falls back to reading the tail CSR, since the
 * cached values can lag the chip.
 */
static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
{
	u32 tail;
	int present;

	if (!rcd->rcvhdrtail_kvaddr)
		/* no DMA'd tail: a matching RHF sequence means data */
		present = (rcd->seq_cnt ==
				rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
	else /* is RDMA rtail */
		present = (rcd->head != get_rcvhdrtail(rcd));

	if (present)
		return 1;

	/* fall back to a CSR read, correct independent of DMA copy */
	tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
	return rcd->head != tail;
}
8412
8413
8414
8415
8416
8417
8418
8419
8420
/*
 * Receive context interrupt handler.  Processes packets inline; when
 * the per-interrupt packet limit is hit, defers the remainder to the
 * threaded handler (receive_context_thread) via IRQ_WAKE_THREAD.
 */
irqreturn_t receive_context_interrupt(int irq, void *data)
{
	struct hfi1_ctxtdata *rcd = data;
	struct hfi1_devdata *dd = rcd->dd;
	int disposition;
	int present;

	trace_hfi1_receive_interrupt(dd, rcd);
	this_cpu_inc(*dd->int_counter);
	aspm_ctx_disable(rcd);

	/* receive interrupt remains blocked while processing packets */
	disposition = rcd->do_interrupt(rcd, 0);

	/*
	 * Too many packets were seen while processing packets in this
	 * IRQ handler.  Invoke the handler thread.  The receive interrupt
	 * remains blocked.
	 */
	if (disposition == RCV_PKT_LIMIT)
		return IRQ_WAKE_THREAD;

	/*
	 * The packet processor detected no more packets.  Clear the
	 * receive interrupt and recheck for a packet that may have
	 * arrived after the interrupt was cleared; if so, force the
	 * interrupt so it is not lost.
	 */
	clear_recv_intr(rcd);
	present = check_packet_present(rcd);
	if (present)
		force_recv_intr(rcd);

	return IRQ_HANDLED;
}
8456
8457
8458
8459
8460
/*
 * Threaded handler for a receive context, entered when the hard-IRQ
 * handler returned IRQ_WAKE_THREAD.  Finishes draining the receive
 * queue in threaded mode, then performs the same clear/check/force
 * sequence as the hard handler — with local interrupts disabled so the
 * sequence is not interleaved with a new hard interrupt.
 */
irqreturn_t receive_context_thread(int irq, void *data)
{
	struct hfi1_ctxtdata *rcd = data;
	int present;

	/* receive interrupt is still blocked from the IRQ thread */
	(void)rcd->do_interrupt(rcd, 1);

	/*
	 * The packet processor detected no more packets.  Clear the
	 * receive interrupt, then recheck for a packet that may have
	 * arrived after the last check; force the interrupt if so.
	 */
	local_irq_disable();
	clear_recv_intr(rcd);
	present = check_packet_present(rcd);
	if (present)
		force_recv_intr(rcd);
	local_irq_enable();

	return IRQ_HANDLED;
}
8485
8486
8487
8488u32 read_physical_state(struct hfi1_devdata *dd)
8489{
8490 u64 reg;
8491
8492 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8493 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8494 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8495}
8496
8497u32 read_logical_state(struct hfi1_devdata *dd)
8498{
8499 u64 reg;
8500
8501 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8502 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8503 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8504}
8505
8506static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8507{
8508 u64 reg;
8509
8510 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8511
8512 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8513 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8514 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8515}
8516
8517
8518
8519
8520static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8521{
8522 u32 regno;
8523 int ret;
8524
8525 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8526 if (acquire_lcb_access(dd, 0) == 0) {
8527 *data = read_csr(dd, addr);
8528 release_lcb_access(dd, 0);
8529 return 0;
8530 }
8531 return -EBUSY;
8532 }
8533
8534
8535 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8536 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8537 if (ret != HCMD_SUCCESS)
8538 return -EBUSY;
8539 return 0;
8540}
8541
8542
8543
8544
8545
8546
/*
 * Cache of LCB error-info registers.  read_lcb_csr() serves values
 * from this cache while the link is transitioning (going up or going
 * offline) and the LCB cannot be read; update_lcb_cache() refreshes it.
 */
struct lcb_datum {
	u32 off;	/* LCB CSR offset */
	u64 val;	/* last successfully read value */
};

static struct lcb_datum lcb_cache[] = {
	{ DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0},
	{ DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 },
	{ DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 },
};
8557
8558static void update_lcb_cache(struct hfi1_devdata *dd)
8559{
8560 int i;
8561 int ret;
8562 u64 val;
8563
8564 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8565 ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
8566
8567
8568 if (likely(ret != -EBUSY))
8569 lcb_cache[i].val = val;
8570 }
8571}
8572
8573static int read_lcb_cache(u32 off, u64 *val)
8574{
8575 int i;
8576
8577 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8578 if (lcb_cache[i].off == off) {
8579 *val = lcb_cache[i].val;
8580 return 0;
8581 }
8582 }
8583
8584 pr_warn("%s bad offset 0x%x\n", __func__, off);
8585 return -1;
8586}
8587
8588
8589
8590
8591
/*
 * Read an LCB CSR, choosing the access method by link state:
 *  - link up: route the read through the 8051;
 *  - link transitioning (going up or going offline): serve the value
 *    from lcb_cache;
 *  - otherwise: the host has access, read the CSR directly.
 * Return 0 on success, -EBUSY on failure.
 */
int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
{
	struct hfi1_pportdata *ppd = dd->pport;

	/* if up, go through the 8051 for the value */
	if (ppd->host_link_state & HLS_UP)
		return read_lcb_via_8051(dd, addr, data);
	/* if going up or down, use the cache */
	if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
		if (read_lcb_cache(addr, data))
			return -EBUSY;
		return 0;
	}

	/* otherwise, host has access */
	*data = read_csr(dd, addr);
	return 0;
}
8610
8611
8612
8613
/*
 * Write an LCB CSR on behalf of the host.
 *
 * Under the functional simulator, or with 8051 firmware older than
 * 0.20, take LCB access and write the CSR directly (the firmware
 * command path is not used).  Otherwise hand the write to the 8051
 * via HCMD_WRITE_LCB_CSR.
 * Return 0 on success, -EBUSY on failure.
 */
static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
{
	u32 regno;
	int ret;

	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
	    (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
		if (acquire_lcb_access(dd, 0) == 0) {
			write_csr(dd, addr, data);
			release_lcb_access(dd, 0);
			return 0;
		}
		return -EBUSY;
	}

	/* register is an index of LCB registers: (offset - base) / 8 */
	regno = (addr - DC_LCB_CFG_RUN) >> 3;
	ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
	if (ret != HCMD_SUCCESS)
		return -EBUSY;
	return 0;
}
8636
8637
8638
8639
8640
8641int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8642{
8643 struct hfi1_pportdata *ppd = dd->pport;
8644
8645
8646 if (ppd->host_link_state & HLS_UP)
8647 return write_lcb_via_8051(dd, addr, data);
8648
8649 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8650 return -EBUSY;
8651
8652 write_csr(dd, addr, data);
8653 return 0;
8654}
8655
8656
8657
8658
8659
8660
/*
 * Issue one host command of @type to the 8051 and wait for completion.
 *
 * All commands are serialized under dd->dc8051_lock.  @in_data is the
 * command's request payload.  @out_data, when non-NULL, receives the
 * response payload; for HCMD_WRITE_LCB_CSR it is (ab)used on *input*
 * to carry the full 64-bit LCB write value (see below).
 *
 * Returns the 8051's return code on completion, or a negative errno:
 * -ENODEV if the DC is shut down, -ENXIO after repeated timeouts,
 * -ETIMEDOUT if this command timed out.
 */
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
			   u64 *out_data)
{
	u64 reg, completed;
	int return_code;
	unsigned long timeout;

	hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);

	mutex_lock(&dd->dc8051_lock);

	/* cannot talk to the 8051 while the DC is in reset */
	if (dd->dc_shutdown) {
		return_code = -ENODEV;
		goto fail;
	}

	/*
	 * Timeout recovery: on the first timed-out command, restart the
	 * DC (shutdown + start) and retry this command.  If a previous
	 * restart did not help (count > 1), give up on new commands.
	 */
	if (dd->dc8051_timed_out) {
		if (dd->dc8051_timed_out > 1) {
			dd_dev_err(dd,
				   "Previous 8051 host command timed out, skipping command %u\n",
				   type);
			return_code = -ENXIO;
			goto fail;
		}
		_dc_shutdown(dd);
		_dc_start(dd);
	}

	/*
	 * LCB CSR writes need more payload than HOST_CMD_0 can carry:
	 * the low 40 bits of the write value are packed into in_data
	 * (shifted past the register number), and the top 24 bits go
	 * into the EXT_DEV_0 CSR.  The COMPLETED bit of EXT_DEV_0 is
	 * preserved across the read-modify-write.
	 */
	if (type == HCMD_WRITE_LCB_CSR) {
		in_data |= ((*out_data) & 0xffffffffffull) << 8;
		reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
		reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
		reg |= ((((*out_data) >> 40) & 0xff) <<
			DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
		      | ((((*out_data) >> 48) & 0xffff) <<
			DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
		write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
	}

	/*
	 * Write the command and data, then set REQ_NEW with a second
	 * write to hand the request to the firmware.
	 */
	reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
		<< DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
		| (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
		<< DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
	reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);

	/* poll for completion, up to DC8051_COMMAND_TIMEOUT ms */
	timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
	while (1) {
		reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
		completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
		if (completed)
			break;
		if (time_after(jiffies, timeout)) {
			dd->dc8051_timed_out++;
			dd_dev_err(dd, "8051 host command %u timeout\n", type);
			if (out_data)
				*out_data = 0;
			return_code = -ETIMEDOUT;
			goto fail;
		}
		udelay(2);
	}

	if (out_data) {
		*out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
				& DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
		if (type == HCMD_READ_LCB_CSR) {
			/* top 16 bits of an LCB read come from EXT_DEV_1 */
			*out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
					& DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
					<< (48
					    - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
		}
	}
	return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
				& DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
	dd->dc8051_timed_out = 0;

	/* clear command for the next user */
	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);

fail:
	mutex_unlock(&dd->dc8051_lock);
	return return_code;
}
8781
/* Ask the 8051 to move the physical link to @state (a PLS_* value). */
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
{
	return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
}
8786
/*
 * Load one 32-bit configuration value into the 8051: pack the field id,
 * lane id, and data into the LOAD_CONFIG_DATA command payload.
 * Returns the 8051 command result; anything but HCMD_SUCCESS is logged.
 */
int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
		     u8 lane_id, u32 config_data)
{
	u64 data;
	int ret;

	data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
		| (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
		| (u64)config_data << LOAD_DATA_DATA_SHIFT;
	ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd,
			   "load 8051 config: field id %d, lane %d, err %d\n",
			   (int)field_id, (int)lane_id, ret);
	}
	return ret;
}
8804
8805
8806
8807
8808
8809
8810int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8811 u32 *result)
8812{
8813 u64 big_data;
8814 u32 addr;
8815 int ret;
8816
8817
8818 if (lane_id < 4)
8819 addr = (4 * NUM_GENERAL_FIELDS)
8820 + (lane_id * 4 * NUM_LANE_FIELDS);
8821 else
8822 addr = 0;
8823 addr += field_id * 4;
8824
8825
8826 ret = read_8051_data(dd, addr, 8, &big_data);
8827
8828 if (ret == 0) {
8829
8830 if (addr & 0x4)
8831 *result = (u32)(big_data >> 32);
8832 else
8833 *result = (u32)big_data;
8834 } else {
8835 *result = 0;
8836 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8837 __func__, lane_id, field_id);
8838 }
8839
8840 return ret;
8841}
8842
8843static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8844 u8 continuous)
8845{
8846 u32 frame;
8847
8848 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8849 | power_management << POWER_MANAGEMENT_SHIFT;
8850 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8851 GENERAL_CONFIG, frame);
8852}
8853
8854static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8855 u16 vl15buf, u8 crc_sizes)
8856{
8857 u32 frame;
8858
8859 frame = (u32)vau << VAU_SHIFT
8860 | (u32)z << Z_SHIFT
8861 | (u32)vcu << VCU_SHIFT
8862 | (u32)vl15buf << VL15BUF_SHIFT
8863 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8864 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8865 GENERAL_CONFIG, frame);
8866}
8867
/* Unpack our local link-mode verify-capability frame into misc config
 * bits, local flag bits, and link widths.
 */
static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
				    u8 *flag_bits, u16 *link_widths)
{
	u32 frame;

	read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
			 &frame);
	*misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
	*flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
	*link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
}
8879
8880static int write_vc_local_link_mode(struct hfi1_devdata *dd,
8881 u8 misc_bits,
8882 u8 flag_bits,
8883 u16 link_widths)
8884{
8885 u32 frame;
8886
8887 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8888 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8889 | (u32)link_widths << LINK_WIDTH_SHIFT;
8890 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
8891 frame);
8892}
8893
8894static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8895 u8 device_rev)
8896{
8897 u32 frame;
8898
8899 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8900 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8901 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8902}
8903
/* Unpack the peer's device id and revision from the REMOTE_DEVICE_ID
 * frame.
 */
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
				  u8 *device_rev)
{
	u32 frame;

	read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
	*device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
	*device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
			& REMOTE_DEVICE_REV_MASK;
}
8914
8915int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
8916{
8917 u32 frame;
8918 u32 mask;
8919
8920 mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
8921 read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
8922
8923 frame &= ~mask;
8924 frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
8925 return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
8926 frame);
8927}
8928
/* Extract the firmware version (major/minor from MISC_STATUS, patch
 * from VERSION_PATCH) from 8051 config memory.
 */
void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
		      u8 *ver_patch)
{
	u32 frame;

	read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
	*ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) &
		STS_FM_VERSION_MAJOR_MASK;
	*ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) &
		STS_FM_VERSION_MINOR_MASK;

	/* the patch level lives in a separate config field */
	read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
	*ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) &
		STS_FM_VERSION_PATCH_MASK;
}
8944
/* Unpack the peer's PHY verify-capability frame: power management and
 * continuous-remote-update support.
 */
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
			       u8 *continuous)
{
	u32 frame;

	read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
	*power_management = (frame >> POWER_MANAGEMENT_SHIFT)
		& POWER_MANAGEMENT_MASK;
	*continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
		& CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
}
8956
/* Unpack the peer's fabric verify-capability frame: credit parameters
 * (vAU, Z, vCU), VL15 buffering, and supported CRC sizes.
 */
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
{
	u32 frame;

	read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
	*vau = (frame >> VAU_SHIFT) & VAU_MASK;
	*z = (frame >> Z_SHIFT) & Z_MASK;
	*vcu = (frame >> VCU_SHIFT) & VCU_MASK;
	*vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
	*crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
}
8969
/* Unpack the peer's link-width verify-capability frame: its tx rate
 * and supported link widths.
 */
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
				      u8 *remote_tx_rate,
				      u16 *link_widths)
{
	u32 frame;

	read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
			 &frame);
	*remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
		& REMOTE_TX_RATE_MASK;
	*link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
}
8982
/* Read the enabled-lane-RX field from the LOCAL_LNI_INFO frame. */
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
{
	u32 frame;

	read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
	*enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
}
8990
/* Read the LAST_LOCAL_STATE_COMPLETE config field into *lls. */
static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
{
	read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
}

/* Read the LAST_REMOTE_STATE_COMPLETE config field into *lrs. */
static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
{
	read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
}
9000
9001void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
9002{
9003 u32 frame;
9004 int ret;
9005
9006 *link_quality = 0;
9007 if (dd->pport->host_link_state & HLS_UP) {
9008 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
9009 &frame);
9010 if (ret == 0)
9011 *link_quality = (frame >> LINK_QUALITY_SHIFT)
9012 & LINK_QUALITY_MASK;
9013 }
9014}
9015
/* Read the planned-down remote reason code out of the LINK_QUALITY_INFO
 * frame.
 */
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
{
	u32 frame;

	read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
	*pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
}

/* Read the low byte of the LINK_DOWN_REASON frame. */
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
{
	u32 frame;

	read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
	*ldr = (frame & 0xff);
}
9031
/*
 * Unpack the TX_SETTINGS frame: enabled TX lanes, TX/RX polarity
 * inversion, and the max rate.  Returns the read_8051_config() result;
 * the out-parameters are filled in unconditionally (all-zero frame on a
 * failed read).
 */
static int read_tx_settings(struct hfi1_devdata *dd,
			    u8 *enable_lane_tx,
			    u8 *tx_polarity_inversion,
			    u8 *rx_polarity_inversion,
			    u8 *max_rate)
{
	u32 frame;
	int ret;

	ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
	*enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
		& ENABLE_LANE_TX_MASK;
	*tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
		& TX_POLARITY_INVERSION_MASK;
	*rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
		& RX_POLARITY_INVERSION_MASK;
	*max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
	return ret;
}
9051
9052static int write_tx_settings(struct hfi1_devdata *dd,
9053 u8 enable_lane_tx,
9054 u8 tx_polarity_inversion,
9055 u8 rx_polarity_inversion,
9056 u8 max_rate)
9057{
9058 u32 frame;
9059
9060
9061 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
9062 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
9063 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
9064 | max_rate << MAX_RATE_SHIFT;
9065 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
9066}
9067
9068
9069
9070
9071
9072
/*
 * Read an idle message of @type from the 8051.  On success, the raw
 * response is logged and *data_out is shifted down to just the payload.
 * Returns 0 on success, -EINVAL if the 8051 command failed.
 */
static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
{
	int ret;

	ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd, "read idle message: type %d, err %d\n",
			   (u32)type, ret);
		return -EINVAL;
	}
	dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
	/* return only the payload portion */
	*data_out >>= IDLE_PAYLOAD_SHIFT;
	return 0;
}
9088
9089
9090
9091
9092
9093
9094
/* Read an SMA idle message payload; see read_idle_message() for the
 * return convention.
 */
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
{
	return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
				 data);
}
9100
9101
9102
9103
9104
9105
9106static int send_idle_message(struct hfi1_devdata *dd, u64 data)
9107{
9108 int ret;
9109
9110 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
9111 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
9112 if (ret != HCMD_SUCCESS) {
9113 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
9114 data, ret);
9115 return -EINVAL;
9116 }
9117 return 0;
9118}
9119
9120
9121
9122
9123
9124
9125int send_idle_sma(struct hfi1_devdata *dd, u64 message)
9126{
9127 u64 data;
9128
9129 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
9130 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
9131 return send_idle_message(dd, data);
9132}
9133
9134
9135
9136
9137
9138
9139
/*
 * Bring the link up via "quick linkup", bypassing full link negotiation.
 * Used for loopback and simulator operation.
 * Returns 0 on success, a negative errno on failure.
 */
static int do_quick_linkup(struct hfi1_devdata *dd)
{
	int ret;

	lcb_shutdown(dd, 0);

	if (loopback) {
		/* configure the LCB for loopback and a single lane */
		write_csr(dd, DC_LCB_CFG_LOOPBACK,
			  IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
		write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
	}

	/* take the LCB TX FIFOs out of reset */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);

	/* simulator loopback: start the LCB and allow link up directly */
	if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
		/* enable run */
		write_csr(dd, DC_LCB_CFG_RUN,
			  1ull << DC_LCB_CFG_RUN_EN_SHIFT);
		/* wait for the link transfer to become active */
		ret = wait_link_transfer_active(dd, 10);
		if (ret)
			return ret;
		/* then allow the link to come up */
		write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
			  1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
	}

	if (!loopback) {
		/*
		 * Not in loopback, so the peer is real.  Give it time
		 * to finish its own LCB set up before we proceed.
		 * NOTE(review): fixed 5s pause, no handshake — assumes
		 * the peer is ready within this window.
		 */
		dd_dev_err(dd,
			   "Pausing for peer to be finished with LCB set up\n");
		msleep(5000);
		dd_dev_err(dd, "Continuing with quick linkup\n");
	}

	/* disable LCB error reporting and give the 8051 LCB access */
	write_csr(dd, DC_LCB_ERR_EN, 0);
	set_8051_lcb_access(dd);

	/* move the physical link state straight to quick linkup */
	ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd,
			   "%s: set physical link state to quick LinkUp failed with return %d\n",
			   __func__, ret);
		/* undo: reclaim LCB access, re-enable error reporting */
		set_host_lcb_access(dd);
		write_csr(dd, DC_LCB_ERR_EN, ~0ull);

		if (ret >= 0)
			ret = -EINVAL;
		return ret;
	}

	return 0;
}
9210
9211
9212
9213
/*
 * Configure the chip for the loopback mode requested by the module
 * parameter "loopback".  May rewrite the global loopback mode (the
 * simulator only supports LCB loopback) and sets the global
 * quick_linkup flag where applicable.
 * Returns 0 on success, -EINVAL for unsupported or invalid modes.
 */
static int init_loopback(struct hfi1_devdata *dd)
{
	dd_dev_info(dd, "Entering loopback mode\n");

	/* all loopback modes disable the self-GUID check */
	write_csr(dd, DC_DC8051_CFG_MODE,
		  (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));

	/*
	 * On the functional simulator, any requested loopback mode is
	 * promoted to LCB loopback, which uses quick linkup.
	 */
	if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
	    (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
	     loopback == LOOPBACK_CABLE)) {
		loopback = LOOPBACK_LCB;
		quick_linkup = 1;
		return 0;
	}

	/* SerDes loopback: nothing more to configure here */
	if (loopback == LOOPBACK_SERDES)
		return 0;

	/* LCB loopback uses quick linkup; rejected on FPGA emulation */
	if (loopback == LOOPBACK_LCB) {
		quick_linkup = 1;

		if (dd->icode == ICODE_FPGA_EMULATION) {
			dd_dev_err(dd,
				   "LCB loopback not supported in emulation\n");
			return -EINVAL;
		}
		return 0;
	}

	/* external cable loopback: nothing more to configure here */
	if (loopback == LOOPBACK_CABLE)
		return 0;

	dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
	return -EINVAL;
}
9262
9263
9264
9265
9266
9267static u16 opa_to_vc_link_widths(u16 opa_widths)
9268{
9269 int i;
9270 u16 result = 0;
9271
9272 static const struct link_bits {
9273 u16 from;
9274 u16 to;
9275 } opa_link_xlate[] = {
9276 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
9277 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
9278 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
9279 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
9280 };
9281
9282 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9283 if (opa_widths & opa_link_xlate[i].from)
9284 result |= opa_link_xlate[i].to;
9285 }
9286 return result;
9287}
9288
9289
9290
9291
/*
 * Program our local link attributes into the 8051 ahead of link
 * negotiation: tx settings, host interface version, and the PHY,
 * fabric, link-mode, and device-id verify-capability frames.
 * Returns 0 on success; on failure, logs and returns the failing
 * command's result.
 */
static int set_local_link_attributes(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u8 enable_lane_tx;
	u8 tx_polarity_inversion;
	u8 rx_polarity_inversion;
	int ret;
	u32 misc_bits = 0;

	fabric_serdes_reset(dd);

	/* read current tx settings so we can modify the rate field */
	ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
			       &rx_polarity_inversion, &ppd->local_tx_rate);
	if (ret)
		goto set_local_link_attributes_fail;

	if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
		/* old firmware: single rate value, prefer 25G if enabled */
		if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
			ppd->local_tx_rate = 1;
		else
			ppd->local_tx_rate = 0;
	} else {
		/* newer firmware: rate is a bitmask of enabled speeds */
		ppd->local_tx_rate = 0;
		if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
			ppd->local_tx_rate |= 2;
		if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
			ppd->local_tx_rate |= 1;
	}

	enable_lane_tx = 0xF; /* enable all four tx lanes */
	ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
				rx_polarity_inversion, ppd->local_tx_rate);
	if (ret != HCMD_SUCCESS)
		goto set_local_link_attributes_fail;

	ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd,
			   "Failed to set host interface version, return 0x%x\n",
			   ret);
		goto set_local_link_attributes_fail;
	}

	ret = write_vc_local_phy(dd,
				 0 /* power_management */,
				 1 /* continuous updates */);
	if (ret != HCMD_SUCCESS)
		goto set_local_link_attributes_fail;

	/* advertise our credit/VL15/CRC capabilities */
	ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
				    ppd->port_crc_mode_enabled);
	if (ret != HCMD_SUCCESS)
		goto set_local_link_attributes_fail;

	/* tell the peer we are in SerDes loopback, if configured */
	if (loopback == LOOPBACK_SERDES)
		misc_bits |= 1 << LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT;

	/* advertise LCB-reset support on sufficiently new firmware */
	if (dd->dc8051_ver >= dc8051_ver(1, 25, 0))
		misc_bits |= 1 << EXT_CFG_LCB_RESET_SUPPORTED_SHIFT;

	ret = write_vc_local_link_mode(dd, misc_bits, 0,
				       opa_to_vc_link_widths(
						ppd->link_width_enabled));
	if (ret != HCMD_SUCCESS)
		goto set_local_link_attributes_fail;

	/* finally, publish our device id and minor revision */
	ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
	if (ret == HCMD_SUCCESS)
		return 0;

set_local_link_attributes_fail:
	dd_dev_err(dd,
		   "Failed to set local link attributes, return 0x%x\n",
		   ret);
	return ret;
}
9385
9386
9387
9388
9389
9390
/*
 * Begin bringing the link up: tune the serdes, then move the link to
 * the polling state.  Does nothing (returns 0) if the driver is not
 * ready for the link.
 */
int start_link(struct hfi1_pportdata *ppd)
{
	/*
	 * Tune the SerDes based on platform/cable configuration before
	 * any state change.
	 */
	tune_serdes(ppd);

	if (!ppd->driver_link_ready) {
		dd_dev_info(ppd->dd,
			    "%s: stopping link start because driver is not ready\n",
			    __func__);
		return 0;
	}

	/*
	 * Clear the full-management pkey before starting the link, so
	 * the port does not come up advertising it.
	 */
	clear_full_mgmt_pkey(ppd);

	return set_link_state(ppd, HLS_DN_POLL);
}
9415
/*
 * Wait for a QSFP module to finish its reset/init sequence.
 *
 * Sleeps 500ms up front, then polls the QSFP input pins for up to 2s
 * until the IntN bit deasserts in the register.  If IntN never
 * deasserts, logs and proceeds anyway.
 */
static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 mask;
	unsigned long timeout;

	/* give the module time to begin its init sequence */
	msleep(500);

	/* poll IntN, up to 2s */
	timeout = jiffies + msecs_to_jiffies(2000);
	while (1) {
		mask = read_csr(dd, dd->hfi1_id ?
				ASIC_QSFP2_IN : ASIC_QSFP1_IN);
		if (!(mask & QSFP_HFI0_INT_N))
			break;
		if (time_after(jiffies, timeout)) {
			dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
				    __func__);
			break;
		}
		udelay(2);
	}
}
9449
/*
 * Enable or disable the QSFP IntN interrupt for this HFI by updating
 * the ASIC_QSFP*_MASK register.  When enabling, any pending IntN is
 * cleared first so a stale assertion does not fire immediately.
 */
static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 mask;

	mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
	if (enable) {
		/* clear any stale IntN before unmasking */
		write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
			  QSFP_HFI0_INT_N);
		mask |= (u64)QSFP_HFI0_INT_N;
	} else {
		mask &= ~(u64)QSFP_HFI0_INT_N;
	}
	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
}
9469
/*
 * Hard-reset the QSFP module: pulse ResetN low for 10us, wait for the
 * module to re-initialize, then re-enable its interrupt and turn off
 * its transmitters.  Returns the result of set_qsfp_tx().
 */
int reset_qsfp(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 mask, qsfp_mask;

	/* disable IntN while the module resets */
	set_qsfp_int_n(ppd, 0);

	/* drive ResetN low (active) ... */
	mask = (u64)QSFP_HFI0_RESET_N;

	qsfp_mask = read_csr(dd,
			     dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
	qsfp_mask &= ~mask;
	write_csr(dd,
		  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);

	udelay(10);

	/* ... then release it */
	qsfp_mask |= mask;
	write_csr(dd,
		  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);

	wait_for_qsfp_init(ppd);

	/* re-enable IntN now that the module is back */
	set_qsfp_int_n(ppd, 1);

	/* after a reset, transmitters default on; turn them off */
	return set_qsfp_tx(ppd, 0);
}
9508
9509static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9510 u8 *qsfp_interrupt_status)
9511{
9512 struct hfi1_devdata *dd = ppd->dd;
9513
9514 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9515 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9516 dd_dev_err(dd, "%s: QSFP cable temperature too high\n",
9517 __func__);
9518
9519 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9520 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9521 dd_dev_err(dd, "%s: QSFP cable temperature too low\n",
9522 __func__);
9523
9524
9525
9526
9527 if (ppd->host_link_state & HLS_DOWN)
9528 return 0;
9529
9530 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9531 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9532 dd_dev_err(dd, "%s: QSFP supply voltage too high\n",
9533 __func__);
9534
9535 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9536 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9537 dd_dev_err(dd, "%s: QSFP supply voltage too low\n",
9538 __func__);
9539
9540
9541
9542 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9543 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9544 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n",
9545 __func__);
9546
9547 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9548 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9549 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n",
9550 __func__);
9551
9552 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9553 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9554 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n",
9555 __func__);
9556
9557 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9558 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9559 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n",
9560 __func__);
9561
9562 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9563 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9564 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n",
9565 __func__);
9566
9567 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9568 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9569 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n",
9570 __func__);
9571
9572 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9573 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9574 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n",
9575 __func__);
9576
9577 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9578 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9579 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n",
9580 __func__);
9581
9582 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9583 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9584 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n",
9585 __func__);
9586
9587 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9588 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9589 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n",
9590 __func__);
9591
9592 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9593 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9594 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n",
9595 __func__);
9596
9597 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9598 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9599 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n",
9600 __func__);
9601
9602
9603
9604
9605 return 0;
9606}
9607
9608
/* Work-queue handler for QSFP module events (presence change or
 * interrupt).  Re-initializes the module and restarts the link on a
 * cache refresh request, and reads/handles the module's interrupt
 * status when interrupt flags are pending.
 */
void qsfp_event(struct work_struct *work)
{
	struct qsfp_data *qd;
	struct hfi1_pportdata *ppd;
	struct hfi1_devdata *dd;

	qd = container_of(work, struct qsfp_data, qsfp_work);
	ppd = qd->ppd;
	dd = ppd->dd;

	/* nothing to do if the module is gone */
	if (!qsfp_mod_present(ppd))
		return;

	if (ppd->host_link_state == HLS_DN_DISABLE) {
		dd_dev_info(ppd->dd,
			    "%s: stopping link start because link is disabled\n",
			    __func__);
		return;
	}

	/* make sure the DC is running before touching the QSFP path */
	dc_start(dd);

	if (qd->cache_refresh_required) {
		/* quiesce IntN while the module re-initializes */
		set_qsfp_int_n(ppd, 0);

		wait_for_qsfp_init(ppd);

		set_qsfp_int_n(ppd, 1);

		start_link(ppd);
	}

	if (qd->check_interrupt_flags) {
		u8 qsfp_interrupt_status[16] = {0,};

		/* read the 16 interrupt-status bytes starting at offset 6 */
		if (one_qsfp_read(ppd, dd->hfi1_id, 6,
				  &qsfp_interrupt_status[0], 16) != 16) {
			dd_dev_info(dd,
				    "%s: Failed to read status of QSFP module\n",
				    __func__);
		} else {
			unsigned long flags;

			handle_qsfp_error_conditions(
					ppd, qsfp_interrupt_status);
			/* ack the flags under the qsfp lock */
			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
			ppd->qsfp_info.check_interrupt_flags = 0;
			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
					       flags);
		}
	}
}
9670
/*
 * One-time QSFP interrupt setup: clear and unmask the IntN and module
 * present pins, set the pin inversion based on current module
 * presence, and enable the QSFP interrupt source for this HFI.
 */
void init_qsfp_int(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd = dd->pport;
	u64 qsfp_mask;

	qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
	/* clear any stale state, then unmask both pins */
	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
		  qsfp_mask);
	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
		  qsfp_mask);

	/* IntN stays disabled until the module path is brought up */
	set_qsfp_int_n(ppd, 0);

	/* invert ModPrstN so a presence *change* triggers an interrupt */
	if (qsfp_mod_present(ppd))
		qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
	write_csr(dd,
		  dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
		  qsfp_mask);

	/* enable the right QSFP interrupt source for this HFI */
	if (!dd->hfi1_id)
		set_intr_bits(dd, QSFP1_INT, QSFP1_INT, true);
	else
		set_intr_bits(dd, QSFP2_INT, QSFP2_INT, true);
}
9698
9699
9700
9701
/*
 * One-time LCB initialization: apply the required register settings
 * while holding the LCB TX FIFOs in reset, then release the reset.
 * Skipped on the functional simulator.
 */
static void init_lcb(struct hfi1_devdata *dd)
{
	/* the simulator does not need/support this LCB programming */
	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
		return;

	/* program LCB registers with the TX FIFOs held in reset */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
	write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
	write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
	write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
	write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
	write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
	/* release the TX FIFO reset */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
}
9719
9720
9721
9722
9723
9724static int test_qsfp_read(struct hfi1_pportdata *ppd)
9725{
9726 int ret;
9727 u8 status;
9728
9729
9730
9731
9732
9733 if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
9734 return 0;
9735
9736
9737 ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
9738 if (ret < 0)
9739 return ret;
9740 if (ret != 1)
9741 return -EIO;
9742
9743 return 0;
9744}
9745
9746
9747
9748
9749
9750
9751
#define MAX_QSFP_RETRIES 20		/* give up after this many QSFP probes */
#define QSFP_RETRY_WAIT 500		/* ms between QSFP probe retries */

/*
 * Try to start the link: first verify the QSFP module responds; if it
 * does not, re-queue ourselves (via ppd->start_link_work) every
 * QSFP_RETRY_WAIT ms, up to MAX_QSFP_RETRIES times, before giving up.
 * On a responding module (or nothing to test), start the link.
 */
static void try_start_link(struct hfi1_pportdata *ppd)
{
	if (test_qsfp_read(ppd)) {
		/* read failed */
		if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
			dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
			return;
		}
		dd_dev_info(ppd->dd,
			    "QSFP not responding, waiting and retrying %d\n",
			    (int)ppd->qsfp_retry_count);
		ppd->qsfp_retry_count++;
		queue_delayed_work(ppd->link_wq, &ppd->start_link_work,
				   msecs_to_jiffies(QSFP_RETRY_WAIT));
		return;
	}
	ppd->qsfp_retry_count = 0;

	start_link(ppd);
}
9779
9780
9781
9782
/* Delayed-work entry point for start_link_work: retry the link start. */
void handle_start_link(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  start_link_work.work);
	try_start_link(ppd);
}
9789
/*
 * Bring up the SerDes and kick off the link: enable extended PSNs if
 * configured, derive the port GUID when unset, initialize the LCB and
 * any loopback mode, re-initialize a present QSFP module, and attempt
 * to start the link.  Returns 0 on success or a negative errno from
 * loopback initialization.
 */
int bringup_serdes(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 guid;
	int ret;

	if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
		add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);

	/* derive the port GUID from the base GUID if it is not set */
	guid = ppd->guids[HFI1_PORT_GUID_INDEX];
	if (!guid) {
		if (dd->base_guid)
			guid = dd->base_guid + ppd->port - 1;
		ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
	}

	/* Set linkinit_reason on power up per OPA spec */
	ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;

	/* one-time LCB init */
	init_lcb(dd);

	if (loopback) {
		ret = init_loopback(dd);
		if (ret < 0)
			return ret;
	}

	get_port_type(ppd);
	if (ppd->port_type == PORT_TYPE_QSFP) {
		/* re-init the module with IntN quiesced */
		set_qsfp_int_n(ppd, 0);
		wait_for_qsfp_init(ppd);
		set_qsfp_int_n(ppd, 1);
	}

	try_start_link(ppd);
	return 0;
}
9828
/*
 * Quiet the serdes and take the link down in preparation for driver
 * unload / device shutdown.
 *
 * The ordering matters: link enables are cleared first, then any
 * pending start_link retry work is flushed and cancelled (with the
 * retry count saturated so a racing work item gives up immediately),
 * and finally the link is forced offline.
 */
void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;

	/* stop new link attempts */
	ppd->driver_link_ready = 0;
	ppd->link_enabled = 0;

	/*
	 * Saturate the retry count so any in-flight try_start_link()
	 * gives up, then make sure no start_link work remains queued.
	 */
	ppd->qsfp_retry_count = MAX_QSFP_RETRIES;
	flush_delayed_work(&ppd->start_link_work);
	cancel_delayed_work_sync(&ppd->start_link_work);

	ppd->offline_disabled_reason =
		HFI1_ODR_MASK(OPA_LINKDOWN_REASON_REBOOT);
	set_link_down_reason(ppd, OPA_LINKDOWN_REASON_REBOOT, 0,
			     OPA_LINKDOWN_REASON_REBOOT);
	set_link_state(ppd, HLS_DN_OFFLINE);

	/* disable the port and any outstanding freeze handling */
	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
	cancel_work_sync(&ppd->freeze_work);
}
9857
9858static inline int init_cpu_counters(struct hfi1_devdata *dd)
9859{
9860 struct hfi1_pportdata *ppd;
9861 int i;
9862
9863 ppd = (struct hfi1_pportdata *)(dd + 1);
9864 for (i = 0; i < dd->num_pports; i++, ppd++) {
9865 ppd->ibport_data.rvp.rc_acks = NULL;
9866 ppd->ibport_data.rvp.rc_qacks = NULL;
9867 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9868 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9869 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9870 if (!ppd->ibport_data.rvp.rc_acks ||
9871 !ppd->ibport_data.rvp.rc_delayed_comp ||
9872 !ppd->ibport_data.rvp.rc_qacks)
9873 return -ENOMEM;
9874 }
9875
9876 return 0;
9877}
9878
9879
9880
9881
/*
 * Write an entry into the receive array at @index.
 *
 * @type:  PT_* receive array type; PT_INVALID / PT_INVALID_FLUSH clear
 *         the entry (address and order forced to zero).
 * @pa:    physical address of the buffer.
 * @order: buffer size encoding written to the BUF_SIZE field.
 *
 * Writes go through the write-combining mapping (rcvarray_wc);
 * flush_wc() is issued for eager entries, explicit flush requests, and
 * at the end of each 4-entry group to push combined writes out.
 */
void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
		  u32 type, unsigned long pa, u16 order)
{
	u64 reg;

	/* nothing to do if the hardware is not present */
	if (!(dd->flags & HFI1_PRESENT))
		goto done;

	if (type == PT_INVALID || type == PT_INVALID_FLUSH) {
		/* invalidating: clear the address and size */
		pa = 0;
		order = 0;
	} else if (type > PT_INVALID) {
		dd_dev_err(dd,
			   "unexpected receive array type %u for index %u, not handled\n",
			   type, index);
		goto done;
	}
	trace_hfi1_put_tid(dd, index, type, pa, order);

#define RT_ADDR_SHIFT 12	/* physical address is stored >> 12 */
	reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
		| (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
		| ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
					<< RCV_ARRAY_RT_ADDR_SHIFT;
	trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg);
	writeq(reg, dd->rcvarray_wc + (index * 8));

	if (type == PT_EAGER || type == PT_INVALID_FLUSH || (index & 3) == 3)
		/*
		 * Eager entries, explicit flushes, and the last entry of
		 * each group of four: force the write-combining buffer out
		 * to the chip.
		 */
		flush_wc();
done:
	return;
}
9919
9920void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9921{
9922 struct hfi1_devdata *dd = rcd->dd;
9923 u32 i;
9924
9925
9926 for (i = rcd->eager_base; i < rcd->eager_base +
9927 rcd->egrbufs.alloced; i++)
9928 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9929
9930 for (i = rcd->expected_base;
9931 i < rcd->expected_base + rcd->expected_count; i++)
9932 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9933}
9934
/*
 * Printable names for the HFI1_IB_CFG_* values used by ib_cfg_name().
 * NOTE(review): indexed directly by the config value - the order here
 * presumably must match the HFI1_IB_CFG_* definitions; verify against
 * the header when adding entries.
 */
static const char * const ib_cfg_name_strings[] = {
	"HFI1_IB_CFG_LIDLMC",
	"HFI1_IB_CFG_LWID_DG_ENB",
	"HFI1_IB_CFG_LWID_ENB",
	"HFI1_IB_CFG_LWID",
	"HFI1_IB_CFG_SPD_ENB",
	"HFI1_IB_CFG_SPD",
	"HFI1_IB_CFG_RXPOL_ENB",
	"HFI1_IB_CFG_LREV_ENB",
	"HFI1_IB_CFG_LINKLATENCY",
	"HFI1_IB_CFG_HRTBT",
	"HFI1_IB_CFG_OP_VLS",
	"HFI1_IB_CFG_VL_HIGH_CAP",
	"HFI1_IB_CFG_VL_LOW_CAP",
	"HFI1_IB_CFG_OVERRUN_THRESH",
	"HFI1_IB_CFG_PHYERR_THRESH",
	"HFI1_IB_CFG_LINKDEFAULT",
	"HFI1_IB_CFG_PKEYS",
	"HFI1_IB_CFG_MTU",
	"HFI1_IB_CFG_LSTATE",
	"HFI1_IB_CFG_VL_HIGH_LIMIT",
	"HFI1_IB_CFG_PMA_TICKS",
	"HFI1_IB_CFG_PORT"
};
9959
9960static const char *ib_cfg_name(int which)
9961{
9962 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9963 return "invalid";
9964 return ib_cfg_name_strings[which];
9965}
9966
/*
 * Read the current value of the given HFI1_IB_CFG_* item for a port.
 *
 * Unimplemented items return 0 and (optionally, when PRINT_UNIMPL is
 * set) log a message.
 */
int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
{
	struct hfi1_devdata *dd = ppd->dd;
	int val = 0;

	switch (which) {
	case HFI1_IB_CFG_LWID_ENB: /* allowed link-width mask */
		val = ppd->link_width_enabled;
		break;
	case HFI1_IB_CFG_LWID: /* currently active link width */
		val = ppd->link_width_active;
		break;
	case HFI1_IB_CFG_SPD_ENB: /* allowed link speeds */
		val = ppd->link_speed_enabled;
		break;
	case HFI1_IB_CFG_SPD: /* current link speed */
		val = ppd->link_speed_active;
		break;

	case HFI1_IB_CFG_RXPOL_ENB:
	case HFI1_IB_CFG_LREV_ENB:
	case HFI1_IB_CFG_LINKLATENCY:
		goto unimplemented;

	case HFI1_IB_CFG_OP_VLS:
		val = ppd->actual_vls_operational;
		break;
	case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
		val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
		break;
	case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
		val = VL_ARB_LOW_PRIO_TABLE_SIZE;
		break;
	case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
		val = ppd->overrun_threshold;
		break;
	case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
		val = ppd->phy_error_threshold;
		break;
	case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
		val = HLS_DEFAULT;
		break;

	case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
	case HFI1_IB_CFG_PMA_TICKS:
	default:
unimplemented:
		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
			dd_dev_info(
				dd,
				"%s: which %s: not implemented\n",
				__func__,
				ib_cfg_name(which));
		break;
	}

	return val;
}
10025
10026
10027
10028
10029#define MAX_MAD_PACKET 2048
10030
10031
10032
10033
10034
10035
10036
10037
10038
10039
/*
 * Return the maximum header size in bytes that fits in a receive
 * header queue entry.
 *
 * rcvhdrqentsize is in DWs; subtract 2 DWs (presumably the RHF/PBC
 * overhead - TODO confirm against the hardware spec), add 1 DW (for
 * the ICRC, presumably), then convert DWs to bytes with << 2.
 *
 * rcd[0] is used because all contexts share the same entry size, and
 * the first kernel context exists by the time this is called - NOTE
 * (review): confirmed only by usage elsewhere, not visible here.
 */
u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
{
	return (dd->rcd[0]->rcvhdrqentsize - 2 + 1) << 2;
}
10054
10055
10056
10057
10058
10059
10060
10061
10062
10063
10064
10065
/*
 * Program per-VL send length checks and credit-return thresholds from
 * the current per-VL MTUs, then set the port MTU cap.
 *
 * VLs 0-3 go in SEND_LEN_CHECK0, VLs 4-7 plus VL15 in SEND_LEN_CHECK1.
 * Lengths are (MTU + max header bytes) in DWs (>> 2).
 */
static void set_send_length(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
	u32 maxvlmtu = dd->vld[15].mtu;
	/* pre-load len2 with the VL15 field; data VLs are OR'd in below */
	u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
			      & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
		SEND_LEN_CHECK1_LEN_VL15_SHIFT;
	int i, j;
	u32 thres;

	for (i = 0; i < ppd->vls_supported; i++) {
		if (dd->vld[i].mtu > maxvlmtu)
			maxvlmtu = dd->vld[i].mtu;
		if (i <= 3)
			/* VLs 0-3 pack into SEND_LEN_CHECK0 */
			len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
				 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
				((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
		else
			/* VLs 4-7 pack into SEND_LEN_CHECK1 */
			len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
				 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
				((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
	}
	write_csr(dd, SEND_LEN_CHECK0, len1);
	write_csr(dd, SEND_LEN_CHECK1, len2);

	/* adjust kernel credit return thresholds based on new MTUs */
	for (i = 0; i < ppd->vls_supported; i++) {
		/* threshold: min of 50% of the context and the MTU-based value */
		thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
			    sc_mtu_to_threshold(dd->vld[i].sc,
						dd->vld[i].mtu,
						dd->rcd[0]->rcvhdrqentsize));
		for (j = 0; j < INIT_SC_PER_VL; j++)
			sc_set_cr_threshold(
					pio_select_send_context_vl(dd, j, i),
					    thres);
	}
	/* VL15 gets the same treatment separately */
	thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
		    sc_mtu_to_threshold(dd->vld[15].sc,
					dd->vld[15].mtu,
					dd->rcd[0]->rcvhdrqentsize));
	sc_set_cr_threshold(dd->vld[15].sc, thres);

	/* Adjust maximum MTU for the port in DC: 10240 has its own encoding */
	dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
		(ilog2(maxvlmtu >> 8) + 1);
	len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
	len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
	len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
		DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
	write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
}
10118
/*
 * Program the port LID and LMC into the DC (for DLID checking) and
 * into every send context (for SLID checking), then propagate the
 * same values to the SDMA engines.
 */
static void set_lidlmc(struct hfi1_pportdata *ppd)
{
	int i;
	u64 sreg = 0;
	struct hfi1_devdata *dd = ppd->dd;
	/* LMC low bits are "don't care" in LID comparisons */
	u32 mask = ~((1U << ppd->lmc) - 1);
	u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
	u32 lid;

	/*
	 * Program 0 in the DC for multicast-range LIDs - NOTE(review):
	 * presumably because the DC must not DLID-match multicast;
	 * confirm against the DC spec.
	 */
	lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 0 : ppd->lid;
	c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
		| DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
	c1 |= ((lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
			<< DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
	      ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
			<< DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
	write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);

	/*
	 * Iterate over known send contexts and set the SLID check.
	 */
	sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
			SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
	       (((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
			SEND_CTXT_CHECK_SLID_VALUE_SHIFT);

	for (i = 0; i < chip_send_contexts(dd); i++) {
		hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
			  i, (u32)sreg);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
	}

	/* the sdma engines need their own SLID check programming */
	sdma_update_lmc(dd, mask, lid);
}
10158
10159static const char *state_completed_string(u32 completed)
10160{
10161 static const char * const state_completed[] = {
10162 "EstablishComm",
10163 "OptimizeEQ",
10164 "VerifyCap"
10165 };
10166
10167 if (completed < ARRAY_SIZE(state_completed))
10168 return state_completed[completed];
10169
10170 return "unknown";
10171}
10172
/*
 * Printable descriptions of the LNI state-complete reason codes,
 * indexed by the reason byte reported in the state-complete frame
 * (see decode_state_complete()).  Gaps in the index are reserved and
 * reported as "Reserved" by the lookup function.
 */
static const char all_lanes_dead_timeout_expired[] =
"All lanes were inactive – was the interconnect media removed?";
static const char tx_out_of_policy[] =
"Passing lanes on local port do not meet the local link width policy";
static const char no_state_complete[] =
"State timeout occurred before link partner completed the state";
static const char * const state_complete_reasons[] = {
	[0x00] = "Reason unknown",
	[0x01] = "Link was halted by driver, refer to LinkDownReason",
	[0x02] = "Link partner reported failure",
	[0x10] = "Unable to achieve frame sync on any lane",
	[0x11] =
	  "Unable to find a common bit rate with the link partner",
	[0x12] =
	  "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
	[0x13] =
	  "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
	[0x14] = no_state_complete,
	[0x15] =
	  "State timeout occurred before link partner identified equalization presets",
	[0x16] =
	  "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
	[0x17] = tx_out_of_policy,
	[0x20] = all_lanes_dead_timeout_expired,
	[0x21] =
	  "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
	[0x22] = no_state_complete,
	[0x23] =
	  "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
	[0x24] = tx_out_of_policy,
	[0x30] = all_lanes_dead_timeout_expired,
	[0x31] =
	  "State timeout occurred waiting for host to process received frames",
	[0x32] = no_state_complete,
	[0x33] =
	  "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
	[0x34] = tx_out_of_policy,
	[0x35] = "Negotiated link width is mutually exclusive",
	[0x36] =
	  "Timed out before receiving verifycap frames in VerifyCap.Exchange",
	[0x37] = "Unable to resolve secure data exchange",
};
10215
10216static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
10217 u32 code)
10218{
10219 const char *str = NULL;
10220
10221 if (code < ARRAY_SIZE(state_complete_reasons))
10222 str = state_complete_reasons[code];
10223
10224 if (str)
10225 return str;
10226 return "Reserved";
10227}
10228
10229
/*
 * Decode and log an LNI state-complete frame.
 *
 * Frame layout (derived from the extraction below):
 *   bit  0      : success flag
 *   bits 1-3    : last state reached
 *   bits 8-15   : failure reason code
 *   bits 16-31  : passing lane mask
 * @prefix identifies the direction ("transmitted" or "received").
 */
static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
				  const char *prefix)
{
	struct hfi1_devdata *dd = ppd->dd;
	u32 success;
	u32 state;
	u32 reason;
	u32 lanes;

	/* pick the frame apart */
	success = frame & 0x1;
	state = (frame >> 1) & 0x7;
	reason = (frame >> 8) & 0xff;
	lanes = (frame >> 16) & 0xffff;

	dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
		   prefix, frame);
	dd_dev_err(dd, "    last reported state state: %s (0x%x)\n",
		   state_completed_string(state), state);
	dd_dev_err(dd, "    state successfully completed: %s\n",
		   success ? "yes" : "no");
	dd_dev_err(dd, "    fail reason 0x%x: %s\n",
		   reason, state_complete_reason_code_string(ppd, reason));
	dd_dev_err(dd, "    passing lane mask: 0x%x", lanes);
}
10262
10263
10264
10265
10266
10267
10268static void check_lni_states(struct hfi1_pportdata *ppd)
10269{
10270 u32 last_local_state;
10271 u32 last_remote_state;
10272
10273 read_last_local_state(ppd->dd, &last_local_state);
10274 read_last_remote_state(ppd->dd, &last_remote_state);
10275
10276
10277
10278
10279
10280
10281 if (last_local_state == 0 && last_remote_state == 0)
10282 return;
10283
10284 decode_state_complete(ppd, last_local_state, "transmitted");
10285 decode_state_complete(ppd, last_remote_state, "received");
10286}
10287
10288
10289static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
10290{
10291 u64 reg;
10292 unsigned long timeout;
10293
10294
10295 timeout = jiffies + msecs_to_jiffies(wait_ms);
10296 while (1) {
10297 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
10298 if (reg)
10299 break;
10300 if (time_after(jiffies, timeout)) {
10301 dd_dev_err(dd,
10302 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
10303 return -ETIMEDOUT;
10304 }
10305 udelay(2);
10306 }
10307 return 0;
10308}
10309
10310
/*
 * Force the logical link state to LINK_DOWN by briefly running the
 * LCB in loopback and then shutting it back down.  The CSR write
 * sequence and ordering mirror the LCB bring-up procedure; do not
 * reorder.
 */
static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;

	/* hold the TX FIFOs in reset and ignore the lost receive clock */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
	write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
		  DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);

	/* configure the LCB for loopback */
	write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
	write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
	write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
	write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);

	/* release reset (read-back flushes the write), then start the LCB */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
	(void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
	udelay(3);
	write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
	write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);

	wait_link_transfer_active(dd, 100);

	/*
	 * Shut down the LCB again.
	 */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
	write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
	write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);

	dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n");
}
10344
10345
10346
10347
10348
10349
10350
10351
10352
/*
 * Take the link to Offline.
 *
 * @rem_reason: reason code passed to the 8051 with the offline request.
 *
 * Asks the 8051 for the Offline physical state, waits for the
 * transition, turns off QSFP TX where applicable, forces the logical
 * state down if it does not fall on its own, waits for the firmware to
 * be ready again, and finally cleans up link-state bookkeeping.
 *
 * Returns 0 on success or a negative errno.  Caller must hold
 * ppd->hls_lock (called from set_link_state()).
 */
static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
{
	struct hfi1_devdata *dd = ppd->dd;
	u32 previous_state;
	int offline_state_ret;
	int ret;

	update_lcb_cache(dd);

	previous_state = ppd->host_link_state;
	ppd->host_link_state = HLS_GOING_OFFLINE;

	/* start offline transition; reason rides in the upper byte */
	ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE);

	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd,
			   "Failed to transition to Offline link state, return %d\n",
			   ret);
		return -EINVAL;
	}
	if (ppd->offline_disabled_reason ==
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
		ppd->offline_disabled_reason =
		HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);

	offline_state_ret = wait_phys_link_offline_substates(ppd, 10000);
	if (offline_state_ret < 0)
		return offline_state_ret;

	/* Disabling AOC transmitters */
	if (ppd->port_type == PORT_TYPE_QSFP &&
	    ppd->qsfp_info.limiting_active &&
	    qsfp_mod_present(ppd)) {
		/* NOTE(review): shadows the outer ret intentionally -
		 * a QSFP lock failure must not fail the offline itself. */
		int ret;

		ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
		if (ret == 0) {
			set_qsfp_tx(ppd, 0);
			release_chip_resource(dd, qsfp_resource(dd));
		} else {
			/* not fatal - continue taking the link offline */
			dd_dev_err(dd,
				   "Unable to acquire lock to turn off QSFP TX\n");
		}
	}

	/*
	 * Wait for the offline.Quiet transition if it hasn't happened yet.
	 */
	if (offline_state_ret != PLS_OFFLINE_QUIET) {
		ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000);
		if (ret < 0)
			return ret;
	}

	/*
	 * Now in charge of LCB - must be after the physical state is
	 * offline.quiet and before host_link_state is changed.
	 */
	set_host_lcb_access(dd);
	write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */

	/* make sure the logical state is also down */
	ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
	if (ret)
		force_logical_link_state_down(ppd);

	ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
	update_statusp(ppd, IB_PORT_DOWN);

	/*
	 * The LNI has a mandatory wait time after the physical state
	 * moves to Offline.Quiet.  Wait for the firmware to be ready to
	 * accept host requests again before declaring the transition done.
	 */
	ret = wait_fm_ready(dd, 7000);
	if (ret) {
		dd_dev_err(dd,
			   "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
		/* state is really offline, so make it so */
		ppd->host_link_state = HLS_DN_OFFLINE;
		return ret;
	}

	/*
	 * The state is now offline; notify/clean up based on where we
	 * came from.
	 */
	ppd->host_link_state = HLS_DN_OFFLINE;
	if (previous_state & HLS_UP) {
		/* went down while link was up */
		handle_linkup_change(dd, 0);
	} else if (previous_state
			& (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
		/* went down while attempting link up - log why it failed */
		check_lni_states(ppd);

		/* The QSFP doesn't need to be reset on LNI failure */
		ppd->qsfp_info.reset_needed = 0;
	}

	/* the active link width (downgrade) is no longer valid */
	ppd->link_width_active = 0;
	ppd->link_width_downgrade_tx_active = 0;
	ppd->link_width_downgrade_rx_active = 0;
	ppd->current_egress_rate = 0;
	return 0;
}
10469
10470
/*
 * Return the printable name for a (single-bit) HLS_* link state.
 *
 * @state is expected to be exactly one HLS_* bit; ilog2() recovers the
 * bit position.  NOTE(review): for state == 0, ilog2() is undefined -
 * the negative-to-unsigned comparison below happens to fail the range
 * check and yields "unknown", but callers shouldn't rely on that.
 */
static const char *link_state_name(u32 state)
{
	const char *name;
	int n = ilog2(state);
	static const char * const names[] = {
		[__HLS_UP_INIT_BP]	 = "INIT",
		[__HLS_UP_ARMED_BP]	 = "ARMED",
		[__HLS_UP_ACTIVE_BP]	 = "ACTIVE",
		[__HLS_DN_DOWNDEF_BP]	 = "DOWNDEF",
		[__HLS_DN_POLL_BP]	 = "POLL",
		[__HLS_DN_DISABLE_BP]	 = "DISABLE",
		[__HLS_DN_OFFLINE_BP]	 = "OFFLINE",
		[__HLS_VERIFY_CAP_BP]	 = "VERIFY_CAP",
		[__HLS_GOING_UP_BP]	 = "GOING_UP",
		[__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
		[__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
	};

	name = n < ARRAY_SIZE(names) ? names[n] : NULL;
	return name ? name : "unknown";
}
10492
10493
10494static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10495{
10496 if (state == HLS_UP_INIT) {
10497 switch (ppd->linkinit_reason) {
10498 case OPA_LINKINIT_REASON_LINKUP:
10499 return "(LINKUP)";
10500 case OPA_LINKINIT_REASON_FLAPPING:
10501 return "(FLAPPING)";
10502 case OPA_LINKINIT_OUTSIDE_POLICY:
10503 return "(OUTSIDE_POLICY)";
10504 case OPA_LINKINIT_QUARANTINED:
10505 return "(QUARANTINED)";
10506 case OPA_LINKINIT_INSUFIC_CAPABILITY:
10507 return "(INSUFIC_CAPABILITY)";
10508 default:
10509 break;
10510 }
10511 }
10512 return "";
10513}
10514
10515
10516
10517
10518
10519
10520u32 driver_pstate(struct hfi1_pportdata *ppd)
10521{
10522 switch (ppd->host_link_state) {
10523 case HLS_UP_INIT:
10524 case HLS_UP_ARMED:
10525 case HLS_UP_ACTIVE:
10526 return IB_PORTPHYSSTATE_LINKUP;
10527 case HLS_DN_POLL:
10528 return IB_PORTPHYSSTATE_POLLING;
10529 case HLS_DN_DISABLE:
10530 return IB_PORTPHYSSTATE_DISABLED;
10531 case HLS_DN_OFFLINE:
10532 return OPA_PORTPHYSSTATE_OFFLINE;
10533 case HLS_VERIFY_CAP:
10534 return IB_PORTPHYSSTATE_TRAINING;
10535 case HLS_GOING_UP:
10536 return IB_PORTPHYSSTATE_TRAINING;
10537 case HLS_GOING_OFFLINE:
10538 return OPA_PORTPHYSSTATE_OFFLINE;
10539 case HLS_LINK_COOLDOWN:
10540 return OPA_PORTPHYSSTATE_OFFLINE;
10541 case HLS_DN_DOWNDEF:
10542 default:
10543 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10544 ppd->host_link_state);
10545 return -1;
10546 }
10547}
10548
10549
10550
10551
10552
10553
10554u32 driver_lstate(struct hfi1_pportdata *ppd)
10555{
10556 if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
10557 return IB_PORT_DOWN;
10558
10559 switch (ppd->host_link_state & HLS_UP) {
10560 case HLS_UP_INIT:
10561 return IB_PORT_INIT;
10562 case HLS_UP_ARMED:
10563 return IB_PORT_ARMED;
10564 case HLS_UP_ACTIVE:
10565 return IB_PORT_ACTIVE;
10566 default:
10567 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10568 ppd->host_link_state);
10569 return -1;
10570 }
10571}
10572
10573void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10574 u8 neigh_reason, u8 rem_reason)
10575{
10576 if (ppd->local_link_down_reason.latest == 0 &&
10577 ppd->neigh_link_down_reason.latest == 0) {
10578 ppd->local_link_down_reason.latest = lcl_reason;
10579 ppd->neigh_link_down_reason.latest = neigh_reason;
10580 ppd->remote_link_down_reason = rem_reason;
10581 }
10582}
10583
10584
10585
10586
10587
10588
10589
10590
10591static inline bool data_vls_operational(struct hfi1_pportdata *ppd)
10592{
10593 int i;
10594 u64 reg;
10595
10596 if (!ppd->actual_vls_operational)
10597 return false;
10598
10599 for (i = 0; i < ppd->vls_supported; i++) {
10600 reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i));
10601 if ((reg && !ppd->dd->vld[i].mtu) ||
10602 (!reg && ppd->dd->vld[i].mtu))
10603 return false;
10604 }
10605
10606 return true;
10607}
10608
10609
10610
10611
10612
10613
10614
10615
10616
/*
 * Change the (host) link state.
 *
 * Implements the driver's link state machine: validates the requested
 * transition against the current ppd->host_link_state, drives the DC /
 * 8051 into the matching physical state, and waits for the expected
 * logical state where required.
 *
 * Returns 0 on success or a negative errno.  Serialized on
 * ppd->hls_lock; a poll -> poll request is treated as a link bounce.
 */
int set_link_state(struct hfi1_pportdata *ppd, u32 state)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct ib_event event = {.device = NULL};
	int ret1, ret = 0;
	int orig_new_state, poll_bounce;

	mutex_lock(&ppd->hls_lock);

	orig_new_state = state;
	if (state == HLS_DN_DOWNDEF)
		state = HLS_DEFAULT;

	/* interpret poll -> poll as a link bounce */
	poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
		      state == HLS_DN_POLL;

	dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
		    link_state_name(ppd->host_link_state),
		    link_state_name(orig_new_state),
		    poll_bounce ? "(bounce) " : "",
		    link_state_reason_name(ppd, state));

	/*
	 * If we're going to a (HLS_*) link state that implies the logical
	 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
	 * reset is_sm_config_started to 0.
	 */
	if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
		ppd->is_sm_config_started = 0;

	/*
	 * Do nothing if the states match - but let a poll -> poll link
	 * bounce go through.
	 */
	if (ppd->host_link_state == state && !poll_bounce)
		goto done;

	switch (state) {
	case HLS_UP_INIT:
		if (ppd->host_link_state == HLS_DN_POLL &&
		    (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
			/*
			 * Quick link up jumps from polling to here -
			 * no 8051 training handshake in this mode.
			 */
		} else if (ppd->host_link_state != HLS_GOING_UP) {
			goto unexpected;
		}

		/*
		 * Wait for Link_Up physical state, then for the logical
		 * state to reach INIT.
		 */
		ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
		if (ret) {
			dd_dev_err(dd,
				   "%s: physical state did not change to LINK-UP\n",
				   __func__);
			break;
		}

		ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
		if (ret) {
			dd_dev_err(dd,
				   "%s: logical state did not change to INIT\n",
				   __func__);
			break;
		}

		/* clear old transient LINKINIT_REASON code */
		if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
			ppd->linkinit_reason =
				OPA_LINKINIT_REASON_LINKUP;

		/* enable the port for receive */
		add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);

		handle_linkup_change(dd, 1);
		pio_kernel_linkup(dd);

		/* update the transmit counters for the new link width */
		update_xmit_counters(ppd, ppd->link_width_active);

		ppd->host_link_state = HLS_UP_INIT;
		update_statusp(ppd, IB_PORT_INIT);
		break;
	case HLS_UP_ARMED:
		if (ppd->host_link_state != HLS_UP_INIT)
			goto unexpected;

		/* refuse to arm with inconsistent VL credits/MTUs */
		if (!data_vls_operational(ppd)) {
			dd_dev_err(dd,
				   "%s: Invalid data VL credits or mtu\n",
				   __func__);
			ret = -EINVAL;
			break;
		}

		set_logical_state(dd, LSTATE_ARMED);
		ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
		if (ret) {
			dd_dev_err(dd,
				   "%s: logical state did not change to ARMED\n",
				   __func__);
			break;
		}
		ppd->host_link_state = HLS_UP_ARMED;
		update_statusp(ppd, IB_PORT_ARMED);

		/*
		 * The simulator does not send SMA messages, so set
		 * neighbor_normal here on its behalf.
		 */
		if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
			ppd->neighbor_normal = 1;
		break;
	case HLS_UP_ACTIVE:
		if (ppd->host_link_state != HLS_UP_ARMED)
			goto unexpected;

		set_logical_state(dd, LSTATE_ACTIVE);
		ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
		if (ret) {
			dd_dev_err(dd,
				   "%s: logical state did not change to ACTIVE\n",
				   __func__);
		} else {
			/* tell all engines to go running */
			sdma_all_running(dd);
			ppd->host_link_state = HLS_UP_ACTIVE;
			update_statusp(ppd, IB_PORT_ACTIVE);

			/* signal the IB layer that the port went active */
			event.device = &dd->verbs_dev.rdi.ibdev;
			event.element.port_num = ppd->port;
			event.event = IB_EVENT_PORT_ACTIVE;
		}
		break;
	case HLS_DN_POLL:
		/* restart the DC if it was shut down while disabled/offline */
		if ((ppd->host_link_state == HLS_DN_DISABLE ||
		     ppd->host_link_state == HLS_DN_OFFLINE) &&
		    dd->dc_shutdown)
			dc_start(dd);
		/* Hand LED control to the DC */
		write_csr(dd, DCC_CFG_LED_CNTRL, 0);

		if (ppd->host_link_state != HLS_DN_OFFLINE) {
			u8 tmp = ppd->link_enabled;

			ret = goto_offline(ppd, ppd->remote_link_down_reason);
			if (ret) {
				ppd->link_enabled = tmp;
				break;
			}
			ppd->remote_link_down_reason = 0;
			/* re-enable the link if the driver allows it */
			if (ppd->driver_link_ready)
				ppd->link_enabled = 1;
		}

		set_all_slowpath(ppd->dd);
		ret = set_local_link_attributes(ppd);
		if (ret)
			break;

		ppd->port_error_action = 0;

		if (quick_linkup) {
			/* quick linkup does not go into polling */
			ret = do_quick_linkup(dd);
		} else {
			ret1 = set_physical_link_state(dd, PLS_POLLING);
			if (!ret1)
				ret1 = wait_phys_link_out_of_offline(ppd,
								     3000);
			if (ret1 != HCMD_SUCCESS) {
				dd_dev_err(dd,
					   "Failed to transition to Polling link state, return 0x%x\n",
					   ret1);
				ret = -EINVAL;
			}
		}

		/*
		 * Change the host link state after the physical state
		 * request; the disabled reason goes back to NONE now that
		 * we are attempting link up.
		 */
		ppd->host_link_state = HLS_DN_POLL;
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
		/*
		 * If polling could not be started, go back offline;
		 * otherwise just log the new physical state.
		 */
		if (ret)
			goto_offline(ppd, 0);
		else
			log_physical_state(ppd, PLS_POLLING);
		break;
	case HLS_DN_DISABLE:
		/* link is disabled */
		ppd->link_enabled = 0;

		/* allow any state to transition to disabled */

		/* must transition to offline first */
		if (ppd->host_link_state != HLS_DN_OFFLINE) {
			ret = goto_offline(ppd, ppd->remote_link_down_reason);
			if (ret)
				break;
			ppd->remote_link_down_reason = 0;
		}

		if (!dd->dc_shutdown) {
			ret1 = set_physical_link_state(dd, PLS_DISABLED);
			if (ret1 != HCMD_SUCCESS) {
				dd_dev_err(dd,
					   "Failed to transition to Disabled link state, return 0x%x\n",
					   ret1);
				ret = -EINVAL;
				break;
			}
			ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000);
			if (ret) {
				dd_dev_err(dd,
					   "%s: physical state did not change to DISABLED\n",
					   __func__);
				break;
			}
			/* the DC can be shut down once fully disabled */
			dc_shutdown(dd);
		}
		ppd->host_link_state = HLS_DN_DISABLE;
		break;
	case HLS_DN_OFFLINE:
		if (ppd->host_link_state == HLS_DN_DISABLE)
			dc_start(dd);

		/* allow any state to transition to offline */
		ret = goto_offline(ppd, ppd->remote_link_down_reason);
		if (!ret)
			ppd->remote_link_down_reason = 0;
		break;
	case HLS_VERIFY_CAP:
		if (ppd->host_link_state != HLS_DN_POLL)
			goto unexpected;
		ppd->host_link_state = HLS_VERIFY_CAP;
		log_physical_state(ppd, PLS_CONFIGPHY_VERIFYCAP);
		break;
	case HLS_GOING_UP:
		if (ppd->host_link_state != HLS_VERIFY_CAP)
			goto unexpected;

		ret1 = set_physical_link_state(dd, PLS_LINKUP);
		if (ret1 != HCMD_SUCCESS) {
			dd_dev_err(dd,
				   "Failed to transition to link up state, return 0x%x\n",
				   ret1);
			ret = -EINVAL;
			break;
		}
		ppd->host_link_state = HLS_GOING_UP;
		break;

	case HLS_GOING_OFFLINE:		/* transient within goto_offline() */
	case HLS_LINK_COOLDOWN:		/* transient within goto_offline() */
	default:
		dd_dev_info(dd, "%s: state 0x%x: not supported\n",
			    __func__, state);
		ret = -EINVAL;
		break;
	}

	goto done;

unexpected:
	dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
		   __func__, link_state_name(ppd->host_link_state),
		   link_state_name(state));
	ret = -EINVAL;

done:
	mutex_unlock(&ppd->hls_lock);

	/* dispatch the PORT_ACTIVE event outside the lock */
	if (event.device)
		ib_dispatch_event(&event);

	return ret;
}
10917
/*
 * Set the given HFI1_IB_CFG_* item for a port.
 *
 * Returns 0 on success, -EINVAL for rejected values; unimplemented
 * items are ignored (optionally logged when PRINT_UNIMPL is set).
 */
int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
{
	u64 reg;
	int ret = 0;

	switch (which) {
	case HFI1_IB_CFG_LIDLMC:
		set_lidlmc(ppd);
		break;
	case HFI1_IB_CFG_VL_HIGH_LIMIT:
		/*
		 * The VL Arbitrator high limit is sent in units of 4k
		 * bytes; the chip register takes units of 64 bytes.
		 */
		val *= 4096 / 64;
		reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
			<< SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
		write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
		break;
	case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
		/* HFI only supports POLL as the default link down state */
		if (val != HLS_DN_POLL)
			ret = -EINVAL;
		break;
	case HFI1_IB_CFG_OP_VLS:
		if (ppd->vls_operational != val) {
			ppd->vls_operational = val;
			if (!ppd->port)
				ret = -EINVAL;
		}
		break;
	/*
	 * For link width, link width downgrade, and speed enable, always
	 * AND the setting with what is actually supported.
	 */
	case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
		ppd->link_width_enabled = val & ppd->link_width_supported;
		break;
	case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
		ppd->link_width_downgrade_enabled =
				val & ppd->link_width_downgrade_supported;
		break;
	case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
		ppd->link_speed_enabled = val & ppd->link_speed_supported;
		break;
	case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
		/*
		 * HFI does not follow IB specs, save this value
		 * so we can report it, if asked.
		 */
		ppd->overrun_threshold = val;
		break;
	case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
		/*
		 * HFI does not follow IB specs, save this value
		 * so we can report it, if asked.
		 */
		ppd->phy_error_threshold = val;
		break;

	case HFI1_IB_CFG_MTU:
		set_send_length(ppd);
		break;

	case HFI1_IB_CFG_PKEYS:
		if (HFI1_CAP_IS_KSET(PKEY_CHECK))
			set_partition_keys(ppd);
		break;

	default:
		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
			dd_dev_info(ppd->dd,
				    "%s: which %s, val 0x%x: not implemented\n",
				    __func__, ib_cfg_name(which), val);
		break;
	}
	return ret;
}
11000
11001
11002static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
11003{
11004 int i;
11005
11006 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
11007 VL_ARB_LOW_PRIO_TABLE_SIZE);
11008 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
11009 VL_ARB_HIGH_PRIO_TABLE_SIZE);
11010
11011
11012
11013
11014
11015
11016
11017
11018
11019
11020
11021 for (i = 0; i < MAX_PRIO_TABLE; i++)
11022 spin_lock_init(&ppd->vl_arb_cache[i].lock);
11023}
11024
11025
11026
11027
11028
11029
11030
11031static inline struct vl_arb_cache *
11032vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
11033{
11034 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
11035 return NULL;
11036 spin_lock(&ppd->vl_arb_cache[idx].lock);
11037 return &ppd->vl_arb_cache[idx];
11038}
11039
/* Release the lock taken by vl_arb_lock_cache() for the same index. */
static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
{
	spin_unlock(&ppd->vl_arb_cache[idx].lock);
}
11044
/* Copy the cached VL arbitration table out to @vl (caller holds the lock). */
static void vl_arb_get_cache(struct vl_arb_cache *cache,
			     struct ib_vl_weight_elem *vl)
{
	memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
}
11050
/* Store a new VL arbitration table into the cache (caller holds the lock). */
static void vl_arb_set_cache(struct vl_arb_cache *cache,
			     struct ib_vl_weight_elem *vl)
{
	memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
}
11056
11057static int vl_arb_match_cache(struct vl_arb_cache *cache,
11058 struct ib_vl_weight_elem *vl)
11059{
11060 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
11061}
11062
11063
11064
/*
 * Write @size VL arbitration entries to the chip priority list
 * starting at CSR @target.
 *
 * When the link is up (and the chip is not an A0 stepping), the data
 * VLs are stopped and drained around the update to avoid arbitrating
 * with stale weights mid-flight.  Serialized on ppd->hls_lock.
 *
 * Returns 0 on success, or the error from stop_drain_data_vls().
 */
static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
			  u32 size, struct ib_vl_weight_elem *vl)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 reg;
	unsigned int i, is_up = 0;
	int drain, ret = 0;

	mutex_lock(&ppd->hls_lock);

	if (ppd->host_link_state & HLS_UP)
		is_up = 1;

	/* A0 parts cannot drain; otherwise drain whenever the link is up */
	drain = !is_ax(dd) && is_up;

	if (drain)
		/*
		 * Before adjusting VL arbitration weights, empty per-VL
		 * FIFOs to guarantee that no packet arbitrates against
		 * its own weight change.
		 */
		ret = stop_drain_data_vls(dd);

	if (ret) {
		dd_dev_err(
			dd,
			"%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
			__func__);
		goto err;
	}

	for (i = 0; i < size; i++, vl++) {
		/* pack one {VL, weight} pair per priority-list entry */
		reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
				<< SEND_LOW_PRIORITY_LIST_VL_SHIFT)
		      | (((u64)vl->weight
				& SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
				<< SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
		write_csr(dd, target + (i * 8), reg);
	}
	pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);

	if (drain)
		open_fill_data_vls(dd); /* reopen all VLs */

err:
	mutex_unlock(&ppd->hls_lock);

	return ret;
}
11119
11120
11121
11122
/*
 * Read one credit merge VL CSR and unpack its dedicated and shared
 * limits into @vll.  Values are stored big-endian, matching the FM
 * wire format used by struct buffer_control.
 */
static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
			   struct vl_limit *vll)
{
	u64 reg = read_csr(dd, csr);

	vll->dedicated = cpu_to_be16(
		(reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
		& SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
	vll->shared = cpu_to_be16(
		(reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
		& SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
}
11135
11136
11137
11138
/*
 * Read the current credit merge limits for all data VLs and VL15 into
 * @bc (big-endian, FM wire format).  If @overall_limit is non-NULL, it
 * receives the total credit limit (host byte order).
 *
 * Returns sizeof(struct buffer_control), the number of bytes the caller
 * should report (see fm_get_table()).
 */
static int get_buffer_control(struct hfi1_devdata *dd,
			      struct buffer_control *bc, u16 *overall_limit)
{
	u64 reg;
	int i;

	/* not all entries of bc are filled in below */
	memset(bc, 0, sizeof(*bc));

	/* data VLs: one CSR per VL, 8 bytes apart */
	for (i = 0; i < TXE_NUM_DATA_VL; i++)
		read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);

	/* VL15 has its own CSR; same field layout is assumed */
	read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);

	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
	bc->overall_shared_limit = cpu_to_be16(
		(reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
		& SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
	if (overall_limit)
		*overall_limit = (reg
			>> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
			& SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
	return sizeof(struct buffer_control);
}
11165
11166static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11167{
11168 u64 reg;
11169 int i;
11170
11171
11172 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
11173 for (i = 0; i < sizeof(u64); i++) {
11174 u8 byte = *(((u8 *)®) + i);
11175
11176 dp->vlnt[2 * i] = byte & 0xf;
11177 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
11178 }
11179
11180 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
11181 for (i = 0; i < sizeof(u64); i++) {
11182 u8 byte = *(((u8 *)®) + i);
11183
11184 dp->vlnt[16 + (2 * i)] = byte & 0xf;
11185 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
11186 }
11187 return sizeof(struct sc2vlnt);
11188}
11189
11190static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
11191 struct ib_vl_weight_elem *vl)
11192{
11193 unsigned int i;
11194
11195 for (i = 0; i < nelems; i++, vl++) {
11196 vl->vl = 0xf;
11197 vl->weight = 0;
11198 }
11199}
11200
/*
 * Write the SC -> VLnt mapping tables from @dp into the DCC.
 * Each DC_SC_VL_VAL() invocation packs 16 4-bit entries into one CSR;
 * entries are masked to 4 bits before packing.
 */
static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
{
	write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
		  DC_SC_VL_VAL(15_0,
			       0, dp->vlnt[0] & 0xf,
			       1, dp->vlnt[1] & 0xf,
			       2, dp->vlnt[2] & 0xf,
			       3, dp->vlnt[3] & 0xf,
			       4, dp->vlnt[4] & 0xf,
			       5, dp->vlnt[5] & 0xf,
			       6, dp->vlnt[6] & 0xf,
			       7, dp->vlnt[7] & 0xf,
			       8, dp->vlnt[8] & 0xf,
			       9, dp->vlnt[9] & 0xf,
			       10, dp->vlnt[10] & 0xf,
			       11, dp->vlnt[11] & 0xf,
			       12, dp->vlnt[12] & 0xf,
			       13, dp->vlnt[13] & 0xf,
			       14, dp->vlnt[14] & 0xf,
			       15, dp->vlnt[15] & 0xf));
	write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
		  DC_SC_VL_VAL(31_16,
			       16, dp->vlnt[16] & 0xf,
			       17, dp->vlnt[17] & 0xf,
			       18, dp->vlnt[18] & 0xf,
			       19, dp->vlnt[19] & 0xf,
			       20, dp->vlnt[20] & 0xf,
			       21, dp->vlnt[21] & 0xf,
			       22, dp->vlnt[22] & 0xf,
			       23, dp->vlnt[23] & 0xf,
			       24, dp->vlnt[24] & 0xf,
			       25, dp->vlnt[25] & 0xf,
			       26, dp->vlnt[26] & 0xf,
			       27, dp->vlnt[27] & 0xf,
			       28, dp->vlnt[28] & 0xf,
			       29, dp->vlnt[29] & 0xf,
			       30, dp->vlnt[30] & 0xf,
			       31, dp->vlnt[31] & 0xf));
}
11240
11241static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
11242 u16 limit)
11243{
11244 if (limit != 0)
11245 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
11246 what, (int)limit, idx);
11247}
11248
11249
11250static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
11251{
11252 u64 reg;
11253
11254 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11255 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
11256 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
11257 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11258}
11259
11260
11261static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
11262{
11263 u64 reg;
11264
11265 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11266 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
11267 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
11268 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11269}
11270
11271
11272static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
11273{
11274 u64 reg;
11275 u32 addr;
11276
11277 if (vl < TXE_NUM_DATA_VL)
11278 addr = SEND_CM_CREDIT_VL + (8 * vl);
11279 else
11280 addr = SEND_CM_CREDIT_VL15;
11281
11282 reg = read_csr(dd, addr);
11283 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
11284 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
11285 write_csr(dd, addr, reg);
11286}
11287
11288
11289static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
11290{
11291 u64 reg;
11292 u32 addr;
11293
11294 if (vl < TXE_NUM_DATA_VL)
11295 addr = SEND_CM_CREDIT_VL + (8 * vl);
11296 else
11297 addr = SEND_CM_CREDIT_VL15;
11298
11299 reg = read_csr(dd, addr);
11300 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
11301 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
11302 write_csr(dd, addr, reg);
11303}
11304
11305
/*
 * Poll SendCmCreditUsedStatus until every bit in @mask clears, or until
 * VL_STATUS_CLEAR_TIMEOUT ms elapse.  @which names the credit type
 * ("shared"/"dedicated") for the timeout message.
 *
 * On timeout this logs and returns anyway: proceeding may lose credits,
 * which a link bounce would recover.
 */
static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
				     const char *which)
{
	unsigned long timeout;
	u64 reg;

	timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
	while (1) {
		reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;

		if (reg == 0)
			return;	/* all clear */
		if (time_after(jiffies, timeout))
			break;
		udelay(1);
	}

	dd_dev_err(dd,
		   "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
		   which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
	/*
	 * If this occurs, it is likely there was a credit loss on the
	 * link.  The only recovery from that is a link bounce.
	 */
	dd_dev_err(dd,
		   "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
}
11333
11334
11335
11336
11337
11338
11339
11340
11341
11342
11343
11344
11345
11346
11347
11348
11349
11350
11351
11352
11353
11354
11355
11356
11357
/*
 * set_buffer_control - program credit merge limits from an FM table
 * @ppd: the port
 * @new_bc: requested per-VL dedicated/shared limits and global limits in
 *	    big-endian FM wire format; entries for unsupported VLs are
 *	    zeroed in place
 *
 * Credit limits cannot simply be overwritten: limits must be lowered
 * before they are raised, and the hardware must be given time to return
 * outstanding credits in between, or credits can be lost.  The sequence:
 *   1. raise the global total limit if it grows
 *   2. zero the global shared limit if it shrinks (or, on is_ax()
 *	silicon, if any shared limit changes), and zero the shared limit
 *	of every VL whose limits change
 *   3. wait for the affected credit-return status bits to clear
 *   4. lower shrinking dedicated limits, wait again
 *   5. raise growing dedicated limits
 *   6. raise growing shared limits, then the global shared limit
 *   7. lower the global total limit if it shrinks
 * Finally, if any VL limits changed, recompute the set of VLs that have
 * credits and remap the SDMA and PIO engines accordingly.
 *
 * Returns 0 on success, or the error from sdma/pio remapping.
 */
int set_buffer_control(struct hfi1_pportdata *ppd,
		       struct buffer_control *new_bc)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 changing_mask, ld_mask, stat_mask;
	int change_count;
	int i, use_all_mask;
	int this_shared_changing;
	int vl_count = 0, ret;
	/*
	 * A0: add the variable any_shared_limit_changing below if we
	 * need the work-around of zeroing the global shared limit
	 * whenever any per-VL shared limit changes.
	 */
	int any_shared_limit_changing;
	struct buffer_control cur_bc;		/* current hardware values */
	u8 changing[OPA_MAX_VLS];		/* VLs whose limits change */
	u8 lowering_dedicated[OPA_MAX_VLS];	/* VLs whose dedicated shrinks */
	u16 cur_total;
	u32 new_total = 0;
	const u64 all_mask =
	SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
	 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;

/* a VL is valid if it is a supported data VL or VL15 */
#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
#define NUM_USABLE_VLS 16	/* data VLs + VL15 */

	/* find the new total credits, complain about unsupported VLs */
	for (i = 0; i < OPA_MAX_VLS; i++) {
		if (valid_vl(i)) {
			new_total += be16_to_cpu(new_bc->vl[i].dedicated);
			continue;
		}
		nonzero_msg(dd, i, "dedicated",
			    be16_to_cpu(new_bc->vl[i].dedicated));
		nonzero_msg(dd, i, "shared",
			    be16_to_cpu(new_bc->vl[i].shared));
		new_bc->vl[i].dedicated = 0;
		new_bc->vl[i].shared = 0;
	}
	new_total += be16_to_cpu(new_bc->overall_shared_limit);

	/* fetch the current values */
	get_buffer_control(dd, &cur_bc, &cur_total);

	/*
	 * Create the masks we will use.
	 */
	memset(changing, 0, sizeof(changing));
	memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
	/*
	 * NOTE: Assumes that the individual VL bits are adjacent and in
	 * increasing order.
	 */
	stat_mask =
		SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
	changing_mask = 0;
	ld_mask = 0;
	change_count = 0;
	any_shared_limit_changing = 0;
	for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
		if (!valid_vl(i))
			continue;
		this_shared_changing = new_bc->vl[i].shared
						!= cur_bc.vl[i].shared;
		if (this_shared_changing)
			any_shared_limit_changing = 1;
		if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
		    this_shared_changing) {
			changing[i] = 1;
			changing_mask |= stat_mask;
			change_count++;
		}
		if (be16_to_cpu(new_bc->vl[i].dedicated) <
		    be16_to_cpu(cur_bc.vl[i].dedicated)) {
			lowering_dedicated[i] = 1;
			ld_mask |= stat_mask;
		}
	}

	/* bracket the credit change with a total adjustment */
	if (new_total > cur_total)
		set_global_limit(dd, new_total);

	/*
	 * Start the credit change algorithm.
	 */
	use_all_mask = 0;
	if ((be16_to_cpu(new_bc->overall_shared_limit) <
	     be16_to_cpu(cur_bc.overall_shared_limit)) ||
	    (is_ax(dd) && any_shared_limit_changing)) {
		set_global_shared(dd, 0);
		cur_bc.overall_shared_limit = 0;
		use_all_mask = 1;
	}

	for (i = 0; i < NUM_USABLE_VLS; i++) {
		if (!valid_vl(i))
			continue;

		if (changing[i]) {
			set_vl_shared(dd, i, 0);
			cur_bc.vl[i].shared = 0;
		}
	}

	wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
				 "shared");

	if (change_count > 0) {
		/* lower anything that is shrinking first */
		for (i = 0; i < NUM_USABLE_VLS; i++) {
			if (!valid_vl(i))
				continue;

			if (lowering_dedicated[i]) {
				set_vl_dedicated(dd, i,
						 be16_to_cpu(new_bc->
							     vl[i].dedicated));
				cur_bc.vl[i].dedicated =
						new_bc->vl[i].dedicated;
			}
		}

		wait_for_vl_status_clear(dd, ld_mask, "dedicated");

		/* now raise all dedicated that are going up */
		for (i = 0; i < NUM_USABLE_VLS; i++) {
			if (!valid_vl(i))
				continue;

			if (be16_to_cpu(new_bc->vl[i].dedicated) >
			    be16_to_cpu(cur_bc.vl[i].dedicated))
				set_vl_dedicated(dd, i,
						 be16_to_cpu(new_bc->
							     vl[i].dedicated));
		}
	}

	/* next raise all shared that are going up */
	for (i = 0; i < NUM_USABLE_VLS; i++) {
		if (!valid_vl(i))
			continue;

		if (be16_to_cpu(new_bc->vl[i].shared) >
		    be16_to_cpu(cur_bc.vl[i].shared))
			set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
	}

	/* finally raise the global shared */
	if (be16_to_cpu(new_bc->overall_shared_limit) >
	    be16_to_cpu(cur_bc.overall_shared_limit))
		set_global_shared(dd,
				  be16_to_cpu(new_bc->overall_shared_limit));

	/* bracket the credit change with a total adjustment */
	if (new_total < cur_total)
		set_global_limit(dd, new_total);

	/*
	 * Determine the actual number of operational VLS using the
	 * number of dedicated and shared credits for each VL, and
	 * remap the send engines if that set changed.
	 */
	if (change_count > 0) {
		for (i = 0; i < TXE_NUM_DATA_VL; i++)
			if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
			    be16_to_cpu(new_bc->vl[i].shared) > 0)
				vl_count++;
		ppd->actual_vls_operational = vl_count;
		ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
				    ppd->actual_vls_operational :
				    ppd->vls_operational,
				    NULL);
		if (ret == 0)
			ret = pio_map_init(dd, ppd->port - 1, vl_count ?
					   ppd->actual_vls_operational :
					   ppd->vls_operational, NULL);
		if (ret)
			return ret;
	}
	return 0;
}
11545
11546
11547
11548
11549
11550
/*
 * fm_get_table - read an FM-visible table into @t
 * @which: FM_TBL_* selector
 * @t: caller-supplied buffer, assumed large enough for the table
 *
 * Returns the table size in bytes, or -EINVAL for an unknown selector.
 * The arbitration/preemption tables report a fixed 256 bytes.
 */
int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)

{
	int size;
	struct vl_arb_cache *vlc;

	switch (which) {
	case FM_TBL_VL_HIGH_ARB:
		size = 256;
		/* served from the software cache, under its lock */
		vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
		vl_arb_get_cache(vlc, t);
		vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
		break;
	case FM_TBL_VL_LOW_ARB:
		size = 256;
		/* served from the software cache, under its lock */
		vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
		vl_arb_get_cache(vlc, t);
		vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
		break;
	case FM_TBL_BUFFER_CONTROL:
		size = get_buffer_control(ppd->dd, t, NULL);
		break;
	case FM_TBL_SC2VLNT:
		size = get_sc2vlnt(ppd->dd, t);
		break;
	case FM_TBL_VL_PREEMPT_ELEMS:
		size = 256;
		/* preemption is not supported: report "none" entries */
		get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
		break;
	case FM_TBL_VL_PREEMPT_MATRIX:
		size = 256;
		/*
		 * Nothing filled in here: the caller's buffer contents
		 * are returned unchanged for this table.
		 */
		break;
	default:
		return -EINVAL;
	}
	return size;
}
11601
11602
11603
11604
/*
 * fm_set_table - apply an FM-provided table @t
 * @which: FM_TBL_* selector
 *
 * VL arbitration tables are write-through cached: if @t matches the
 * cached copy the hardware write is skipped entirely; otherwise the
 * cache is updated under its lock and the weights are pushed to the
 * chip via set_vl_weights().
 *
 * Returns 0 on success or a negative errno.
 */
int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
{
	int ret = 0;
	struct vl_arb_cache *vlc;

	switch (which) {
	case FM_TBL_VL_HIGH_ARB:
		vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
		if (vl_arb_match_cache(vlc, t)) {
			vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
			break;	/* no change; skip the hardware write */
		}
		vl_arb_set_cache(vlc, t);
		vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
		ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
				     VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
		break;
	case FM_TBL_VL_LOW_ARB:
		vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
		if (vl_arb_match_cache(vlc, t)) {
			vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
			break;	/* no change; skip the hardware write */
		}
		vl_arb_set_cache(vlc, t);
		vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
		ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
				     VL_ARB_LOW_PRIO_TABLE_SIZE, t);
		break;
	case FM_TBL_BUFFER_CONTROL:
		ret = set_buffer_control(ppd, t);
		break;
	case FM_TBL_SC2VLNT:
		set_sc2vlnt(ppd->dd, t);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
11644
11645
11646
11647
11648
11649
11650static int disable_data_vls(struct hfi1_devdata *dd)
11651{
11652 if (is_ax(dd))
11653 return 1;
11654
11655 pio_send_control(dd, PSC_DATA_VL_DISABLE);
11656
11657 return 0;
11658}
11659
11660
11661
11662
11663
11664
11665
11666
11667
11668int open_fill_data_vls(struct hfi1_devdata *dd)
11669{
11670 if (is_ax(dd))
11671 return 1;
11672
11673 pio_send_control(dd, PSC_DATA_VL_ENABLE);
11674
11675 return 0;
11676}
11677
11678
11679
11680
11681
11682
/*
 * Wait for in-flight traffic on data VLs to finish: drain the send
 * contexts, then the SDMA engines, then pause for outstanding credit
 * returns.  Callers are expected to have disabled the data VLs first
 * (see stop_drain_data_vls()).
 */
static void drain_data_vls(struct hfi1_devdata *dd)
{
	sc_wait(dd);
	sdma_wait(dd);
	pause_for_credit_return(dd);
}
11689
11690
11691
11692
11693
11694
11695
11696
11697
11698
11699
/*
 * Disable the data VLs and, on success, drain all in-flight traffic.
 * Returns 0 on success; nonzero (from disable_data_vls()) when the
 * VLs could not be disabled, in which case nothing is drained.
 * Undo with open_fill_data_vls().
 */
int stop_drain_data_vls(struct hfi1_devdata *dd)
{
	int rc = disable_data_vls(dd);

	if (!rc)
		drain_data_vls(dd);

	return rc;
}
11710
11711
11712
11713
11714
11715u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11716{
11717 u32 cclocks;
11718
11719 if (dd->icode == ICODE_FPGA_EMULATION)
11720 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11721 else
11722 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11723 if (ns && !cclocks)
11724 cclocks = 1;
11725 return cclocks;
11726}
11727
11728
11729
11730
11731
11732u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11733{
11734 u32 ns;
11735
11736 if (dd->icode == ICODE_FPGA_EMULATION)
11737 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11738 else
11739 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11740 if (cclocks && !ns)
11741 ns = 1;
11742 return ns;
11743}
11744
11745
11746
11747
11748
11749
11750
/*
 * Dynamically adjust the receive interrupt mitigation timeout for a
 * context based on how many packets (@npkts) were handled this
 * interrupt: halve it under light load (fewer than rcv_intr_count
 * packets, floor of 1), double it under heavy load (capped at the
 * chip-supported maximum dd->rcv_intr_timeout_csr).  The new value is
 * written to the context's RcvAvailTimeOut reload field.
 */
static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 timeout = rcd->rcvavail_timeout;

	/*
	 * This algorithm doubles or halves the timeout depending on
	 * whether the number of packets received in this interrupt
	 * were less than or greater equal the interrupt count.
	 *
	 * The calculations below do not allow a steady state to be
	 * achieved.  Only at the endpoints is a steady state reached.
	 */
	if (npkts < rcv_intr_count) {
		/*
		 * lightly loaded: shrink the timeout, but never below
		 * the minimum of 1
		 */
		if (timeout < 2)	/* already at minimum */
			return;
		timeout >>= 1;
	} else {
		/*
		 * heavily loaded: grow the timeout, capped at the
		 * maximum the CSR field can hold
		 */
		if (timeout >= dd->rcv_intr_timeout_csr) /* at max */
			return;
		timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
	}

	rcd->rcvavail_timeout = timeout;
	/*
	 * timeout cannot be larger than rcv_intr_timeout_csr, which
	 * was already range-checked above, so this write is safe.
	 */
	write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
			(u64)timeout <<
			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
}
11792
/*
 * update_usrhead - update the receive head pointers for a context
 * @rcd: the receive context
 * @hd: new receive header queue head
 * @updegr: nonzero to also update the eager index head
 * @egrhd: new eager index head (used only when @updegr is set)
 * @intr_adjust: nonzero to adapt the interrupt mitigation timeout
 * @npkts: packets processed since the last update (for the adjustment)
 *
 * The RcvHdrHead write also reloads the interrupt mitigation packet
 * counter with rcv_intr_count.
 */
void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
		    u32 intr_adjust, u32 npkts)
{
	struct hfi1_devdata *dd = rcd->dd;
	u64 reg;
	u32 ctxt = rcd->ctxt;

	/*
	 * Need to write timeout register before updating RcvHdrHead to
	 * ensure that a new value is used when the HW decides to restart
	 * counting.
	 */
	if (intr_adjust)
		adjust_rcv_timeout(rcd, npkts);
	if (updegr) {
		reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
			<< RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
		write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
	}
	/* head update re-arms the mitigation counter as well */
	reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
		(((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
		 << RCV_HDR_HEAD_HEAD_SHIFT);
	write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
}
11816
11817u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11818{
11819 u32 head, tail;
11820
11821 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11822 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11823
11824 if (rcd->rcvhdrtail_kvaddr)
11825 tail = get_rcvhdrtail(rcd);
11826 else
11827 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11828
11829 return head == tail;
11830}
11831
11832
11833
11834
11835
11836
11837
11838
11839
11840
11841
11842
11843
11844
11845
11846
11847
11848
11849
11850
11851static u32 encoded_size(u32 size)
11852{
11853 switch (size) {
11854 case 4 * 1024: return 0x1;
11855 case 8 * 1024: return 0x2;
11856 case 16 * 1024: return 0x3;
11857 case 32 * 1024: return 0x4;
11858 case 64 * 1024: return 0x5;
11859 case 128 * 1024: return 0x6;
11860 case 256 * 1024: return 0x7;
11861 case 512 * 1024: return 0x8;
11862 case 1 * 1024 * 1024: return 0x9;
11863 case 2 * 1024 * 1024: return 0xa;
11864 }
11865 return 0x1;
11866}
11867
/*
 * hfi1_rcvctrl - apply receive-context control operations
 * @dd: the device
 * @op: bitmask of HFI1_RCVCTRL_* enable/disable operations
 * @rcd: the receive context (nothing is done when NULL)
 *
 * Builds the new RcvCtxtCtrl value from the requested bits and writes
 * it once.  Enabling a currently-disabled context additionally
 * initializes its header queue addresses, eager/TID setup, and head
 * registers; disabling parks the tail address at a dummy DMA buffer so
 * stray hardware tail writes land harmlessly.
 */
void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
		  struct hfi1_ctxtdata *rcd)
{
	u64 rcvctrl, reg;
	int did_enable = 0;
	u16 ctxt;

	if (!rcd)
		return;

	ctxt = rcd->ctxt;

	hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);

	rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
	/* full context init only on a disabled -> enabled transition */
	if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
	    !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
		/* reset the header queue (and tail shadow) addresses */
		write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
				rcd->rcvhdrq_dma);
		if (rcd->rcvhdrtail_kvaddr)
			write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
					rcd->rcvhdrqtailaddr_dma);
		rcd->seq_cnt = 1;

		/* reset the cached receive header queue head value */
		rcd->head = 0;

		/*
		 * Zero the receive header queue so we don't get false
		 * positives when checking the sequence number.  The
		 * sequence numbers could land exactly on the same spot.
		 * E.g. a rcd restart before the receive header wrapped.
		 */
		memset(rcd->rcvhdrq, 0, rcvhdrq_size(rcd));

		/* starting interrupt mitigation timeout */
		rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;

		/* enable the context */
		rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;

		/* clean and then set the eager buffer size encoding */
		rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
		rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
				& RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
			<< RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;

		/* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
		write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
		did_enable = 1;

		/* zero RcvEgrIndexHead */
		write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);

		/* set eager count and base index */
		reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
			& RCV_EGR_CTRL_EGR_CNT_MASK)
		       << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
		      (((rcd->eager_base >> RCV_SHIFT)
			& RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
		       << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
		write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);

		/*
		 * Set TID (expected) count and base index.
		 * NOTE(review): expected_count and expected_base are
		 * scaled by RCV_SHIFT before programming, the same way
		 * as the eager fields above — confirm the pair/group
		 * semantics against the chip register specification.
		 */
		reg = (((rcd->expected_count >> RCV_SHIFT)
					& RCV_TID_CTRL_TID_PAIR_CNT_MASK)
				<< RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
		      (((rcd->expected_base >> RCV_SHIFT)
					& RCV_TID_CTRL_TID_BASE_INDEX_MASK)
				<< RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
		write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
		if (ctxt == HFI1_CTRL_CTXT)
			write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
	}
	if (op & HFI1_RCVCTRL_CTXT_DIS) {
		write_csr(dd, RCV_VL15, 0);
		/*
		 * When receive context is being disabled turn on tail
		 * update with a dummy tail address and then disable
		 * receive context.
		 */
		if (dd->rcvhdrtail_dummy_dma) {
			write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
					dd->rcvhdrtail_dummy_dma);
			/* Enabling RcvCtxtCtrl.TailUpd is intentional. */
			rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
		}

		rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
	}
	if (op & HFI1_RCVCTRL_INTRAVAIL_ENB) {
		set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
			      IS_RCVAVAIL_START + rcd->ctxt, true);
		rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
	}
	if (op & HFI1_RCVCTRL_INTRAVAIL_DIS) {
		set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
			      IS_RCVAVAIL_START + rcd->ctxt, false);
		rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
	}
	if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr)
		rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
	if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
		/* See comment on RcvCtxtCtrl.TailUpd above */
		if (!(op & HFI1_RCVCTRL_CTXT_DIS))
			rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
	}
	if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
		rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
	if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
		rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
	if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
		/*
		 * In one-packet-per-eager mode, the size comes from
		 * the RcvArray entry, so clear the EgrBufSize field.
		 */
		rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
		rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
	}
	if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
		rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
	if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
		rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
	if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
		rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
	if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
		rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
	if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
		rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
	if (op & HFI1_RCVCTRL_URGENT_ENB)
		set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
			      IS_RCVURGENT_START + rcd->ctxt, true);
	if (op & HFI1_RCVCTRL_URGENT_DIS)
		set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
			      IS_RCVURGENT_START + rcd->ctxt, false);

	hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
	write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl);

	/* work around sticky RcvCtxtStatus.BlockedRHQFull */
	if (did_enable &&
	    (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
		reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
		if (reg != 0) {
			dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
				    ctxt, reg);
			/* dummy head bump/restore to unstick the status */
			read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
			write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
			write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
			read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
			reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
			dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
				    ctxt, reg, reg == 0 ? "not" : "still");
		}
	}

	if (did_enable) {
		/*
		 * The interrupt timeout and count must be set after
		 * the context is enabled to take effect.
		 */
		/* set interrupt timeout */
		write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
				(u64)rcd->rcvavail_timeout <<
				RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);

		/* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
		reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
		write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
	}

	if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
		/*
		 * If the context has been disabled and the Tail Update has
		 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to dummy
		 * address so it doesn't contain an address that is invalid.
		 */
		write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
				dd->rcvhdrtail_dummy_dma);
}
12055
/*
 * hfi1_read_cntrs - return device counter names or freshly-read values
 * @dd: device data
 * @namep: if non-NULL, receives the counter-names buffer and the return
 *	   value is its length in bytes (no hardware access is done)
 * @cntrp: otherwise receives dd->cntrs after every enabled device
 *	   counter (per-VL, per-SDMA-engine, or scalar) is re-read
 *
 * Returns the size in bytes of the returned buffer.
 */
u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
{
	int ret;
	u64 val = 0;

	if (namep) {
		ret = dd->cntrnameslen;
		*namep = dd->cntrnames;
	} else {
		const struct cntr_entry *entry;
		int i, j;

		ret = (dd->ndevcntrs) * sizeof(u64);

		/* Get the start of the block of counters */
		*cntrp = dd->cntrs;

		/*
		 * Now go and fill in each counter in the block.
		 */
		for (i = 0; i < DEV_CNTR_LAST; i++) {
			entry = &dev_cntrs[i];
			hfi1_cdbg(CNTR, "reading %s", entry->name);
			if (entry->flags & CNTR_DISABLED) {
				/* Nothing */
				hfi1_cdbg(CNTR, "\tDisabled\n");
			} else {
				if (entry->flags & CNTR_VL) {
					hfi1_cdbg(CNTR, "\tPer VL\n");
					for (j = 0; j < C_VL_COUNT; j++) {
						val = entry->rw_cntr(entry,
								     dd, j,
								     CNTR_MODE_R,
								     0);
						hfi1_cdbg(
							CNTR,
							"\t\tRead 0x%llx for %d\n",
							val, j);
						dd->cntrs[entry->offset + j] =
							val;
					}
				} else if (entry->flags & CNTR_SDMA) {
					hfi1_cdbg(CNTR,
						  "\t Per SDMA Engine\n");
					for (j = 0; j < chip_sdma_engines(dd);
					     j++) {
						val =
						entry->rw_cntr(entry, dd, j,
							       CNTR_MODE_R, 0);
						hfi1_cdbg(CNTR,
							  "\t\tRead 0x%llx for %d\n",
							  val, j);
						dd->cntrs[entry->offset + j] =
							val;
					}
				} else {
					/* scalar counter */
					val = entry->rw_cntr(entry, dd,
							CNTR_INVALID_VL,
							CNTR_MODE_R, 0);
					dd->cntrs[entry->offset] = val;
					hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
				}
			}
		}
	}
	return ret;
}
12123
12124
12125
12126
/*
 * hfi1_read_portcntrs - return port counter names or freshly-read values
 * @ppd: the port
 * @namep: if non-NULL, receives the port counter-names buffer and the
 *	   return value is its length in bytes (no hardware access)
 * @cntrp: otherwise receives ppd->cntrs after every enabled port counter
 *	   (per-VL or scalar) is re-read
 *
 * Returns the size in bytes of the returned buffer.
 */
u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
{
	int ret;
	u64 val = 0;

	if (namep) {
		ret = ppd->dd->portcntrnameslen;
		*namep = ppd->dd->portcntrnames;
	} else {
		const struct cntr_entry *entry;
		int i, j;

		ret = ppd->dd->nportcntrs * sizeof(u64);
		*cntrp = ppd->cntrs;

		for (i = 0; i < PORT_CNTR_LAST; i++) {
			entry = &port_cntrs[i];
			hfi1_cdbg(CNTR, "reading %s", entry->name);
			if (entry->flags & CNTR_DISABLED) {
				/* Nothing */
				hfi1_cdbg(CNTR, "\tDisabled\n");
				continue;
			}

			if (entry->flags & CNTR_VL) {
				hfi1_cdbg(CNTR, "\tPer VL");
				for (j = 0; j < C_VL_COUNT; j++) {
					val = entry->rw_cntr(entry, ppd, j,
							       CNTR_MODE_R,
							       0);
					hfi1_cdbg(
						CNTR,
						"\t\tRead 0x%llx for %d",
						val, j);
					ppd->cntrs[entry->offset + j] = val;
				}
			} else {
				/* scalar counter */
				val = entry->rw_cntr(entry, ppd,
						       CNTR_INVALID_VL,
						       CNTR_MODE_R,
						       0);
				ppd->cntrs[entry->offset] = val;
				hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
			}
		}
	}
	return ret;
}
12175
/*
 * Free all counter resources for a device: stop the synthetic-counter
 * timer, then release per-port counter arrays and per-cpu stats, the
 * device counter arrays, the name buffers, and the counter update
 * workqueue.  Safe to call on a partially-initialized device; freed
 * pointers are NULLed to guard against double free.
 */
static void free_cntrs(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int i;

	/* the timer may never have been set up; only stop it if it was */
	if (dd->synth_stats_timer.function)
		del_timer_sync(&dd->synth_stats_timer);
	ppd = (struct hfi1_pportdata *)(dd + 1);	/* ports follow dd */
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		kfree(ppd->cntrs);
		kfree(ppd->scntrs);
		free_percpu(ppd->ibport_data.rvp.rc_acks);
		free_percpu(ppd->ibport_data.rvp.rc_qacks);
		free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
		ppd->cntrs = NULL;
		ppd->scntrs = NULL;
		ppd->ibport_data.rvp.rc_acks = NULL;
		ppd->ibport_data.rvp.rc_qacks = NULL;
		ppd->ibport_data.rvp.rc_delayed_comp = NULL;
	}
	kfree(dd->portcntrnames);
	dd->portcntrnames = NULL;
	kfree(dd->cntrs);
	dd->cntrs = NULL;
	kfree(dd->scntrs);
	dd->scntrs = NULL;
	kfree(dd->cntrnames);
	dd->cntrnames = NULL;
	if (dd->update_cntr_wq) {
		destroy_workqueue(dd->update_cntr_wq);
		dd->update_cntr_wq = NULL;
	}
}
12209
/*
 * Read a device/port counter through its rw_cntr accessor, maintaining
 * the software-synthesized 64-bit value at *psval.
 *
 * For CNTR_SYNTH counters the value saturates: once *psval reaches
 * CNTR_MAX it sticks there without re-reading hardware.  CNTR_32BIT
 * counters carry a software upper half that is incremented whenever the
 * 32-bit hardware value is seen to have wrapped.
 *
 * Returns the (possibly synthesized) counter value, or 0 if the counter
 * is disabled.
 */
static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
			      u64 *psval, void *context, int vl)
{
	u64 val;
	u64 sval = *psval;

	if (entry->flags & CNTR_DISABLED) {
		dd_dev_err(dd, "Counter %s not enabled", entry->name);
		return 0;
	}

	hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);

	val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);

	/* If its a synthetic counter there is more work needed for it */
	if (entry->flags & CNTR_SYNTH) {
		if (sval == CNTR_MAX) {
			/* No need to read already saturated */
			return CNTR_MAX;
		}

		if (entry->flags & CNTR_32BIT) {
			/* 32bit counters can wrap multiple times */
			u64 upper = sval >> 32;
			u64 lower = (sval << 32) >> 32;

			if (lower > val) { /* hw counter rolled */
				if (upper == CNTR_32BIT_MAX)
					val = CNTR_MAX;
				else
					upper++;
			}

			if (val != CNTR_MAX)
				val = (upper << 32) | val;

		} else {
			/* If we rolled we are saturated */
			if ((val < sval) || (val > CNTR_MAX))
				val = CNTR_MAX;
		}
	}

	*psval = val;

	hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);

	return val;
}
12260
/*
 * Write a device/port counter through its rw_cntr accessor and update
 * the software shadow at *psval.
 *
 * For CNTR_SYNTH counters the full 64-bit @data becomes the shadow; a
 * CNTR_32BIT counter's hardware half receives only the low 32 bits of
 * @data.  Returns the value recorded in *psval, or 0 if the counter is
 * disabled.
 */
static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
			       struct cntr_entry *entry,
			       u64 *psval, void *context, int vl, u64 data)
{
	u64 val;

	if (entry->flags & CNTR_DISABLED) {
		dd_dev_err(dd, "Counter %s not enabled", entry->name);
		return 0;
	}

	hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);

	if (entry->flags & CNTR_SYNTH) {
		*psval = data;
		if (entry->flags & CNTR_32BIT) {
			/* hardware holds only the low 32 bits */
			val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
					     (data << 32) >> 32);
			val = data; /* return the full 64bit value */
		} else {
			val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
					     data);
		}
	} else {
		val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
	}

	*psval = val;

	hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);

	return val;
}
12294
12295u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
12296{
12297 struct cntr_entry *entry;
12298 u64 *sval;
12299
12300 entry = &dev_cntrs[index];
12301 sval = dd->scntrs + entry->offset;
12302
12303 if (vl != CNTR_INVALID_VL)
12304 sval += vl;
12305
12306 return read_dev_port_cntr(dd, entry, sval, dd, vl);
12307}
12308
12309u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
12310{
12311 struct cntr_entry *entry;
12312 u64 *sval;
12313
12314 entry = &dev_cntrs[index];
12315 sval = dd->scntrs + entry->offset;
12316
12317 if (vl != CNTR_INVALID_VL)
12318 sval += vl;
12319
12320 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
12321}
12322
12323u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
12324{
12325 struct cntr_entry *entry;
12326 u64 *sval;
12327
12328 entry = &port_cntrs[index];
12329 sval = ppd->scntrs + entry->offset;
12330
12331 if (vl != CNTR_INVALID_VL)
12332 sval += vl;
12333
12334 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12335 (index <= C_RCV_HDR_OVF_LAST)) {
12336
12337 return 0;
12338 }
12339
12340 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
12341}
12342
12343u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
12344{
12345 struct cntr_entry *entry;
12346 u64 *sval;
12347
12348 entry = &port_cntrs[index];
12349 sval = ppd->scntrs + entry->offset;
12350
12351 if (vl != CNTR_INVALID_VL)
12352 sval += vl;
12353
12354 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12355 (index <= C_RCV_HDR_OVF_LAST)) {
12356
12357 return 0;
12358 }
12359
12360 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
12361}
12362
/*
 * Workqueue body behind the synthetic-counter timer: decide whether the
 * full counter set must be re-read to keep the 64-bit software shadows
 * from missing a 32-bit hardware rollover, and do the re-read if so.
 */
static void do_update_synth_timer(struct work_struct *work)
{
	u64 cur_tx;
	u64 cur_rx;
	u64 total_flits;
	u8 update = 0;
	int i, j, vl;
	struct hfi1_pportdata *ppd;
	struct cntr_entry *entry;
	struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
					       update_cntr_work);

	/*
	 * Rather than keep beating on the CSRs pick a minimal set that we
	 * can check to watch for potential roll over. We can do this by
	 * looking at the number of flits sent/recv. If the total flits
	 * exceeds 32 bits then we have to iterate all the counters and
	 * update.
	 */
	entry = &dev_cntrs[C_DC_RCV_FLITS];
	cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);

	entry = &dev_cntrs[C_DC_XMIT_FLITS];
	cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);

	hfi1_cdbg(
	    CNTR,
	    "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
	    dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);

	if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
		/*
		 * The tripwire counter itself rolled.  May not be strictly
		 * necessary to update but it won't hurt and simplifies the
		 * logic here.
		 */
		update = 1;
		hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
			  dd->unit);
	} else {
		total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
		hfi1_cdbg(CNTR,
			  "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
			  total_flits, (u64)CNTR_32BIT_MAX);
		if (total_flits >= CNTR_32BIT_MAX) {
			hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
				  dd->unit);
			update = 1;
		}
	}

	if (update) {
		hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
		for (i = 0; i < DEV_CNTR_LAST; i++) {
			entry = &dev_cntrs[i];
			if (entry->flags & CNTR_VL) {
				for (vl = 0; vl < C_VL_COUNT; vl++)
					read_dev_cntr(dd, i, vl);
			} else {
				read_dev_cntr(dd, i, CNTR_INVALID_VL);
			}
		}
		ppd = (struct hfi1_pportdata *)(dd + 1);
		for (i = 0; i < dd->num_pports; i++, ppd++) {
			for (j = 0; j < PORT_CNTR_LAST; j++) {
				entry = &port_cntrs[j];
				if (entry->flags & CNTR_VL) {
					for (vl = 0; vl < C_VL_COUNT; vl++)
						read_port_cntr(ppd, j, vl);
				} else {
					read_port_cntr(ppd, j, CNTR_INVALID_VL);
				}
			}
		}

		/*
		 * We want the raw value in the register. The goal is to
		 * keep track of the number of actual flits without having
		 * to read the whole set of counters each time.
		 */
		entry = &dev_cntrs[C_DC_XMIT_FLITS];
		dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
					     CNTR_MODE_R, 0);

		entry = &dev_cntrs[C_DC_RCV_FLITS];
		dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
					     CNTR_MODE_R, 0);

		hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
			  dd->unit, dd->last_tx, dd->last_rx);

	} else {
		hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
	}
}
12457
/*
 * Timer callback: defer the (potentially slow, CSR-heavy) counter scan
 * to the ordered workqueue, then re-arm the timer.
 */
static void update_synth_timer(struct timer_list *t)
{
	struct hfi1_devdata *dd = from_timer(dd, t, synth_stats_timer);

	queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
}
12465
#define C_MAX_NAME 16 /* space to save for each counter name */
/*
 * Size and allocate the device and port counter arrays and their
 * newline-separated name strings, then start the synthetic-counter
 * machinery (workqueue + timer).  Returns 0 or -ENOMEM; on failure all
 * partial allocations are released via free_cntrs().
 */
static int init_cntrs(struct hfi1_devdata *dd)
{
	int i, rcv_ctxts, j;
	size_t sz;
	char *p;
	char name[C_MAX_NAME];
	struct hfi1_pportdata *ppd;
	const char *bit_type_32 = ",32";
	const int bit_type_32_sz = strlen(bit_type_32);
	u32 sdma_engines = chip_sdma_engines(dd);

	/* set up the stats timer; the add_timer is done at the end */
	timer_setup(&dd->synth_stats_timer, update_synth_timer, 0);

	/***********************/
	/* per device counters */
	/***********************/

	/* size names and determine how many we have */
	dd->ndevcntrs = 0;
	sz = 0;

	for (i = 0; i < DEV_CNTR_LAST; i++) {
		if (dev_cntrs[i].flags & CNTR_DISABLED) {
			hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
			continue;
		}

		if (dev_cntrs[i].flags & CNTR_VL) {
			dev_cntrs[i].offset = dd->ndevcntrs;
			for (j = 0; j < C_VL_COUNT; j++) {
				snprintf(name, C_MAX_NAME, "%s%d",
					 dev_cntrs[i].name, vl_from_idx(j));
				sz += strlen(name);
				/* Add ",32" for 32-bit counters */
				if (dev_cntrs[i].flags & CNTR_32BIT)
					sz += bit_type_32_sz;
				sz++;	/* newline */
				dd->ndevcntrs++;
			}
		} else if (dev_cntrs[i].flags & CNTR_SDMA) {
			dev_cntrs[i].offset = dd->ndevcntrs;
			for (j = 0; j < sdma_engines; j++) {
				snprintf(name, C_MAX_NAME, "%s%d",
					 dev_cntrs[i].name, j);
				sz += strlen(name);
				/* Add ",32" for 32-bit counters */
				if (dev_cntrs[i].flags & CNTR_32BIT)
					sz += bit_type_32_sz;
				sz++;	/* newline */
				dd->ndevcntrs++;
			}
		} else {
			/* +1 for newline. */
			sz += strlen(dev_cntrs[i].name) + 1;
			/* Add ",32" for 32-bit counters */
			if (dev_cntrs[i].flags & CNTR_32BIT)
				sz += bit_type_32_sz;
			dev_cntrs[i].offset = dd->ndevcntrs;
			dd->ndevcntrs++;
		}
	}

	/* allocate space for the counter values */
	dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64),
			    GFP_KERNEL);
	if (!dd->cntrs)
		goto bail;

	dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
	if (!dd->scntrs)
		goto bail;

	/* allocate space for the counter names */
	dd->cntrnameslen = sz;
	dd->cntrnames = kmalloc(sz, GFP_KERNEL);
	if (!dd->cntrnames)
		goto bail;

	/* fill in the names */
	for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
		if (dev_cntrs[i].flags & CNTR_DISABLED) {
			/* Nothing */
		} else if (dev_cntrs[i].flags & CNTR_VL) {
			for (j = 0; j < C_VL_COUNT; j++) {
				snprintf(name, C_MAX_NAME, "%s%d",
					 dev_cntrs[i].name,
					 vl_from_idx(j));
				memcpy(p, name, strlen(name));
				p += strlen(name);

				/* Counter is 32 bits */
				if (dev_cntrs[i].flags & CNTR_32BIT) {
					memcpy(p, bit_type_32, bit_type_32_sz);
					p += bit_type_32_sz;
				}

				*p++ = '\n';
			}
		} else if (dev_cntrs[i].flags & CNTR_SDMA) {
			for (j = 0; j < sdma_engines; j++) {
				snprintf(name, C_MAX_NAME, "%s%d",
					 dev_cntrs[i].name, j);
				memcpy(p, name, strlen(name));
				p += strlen(name);

				/* Counter is 32 bits */
				if (dev_cntrs[i].flags & CNTR_32BIT) {
					memcpy(p, bit_type_32, bit_type_32_sz);
					p += bit_type_32_sz;
				}

				*p++ = '\n';
			}
		} else {
			memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
			p += strlen(dev_cntrs[i].name);

			/* Counter is 32 bits */
			if (dev_cntrs[i].flags & CNTR_32BIT) {
				memcpy(p, bit_type_32, bit_type_32_sz);
				p += bit_type_32_sz;
			}

			*p++ = '\n';
		}
	}

	/*********************/
	/* per port counters */
	/*********************/

	/*
	 * Go through the counters for the overflows and disable the ones we
	 * don't need. This varies based on platform so we need to do it
	 * dynamically here.
	 */
	rcv_ctxts = dd->num_rcv_contexts;
	for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
	     i <= C_RCV_HDR_OVF_LAST; i++) {
		port_cntrs[i].flags |= CNTR_DISABLED;
	}

	/* size port counter names and determine how many we have */
	sz = 0;
	dd->nportcntrs = 0;
	for (i = 0; i < PORT_CNTR_LAST; i++) {
		if (port_cntrs[i].flags & CNTR_DISABLED) {
			hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
			continue;
		}

		if (port_cntrs[i].flags & CNTR_VL) {
			port_cntrs[i].offset = dd->nportcntrs;
			for (j = 0; j < C_VL_COUNT; j++) {
				snprintf(name, C_MAX_NAME, "%s%d",
					 port_cntrs[i].name, vl_from_idx(j));
				sz += strlen(name);
				/* Add ",32" for 32-bit counters */
				if (port_cntrs[i].flags & CNTR_32BIT)
					sz += bit_type_32_sz;
				sz++;	/* newline */
				dd->nportcntrs++;
			}
		} else {
			/* +1 for newline */
			sz += strlen(port_cntrs[i].name) + 1;
			/* Add ",32" for 32-bit counters */
			if (port_cntrs[i].flags & CNTR_32BIT)
				sz += bit_type_32_sz;
			port_cntrs[i].offset = dd->nportcntrs;
			dd->nportcntrs++;
		}
	}

	/* allocate space for the counter names */
	dd->portcntrnameslen = sz;
	dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
	if (!dd->portcntrnames)
		goto bail;

	/* fill in port cntr names */
	for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
		if (port_cntrs[i].flags & CNTR_DISABLED)
			continue;

		if (port_cntrs[i].flags & CNTR_VL) {
			for (j = 0; j < C_VL_COUNT; j++) {
				snprintf(name, C_MAX_NAME, "%s%d",
					 port_cntrs[i].name, vl_from_idx(j));
				memcpy(p, name, strlen(name));
				p += strlen(name);

				/* Counter is 32 bits */
				if (port_cntrs[i].flags & CNTR_32BIT) {
					memcpy(p, bit_type_32, bit_type_32_sz);
					p += bit_type_32_sz;
				}

				*p++ = '\n';
			}
		} else {
			memcpy(p, port_cntrs[i].name,
			       strlen(port_cntrs[i].name));
			p += strlen(port_cntrs[i].name);

			/* Counter is 32 bits */
			if (port_cntrs[i].flags & CNTR_32BIT) {
				memcpy(p, bit_type_32, bit_type_32_sz);
				p += bit_type_32_sz;
			}

			*p++ = '\n';
		}
	}

	/* allocate per port storage for counter values */
	ppd = (struct hfi1_pportdata *)(dd + 1);
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
		if (!ppd->cntrs)
			goto bail;

		ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
		if (!ppd->scntrs)
			goto bail;
	}

	/* CPU counters need to be allocated and zeroed */
	if (init_cpu_counters(dd))
		goto bail;

	dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
						     WQ_MEM_RECLAIM, dd->unit);
	if (!dd->update_cntr_wq)
		goto bail;

	INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);

	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
	return 0;
bail:
	free_cntrs(dd);
	return -ENOMEM;
}
12712
/*
 * Convert a chip logical link state (LSTATE_*) to the OPA/IB logical
 * port state.  Unknown values are reported and treated as IB_PORT_DOWN.
 */
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
{
	switch (chip_lstate) {
	default:
		dd_dev_err(dd,
			   "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
			   chip_lstate);
		/* fall through */
	case LSTATE_DOWN:
		return IB_PORT_DOWN;
	case LSTATE_INIT:
		return IB_PORT_INIT;
	case LSTATE_ARMED:
		return IB_PORT_ARMED;
	case LSTATE_ACTIVE:
		return IB_PORT_ACTIVE;
	}
}
12731
/*
 * Convert a chip physical link state (PLS_*) to the OPA/IB physical
 * port state.  Only the major state (upper nibble) is considered;
 * unexpected values are reported and treated as disabled.
 */
u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
{
	/* look at the major state to decide */
	switch (chip_pstate & 0xf0) {
	default:
		dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
			   chip_pstate);
		/* fall through */
	case PLS_DISABLED:
		return IB_PORTPHYSSTATE_DISABLED;
	case PLS_OFFLINE:
		return OPA_PORTPHYSSTATE_OFFLINE;
	case PLS_POLLING:
		return IB_PORTPHYSSTATE_POLLING;
	case PLS_CONFIGPHY:
		return IB_PORTPHYSSTATE_TRAINING;
	case PLS_LINKUP:
		return IB_PORTPHYSSTATE_LINKUP;
	case PLS_PHYTEST:
		return IB_PORTPHYSSTATE_PHY_TEST;
	}
}
12754
12755
12756const char *opa_lstate_name(u32 lstate)
12757{
12758 static const char * const port_logical_names[] = {
12759 "PORT_NOP",
12760 "PORT_DOWN",
12761 "PORT_INIT",
12762 "PORT_ARMED",
12763 "PORT_ACTIVE",
12764 "PORT_ACTIVE_DEFER",
12765 };
12766 if (lstate < ARRAY_SIZE(port_logical_names))
12767 return port_logical_names[lstate];
12768 return "unknown";
12769}
12770
12771
12772const char *opa_pstate_name(u32 pstate)
12773{
12774 static const char * const port_physical_names[] = {
12775 "PHYS_NOP",
12776 "reserved1",
12777 "PHYS_POLL",
12778 "PHYS_DISABLED",
12779 "PHYS_TRAINING",
12780 "PHYS_LINKUP",
12781 "PHYS_LINK_ERR_RECOVER",
12782 "PHYS_PHY_TEST",
12783 "reserved8",
12784 "PHYS_OFFLINE",
12785 "PHYS_GANGED",
12786 "PHYS_TEST",
12787 };
12788 if (pstate < ARRAY_SIZE(port_physical_names))
12789 return port_physical_names[pstate];
12790 return "unknown";
12791}
12792
12793
12794
12795
12796
12797
12798
12799
12800
12801
12802
12803
/*
 * update_statusp - mirror the logical link state into the status page
 * @ppd: port data structure
 * @state: new OPA logical port state
 *
 * Sets/clears the IB_CONF and IB_READY flags in the page mapped into
 * userspace (if one exists), then logs the transition.
 */
static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
{
	/*
	 * Set port status flags in the page mapped into userspace
	 * memory.  Always set the flags due to the fact that the cache
	 * value might have been changed explicitly outside of this
	 * function.
	 */
	if (ppd->statusp) {
		switch (state) {
		case IB_PORT_DOWN:
		case IB_PORT_INIT:
			*ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
					   HFI1_STATUS_IB_READY);
			break;
		case IB_PORT_ARMED:
			*ppd->statusp |= HFI1_STATUS_IB_CONF;
			break;
		case IB_PORT_ACTIVE:
			*ppd->statusp |= HFI1_STATUS_IB_READY;
			break;
		}
	}
	dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
		    opa_lstate_name(state), state);
}
12832
12833
12834
12835
12836
12837
12838
12839
12840
12841
12842
12843static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12844 int msecs)
12845{
12846 unsigned long timeout;
12847 u32 new_state;
12848
12849 timeout = jiffies + msecs_to_jiffies(msecs);
12850 while (1) {
12851 new_state = chip_to_opa_lstate(ppd->dd,
12852 read_logical_state(ppd->dd));
12853 if (new_state == state)
12854 break;
12855 if (time_after(jiffies, timeout)) {
12856 dd_dev_err(ppd->dd,
12857 "timeout waiting for link state 0x%x\n",
12858 state);
12859 return -ETIMEDOUT;
12860 }
12861 msleep(20);
12862 }
12863
12864 return 0;
12865}
12866
/* Log a physical state change, reporting both OPA and raw chip values. */
static void log_state_transition(struct hfi1_pportdata *ppd, u32 state)
{
	u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state);

	dd_dev_info(ppd->dd,
		    "physical state changed to %s (0x%x), phy 0x%x\n",
		    opa_pstate_name(ib_pstate), ib_pstate, state);
}
12875
12876
12877
12878
12879
12880static void log_physical_state(struct hfi1_pportdata *ppd, u32 state)
12881{
12882 u32 read_state = read_physical_state(ppd->dd);
12883
12884 if (read_state == state) {
12885 log_state_transition(ppd, state);
12886 } else {
12887 dd_dev_err(ppd->dd,
12888 "anticipated phy link state 0x%x, read 0x%x\n",
12889 state, read_state);
12890 }
12891}
12892
12893
12894
12895
12896
12897
12898
12899
12900
12901
12902static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12903 int msecs)
12904{
12905 u32 read_state;
12906 unsigned long timeout;
12907
12908 timeout = jiffies + msecs_to_jiffies(msecs);
12909 while (1) {
12910 read_state = read_physical_state(ppd->dd);
12911 if (read_state == state)
12912 break;
12913 if (time_after(jiffies, timeout)) {
12914 dd_dev_err(ppd->dd,
12915 "timeout waiting for phy link state 0x%x\n",
12916 state);
12917 return -ETIMEDOUT;
12918 }
12919 usleep_range(1950, 2050);
12920 }
12921
12922 log_state_transition(ppd, state);
12923 return 0;
12924}
12925
12926
12927
12928
12929
12930
12931
12932
12933
12934
12935static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
12936 int msecs)
12937{
12938 u32 read_state;
12939 unsigned long timeout;
12940
12941 timeout = jiffies + msecs_to_jiffies(msecs);
12942 while (1) {
12943 read_state = read_physical_state(ppd->dd);
12944 if ((read_state & 0xF0) == PLS_OFFLINE)
12945 break;
12946 if (time_after(jiffies, timeout)) {
12947 dd_dev_err(ppd->dd,
12948 "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n",
12949 read_state, msecs);
12950 return -ETIMEDOUT;
12951 }
12952 usleep_range(1950, 2050);
12953 }
12954
12955 log_state_transition(ppd, read_state);
12956 return read_state;
12957}
12958
12959
12960
12961
12962
12963
12964
12965
12966
12967
12968static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
12969 int msecs)
12970{
12971 u32 read_state;
12972 unsigned long timeout;
12973
12974 timeout = jiffies + msecs_to_jiffies(msecs);
12975 while (1) {
12976 read_state = read_physical_state(ppd->dd);
12977 if ((read_state & 0xF0) != PLS_OFFLINE)
12978 break;
12979 if (time_after(jiffies, timeout)) {
12980 dd_dev_err(ppd->dd,
12981 "timeout waiting for phy link out of offline. Read state 0x%x, %dms\n",
12982 read_state, msecs);
12983 return -ETIMEDOUT;
12984 }
12985 usleep_range(1950, 2050);
12986 }
12987
12988 log_state_transition(ppd, read_state);
12989 return read_state;
12990}
12991
12992#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12993(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12994
12995#define SET_STATIC_RATE_CONTROL_SMASK(r) \
12996(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12997
12998void hfi1_init_ctxt(struct send_context *sc)
12999{
13000 if (sc) {
13001 struct hfi1_devdata *dd = sc->dd;
13002 u64 reg;
13003 u8 set = (sc->type == SC_USER ?
13004 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
13005 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
13006 reg = read_kctxt_csr(dd, sc->hw_context,
13007 SEND_CTXT_CHECK_ENABLE);
13008 if (set)
13009 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
13010 else
13011 SET_STATIC_RATE_CONTROL_SMASK(reg);
13012 write_kctxt_csr(dd, sc->hw_context,
13013 SEND_CTXT_CHECK_ENABLE, reg);
13014 }
13015}
13016
/*
 * hfi1_tempsense_rd - read the thermal sensor
 * @dd: device data
 * @temp: output; filled with the current temperature reading and the
 *	  low/high/critical limits decoded from ASIC_STS_THERM
 *
 * Only supported on RTL silicon; returns -EINVAL otherwise.
 */
int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
{
	int ret = 0;
	u64 reg;

	if (dd->icode != ICODE_RTL_SILICON) {
		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
			dd_dev_info(dd, "%s: tempsense not supported by HW\n",
				    __func__);
		return -EINVAL;
	}
	reg = read_csr(dd, ASIC_STS_THERM);
	temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
		      ASIC_STS_THERM_CURR_TEMP_MASK);
	temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
			ASIC_STS_THERM_LO_TEMP_MASK);
	temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
			ASIC_STS_THERM_HI_TEMP_MASK);
	temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
			  ASIC_STS_THERM_CRIT_TEMP_MASK);
	/* triggers is a 3-bit value - 1 bit per threshold crossed */
	temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);

	return ret;
}
13042
13043
13044
13045
13046
13047
13048
13049
13050
13051
13052
/*
 * read_mod_write - read, modify, and write back the CCE_INT_MASK
 * register covering interrupt source @src
 * @dd: valid devdata
 * @src: interrupt source used to select the mask register
 * @bits: bits to set or clear in that register
 * @set: true == set @bits, false == clear @bits
 *
 * Serialized against other mask updates with irq_src_lock.
 */
static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits,
			   bool set)
{
	u64 reg;
	u16 idx = src / BITS_PER_REGISTER;

	spin_lock(&dd->irq_src_lock);
	reg = read_csr(dd, CCE_INT_MASK + (8 * idx));
	if (set)
		reg |= bits;
	else
		reg &= ~bits;
	write_csr(dd, CCE_INT_MASK + (8 * idx), reg);
	spin_unlock(&dd->irq_src_lock);
}
13068
13069
13070
13071
13072
13073
13074
13075
13076
13077
/**
 * set_intr_bits() - Enable/disable a range (one or more) IRQ sources
 * @dd: valid devdata
 * @first: first IRQ source to set/clear
 * @last: last IRQ source (inclusive) to set/clear
 * @set: true == set the bits, false == clear the bits
 *
 * Accumulates bits for one mask register at a time and flushes each
 * time the source index wraps to the next 64-bit register.
 * If first == last, set the exact source.
 */
int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set)
{
	u64 bits = 0;
	u64 bit;
	u16 src;

	if (first > NUM_INTERRUPT_SOURCES || last > NUM_INTERRUPT_SOURCES)
		return -EINVAL;

	if (last < first)
		return -ERANGE;

	for (src = first; src <= last; src++) {
		bit = src % BITS_PER_REGISTER;
		/* wrapped to next register? flush the accumulated bits */
		if (!bit && bits) {
			read_mod_write(dd, src - 1, bits, set);
			bits = 0;
		}
		bits |= BIT_ULL(bit);
	}
	read_mod_write(dd, last, bits, set);

	return 0;
}
13103
13104
13105
13106
/*
 * Clear all pending interrupt and error state in the chip: the general
 * interrupt clear registers, each block's error-clear CSR (CCE, MISC,
 * RXE, TXE send/pio/sdma/egress, per-send-context, per-SDMA-engine),
 * and the DC error flags.
 */
void clear_all_interrupts(struct hfi1_devdata *dd)
{
	int i;

	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
		write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);

	write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
	write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
	write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
	write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
	write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
	write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
	write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
	for (i = 0; i < chip_send_contexts(dd); i++)
		write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
	for (i = 0; i < chip_sdma_engines(dd); i++)
		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);

	write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
	write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
	write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
}
13130
13131
13132
13133
13134
/*
 * Remap the interrupt source from the general handler to the given
 * MSI-X interrupt.
 */
void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
{
	u64 reg;
	int m, n;

	/* clear from the handled mask of the general interrupt */
	m = isrc / 64;
	n = isrc % 64;
	if (likely(m < CCE_NUM_INT_CSRS)) {
		dd->gi_mask[m] &= ~((u64)1 << n);
	} else {
		dd_dev_err(dd, "remap interrupt err\n");
		return;
	}

	/* direct the chip source to the given MSI-X interrupt */
	/* each CCE_INT_MAP CSR holds eight 8-bit map entries */
	m = isrc / 8;
	n = isrc % 8;
	reg = read_csr(dd, CCE_INT_MAP + (8 * m));
	reg &= ~((u64)0xff << (8 * n));
	reg |= ((u64)msix_intr & 0xff) << (8 * n);
	write_csr(dd, CCE_INT_MAP + (8 * m), reg);
}
13158
/*
 * Route all three interrupt sources of an SDMA engine to the given
 * MSI-X interrupt.
 */
void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr)
{
	/*
	 * SDMA engine interrupt sources grouped by type, per engine:
	 *	SDMA
	 *	SDMAProgress
	 *	SDMAIdle
	 */
	remap_intr(dd, IS_SDMA_START + engine, msix_intr);
	remap_intr(dd, IS_SDMA_PROGRESS_START + engine, msix_intr);
	remap_intr(dd, IS_SDMA_IDLE_START + engine, msix_intr);
}
13172
13173
13174
13175
13176
/*
 * Reset interrupt routing: all sources handled by the general
 * interrupt handler, and all chip interrupt maps cleared.
 */
void reset_interrupts(struct hfi1_devdata *dd)
{
	int i;

	/* all interrupts handled by the general handler */
	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
		dd->gi_mask[i] = ~(u64)0;

	/* all chip interrupts map to MSI-X 0 */
	for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
		write_csr(dd, CCE_INT_MAP + (8 * i), 0);
}
13189
13190
13191
13192
13193
13194
/*
 * Set up the chip interrupt infrastructure: mask and clear every
 * source, reset routing, then allocate MSI-X vectors and request the
 * IRQs.  Returns 0 on success or a negative errno from MSI-X setup.
 */
static int set_up_interrupts(struct hfi1_devdata *dd)
{
	int ret;

	/* mask all interrupts */
	set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);

	/* clear all pending interrupts */
	clear_all_interrupts(dd);

	/* reset general handler mask, chip MSI-X mappings */
	reset_interrupts(dd);

	/* ask for MSI-X interrupts */
	ret = msix_initialize(dd);
	if (ret)
		return ret;

	ret = msix_request_irqs(dd);
	if (ret)
		msix_clean_up_interrupts(dd);

	return ret;
}
13219
13220
13221
13222
13223
13224
13225
13226
13227
13228
13229
13230
/*
 * Set up receive/send context counts and RcvArray group sizing:
 * decide how many kernel, VNIC, and user receive contexts to use
 * (bounded by the chip's send/receive context counts and the RMT
 * size), record the results in dd, then size the PIO send context
 * pools.  Returns 0 on success, negative on error.
 */
static int set_up_context_variables(struct hfi1_devdata *dd)
{
	unsigned long num_kernel_contexts;
	u16 num_vnic_contexts = HFI1_NUM_VNIC_CTXT;
	int total_contexts;
	int ret;
	unsigned ngroups;
	int rmt_count;
	int user_rmt_reduced;
	u32 n_usr_ctxts;
	u32 send_contexts = chip_send_contexts(dd);
	u32 rcv_contexts = chip_rcv_contexts(dd);

	/*
	 * Kernel receive contexts:
	 * - Context 0 - control context (VL15/multicast/error)
	 * - Context 1 - first kernel context
	 * - Context 2 - second kernel context
	 * ...
	 */
	if (n_krcvqs)
		/*
		 * n_krcvqs is the sum of module parameter kernel receive
		 * contexts, krcvqs[].  It does not include the control
		 * context, so add that.
		 */
		num_kernel_contexts = n_krcvqs + 1;
	else
		num_kernel_contexts = DEFAULT_KRCVQS + 1;
	/*
	 * Every kernel receive context needs an ACK send context.
	 * One send context is allocated for each VL{0-7} and VL15.
	 */
	if (num_kernel_contexts > (send_contexts - num_vls - 1)) {
		dd_dev_err(dd,
			   "Reducing # kernel rcv contexts to: %d, from %lu\n",
			   send_contexts - num_vls - 1,
			   num_kernel_contexts);
		num_kernel_contexts = send_contexts - num_vls - 1;
	}

	/* Accommodate VNIC contexts if possible */
	if ((num_kernel_contexts + num_vnic_contexts) > rcv_contexts) {
		dd_dev_err(dd, "No receive contexts available for VNIC\n");
		num_vnic_contexts = 0;
	}
	total_contexts = num_kernel_contexts + num_vnic_contexts;

	/*
	 * User contexts:
	 *	- default to 1 user context per real (non-HT) CPU core if
	 *	  num_user_contexts is negative
	 */
	if (num_user_contexts < 0)
		n_usr_ctxts = cpumask_weight(&node_affinity.real_cpu_mask);
	else
		n_usr_ctxts = num_user_contexts;

	/*
	 * Adjust the counts given a global max.
	 */
	if (total_contexts + n_usr_ctxts > rcv_contexts) {
		dd_dev_err(dd,
			   "Reducing # user receive contexts to: %d, from %u\n",
			   rcv_contexts - total_contexts,
			   n_usr_ctxts);
		/* recalculate */
		n_usr_ctxts = rcv_contexts - total_contexts;
	}

	/*
	 * The RMT entries are currently allocated as shown below:
	 * 1. QOS (0 to 128 entries);
	 * 2. FECN (num_kernel_context - 1 + num_user_contexts +
	 *    num_vnic_contexts);
	 * 3. VNIC (num_vnic_contexts).
	 * It should be noted that FECN oversubscribes num_vnic_contexts
	 * entries of RMT because both VNIC and PSM could allocate any receive
	 * context between dd->first_dyn_alloc_ctxt and dd->num_rcv_contexts,
	 * and PSM FECN must reserve an RMT entry for each possible PSM
	 * receive context.
	 */
	rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2);
	if (HFI1_CAP_IS_KSET(TID_RDMA))
		rmt_count += num_kernel_contexts - 1;
	if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
		user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
		dd_dev_err(dd,
			   "RMT size is reducing the number of user receive contexts from %u to %d\n",
			   n_usr_ctxts,
			   user_rmt_reduced);
		/* recalculate */
		n_usr_ctxts = user_rmt_reduced;
	}

	total_contexts += n_usr_ctxts;

	/* the first N are kernel contexts, the rest are user/vnic contexts */
	dd->num_rcv_contexts = total_contexts;
	dd->n_krcv_queues = num_kernel_contexts;
	dd->first_dyn_alloc_ctxt = num_kernel_contexts;
	dd->num_vnic_contexts = num_vnic_contexts;
	dd->num_user_contexts = n_usr_ctxts;
	dd->freectxts = n_usr_ctxts;
	dd_dev_info(dd,
		    "rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n",
		    rcv_contexts,
		    (int)dd->num_rcv_contexts,
		    (int)dd->n_krcv_queues,
		    dd->num_vnic_contexts,
		    dd->num_user_contexts);

	/*
	 * Receive array allocation:
	 *   All RcvArray entries are divided into groups of 8. This
	 *   is required by the hardware and will speed up writes to
	 *   consecutive entries by using write-combining of the entire
	 *   cacheline.
	 *
	 *   The number of groups are evenly divided among all contexts.
	 *   any left over groups will be given to the first N user
	 *   contexts.
	 */
	dd->rcv_entries.group_size = RCV_INCREMENT;
	ngroups = chip_rcv_array_count(dd) / dd->rcv_entries.group_size;
	dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
	dd->rcv_entries.nctxt_extra = ngroups -
		(dd->num_rcv_contexts * dd->rcv_entries.ngroups);
	dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
		    dd->rcv_entries.ngroups,
		    dd->rcv_entries.nctxt_extra);
	if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
	    MAX_EAGER_ENTRIES * 2) {
		dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
			dd->rcv_entries.group_size;
		dd_dev_info(dd,
			    "RcvArray group count too high, change to %u\n",
			    dd->rcv_entries.ngroups);
		dd->rcv_entries.nctxt_extra = 0;
	}

	/*
	 * PIO send contexts
	 */
	ret = init_sc_pools_and_sizes(dd);
	if (ret >= 0) {	/* success */
		dd->num_send_contexts = ret;
		dd_dev_info(
			dd,
			"send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
			send_contexts,
			dd->num_send_contexts,
			dd->sc_sizes[SC_KERNEL].count,
			dd->sc_sizes[SC_ACK].count,
			dd->sc_sizes[SC_USER].count,
			dd->sc_sizes[SC_VL15].count);
		ret = 0;	/* success */
	}

	return ret;
}
13390
13391
13392
13393
13394
13395
/*
 * Write the whole partition key table from ppd->pkeys, packing four
 * 16-bit keys per 64-bit CSR, then enable hardware pkey checking.
 */
static void set_partition_keys(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 reg = 0;
	int i;

	dd_dev_info(dd, "Setting partition keys\n");
	for (i = 0; i < hfi1_get_npkeys(dd); i++) {
		reg |= (ppd->pkeys[i] &
			RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
			((i % 4) *
			 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
		/* Each register holds 4 PKey values. */
		if ((i % 4) == 3) {
			write_csr(dd, RCV_PARTITION_KEY +
				  ((i - 3) * 2), reg);
			reg = 0;
		}
	}

	/* Always enable HW pkeys check when pkeys table is set */
	add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
}
13419
13420
13421
13422
13423
13424
13425
13426
13427
/*
 * These CSRs and memories are uninitialized on reset and must be
 * written before reading to set the ECC/parity bits.
 *
 * NOTE: All user context CSRs that are not mmaped write-only
 * (e.g. the TID flows) must be initialized even if the driver never
 * reads them.
 */
static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
{
	int i, j;

	/* CceIntMap */
	for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
		write_csr(dd, CCE_INT_MAP + (8 * i), 0);

	/* SendCtxtCreditReturnAddr */
	for (i = 0; i < chip_send_contexts(dd); i++)
		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);

	/*
	 * PIO Send buffers
	 * SDMA Send buffers
	 *
	 * These are not normally read, and (presently) have no method
	 * to be read, so are not pre-initialized.
	 */

	/* RcvHdrAddr */
	/* RcvHdrTailAddr */
	/* RcvTidFlowTable */
	for (i = 0; i < chip_rcv_contexts(dd); i++) {
		write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
		write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
		for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
			write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
	}

	/* RcvArray */
	for (i = 0; i < chip_rcv_array_count(dd); i++)
		hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0);

	/* RcvQPMapTable */
	for (i = 0; i < 32; i++)
		write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
}
13465
13466
13467
13468
/*
 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
 * Polls for up to CCE_STATUS_TIMEOUT ms; logs an error on timeout.
 */
static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
			     u64 ctrl_bits)
{
	unsigned long timeout;
	u64 reg;

	/* is the condition present? */
	reg = read_csr(dd, CCE_STATUS);
	if ((reg & status_bits) == 0)
		return;

	/* clear the condition */
	write_csr(dd, CCE_CTRL, ctrl_bits);

	/* wait for the status bits to clear */
	timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
	while (1) {
		reg = read_csr(dd, CCE_STATUS);
		if ((reg & status_bits) == 0)
			return;
		if (time_after(jiffies, timeout)) {
			dd_dev_err(dd,
				   "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
				   status_bits, reg & status_bits);
			return;
		}
		udelay(1);
	}
}
13498
13499
13500static void reset_cce_csrs(struct hfi1_devdata *dd)
13501{
13502 int i;
13503
13504
13505
13506
13507
13508 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13509 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13510 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13511 for (i = 0; i < CCE_NUM_SCRATCH; i++)
13512 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13513
13514 write_csr(dd, CCE_ERR_MASK, 0);
13515 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13516
13517 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13518 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13519 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13520
13521 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13522 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13523 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
13524 CCE_MSIX_TABLE_UPPER_RESETCSR);
13525 }
13526 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13527
13528 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13529 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13530 }
13531 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13532 write_csr(dd, CCE_INT_MAP, 0);
13533 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13534
13535 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13536 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13537
13538
13539 }
13540 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13541 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13542}
13543
13544
/* set MISC block CSRs to chip reset defaults */
static void reset_misc_csrs(struct hfi1_devdata *dd)
{
	int i;

	for (i = 0; i < 32; i++) {
		write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
		write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
		write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
	}
	/*
	 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
	 * only be written 128-byte chunks.
	 */
	/* init RSA engine to clear lingering errors */
	write_csr(dd, MISC_CFG_RSA_CMD, 1);
	write_csr(dd, MISC_CFG_RSA_MU, 0);
	write_csr(dd, MISC_CFG_FW_CTRL, 0);
	/* MISC_STS_8051_DIGEST read-only */
	/* MISC_STS_SBM_DIGEST read-only */
	/* MISC_STS_PCIE_DIGEST read-only */
	/* MISC_STS_FAB_DIGEST read-only */
	/* MISC_ERR_STATUS read-only */
	write_csr(dd, MISC_ERR_MASK, 0);
	write_csr(dd, MISC_ERR_CLEAR, ~0ull);
	/* MISC_ERR_FORCE leave alone */
}
13571
13572
/* set TXE block CSRs to chip reset defaults */
static void reset_txe_csrs(struct hfi1_devdata *dd)
{
	int i;

	/*
	 * TXE Kernel CSRs
	 */
	write_csr(dd, SEND_CTRL, 0);
	__cm_reset(dd, 0);	/* reset CM internal state */
	/* SEND_CONTEXTS read-only */
	/* SEND_DMA_ENGINES read-only */
	/* SEND_PIO_MEM_SIZE read-only */
	/* SEND_DMA_MEM_SIZE read-only */
	write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
	pio_reset_all(dd);	/* SEND_PIO_INIT_CTXT */
	/* SEND_PIO_ERR_STATUS read-only */
	write_csr(dd, SEND_PIO_ERR_MASK, 0);
	write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
	/* SEND_PIO_ERR_FORCE leave alone */
	/* SEND_DMA_ERR_STATUS read-only */
	write_csr(dd, SEND_DMA_ERR_MASK, 0);
	write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
	/* SEND_DMA_ERR_FORCE leave alone */
	/* SEND_EGRESS_ERR_STATUS read-only */
	write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
	write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
	/* SEND_EGRESS_ERR_FORCE leave alone */
	write_csr(dd, SEND_BTH_QP, 0);
	write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
	write_csr(dd, SEND_SC2VLT0, 0);
	write_csr(dd, SEND_SC2VLT1, 0);
	write_csr(dd, SEND_SC2VLT2, 0);
	write_csr(dd, SEND_SC2VLT3, 0);
	write_csr(dd, SEND_LEN_CHECK0, 0);
	write_csr(dd, SEND_LEN_CHECK1, 0);
	/* SEND_ERR_STATUS read-only */
	write_csr(dd, SEND_ERR_MASK, 0);
	write_csr(dd, SEND_ERR_CLEAR, ~0ull);
	/* SEND_ERR_FORCE read-only */
	for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
		write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
	for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
		write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
	for (i = 0; i < chip_send_contexts(dd) / NUM_CONTEXTS_PER_SET; i++)
		write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
	for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
		write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
	for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
		write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
	write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
	write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
	/* SEND_CM_CREDIT_USED_STATUS read-only */
	write_csr(dd, SEND_CM_TIMER_CTRL, 0);
	write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
	write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
	write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
	write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
	for (i = 0; i < TXE_NUM_DATA_VL; i++)
		write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
	write_csr(dd, SEND_CM_CREDIT_VL15, 0);
	/* SEND_CM_CREDIT_USED_VL read-only */
	/* SEND_CM_CREDIT_USED_VL15 read-only */
	/* SEND_EGRESS_CTXT_STATUS read-only */
	/* SEND_EGRESS_SEND_DMA_STATUS read-only */
	write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
	/* SEND_EGRESS_ERR_INFO reads 0 after write */
	/* SEND_EGRESS_ERR_SOURCE read-only */

	/*
	 * TXE Per-Context CSRs
	 */
	for (i = 0; i < chip_send_contexts(dd); i++) {
		write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
	}

	/*
	 * TXE Per-SDMA CSRs
	 */
	for (i = 0; i < chip_sdma_engines(dd); i++) {
		write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
		/* SEND_DMA_STATUS read-only */
		write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
		write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
		write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
		/* SEND_DMA_HEAD read-only */
		write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
		write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
		/* SEND_DMA_IDLE_CNT read-only */
		write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
		write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
		/* SEND_DMA_DESC_FETCHED_CNT read-only */
		/* SEND_DMA_ENG_ERR_STATUS read-only */
		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
		/* SEND_DMA_ENG_ERR_FORCE leave alone */
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
		write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
	}
}
13688
13689
13690
13691
13692
/*
 * Initialize the receive buffers: wait for any in-flight receive DMA to
 * drain, trigger the hardware RBUF init sequence, then wait for it to
 * report done.  Both waits are bounded; on timeout we log and continue
 * rather than fail, since this runs as part of chip reset.
 */
static void init_rbufs(struct hfi1_devdata *dd)
{
	u64 reg;
	int count;

	/*
	 * Wait for receive DMA to stop: no packets pending and none in
	 * progress.  Bounded at ~500 iterations of 2us each.
	 * NOTE(review): the expected drain time is a hardware property —
	 * confirm the bound against the chip documentation.
	 */
	count = 0;
	while (1) {
		reg = read_csr(dd, RCV_STATUS);
		if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
			    | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
			break;

		/* give up after ~1ms, log the stuck status, and press on */
		if (count++ > 500) {
			dd_dev_err(dd,
				   "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
				   __func__, reg);
			break;
		}
		udelay(2);	/* do not busy-wait the CSR */
	}

	/* start the RBUF initialization sequence */
	write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);

	/* read back to push the write out before polling for completion */
	read_csr(dd, RCV_CTRL);

	/* wait for the init-done status bit, bounded at ~50 * 2us */
	count = 0;
	while (1) {
		/* delay is required first because the bit is not expected immediately */
		udelay(2);
		reg = read_csr(dd, RCV_STATUS);
		if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
			break;

		/* give up, log, and continue */
		if (count++ > 50) {
			dd_dev_err(dd,
				   "%s: RcvStatus.RxRbufInit not set, continuing\n",
				   __func__);
			break;
		}
	}
}
13753
13754
/*
 * Set the RXE CSRs known to this driver to their reset values.  Order
 * matters: receive is stopped and the RBUFs re-initialized before the
 * global, per-context, and per-user-context registers are cleared.
 */
static void reset_rxe_csrs(struct hfi1_devdata *dd)
{
	int i, j;

	/* stop all inbound traffic, then re-initialize the receive buffers */
	write_csr(dd, RCV_CTRL, 0);
	init_rbufs(dd);

	/* global RXE registers */
	write_csr(dd, RCV_BTH_QP, 0);
	write_csr(dd, RCV_MULTICAST, 0);
	write_csr(dd, RCV_BYPASS, 0);
	write_csr(dd, RCV_VL15, 0);
	/*
	 * NOTE(review): writing the excess-buffer-overrun bit here
	 * presumably clears it (write-1-to-clear) — confirm against the
	 * register spec.
	 */
	write_csr(dd, RCV_ERR_INFO,
		  RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
	/* mask all receive errors, then clear anything latched */
	write_csr(dd, RCV_ERR_MASK, 0);
	write_csr(dd, RCV_ERR_CLEAR, ~0ull);
	/* clear the QP map, partition keys, counters, and RSM state */
	for (i = 0; i < 32; i++)
		write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
	for (i = 0; i < 4; i++)
		write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
	for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
		write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
	for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
		write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
	for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
		clear_rsm_rule(dd, i);
	for (i = 0; i < 32; i++)
		write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);

	/* per receive context CSRs */
	for (i = 0; i < chip_rcv_contexts(dd); i++) {
		/* kernel-accessible context registers */
		write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
		write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
		write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
		write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
		write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
		write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
		write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
		write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
		write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
		write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
		write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
		/* user-accessible context registers */
		write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
		write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
		/* and the per-context TID flow table */
		for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
			write_uctxt_csr(dd, i,
					RCV_TID_FLOW_TABLE + (8 * j), 0);
		}
	}
}
13822
13823
13824
13825
13826
13827
13828
13829
13830
13831
13832
13833
/*
 * Initialize the SC-to-VL mapping tables to the driver defaults:
 * SCs 0-7 map to VLs 0-7, SC 15 maps to VL 15, and every other SC maps
 * to VL 0 (see the literal tables below).  Both the TXE copy
 * (SEND_SC2VLT*) and the DC copy (DCC_CFG_SC_VL_TABLE*) are programmed,
 * and the driver's sc2vl cache is filled to match.
 */
static void init_sc2vl_tables(struct hfi1_devdata *dd)
{
	int i;

	/* TXE SC2VL tables: (sc, vl) pairs, four tables of eight SCs each */
	write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
		  0,
		  0, 0, 1, 1,
		  2, 2, 3, 3,
		  4, 4, 5, 5,
		  6, 6, 7, 7));
	write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
		  1,
		  8, 0, 9, 0,
		  10, 0, 11, 0,
		  12, 0, 13, 0,
		  14, 0, 15, 15));
	write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
		  2,
		  16, 0, 17, 0,
		  18, 0, 19, 0,
		  20, 0, 21, 0,
		  22, 0, 23, 0));
	write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
		  3,
		  24, 0, 25, 0,
		  26, 0, 27, 0,
		  28, 0, 29, 0,
		  30, 0, 31, 0));

	/* DC copy of the same mapping, 16 SCs per register */
	write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
		  15_0,
		  0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
		  8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
	write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
		  31_16,
		  16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
		  24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));

	/* fill the driver's sc2vl cache to match: identity for 0-7 and 15 */
	for (i = 0; i < 32; i++) {
		if (i < 8 || i == 15)
			*((u8 *)(dd->sc2vl) + i) = (u8)i;
		else
			*((u8 *)(dd->sc2vl) + i) = 0;
	}
}
13883
13884
13885
13886
13887
13888
13889
13890
13891
13892
/*
 * Put the chip into a known-clean state: stop all traffic, mask all
 * interrupts, reset the DC, then reset every CSR either via a PCIe FLR
 * (preferred) or with explicit writes.  Finishes by releasing the QSFP
 * reset lines and setting up the shared chip resources.
 *
 * Return: 0 on success, or the error from restore_pci_variables().
 */
static int init_chip(struct hfi1_devdata *dd)
{
	int i;
	int ret = 0;

	/*
	 * Stop the device from doing anything while we reset: turn off
	 * all outbound and inbound traffic and mask every interrupt
	 * source.
	 */

	/* disable send contexts and SDMA engines */
	write_csr(dd, SEND_CTRL, 0);
	for (i = 0; i < chip_send_contexts(dd); i++)
		write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
	for (i = 0; i < chip_sdma_engines(dd); i++)
		write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
	/* disable port (turn off RXE inbound traffic) and contexts */
	write_csr(dd, RCV_CTRL, 0);
	/*
	 * NOTE(review): unlike the TXE loops above, this writes the same
	 * global offset on every iteration instead of using
	 * write_kctxt_csr(dd, i, ...) — verify this is intentional.
	 */
	for (i = 0; i < chip_rcv_contexts(dd); i++)
		write_csr(dd, RCV_CTXT_CTRL, 0);
	/* mask all interrupt sources */
	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
		write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);

	/*
	 * Assert DC reset ahead of the CSR reset; the read-back pushes
	 * the write out before we continue.
	 */
	write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
	(void)read_csr(dd, CCE_DC_CTRL);

	if (use_flr) {
		/* reset CSRs with a PCIe function-level reset */
		dd_dev_info(dd, "Resetting CSRs with FLR\n");

		/* do the FLR, the DC reset will remain asserted */
		pcie_flr(dd->pcidev);

		/* restore PCI config state saved earlier */
		ret = restore_pci_variables(dd);
		if (ret) {
			dd_dev_err(dd, "%s: Could not restore PCI variables\n",
				   __func__);
			return ret;
		}

		/* Ax-stepping parts get a second FLR pass */
		if (is_ax(dd)) {
			dd_dev_info(dd, "Resetting CSRs with FLR\n");
			pcie_flr(dd->pcidev);
			ret = restore_pci_variables(dd);
			if (ret) {
				dd_dev_err(dd, "%s: Could not restore PCI variables\n",
					   __func__);
				return ret;
			}
		}
	} else {
		/* no FLR: reset each block's CSRs with explicit writes */
		dd_dev_info(dd, "Resetting CSRs with writes\n");
		reset_cce_csrs(dd);
		reset_txe_csrs(dd);
		reset_rxe_csrs(dd);
		reset_misc_csrs(dd);
	}

	/* release the DC reset */
	write_csr(dd, CCE_DC_CTRL, 0);

	/* turn the external LED off */
	setextled(dd, 0);

	/*
	 * Drive the QSFP OUT CSRs to 0x1f.
	 * NOTE(review): individual bit meanings come from the hardware
	 * spec — confirm 0x1f leaves the modules out of reset.
	 */
	write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
	write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
	init_chip_resources(dd);
	return ret;
}
13989
13990static void init_early_variables(struct hfi1_devdata *dd)
13991{
13992 int i;
13993
13994
13995 dd->vau = CM_VAU;
13996 dd->link_credits = CM_GLOBAL_CREDITS;
13997 if (is_ax(dd))
13998 dd->link_credits--;
13999 dd->vcu = cu_to_vcu(hfi1_cu);
14000
14001 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
14002 if (dd->vl15_init > dd->link_credits)
14003 dd->vl15_init = dd->link_credits;
14004
14005 write_uninitialized_csrs_and_memories(dd);
14006
14007 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
14008 for (i = 0; i < dd->num_pports; i++) {
14009 struct hfi1_pportdata *ppd = &dd->pport[i];
14010
14011 set_partition_keys(ppd);
14012 }
14013 init_sc2vl_tables(dd);
14014}
14015
14016static void init_kdeth_qp(struct hfi1_devdata *dd)
14017{
14018
14019 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
14020
14021 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
14022 kdeth_qp = 0;
14023 }
14024 if (kdeth_qp == 0)
14025 kdeth_qp = DEFAULT_KDETH_QP;
14026
14027 write_csr(dd, SEND_BTH_QP,
14028 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
14029 SEND_BTH_QP_KDETH_QP_SHIFT);
14030
14031 write_csr(dd, RCV_BTH_QP,
14032 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
14033 RCV_BTH_QP_KDETH_QP_SHIFT);
14034}
14035
14036
14037
14038
14039
14040
14041u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx)
14042{
14043 u64 reg = read_csr(dd, RCV_QP_MAP_TABLE + (idx / 8) * 8);
14044
14045 reg >>= (idx % 8) * 8;
14046 return reg;
14047}
14048
14049
14050
14051
14052
14053
14054
14055
14056
14057
14058
14059
14060
14061
14062
14063
14064
14065
14066static void init_qpmap_table(struct hfi1_devdata *dd,
14067 u32 first_ctxt,
14068 u32 last_ctxt)
14069{
14070 u64 reg = 0;
14071 u64 regno = RCV_QP_MAP_TABLE;
14072 int i;
14073 u64 ctxt = first_ctxt;
14074
14075 for (i = 0; i < 256; i++) {
14076 reg |= ctxt << (8 * (i % 8));
14077 ctxt++;
14078 if (ctxt > last_ctxt)
14079 ctxt = first_ctxt;
14080 if (i % 8 == 7) {
14081 write_csr(dd, regno, reg);
14082 reg = 0;
14083 regno += 8;
14084 }
14085 }
14086
14087 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
14088 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
14089}
14090
/* in-memory shadow of the RSM map table, flushed by complete_rsm_map_table() */
struct rsm_map_table {
	u64 map[NUM_MAP_REGS];	/* packed receive-context entries */
	unsigned int used;	/* number of map entries consumed so far */
};
14095
/* parameters for one RSM rule; programmed to hardware by add_rsm_rule() */
struct rsm_rule_data {
	u8 offset;		/* RCV_RSM_CFG offset field */
	u8 pkt_type;		/* RCV_RSM_CFG packet-type field */
	u32 field1_off;		/* RCV_RSM_SELECT field 1 offset */
	u32 field2_off;		/* RCV_RSM_SELECT field 2 offset */
	u32 index1_off;		/* RCV_RSM_SELECT index 1 offset */
	u32 index1_width;	/* RCV_RSM_SELECT index 1 width */
	u32 index2_off;		/* RCV_RSM_SELECT index 2 offset */
	u32 index2_width;	/* RCV_RSM_SELECT index 2 width */
	u32 mask1;		/* RCV_RSM_MATCH mask 1 */
	u32 value1;		/* RCV_RSM_MATCH value 1 */
	u32 mask2;		/* RCV_RSM_MATCH mask 2 */
	u32 value2;		/* RCV_RSM_MATCH value 2 */
};
14110
14111
14112
14113
14114
14115static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
14116{
14117 struct rsm_map_table *rmt;
14118 u8 rxcontext = is_ax(dd) ? 0 : 0xff;
14119
14120 rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
14121 if (rmt) {
14122 memset(rmt->map, rxcontext, sizeof(rmt->map));
14123 rmt->used = 0;
14124 }
14125
14126 return rmt;
14127}
14128
14129
14130
14131
14132
14133static void complete_rsm_map_table(struct hfi1_devdata *dd,
14134 struct rsm_map_table *rmt)
14135{
14136 int i;
14137
14138 if (rmt) {
14139
14140 for (i = 0; i < NUM_MAP_REGS; i++)
14141 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
14142
14143
14144 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14145 }
14146}
14147
14148
14149
14150
/*
 * Add a receive-side mapping rule: program the CFG, SELECT, and MATCH
 * CSRs for rule @rule_index from @rrd.  Bit @rule_index of the CFG CSR
 * is set along with the fields (it reads as the rule's enable bit —
 * NOTE(review): confirm against the register spec).
 */
static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
			 struct rsm_rule_data *rrd)
{
	write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
		  (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
		  1ull << rule_index |
		  (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
	write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
		  (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
		  (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
		  (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
		  (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
		  (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
		  (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
	write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
		  (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
		  (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
		  (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
		  (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
}
14171
14172
14173
14174
14175static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
14176{
14177 write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
14178 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
14179 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
14180}
14181
14182
14183static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
14184 unsigned int *np)
14185{
14186 int i;
14187 unsigned int m, n;
14188 u8 max_by_vl = 0;
14189
14190
14191 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
14192 num_vls == 1 ||
14193 krcvqsset <= 1)
14194 goto no_qos;
14195
14196
14197 for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
14198 if (krcvqs[i] > max_by_vl)
14199 max_by_vl = krcvqs[i];
14200 if (max_by_vl > 32)
14201 goto no_qos;
14202 m = ilog2(__roundup_pow_of_two(max_by_vl));
14203
14204
14205 n = ilog2(__roundup_pow_of_two(num_vls));
14206
14207
14208 if ((m + n) > 7)
14209 goto no_qos;
14210
14211 if (mp)
14212 *mp = m;
14213 if (np)
14214 *np = n;
14215
14216 return 1 << (m + n);
14217
14218no_qos:
14219 if (mp)
14220 *mp = 0;
14221 if (np)
14222 *np = 0;
14223 return 0;
14224}
14225
14226
14227
14228
14229
14230
14231
14232
14233
14234
14235
14236
14237
14238
14239
/*
 * Initialize the QOS RSM rule: spread the kernel receive contexts
 * across the RSM map so verbs traffic is steered by SC and QPN bits.
 * Falls back to a flat QP map over all kernel contexts when QOS cannot
 * be used (see qos_rmt_entries()) or the map table lacks room.
 */
static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
{
	struct rsm_rule_data rrd;
	unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
	unsigned int rmt_entries;
	u64 reg;

	if (!rmt)
		goto bail;
	/* m selects QPNs-per-VL bits, n selects VL bits */
	rmt_entries = qos_rmt_entries(dd, &m, &n);
	if (rmt_entries == 0)
		goto bail;
	qpns_per_vl = 1 << m;

	/* is there room in the map table? */
	rmt_entries = 1 << (m + n);
	if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
		goto bail;

	/* add the QOS entries to the RSM map table shadow */
	for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
		unsigned tctxt;

		for (qpn = 0, tctxt = ctxt;
		     krcvqs[i] && qpn < qpns_per_vl; qpn++) {
			unsigned idx, regoff, regidx;

			/* index the hardware will generate for this QPN/VL */
			idx = rmt->used + ((qpn << n) ^ i);
			regoff = (idx % 8) * 8;
			regidx = idx / 8;
			/* replace the default entry with this context */
			reg = rmt->map[regidx];
			reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
				<< regoff);
			reg |= (u64)(tctxt++) << regoff;
			rmt->map[regidx] = reg;
			/* wrap back to this VL's first context */
			if (tctxt == ctxt + krcvqs[i])
				tctxt = ctxt;
		}
		ctxt += krcvqs[i];
	}

	/* rule: match verbs packets, index by SC and QPN bits */
	rrd.offset = rmt->used;
	rrd.pkt_type = 2;
	rrd.field1_off = LRH_BTH_MATCH_OFFSET;
	rrd.field2_off = LRH_SC_MATCH_OFFSET;
	rrd.index1_off = LRH_SC_SELECT_OFFSET;
	rrd.index1_width = n;
	rrd.index2_off = QPN_SELECT_OFFSET;
	rrd.index2_width = m + n;
	rrd.mask1 = LRH_BTH_MASK;
	rrd.value1 = LRH_BTH_VALUE;
	rrd.mask2 = LRH_SC_MASK;
	rrd.value2 = LRH_SC_VALUE;

	/* program the rule */
	add_rsm_rule(dd, RSM_INS_VERBS, &rrd);

	/* mark the map entries as consumed */
	rmt->used += rmt_entries;
	/* route everything not caught by the rule to the control context */
	init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
	dd->qos_shift = n + 1;
	return;
bail:
	/* no QOS: flat map over all kernel receive contexts */
	dd->qos_shift = 1;
	init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
}
14309
/*
 * Set up the FECN-steering RSM rule: add map entries so that a marked
 * packet is delivered to the receive context it targets, starting at
 * context @start.  Disabled (with an error log) when the map table
 * cannot hold one entry per eligible context.
 */
static void init_fecn_handling(struct hfi1_devdata *dd,
			       struct rsm_map_table *rmt)
{
	struct rsm_rule_data rrd;
	u64 reg;
	int i, idx, regoff, regidx, start;
	u8 offset;
	u32 total_cnt;

	if (HFI1_CAP_IS_KSET(TID_RDMA))
		/* with TID RDMA, steer for every context from 1 upward */
		start = 1;
	else
		start = dd->first_dyn_alloc_ctxt;

	total_cnt = dd->num_rcv_contexts - start;

	/* this assumes the rest of the map table is filled in */
	if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
		dd_dev_err(dd, "FECN handling disabled - too many contexts allocated\n");
		return;
	}

	/*
	 * Rule offset chosen so the hardware's generated index (context
	 * number plus this offset, modulo the table size) lands on the
	 * map entries written below.  The u8 cast wraps the same way the
	 * hardware's offset field does — NOTE(review): confirm the
	 * modulo behavior against the RSM spec.
	 */
	offset = (u8)(NUM_MAP_ENTRIES + rmt->used - start);

	/* one map entry per context, entry value = context number */
	for (i = start, idx = rmt->used; i < dd->num_rcv_contexts;
	     i++, idx++) {
		/* replace with identity mapping */
		regoff = (idx % 8) * 8;
		regidx = idx / 8;
		reg = rmt->map[regidx];
		reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
		reg |= (u64)i << regoff;
		rmt->map[regidx] = reg;
	}

	/*
	 * Rule fields: match single bits at packet offsets 95 and 133
	 * (presumably the FECN/BECN marks — TODO confirm), and index by
	 * the 8 bits at offset 64.
	 */
	rrd.offset = offset;
	rrd.pkt_type = 0;
	rrd.field1_off = 95;
	rrd.field2_off = 133;
	rrd.index1_off = 64;
	rrd.index1_width = 8;
	rrd.index2_off = 0;
	rrd.index2_width = 0;
	rrd.mask1 = 1;
	rrd.value1 = 1;
	rrd.mask2 = 1;
	rrd.value2 = 1;

	/* add the rule */
	add_rsm_rule(dd, RSM_INS_FECN, &rrd);

	rmt->used += total_cnt;
}
14383
14384
/*
 * Initialize RSM for VNIC: spread NUM_VNIC_MAP_ENTRIES map entries over
 * the allocated VNIC contexts and add a rule matching 16B packets with
 * an Ethernet L4 type, indexed by VESW id and entropy.  Disabled (with
 * an error log) when the map table has no room left.
 */
void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
{
	u8 i, j;
	u8 ctx_id = 0;
	u64 reg;
	u32 regoff;
	struct rsm_rule_data rrd;

	if (hfi1_vnic_is_rsm_full(dd, NUM_VNIC_MAP_ENTRIES)) {
		dd_dev_err(dd, "Vnic RSM disabled, rmt entries used = %d\n",
			   dd->vnic.rmt_start);
		return;
	}

	dev_dbg(&(dd)->pcidev->dev, "Vnic rsm start = %d, end %d\n",
		dd->vnic.rmt_start,
		dd->vnic.rmt_start + NUM_VNIC_MAP_ENTRIES);

	/* update the hardware map table directly, one context per byte */
	regoff = RCV_RSM_MAP_TABLE + (dd->vnic.rmt_start / 8) * 8;
	reg = read_csr(dd, regoff);
	for (i = 0; i < NUM_VNIC_MAP_ENTRIES; i++) {
		/* byte lane of this entry within the current CSR */
		j = (dd->vnic.rmt_start + i) % 8;
		reg &= ~(0xffllu << (j * 8));
		reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8);
		/* cycle through the VNIC contexts round-robin */
		ctx_id %= dd->vnic.num_ctxt;
		/* flush when the CSR is full, or at the last entry */
		if (j == 7 || ((i + 1) == NUM_VNIC_MAP_ENTRIES)) {
			dev_dbg(&(dd)->pcidev->dev,
				"Vnic rsm map reg[%d] =0x%llx\n",
				regoff - RCV_RSM_MAP_TABLE, reg);

			write_csr(dd, regoff, reg);
			regoff += 8;
			/* read-modify-write the next CSR unless done */
			if (i < (NUM_VNIC_MAP_ENTRIES - 1))
				reg = read_csr(dd, regoff);
		}
	}

	/* rule: match 16B L2 packets carrying an Ethernet L4 type */
	rrd.offset = dd->vnic.rmt_start;
	rrd.pkt_type = 4;
	/* match field 1: the L2 type */
	rrd.field1_off = L2_TYPE_MATCH_OFFSET;
	rrd.mask1 = L2_TYPE_MASK;
	rrd.value1 = L2_16B_VALUE;
	/* match field 2: the L4 type */
	rrd.field2_off = L4_TYPE_MATCH_OFFSET;
	rrd.mask2 = L4_16B_TYPE_MASK;
	rrd.value2 = L4_16B_ETH_VALUE;
	/* index by VESW id and entropy */
	rrd.index1_off = L4_16B_HDR_VESWID_OFFSET;
	rrd.index1_width = ilog2(NUM_VNIC_MAP_ENTRIES);
	rrd.index2_off = L2_16B_ENTROPY_OFFSET;
	rrd.index2_width = ilog2(NUM_VNIC_MAP_ENTRIES);
	add_rsm_rule(dd, RSM_INS_VNIC, &rrd);

	/* enable RSM (no-op if already enabled) */
	add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
}
14447
14448void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
14449{
14450 clear_rsm_rule(dd, RSM_INS_VNIC);
14451
14452
14453 if (dd->vnic.rmt_start == 0)
14454 clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14455}
14456
14457static int init_rxe(struct hfi1_devdata *dd)
14458{
14459 struct rsm_map_table *rmt;
14460 u64 val;
14461
14462
14463 write_csr(dd, RCV_ERR_MASK, ~0ull);
14464
14465 rmt = alloc_rsm_map_table(dd);
14466 if (!rmt)
14467 return -ENOMEM;
14468
14469
14470 init_qos(dd, rmt);
14471 init_fecn_handling(dd, rmt);
14472 complete_rsm_map_table(dd, rmt);
14473
14474 dd->vnic.rmt_start = rmt->used;
14475 kfree(rmt);
14476
14477
14478
14479
14480
14481
14482
14483
14484
14485
14486
14487
14488
14489
14490 val = read_csr(dd, RCV_BYPASS);
14491 val &= ~RCV_BYPASS_HDR_SIZE_SMASK;
14492 val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
14493 RCV_BYPASS_HDR_SIZE_SHIFT);
14494 write_csr(dd, RCV_BYPASS, val);
14495 return 0;
14496}
14497
/* Enable error interrupts for the remaining (non-RXE/TXE) blocks. */
static void init_other(struct hfi1_devdata *dd)
{
	/* enable all CCE errors */
	write_csr(dd, CCE_ERR_MASK, ~0ull);
	/* enable only the misc errors covered by the driver's mask */
	write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
	/* enable all DCC and 8051 errors */
	write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
	write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
}
14508
14509
14510
14511
14512
14513
14514
14515
14516
/*
 * Program a credit-management AU table: entry 0 is 0, entry 1 is 1, and
 * entries 2..7 are 2*cu, 4*cu, ... 64*cu (powers of two scaled by the
 * credit unit).  Shared by the local and remote AU tables.
 *
 * @dd:      the device
 * @cu:      credit-unit scale factor
 * @csr0to3: CSR holding table entries 0-3
 * @csr4to7: CSR holding table entries 4-7
 */
static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
			       u32 csr0to3, u32 csr4to7)
{
	write_csr(dd, csr0to3,
		  0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
		  1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
		  2ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
		  4ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
	write_csr(dd, csr4to7,
		  8ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
		  16ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
		  32ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
		  64ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
}
14537
/* Program the local AU table from the local virtual credit unit. */
static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
{
	assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
			   SEND_CM_LOCAL_AU_TABLE4_TO7);
}
14543
/* Program the remote AU table from the peer's virtual credit unit. */
void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
{
	assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
			   SEND_CM_REMOTE_AU_TABLE4_TO7);
}
14549
/*
 * Set initial TXE values: enable all send-side error interrupts,
 * program the local AU table, and set the credit-return timer (except
 * on the functional simulator).
 */
static void init_txe(struct hfi1_devdata *dd)
{
	int i;

	/* enable all PIO, SDMA, general, and egress errors */
	write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
	write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
	write_csr(dd, SEND_ERR_MASK, ~0ull);
	write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);

	/* enable all per-context and per-SDMA-engine errors */
	for (i = 0; i < chip_send_contexts(dd); i++)
		write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
	for (i = 0; i < chip_sdma_engines(dd); i++)
		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);

	/* set the local CU to AU mapping */
	assign_local_cm_au_table(dd, dd->vcu);

	/*
	 * Set the default credit-return timer rate; skipped on the
	 * functional simulator.
	 */
	if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
		write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
}
14576
/*
 * Program the send and receive job-key checks for a context.
 *
 * @dd:   the device
 * @rcd:  receive context; must have an allocated send context
 * @jkey: job key to program
 *
 * Return: 0 on success, -EINVAL when @rcd or its send context is missing.
 */
int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
		       u16 jkey)
{
	u8 hw_ctxt;
	u64 reg;

	if (!rcd || !rcd->sc)
		return -EINVAL;

	hw_ctxt = rcd->sc->hw_context;
	reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK |
		((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
		SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
	/* optionally allow the permissive job key for this context */
	if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
		reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg);

	/* turn on the send-side job-key check, except on Ax hardware */
	if (!is_ax(dd)) {
		reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
		reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
		write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
	}

	/* enable the job-key check on the receive side */
	reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
		((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
		RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
	write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg);

	return 0;
}
14611
/*
 * Undo hfi1_set_ctxt_jkey(): clear the programmed job key and disable
 * the send- and receive-side job-key checks for the context.
 *
 * Return: 0 on success, -EINVAL when @rcd or its send context is missing.
 */
int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	u8 hw_ctxt;
	u64 reg;

	if (!rcd || !rcd->sc)
		return -EINVAL;

	hw_ctxt = rcd->sc->hw_context;
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0);

	/*
	 * Disable the send-side job-key check; Ax hardware never had it
	 * enabled (see hfi1_set_ctxt_jkey()).
	 */
	if (!is_ax(dd)) {
		reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
		reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
		write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
	}

	/* disable the receive-side job-key check */
	write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0);

	return 0;
}
14637
/*
 * Program and enable the send-side partition-key check for a context,
 * also clearing the disallow-KDETH-packets bit.
 *
 * @dd:   the device
 * @rcd:  receive context; must have an allocated send context
 * @pkey: partition key to program
 *
 * Return: 0 on success, -EINVAL when @rcd or its send context is missing.
 */
int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
		       u16 pkey)
{
	u8 hw_ctxt;
	u64 reg;

	if (!rcd || !rcd->sc)
		return -EINVAL;

	hw_ctxt = rcd->sc->hw_context;
	reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
		SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
	/* enable the pkey check and allow KDETH packets through */
	reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
	reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
	reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);

	return 0;
}
14658
/*
 * Undo hfi1_set_ctxt_pkey(): disable the send-side partition-key check
 * and zero the programmed key for the context.
 *
 * Return: 0 on success, -EINVAL when @ctxt or its send context is missing.
 */
int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
{
	u8 hw_ctxt;
	u64 reg;

	if (!ctxt || !ctxt->sc)
		return -EINVAL;

	hw_ctxt = ctxt->sc->hw_context;
	reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
	reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);

	return 0;
}
14675
14676
14677
14678
14679
/*
 * Chip-specific teardown at the start of driver cleanup: stop ASPM
 * handling, release counters and rcverr state, then free the shared
 * chip resources.
 */
void hfi1_start_cleanup(struct hfi1_devdata *dd)
{
	aspm_exit(dd);
	free_cntrs(dd);
	free_rcverr(dd);
	finish_chip_resources(dd);
}
14687
/* base GUID with the per-HFI index bit cleared; used to match ASIC peers */
#define HFI_BASE_GUID(dev) \
	((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
14690
14691
14692
14693
14694
14695
/*
 * Locate or allocate the per-ASIC data shared by the HFIs on one chip.
 * The first HFI of a pair allocates the structure and sets up the
 * shared i2c state; a later HFI finds its peer in the device table
 * (same base GUID, different unit) and shares the peer's structure.
 *
 * Return: 0 on success or a negative errno.
 */
static int init_asic_data(struct hfi1_devdata *dd)
{
	unsigned long index;
	struct hfi1_devdata *peer;
	struct hfi1_asic_data *asic_data;
	int ret = 0;

	/* pre-allocate in case we are the first device on the ASIC */
	asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
	if (!asic_data)
		return -ENOMEM;

	xa_lock_irq(&hfi1_dev_table);
	/* search for our peer device */
	xa_for_each(&hfi1_dev_table, index, peer) {
		if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(peer)) &&
		    dd->unit != peer->unit)
			break;
	}
	/* peer is NULL here when the loop found no match */

	if (peer) {
		/* use the peer's already-allocated structure */
		dd->asic_data = peer->asic_data;
		kfree(asic_data);
	} else {
		dd->asic_data = asic_data;
		mutex_init(&dd->asic_data->asic_resource_mutex);
	}
	dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
	xa_unlock_irq(&hfi1_dev_table);

	/* first one through - set up i2c devices */
	if (!peer)
		ret = set_up_i2c(dd, dd->asic_data);

	return ret;
}
14733
14734
14735
14736
14737
14738
14739
14740static int obtain_boardname(struct hfi1_devdata *dd)
14741{
14742
14743 const char generic[] =
14744 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14745 unsigned long size;
14746 int ret;
14747
14748 ret = read_hfi1_efi_var(dd, "description", &size,
14749 (void **)&dd->boardname);
14750 if (ret) {
14751 dd_dev_info(dd, "Board description not found\n");
14752
14753 dd->boardname = kstrdup(generic, GFP_KERNEL);
14754 if (!dd->boardname)
14755 return -ENOMEM;
14756 }
14757 return 0;
14758}
14759
14760
14761
14762
14763
14764
14765
14766
14767
/*
 * Sanity-check the interrupt CSRs: verify the mask can be zeroed, that
 * latched status can be cleared, and that CCE_INT_FORCE raises every
 * status bit.  The caller uses this when there is no parent PCI bus,
 * where a hypervisor may have mapped these registers incorrectly.  The
 * original interrupt mask is restored on both paths.
 *
 * Return: 0 when the registers behave, -EINVAL otherwise.
 */
static int check_int_registers(struct hfi1_devdata *dd)
{
	u64 reg;
	u64 all_bits = ~(u64)0;
	u64 mask;

	/* clear the interrupt mask and verify it reads back as zero */
	mask = read_csr(dd, CCE_INT_MASK);
	write_csr(dd, CCE_INT_MASK, 0ull);
	reg = read_csr(dd, CCE_INT_MASK);
	if (reg)
		goto err_exit;

	/* clear all interrupt status bits and verify none remain */
	write_csr(dd, CCE_INT_CLEAR, all_bits);
	reg = read_csr(dd, CCE_INT_STATUS);
	if (reg)
		goto err_exit;

	/* force every status bit and verify they all appear */
	write_csr(dd, CCE_INT_FORCE, all_bits);
	reg = read_csr(dd, CCE_INT_STATUS);
	if (reg != all_bits)
		goto err_exit;

	/* clear the forced status and restore the saved mask */
	write_csr(dd, CCE_INT_CLEAR, all_bits);
	write_csr(dd, CCE_INT_MASK, mask);

	return 0;
err_exit:
	write_csr(dd, CCE_INT_MASK, mask);
	dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
	return -EINVAL;
}
14803
14804
14805
14806
14807
14808
14809
14810
14811
/*
 * hfi1_init_dd() - chip-specific device initialization.
 *
 * Sets per-port defaults, maps PCI/CSR space, resets and configures the
 * chip, then brings up contexts, interrupts, firmware, and counters.
 * The statement order below is load-bearing: each step depends on the
 * ones before it.  On failure the partially initialized device is torn
 * down — including freeing @dd itself on the deepest error path — and a
 * negative errno is returned.
 */
int hfi1_init_dd(struct hfi1_devdata *dd)
{
	struct pci_dev *pdev = dd->pcidev;
	struct hfi1_pportdata *ppd;
	u64 reg;
	int i, ret;
	static const char * const inames[] = {
		"RTL silicon",
		"RTL VCS simulation",
		"RTL FPGA emulation",
		"Functional simulator"
	};
	struct pci_dev *parent = pdev->bus->self;
	u32 sdma_engines = chip_sdma_engines(dd);

	/* per-port defaults: widths, VLs, thresholds, CRC modes */
	ppd = dd->pport;
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		int vl;

		hfi1_init_pportdata(pdev, ppd, dd, 0, 1);

		/* start with all widths supported, 4X enabled */
		ppd->link_width_supported =
			OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
			OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
		ppd->link_width_downgrade_supported =
			ppd->link_width_supported;
		ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
		ppd->link_width_downgrade_enabled =
			ppd->link_width_downgrade_supported;

		/* validate the num_vls module parameter */
		if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
		    num_vls > HFI1_MAX_VLS_SUPPORTED) {
			dd_dev_err(dd, "Invalid num_vls %u, using %u VLs\n",
				   num_vls, HFI1_MAX_VLS_SUPPORTED);
			num_vls = HFI1_MAX_VLS_SUPPORTED;
		}
		ppd->vls_supported = num_vls;
		ppd->vls_operational = ppd->vls_supported;

		/* data VLs get the max MTU; VL15 is sized for MADs */
		for (vl = 0; vl < num_vls; vl++)
			dd->vld[vl].mtu = hfi1_max_mtu;
		dd->vld[15].mtu = MAX_MAD_PACKET;

		/* error thresholds and CRC modes from the module param */
		ppd->overrun_threshold = 0x4;
		ppd->phy_error_threshold = 0xf;
		ppd->port_crc_mode_enabled = link_crc_mask;
		/* initialize supported LTP CRC mode */
		ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
		/* initialize enabled LTP CRC mode */
		ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;

		ppd->host_link_state = HLS_DN_OFFLINE;
		init_vl_arb_caches(ppd);
	}

	/* map the chip's CSR space */
	ret = hfi1_pcie_ddinit(dd, pdev);
	if (ret < 0)
		goto bail_free;

	/* save PCI config state for later restores (e.g. after FLR) */
	ret = save_pci_variables(dd);
	if (ret < 0)
		goto bail_cleanup;

	dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
			& CCE_REVISION_CHIP_REV_MAJOR_MASK;
	dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
			& CCE_REVISION_CHIP_REV_MINOR_MASK;

	/*
	 * No parent bus (e.g. passed through to a VM): verify the
	 * interrupt registers were mapped usably (see
	 * check_int_registers()).
	 */
	if (!parent) {
		ret = check_int_registers(dd);
		if (ret)
			goto bail_cleanup;
	}

	/* decode implementation info from CCE_REVISION2 */
	reg = read_csr(dd, CCE_REVISION2);
	dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
					& CCE_REVISION2_HFI_ID_MASK;
	/* the variable-size top bits of these fields are truncated */
	dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
	dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
	dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
		    dd->icode < ARRAY_SIZE(inames) ?
		    inames[dd->icode] : "unknown", (int)dd->irev);

	/* the only supported speed is 25G */
	dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
	dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
	dd->pport->link_speed_active = OPA_LINK_SPEED_25G;

	/* FPGA emulation (P-type) only supports 1X */
	ppd = dd->pport;
	if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
		ppd->link_width_supported =
			ppd->link_width_enabled =
			ppd->link_width_downgrade_supported =
			ppd->link_width_downgrade_enabled =
				OPA_LINK_WIDTH_1X;
	}

	/* with SDMA, cap the VL count at the number of SDMA engines */
	if (HFI1_CAP_IS_KSET(SDMA) && num_vls > sdma_engines) {
		dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
			   num_vls, sdma_engines);
		num_vls = sdma_engines;
		ppd->vls_supported = sdma_engines;
		ppd->vls_operational = ppd->vls_supported;
	}

	/*
	 * Convert the receive interrupt timeout from ns to the CSR
	 * value; clamp to the register's maximum, and never round a
	 * nonzero request down to zero.
	 */
	dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
	if (dd->rcv_intr_timeout_csr >
			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
		dd->rcv_intr_timeout_csr =
			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
	else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
		dd->rcv_intr_timeout_csr = 1;

	/* needed by init_asic_data() to match peer GUIDs */
	read_guid(dd);

	/* obtain (or share) the per-ASIC data */
	ret = init_asic_data(dd);
	if (ret)
		goto bail_cleanup;

	/* obtain chip resources - must happen before the reset below */
	ret = init_chip(dd);
	if (ret)
		goto bail_cleanup;

	/* read in the PCIe link speed information */
	ret = pcie_speeds(dd);
	if (ret)
		goto bail_cleanup;

	ret = eprom_init(dd);
	if (ret)
		goto bail_free_rcverr;

	/* platform configuration (best-effort: no return to check) */
	get_platform_config(dd);

	/* obtain firmware handles */
	ret = hfi1_firmware_init(dd);
	if (ret)
		goto bail_cleanup;

	/* move to PCIe Gen3 if possible; requires the firmware above */
	ret = do_pcie_gen3_transition(dd);
	if (ret)
		goto bail_cleanup;

	/* tune PCIe read/payload sizes */
	tune_pcie_caps(dd);

	/* credit parameters, CSR sweep, pkeys, SC2VL tables */
	init_early_variables(dd);

	parse_platform_config(dd);

	ret = obtain_boardname(dd);
	if (ret)
		goto bail_cleanup;

	snprintf(dd->boardversion, BOARD_VERS_MAX,
		 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
		 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
		 (u32)dd->majrev,
		 (u32)dd->minrev,
		 (dd->revision >> CCE_REVISION_SW_SHIFT)
		    & CCE_REVISION_SW_MASK);

	ret = set_up_context_variables(dd);
	if (ret)
		goto bail_cleanup;

	/* set initial RXE CSRs */
	ret = init_rxe(dd);
	if (ret)
		goto bail_cleanup;

	/* set initial TXE CSRs */
	init_txe(dd);
	/* set initial non-RXE, non-TXE CSRs */
	init_other(dd);
	/* set up KDETH QP prefix in both RX and TX CSRs */
	init_kdeth_qp(dd);

	ret = hfi1_dev_affinity_init(dd);
	if (ret)
		goto bail_cleanup;

	/* send contexts must be set up before receive contexts */
	ret = init_send_contexts(dd);
	if (ret)
		goto bail_cleanup;

	ret = hfi1_create_kctxts(dd);
	if (ret)
		goto bail_cleanup;

	/* ASPM before interrupts are enabled */
	aspm_init(dd);

	ret = init_pervl_scs(dd);
	if (ret)
		goto bail_cleanup;

	/* SDMA per port */
	for (i = 0; i < dd->num_pports; ++i) {
		ret = sdma_init(dd, i);
		if (ret)
			goto bail_cleanup;
	}

	/* use contexts created by hfi1_create_kctxts */
	ret = set_up_interrupts(dd);
	if (ret)
		goto bail_cleanup;

	ret = hfi1_comp_vectors_set_up(dd);
	if (ret)
		goto bail_clear_intr;

	/* set up LCB access - must be after set_up_interrupts() */
	init_lcb_access(dd);

	/* serial number derived from the base GUID */
	snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
		 (dd->base_guid & 0xFFFFFF) |
		     ((dd->base_guid >> 11) & 0xF000000));

	/* OUI bytes are the top three bytes of the base GUID */
	dd->oui1 = dd->base_guid >> 56 & 0xFF;
	dd->oui2 = dd->base_guid >> 48 & 0xFF;
	dd->oui3 = dd->base_guid >> 40 & 0xFF;

	ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
	if (ret)
		goto bail_clear_intr;

	thermal_init(dd);

	ret = init_cntrs(dd);
	if (ret)
		goto bail_clear_intr;

	ret = init_rcverr(dd);
	if (ret)
		goto bail_free_cntrs;

	init_completion(&dd->user_comp);

	/* the device itself holds the first user reference */
	atomic_set(&dd->user_refcount, 1);

	goto bail;

	/* error paths unwind in reverse order of setup */
bail_free_rcverr:
	free_rcverr(dd);
bail_free_cntrs:
	free_cntrs(dd);
bail_clear_intr:
	hfi1_comp_vectors_clean_up(dd);
	msix_clean_up_interrupts(dd);
bail_cleanup:
	hfi1_pcie_ddcleanup(dd);
bail_free:
	hfi1_free_devdata(dd);
bail:
	return ret;
}
15134
15135static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
15136 u32 dw_len)
15137{
15138 u32 delta_cycles;
15139 u32 current_egress_rate = ppd->current_egress_rate;
15140
15141
15142 if (desired_egress_rate == -1)
15143 return 0;
15144
15145 if (desired_egress_rate >= current_egress_rate)
15146 return 0;
15147
15148 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
15149 egress_cycles(dw_len * 4, current_egress_rate);
15150
15151 return (u16)delta_cycles;
15152}
15153
15154
15155
15156
15157
15158
15159
15160
15161
15162
15163
15164
15165
15166
15167
15168u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
15169 u32 dw_len)
15170{
15171 u64 pbc, delay = 0;
15172
15173 if (unlikely(srate_mbs))
15174 delay = delay_cycles(ppd, srate_mbs, dw_len);
15175
15176 pbc = flags
15177 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
15178 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
15179 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
15180 | (dw_len & PBC_LENGTH_DWS_MASK)
15181 << PBC_LENGTH_DWS_SHIFT;
15182
15183 return pbc;
15184}
15185
/* SBus receiver address of the thermal sensor block */
#define SBUS_THERMAL 0x4f
/* mode-select value written to the sensor; presumably continuous
 * monitoring mode — confirm against the sensor datasheet */
#define SBUS_THERM_MONITOR_MODE 0x1
15188
/*
 * Log a thermal-sensor initialization failure.
 *
 * @dev:    device to log against (struct hfi1_devdata *)
 * @ret:    error code being reported
 * @reason: human-readable description of the failing step
 *
 * Fix: the original body hardcoded "dd", silently capturing it from the
 * caller's scope and ignoring the @dev argument.  Use (dev) so the macro
 * honors its parameter; every existing call site passes dd, so behavior
 * is unchanged.
 */
#define THERM_FAILURE(dev, ret, reason) \
	dd_dev_err((dev), \
		   "Thermal sensor initialization failed: %s (%d)\n", \
		   (reason), (ret))
15193
15194
15195
15196
15197
15198
15199
15200
15201
15202
15203
/*
 * One-time initialization of the on-chip thermal sensor.
 *
 * Runs only on RTL silicon, and only if no other function instance has
 * already completed the init (tracked via the CR_THERM_INIT chip
 * resource).  Programs the sensor through a fixed sequence of SBus
 * transactions to the SBUS_THERMAL receiver, then re-enables CSR-based
 * polling of the readings.  The transaction order is part of the
 * hardware programming sequence — do not reorder.
 *
 * Return: 0 on success or when init is unnecessary; a negative value
 * from the failing SBus/resource call otherwise.
 */
static int thermal_init(struct hfi1_devdata *dd)
{
	int ret = 0;

	/* nothing to do off RTL silicon, or if init was already done */
	if (dd->icode != ICODE_RTL_SILICON ||
	    check_chip_resource(dd, CR_THERM_INIT, NULL))
		return ret;

	ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
	if (ret) {
		THERM_FAILURE(dd, ret, "Acquire SBus");
		return ret;
	}

	dd_dev_info(dd, "Initializing thermal sensor\n");

	/* disable polling while the sensor is being reprogrammed */
	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
	msleep(100);

	/* reset the thermal SBus receiver */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				RESET_SBUS_RECEIVER, 0);
	if (ret) {
		THERM_FAILURE(dd, ret, "Bus Reset");
		goto done;
	}

	/* assert reset on the thermal block (receiver data reg 0x0 = 0x1) */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				WRITE_SBUS_RECEIVER, 0x1);
	if (ret) {
		THERM_FAILURE(dd, ret, "Therm Block Reset");
		goto done;
	}

	/* program the clock divider (receiver data reg 0x1 = 0x32) */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
				WRITE_SBUS_RECEIVER, 0x32);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Clock Div");
		goto done;
	}

	/* select monitor mode (receiver data reg 0x3) */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
				WRITE_SBUS_RECEIVER,
				SBUS_THERM_MONITOR_MODE);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Mode Sel");
		goto done;
	}

	/* deassert the thermal block reset (receiver data reg 0x0 = 0x2) */
	ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
				WRITE_SBUS_RECEIVER, 0x2);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Reset Deassert");
		goto done;
	}

	/* let the sensor settle before re-enabling polling */
	msleep(22);

	/* re-enable CSR polling of the thermal readings */
	write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);

	/* mark init as done so other function instances skip it;
	 * NOTE(review): this resource is intentionally never released */
	ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
	if (ret)
		THERM_FAILURE(dd, ret, "Unable to set thermal init flag");

done:
	release_chip_resource(dd, CR_SBUS);
	return ret;
}
15274
/*
 * Emergency response to a critical-temperature condition: force the
 * device into freeze mode, take the link offline, and shut down the DC
 * (link-layer core) to reduce heat as quickly as possible.  The
 * statement order here is deliberate — freeze first, then link down,
 * then DC shutdown.
 */
static void handle_temp_err(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd = &dd->pport[0];

	/*
	 * Put the device into forced freeze mode and flag it so the
	 * rest of the driver knows the freeze was thermal, not a normal
	 * error recovery.
	 */
	dd_dev_emerg(dd,
		     "Critical temperature reached! Forcing device into freeze mode!\n");
	dd->flags |= HFI1_FORCED_FREEZE;
	start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);

	/*
	 * Take the link down to offline.  Clearing driver_link_ready and
	 * link_enabled first prevents the driver from trying to bring
	 * the link back up while we are shutting it down.  The link-down
	 * reason is encoded in the upper byte of the physical link state
	 * request.
	 */
	ppd->driver_link_ready = 0;
	ppd->link_enabled = 0;
	set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
				PLS_OFFLINE);

	/*
	 * Finally, shut down the DC itself to minimize power draw and
	 * heat generation.
	 */
	dc_shutdown(dd);
}
15308