/*
 * Hardware (HW) module of the Chelsio FCoE driver (csiostor).
 */

#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/firmware.h>
#include <linux/stddef.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/log2.h>

#include "csio_hw.h"
#include "csio_lnode.h"
#include "csio_rnode.h"

int csio_force_master;
int csio_dbg_level = 0xFEFF;
unsigned int csio_port_mask = 0xf;

/* Default FW event queue entries. */
static uint32_t csio_evtq_sz = CSIO_EVTQ_SIZE;

/* Default MSI param level */
int csio_msi = 2;

/* FCoE function instances */
static int dev_num;

/* FCoE adapter types and their descriptions */
static const struct csio_adap_desc csio_t4_fcoe_adapters[] = {
	{"T440-Dbg 10G", "Chelsio T440-Dbg 10G [FCoE]"},
	{"T420-CR 10G", "Chelsio T420-CR 10G [FCoE]"},
	{"T422-CR 10G/1G", "Chelsio T422-CR 10G/1G [FCoE]"},
	{"T440-CR 10G", "Chelsio T440-CR 10G [FCoE]"},
	{"T420-BCH 10G", "Chelsio T420-BCH 10G [FCoE]"},
	{"T440-BCH 10G", "Chelsio T440-BCH 10G [FCoE]"},
	{"T440-CH 10G", "Chelsio T440-CH 10G [FCoE]"},
	{"T420-SO 10G", "Chelsio T420-SO 10G [FCoE]"},
	{"T420-CX4 10G", "Chelsio T420-CX4 10G [FCoE]"},
	{"T420-BT 10G", "Chelsio T420-BT 10G [FCoE]"},
	{"T404-BT 1G", "Chelsio T404-BT 1G [FCoE]"},
	{"B420-SR 10G", "Chelsio B420-SR 10G [FCoE]"},
	{"B404-BT 1G", "Chelsio B404-BT 1G [FCoE]"},
	{"T480-CR 10G", "Chelsio T480-CR 10G [FCoE]"},
	{"T440-LP-CR 10G", "Chelsio T440-LP-CR 10G [FCoE]"},
	{"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
	{"HUAWEI T480 10G", "Chelsio HUAWEI T480 10G [FCoE]"},
	{"HUAWEI T440 10G", "Chelsio HUAWEI T440 10G [FCoE]"},
	{"HUAWEI STG 10G", "Chelsio HUAWEI STG 10G [FCoE]"},
	{"ACROMAG XAUI 10G", "Chelsio ACROMAG XAUI 10G [FCoE]"},
	{"ACROMAG SFP+ 10G", "Chelsio ACROMAG SFP+ 10G [FCoE]"},
	{"QUANTA SFP+ 10G", "Chelsio QUANTA SFP+ 10G [FCoE]"},
	{"HUAWEI 10Gbase-T", "Chelsio HUAWEI 10Gbase-T [FCoE]"},
	{"HUAWEI T4TOE 10G", "Chelsio HUAWEI T4TOE 10G [FCoE]"}
};

static const struct csio_adap_desc csio_t5_fcoe_adapters[] = {
	{"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"},
	{"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"},
	{"T522-CR 10G/1G", "Chelsio T522-CR 10G/1G [FCoE]"},
	{"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"},
	{"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"},
	{"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"},
	{"T540-CH 10G", "Chelsio T540-CH 10G [FCoE]"},
	{"T520-SO 10G", "Chelsio T520-SO 10G [FCoE]"},
	{"T520-CX4 10G", "Chelsio T520-CX4 10G [FCoE]"},
	{"T520-BT 10G", "Chelsio T520-BT 10G [FCoE]"},
	{"T504-BT 1G", "Chelsio T504-BT 1G [FCoE]"},
	{"B520-SR 10G", "Chelsio B520-SR 10G [FCoE]"},
	{"B504-BT 1G", "Chelsio B504-BT 1G [FCoE]"},
	{"T580-CR 10G", "Chelsio T580-CR 10G [FCoE]"},
	{"T540-LP-CR 10G", "Chelsio T540-LP-CR 10G [FCoE]"},
	{"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
	{"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"},
	{"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"},
	{"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"},
	{"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"}
};

static void csio_mgmtm_cleanup(struct csio_mgmtm *);
static void csio_hw_mbm_cleanup(struct csio_hw *);

/* State machine forward declarations */
static void csio_hws_uninit(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_configuring(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_initializing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_ready(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiescing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiesced(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_resetting(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_removing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_pcierr(struct csio_hw *, enum csio_hw_ev);

static void csio_hw_initialize(struct csio_hw *hw);
static void csio_evtq_stop(struct csio_hw *hw);
static void csio_evtq_start(struct csio_hw *hw);

int csio_is_hw_ready(struct csio_hw *hw)
{
	return csio_match_state(hw, csio_hws_ready);
}

int csio_is_hw_removing(struct csio_hw *hw)
{
	return csio_match_state(hw, csio_hws_removing);
}
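
/*
 * csio_hw_wait_op_done_val - wait until an operation is completed
 * @hw: the HW module
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times. If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there. Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */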
int
csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,
			 int polarity, int attempts, int delay, uint32_t *valp)
{
	uint32_t val;

	while (1) {
		val = csio_rd_reg32(hw, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}

		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
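
/*
 * csio_hw_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 * @hw: the HW module
 * @addr: the indirect TP register address
 * @mask: specifies the field within the register to modify
 * @val: new value for the field
 *
 * Sets a field of an indirect TP register to the given value.
 */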
void
csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr,
			    unsigned int mask, unsigned int val)
{
	csio_wr_reg32(hw, addr, TP_PIO_ADDR);
	val |= csio_rd_reg32(hw, TP_PIO_DATA) & ~mask;
	csio_wr_reg32(hw, val, TP_PIO_DATA);
}

void
csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
		   uint32_t value)
{
	uint32_t val = csio_rd_reg32(hw, reg) & ~mask;

	csio_wr_reg32(hw, val | value, reg);
	/* Read back to flush the write. */
	csio_rd_reg32(hw, reg);
}

static int
csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf)
{
	return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype,
					    addr, len, buf, 0);
}

/* EEPROM poll iteration counts and VPD layout constants. */
#define EEPROM_MAX_RD_POLL	40
#define EEPROM_MAX_WR_POLL	6
#define EEPROM_STAT_ADDR	0x7bfc
#define VPD_BASE		0x400
#define VPD_BASE_OLD		0
#define VPD_LEN			1024
#define VPD_INFO_FLD_HDR_SIZE	3
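
/*
 * csio_hw_seeprom_read - read a serial EEPROM location
 * @hw: HW module
 * @addr: EEPROM virtual address
 * @data: where to store the read data
 *
 * Read a 32-bit word from a location in serial EEPROM using the card's
 * PCI VPD capability.
 */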
static int
csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data)
{
	uint16_t val = 0;
	int attempts = EEPROM_MAX_RD_POLL;
	uint32_t base = hw->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	pci_write_config_word(hw->pdev, base + PCI_VPD_ADDR, (uint16_t)addr);

	do {
		udelay(10);
		pci_read_config_word(hw->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		csio_err(hw, "reading EEPROM address 0x%x failed\n", addr);
		return -EINVAL;
	}

	pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);

	return 0;
}

/*
 * Partial EEPROM Vital Product Data structure. Includes only the ID and
 * VPD-R sections.
 */
struct t4_vpd_hdr {
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[ID_LEN];
	u8 vpdr_tag;
	u8 vpdr_len[2];
};
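
/*
 * csio_hw_get_vpd_keyword_val - find the offset of a VPD keyword's value
 * @v: the parsed VPD data
 * @kw: the two-character keyword to look for (e.g. "SN")
 *
 * Walks the VPD-R section and returns the offset of the value for the
 * given keyword, or -EINVAL if the keyword is not present.
 */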
static int
csio_hw_get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
{
	int32_t i;
	int32_t offset, len;
	const uint8_t *buf = &v->id_tag;
	const uint8_t *vpdr_len = &v->vpdr_tag;

	offset = sizeof(struct t4_vpd_hdr);
	len = (uint16_t)vpdr_len[1] + ((uint16_t)vpdr_len[2] << 8);

	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
		return -EINVAL;

	for (i = offset; (i + VPD_INFO_FLD_HDR_SIZE) <= (offset + len);) {
		if (memcmp(buf + i, kw, 2) == 0) {
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}

		i += VPD_INFO_FLD_HDR_SIZE + buf[i + 2];
	}

	return -EINVAL;
}

static int
csio_pci_capability(struct pci_dev *pdev, int cap, int *pos)
{
	*pos = pci_find_capability(pdev, cap);
	if (*pos)
		return 0;

	return -1;
}
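
/*
 * csio_hw_get_vpd_params - read VPD parameters from VPD EEPROM
 * @hw: HW module
 * @p: where to store the parameters
 *
 * Reads card parameters stored in the VPD EEPROM and validates the VPD
 * checksum.
 */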
static int
csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p)
{
	int i, ret, ec, sn, addr;
	uint8_t *vpd, csum;
	const struct t4_vpd_hdr *v;
	char *s;

	if (csio_is_valid_vpd(hw))
		return 0;

	ret = csio_pci_capability(hw->pdev, PCI_CAP_ID_VPD,
				  &hw->params.pci.vpd_cap_addr);
	if (ret)
		return -EINVAL;

	vpd = kzalloc(VPD_LEN, GFP_ATOMIC);
	if (vpd == NULL)
		return -ENOMEM;

	/*
	 * Card information normally starts at VPD_BASE but early cards
	 * had it at 0.
	 */
	ret = csio_hw_seeprom_read(hw, VPD_BASE, (uint32_t *)(vpd));
	if (ret) {
		kfree(vpd);
		return ret;
	}
	addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;

	for (i = 0; i < VPD_LEN; i += 4) {
		ret = csio_hw_seeprom_read(hw, addr + i, (uint32_t *)(vpd + i));
		if (ret) {
			kfree(vpd);
			return ret;
		}
	}

	/* Reset the VPD flag! */
	hw->flags &= (~CSIO_HWF_VPD_VALID);

	v = (const struct t4_vpd_hdr *)vpd;

#define FIND_VPD_KW(var, name) do { \
	var = csio_hw_get_vpd_keyword_val(v, name); \
	if (var < 0) { \
		csio_err(hw, "missing VPD keyword " name "\n"); \
		kfree(vpd); \
		return -EINVAL; \
	} \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		csio_err(hw, "corrupted VPD EEPROM, actual csum %u\n", csum);
		kfree(vpd);
		return -EINVAL;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
#undef FIND_VPD_KW

	memcpy(p->id, v->id_data, ID_LEN);
	s = strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	s = strstrip(p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	s = strstrip(p->sn);

	csio_valid_vpd_copied(hw);

	kfree(vpd);
	return 0;
}
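
/*
 * csio_hw_sf1_read - read data from the serial flash
 * @hw: the HW module
 * @byte_cnt: number of bytes to read (1..4)
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash. The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */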
static int
csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont,
		 int32_t lock, uint32_t *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)
		return -EBUSY;

	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;

	csio_wr_reg32(hw, lock | cont | BYTECNT(byte_cnt - 1), SF_OP);
	ret = csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,
				       10, NULL);
	if (!ret)
		*valp = csio_rd_reg32(hw, SF_DATA);
	return ret;
}
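
/*
 * csio_hw_sf1_write - write data to the serial flash
 * @hw: the HW module
 * @byte_cnt: number of bytes to write (1..4)
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash. The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */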
static int
csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont,
		  int32_t lock, uint32_t val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)
		return -EBUSY;

	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;

	csio_wr_reg32(hw, val, SF_DATA);
	csio_wr_reg32(hw, cont | BYTECNT(byte_cnt - 1) | OP_WR | lock, SF_OP);

	return csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,
					10, NULL);
}
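
/*
 * csio_hw_flash_wait_op - wait for a flash operation to complete
 * @hw: the HW module
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */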
static int
csio_hw_flash_wait_op(struct csio_hw *hw, int32_t attempts, int32_t delay)
{
	int ret;
	uint32_t status;

	while (1) {
		ret = csio_hw_sf1_write(hw, 1, 1, 1, SF_RD_STATUS);
		if (ret != 0)
			return ret;

		ret = csio_hw_sf1_read(hw, 1, 0, 1, &status);
		if (ret != 0)
			return ret;

		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}
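
/*
 * csio_hw_read_flash - read words from serial flash
 * @hw: the HW module
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * byte order.
 */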
static int
csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords,
		   uint32_t *data, int32_t byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(uint32_t) > hw->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	ret = csio_hw_sf1_write(hw, 4, 1, 0, addr);
	if (ret != 0)
		return ret;

	ret = csio_hw_sf1_read(hw, 1, 1, 0, data);
	if (ret != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			csio_wr_reg32(hw, 0, SF_OP);	/* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}
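
/*
 * csio_hw_write_flash - write up to a page of data to the serial flash
 * @hw: the HW module
 * @addr: the start address to write
 * @n: length of data to write in bytes
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address. All of the data must be written to the same page.
 */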
static int
csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,
		    uint32_t n, const uint8_t *data)
{
	int ret = -EINVAL;
	uint32_t buf[64];
	uint32_t i, c, left, val, offset = addr & 0xff;

	if (addr >= hw->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
	if (ret != 0)
		goto unlock;

	ret = csio_hw_sf1_write(hw, 4, 1, 1, val);
	if (ret != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = csio_hw_sf1_write(hw, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = csio_hw_flash_wait_op(hw, 8, 1);
	if (ret)
		goto unlock;

	csio_wr_reg32(hw, 0, SF_OP);	/* unlock SF */

	/* Read the page back to verify the write succeeded. */
	ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (uint8_t *)buf + offset, n)) {
		csio_err(hw,
			 "failed to correctly write the flash page at %#x\n",
			 addr);
		return -EINVAL;
	}

	return 0;

unlock:
	csio_wr_reg32(hw, 0, SF_OP);	/* unlock SF */
	return ret;
}
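
/*
 * csio_hw_flash_erase_sectors - erase a range of flash sectors
 * @hw: the HW module
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given inclusive range.
 */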
static int
csio_hw_flash_erase_sectors(struct csio_hw *hw, int32_t start, int32_t end)
{
	int ret = 0;

	while (start <= end) {
		ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
		if (ret != 0)
			goto out;

		ret = csio_hw_sf1_write(hw, 4, 0, 1,
					SF_ERASE_SECTOR | (start << 8));
		if (ret != 0)
			goto out;

		ret = csio_hw_flash_wait_op(hw, 14, 500);
		if (ret != 0)
			goto out;

		start++;
	}
out:
	if (ret)
		csio_err(hw, "erase of flash sector %d failed, error %d\n",
			 start, ret);
	csio_wr_reg32(hw, 0, SF_OP);	/* unlock SF */
	/* Propagate any erase failure to the caller. */
	return ret;
}

static void
csio_hw_print_fw_version(struct csio_hw *hw, char *str)
{
	csio_info(hw, "%s: %u.%u.%u.%u\n", str,
		  FW_HDR_FW_VER_MAJOR_GET(hw->fwrev),
		  FW_HDR_FW_VER_MINOR_GET(hw->fwrev),
		  FW_HDR_FW_VER_MICRO_GET(hw->fwrev),
		  FW_HDR_FW_VER_BUILD_GET(hw->fwrev));
}
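
/*
 * csio_hw_get_fw_version - read the firmware version
 * @hw: HW module
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */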
static int
csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers)
{
	return csio_hw_read_flash(hw, FW_IMG_START +
				  offsetof(struct fw_hdr, fw_ver), 1,
				  vers, 0);
}
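
/*
 * csio_hw_get_tp_version - read the TP microcode version
 * @hw: HW module
 * @vers: where to place the version
 *
 * Reads the TP microcode version from flash.
 */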
static int
csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers)
{
	return csio_hw_read_flash(hw, FLASH_FW_START +
				  offsetof(struct fw_hdr, tp_microcode_ver), 1,
				  vers, 0);
}
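
/*
 * csio_hw_check_fw_version - check if the FW is compatible with this driver
 * @hw: HW module
 *
 * Checks if an adapter's FW is compatible with the driver. Returns 0
 * if there's an exact match, a negative error if the version could not
 * be read or there's a major version mismatch.
 */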
static int
csio_hw_check_fw_version(struct csio_hw *hw)
{
	int ret, major, minor, micro;

	ret = csio_hw_get_fw_version(hw, &hw->fwrev);
	if (!ret)
		ret = csio_hw_get_tp_version(hw, &hw->tp_vers);
	if (ret)
		return ret;

	major = FW_HDR_FW_VER_MAJOR_GET(hw->fwrev);
	minor = FW_HDR_FW_VER_MINOR_GET(hw->fwrev);
	micro = FW_HDR_FW_VER_MICRO_GET(hw->fwrev);

	if (major != FW_VERSION_MAJOR(hw)) {	/* major mismatch - fail */
		csio_err(hw, "card FW has major version %u, driver wants %u\n",
			 major, FW_VERSION_MAJOR(hw));
		return -EINVAL;
	}

	if (minor == FW_VERSION_MINOR(hw) && micro == FW_VERSION_MICRO(hw))
		return 0;	/* perfect match */

	/* Minor/micro version mismatch. */
	return -EINVAL;
}
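
/*
 * csio_hw_fw_dload - download firmware to the card's flash
 * @hw: HW module
 * @fw_data: the firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash after
 * validating the image's size, alignment and checksum.
 */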
static int
csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)
{
	uint32_t csum;
	int32_t addr;
	int ret;
	uint32_t i;
	uint8_t first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	struct fw_hdr *hdr = (struct fw_hdr *)fw_data;
	uint32_t sf_sec_size;

	if ((!hw->params.sf_size) || (!hw->params.sf_nsec)) {
		csio_err(hw, "Serial Flash data invalid\n");
		return -EINVAL;
	}

	if (!size) {
		csio_err(hw, "FW image has no data\n");
		return -EINVAL;
	}

	if (size & 511) {
		csio_err(hw, "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}

	if (ntohs(hdr->len512) * 512 != size) {
		csio_err(hw, "FW image size differs from size in FW header\n");
		return -EINVAL;
	}

	if (size > FW_MAX_SIZE) {
		csio_err(hw, "FW image too large, max is %u bytes\n",
			 FW_MAX_SIZE);
		return -EINVAL;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		csio_err(hw, "corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	sf_sec_size = hw->params.sf_size / hw->params.sf_nsec;
	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */

	csio_dbg(hw, "Erasing sectors... start:%d end:%d\n",
		 FW_START_SEC, FW_START_SEC + i - 1);

	ret = csio_hw_flash_erase_sectors(hw, FW_START_SEC,
					  FW_START_SEC + i - 1);
	if (ret) {
		csio_err(hw, "Flash Erase failed\n");
		goto out;
	}

	/*
	 * We write the correct version at the end so the driver can see a
	 * bad version if the FW write fails. Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = csio_hw_write_flash(hw, FW_IMG_START, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	csio_dbg(hw, "Writing Flash .. start:%d end:%d\n",
		 FW_IMG_START, FW_IMG_START + size);

	addr = FW_IMG_START;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = csio_hw_write_flash(hw, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	/* Now write the real version back. */
	ret = csio_hw_write_flash(hw,
				  FW_IMG_START +
					offsetof(struct fw_hdr, fw_ver),
				  sizeof(hdr->fw_ver),
				  (const uint8_t *)&hdr->fw_ver);

out:
	if (ret)
		csio_err(hw, "firmware download failed, error %d\n", ret);
	return ret;
}

static int
csio_hw_get_flash_params(struct csio_hw *hw)
{
	int ret;
	uint32_t info = 0;

	ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = csio_hw_sf1_read(hw, 3, 0, 1, &info);
	csio_wr_reg32(hw, 0, SF_OP);	/* unlock SF */
	if (ret != 0)
		return ret;

	if ((info & 0xff) != 0x20)		/* not a Micron flash */
		return -EINVAL;
	info >>= 16;				/* log2 of size */
	if (info >= 0x14 && info < 0x18)
		hw->params.sf_nsec = 1 << (info - 16);
	else if (info == 0x18)
		hw->params.sf_nsec = 64;
	else
		return -EINVAL;
	hw->params.sf_size = 1 << info;

	return 0;
}

static void
csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range)
{
	uint16_t val;
	int pcie_cap;

	if (!csio_pci_capability(hw->pdev, PCI_CAP_ID_EXP, &pcie_cap)) {
		pci_read_config_word(hw->pdev,
				     pcie_cap + PCI_EXP_DEVCTL2, &val);
		val &= 0xfff0;
		val |= range;
		pci_write_config_word(hw->pdev,
				      pcie_cap + PCI_EXP_DEVCTL2, val);
	}
}
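
/*
 * csio_hw_dev_ready - wait for the device to become ready
 * @hw: HW module
 *
 * Poll PL_WHOAMI until the adapter responds, then record the PF number
 * this function instance is running on.
 */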
static int
csio_hw_dev_ready(struct csio_hw *hw)
{
	uint32_t reg;
	int cnt = 6;

	while (((reg = csio_rd_reg32(hw, PL_WHOAMI)) == 0xFFFFFFFF) &&
	       (--cnt != 0))
		mdelay(100);

	if ((cnt == 0) && (((int32_t)(SOURCEPF_GET(reg)) < 0) ||
			   (SOURCEPF_GET(reg) >= CSIO_MAX_PFN))) {
		csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt);
		return -EIO;
	}

	hw->pfn = SOURCEPF_GET(reg);

	return 0;
}
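
/*
 * csio_do_hello - perform the HELLO FW mailbox command and process response
 * @hw: HW module
 * @state: where to return the device state reported by the firmware
 *
 * Issues FW_HELLO_CMD. If we are not the master PF and the device is
 * still uninitialized, wait for the master PF's firmware initialization
 * to complete (or fail) before returning.
 */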
static int
csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state)
{
	struct csio_mb *mbp;
	int rv = 0;
	enum csio_dev_master master;
	enum fw_retval retval;
	uint8_t mpfn;
	char state_str[16];
	int retries = FW_CMD_HELLO_RETRIES;

	memset(state_str, 0, sizeof(state_str));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		rv = -ENOMEM;
		CSIO_INC_STATS(hw, n_err_nomem);
		goto out;
	}

	master = csio_force_master ? CSIO_MASTER_MUST : CSIO_MASTER_MAY;

retry:
	csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn,
		      hw->pfn, master, NULL);

	rv = csio_mb_issue(hw, mbp);
	if (rv) {
		csio_err(hw, "failed to issue HELLO cmd. ret:%d.\n", rv);
		goto out_free_mb;
	}

	csio_mb_process_hello_rsp(hw, mbp, &retval, state, &mpfn);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "HELLO cmd failed with ret: %d\n", retval);
		rv = -EINVAL;
		goto out_free_mb;
	}

	/* Firmware has designated us to be master. */
	if (hw->pfn == mpfn) {
		hw->flags |= CSIO_HWF_MASTER;
	} else if (*state == CSIO_DEV_STATE_UNINIT) {
		/*
		 * If we're not the Master PF then we need to wait around for
		 * the Master PF Driver to finish setting up the adapter.
		 *
		 * Note that we also do this wait if we're a non-Master-capable
		 * PF and there is no current Master PF; a Master PF may show
		 * up momentarily and we wouldn't want to fail pointlessly.
		 * (This can happen when an OS loads lots of different drivers
		 * rapidly at the same time.) In this case, the Master PF
		 * returned by the firmware will be PCIE_FW_MASTER_MASK so the
		 * test below will work.
		 */
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state. If we see either of these we bail out
		 * and report the issue to the caller. If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again. Otherwise bail with a timeout error.
		 */
		for (;;) {
			uint32_t pcie_fw;

			spin_unlock_irq(&hw->lock);
			msleep(50);
			spin_lock_irq(&hw->lock);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated by
			 * the firmware, keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = csio_rd_reg32(hw, PCIE_FW);
			if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					rv = -ETIMEDOUT;
					break;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition;
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & PCIE_FW_ERR) {
					*state = CSIO_DEV_STATE_ERR;
					rv = -ETIMEDOUT;
				} else if (pcie_fw & PCIE_FW_INIT)
					*state = CSIO_DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's now a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (mpfn == PCIE_FW_MASTER_MASK &&
			    (pcie_fw & PCIE_FW_MASTER_VLD))
				mpfn = PCIE_FW_MASTER_GET(pcie_fw);
			break;
		}
		hw->flags &= ~CSIO_HWF_MASTER;
	}

	switch (*state) {
	case CSIO_DEV_STATE_UNINIT:
		strcpy(state_str, "Initializing");
		break;
	case CSIO_DEV_STATE_INIT:
		strcpy(state_str, "Initialized");
		break;
	case CSIO_DEV_STATE_ERR:
		strcpy(state_str, "Error");
		break;
	default:
		strcpy(state_str, "Unknown");
		break;
	}

	if (hw->pfn == mpfn)
		csio_info(hw, "PF: %d, Coming up as MASTER, HW state: %s\n",
			  hw->pfn, state_str);
	else
		csio_info(hw,
			  "PF: %d, Coming up as SLAVE, Master PF: %d, HW state: %s\n",
			  hw->pfn, mpfn, state_str);

out_free_mb:
	mempool_free(mbp, hw->mb_mempool);
out:
	return rv;
}
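
/*
 * csio_do_bye - perform the BYE FW mailbox command and process response
 * @hw: HW module
 *
 * Tells the firmware that this driver instance is going away.
 */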
static int
csio_do_bye(struct csio_hw *hw)
{
	struct csio_mb *mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of BYE command failed\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
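
/*
 * csio_do_reset - perform a reset of the adapter
 * @hw: HW module
 * @fw_rst: FW reset
 *
 * If @fw_rst is set, issues a FW_RESET_CMD mailbox command, otherwise
 * performs a PIO reset via the PL_RST register (used when the firmware
 * cannot be relied upon).
 */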
static int
csio_do_reset(struct csio_hw *hw, bool fw_rst)
{
	struct csio_mb *mbp;
	enum fw_retval retval;

	if (!fw_rst) {
		/* PIO reset */
		csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
		mdelay(2000);
		return 0;
	}

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
		      PIORSTMODE | PIORST, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of RESET command failed.\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "RESET cmd failed with ret:0x%x.\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

static int
csio_hw_validate_caps(struct csio_hw *hw, struct csio_mb *mbp)
{
	struct fw_caps_config_cmd *rsp = (struct fw_caps_config_cmd *)mbp->mb;
	uint16_t caps;

	caps = ntohs(rsp->fcoecaps);

	if (!(caps & FW_CAPS_CONFIG_FCOE_INITIATOR)) {
		csio_err(hw, "No FCoE Initiator capability in the firmware.\n");
		return -EINVAL;
	}

	if (!(caps & FW_CAPS_CONFIG_FCOE_CTRL_OFLD)) {
		csio_err(hw, "No FCoE Control Offload capability\n");
		return -EINVAL;
	}

	return 0;
}
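
/*
 * csio_hw_fw_halt - issue a reset/halt to FW and put the uP into RESET
 * @hw: the HW module
 * @mbox: a mailbox which can be used to communicate with the firmware,
 *        or PCIE_FW_MASTER_MASK + 1 if none is available
 * @force: force uP into RESET even if the FW RESET command fails
 *
 * Issues a RESET command to firmware (if desired) with a HALT indication
 * and then puts the microprocessor into RESET state. The RESET command
 * will only be issued if a legitimate mailbox is provided.
 */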
static int
csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
{
	enum fw_retval retval = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= PCIE_FW_MASTER_MASK) {
		struct csio_mb *mbp;

		mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
		if (!mbp) {
			CSIO_INC_STATS(hw, n_err_nomem);
			return -ENOMEM;
		}

		csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
			      PIORSTMODE | PIORST, FW_RESET_CMD_HALT(1),
			      NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "Issue of RESET command failed!\n");
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		retval = csio_mb_fw_retval(mbp);
		mempool_free(mbp, hw->mb_mempool);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put
	 * the uP into RESET. This can be useful if the firmware is hung
	 * or even missing. We'll have to take the risk of putting the uP
	 * into RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we
	 * bypassed the firmware RESET command above or we're dealing with
	 * old firmware which doesn't have the HALT capability. This will
	 * serve as a flag for the incoming firmware to know that it's
	 * coming out of a HALT rather than a RESET ... if it's new enough
	 * to understand that ...
	 */
	if (retval == 0 || force) {
		csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, UPCRST);
		csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, PCIE_FW_HALT);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return retval ? -EINVAL : 0;
}
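
/*
 * csio_hw_fw_restart - restart the firmware by taking the uP out of RESET
 * @hw: the HW module
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @reset: if we want to do a RESET to restart things
 *
 * Restart firmware previously halted by csio_hw_fw_halt(). On successful
 * return the previous PF Master remains as the new PF Master and there
 * is no need to issue a new HELLO command, etc.
 */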
static int
csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET. If that works, great and we can
		 * return success. Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= PCIE_FW_MASTER_MASK) {
			csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0);
			msleep(100);
			if (csio_do_reset(hw, true) == 0)
				return 0;
		}

		csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
		msleep(2000);
	} else {
		int ms;

		csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(csio_rd_reg32(hw, PCIE_FW) & PCIE_FW_HALT))
				return 0;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}
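
/*
 * csio_hw_fw_upgrade - perform all of the steps necessary to upgrade FW
 * @hw: the HW module
 * @mbox: mailbox to use, or PCIE_FW_MASTER_MASK + 1 if none is available
 * @fw_data: the firmware image to write
 * @size: image size
 * @force: force upgrade even if the firmware doesn't cooperate
 *
 * Halt the firmware, write the new image to flash, then restart the
 * firmware.
 */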
static int
csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox,
		   const u8 *fw_data, uint32_t size, int32_t force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	int reset, ret;

	ret = csio_hw_fw_halt(hw, mbox, force);
	if (ret != 0 && !force)
		return ret;

	ret = csio_hw_fw_dload(hw, (uint8_t *) fw_data, size);
	if (ret != 0)
		return ret;

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart. So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate. We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	return csio_hw_fw_restart(hw, mbox, reset);
}
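
/*
 * csio_hw_fw_config_file - set up the firmware via a Configuration File
 * @hw: the HW module
 * @mtype: the memory type where the Configuration File resides
 * @maddr: the memory address of the Configuration File
 * @finiver: return value for CF [fini] version
 * @finicsum: return value for CF [fini] checksum
 * @cfcsum: return value for CF computed checksum
 *
 * Issue a FW_CAPS_CONFIG command to have the firmware process the
 * indicated Configuration File, validate the resulting device
 * capabilities, and then tell the firmware to use them.
 */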
static int
csio_hw_fw_config_file(struct csio_hw *hw,
		       unsigned int mtype, unsigned int maddr,
		       uint32_t *finiver, uint32_t *finicsum, uint32_t *cfcsum)
{
	struct csio_mb *mbp;
	struct fw_caps_config_cmd *caps_cmd;
	int rv = -EINVAL;
	enum fw_retval ret;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/*
	 * Tell the firmware to process the indicated Configuration File.
	 * If there are no errors and the caller has provided return value
	 * pointers for the [fini] section version, checksum and computed
	 * checksum, pass those back to the caller.
	 */
	caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb);
	CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
	caps_cmd->op_to_write =
		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST |
		      FW_CMD_READ);
	caps_cmd->cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
		      FW_LEN16(*caps_cmd));

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n");
		goto out;
	}

	ret = csio_mb_fw_retval(mbp);
	if (ret != FW_SUCCESS) {
		csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", ret);
		goto out;
	}

	if (finiver)
		*finiver = ntohl(caps_cmd->finiver);
	if (finicsum)
		*finicsum = ntohl(caps_cmd->finicsum);
	if (cfcsum)
		*cfcsum = ntohl(caps_cmd->cfcsum);

	/* Validate device capabilities */
	if (csio_hw_validate_caps(hw, mbp)) {
		rv = -ENOENT;
		goto out;
	}

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd->op_to_write =
		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST |
		      FW_CMD_WRITE);
	caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n");
		goto out;
	}

	ret = csio_mb_fw_retval(mbp);
	if (ret != FW_SUCCESS) {
		csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", ret);
		goto out;
	}

	rv = 0;
out:
	mempool_free(mbp, hw->mb_mempool);
	return rv;
}
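
/*
 * csio_get_device_params - get device parameters from the firmware
 * @hw: the HW module
 *
 * Reads the port vector, core clock and the queue index ranges assigned
 * to this PF via FW_PARAMS_CMD, then initializes the physical port table.
 */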
static int
csio_get_device_params(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_mb *mbp;
	enum fw_retval retval;
	u32 param[6];
	int i, j = 0;

	/* Initialize portids to -1 */
	for (i = 0; i < CSIO_MAX_PPORTS; i++)
		hw->pport[i].portid = -1;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get port vec information. */
	param[0] = FW_PARAM_DEV(PORTVEC);

	/* Get Core clock. */
	param[1] = FW_PARAM_DEV(CCLK);

	/* Get EQ id start and end. */
	param[2] = FW_PARAM_PFVF(EQ_START);
	param[3] = FW_PARAM_PFVF(EQ_END);

	/* Get IQ id start and end. */
	param[4] = FW_PARAM_PFVF(IQFLINT_START);
	param[5] = FW_PARAM_PFVF(IQFLINT_END);

	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
		       ARRAY_SIZE(param), param, NULL, false, NULL);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_mb_process_read_params_rsp(hw, mbp, &retval,
					ARRAY_SIZE(param), param);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	/* Cache the information. */
	hw->port_vec = param[0];
	hw->vpd.cclk = param[1];
	wrm->fw_eq_start = param[2];
	wrm->fw_iq_start = param[4];

	/* Using FW configured max iqs & eqs */
	if ((hw->flags & CSIO_HWF_USING_SOFT_PARAMS) ||
	    !csio_is_hw_master(hw)) {
		hw->cfg_niq = param[5] - param[4] + 1;
		hw->cfg_neq = param[3] - param[2] + 1;
		csio_dbg(hw, "Using fwconfig max niqs %d neqs %d\n",
			 hw->cfg_niq, hw->cfg_neq);
	}

	hw->port_vec &= csio_port_mask;

	hw->num_pports = hweight32(hw->port_vec);

	csio_dbg(hw, "Port vector: 0x%x, #ports: %d\n",
		 hw->port_vec, hw->num_pports);

	for (i = 0; i < hw->num_pports; i++) {
		while ((hw->port_vec & (1 << j)) == 0)
			j++;
		hw->pport[i].portid = j++;
		csio_dbg(hw, "Found Port:%d\n", hw->pport[i].portid);
	}
	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
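
/*
 * csio_config_device_caps - read and configure device capabilities
 * @hw: the HW module
 *
 * Reads the device capabilities from the firmware, validates that FCoE
 * initiator mode is available and, unless the device is already
 * initialized, writes the desired capabilities back.
 */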
static int
csio_config_device_caps(struct csio_hw *hw)
{
	struct csio_mb *mbp;
	enum fw_retval retval;
	int rv = -EINVAL;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get device capabilities */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 0, 0, 0, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(r) failed!\n");
		goto out;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_CAPS_CONFIG_CMD(r) returned %d!\n", retval);
		goto out;
	}

	/* Validate device capabilities */
	if (csio_hw_validate_caps(hw, mbp))
		goto out;

	/* Don't config device capabilities if already configured */
	if (hw->fw_state == CSIO_DEV_STATE_INIT) {
		rv = 0;
		goto out;
	}

	/* Write back desired device capabilities */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, true, true,
			    false, true, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(w) failed!\n");
		goto out;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_CAPS_CONFIG_CMD(w) returned %d!\n", retval);
		goto out;
	}

	rv = 0;
out:
	mempool_free(mbp, hw->mb_mempool);
	return rv;
}
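
/*
 * csio_enable_ports - bring up all available ports
 * @hw: HW module
 *
 * For each detected port, read the port capabilities and then enable
 * the port with Rx/Tx pause.
 */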
static int
csio_enable_ports(struct csio_hw *hw)
{
	struct csio_mb *mbp;
	enum fw_retval retval;
	uint8_t portid;
	int i;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	for (i = 0; i < hw->num_pports; i++) {
		portid = hw->pport[i].portid;

		/* Read PORT information */
		csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid,
			     false, 0, 0, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "failed to issue FW_PORT_CMD(r) port:%d\n",
				 portid);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		csio_mb_process_read_port_rsp(hw, mbp, &retval,
					      &hw->pport[i].pcap);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_PORT_CMD(r) port:%d failed: 0x%x\n",
				 portid, retval);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		/* Write back PORT information with pause enabled */
		csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, true,
			     (PAUSE_RX | PAUSE_TX), hw->pport[i].pcap, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "failed to issue FW_PORT_CMD(w) port:%d\n",
				 portid);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		retval = csio_mb_fw_retval(mbp);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_PORT_CMD(w) port:%d failed :0x%x\n",
				 portid, retval);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}
	}

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
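
/*
 * csio_get_fcoe_resinfo - read FCoE resource information from the firmware
 * @hw: HW module
 *
 * Issues FW_FCOE_RES_INFO_CMD and caches the returned exchange, session,
 * FCF and VNP limits in the HW structure.
 */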
static int
csio_get_fcoe_resinfo(struct csio_hw *hw)
{
	struct csio_fcoe_res_info *res_info = &hw->fres_info;
	struct fw_fcoe_res_info_cmd *rsp;
	struct csio_mb *mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get FCoE FW resource information */
	csio_fcoe_read_res_info_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "failed to issue FW_FCOE_RES_INFO_CMD\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb);
	retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	res_info->e_d_tov = ntohs(rsp->e_d_tov);
	res_info->r_a_tov_seq = ntohs(rsp->r_a_tov_seq);
	res_info->r_a_tov_els = ntohs(rsp->r_a_tov_els);
	res_info->r_r_tov = ntohs(rsp->r_r_tov);
	res_info->max_xchgs = ntohl(rsp->max_xchgs);
	res_info->max_ssns = ntohl(rsp->max_ssns);
	res_info->used_xchgs = ntohl(rsp->used_xchgs);
	res_info->used_ssns = ntohl(rsp->used_ssns);
	res_info->max_fcfs = ntohl(rsp->max_fcfs);
	res_info->max_vnps = ntohl(rsp->max_vnps);
	res_info->used_fcfs = ntohl(rsp->used_fcfs);
	res_info->used_vnps = ntohl(rsp->used_vnps);

	csio_dbg(hw, "max ssns:%d max xchgs:%d\n", res_info->max_ssns,
		 res_info->max_xchgs);
	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

static int
csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param)
{
	struct csio_mb *mbp;
	enum fw_retval retval;
	u32 _param[1];

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/*
	 * Find out whether we're dealing with a version of
	 * the firmware which has configuration file support.
	 */
	_param[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));

	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
		       ARRAY_SIZE(_param), _param, NULL, false, NULL);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_mb_process_read_params_rsp(hw, mbp, &retval,
					ARRAY_SIZE(_param), _param);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);
	*param = _param[0];

	return 0;
}

static int
csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
{
	int ret = 0;
	const struct firmware *cf;
	struct pci_dev *pci_dev = hw->pdev;
	struct device *dev = &pci_dev->dev;
	unsigned int mtype = 0, maddr = 0;
	uint32_t *cfg_data;
	int value_to_add = 0;

	ret = request_firmware(&cf, CSIO_CF_FNAME(hw), dev);
	if (ret < 0) {
		csio_err(hw, "could not find config file %s, err: %d\n",
			 CSIO_CF_FNAME(hw), ret);
		return -ENOENT;
	}

	/* The config data written to card memory must be 4-byte aligned. */
	if (cf->size % 4 != 0)
		value_to_add = 4 - (cf->size % 4);

	cfg_data = kzalloc(cf->size + value_to_add, GFP_KERNEL);
	if (cfg_data == NULL) {
		ret = -ENOMEM;
		goto leave;
	}

	memcpy((void *)cfg_data, (const void *)cf->data, cf->size);
	if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) {
		ret = -EINVAL;
		goto leave;
	}

	mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param);
	maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16;

	ret = csio_memory_write(hw, mtype, maddr,
				cf->size + value_to_add, cfg_data);

	if ((ret == 0) && (value_to_add != 0)) {
		union {
			u32 word;
			char buf[4];
		} last;
		size_t size = cf->size & ~0x3;
		int i;

		last.word = cfg_data[size >> 2];
		for (i = value_to_add; i < 4; i++)
			last.buf[i] = 0;
		ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word);
	}
	if (ret == 0) {
		csio_info(hw, "config file upgraded to %s\n",
			  CSIO_CF_FNAME(hw));
		snprintf(path, 64, "%s%s", "/lib/firmware/", CSIO_CF_FNAME(hw));
	}

leave:
	kfree(cfg_data);
	release_firmware(cf);
	return ret;
}
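
/*
 * csio_hw_use_fwconfig - initialize the adapter via a Firmware
 *                        Configuration File
 * @hw: the HW module
 * @reset: whether to do a RESET before processing the config file
 * @fw_cfg_param: where to return the config-file FW_PARAMS value
 *
 * Attempt to initialize the adapter via a "Firmware Configuration File",
 * using the local file if present, otherwise the version flashed on the
 * adapter.
 */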
static int
csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
{
	unsigned int mtype, maddr;
	int rv;
	uint32_t finiver = 0, finicsum = 0, cfcsum = 0;
	int using_flash;
	char path[64];

	/*
	 * Reset device if necessary
	 */
	if (reset) {
		rv = csio_do_reset(hw, true);
		if (rv != 0)
			goto bye;
	}

	/*
	 * If we have a configuration file in host, then use that.
	 * Otherwise, use the configuration file stored in the HW flash.
	 */
	spin_unlock_irq(&hw->lock);
	rv = csio_hw_flash_config(hw, fw_cfg_param, path);
	spin_lock_irq(&hw->lock);
	if (rv != 0) {
		if (rv == -ENOENT) {
			/*
			 * config file was not found. Use default
			 * config file from flash.
			 */
			mtype = FW_MEMTYPE_CF_FLASH;
			maddr = hw->chip_ops->chip_flash_cfg_addr(hw);
			using_flash = 1;
		} else {
			/*
			 * we revert back to the hardwired config if
			 * flashing failed.
			 */
			goto bye;
		}
	} else {
		mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param);
		maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16;
		using_flash = 0;
	}

	hw->cfg_store = (uint8_t)mtype;

	/*
	 * Issue a Capability Configuration command to the firmware to get
	 * it to parse the Configuration File.
	 */
	rv = csio_hw_fw_config_file(hw, mtype, maddr, &finiver,
				    &finicsum, &cfcsum);
	if (rv != 0)
		goto bye;

	hw->cfg_finiver = finiver;
	hw->cfg_finicsum = finicsum;
	hw->cfg_cfcsum = cfcsum;
	hw->cfg_csum_status = true;

	if (finicsum != cfcsum) {
		csio_warn(hw,
			  "Config File checksum mismatch: csum=%#x, computed=%#x\n",
			  finicsum, cfcsum);

		hw->cfg_csum_status = false;
	}

	/*
	 * Note that we're operating with parameters not supplied by the
	 * driver, rather than from hard-wired initialization constants
	 * buried in the driver.
	 */
	hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;

	/* device parameters */
	rv = csio_get_device_params(hw);
	if (rv != 0)
		goto bye;

	/* Configure SGE */
	csio_wr_sge_init(hw);

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	csio_post_event(&hw->sm, CSIO_HWE_INIT);

	csio_info(hw,
		  "Firmware Configuration File %s, version %#x, computed checksum %#x\n",
		  (using_flash ? "in device FLASH" : path), finiver, cfcsum);

	return 0;

	/*
	 * Something bad happened. Return the error ...
	 */
bye:
	hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS;
	csio_dbg(hw, "Configuration file error %d\n", rv);
	return rv;
}
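
/*
 * csio_hw_no_fwconfig - initialize the adapter without a config file
 * @hw: the HW module
 * @reset: whether to do a RESET first
 *
 * Attempt to initialize the adapter with hard-wired driver parameters
 * when no Firmware Configuration File is available.
 */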
static int
csio_hw_no_fwconfig(struct csio_hw *hw, int reset)
{
	int rv;

	/* Reset device if necessary */
	if (reset) {
		rv = csio_do_reset(hw, true);
		if (rv != 0)
			goto out;
	}

	/* Get and set device capabilities */
	rv = csio_config_device_caps(hw);
	if (rv != 0)
		goto out;

	/* device parameters */
	rv = csio_get_device_params(hw);
	if (rv != 0)
		goto out;

	/* Configure SGE */
	csio_wr_sge_init(hw);

	/* Post event to notify completion of configuration */
	csio_post_event(&hw->sm, CSIO_HWE_INIT);

out:
	return rv;
}
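
/*
 * csio_hw_flash_fw - flash the driver's bundled firmware onto the adapter
 * @hw: HW module
 *
 * Requests the firmware image from userspace, checks that its major
 * version matches what the driver supports, and upgrades the adapter's
 * flash if the image is newer than what is currently installed.
 */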
static int
csio_hw_flash_fw(struct csio_hw *hw)
{
	int ret;
	const struct firmware *fw;
	const struct fw_hdr *hdr;
	u32 fw_ver;
	struct pci_dev *pci_dev = hw->pdev;
	struct device *dev = &pci_dev->dev;

	ret = request_firmware(&fw, CSIO_FW_FNAME(hw), dev);
	if (ret < 0) {
		csio_err(hw, "could not find firmware image %s, err: %d\n",
			 CSIO_FW_FNAME(hw), ret);
		return -EINVAL;
	}

	hdr = (const struct fw_hdr *)fw->data;
	fw_ver = ntohl(hdr->fw_ver);
	if (FW_HDR_FW_VER_MAJOR_GET(fw_ver) != FW_VERSION_MAJOR(hw)) {
		release_firmware(fw);
		return -EINVAL;		/* wrong major version, won't do */
	}

	/*
	 * If the flash FW is unusable or we found something newer, load it.
	 */
	if (FW_HDR_FW_VER_MAJOR_GET(hw->fwrev) != FW_VERSION_MAJOR(hw) ||
	    fw_ver > hw->fwrev) {
		ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size,
					 false);
		if (!ret)
			csio_info(hw,
				  "firmware upgraded to version %pI4 from %s\n",
				  &hdr->fw_ver, CSIO_FW_FNAME(hw));
		else
			csio_err(hw, "firmware upgrade failed! err=%d\n", ret);
	} else
		ret = -EINVAL;

	release_firmware(fw);

	return ret;
}
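
/*
 * csio_hw_configure - configure the HW
 * @hw: HW module
 *
 * Called from the CONFIGURING state to bring the adapter up: waits for
 * the device, reads flash/VPD parameters, says HELLO to the firmware,
 * upgrades the firmware if required, and applies the configuration file
 * or hard-wired parameters.
 */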
static void
csio_hw_configure(struct csio_hw *hw)
{
	int reset = 1;
	int rv;
	u32 param[1];

	rv = csio_hw_dev_ready(hw);
	if (rv != 0) {
		CSIO_INC_STATS(hw, n_err_fatal);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* HW version */
	hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV);

	/* Needed for FW download */
	rv = csio_hw_get_flash_params(hw);
	if (rv != 0) {
		csio_err(hw, "Failed to get serial flash params rv:%d\n", rv);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* Set PCIe completion timeout to 4 seconds */
	csio_set_pcie_completion_timeout(hw, 0xd);

	hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR);

	rv = csio_hw_get_fw_version(hw, &hw->fwrev);
	if (rv != 0)
		goto out;

	csio_hw_print_fw_version(hw, "Firmware revision");

	rv = csio_do_hello(hw, &hw->fw_state);
	if (rv != 0) {
		CSIO_INC_STATS(hw, n_err_fatal);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* Read vpd */
	rv = csio_hw_get_vpd_params(hw, &hw->vpd);
	if (rv != 0)
		goto out;

	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
		rv = csio_hw_check_fw_version(hw);
		if (rv == -EINVAL) {
			/* Do firmware update */
			spin_unlock_irq(&hw->lock);
			rv = csio_hw_flash_fw(hw);
			spin_lock_irq(&hw->lock);

			if (rv == 0) {
				reset = 0;
				/*
				 * Note that the chip was reset as part of the
				 * firmware upgrade so we don't reset it again
				 * below and grab the new firmware version.
				 */
				rv = csio_hw_check_fw_version(hw);
			}
		}

		/*
		 * If the firmware doesn't support Configuration Files,
		 * use the old Driver-based, hard-wired initialization.
		 * Otherwise, try using the Configuration File support
		 * and fall back to the Driver-based initialization if
		 * there's no Configuration File found.
		 */
		if (csio_hw_check_fwconfig(hw, param) == 0) {
			rv = csio_hw_use_fwconfig(hw, reset, param);
			if (rv == -ENOENT)
				goto out;
			if (rv != 0) {
				csio_info(hw,
					  "No Configuration File present "
					  "on adapter. Using hard-wired "
					  "configuration parameters.\n");
				rv = csio_hw_no_fwconfig(hw, reset);
			}
		} else {
			rv = csio_hw_no_fwconfig(hw, reset);
		}

		if (rv != 0)
			goto out;

	} else {
		if (hw->fw_state == CSIO_DEV_STATE_INIT) {

			hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;

			/* device parameters */
			rv = csio_get_device_params(hw);
			if (rv != 0)
				goto out;

			/* Get device capabilities */
			rv = csio_config_device_caps(hw);
			if (rv != 0)
				goto out;

			/* Configure SGE */
			csio_wr_sge_init(hw);

			/* Post event to notify completion of configuration */
			csio_post_event(&hw->sm, CSIO_HWE_INIT);
			goto out;
		}
	}

out:
	return;
}
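
/*
 * csio_hw_initialize - send FW_INITIALIZE and set up queues/ports
 * @hw: HW module
 *
 * Called from the INITIALIZING state. The master PF tells the firmware
 * to initialize, then the driver reads the FCoE resource limits,
 * configures its queues and enables the physical ports.
 */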
static void
csio_hw_initialize(struct csio_hw *hw)
{
	struct csio_mb *mbp;
	enum fw_retval retval;
	int rv;
	int i;

	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
		mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
		if (!mbp)
			goto out;

		csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "Issue of FW_INITIALIZE_CMD failed!\n");
			goto free_and_out;
		}

		retval = csio_mb_fw_retval(mbp);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_INITIALIZE_CMD returned 0x%x!\n",
				 retval);
			goto free_and_out;
		}

		mempool_free(mbp, hw->mb_mempool);
	}

	rv = csio_get_fcoe_resinfo(hw);
	if (rv != 0) {
		csio_err(hw, "Failed to read fcoe resource info: %d\n", rv);
		goto out;
	}

	spin_unlock_irq(&hw->lock);
	rv = csio_config_queues(hw);
	spin_lock_irq(&hw->lock);

	if (rv != 0) {
		csio_err(hw, "Config of queues failed!: %d\n", rv);
		goto out;
	}

	for (i = 0; i < hw->num_pports; i++)
		hw->pport[i].mod_type = FW_PORT_MOD_TYPE_NA;

	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
		rv = csio_enable_ports(hw);
		if (rv != 0) {
			csio_err(hw, "Failed to enable ports: %d\n", rv);
			goto out;
		}
	}

	csio_post_event(&hw->sm, CSIO_HWE_INIT_DONE);
	return;

free_and_out:
	mempool_free(mbp, hw->mb_mempool);
out:
	return;
}

#define PF_INTR_MASK (PFSW | PFCIM)
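
/*
 * csio_hw_intr_enable - enable HW interrupts
 * @hw: the HW module
 *
 * Enable interrupts for this PF and, if this is the master function,
 * the common SGE/PL error interrupts as well.
 */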
static void
csio_hw_intr_enable(struct csio_hw *hw)
{
	uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw));
	uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI));
	uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE);

	/*
	 * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up
	 * by FW, so do nothing for INTX.
	 */
	if (hw->intr_mode == CSIO_IM_MSIX)
		csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG),
				   AIVEC(AIVEC_MASK), vec);
	else if (hw->intr_mode == CSIO_IM_MSI)
		csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG),
				   AIVEC(AIVEC_MASK), 0);

	csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE));

	/* Turn on MB interrupts - this will internally flush PIO as well */
	csio_mb_intr_enable(hw);

	/* These are common registers - only a master can modify them */
	if (csio_is_hw_master(hw)) {
		/*
		 * Disable the Serial FLASH interrupt, if enabled!
		 */
		pl &= (~SF);
		csio_wr_reg32(hw, pl, PL_INT_ENABLE);

		csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE |
			      EGRESS_SIZE_ERR | ERR_INVALID_CIDX_INC |
			      ERR_CPL_OPCODE_0 | ERR_DROPPED_DB |
			      ERR_DATA_CPL_ON_HIGH_QID1 |
			      ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
			      ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
			      ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
			      ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR,
			      SGE_INT_ENABLE3);
		csio_set_reg_field(hw, PL_INT_MAP0, 0, 1 << pf);
	}

	hw->flags |= CSIO_HWF_HW_INTR_ENABLED;
}
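
/*
 * csio_hw_intr_disable - disable HW interrupts
 * @hw: the HW module
 *
 * Turn off mailbox and PCI interrupts for this PF.
 */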
void
csio_hw_intr_disable(struct csio_hw *hw)
{
	uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI));

	if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED))
		return;

	hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED;

	csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE));
	if (csio_is_hw_master(hw))
		csio_set_reg_field(hw, PL_INT_MAP0, 1 << pf, 0);

	/* Turn off MB interrupts */
	csio_mb_intr_disable(hw);
}

void
csio_hw_fatal_err(struct csio_hw *hw)
{
	csio_set_reg_field(hw, SGE_CONTROL, GLOBALENABLE, 0);
	csio_hw_intr_disable(hw);

	/* Do not reset HW, we may need FW state for debugging */
	csio_fatal(hw, "HW Fatal error encountered!\n");
}
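
/*****************************************************************************/
/* START: HW SM                                                              */
/*****************************************************************************/

/*
 * csio_hws_uninit - Uninit state
 * @hw: HW module
 * @evt: Event
 */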
static void
csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_CFG:
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
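
/*
 * csio_hws_configuring - Configuring state
 * @hw: HW module
 * @evt: Event
 */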
static void
csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_INIT:
		csio_set_state(&hw->sm, csio_hws_initializing);
		csio_hw_initialize(hw);
		break;

	case CSIO_HWE_INIT_DONE:
		csio_set_state(&hw->sm, csio_hws_ready);
		/* Fan out event to all lnode SMs */
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	case CSIO_HWE_PCI_REMOVE:
		csio_do_bye(hw);
		break;
	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
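
/*
 * csio_hws_initializing - Initializing state
 * @hw: HW module
 * @evt: Event
 */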
static void
csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_INIT_DONE:
		csio_set_state(&hw->sm, csio_hws_ready);

		/* Fan out event to all lnode SMs */
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);

		/* Enable interrupts */
		csio_hw_intr_enable(hw);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	case CSIO_HWE_PCI_REMOVE:
		csio_do_bye(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
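
/*
 * csio_hws_ready - Ready state
 * @hw: HW module
 * @evt: Event
 */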
static void
csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt)
{
	/* Remember the event */
	hw->evtflag = evt;

	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_HBA_RESET:
	case CSIO_HWE_FW_DLOAD:
	case CSIO_HWE_SUSPEND:
	case CSIO_HWE_PCI_REMOVE:
	case CSIO_HWE_PCIERR_DETECTED:
		csio_set_state(&hw->sm, csio_hws_quiescing);
		/* Cleanup all outstanding I/Os */
		if (evt == CSIO_HWE_HBA_RESET ||
		    evt == CSIO_HWE_PCIERR_DETECTED)
			csio_scsim_cleanup_io(csio_hw_to_scsim(hw), false);
		else
			csio_scsim_cleanup_io(csio_hw_to_scsim(hw), true);

		csio_hw_intr_disable(hw);
		csio_hw_mbm_cleanup(hw);
		csio_evtq_stop(hw);
		csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWSTOP);
		csio_evtq_flush(hw);
		csio_mgmtm_cleanup(csio_hw_to_mgmtm(hw));
		csio_post_event(&hw->sm, CSIO_HWE_QUIESCED);
		break;

	case CSIO_HWE_FATAL:
		csio_set_state(&hw->sm, csio_hws_uninit);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
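
/*
 * csio_hws_quiescing - Quiescing state
 * @hw: HW module
 * @evt: Event
 */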
static void
csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_QUIESCED:
		switch (hw->evtflag) {
		case CSIO_HWE_FW_DLOAD:
			csio_set_state(&hw->sm, csio_hws_resetting);
			/* Download firmware */
			/* Fall through */

		case CSIO_HWE_HBA_RESET:
			csio_set_state(&hw->sm, csio_hws_resetting);
			/* Start reset of the HBA */
			csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWRESET);
			csio_wr_destroy_queues(hw, false);
			csio_do_reset(hw, false);
			csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET_DONE);
			break;

		case CSIO_HWE_PCI_REMOVE:
			csio_set_state(&hw->sm, csio_hws_removing);
			csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREMOVE);
			csio_wr_destroy_queues(hw, true);
			/* Now send the bye command */
			csio_do_bye(hw);
			break;

		case CSIO_HWE_SUSPEND:
			csio_set_state(&hw->sm, csio_hws_quiesced);
			break;

		case CSIO_HWE_PCIERR_DETECTED:
			csio_set_state(&hw->sm, csio_hws_pcierr);
			csio_wr_destroy_queues(hw, false);
			break;

		default:
			CSIO_INC_STATS(hw, n_evt_unexp);
			break;
		}
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
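
/*
 * csio_hws_quiesced - Quiesced state
 * @hw: HW module
 * @evt: Event
 */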
static void
csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_RESUME:
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
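
/*
 * csio_hws_resetting - HW Resetting state
 * @hw: HW module
 * @evt: Event
 */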
static void
csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_HBA_RESET_DONE:
		csio_evtq_start(hw);
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
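
/*
 * csio_hws_removing - PCI hotplug removing state
 * @hw: HW module
 * @evt: Event
 */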
static void
csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_HBA_RESET:
		if (!csio_is_hw_master(hw))
			break;
		/*
		 * The BYE should have already been issued, so we can't
		 * use the mailbox interface. Hence we use the PL_RST
		 * register directly.
		 */
		csio_err(hw, "Resetting HW and waiting 2 seconds...\n");
		csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
		mdelay(2000);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
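
/*
 * csio_hws_pcierr - PCI error state
 * @hw: HW module
 * @evt: Event
 */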
static void
csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt)
{
	hw->prev_evt = hw->cur_evt;
	hw->cur_evt = evt;
	CSIO_INC_STATS(hw, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_HWE_PCIERR_SLOT_RESET:
		csio_evtq_start(hw);
		csio_set_state(&hw->sm, csio_hws_configuring);
		csio_hw_configure(hw);
		break;

	default:
		CSIO_INC_STATS(hw, n_evt_unexp);
		break;
	}
}
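
/*****************************************************************************/
/* END: HW SM                                                                */
/*****************************************************************************/

/*
 * csio_handle_intr_status - table-driven interrupt handler
 * @hw: HW instance
 * @reg: interrupt status register to process
 * @acts: table of interrupt actions
 *
 * A table-driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred. The actions include
 * optionally emitting a warning or alert message. The table is
 * terminated by an entry specifying mask 0. Returns the number of fatal
 * interrupt conditions.
 */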
int
csio_handle_intr_status(struct csio_hw *hw, unsigned int reg,
			const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = csio_rd_reg32(hw, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			csio_fatal(hw, "Fatal %s (0x%x)\n",
				   acts->msg, status & acts->mask);
		} else if (acts->msg)
			csio_info(hw, "%s (0x%x)\n",
				  acts->msg, status & acts->mask);
		mask |= acts->mask;
	}
	status &= mask;
	if (status)	/* clear processed interrupts */
		csio_wr_reg32(hw, status, reg);
	return fatal;
}

/*
 * TP interrupt handler.
 */
static void csio_tp_intr_handler(struct csio_hw *hw)
{
	static struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, TP_INT_CAUSE, tp_intr_info))
		csio_hw_fatal_err(hw);
}

/*
 * SGE interrupt handler.
 */
static void csio_sge_intr_handler(struct csio_hw *hw)
{
	uint64_t v;

	static struct intr_info sge_intr_info[] = {
		{ ERR_CPL_EXCEED_IQE_SIZE,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ ERR_INVALID_CIDX_INC,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
		{ ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
		{ ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ ERR_ING_CTXT_PRIO,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ ERR_EGR_CTXT_PRIO,
		  "SGE too many priority egress contexts", -1, 0 },
		{ INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
		{ EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
		{ 0, NULL, 0, 0 }
	};

	v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1) |
	    ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2) << 32);
	if (v) {
		csio_fatal(hw, "SGE parity error (%#llx)\n",
			   (unsigned long long)v);
		csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF),
			      SGE_INT_CAUSE1);
		csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2);
	}

	/*
	 * Process (and clear) SGE_INT_CAUSE3 exactly once; a second call
	 * would re-read the register after it was already cleared.
	 */
	v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info);
	if (v != 0)
		csio_hw_fatal_err(hw);
}
#define CIM_OBQ_INTR (OBQULP0PARERR | OBQULP1PARERR | OBQULP2PARERR |\
		      OBQULP3PARERR | OBQSGEPARERR | OBQNCSIPARERR)
#define CIM_IBQ_INTR (IBQTP0PARERR | IBQTP1PARERR | IBQULPPARERR |\
		      IBQSGEHIPARERR | IBQSGELOPARERR | IBQNCSIPARERR)
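/* CIM interrupt handler. */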
static void csio_cim_intr_handler(struct csio_hw *hw)
{
	static struct intr_info cim_intr_info[] = {
		{ PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
		{ MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
		{ MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
		{ TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
		{ TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info cim_upintr_info[] = {
		{ RSVDSPACEINT, "CIM reserved space access", -1, 1 },
		{ ILLTRANSINT, "CIM illegal transaction", -1, 1 },
		{ ILLWRINT, "CIM illegal write", -1, 1 },
		{ ILLRDINT, "CIM illegal read", -1, 1 },
		{ ILLRDBEINT, "CIM illegal read BE", -1, 1 },
		{ ILLWRBEINT, "CIM illegal write BE", -1, 1 },
		{ SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
		{ SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
		{ BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
		{ SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
		{ SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
		{ BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
		{ SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
		{ SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
		{ BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
		{ BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
		{ SGLRDCTLINT, "CIM single read from CTL space", -1, 1 },
		{ SGLWRCTLINT, "CIM single write to CTL space", -1, 1 },
		{ BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
		{ BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
		{ SGLRDPLINT, "CIM single read from PL space", -1, 1 },
		{ SGLWRPLINT, "CIM single write to PL space", -1, 1 },
		{ BLKRDPLINT, "CIM block read from PL space", -1, 1 },
		{ BLKWRPLINT, "CIM block write to PL space", -1, 1 },
		{ REQOVRLOOKUPINT, "CIM request FIFO overwrite", -1, 1 },
		{ RSPOVRLOOKUPINT, "CIM response FIFO overwrite", -1, 1 },
		{ TIMEOUTINT, "CIM PIF timeout", -1, 1 },
		{ TIMEOUTMAINT, "CIM PIF MA timeout", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	int fat;

	fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE,
				      cim_intr_info) +
	      csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE,
				      cim_upintr_info);
	if (fat)
		csio_hw_fatal_err(hw);
}
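/* ULP RX interrupt handler. */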
static void csio_ulprx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info ulprx_intr_info[] = {
		{ 0x1800000, "ULPRX context error", -1, 1 },
		{ 0x7fffff, "ULPRX parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE, ulprx_intr_info))
		csio_hw_fatal_err(hw);
}
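/* ULP TX interrupt handler. */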
static void csio_ulptx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info ulptx_intr_info[] = {
		{ PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
		  0 },
		{ 0xfffffff, "ULPTX parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE, ulptx_intr_info))
		csio_hw_fatal_err(hw);
}
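/* PM TX interrupt handler. */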
static void csio_pmtx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info pmtx_intr_info[] = {
		{ PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
		{ ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
		{ 0xffffff0, "PMTX framing error", -1, 1 },
		{ OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
		  1 },
		{ ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
		{ C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE, pmtx_intr_info))
		csio_hw_fatal_err(hw);
}
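/* PM RX interrupt handler. */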
static void csio_pmrx_intr_handler(struct csio_hw *hw)
{
	static struct intr_info pmrx_intr_info[] = {
		{ ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
		{ 0x3ffff0, "PMRX framing error", -1, 1 },
		{ OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
		  1 },
		{ IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
		{ E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE, pmrx_intr_info))
		csio_hw_fatal_err(hw);
}
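/* CPL switch interrupt handler. */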
static void csio_cplsw_intr_handler(struct csio_hw *hw)
{
	static struct intr_info cplsw_intr_info[] = {
		{ CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
		{ CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
		{ TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
		{ SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
		{ CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
		{ ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, CPL_INTR_CAUSE, cplsw_intr_info))
		csio_hw_fatal_err(hw);
}
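/* LE interrupt handler. */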
static void csio_le_intr_handler(struct csio_hw *hw)
{
	static struct intr_info le_intr_info[] = {
		{ LIPMISS, "LE LIP miss", -1, 0 },
		{ LIP0, "LE 0 LIP error", -1, 0 },
		{ PARITYERR, "LE parity error", -1, 1 },
		{ UNKNOWNCMD, "LE unknown command", -1, 1 },
		{ REQQPARERR, "LE request queue parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE, le_intr_info))
		csio_hw_fatal_err(hw);
}
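/* MPS interrupt handler. */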
static void csio_mps_intr_handler(struct csio_hw *hw)
{
	static struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_tx_intr_info[] = {
		{ TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
		{ NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
		{ TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
		{ BUBBLE, "MPS Tx underflow", -1, 1 },
		{ SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
		{ FRMERR, "MPS Tx framing error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_trc_intr_info[] = {
		{ FILTMEM, "MPS TRC filter parity error", -1, 1 },
		{ PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
		{ MISCPERR, "MPS TRC misc parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	static struct intr_info mps_cls_intr_info[] = {
		{ MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
		{ MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
		{ HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};
	int fat;

	fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE,
				      mps_rx_intr_info) +
	      csio_handle_intr_status(hw, MPS_TX_INT_CAUSE,
				      mps_tx_intr_info) +
	      csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE,
				      mps_trc_intr_info) +
	      csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM,
				      mps_stat_sram_intr_info) +
	      csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
				      mps_stat_tx_intr_info) +
	      csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
				      mps_stat_rx_intr_info) +
	      csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE,
				      mps_cls_intr_info);

	csio_wr_reg32(hw, 0, MPS_INT_CAUSE);
	csio_rd_reg32(hw, MPS_INT_CAUSE); /* flush */
	if (fat)
		csio_hw_fatal_err(hw);
}
#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
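/*
 * csio_mem_intr_handler - EDC/MC interrupt handler.
 * @hw: HW module
 * @idx: memory controller index: MEM_EDC0, MEM_EDC1 or MEM_MC.
 */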
static void csio_mem_intr_handler(struct csio_hw *hw, int idx)
{
	static const char name[3][5] = { "EDC0", "EDC1", "MC" };
	unsigned int addr, cnt_addr, v;

	if (idx <= MEM_EDC1) {
		addr = EDC_REG(EDC_INT_CAUSE, idx);
		cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
	} else {
		addr = MC_INT_CAUSE;
		cnt_addr = MC_ECC_STATUS;
	}

	v = csio_rd_reg32(hw, addr) & MEM_INT_MASK;
	if (v & PERR_INT_CAUSE)
		csio_fatal(hw, "%s FIFO parity error\n", name[idx]);
	if (v & ECC_CE_INT_CAUSE) {
		uint32_t cnt = ECC_CECNT_GET(csio_rd_reg32(hw, cnt_addr));

		csio_wr_reg32(hw, ECC_CECNT_MASK, cnt_addr);
		csio_warn(hw, "%u %s correctable ECC data error%s\n",
			  cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & ECC_UE_INT_CAUSE)
		csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]);

	csio_wr_reg32(hw, v, addr);
	if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
		csio_hw_fatal_err(hw);
}
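/* MA interrupt handler. */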
static void csio_ma_intr_handler(struct csio_hw *hw)
{
	uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE);

	if (status & MEM_PERR_INT_CAUSE)
		csio_fatal(hw, "MA parity error, parity status %#x\n",
			   csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS));
	if (status & MEM_WRAP_INT_CAUSE) {
		v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS);
		csio_fatal(hw,
			   "MA address wrap-around error by client %u to address %#x\n",
			   MEM_WRAP_CLIENT_NUM_GET(v),
			   MEM_WRAP_ADDRESS_GET(v) << 4);
	}
	csio_wr_reg32(hw, status, MA_INT_CAUSE);
	csio_hw_fatal_err(hw);
}
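/* SMB interrupt handler. */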
static void csio_smb_intr_handler(struct csio_hw *hw)
{
	static struct intr_info smb_intr_info[] = {
		{ MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
		{ MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
		{ SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, SMB_INT_CAUSE, smb_intr_info))
		csio_hw_fatal_err(hw);
}
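/* NC-SI interrupt handler. */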
static void csio_ncsi_intr_handler(struct csio_hw *hw)
{
	static struct intr_info ncsi_intr_info[] = {
		{ CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
		{ MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
		{ TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
		{ RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, NCSI_INT_CAUSE, ncsi_intr_info))
		csio_hw_fatal_err(hw);
}
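/* XGMAC interrupt handler for the given port. */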
static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
{
	uint32_t v = csio_rd_reg32(hw, CSIO_MAC_INT_CAUSE_REG(hw, port));

	v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
	if (!v)
		return;

	if (v & TXFIFO_PRTY_ERR)
		csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port);
	if (v & RXFIFO_PRTY_ERR)
		csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port);
	csio_wr_reg32(hw, v, CSIO_MAC_INT_CAUSE_REG(hw, port));
	csio_hw_fatal_err(hw);
}
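/* PL interrupt handler. */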
static void csio_pl_intr_handler(struct csio_hw *hw)
{
	static struct intr_info pl_intr_info[] = {
		{ FATALPERR, "T4 fatal parity error", -1, 1 },
		{ PERRVFID, "PL VFID_MAP parity error", -1, 1 },
		{ 0, NULL, 0, 0 }
	};

	if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE, pl_intr_info))
		csio_hw_fatal_err(hw);
}
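/*
 * csio_hw_slow_intr_handler - control-path ("slow") interrupt handler.
 * @hw: HW module
 *
 * Reads PL_INT_CAUSE, dispatches every asserted cause bit to its
 * per-block handler above, and finally clears the handled bits.
 * Returns 1 if an expected interrupt was serviced, 0 for a stray one.
 */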
int
csio_hw_slow_intr_handler(struct csio_hw *hw)
{
	uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE);

	if (!(cause & CSIO_GLBL_INTR_MASK)) {
		CSIO_INC_STATS(hw, n_plint_unexp);
		return 0;
	}

	csio_dbg(hw, "Slow interrupt! cause: 0x%x\n", cause);

	CSIO_INC_STATS(hw, n_plint_cnt);

	if (cause & CIM)
		csio_cim_intr_handler(hw);

	if (cause & MPS)
		csio_mps_intr_handler(hw);

	if (cause & NCSI)
		csio_ncsi_intr_handler(hw);

	if (cause & PL)
		csio_pl_intr_handler(hw);

	if (cause & SMB)
		csio_smb_intr_handler(hw);

	if (cause & XGMAC0)
		csio_xgmac_intr_handler(hw, 0);

	if (cause & XGMAC1)
		csio_xgmac_intr_handler(hw, 1);

	if (cause & XGMAC_KR0)
		csio_xgmac_intr_handler(hw, 2);

	if (cause & XGMAC_KR1)
		csio_xgmac_intr_handler(hw, 3);

	if (cause & PCIE)
		hw->chip_ops->chip_pcie_intr_handler(hw);

	if (cause & MC)
		csio_mem_intr_handler(hw, MEM_MC);

	if (cause & EDC0)
		csio_mem_intr_handler(hw, MEM_EDC0);

	if (cause & EDC1)
		csio_mem_intr_handler(hw, MEM_EDC1);

	if (cause & LE)
		csio_le_intr_handler(hw);

	if (cause & TP)
		csio_tp_intr_handler(hw);

	if (cause & MA)
		csio_ma_intr_handler(hw);

	if (cause & PM_TX)
		csio_pmtx_intr_handler(hw);

	if (cause & PM_RX)
		csio_pmrx_intr_handler(hw);

	if (cause & ULP_RX)
		csio_ulprx_intr_handler(hw);

	if (cause & CPL_SWITCH)
		csio_cplsw_intr_handler(hw);

	if (cause & SGE)
		csio_sge_intr_handler(hw);

	if (cause & ULP_TX)
		csio_ulptx_intr_handler(hw);

	/* Clear the interrupts just processed. */
	csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE);
	csio_rd_reg32(hw, PL_INT_CAUSE); /* flush */

	return 1;
}
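/*
 * csio_mberr_worker - worker for mailbox error events.
 * @data: HW module
 *
 * Splices the mailbox callback queue onto a local list, re-issues the
 * next queued mailbox if any, then runs the completions without
 * holding the HW lock.
 */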
static void
csio_mberr_worker(void *data)
{
	struct csio_hw *hw = (struct csio_hw *)data;
	struct csio_mbm *mbm = &hw->mbm;
	LIST_HEAD(cbfn_q);
	struct csio_mb *mbp_next;
	int rv;

	del_timer_sync(&mbm->timer);

	spin_lock_irq(&hw->lock);
	if (list_empty(&mbm->cbfn_q)) {
		spin_unlock_irq(&hw->lock);
		return;
	}

	list_splice_tail_init(&mbm->cbfn_q, &cbfn_q);
	mbm->stats.n_cbfnq = 0;

	/* Try to start waiting mailboxes */
	if (!list_empty(&mbm->req_q)) {
		mbp_next = list_first_entry(&mbm->req_q, struct csio_mb, list);
		list_del_init(&mbp_next->list);

		rv = csio_mb_issue(hw, mbp_next);
		if (rv != 0)
			list_add_tail(&mbp_next->list, &mbm->req_q);
		else
			CSIO_DEC_STATS(mbm, n_activeq);
	}
	spin_unlock_irq(&hw->lock);

	/* Now call back the completions */
	csio_mb_completions(hw, &cbfn_q);
}
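/*
 * csio_hw_mb_timer - mailbox timeout timer callback.
 * @data: HW module
 */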
static void
csio_hw_mb_timer(uintptr_t data)
{
	struct csio_hw *hw = (struct csio_hw *)data;
	struct csio_mb *mbp = NULL;

	spin_lock_irq(&hw->lock);
	mbp = csio_mb_tmo_handler(hw);
	spin_unlock_irq(&hw->lock);

	/* Call back the function for the timed-out mailbox */
	if (mbp)
		mbp->mb_cbfn(hw, mbp);
}
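/*
 * csio_hw_mbm_cleanup - cancel all mailboxes and run their completions.
 * @hw: HW module
 *
 * Called with the HW lock held; the lock is dropped around the
 * completion callbacks.
 */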
static void
csio_hw_mbm_cleanup(struct csio_hw *hw)
{
	LIST_HEAD(cbfn_q);

	csio_mb_cancel_all(hw, &cbfn_q);

	spin_unlock_irq(&hw->lock);
	csio_mb_completions(hw, &cbfn_q);
	spin_lock_irq(&hw->lock);
}
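/*
 * csio_enqueue_evt - enqueue an event onto the HW event queue.
 * @hw: HW module
 * @type: event type
 * @evt_msg: event payload
 * @len: payload length, at most CSIO_EVT_MSG_SIZE
 *
 * The caller is expected to hold the HW lock;
 * csio_enqueue_evt_lock() below is the self-locking variant.
 */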
int
csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
		 uint16_t len)
{
	struct csio_evt_msg *evt_entry = NULL;

	if (type >= CSIO_EVT_MAX)
		return -EINVAL;

	if (len > CSIO_EVT_MSG_SIZE)
		return -EINVAL;

	if (hw->flags & CSIO_HWF_FWEVT_STOP)
		return -EINVAL;

	if (list_empty(&hw->evt_free_q)) {
		csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
			 type, len);
		return -ENOMEM;
	}

	evt_entry = list_first_entry(&hw->evt_free_q,
				     struct csio_evt_msg, list);
	list_del_init(&evt_entry->list);

	/* Copy the message and move the entry to the active queue. */
	evt_entry->type = type;
	memcpy((void *)evt_entry->data, evt_msg, len);
	list_add_tail(&evt_entry->list, &hw->evt_active_q);

	CSIO_DEC_STATS(hw, n_evt_freeq);
	CSIO_INC_STATS(hw, n_evt_activeq);

	return 0;
}
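/* Locking variant of csio_enqueue_evt(); can also gather an SG payload. */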
static int
csio_enqueue_evt_lock(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
		      uint16_t len, bool msg_sg)
{
	struct csio_evt_msg *evt_entry = NULL;
	struct csio_fl_dma_buf *fl_sg;
	uint32_t off = 0;
	unsigned long flags;
	int n, ret = 0;

	if (type >= CSIO_EVT_MAX)
		return -EINVAL;

	if (len > CSIO_EVT_MSG_SIZE)
		return -EINVAL;

	spin_lock_irqsave(&hw->lock, flags);
	if (hw->flags & CSIO_HWF_FWEVT_STOP) {
		ret = -EINVAL;
		goto out;
	}

	if (list_empty(&hw->evt_free_q)) {
		csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
			 type, len);
		ret = -ENOMEM;
		goto out;
	}

	evt_entry = list_first_entry(&hw->evt_free_q,
				     struct csio_evt_msg, list);
	list_del_init(&evt_entry->list);

	evt_entry->type = type;

	/* Gather the scattered freelist buffers, or copy the flat message. */
	if (msg_sg) {
		fl_sg = (struct csio_fl_dma_buf *) evt_msg;
		for (n = 0; (n < CSIO_MAX_FLBUF_PER_IQWR && off < len); n++) {
			memcpy((void *)((uintptr_t)evt_entry->data + off),
			       fl_sg->flbufs[n].vaddr,
			       fl_sg->flbufs[n].len);
			off += fl_sg->flbufs[n].len;
		}
	} else
		memcpy((void *)evt_entry->data, evt_msg, len);

	list_add_tail(&evt_entry->list, &hw->evt_active_q);
	CSIO_DEC_STATS(hw, n_evt_freeq);
	CSIO_INC_STATS(hw, n_evt_activeq);
out:
	spin_unlock_irqrestore(&hw->lock, flags);
	return ret;
}
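/* Return an event entry to the free queue. */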
static void
csio_free_evt(struct csio_hw *hw, struct csio_evt_msg *evt_entry)
{
	if (evt_entry) {
		spin_lock_irq(&hw->lock);
		list_del_init(&evt_entry->list);
		list_add_tail(&evt_entry->list, &hw->evt_free_q);
		CSIO_DEC_STATS(hw, n_evt_activeq);
		CSIO_INC_STATS(hw, n_evt_freeq);
		spin_unlock_irq(&hw->lock);
	}
}
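/*
 * csio_evtq_flush - wait, up to 60 seconds, for pending FW events to drain.
 * @hw: HW module
 *
 * Called with the HW lock held; the lock is dropped while sleeping.
 */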
void
csio_evtq_flush(struct csio_hw *hw)
{
	uint32_t count = 30;

	while ((hw->flags & CSIO_HWF_FWEVT_PENDING) && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(2000);
		spin_lock_irq(&hw->lock);
	}

	CSIO_DB_ASSERT(!(hw->flags & CSIO_HWF_FWEVT_PENDING));
}
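/* Stop/start acceptance of new events onto the event queue. */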
static void
csio_evtq_stop(struct csio_hw *hw)
{
	hw->flags |= CSIO_HWF_FWEVT_STOP;
}

static void
csio_evtq_start(struct csio_hw *hw)
{
	hw->flags &= ~CSIO_HWF_FWEVT_STOP;
}
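/*
 * csio_evtq_cleanup - free every event-queue entry.
 * @hw: HW module
 */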
static void
csio_evtq_cleanup(struct csio_hw *hw)
{
	struct list_head *evt_entry, *next_entry;

	/* Release any pending events onto the free queue. */
	if (!list_empty(&hw->evt_active_q))
		list_splice_tail_init(&hw->evt_active_q, &hw->evt_free_q);

	hw->stats.n_evt_activeq = 0;
	hw->flags &= ~CSIO_HWF_FWEVT_PENDING;

	/*
	 * Freeing the list_head frees the whole entry: 'list' is the
	 * first member of struct csio_evt_msg.
	 */
	list_for_each_safe(evt_entry, next_entry, &hw->evt_free_q) {
		kfree(evt_entry);
		CSIO_DEC_STATS(hw, n_evt_freeq);
	}

	hw->stats.n_evt_freeq = 0;
}
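/*
 * csio_process_fwevtq_entry - process one entry from the FW event queue.
 * @hw: HW module
 * @wr: work request
 * @len: length of the work request
 * @flb: freelist buffer carrying a CPL_FW6_PLD payload
 * @priv: private data, unused
 */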
static void
csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len,
			  struct csio_fl_dma_buf *flb, void *priv)
{
	__u8 op;
	void *msg = NULL;
	uint32_t msg_len = 0;
	bool msg_sg = false;

	op = ((struct rss_header *) wr)->opcode;
	if (op == CPL_FW6_PLD) {
		CSIO_INC_STATS(hw, n_cpl_fw6_pld);
		if (!flb || !flb->totlen) {
			CSIO_INC_STATS(hw, n_cpl_unexp);
			return;
		}

		/* The payload lives in the freelist buffers. */
		msg = (void *) flb;
		msg_len = flb->totlen;
		msg_sg = true;
	} else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) {
		CSIO_INC_STATS(hw, n_cpl_fw6_msg);
		/* skip RSS header */
		msg = (void *)((uintptr_t)wr + sizeof(__be64));
		msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) :
			  sizeof(struct cpl_fw4_msg);
	} else {
		csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op);
		CSIO_INC_STATS(hw, n_cpl_unexp);
		return;
	}

	/*
	 * Enqueue event to EventQ. Events processing happens in the
	 * worker thread context; drop the event if the queue is full.
	 */
	if (csio_enqueue_evt_lock(hw, CSIO_EVT_FW, msg,
				  (uint16_t)msg_len, msg_sg))
		CSIO_INC_STATS(hw, n_evt_drop);
}
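/*
 * csio_evtq_worker - worker that drains the HW event queue.
 * @work: embedded work struct
 *
 * Dispatches FW messages, mailbox-error and device-loss events without
 * holding the HW lock across the individual handlers.
 */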
void
csio_evtq_worker(struct work_struct *work)
{
	struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work);
	struct list_head *evt_entry, *next_entry;
	LIST_HEAD(evt_q);
	struct csio_evt_msg *evt_msg;
	struct cpl_fw6_msg *msg;
	struct csio_rnode *rn;
	int rv = 0;
	uint8_t evtq_stop = 0;

	csio_dbg(hw, "event worker thread active evts#%d\n",
		 hw->stats.n_evt_activeq);

	spin_lock_irq(&hw->lock);
	while (!list_empty(&hw->evt_active_q)) {
		list_splice_tail_init(&hw->evt_active_q, &evt_q);
		spin_unlock_irq(&hw->lock);

		list_for_each_safe(evt_entry, next_entry, &evt_q) {
			evt_msg = (struct csio_evt_msg *) evt_entry;

			/* Drop events if the queue has been stopped. */
			spin_lock_irq(&hw->lock);
			if (hw->flags & CSIO_HWF_FWEVT_STOP)
				evtq_stop = 1;
			spin_unlock_irq(&hw->lock);
			if (evtq_stop) {
				CSIO_INC_STATS(hw, n_evt_drop);
				goto free_evt;
			}

			switch (evt_msg->type) {
			case CSIO_EVT_FW:
				msg = (struct cpl_fw6_msg *)(evt_msg->data);

				if ((msg->opcode == CPL_FW6_MSG ||
				     msg->opcode == CPL_FW4_MSG) &&
				    !msg->type) {
					rv = csio_mb_fwevt_handler(hw,
							msg->data);
					if (!rv)
						break;
					/* Not a mailbox event */
					csio_fcoe_fwevt_handler(hw,
							msg->opcode, msg->data);
				} else if (msg->opcode == CPL_FW6_PLD) {
					csio_fcoe_fwevt_handler(hw,
							msg->opcode, msg->data);
				} else {
					csio_warn(hw,
					     "Unhandled FW msg op %x type %x\n",
					     msg->opcode, msg->type);
					CSIO_INC_STATS(hw, n_evt_drop);
				}
				break;

			case CSIO_EVT_MBX:
				csio_mberr_worker(hw);
				break;

			case CSIO_EVT_DEV_LOSS:
				memcpy(&rn, evt_msg->data, sizeof(rn));
				csio_rnode_devloss_handler(rn);
				break;

			default:
				csio_warn(hw, "Unhandled event %x on evtq\n",
					  evt_msg->type);
				CSIO_INC_STATS(hw, n_evt_unexp);
				break;
			}
free_evt:
			csio_free_evt(hw, evt_msg);
		}

		spin_lock_irq(&hw->lock);
	}
	hw->flags &= ~CSIO_HWF_FWEVT_PENDING;
	spin_unlock_irq(&hw->lock);
}
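/*
 * csio_fwevtq_handler - ingress-queue handler for the FW event queue.
 * @hw: HW module
 */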
int
csio_fwevtq_handler(struct csio_hw *hw)
{
	int rv;

	if (csio_q_iqid(hw, hw->fwevt_iq_idx) == CSIO_MAX_QID) {
		CSIO_INC_STATS(hw, n_int_stray);
		return -EINVAL;
	}

	rv = csio_wr_process_iq_idx(hw, hw->fwevt_iq_idx,
				    csio_process_fwevtq_entry, NULL);
	return rv;
}
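/*
 * csio_mgmt_req_lookup - check whether an I/O request is still active.
 * @mgmtm: management module
 * @io_req: I/O request
 *
 * Returns 0 if @io_req is on the active queue, -EINVAL otherwise.
 */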
int
csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req)
{
	struct list_head *tmp;

	list_for_each(tmp, &mgmtm->active_q) {
		if (io_req == (struct csio_ioreq *)tmp)
			return 0;
	}
	return -EINVAL;
}
3638
3639#define ECM_MIN_TMO 1000
3640
3641
3642
3643
3644
3645
3646
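/*
 * csio_mgmt_tmo_handler - management request timeout handler.
 * @data: management module
 *
 * Ages each active request by ECM_MIN_TMO, completes expired ones with
 * -ETIMEDOUT, and re-arms the timer while requests remain.
 */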
static void
csio_mgmt_tmo_handler(uintptr_t data)
{
	struct csio_mgmtm *mgmtm = (struct csio_mgmtm *) data;
	struct list_head *tmp;
	struct csio_ioreq *io_req;

	csio_dbg(mgmtm->hw, "Mgmt timer invoked!\n");

	spin_lock_irq(&mgmtm->hw->lock);

	list_for_each(tmp, &mgmtm->active_q) {
		io_req = (struct csio_ioreq *) tmp;
		io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO);

		if (!io_req->tmo) {
			/*
			 * Request timed out: step back before unlinking
			 * so list_for_each() continues safely.
			 */
			tmp = csio_list_prev(tmp);
			list_del_init(&io_req->sm.sm_list);
			if (io_req->io_cbfn) {
				/* io_cbfn completes the request */
				io_req->wr_status = -ETIMEDOUT;
				io_req->io_cbfn(mgmtm->hw, io_req);
			} else {
				CSIO_DB_ASSERT(0);
			}
		}
	}

	/* Re-arm the timer while requests remain active. */
	if (!list_empty(&mgmtm->active_q))
		mod_timer(&mgmtm->mgmt_timer,
			  jiffies + msecs_to_jiffies(ECM_MIN_TMO));
	spin_unlock_irq(&mgmtm->hw->lock);
}
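/*
 * csio_mgmtm_cleanup - drain and complete outstanding management requests.
 * @mgmtm: management module
 *
 * Called with the HW lock held; drops it while waiting.
 */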
static void
csio_mgmtm_cleanup(struct csio_mgmtm *mgmtm)
{
	struct csio_hw *hw = mgmtm->hw;
	struct csio_ioreq *io_req;
	struct list_head *tmp;
	uint32_t count;

	/* Wait up to 60 seconds for in-flight requests to drain. */
	count = 30;
	while ((!list_empty(&mgmtm->active_q)) && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(2000);
		spin_lock_irq(&hw->lock);
	}

	/* Complete whatever is still on the active queue as timed out. */
	list_for_each(tmp, &mgmtm->active_q) {
		io_req = (struct csio_ioreq *) tmp;
		tmp = csio_list_prev(tmp);
		list_del_init(&io_req->sm.sm_list);
		mgmtm->stats.n_active--;
		if (io_req->io_cbfn) {
			io_req->wr_status = -ETIMEDOUT;
			io_req->io_cbfn(mgmtm->hw, io_req);
		}
	}
}
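/*
 * csio_mgmtm_init - initialize the management module.
 * @mgmtm: management module
 * @hw: HW module
 *
 * Sets up the timeout timer and the active/callback queues.
 * Returns 0 on success.
 */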
static int
csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw)
{
	struct timer_list *timer = &mgmtm->mgmt_timer;

	init_timer(timer);
	timer->function = csio_mgmt_tmo_handler;
	timer->data = (unsigned long)mgmtm;

	INIT_LIST_HEAD(&mgmtm->active_q);
	INIT_LIST_HEAD(&mgmtm->cbfn_q);

	mgmtm->hw = hw;

	return 0;
}
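/*
 * csio_mgmtm_exit - tear down the management module.
 * @mgmtm: management module
 */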
static void
csio_mgmtm_exit(struct csio_mgmtm *mgmtm)
{
	del_timer_sync(&mgmtm->mgmt_timer);
}
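/*
 * csio_hw_start - kick off HW configuration via the state machine.
 * @hw: HW module
 *
 * Posts CSIO_HWE_CFG; returns 0 only if the HW reached the ready state.
 */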
int
csio_hw_start(struct csio_hw *hw)
{
	spin_lock_irq(&hw->lock);
	csio_post_event(&hw->sm, CSIO_HWE_CFG);
	spin_unlock_irq(&hw->lock);

	if (csio_is_hw_ready(hw))
		return 0;
	else
		return -EINVAL;
}
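/*
 * csio_hw_stop - bring down the HW in preparation for PCI removal.
 * @hw: HW module
 */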
int
csio_hw_stop(struct csio_hw *hw)
{
	csio_post_event(&hw->sm, CSIO_HWE_PCI_REMOVE);

	if (csio_is_hw_removing(hw))
		return 0;
	else
		return -EINVAL;
}

/* Max HW reset attempts */
#define CSIO_MAX_RESET_RETRIES	3

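/*
 * csio_hw_reset - reset the HW via the state machine.
 * @hw: HW module
 *
 * Only the master function may reset the chip, and only up to
 * CSIO_MAX_RESET_RETRIES times.
 */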
int
csio_hw_reset(struct csio_hw *hw)
{
	if (!csio_is_hw_master(hw))
		return -EPERM;

	if (hw->rst_retries >= CSIO_MAX_RESET_RETRIES) {
		csio_dbg(hw, "Max HW reset attempts reached\n");
		return -EINVAL;
	}

	hw->rst_retries++;
	csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET);

	if (csio_is_hw_ready(hw)) {
		hw->rst_retries = 0;
		hw->stats.n_reset_start = jiffies_to_msecs(jiffies);
		return 0;
	} else
		return -EINVAL;
}
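/*
 * csio_hw_get_device_id - cache the PCI vendor and device IDs.
 * @hw: HW module
 */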
static void
csio_hw_get_device_id(struct csio_hw *hw)
{
	/* Is the adapter device id cached already? */
	if (csio_is_dev_id_cached(hw))
		return;

	/* Get the PCI vendor & device id */
	pci_read_config_word(hw->pdev, PCI_VENDOR_ID,
			     &hw->params.pci.vendor_id);
	pci_read_config_word(hw->pdev, PCI_DEVICE_ID,
			     &hw->params.pci.device_id);

	csio_dev_id_cached(hw);
	hw->chip_id = (hw->params.pci.device_id & CSIO_HW_CHIP_MASK);
}
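/*
 * csio_hw_set_description - set the model and description of the HW.
 * @hw: HW module
 * @ven_id: PCI vendor ID
 * @dev_id: PCI device ID
 */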
static void
csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id)
{
	uint32_t adap_type, prot_type;

	if (ven_id == CSIO_VENDOR_ID) {
		prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK);
		adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK);

		if (prot_type == CSIO_T4_FCOE_ASIC) {
			memcpy(hw->hw_ver,
			       csio_t4_fcoe_adapters[adap_type].model_no, 16);
			memcpy(hw->model_desc,
			       csio_t4_fcoe_adapters[adap_type].description,
			       32);
		} else if (prot_type == CSIO_T5_FCOE_ASIC) {
			memcpy(hw->hw_ver,
			       csio_t5_fcoe_adapters[adap_type].model_no, 16);
			memcpy(hw->model_desc,
			       csio_t5_fcoe_adapters[adap_type].description,
			       32);
		} else {
			char tempName[32] = "Chelsio FCoE Controller";
			memcpy(hw->model_desc, tempName, 32);
		}
	}
}
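/*
 * csio_hw_init - initialize the HW module.
 * @hw: HW module
 *
 * Initializes the state machine, locks and sub-modules (mailbox, work
 * request, SCSI, management) and pre-allocates the free event queue.
 * Returns 0 on success, negative errno on failure.
 */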
int
csio_hw_init(struct csio_hw *hw)
{
	int rv = -EINVAL;
	uint32_t i;
	uint16_t ven_id, dev_id;
	struct csio_evt_msg *evt_entry;

	INIT_LIST_HEAD(&hw->sm.sm_list);
	csio_init_state(&hw->sm, csio_hws_uninit);
	spin_lock_init(&hw->lock);
	INIT_LIST_HEAD(&hw->sln_head);

	/* Get the PCI vendor & device id */
	csio_hw_get_device_id(hw);

	strcpy(hw->name, CSIO_HW_NAME);

	/* Select the T4 or T5 chip ops based on the cached chip id. */
	hw->chip_ops = csio_is_t4(hw->chip_id) ? &t4_ops : &t5_ops;

	/* Set the model & its description */
	ven_id = hw->params.pci.vendor_id;
	dev_id = hw->params.pci.device_id;

	csio_hw_set_description(hw, ven_id, dev_id);

	/* Initialize default log level */
	hw->params.log_level = (uint32_t) csio_dbg_level;

	csio_set_fwevt_intr_idx(hw, -1);
	csio_set_nondata_intr_idx(hw, -1);

	/* Init all the modules: Mailbox, WorkRequest, SCSI, Management. */
	if (csio_mbm_init(csio_hw_to_mbm(hw), hw, csio_hw_mb_timer))
		goto err;

	rv = csio_wrm_init(csio_hw_to_wrm(hw), hw);
	if (rv)
		goto err_mbm_exit;

	rv = csio_scsim_init(csio_hw_to_scsim(hw), hw);
	if (rv)
		goto err_wrm_exit;

	rv = csio_mgmtm_init(csio_hw_to_mgmtm(hw), hw);
	if (rv)
		goto err_scsim_exit;

	/* Pre-allocate the free event queue. */
	INIT_LIST_HEAD(&hw->evt_active_q);
	INIT_LIST_HEAD(&hw->evt_free_q);
	for (i = 0; i < csio_evtq_sz; i++) {
		evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL);
		if (!evt_entry) {
			rv = -ENOMEM;
			csio_err(hw, "Failed to initialize eventq");
			goto err_evtq_cleanup;
		}

		list_add_tail(&evt_entry->list, &hw->evt_free_q);
		CSIO_INC_STATS(hw, n_evt_freeq);
	}

	hw->dev_num = dev_num;
	dev_num++;

	return 0;

err_evtq_cleanup:
	csio_evtq_cleanup(hw);
	csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
err_scsim_exit:
	csio_scsim_exit(csio_hw_to_scsim(hw));
err_wrm_exit:
	csio_wrm_exit(csio_hw_to_wrm(hw), hw);
err_mbm_exit:
	csio_mbm_exit(csio_hw_to_mbm(hw));
err:
	return rv;
}
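/*
 * csio_hw_exit - uninitialize the HW module, releasing sub-module
 * resources in the reverse order of csio_hw_init().
 * @hw: HW module
 */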
void
csio_hw_exit(struct csio_hw *hw)
{
	csio_evtq_cleanup(hw);
	csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
	csio_scsim_exit(csio_hw_to_scsim(hw));
	csio_wrm_exit(csio_hw_to_wrm(hw), hw);
	csio_mbm_exit(csio_hw_to_mbm(hw));
}