1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32#include "common.h"
33#include "regs.h"
34#include "sge_defs.h"
35#include "firmware_exports.h"
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
54 int polarity, int attempts, int delay, u32 *valp)
55{
56 while (1) {
57 u32 val = t3_read_reg(adapter, reg);
58
59 if (!!(val & mask) == polarity) {
60 if (valp)
61 *valp = val;
62 return 0;
63 }
64 if (--attempts == 0)
65 return -EAGAIN;
66 if (delay)
67 udelay(delay);
68 }
69}
70
71
72
73
74
75
76
77
78
79
80
81
82void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
83 int n, unsigned int offset)
84{
85 while (n--) {
86 t3_write_reg(adapter, p->reg_addr + offset, p->val);
87 p++;
88 }
89}
90
91
92
93
94
95
96
97
98
99
100
101void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
102 u32 val)
103{
104 u32 v = t3_read_reg(adapter, addr) & ~mask;
105
106 t3_write_reg(adapter, addr, v | val);
107 t3_read_reg(adapter, addr);
108}
109
110
111
112
113
114
115
116
117
118
119
120
121
122static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
123 unsigned int data_reg, u32 *vals,
124 unsigned int nregs, unsigned int start_idx)
125{
126 while (nregs--) {
127 t3_write_reg(adap, addr_reg, start_idx);
128 *vals++ = t3_read_reg(adap, data_reg);
129 start_idx++;
130 }
131}
132
133
134
135
136
137
138
139
140
141
142
/*
 *	t3_mc7_bd_read - read from MC7 through backdoor accesses
 *	@mc7: identifies the MC7 memory controller to read from
 *	@start: index of the first 64-bit word to read
 *	@n: number of 64-bit words to read
 *	@buf: where to store the read result
 *
 *	Reads @n 64-bit words from MC7 starting at word @start, using backdoor
 *	register accesses.  Depending on mc7->width the data for one 64-bit
 *	word is assembled from one or more backdoor reads.  Returns 0 on
 *	success, -EINVAL for an out-of-range request, or -EIO if the backdoor
 *	engine stays busy.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf)
{
	/* per-width bit-shift / step tables, indexed by mc7->width */
	static const int shift[] = { 0, 0, 16, 24 };
	static const int step[] = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;	/* memory size in 64-bit words */
	struct adapter *adap = mc7->adapter;

	if (start >= size64 || start + n > size64)
		return -EINVAL;

	/* convert the word index into the backdoor byte address */
	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			/* kick off a backdoor read and poll until !BUSY */
			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				/* full 64-bit word comes from DATA0/DATA1 */
				val64 = t3_read_reg(adap,
						    mc7->offset +
						    A_MC7_BD_DATA0);
				val64 |= (u64) val << 32;
			} else {
				/* narrower widths: merge partial words */
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64) val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}
190
191
192
193
194static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
195{
196 u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
197 u32 val = F_PREEN | V_CLKDIV(clkdiv);
198
199 t3_write_reg(adap, A_MI1_CFG, val);
200}
201
202#define MDIO_ATTEMPTS 20
203
204
205
206
/*
 * MI1 read through the clause-22 MDIO interface.  Programs the PHY/register
 * address, issues a read op, and polls for completion under the MDIO lock.
 * Returns the (non-negative) register value on success or a negative errno.
 */
static int t3_mi1_read(struct net_device *dev, int phy_addr, int mmd_addr,
		       u16 reg_addr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	/* select clause-22 framing (ST = 1) */
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret)
		ret = t3_read_reg(adapter, A_MI1_DATA);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}
225
/*
 * MI1 write through the clause-22 MDIO interface.  Mirrors t3_mi1_read()
 * but writes @val; returns 0 on success or a negative errno.
 */
static int t3_mi1_write(struct net_device *dev, int phy_addr, int mmd_addr,
			u16 reg_addr, u16 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	/* select clause-22 framing (ST = 1) */
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}
243
/* MDIO operations for PHYs accessed through the plain clause-22 interface */
static const struct mdio_ops mi1_mdio_ops = {
	.read = t3_mi1_read,
	.write = t3_mi1_write,
	.mode_support = MDIO_SUPPORTS_C22
};
249
250
251
252
253
/*
 * Issue the address cycle of a clause-45 MDIO transaction: switch MI1 to
 * indirect (ST = 0) framing, program the device/port address, write the
 * target register address, and wait for the address op to finish.
 * Returns 0 on success or a negative errno.
 */
static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
		       int reg_addr)
{
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
			       MDIO_ATTEMPTS, 10);
}
266
267
268
269
/*
 * MI1 read through the clause-45 (indirect) MDIO interface: address cycle
 * followed by a read op, all under the MDIO lock.  Returns the register
 * value on success or a negative errno.
 */
static int mi1_ext_read(struct net_device *dev, int phy_addr, int mmd_addr,
			u16 reg_addr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	mutex_lock(&adapter->mdio_lock);
	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
		if (!ret)
			ret = t3_read_reg(adapter, A_MI1_DATA);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}
289
/*
 * MI1 write through the clause-45 (indirect) MDIO interface: address cycle
 * followed by a data write op, all under the MDIO lock.  Returns 0 on
 * success or a negative errno.
 */
static int mi1_ext_write(struct net_device *dev, int phy_addr, int mmd_addr,
			 u16 reg_addr, u16 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	mutex_lock(&adapter->mdio_lock);
	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}
308
/* MDIO operations for clause-45 PHYs (with clause-22 emulation) */
static const struct mdio_ops mi1_mdio_ext_ops = {
	.read = mi1_ext_read,
	.write = mi1_ext_write,
	.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22
};
314
315
316
317
318
319
320
321
322
323
324
325
/*
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Read-modify-write of a PHY register: clears the bits in @clear and
 *	sets the bits in @set.  Returns 0 on success or a negative errno.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	unsigned int v;
	int err = t3_mdio_read(phy, mmd, reg, &v);

	if (err)
		return err;
	return t3_mdio_write(phy, mmd, reg, (v & ~clear) | set);
}
339
340
341
342
343
344
345
346
347
348
349
350int t3_phy_reset(struct cphy *phy, int mmd, int wait)
351{
352 int err;
353 unsigned int ctl;
354
355 err = t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER,
356 MDIO_CTRL1_RESET);
357 if (err || !wait)
358 return err;
359
360 do {
361 err = t3_mdio_read(phy, mmd, MDIO_CTRL1, &ctl);
362 if (err)
363 return err;
364 ctl &= MDIO_CTRL1_RESET;
365 if (ctl)
366 msleep(1);
367 } while (ctl && --wait);
368
369 return ctl ? -1 : 0;
370}
371
372
373
374
375
376
377
378
379
/*
 *	t3_phy_advertise - set the PHY advertisement registers
 *	@phy: the PHY to operate on
 *	@advert: bitmap of ADVERTISED_* capabilities to advertise
 *
 *	Programs MII_CTRL1000 (1G half/full) and MII_ADVERTISE
 *	(10/100 speeds plus pause capabilities) from @advert.
 *	Returns 0 on success or a negative errno.
 */
int t3_phy_advertise(struct cphy *phy, unsigned int advert)
{
	int err;
	unsigned int val = 0;

	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_CTRL1000, &val);
	if (err)
		return err;

	/* preserve the rest of CTRL1000, replace only the 1G ability bits */
	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000HALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000FULL;

	err = t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_CTRL1000, val);
	if (err)
		return err;

	/* 1 is presumably the IEEE 802.3 selector field (ADVERTISE_CSMA)
	 * — TODO confirm against mii.h */
	val = 1;
	if (advert & ADVERTISED_10baseT_Half)
		val |= ADVERTISE_10HALF;
	if (advert & ADVERTISED_10baseT_Full)
		val |= ADVERTISE_10FULL;
	if (advert & ADVERTISED_100baseT_Half)
		val |= ADVERTISE_100HALF;
	if (advert & ADVERTISED_100baseT_Full)
		val |= ADVERTISE_100FULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_PAUSE_CAP;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_PAUSE_ASYM;
	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
}
414
415
416
417
418
419
420
421
422
423int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
424{
425 unsigned int val = 0;
426
427 if (advert & ADVERTISED_1000baseT_Half)
428 val |= ADVERTISE_1000XHALF;
429 if (advert & ADVERTISED_1000baseT_Full)
430 val |= ADVERTISE_1000XFULL;
431 if (advert & ADVERTISED_Pause)
432 val |= ADVERTISE_1000XPAUSE;
433 if (advert & ADVERTISED_Asym_Pause)
434 val |= ADVERTISE_1000XPSE_ASYM;
435 return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
436}
437
438
439
440
441
442
443
444
445
446
/*
 *	t3_set_phy_speed_duplex - force PHY speed and duplex
 *	@phy: the PHY to operate on
 *	@speed: requested speed (SPEED_*), or negative to leave unchanged
 *	@duplex: requested duplex (DUPLEX_*), or negative to leave unchanged
 *
 *	Forces a 10/100/1000 PHY's speed and duplex via BMCR.  Forcing a
 *	field clears BMCR_ANENABLE; if the resulting speed is 1000 Mb/s
 *	autoneg is re-enabled (1G presumably requires autoneg — TODO confirm).
 *	Returns 0 on success or a negative errno.
 */
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_BMCR, &ctl);
	if (err)
		return err;

	if (speed >= 0) {
		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
		if (speed == SPEED_100)
			ctl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			ctl |= BMCR_SPEED1000;
	}
	if (duplex >= 0) {
		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
		if (duplex == DUPLEX_FULL)
			ctl |= BMCR_FULLDPLX;
	}
	if (ctl & BMCR_SPEED1000)	/* auto-negotiation required for GigE */
		ctl |= BMCR_ANENABLE;
	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_BMCR, ctl);
}
472
/* Enable the PHY's LASI link-status alarm interrupt. */
int t3_phy_lasi_intr_enable(struct cphy *phy)
{
	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
			     MDIO_PMA_LASI_LSALARM);
}
478
/* Disable all LASI interrupts on the PHY. */
int t3_phy_lasi_intr_disable(struct cphy *phy)
{
	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
}
483
/* Clear a pending LASI interrupt by reading the status register
 * (the read value itself is discarded). */
int t3_phy_lasi_intr_clear(struct cphy *phy)
{
	u32 val;

	return t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
}
490
/*
 * LASI interrupt handler: reads the LASI status register and reports
 * cphy_cause_link_change if the link-status alarm is set, 0 otherwise,
 * or a negative errno on MDIO failure.
 */
int t3_phy_lasi_intr_handler(struct cphy *phy)
{
	unsigned int status;
	int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT,
			       &status);

	if (err)
		return err;
	return (status & MDIO_PMA_LASI_LSALARM) ? cphy_cause_link_change : 0;
}
501
/*
 * Per-board configuration table, indexed by adapter id (see
 * t3_get_adapter_info()).  Each entry carries the GPIO output-enable /
 * output-value masks, GPIO interrupt pins, extra link capabilities, the
 * MDIO access method, and the board name.  The two empty entries are
 * presumably placeholders for retired board ids — confirm against the
 * PCI id table in the probe code.
 */
static const struct adapter_info t3_adap_info[] = {
	{1, 1, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio PE9000"},
	{1, 1, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio T302"},
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310"},
	{1, 1, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T320"},
	{},
	{},
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310" },
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio N320E-G2" },
};
535
536
537
538
539
540const struct adapter_info *t3_get_adapter_info(unsigned int id)
541{
542 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
543}
544
/* Per-port-type hook used to initialize the PHY driver for a port. */
struct port_type_info {
	int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
			int phy_addr, const struct mdio_ops *ops);
};
549
/*
 * PHY preparation routines indexed by VPD port type (see get_vpd_params());
 * NULL entries are unused/reserved port type codes.
 */
static const struct port_type_info port_types[] = {
	{ NULL },
	{ t3_ael1002_phy_prep },
	{ t3_vsc8211_phy_prep },
	{ NULL},
	{ t3_xaui_direct_phy_prep },
	{ t3_ael2005_phy_prep },
	{ t3_qt2045_phy_prep },
	{ t3_ael1006_phy_prep },
	{ NULL },
	{ t3_aq100x_phy_prep },
	{ t3_ael2020_phy_prep },
};
563
/* Declares one VPD keyword entry: 2-byte keyword, 1-byte length, data. */
#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
566
567
568
569
570
/*
 * Memory layout of the serial EEPROM's VPD area as read by
 * get_vpd_params().  Field order mirrors the on-EEPROM layout; do not
 * reorder.
 */
struct t3_vpd {
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[16];
	u8 vpdr_tag;
	u8 vpdr_len[2];
	VPD_ENTRY(pn, 16);	/* part number */
	VPD_ENTRY(ec, 16);	/* EC level */
	VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
	VPD_ENTRY(na, 12);	/* MAC address base, hex string */
	VPD_ENTRY(cclk, 6);	/* core clock */
	VPD_ENTRY(mclk, 6);	/* mem clock */
	VPD_ENTRY(uclk, 6);	/* uP clock */
	VPD_ENTRY(mdc, 6);	/* MDIO clock */
	VPD_ENTRY(mt, 2);	/* memory timing */
	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
	VPD_ENTRY(port0, 2);	/* PHY type of port 0 */
	VPD_ENTRY(port1, 2);	/* PHY type of port 1 */
	VPD_ENTRY(port2, 2);	/* PHY type of port 2 */
	VPD_ENTRY(port3, 2);	/* PHY type of port 3 */
	VPD_ENTRY(rv, 1);	/* csum */
	u32 pad;		/* for multiple-of-4 sizing and alignment */
};
595
596#define EEPROM_MAX_POLL 40
597#define EEPROM_STAT_ADDR 0x4000
598#define VPD_BASE 0xc00
599
600
601
602
603
604
605
606
607
608
609
610
/*
 *	t3_seeprom_read - read a VPD EEPROM location
 *	@adapter: adapter to read
 *	@addr: EEPROM address (must be 4-byte aligned and in range)
 *	@data: where to store the read data (little-endian)
 *
 *	Reads a 32-bit word from the serial EEPROM using the card's PCI VPD
 *	capability: program the address, poll for the completion flag, then
 *	read the data register.  Returns 0, -EINVAL, or -EIO on poll timeout.
 */
int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	u32 v;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
	do {
		udelay(10);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
	*data = cpu_to_le32(v);
	return 0;
}
635
636
637
638
639
640
641
642
643
644
645int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
646{
647 u16 val;
648 int attempts = EEPROM_MAX_POLL;
649 unsigned int base = adapter->params.pci.vpd_cap_addr;
650
651 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
652 return -EINVAL;
653
654 pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
655 le32_to_cpu(data));
656 pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR,
657 addr | PCI_VPD_ADDR_F);
658 do {
659 msleep(1);
660 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
661 } while ((val & PCI_VPD_ADDR_F) && --attempts);
662
663 if (val & PCI_VPD_ADDR_F) {
664 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
665 return -EIO;
666 }
667 return 0;
668}
669
670
671
672
673
674
675
676
/*
 *	t3_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: 1 to enable write protection, 0 to disable it
 *
 *	Enables or disables write protection by writing the EEPROM status
 *	word (0xc presumably sets the block-protect bits — confirm against
 *	the EEPROM datasheet).
 */
int t3_seeprom_wp(struct adapter *adapter, int enable)
{
	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}
681
682
683
684
/*
 * Convert a single hex digit ('0'-'9', 'a'-'f', 'A'-'F') to its numeric
 * value.  Behavior for non-hex characters is unspecified.
 */
static unsigned int hex2int(unsigned char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	return toupper(c) - 'A' + 10;
}
689
690
691
692
693
694
695
696
/*
 *	get_vpd_params - read VPD parameters from the adapter's EEPROM
 *	@adapter: the adapter
 *	@p: where to store the parameters
 *
 *	Reads card parameters (clocks, serial number, MAC base, per-port PHY
 *	types, XAUI config) from the serial EEPROM.  Returns 0 on success or
 *	a negative errno from the EEPROM reads.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Probe the first word at VPD_BASE: if it carries the expected VPD
	 * id tag (0x82) the VPD area lives there, otherwise fall back to
	 * offset 0 (presumably for early cards — confirm against board docs).
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
	if (ret)
		return ret;
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	/* read the whole VPD structure, 32 bits at a time */
	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (__le32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	/* clocks and memory timing are stored as decimal ASCII strings */
	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);

	/* rev-0 boards with no port-type info get hard-coded PHY types */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		p->port_type[0] = hex2int(vpd.port0_data[0]);
		p->port_type[1] = hex2int(vpd.port1_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	/* MAC base address is 12 hex digits */
	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
				 hex2int(vpd.na_data[2 * i + 1]);
	return 0;
}
741
742
/* Serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x7fffc,	/* flash address holding FW version */
	FW_MIN_SIZE = 8	/* at least version and csum */
};
760
761
762
763
764
765
766
767
768
769
770
771
/*
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read (1..4)
 *	@cont: whether another operation will be chained
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.  Returns 0, -EINVAL for a
 *	bad byte count, -EBUSY if the SF engine is busy, or a wait error.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_SF_DATA);
	return ret;
}
787
788
789
790
791
792
793
794
795
796
797
798
/*
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write (1..4)
 *	@cont: whether another operation will be chained
 *	@val: the value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.  Returns 0, -EINVAL,
 *	-EBUSY, or a wait error.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_DATA, val);
	t3_write_reg(adapter, A_SF_OP,
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}
811
812
813
814
815
816
817
818
819
820static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
821{
822 int ret;
823 u32 status;
824
825 while (1) {
826 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
827 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
828 return ret;
829 if (!(status & 1))
830 return 0;
831 if (--attempts == 0)
832 return -EAGAIN;
833 if (delay)
834 msleep(delay);
835 }
836}
837
838
839
840
841
842
843
844
845
846
847
848
849
850
/*
 *	t3_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read (4-byte aligned)
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Reads contiguous 32-bit words from the serial flash starting at the
 *	given address.  If @byte_oriented is set the read data is stored as
 *	a byte array (i.e. big-endian), otherwise as host-order 32-bit words.
 *	Returns 0 on success or an SF1 access error.
 */
int t3_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	/* build the 4-byte fast-read command: opcode in the low byte,
	   byte-swapped 24-bit address above it */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	/* issue the command; the extra 1-byte read is the dummy cycle
	   required by the fast-read opcode */
	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
		return ret;

	for (; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, data);
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}
874
875
876
877
878
879
880
881
882
883
884
/*
 *	t3_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write (up to a 256-byte page boundary)
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address; the write may not cross a page boundary.  After
 *	programming, the page is read back and compared to verify the write.
 *	Returns 0, -EINVAL for a bad range, -EIO on verify mismatch, or an
 *	SF1/flash-wait error.
 */
static int t3_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];	/* one 256-byte page for read-back verification */
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	/* page-program command: opcode in low byte, swapped address above */
	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	/* stream the payload 4 bytes at a time, big-endian within the word */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* read the page back and verify what was just programmed */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *) buf + offset, n))
		return -EIO;
	return 0;
}
922
923
924
925
926
927
928
929
/*
 *	t3_get_tp_version - read the protocol SRAM (TP) microcode version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Issues a TP embedded-processor request and reads back the version
 *	word.  Returns 0 on success or a wait error.
 */
int t3_get_tp_version(struct adapter *adapter, u32 *vers)
{
	int ret;

	/* request the uP's copy of the TP version */
	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
			      1, 1, 5, 1);
	if (ret)
		return ret;

	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	return 0;
}
945
946
947
948
949
950
951
952int t3_check_tpsram_version(struct adapter *adapter)
953{
954 int ret;
955 u32 vers;
956 unsigned int major, minor;
957
958 if (adapter->params.rev == T3_REV_A)
959 return 0;
960
961
962 ret = t3_get_tp_version(adapter, &vers);
963 if (ret)
964 return ret;
965
966 major = G_TP_VERSION_MAJOR(vers);
967 minor = G_TP_VERSION_MINOR(vers);
968
969 if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
970 return 0;
971 else {
972 CH_ERR(adapter, "found wrong TP version (%u.%u), "
973 "driver compiled for version %d.%d\n", major, minor,
974 TP_VERSION_MAJOR, TP_VERSION_MINOR);
975 }
976 return -EINVAL;
977}
978
979
980
981
982
983
984
985
986
987
988
989int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
990 unsigned int size)
991{
992 u32 csum;
993 unsigned int i;
994 const __be32 *p = (const __be32 *)tp_sram;
995
996
997 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
998 csum += ntohl(p[i]);
999 if (csum != 0xffffffff) {
1000 CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
1001 csum);
1002 return -EINVAL;
1003 }
1004
1005 return 0;
1006}
1007
/* firmware flavor encoded in the version word's type field */
enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};
1012
1013
1014
1015
1016
1017
1018
1019
/*
 *	t3_get_fw_version - read the firmware version word from flash
 *	@adapter: the adapter
 *	@vers: where to place the version
 */
int t3_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
}
1024
1025
1026
1027
1028
1029
1030
1031
/*
 *	t3_check_fw_version - check whether the FW in flash is compatible
 *	@adapter: the adapter
 *
 *	Compares the flashed firmware's type/major/minor against what the
 *	driver was compiled for.  An exact match returns 0; an older minor
 *	within the same major warns and returns -EINVAL; a newer version
 *	warns but is accepted (returns 0).
 */
int t3_check_fw_version(struct adapter *adapter)
{
	int ret;
	u32 vers;
	unsigned int type, major, minor;

	ret = t3_get_fw_version(adapter, &vers);
	if (ret)
		return ret;

	type = G_FW_VERSION_TYPE(vers);
	major = G_FW_VERSION_MAJOR(vers);
	minor = G_FW_VERSION_MINOR(vers);

	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
	    minor == FW_VERSION_MINOR)
		return 0;
	else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
		CH_WARN(adapter, "found old FW minor version(%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
	else {
		CH_WARN(adapter, "found newer FW version(%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
		return 0;	/* newer FW is tolerated */
	}
	return -EINVAL;
}
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
1071{
1072 while (start <= end) {
1073 int ret;
1074
1075 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1076 (ret = sf1_write(adapter, 4, 0,
1077 SF_ERASE_SECTOR | (start << 8))) != 0 ||
1078 (ret = flash_wait_op(adapter, 5, 500)) != 0)
1079 return ret;
1080 start++;
1081 }
1082 return 0;
1083}
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
/*
 *	t3_load_fw - download firmware to the adapter's flash
 *	@adapter: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size in bytes (multiple of 4, at least FW_MIN_SIZE)
 *
 *	Validates the image checksum, erases the firmware sector, writes the
 *	body in 256-byte chunks, and finally writes the trailing version word
 *	at FW_VERS_ADDR so a partially-written image is never seen as valid.
 *	Returns 0 on success or a negative errno.
 */
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	if ((size & 3) || size < FW_MIN_SIZE)
		return -EINVAL;
	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
		return -EFBIG;

	/* big-endian word sum over the whole image must be 0xffffffff */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	/* trim the 8-byte version/checksum trailer; version goes in last */
	size -= 8;
	for (addr = FW_FLASH_BOOT_ADDR; size;) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}
1139
1140#define CIM_CTL_BASE 0x2000
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
/*
 *	t3_cim_ctl_blk_read - read a block of CIM control registers
 *	@adap: the adapter
 *	@addr: the start address within the CIM control space
 *	@n: number of 32-bit words to read
 *	@valp: where to store the results
 *
 *	Reads a block of 4-byte values from the CIM control region via the
 *	host-access register pair.  Returns -EBUSY if the host interface is
 *	already busy, 0 on success, or a wait error.
 */
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
			unsigned int n, unsigned int *valp)
{
	int ret = 0;

	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}
1169
/*
 * Temporarily block RX traffic on a MAC: disable exact-match filters,
 * disable broadcast and hash matching, and zero the hash tables.  The
 * previous RX_CFG and hash register values are saved through the out
 * parameters so t3_open_rx_traffic() can restore them.
 */
static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
			       u32 *rx_hash_high, u32 *rx_hash_low)
{
	/* stop broadcast, multicast, promiscuous, all-multicast frames */
	t3_mac_disable_exact_filters(mac);

	/* save current settings before overriding them */
	*rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 F_DISBCAST);

	*rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);

	*rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);

	/* let any in-flight frames drain */
	msleep(1);
}
1191
/*
 * Re-enable RX traffic on a MAC, restoring the RX_CFG and hash register
 * values previously saved by t3_gate_rx_traffic().
 */
static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
			       u32 rx_hash_high, u32 rx_hash_low)
{
	t3_mac_enable_exact_filters(mac);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 rx_cfg);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
}
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
/*
 *	t3_link_changed - handle interface link changes
 *	@adapter: the adapter
 *	@port_id: the port index that changed link state
 *
 *	Called when a port's link settings change to propagate the new values
 *	to the associated MAC and, if anything relevant actually changed, to
 *	the OS-dependent layer via t3_os_link_changed().  On a down-to-up
 *	transition RX traffic is gated while the MAC RX path is re-enabled,
 *	and a pending link-fault condition is latched.
 */
void t3_link_changed(struct adapter *adapter, int port_id)
{
	int link_ok, speed, duplex, fc;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (!lc->link_ok && link_ok) {
		/* link came up: safely restart the MAC RX path */
		u32 rx_cfg, rx_hash_high, rx_hash_low;
		u32 status;

		t3_xgm_intr_enable(adapter, port_id);
		t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
		t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
		t3_mac_enable(mac, MAC_DIRECTION_RX);

		/* latch any link fault that occurred while coming up */
		status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
		if (status & F_LINKFAULTCHANGE) {
			mac->stats.link_faults++;
			pi->link_fault = 1;
		}
		t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
	}

	/* resolve the effective flow control setting */
	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;
	else
		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	if (link_ok == lc->link_ok && speed == lc->speed &&
	    duplex == lc->duplex && fc == lc->fc)
		return;	/* nothing changed */

	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
	    uses_xaui(adapter)) {
		if (link_ok)
			t3b_pcs_reset(mac);
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
	}
	lc->link_ok = link_ok;
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;

	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
		/* Set MAC speed, duplex, and flow control to match PHY. */
		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
		lc->fc = fc;
	}

	t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
}
1267
/*
 *	t3_link_fault - handle a link-fault interrupt on a port
 *	@adapter: the adapter
 *	@port_id: the port index
 *
 *	Re-enables the MAC RX path with traffic gated, re-samples the
 *	link-fault status, and either reports the port as faulted (link
 *	down) or clears the fault and restores the recorded link state.
 */
void t3_link_fault(struct adapter *adapter, int port_id)
{
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cmac *mac = &pi->mac;
	struct cphy *phy = &pi->phy;
	struct link_config *lc = &pi->link_config;
	int link_ok, speed, duplex, fc, link_fault;
	u32 rx_cfg, rx_hash_high, rx_hash_low;

	t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);

	if (adapter->params.rev > 0 && uses_xaui(adapter))
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, 0);

	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
	t3_mac_enable(mac, MAC_DIRECTION_RX);

	t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);

	/* re-check whether the fault is still present */
	link_fault = t3_read_reg(adapter,
				 A_XGM_INT_STATUS + mac->offset);
	link_fault &= F_LINKFAULTCHANGE;

	/* start from the last recorded state; the PHY may update it */
	link_ok = lc->link_ok;
	speed = lc->speed;
	duplex = lc->duplex;
	fc = lc->fc;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (link_fault) {
		/* fault persists: report link down */
		lc->link_ok = 0;
		lc->speed = SPEED_INVALID;
		lc->duplex = DUPLEX_INVALID;

		t3_os_link_fault(adapter, port_id, 0);

		/* Account link faults only when the phy reports a link up */
		if (link_ok)
			mac->stats.link_faults++;
	} else {
		/* fault cleared: re-enable XAUI and restore link state */
		if (link_ok)
			t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
				     F_TXACTENABLE | F_RXEN);

		pi->link_fault = 0;
		lc->link_ok = (unsigned char)link_ok;
		lc->speed = speed < 0 ? SPEED_INVALID : speed;
		lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
		t3_os_link_fault(adapter, port_id, link_ok);
	}
}
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
/*
 *	t3_link_start - apply link configuration to MAC/PHY
 *	@phy: the PHY to setup
 *	@mac: the MAC to setup
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	lc->link_ok = 0;
	if (lc->supported & SUPPORTED_Autoneg) {
		/* translate the requested pause mode into advertisement bits */
		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
		if (fc) {
			lc->advertising |= ADVERTISED_Asym_Pause;
			if (fc & PAUSE_RX)
				lc->advertising |= ADVERTISED_Pause;
		}
		phy->ops->advertise(phy, lc->advertising);

		if (lc->autoneg == AUTONEG_DISABLE) {
			lc->speed = lc->requested_speed;
			lc->duplex = lc->requested_duplex;
			lc->fc = (unsigned char)fc;
			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
						   fc);
			/* Also disables autoneg */
			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
		} else
			phy->ops->autoneg_enable(phy);
	} else {
		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
		lc->fc = (unsigned char)fc;
		phy->ops->reset(phy, 0);
	}
	return 0;
}
1365
1366
1367
1368
1369
1370
1371
1372
1373
/*
 *	t3_set_vlan_accel - turn on/off VLAN tag extraction
 *	@adapter: the adapter
 *	@ports: bitmap of adapter ports to operate on
 *	@on: enable (1) or disable (0) extraction
 *
 *	Enables or disables HW VLAN tag extraction for the ports in @ports.
 */
void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
{
	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
			 ports << S_VLANEXTRACTIONENABLE,
			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
}
1380
/* describes one interrupt cause bit for t3_handle_intr_status() */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition is fatal */
};
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
/*
 *	t3_handle_intr_status - table-driven interrupt handler
 *	@adapter: the adapter that generated the interrupt
 *	@reg: the interrupt status register to process
 *	@mask: a mask to apply to the interrupt status
 *	@acts: table of interrupt actions, terminated by an entry with mask 0
 *	@stats: optional statistics counters to increment (indexed by
 *		each entry's stat_idx; may be NULL only if no entry uses it)
 *
 *	Logs every asserted cause (alert for fatal, warning otherwise),
 *	bumps the matching stat counter, and acknowledges the status by
 *	writing it back.  Returns the number of fatal conditions found.
 */
static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 unsigned int mask,
				 const struct intr_info *acts,
				 unsigned long *stats)
{
	int fatal = 0;
	unsigned int status = t3_read_reg(adapter, reg) & mask;

	for (; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n",
				 acts->msg, status & acts->mask);
		} else if (acts->msg)
			CH_WARN(adapter, "%s (0x%x)\n",
				acts->msg, status & acts->mask);
		if (acts->stat_idx >= 0)
			stats[acts->stat_idx]++;
	}
	if (status)		/* clear (write-1-to-clear) what we saw */
		t3_write_reg(adapter, reg, status);
	return fatal;
}
1428
/*
 * Per-module interrupt enable masks: the cause bits each hardware block's
 * interrupt handler cares about (mostly parity/framing/fatal errors).
 */
#define SGE_INTR_MASK (F_RSPQDISABLED | \
		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		       F_HIRCQPARITYERROR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) )
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			\
			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
			F_TXPARERR | V_BISTERR(M_BISTERR))
#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
#define ULPTX_INTR_MASK 0xfc
#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
#define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
1480#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1481 F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1482 F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1483 F_MPS0 | F_CPL_SWITCH)
1484
1485
1486
/*
 * pci_intr_handler - PCI-X interrupt handler.  Decodes A_PCIX_INT_CAUSE;
 * everything except correctable ECC errors is fatal.
 */
static void pci_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcix1_intr_info[] = {
		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
		{F_RCVTARABT, "PCI received target abort", -1, 1},
		{F_RCVMSTABT, "PCI received master abort", -1, 1},
		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
		{F_DETPARERR, "PCI detected parity error", -1, 1},
		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
		{F_RCVSPLCMPERR, "PCI received split completion error", -1,
		 1},
		{F_DETCORECCERR, "PCI correctable ECC error",
		 STAT_PCI_CORR_ECC, 0},
		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
		 1},
		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
		 1},
		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
		 1},
		/* NOTE(review): V_MSIXPARERR is not part of PCIX_INTR_MASK,
		 * so this entry is masked off before the table is consulted
		 * -- confirm whether that is intended. */
		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
		 "error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
				  pcix1_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
1519
1520
1521
1522
/*
 * pcie_intr_handler - PCIe interrupt handler.  Logs the PEX error code when
 * present, then decodes A_PCIE_INT_CAUSE; all listed events are fatal.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcie_intr_info[] = {
		{F_PEXERR, "PCI PEX error", -1, 1},
		{F_UNXSPLCPLERRR,
		 "PCI unexpected split completion DMA read error", -1, 1},
		{F_UNXSPLCPLERRC,
		 "PCI unexpected split completion DMA command error", -1, 1},
		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
		 "PCI MSI-X table/PBA parity error", -1, 1},
		{F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
		{F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
		{F_RXPARERR, "PCI Rx parity error", -1, 1},
		{F_TXPARERR, "PCI Tx parity error", -1, 1},
		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
		{0}
	};

	/* The detailed PEX error code lives in a separate register; report
	 * it before the generic table-driven decode acknowledges the cause. */
	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
		CH_ALERT(adapter, "PEX error code 0x%x\n",
			 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
				  pcie_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
1553
1554
1555
1556
1557static void tp_intr_handler(struct adapter *adapter)
1558{
1559 static const struct intr_info tp_intr_info[] = {
1560 {0xffffff, "TP parity error", -1, 1},
1561 {0x1000000, "TP out of Rx pages", -1, 1},
1562 {0x2000000, "TP out of Tx pages", -1, 1},
1563 {0}
1564 };
1565
1566 static struct intr_info tp_intr_info_t3c[] = {
1567 {0x1fffffff, "TP parity error", -1, 1},
1568 {F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
1569 {F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
1570 {0}
1571 };
1572
1573 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1574 adapter->params.rev < T3_REV_C ?
1575 tp_intr_info : tp_intr_info_t3c, NULL))
1576 t3_fatal_err(adapter);
1577}
1578
1579
1580
1581
/*
 * cim_intr_handler - CIM interrupt handler.  Decodes A_CIM_HOST_INT_CAUSE;
 * all listed events are fatal.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
		{F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
		{F_ICACHEPARERR, "CIM icache parity error", -1, 1},
		{F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
		{F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
		{F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
		{F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
		{F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
		{F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
		{F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
		{F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
		{F_ITAGPARERR, "CIM itag parity error", -1, 1},
		{F_DTAGPARERR, "CIM dtag parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
				  cim_intr_info, NULL))
		t3_fatal_err(adapter);
}
1616
1617
1618
1619
/*
 * ulprx_intr_handler - ULP RX interrupt handler.  Decodes A_ULPRX_INT_CAUSE;
 * all listed events are fatal.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{F_PARERRDATA, "ULP RX data parity error", -1, 1},
		{F_PARERRPCMD, "ULP RX command parity error", -1, 1},
		{F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
		{F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
		{F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
		{F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
		{F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
		{F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
				  ulprx_intr_info, NULL))
		t3_fatal_err(adapter);
}
1638
1639
1640
1641
/*
 * ulptx_intr_handler - ULP TX interrupt handler.  PBL out-of-bounds events
 * are counted but non-fatal; parity errors (0xfc) are fatal.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
		 STAT_ULP_CH0_PBL_OOB, 0},
		{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
		 STAT_ULP_CH1_PBL_OOB, 0},
		{0xfc, "ULP TX parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
				  ulptx_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
1657
/* Aggregate framing-error bits for the PMTX ispi/ospi interfaces. */
#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
	F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1666
1667
1668
1669
/*
 * pmtx_intr_handler - PM1 TX interrupt handler.  Decodes A_PM1_TX_INT_CAUSE;
 * all listed events are fatal.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
		 "PMTX ispi parity error", -1, 1},
		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
		 "PMTX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
				  pmtx_intr_info, NULL))
		t3_fatal_err(adapter);
}
1687
/* Aggregate framing-error bits for the PMRX ispi/ospi interfaces. */
#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
	F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1696
1697
1698
1699
/*
 * pmrx_intr_handler - PM1 RX interrupt handler.  Decodes A_PM1_RX_INT_CAUSE;
 * all listed events are fatal.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
		 "PMRX ispi parity error", -1, 1},
		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
		 "PMRX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
				  pmrx_intr_info, NULL))
		t3_fatal_err(adapter);
}
1717
1718
1719
1720
/*
 * cplsw_intr_handler - CPL switch interrupt handler.  Decodes
 * A_CPL_INTR_CAUSE; all listed events are fatal.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
		{F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
		{F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
		{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
		{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
		{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
		{F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
				  cplsw_intr_info, NULL))
		t3_fatal_err(adapter);
}
1737
1738
1739
1740
/*
 * mps_intr_handler - MPS interrupt handler.  Any of the low nine cause bits
 * indicates an MPS parity error, which is fatal.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_intr_info[] = {
		{0x1ff, "MPS parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
				  mps_intr_info, NULL))
		t3_fatal_err(adapter);
}
1752
/* MC7 cause bits that are fatal: uncorrectable, parity and address errors. */
#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)

/*
 * mc7_intr_handler - MC7 (memory controller) interrupt handler.  Reports
 * correctable/uncorrectable/parity/address errors for one MC7 instance,
 * updates its stats, escalates fatal causes, then acknowledges the cause
 * register.
 */
static void mc7_intr_handler(struct mc7 *mc7)
{
	struct adapter *adapter = mc7->adapter;
	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);

	if (cause & F_CE) {
		/* correctable ECC error: log address and data, non-fatal */
		mc7->stats.corr_err++;
		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
			"data 0x%x 0x%x 0x%x\n", mc7->name,
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
	}

	if (cause & F_UE) {
		mc7->stats.uncorr_err++;
		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
			 "data 0x%x 0x%x 0x%x\n", mc7->name,
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
	}

	if (G_PE(cause)) {
		mc7->stats.parity_err++;
		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
			 mc7->name, G_PE(cause));
	}

	if (cause & F_AE) {
		u32 addr = 0;

		/* the error-address register only exists on rev > 0 parts */
		if (adapter->params.rev > 0)
			addr = t3_read_reg(adapter,
					   mc7->offset + A_MC7_ERR_ADDR);
		mc7->stats.addr_err++;
		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
			 mc7->name, addr);
	}

	if (cause & MC7_INTR_FATAL)
		t3_fatal_err(adapter);

	/* acknowledge everything we observed */
	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
}
1805
/* XGMAC cause bits that are fatal: TX/RX FIFO parity errors. */
#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))

/*
 * mac_intr_handler - XGMAC interrupt handler for port @idx.  Updates MAC
 * statistics, disables and reports link-fault interrupts, acknowledges the
 * cause, and escalates fatal FIFO parity errors.  Returns non-zero if any
 * cause bit was set.
 */
static int mac_intr_handler(struct adapter *adap, unsigned int idx)
{
	struct cmac *mac = &adap2pinfo(adap, idx)->mac;

	/*
	 * RXFIFO_OVERFLOW is masked out of the cause we process here, so it
	 * is neither counted below nor treated as a reportable cause.
	 * NOTE(review): presumably that condition is monitored elsewhere
	 * (e.g. by polling) -- confirm.
	 */
	u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) &
		    ~F_RXFIFO_OVERFLOW;

	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
		mac->stats.tx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
	}
	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
		mac->stats.rx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
	}
	if (cause & F_TXFIFO_UNDERRUN)
		mac->stats.tx_fifo_urun++;
	if (cause & F_RXFIFO_OVERFLOW)	/* always masked off above */
		mac->stats.rx_fifo_ovfl++;
	if (cause & V_SERDES_LOS(M_SERDES_LOS))
		mac->stats.serdes_signal_loss++;
	if (cause & F_XAUIPCSCTCERR)
		mac->stats.xaui_pcs_ctc_err++;
	if (cause & F_XAUIPCSALIGNCHANGE)
		mac->stats.xaui_pcs_align_change++;
	if (cause & F_XGM_INT) {
		/* disable further link-fault interrupts until re-enabled */
		t3_set_reg_field(adap,
				 A_XGM_INT_ENABLE + mac->offset,
				 F_XGM_INT, 0);
		mac->stats.link_faults++;

		t3_os_link_fault_handler(adap, idx);
	}

	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);

	if (cause & XGM_INTR_FATAL)
		t3_fatal_err(adap);

	return cause != 0;
}
1857
1858
1859
1860
1861int t3_phy_intr_handler(struct adapter *adapter)
1862{
1863 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1864
1865 for_each_port(adapter, i) {
1866 struct port_info *p = adap2pinfo(adapter, i);
1867
1868 if (!(p->phy.caps & SUPPORTED_IRQ))
1869 continue;
1870
1871 if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
1872 int phy_cause = p->phy.ops->intr_handler(&p->phy);
1873
1874 if (phy_cause & cphy_cause_link_change)
1875 t3_link_changed(adapter, i);
1876 if (phy_cause & cphy_cause_fifo_error)
1877 p->phy.fifo_errors++;
1878 if (phy_cause & cphy_cause_module_change)
1879 t3_os_phymod_changed(adapter, i);
1880 }
1881 }
1882
1883 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1884 return 0;
1885}
1886
1887
1888
1889
/*
 * t3_slow_intr_handler - top-level "slow path" interrupt dispatcher.
 * Reads the PL cause register, masks it with the causes we service, and
 * invokes each module handler whose bit is set.  Returns 0 if no serviced
 * cause was pending, 1 otherwise.
 */
int t3_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);

	cause &= adapter->slow_intr_mask;
	if (!cause)
		return 0;
	if (cause & F_PCIM0) {
		/* pick the bus-specific handler for this adapter */
		if (is_pcie(adapter))
			pcie_intr_handler(adapter);
		else
			pci_intr_handler(adapter);
	}
	if (cause & F_SGE3)
		t3_sge_err_intr_handler(adapter);
	if (cause & F_MC7_PMRX)
		mc7_intr_handler(&adapter->pmrx);
	if (cause & F_MC7_PMTX)
		mc7_intr_handler(&adapter->pmtx);
	if (cause & F_MC7_CM)
		mc7_intr_handler(&adapter->cm);
	if (cause & F_CIM)
		cim_intr_handler(adapter);
	if (cause & F_TP1)
		tp_intr_handler(adapter);
	if (cause & F_ULP2_RX)
		ulprx_intr_handler(adapter);
	if (cause & F_ULP2_TX)
		ulptx_intr_handler(adapter);
	if (cause & F_PM1_RX)
		pmrx_intr_handler(adapter);
	if (cause & F_PM1_TX)
		pmtx_intr_handler(adapter);
	if (cause & F_CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & F_MPS0)
		mps_intr_handler(adapter);
	if (cause & F_MC5A)
		t3_mc5_intr_handler(&adapter->mc5);
	if (cause & F_XGMAC0_0)
		mac_intr_handler(adapter, 0);
	if (cause & F_XGMAC0_1)
		mac_intr_handler(adapter, 1);
	if (cause & F_T3DBG)
		t3_os_ext_intr_handler(adapter);

	/* acknowledge the serviced causes; the read-back flushes the write */
	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);
	return 1;
}
1941
1942static unsigned int calc_gpio_intr(struct adapter *adap)
1943{
1944 unsigned int i, gpi_intr = 0;
1945
1946 for_each_port(adap, i)
1947 if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
1948 adapter_info(adap)->gpio_intr[i])
1949 gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
1950 return gpi_intr;
1951}
1952
1953
1954
1955
1956
1957
1958
1959
1960
/*
 * t3_intr_enable - enable adapter interrupts
 * @adapter: the adapter whose interrupts should be enabled
 *
 * Programs each module's INT_ENABLE register with its service mask, then
 * unmasks the serviced modules at the top (PL) level.  TP, CPL-switch and
 * ULP-TX enables depend on the chip revision; the GPIO enable comes from
 * calc_gpio_intr() and the PCI enable from the detected bus type.
 */
void t3_intr_enable(struct adapter *adapter)
{
	static const struct addr_val_pair intr_en_avp[] = {
		{A_SG_INT_ENABLE, SGE_INTR_MASK},
		{A_MC7_INT_ENABLE, MC7_INTR_MASK},
		/* the PMTX and CM MC7 instances sit at fixed offsets from
		 * the PMRX instance's register block */
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		 MC7_INTR_MASK},
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		 MC7_INTR_MASK},
		{A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
		{A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
		{A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
		{A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
		{A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
		{A_MPS_INT_ENABLE, MPS_INTR_MASK},
	};

	adapter->slow_intr_mask = PL_INTR_MASK;

	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
	t3_write_reg(adapter, A_TP_INT_ENABLE,
		     adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);

	if (adapter->params.rev > 0) {
		/* rev > 0 parts support additional CPL/ULP-TX causes */
		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
			     F_PBL_BOUND_ERR_CH1);
	} else {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
	}

	t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));

	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
	else
		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
}
2004
2005
2006
2007
2008
2009
2010
2011
/*
 * t3_intr_disable - disable adapter interrupts
 * @adapter: the adapter whose interrupts should be disabled
 *
 * Masks all causes at the top (PL) level and clears the cached service
 * mask so t3_slow_intr_handler() ignores anything still pending.
 */
void t3_intr_disable(struct adapter *adapter)
{
	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
	adapter->slow_intr_mask = 0;
}
2018
2019
2020
2021
2022
2023
2024
/*
 * t3_intr_clear - clear all interrupts
 * @adapter: the adapter whose interrupts should be cleared
 *
 * Clears every pending cause: per-port MAC/PHY causes first, then each
 * module cause register, the PEX error register on PCIe parts, and finally
 * the top-level PL cause.
 */
void t3_intr_clear(struct adapter *adapter)
{
	static const unsigned int cause_reg_addr[] = {
		A_SG_INT_CAUSE,
		A_SG_RSPQ_FL_STATUS,
		A_PCIX_INT_CAUSE,
		A_MC7_INT_CAUSE,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		A_CIM_HOST_INT_CAUSE,
		A_TP_INT_CAUSE,
		A_MC5_DB_INT_CAUSE,
		A_ULPRX_INT_CAUSE,
		A_ULPTX_INT_CAUSE,
		A_CPL_INTR_CAUSE,
		A_PM1_TX_INT_CAUSE,
		A_PM1_RX_INT_CAUSE,
		A_MPS_INT_CAUSE,
		A_T3DBG_INT_CAUSE,
	};
	unsigned int i;

	/* clear port-level causes before the module causes */
	for_each_port(adapter, i)
		t3_port_intr_clear(adapter, i);

	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);

	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
}
2059
/*
 * t3_xgm_intr_enable - enable the extra XGMAC interrupts (link-fault
 * change) for port @idx.
 */
void t3_xgm_intr_enable(struct adapter *adapter, int idx)
{
	struct port_info *pi = adap2pinfo(adapter, idx);

	t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
		     XGM_EXTRA_INTR_MASK);
}
2067
/*
 * t3_xgm_intr_disable - disable the extra XGMAC interrupts for port @idx
 * by writing the disable register.  0x7ff covers all of the XGM_INT bits
 * -- NOTE(review): magic constant, confirm against the register spec.
 */
void t3_xgm_intr_disable(struct adapter *adapter, int idx)
{
	struct port_info *pi = adap2pinfo(adapter, idx);

	t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
		     0x7ff);
}
2075
2076
2077
2078
2079
2080
2081
2082
2083
/*
 * t3_port_intr_enable - enable port-specific interrupts
 * @adapter: associated adapter
 * @idx: index of port whose interrupts should be enabled
 *
 * Enables the XGMAC interrupt mask for the port, flushes the write, then
 * enables the port's PHY interrupts.
 */
void t3_port_intr_enable(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
	phy->ops->intr_enable(phy);
}
2092
2093
2094
2095
2096
2097
2098
2099
2100
/*
 * t3_port_intr_disable - disable port-specific interrupts
 * @adapter: associated adapter
 * @idx: index of port whose interrupts should be disabled
 *
 * Clears the XGMAC interrupt enable for the port, flushes the write, then
 * disables the port's PHY interrupts.
 */
void t3_port_intr_disable(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
	phy->ops->intr_disable(phy);
}
2109
2110
2111
2112
2113
2114
2115
2116
2117
/*
 * t3_port_intr_clear - clear port-specific interrupts
 * @adapter: associated adapter
 * @idx: index of port whose interrupts to clear
 *
 * Acknowledges all XGMAC causes for the port, flushes the write, then
 * clears the port's PHY interrupts.
 */
void t3_port_intr_clear(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
	phy->ops->intr_clear(phy);
}
2126
/* polling budget for SGE context command completion */
#define SG_CONTEXT_CMD_ATTEMPTS 100

/*
 * t3_sge_write_context - write an SGE context
 * @adapter: the adapter
 * @id: the context id
 * @type: the context type (F_EGRESS, F_RESPONSEQ, ...)
 *
 * Issues a context-write command using the DATA0-3 registers the caller
 * has already filled in, then polls for completion.  Returns 0 on success
 * or -EAGAIN if the command does not complete within the polling budget.
 */
static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
				unsigned int type)
{
	if (type == F_RESPONSEQ) {
		/*
		 * For response queues MASK2 excludes a few bits (0x17ffffff
		 * rather than all-ones), so those context fields are left
		 * untouched by the write.
		 */
		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
	} else {
		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
	}
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
/*
 * clear_sge_ctxt - completely clear an SGE context
 * @adap: the adapter
 * @id: the context id
 * @type: the context type
 *
 * Zeroes all four data words with a full write mask and issues the
 * context-write command.  Returns 0 on success, -EAGAIN on timeout.
 */
static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
			  unsigned int type)
{
	t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
	t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
	return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
/*
 * t3_sge_init_ecntxt - initialize an SGE egress context
 * @adapter: the adapter to configure
 * @id: the context id
 * @gts_enable: whether to enable GTS for the context
 * @type: the egress context type
 * @respq: associated response queue
 * @base_addr: base address of queue (must be 4K aligned)
 * @size: number of queue entries
 * @token: uP token
 * @gen: initial generation value for the context
 * @cidx: consumer pointer
 *
 * Fills DATA0-3 with the egress context fields and writes the context.
 * Returns 0 on success, -EINVAL if @base_addr is not 4K aligned, or
 * -EBUSY if the context command interface is busy.
 */
int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
		       enum sge_context_type type, int respq, u64 base_addr,
		       unsigned int size, unsigned int token, int gen,
		       unsigned int cidx)
{
	/* offload queues start with zero credits */
	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;

	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;	/* store the address in 4K pages */
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
		     V_EC_BASE_LO(base_addr & 0xffff));
	base_addr >>= 16;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
		     F_EC_VALID);
	return t3_sge_write_context(adapter, id, F_EGRESS);
}
2235
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
/*
 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
 * @adapter: the adapter to configure
 * @id: the context id
 * @gts_enable: whether to enable GTS for the context
 * @base_addr: base address of queue (must be 4K aligned)
 * @size: number of queue entries
 * @bsize: size of each buffer for this queue
 * @cong_thres: threshold to signal congestion to upstream producers
 * @gen: initial generation value for the context
 * @cidx: consumer pointer
 *
 * Fills DATA0-3 with the free-list context fields and writes the context.
 * Returns 0 on success, -EINVAL if @base_addr is not 4K aligned, or
 * -EBUSY if the context command interface is busy.
 */
int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
			int gts_enable, u64 base_addr, unsigned int size,
			unsigned int bsize, unsigned int cong_thres, int gen,
			unsigned int cidx)
{
	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;	/* store the address in 4K pages */
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
		     V_FL_BASE_HI((u32) base_addr) |
		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
	return t3_sge_write_context(adapter, id, F_FREELIST);
}
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
/*
 * t3_sge_init_rspcntxt - initialize an SGE response queue context
 * @adapter: the adapter to configure
 * @id: the context id
 * @irq_vec_idx: MSI-X interrupt vector index, 0 for legacy, negative for none
 * @base_addr: base address of queue (must be 4K aligned)
 * @size: number of queue entries
 * @fl_thres: threshold for selecting the normal or jumbo free list
 * @gen: initial generation value for the context
 * @cidx: consumer pointer
 *
 * Fills DATA0-3 with the response queue context fields and writes the
 * context.  Returns 0 on success, -EINVAL if @base_addr is not 4K aligned,
 * or -EBUSY if the context command interface is busy.
 */
int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
			 int irq_vec_idx, u64 base_addr, unsigned int size,
			 unsigned int fl_thres, int gen, unsigned int cidx)
{
	unsigned int intr = 0;

	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;	/* store the address in 4K pages */
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
		     V_CQ_INDEX(cidx));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
	base_addr >>= 32;
	if (irq_vec_idx >= 0)	/* negative means interrupts disabled */
		intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
}
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
/*
 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
 * @adapter: the adapter to configure
 * @id: the context id
 * @base_addr: base address of queue (must be 4K aligned)
 * @size: number of queue entries
 * @rspq: response queue for async notifications
 * @ovfl_mode: CQ overflow mode
 * @credits: completion queue credits
 * @credit_thres: the credit threshold
 *
 * Fills DATA0-3 with the completion queue context fields and writes the
 * context.  Returns 0 on success, -EINVAL if @base_addr is not 4K aligned,
 * or -EBUSY if the context command interface is busy.
 */
int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
			unsigned int size, int rspq, int ovfl_mode,
			unsigned int credits, unsigned int credit_thres)
{
	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;	/* store the address in 4K pages */
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
		     V_CQ_ERR(ovfl_mode));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
		     V_CQ_CREDIT_THRES(credit_thres));
	return t3_sge_write_context(adapter, id, F_CQ);
}
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
/*
 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
 * @adapter: the adapter
 * @id: the egress context id
 * @enable: enable (1) or disable (0) the context
 *
 * Updates only the EC_VALID bit of the context (all other mask bits are
 * zero, so nothing else changes).  Returns 0 on success, -EBUSY if the
 * context command interface is busy, -EAGAIN on timeout.
 */
int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2377
2378
2379
2380
2381
2382
2383
2384
2385
/*
 * t3_sge_disable_fl - disable an SGE free-buffer list
 * @adapter: the adapter
 * @id: the free list context id
 *
 * Zeroes only the FL_SIZE field of the context (effectively disabling the
 * free list).  Returns 0 on success, -EBUSY if the context command
 * interface is busy, -EAGAIN on timeout.
 */
int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2401
2402
2403
2404
2405
2406
2407
2408
2409
/*
 * t3_sge_disable_rspcntxt - disable an SGE response queue
 * @adapter: the adapter
 * @id: the response queue context id
 *
 * Zeroes only the CQ_SIZE field of the context (effectively disabling the
 * queue).  Returns 0 on success, -EBUSY if the context command interface
 * is busy, -EAGAIN on timeout.
 */
int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2425
2426
2427
2428
2429
2430
2431
2432
2433
/*
 * t3_sge_disable_cqcntxt - disable an SGE completion queue
 * @adapter: the adapter
 * @id: the completion queue context id
 *
 * Zeroes only the CQ_SIZE field of the context (effectively disabling the
 * queue).  Returns 0 on success, -EBUSY if the context command interface
 * is busy, -EAGAIN on timeout.
 */
int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
/*
 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
 * @adapter: the adapter
 * @id: the context id
 * @op: the operation to perform
 * @credits: credits to return to the CQ
 *
 * Issues the given CQ command opcode and waits for completion.  For ops
 * 2..6 the CQ index is returned: rev > 0 parts report it directly in the
 * command result, while rev 0 parts need an explicit context read-back.
 * Returns the CQ index (or 0) on success, -EBUSY if the command interface
 * is busy, -EIO on timeout.
 */
int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
		      unsigned int credits)
{
	u32 val;

	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
		     V_CONTEXT(id) | F_CQ);
	if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
				0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
		return -EIO;

	if (op >= 2 && op < 7) {
		if (adapter->params.rev > 0)
			return G_CQ_INDEX(val);

		/* rev 0: read the context back to obtain the index */
		t3_write_reg(adapter, A_SG_CONTEXT_CMD,
			     V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
		if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
				    F_CONTEXT_CMD_BUSY, 0,
				    SG_CONTEXT_CMD_ATTEMPTS, 1))
			return -EIO;
		return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
	}
	return 0;
}
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
/*
 * t3_sge_read_context - read an SGE context
 * @type: the context type (F_EGRESS, F_CQ, ...)
 * @adapter: the adapter
 * @id: the context id
 * @data: holds the retrieved context (four 32-bit words)
 *
 * Issues a context-read command and copies DATA0-3 into @data.  Returns 0
 * on success, -EBUSY if the command interface is busy, -EIO on timeout.
 */
static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
			       unsigned int id, u32 data[4])
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
	if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
			    SG_CONTEXT_CMD_ATTEMPTS, 1))
		return -EIO;
	data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
	data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
	data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
	data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
	return 0;
}
2517
2518
2519
2520
2521
2522
2523
2524
2525
2526
2527int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2528{
2529 if (id >= 65536)
2530 return -EINVAL;
2531 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2532}
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2544{
2545 if (id >= 65536)
2546 return -EINVAL;
2547 return t3_sge_read_context(F_CQ, adapter, id, data);
2548}
2549
2550
2551
2552
2553
2554
2555
2556
2557
2558
2559int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2560{
2561 if (id >= SGE_QSETS * 2)
2562 return -EINVAL;
2563 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2564}
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2576{
2577 if (id >= SGE_QSETS)
2578 return -EINVAL;
2579 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2580}
2581
2582
2583
2584
2585
2586
2587
2588
2589
2590
2591
2592
2593
/*
 *	t3_config_rss - configure Rx packet steering (RSS)
 *	@adapter: the adapter
 *	@rss_config: value written to TP_RSS_CONFIG
 *	@cpus: values for the CPU lookup table, 0xff terminated; may be NULL
 *		to leave that table unchanged
 *	@rspq: values for the response queue map, 0xffff terminated; may be
 *		NULL to leave that table unchanged
 *
 *	Programs the RSS lookup tables.  The supplied value lists are
 *	repeated to fill all RSS_TABLE_SIZE entries: whenever the terminator
 *	is reached the list index wraps back to 0.
 */
void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
		   const u8 * cpus, const u16 *rspq)
{
	int i, j, cpu_idx = 0, q_idx = 0;

	if (cpus)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			u32 val = i << 16;	/* table index in upper half */

			/* pack two 6-bit CPU values per entry */
			for (j = 0; j < 2; ++j) {
				val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
				if (cpus[cpu_idx] == 0xff)
					cpu_idx = 0;
			}
			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
		}

	if (rspq)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
				     (i << 16) | rspq[q_idx++]);
			if (rspq[q_idx] == 0xffff)
				q_idx = 0;
		}

	t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
}
2621
2622
2623
2624
2625
2626
2627
2628
2629
/*
 *	t3_read_rss - read the contents of the RSS tables
 *	@adapter: the adapter
 *	@lkup: receives the CPU lookup table (2 bytes per entry); may be NULL
 *	@map: receives the response queue map; may be NULL
 *
 *	Reads back the RSS lookup tables.  Writing 0xffff0000 | i selects
 *	entry i for readback; bit 31 of the value read back indicates a
 *	valid entry.  Returns 0 on success, -EAGAIN if an entry reads back
 *	without the valid bit set.
 */
int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
{
	int i;
	u32 val;

	if (lkup)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
				     0xffff0000 | i);
			val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
			if (!(val & 0x80000000))
				return -EAGAIN;
			/* two packed CPU values per entry */
			*lkup++ = val;
			*lkup++ = (val >> 8);
		}

	if (map)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
				     0xffff0000 | i);
			val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
			if (!(val & 0x80000000))
				return -EAGAIN;
			*map++ = val;
		}
	return 0;
}
2657
2658
2659
2660
2661
2662
2663
2664
2665void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2666{
2667 if (is_offload(adap) || !enable)
2668 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2669 V_NICMODE(!enable));
2670}
2671
2672
2673
2674
2675
2676
2677
2678
2679
2680
/*
 *	pm_num_pages - calculate the number of pages of the payload memory
 *	@mem_size: the size of the payload memory, in bytes
 *	@pg_size: the size of each payload memory page, in bytes
 *
 *	Returns how many whole pages fit in the memory, rounded down to a
 *	multiple of 24 (the granularity the page managers work in).
 */
static inline unsigned int pm_num_pages(unsigned int mem_size,
					unsigned int pg_size)
{
	return (mem_size / pg_size / 24) * 24;
}
2688
/*
 * Write the base address of a CM memory region to register A_<reg> and
 * advance @start past the region.  Wrapped in do { } while (0) so both
 * statements stay together when the macro is used in an unbraced
 * conditional, and the arguments are parenthesized against operator-
 * precedence surprises.
 */
#define mem_region(adap, start, size, reg) \
	do { \
		t3_write_reg((adap), A_ ## reg, (start)); \
		(start) += (size); \
	} while (0)
2692
2693
2694
2695
2696
2697
2698
2699
2700
/*
 *	partition_mem - partition memory and configure TP memory settings
 *	@adap: the adapter
 *	@p: the TP parameters
 *
 *	Lays out CM memory (TCBs, SGE contexts, timers, page structs and
 *	their free lists, then the uP region) and programs the TP payload
 *	memory manager registers.  May grow mc5.nservers if fewer TIDs fit
 *	than the MC5 is configured for.
 */
static void partition_mem(struct adapter *adap, const struct tp_params *p)
{
	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
	unsigned int timers = 0, timers_shift = 22;

	/* Size the per-connection timer region by TID count.  Rev 0 parts
	 * only support the default (largest) configuration. */
	if (adap->params.rev > 0) {
		if (tids <= 16 * 1024) {
			timers = 1;
			timers_shift = 16;
		} else if (tids <= 64 * 1024) {
			timers = 2;
			timers_shift = 18;
		} else if (tids <= 256 * 1024) {
			timers = 3;
			timers_shift = 20;
		}
	}

	t3_write_reg(adap, A_TP_PMM_SIZE,
		     p->chan_rx_size | (p->chan_tx_size >> 16));

	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));

	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);

	pstructs = p->rx_num_pgs + p->tx_num_pgs;
	/* Add headroom and round down to a multiple of 24, the granularity
	 * of the page-struct free lists (see pm_num_pages). */
	pstructs += 48;
	pstructs -= pstructs % 24;
	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);

	/* m tracks the running offset into CM memory */
	m = tids * TCB_SIZE;
	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);

	/* the remainder of CM memory, 4KB-aligned, goes to the uP */
	m = (m + 4095) & ~0xfff;
	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);

	/* If fewer TIDs fit in the remaining space than the MC5 expects,
	 * absorb the difference into the server region. */
	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
	if (tids < m)
		adap->params.mc5.nservers += m - tids;
}
2758
/* Write @val to indirect TP register @addr via the TP_PIO window; the
 * address write must precede the data write. */
static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
				  u32 val)
{
	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
	t3_write_reg(adap, A_TP_PIO_DATA, val);
}
2765
/*
 *	tp_config - static TP configuration
 *	@adap: the adapter
 *	@p: the TP parameters (currently unused here)
 *
 *	Programs TP's global, TCP-option, delayed-ACK and resource-limit
 *	registers with the driver's fixed defaults, plus a handful of
 *	revision-specific settings.
 */
static void tp_config(struct adapter *adap, const struct tp_params *p)
{
	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
		     V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
		     V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
			 F_IPV6ENABLE | F_NICMODE);
	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
	/* rev 0 (T3A) parts use a different early-send-data enable bit */
	t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
			 adap->params.rev > 0 ? F_ENABLEESND :
			 F_T3A_ENABLEESND);

	t3_set_reg_field(adap, A_TP_PC_CONFIG,
			 F_ENABLEEPCMDAFULL,
			 F_ENABLEOCSPIFULL | F_TXDEFERENABLE | F_HEARBEATDACK |
			 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
			 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
			 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
	/* written twice: settle value first, then the operating value —
	 * NOTE(review): presumably intentional HW sequencing, confirm */
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);

	if (adap->params.rev > 0) {
		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
		t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
				 F_TXPACEAUTO);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
	} else
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);

	if (adap->params.rev == T3_REV_C)
		t3_set_reg_field(adap, A_TP_PC_CONFIG,
				 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
				 V_TABLELATENCYDELTA(4));

	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
	t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
}
2815
2816
/* TP timer resolution, in usec */
#define TP_TMR_RES 50

/* TP delayed ACK timer and minimum retransmit timeout, in ms */
#define TP_DACK_TIMER 50
#define TP_RTO_MIN 250
2822
2823
2824
2825
2826
2827
2828
2829
2830
/*
 *	tp_set_timers - set TP timing parameters
 *	@adap: the adapter to set
 *	@core_clk: the core clock frequency, in Hz
 *
 *	Derives the TP timer resolutions (as log2 clock dividers) from the
 *	core clock and programs all of TP's timer registers.
 */
static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
{
	/* log2 dividers for the main timer (TP_TMR_RES us ticks), the
	 * delayed-ACK timer and the timestamp resolution */
	unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
	unsigned int dack_re = fls(core_clk / 5000) - 1;
	unsigned int tstamp_re = fls(core_clk / 1000);
	unsigned int tps = core_clk >> tre;	/* timer ticks per second */

	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
		     V_DELAYEDACKRESOLUTION(dack_re) |
		     V_TIMESTAMPRESOLUTION(tstamp_re));
	t3_write_reg(adap, A_TP_DACK_TIMER,
		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
	/* exponential backoff multipliers, four 8-bit values per register */
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
		     V_KEEPALIVEMAX(9));

/* converts a duration in seconds into timer ticks for the writes below */
#define SECONDS * tps

	t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);

#undef SECONDS
}
2866
2867
2868
2869
2870
2871
2872
2873
2874
/*
 *	t3_tp_set_coalescing_size - set receive coalescing size
 *	@adap: the adapter
 *	@size: the receive coalescing size, in bytes; 0 disables coalescing
 *	@psh: nonzero if a set TCP PSH bit should flush coalesced data
 *
 *	Sets the receive coalescing size and PSH-bit handling.  Returns 0
 *	on success, -EINVAL if @size exceeds MAX_RX_COALESCING_LEN.
 */
int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
{
	u32 val;

	if (size > MAX_RX_COALESCING_LEN)
		return -EINVAL;

	/* read-modify-write of the enable bits only */
	val = t3_read_reg(adap, A_TP_PARA_REG3);
	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);

	if (size) {
		val |= F_RXCOALESCEENABLE;
		if (psh)
			val |= F_RXCOALESCEPSHEN;
		/* redundant given the check above, but harmless */
		size = min(MAX_RX_COALESCING_LEN, size);
		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
	}
	t3_write_reg(adap, A_TP_PARA_REG3, val);
	return 0;
}
2896
2897
2898
2899
2900
2901
2902
2903
2904
/*
 *	t3_tp_set_max_rxsize - set the max receive size
 *	@adap: the adapter
 *	@size: the max receive size, in bytes, applied to both PM transfer
 *		length limits
 */
void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
{
	t3_write_reg(adap, A_TP_PARA_REG7,
		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
}
2910
/*
 * Fill @mtus with the default MTU table: 16 ascending values covering the
 * common link MTUs from the 88-byte minimum up to 9600-byte jumbo frames.
 */
static void init_mtus(unsigned short mtus[])
{
	static const unsigned short default_mtus[16] = {
		88, 88, 256, 512, 576, 1024, 1280, 1492,
		1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
	};
	int i;

	for (i = 0; i < 16; i++)
		mtus[i] = default_mtus[i];
}
2935
2936
2937
2938
/*
 * Fill @a and @b with the default congestion control parameters: @a holds
 * the additive-increase multipliers and @b the backoff shifts, one pair
 * per congestion control window.
 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	static const unsigned short alpha[32] = {
		1, 1, 1, 1, 1, 1, 1, 1, 1,
		2, 3, 4, 5, 6, 7, 8, 9, 10,
		14, 17, 21, 25, 30, 35, 45, 60, 80, 100,
		200, 300, 400, 500
	};
	static const unsigned short beta[32] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0,
		1, 1, 2, 2, 3, 3, 3, 3,
		4, 4, 4, 4, 4,
		5, 5, 5, 5, 5, 5,
		6, 6, 7, 7
	};
	int i;

	for (i = 0; i < 32; i++) {
		a[i] = alpha[i];
		b[i] = beta[i];
	}
}
2975
2976
/* minimum congestion control window increment, in bytes */
#define CC_MIN_INCR 2U
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
2990
/*
 *	t3_load_mtus - write the MTU and congestion control HW tables
 *	@adap: the adapter
 *	@mtus: the unrestricted values for the MTU table
 *	@alpha: the values for the congestion control alpha parameter
 *	@beta: the values for the congestion control beta parameter
 *	@mtu_cap: the maximum permitted effective MTU
 *
 *	Writes the HW MTU table with the supplied MTUs capped at @mtu_cap,
 *	and the congestion control table with the per-MTU, per-window
 *	additive increments derived from @alpha and the per-packet averages.
 */
void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
		  unsigned short alpha[NCCTRL_WIN],
		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
{
	/* expected average packet count per congestion window */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = min(mtus[i], mtu_cap);
		unsigned int log2 = fls(mtu);

		/* round the log2 approximation of the MTU */
		if (!(mtu & ((1 << log2) >> 2)))
			log2--;
		t3_write_reg(adap, A_TP_MTU_TABLE,
			     (i << 24) | (log2 << 16) | mtu);

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/* increment is payload (MTU minus 40-byte header)
			 * scaled by alpha over the window's packet average,
			 * floored at CC_MIN_INCR */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
3023
3024
3025
3026
3027
3028
3029
3030
/*
 *	t3_read_hw_mtus - read the current HW MTU table
 *	@adap: the adapter
 *	@mtus: where to store the MTU values (low 14 bits of each entry)
 *
 *	Reads back the NMTUS entries of the HW MTU table.  Writing
 *	0xff000000 | i selects entry i for readback.
 */
void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
{
	int i;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int val;

		t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
		val = t3_read_reg(adap, A_TP_MTU_TABLE);
		mtus[i] = val & 0x3fff;
	}
}
3043
3044
3045
3046
3047
3048
3049
3050
3051
/*
 *	t3_get_cong_cntl_tab - read the congestion control table
 *	@adap: the adapter
 *	@incr: where to store the 13-bit additive increments, one per
 *		MTU/window pair
 *
 *	Reads back the entire HW congestion control table.  Writing
 *	0xffff0000 | (mtu << 5) | w selects the entry for readback.
 */
void t3_get_cong_cntl_tab(struct adapter *adap,
			  unsigned short incr[NMTUS][NCCTRL_WIN])
{
	unsigned int mtu, w;

	for (mtu = 0; mtu < NMTUS; ++mtu)
		for (w = 0; w < NCCTRL_WIN; ++w) {
			t3_write_reg(adap, A_TP_CCTRL_TABLE,
				     0xffff0000 | (mtu << 5) | w);
			incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
				       0x1fff;
		}
}
3065
3066
3067
3068
3069
3070
3071
3072
/*
 *	t3_tp_get_mib_stats - read TP's MIB counters
 *	@adap: the adapter
 *	@tps: holds the returned counter values
 *
 *	Fills @tps word by word through the TP MIB indirect read interface,
 *	starting at index 0.
 */
void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
{
	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
			 sizeof(*tps) / sizeof(u32), 0);
}
3078
/*
 * Program an ULP RX memory region: write its lower/upper limits and
 * advance @start past the region.  Wrapped in do { } while (0) so the
 * statements stay together when the macro is used in an unbraced
 * conditional.
 */
#define ulp_region(adap, name, start, len) \
	do { \
		t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
		t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
			     (start) + (len) - 1); \
		(start) += (len); \
	} while (0)

/* Program an ULP TX memory region; unlike ulp_region this deliberately
 * does not advance @start. */
#define ulptx_region(adap, name, start, len) \
	do { \
		t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
		t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
			     (start) + (len) - 1); \
	} while (0)
3089
/*
 *	ulp_config - carve RX payload memory into the ULP regions
 *	@adap: the adapter
 *	@p: the TP parameters supplying the per-channel RX size
 *
 *	Lays out the iSCSI, TDDP, TPT, STAG, RQ and PBL regions starting at
 *	offset chan_rx_size; @m tracks the running offset.
 */
static void ulp_config(struct adapter *adap, const struct tp_params *p)
{
	unsigned int m = p->chan_rx_size;

	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
	/* ULP TX and RX PBL share the same range: ulptx_region does not
	 * advance m, so the ulp_region below covers the identical span. */
	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
}
3103
3104
3105
3106
3107
3108
3109
3110
3111int t3_set_proto_sram(struct adapter *adap, const u8 *data)
3112{
3113 int i;
3114 const __be32 *buf = (const __be32 *)data;
3115
3116 for (i = 0; i < PROTO_SRAM_LINES; i++) {
3117 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
3118 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
3119 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
3120 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
3121 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
3122
3123 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
3124 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
3125 return -EIO;
3126 }
3127 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
3128
3129 return 0;
3130}
3131
/*
 *	t3_config_trace_filter - configure one of the tracing filters
 *	@adapter: the adapter
 *	@tp: the desired trace filter parameters
 *	@filter_index: which filter to configure (0 selects the TX filter
 *		bank, nonzero the RX bank)
 *	@invert: if set, items matching the filter are excluded
 *	@enable: whether to enable the filter
 *
 *	Packs the filter's key and mask values into four 32-bit words each
 *	and writes them, interleaved, through the TP indirect interface.
 */
void t3_config_trace_filter(struct adapter *adapter,
			    const struct trace_params *tp, int filter_index,
			    int invert, int enable)
{
	u32 addr, key[4], mask[4];

	/* pack sport/sip/dport/dip/proto/vlan/intf into the key words */
	key[0] = tp->sport | (tp->sip << 16);
	key[1] = (tp->sip >> 16) | (tp->dport << 16);
	key[2] = tp->dip;
	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);

	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
	mask[2] = tp->dip_mask;
	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);

	/* control bits live in the top of key[3] */
	if (invert)
		key[3] |= (1 << 29);
	if (enable)
		key[3] |= (1 << 28);

	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
	tp_wr_indirect(adapter, addr++, key[0]);
	tp_wr_indirect(adapter, addr++, mask[0]);
	tp_wr_indirect(adapter, addr++, key[1]);
	tp_wr_indirect(adapter, addr++, mask[1]);
	tp_wr_indirect(adapter, addr++, key[2]);
	tp_wr_indirect(adapter, addr++, mask[2]);
	tp_wr_indirect(adapter, addr++, key[3]);
	tp_wr_indirect(adapter, addr, mask[3]);
	t3_read_reg(adapter, A_TP_PIO_DATA);	/* flush the writes */
}
3164
3165
3166
3167
3168
3169
3170
3171
3172
/*
 *	t3_config_sched - configure a HW traffic scheduler
 *	@adap: the adapter
 *	@kbps: the target rate in Kbps
 *	@sched: the scheduler index
 *
 *	Searches for the clocks-per-tick / bytes-per-tick pair that best
 *	approximates the target rate and programs it into the scheduler's
 *	half of the shared rate-limit register.  A @kbps of 0 programs a
 *	zero pair.  Returns -EINVAL if no usable pair exists.
 */
int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
{
	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
	unsigned int clk = adap->params.vpd.cclk * 1000;	/* in Hz */
	unsigned int selected_cpt = 0, selected_bpt = 0;

	if (kbps > 0) {
		kbps *= 125;	/* -> bytes/sec */
		for (cpt = 1; cpt <= 255; cpt++) {	/* clocks/tick */
			tps = clk / cpt;	/* ticks/sec */
			bpt = (kbps + tps / 2) / tps;	/* rounded bytes/tick */
			if (bpt > 0 && bpt <= 255) {
				v = bpt * tps;
				delta = v >= kbps ? v - kbps : kbps - v;
				if (delta <= mindelta) {
					mindelta = delta;
					selected_cpt = cpt;
					selected_bpt = bpt;
				}
			} else if (selected_cpt)
				/* bpt out of range and we already have a
				 * candidate; larger cpt only gets worse */
				break;
		}
		if (!selected_cpt)
			return -EINVAL;
	}
	/* two schedulers share each rate-limit register; sched & 1 selects
	 * the upper or lower half */
	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
	else
		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
	return 0;
}
3208
/*
 *	tp_init - initialize TP
 *	@adap: the adapter
 *	@p: the TP parameters
 *
 *	Performs TP configuration, sets up the timers on offload-capable
 *	adapters, waits for free-list initialization and finally takes TP
 *	out of reset.  Returns 0 on success, nonzero on timeout.
 */
static int tp_init(struct adapter *adap, const struct tp_params *p)
{
	int busy = 0;

	tp_config(adap, p);
	t3_set_vlan_accel(adap, 3, 0);

	if (is_offload(adap)) {
		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
		/* kick off free-list initialization and wait for the HW to
		 * clear the bit */
		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
				       0, 1000, 5);
		if (busy)
			CH_ERR(adap, "TP initialization timed out\n");
	}

	if (!busy)
		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
	return busy;
}
3229
/*
 *	t3_mps_set_active_ports - configure the active ports in MPS
 *	@adap: the adapter
 *	@port_mask: bitmap of the ports to activate
 *
 *	Sets the MPS active-port bits according to @port_mask.  Returns
 *	-EINVAL if the mask names a port the adapter does not have.
 */
int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
{
	if (port_mask & ~((1 << adap->params.nports) - 1))
		return -EINVAL;
	t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
			 port_mask << S_PORT0ACTIVE);
	return 0;
}
3238
3239
3240
3241
3242
/*
 *	chan_init_hw - channel-dependent HW initialization
 *	@adap: the adapter
 *	@chan_map: bitmap of the Tx channels in use
 *
 *	Configures MPS, ULP and the PM for either single-channel operation
 *	(chan_map 1 or 2) or dual-channel round-robin operation (chan_map 3).
 */
static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
{
	int i;

	if (chan_map != 3) {	/* one channel */
		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
			     (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
					      F_TPTXPORT1EN | F_PORT1ACTIVE));
		/* all PM TX memory to the active channel */
		t3_write_reg(adap, A_PM1_TX_CFG,
			     chan_map == 1 ? 0xffffffff : 0);
	} else {	/* two channels */
		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
			     F_ENFORCEPKT);
		/* PM TX memory split evenly between the channels */
		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
		for (i = 0; i < 16; i++)
			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
				     (i << 16) | 0x1010);
	}
}
3272
/*
 *	calibrate_xgm - calibrate the XGMAC analog pads
 *	@adapter: the adapter
 *
 *	XAUI parts run automatic impedance calibration, retried up to five
 *	times; RGMII parts are programmed with fixed pull-up/pull-down
 *	values.  Returns 0 on success, -1 if XAUI calibration fails.
 */
static int calibrate_xgm(struct adapter *adapter)
{
	if (uses_xaui(adapter)) {
		unsigned int v, i;

		for (i = 0; i < 5; ++i) {
			/* restart calibration and give it 1 ms to finish */
			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
			t3_read_reg(adapter, A_XGM_XAUI_IMP);	/* flush */
			msleep(1);
			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
				/* latch the calibrated impedance value */
				t3_write_reg(adapter, A_XGM_XAUI_IMP,
					     V_XAUIIMP(G_CALIMP(v) >> 2));
				return 0;
			}
		}
		CH_ERR(adapter, "MAC calibration failed\n");
		return -1;
	} else {
		t3_write_reg(adapter, A_XGM_RGMII_IMP,
			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 F_XGM_IMPSETUPDATE);
	}
	return 0;
}
3299
/*
 *	calibrate_xgm_t3b - T3B-variant pad calibration
 *	@adapter: the adapter
 *
 *	RGMII parts get an explicit reset/update pulse sequence on the
 *	impedance calibration controls; XAUI parts need no action here.
 */
static void calibrate_xgm_t3b(struct adapter *adapter)
{
	if (!uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		/* release reset, then pulse IMPSETUPDATE and CALUPDATE */
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
				 F_XGM_IMPSETUPDATE);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
	}
}
3314
/* MC7 memory controller SDRAM timing parameters (controller cycle counts —
 * NOTE(review): units presumed from register names, confirm vs. MC7 spec) */
struct mc7_timing_params {
	unsigned char ActToPreDly;	/* active to precharge delay */
	unsigned char ActToRdWrDly;	/* active to read/write delay */
	unsigned char PreCyc;		/* precharge cycles */
	unsigned char RefCyc[5];	/* refresh cycles, indexed by density */
	unsigned char BkCyc;		/* bank cycles */
	unsigned char WrToRdDly;	/* write to read delay */
	unsigned char RdToWrDly;	/* read to write delay */
};
3324
3325
3326
3327
3328
3329
/*
 * Write @val to MC7 register @addr and verify completion: the first read
 * flushes the posted write, after which F_BUSY must already be clear.
 * Returns 0 on success, -EIO otherwise.
 */
static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
{
	t3_write_reg(adapter, addr, val);
	t3_read_reg(adapter, addr);	/* flush */
	if (!(t3_read_reg(adapter, addr) & F_BUSY))
		return 0;
	CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
	return -EIO;
}
3339
/*
 *	mc7_init - initialize an MC7 memory controller and its memory
 *	@mc7: the MC7 to initialize
 *	@mc7_clock: the MC7 clock rate
 *	@mem_type: index selecting the SDRAM mode register value and the
 *		timing parameter set
 *
 *	Calibrates the pads, programs the timing and mode registers, runs
 *	the SDRAM initialization sequence, enables periodic refresh and ECC,
 *	clears the whole memory via BIST, and marks the controller ready.
 *	Returns 0 on success (including when no memory is attached), -1 on
 *	any failure.
 */
static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
{
	static const unsigned int mc7_mode[] = {
		0x632, 0x642, 0x652, 0x432, 0x442
	};
	static const struct mc7_timing_params mc7_timings[] = {
		{12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
		{12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
		{12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
		{9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
		{9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
	};

	u32 val;
	unsigned int width, density, slow, attempts;
	struct adapter *adapter = mc7->adapter;
	const struct mc7_timing_params *p = &mc7_timings[mem_type];

	/* nothing to do if no memory is attached to this controller */
	if (!mc7->size)
		return 0;

	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	slow = val & F_SLOW;
	width = G_WIDTH(val);
	density = G_DEN(val);

	/* enable the memory interface and let it settle */
	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
	msleep(1);

	if (!slow) {
		/* run a single pad calibration cycle */
		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
		t3_read_reg(adapter, mc7->offset + A_MC7_CAL);	/* flush */
		msleep(1);
		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
			CH_ERR(adapter, "%s MC7 calibration timed out\n",
			       mc7->name);
			goto out_fail;
		}
	}

	/* program SDRAM timings for this memory type and density */
	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
		     V_ACTTOPREDLY(p->ActToPreDly) |
		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
		     val | F_CLKEN | F_TERM150);
	t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */

	if (!slow)
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
				 F_DLLENB);
	udelay(1);

	/* SDRAM init sequence: precharge, extended mode registers, then
	 * the mode register proper */
	val = slow ? 3 : 6;
	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	if (!slow) {
		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
		udelay(5);
	}

	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
		       mc7_mode[mem_type]) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	/* derive the periodic refresh divisor from the clock —
	 * NOTE(review): 7812 presumably encodes the 7.8125 us refresh
	 * interval; confirm the units of @mc7_clock against the MC7 spec */
	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;
	mc7_clock /= 1000000;

	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
		     F_PERREFEN | V_PREREFDIV(mc7_clock));
	t3_read_reg(adapter, mc7->offset + A_MC7_REF);	/* flush */

	/* enable ECC and run a BIST write of zeros over the whole memory */
	t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
		     (mc7->size << width) - 1);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
	t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);	/* flush */

	/* poll for BIST completion, up to 50 * 250 ms */
	attempts = 50;
	do {
		msleep(250);
		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
	} while ((val & F_BUSY) && --attempts);
	if (val & F_BUSY) {
		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
		goto out_fail;
	}

	/* this memory is now ready for use */
	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
	return 0;

out_fail:
	return -1;
}
3452
/*
 *	config_pcie - final PCIe link configuration
 *	@adap: the adapter
 *
 *	Computes and programs the PCIe ACK latency and replay-buffer limits
 *	from the negotiated link width and max payload size, clears pending
 *	PEX errors, and enables link-down reset handling.
 */
static void config_pcie(struct adapter *adap)
{
	/* ACK latency table, indexed [log2 link width][payload size code] */
	static const u16 ack_lat[4][6] = {
		{237, 416, 559, 1071, 2095, 4143},
		{128, 217, 289, 545, 1057, 2081},
		{73, 118, 154, 282, 538, 1050},
		{67, 107, 86, 150, 278, 534}
	};
	/* replay timer table, same indexing */
	static const u16 rpl_tmr[4][6] = {
		{711, 1248, 1677, 3213, 6285, 12429},
		{384, 651, 867, 1635, 3171, 6243},
		{219, 354, 462, 846, 1614, 3150},
		{201, 321, 258, 450, 834, 1602}
	};

	u16 val, devid;
	unsigned int log2_width, pldsize;
	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;

	pci_read_config_word(adap->pdev,
			     adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
			     &val);
	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;

	/* device id 0x37: force minimum payload and read-request sizes —
	 * NOTE(review): presumably a workaround for that part, confirm */
	pci_read_config_word(adap->pdev, 0x2, &devid);
	if (devid == 0x37) {
		pci_write_config_word(adap->pdev,
				      adap->params.pci.pcie_cap_addr +
				      PCI_EXP_DEVCTL,
				      val & ~PCI_EXP_DEVCTL_READRQ &
				      ~PCI_EXP_DEVCTL_PAYLOAD);
		pldsize = 0;
	}

	pci_read_config_word(adap->pdev,
			     adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
			     &val);

	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
		     G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
	log2_width = fls(adap->params.pci.width) - 1;
	acklat = ack_lat[log2_width][pldsize];
	if (val & 1)	/* extend latency if common clock is enabled */
		acklat += fst_trn_tx * 4;
	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;

	/* rev 0 parts keep the ACK latency in a different field */
	if (adap->params.rev == 0)
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
				 V_T3A_ACKLAT(M_T3A_ACKLAT),
				 V_T3A_ACKLAT(acklat));
	else
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
				 V_ACKLAT(acklat));

	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
			 V_REPLAYLMT(rpllmt));

	/* clear any stale PEX errors, then enable link-down handling */
	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
	t3_set_reg_field(adap, A_PCIE_CFG, 0,
			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
}
3516
3517
3518
3519
3520
3521
3522
3523
3524
/*
 *	t3_init_hw - initialize and configure the adapter
 *	@adapter: the adapter
 *	@fw_params: initial parameters to pass to the firmware (ORed with
 *		the uP clock)
 *
 *	Performs the bulk of HW initialization: MAC calibration, memory
 *	controllers and MC5, TP, SGE, the PCI(E) interface, and finally
 *	boots the firmware from flash and waits for it to come up.
 *	Returns 0 on success, -EIO on failure.
 */
int t3_init_hw(struct adapter *adapter, u32 fw_params)
{
	int err = -EIO, attempts, i;
	const struct vpd_params *vpd = &adapter->params.vpd;

	if (adapter->params.rev > 0)
		calibrate_xgm_t3b(adapter);
	else if (calibrate_xgm(adapter))
		goto out_err;

	if (vpd->mclk) {
		partition_mem(adapter, &adapter->params.tp);

		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
				adapter->params.mc5.nfilters,
				adapter->params.mc5.nroutes))
			goto out_err;

		/* clear the first 32 SG CQ contexts */
		for (i = 0; i < 32; i++)
			if (clear_sge_ctxt(adapter, i, F_CQ))
				goto out_err;
	}

	if (tp_init(adapter, &adapter->params.tp))
		goto out_err;

	t3_tp_set_coalescing_size(adapter,
				  min(adapter->params.sge.max_pkt_size,
				      MAX_RX_COALESCING_LEN), 1);
	t3_tp_set_max_rxsize(adapter,
			     min(adapter->params.sge.max_pkt_size, 16384U));
	ulp_config(adapter, &adapter->params.tp);

	if (is_pcie(adapter))
		config_pcie(adapter);
	else
		t3_set_reg_field(adapter, A_PCIX_CFG, 0,
				 F_DMASTOPEN | F_CLIDECEN);

	if (adapter->params.rev == T3_REV_C)
		t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
				 F_CFG_CQE_SOP_MASK);

	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
	chan_init_hw(adapter, adapter->params.chan_map);
	t3_sge_init(adapter, &adapter->params.sge);

	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));

	/* hand the uP its parameters and boot the firmware from flash */
	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
	t3_write_reg(adapter, A_CIM_BOOT_CFG,
		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
	t3_read_reg(adapter, A_CIM_BOOT_CFG);	/* flush */

	/* the uP clears HOST_ACC_DATA when it is done; allow up to 2 s */
	attempts = 100;
	do {
		msleep(20);
	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
	if (!attempts) {
		CH_ERR(adapter, "uP initialization timed out\n");
		goto out_err;
	}

	err = 0;
out_err:
	return err;
}
3597
3598
3599
3600
3601
3602
3603
3604
3605
/*
 *	get_pci_mode - determine a card's PCI mode
 *	@adapter: the adapter
 *	@p: where to store the PCI settings
 *
 *	Determines the PCI variant (PCIe vs. PCI/PCI-X), bus width, and for
 *	conventional PCI/PCI-X also the bus speed.  On PCIe only the link
 *	width and capability offset are recorded — p->speed is left as-is,
 *	so callers presumably rely on width alone there (NOTE(review):
 *	confirm).
 */
static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
{
	static unsigned short speed_map[] = { 33, 66, 100, 133 };
	u32 pci_mode, pcie_cap;

	pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (pcie_cap) {
		u16 val;

		p->variant = PCI_VARIANT_PCIE;
		p->pcie_cap_addr = pcie_cap;
		pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
				     &val);
		/* negotiated link width from the Link Status register */
		p->width = (val >> 4) & 0x3f;
		return;
	}

	pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
	p->width = (pci_mode & F_64BIT) ? 64 : 32;
	pci_mode = G_PCIXINITPAT(pci_mode);
	if (pci_mode == 0)
		p->variant = PCI_VARIANT_PCI;
	else if (pci_mode < 4)
		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
	else if (pci_mode < 8)
		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
	else
		p->variant = PCI_VARIANT_PCIX_266_MODE2;
}
3636
3637
3638
3639
3640
3641
3642
3643
3644
3645
3646static void init_link_config(struct link_config *lc, unsigned int caps)
3647{
3648 lc->supported = caps;
3649 lc->requested_speed = lc->speed = SPEED_INVALID;
3650 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3651 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3652 if (lc->supported & SUPPORTED_Autoneg) {
3653 lc->advertising = lc->supported;
3654 lc->autoneg = AUTONEG_ENABLE;
3655 lc->requested_fc |= PAUSE_AUTONEG;
3656 } else {
3657 lc->advertising = 0;
3658 lc->autoneg = AUTONEG_DISABLE;
3659 }
3660}
3661
3662
3663
3664
3665
3666
3667
3668
3669static unsigned int mc7_calc_size(u32 cfg)
3670{
3671 unsigned int width = G_WIDTH(cfg);
3672 unsigned int banks = !!(cfg & F_BKS) + 1;
3673 unsigned int org = !!(cfg & F_ORG) + 1;
3674 unsigned int density = G_DEN(cfg);
3675 unsigned int MBs = ((256 << density) * banks) / (org << width);
3676
3677 return MBs << 20;
3678}
3679
3680static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3681 unsigned int base_addr, const char *name)
3682{
3683 u32 cfg;
3684
3685 mc7->adapter = adapter;
3686 mc7->name = name;
3687 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3688 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3689 mc7->size = mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3690 mc7->width = G_WIDTH(cfg);
3691}
3692
/*
 * mac_prep - initialize the SW state for a port's MAC
 * @mac: the MAC to initialize
 * @adapter: the adapter the MAC belongs to
 * @index: index of the MAC/port
 *
 * Records the adapter pointer and register offset for the MAC and, on
 * rev-0 XAUI parts, applies a SERDES/port-config workaround.
 */
void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
{
	u16 devid;

	mac->adapter = adapter;
	/* read the PCI device id directly from config space offset 0x2 */
	pci_read_config_word(adapter->pdev, 0x2, &devid);

	/*
	 * NOTE(review): device id 0x37 with no second XAUI config entry is
	 * forced onto MAC 0 -- presumably a single-port SKU; confirm against
	 * the supported device list.
	 */
	if (devid == 0x37 && !adapter->params.vpd.xauicfg[1])
		index = 0;
	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
	mac->nucast = 1;	/* start with a single unicast address slot */

	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
		/*
		 * Rev-0 XAUI workaround: program the SERDES control register
		 * (magic values differ for 10G vs 1G -- NOTE(review): values
		 * assumed to come from HW documentation) and clear ENRGMII.
		 */
		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
				 F_ENRGMII, 0);
	}
}
3712
/*
 * early_hw_init - one-time early hardware initialization
 * @adapter: the adapter
 * @ai: adapter type description
 *
 * Sets up the MDIO interface, I2C clock divider, GPIO outputs, and the
 * XGMAC port configuration for both ports.  Register write ordering here
 * is significant; do not reorder.
 */
void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
{
	/* port speed field: 3 for 10G parts, 2 otherwise */
	u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);

	mi1_init(adapter, ai);
	/* I2C clock divider derived from the VPD core clock --
	 * NOTE(review): the /80 - 1 scaling is assumed from cclk units */
	t3_write_reg(adapter, A_I2C_CFG,
		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
		     ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
	t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
	t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));

	/* rev-0 parts and RGMII-attached PHYs need ENRGMII set */
	if (adapter->params.rev == 0 || !uses_xaui(adapter))
		val |= F_ENRGMII;

	/* write the config, then read it back (presumably to flush the
	 * posted write before the next step) */
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);

	/* assert the clock-divider reset on both XGMACs, with a read-back
	 * after each write */
	val |= F_CLKDIVRESET_;
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
}
3738
3739
3740
3741
3742
3743
3744int t3_reset_adapter(struct adapter *adapter)
3745{
3746 int i, save_and_restore_pcie =
3747 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3748 uint16_t devid = 0;
3749
3750 if (save_and_restore_pcie)
3751 pci_save_state(adapter->pdev);
3752 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3753
3754
3755
3756
3757
3758 for (i = 0; i < 10; i++) {
3759 msleep(50);
3760 pci_read_config_word(adapter->pdev, 0x00, &devid);
3761 if (devid == 0x1425)
3762 break;
3763 }
3764
3765 if (devid != 0x1425)
3766 return -1;
3767
3768 if (save_and_restore_pcie)
3769 pci_restore_state(adapter->pdev);
3770 return 0;
3771}
3772
3773static int init_parity(struct adapter *adap)
3774{
3775 int i, err, addr;
3776
3777 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3778 return -EBUSY;
3779
3780 for (err = i = 0; !err && i < 16; i++)
3781 err = clear_sge_ctxt(adap, i, F_EGRESS);
3782 for (i = 0xfff0; !err && i <= 0xffff; i++)
3783 err = clear_sge_ctxt(adap, i, F_EGRESS);
3784 for (i = 0; !err && i < SGE_QSETS; i++)
3785 err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
3786 if (err)
3787 return err;
3788
3789 t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3790 for (i = 0; i < 4; i++)
3791 for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3792 t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3793 F_IBQDBGWR | V_IBQDBGQID(i) |
3794 V_IBQDBGADDR(addr));
3795 err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3796 F_IBQDBGBUSY, 0, 2, 1);
3797 if (err)
3798 return err;
3799 }
3800 return 0;
3801}
3802
3803
3804
3805
3806
3807
/*
 * t3_prep_adapter - prepare SW and HW for operation
 * @adapter: the adapter
 * @ai: adapter type and properties
 * @reset: whether to reset the adapter first
 *
 * Initializes adapter SW state for the various HW modules, sets initial
 * values for some adapter tunables, optionally resets the chip, and
 * prepares each port's MAC and PHY.  Returns 0 on success or a negative
 * errno (-1 from a failed chip reset).
 */
int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
		    int reset)
{
	int ret;
	unsigned int i, j = -1;	/* j indexes VPD port types; pre-incremented in the loop */

	get_pci_mode(adapter, &adapter->params.pci);

	adapter->params.info = ai;
	adapter->params.nports = ai->nports0 + ai->nports1;
	/* bit 0 = channel 0 populated, bit 1 = channel 1 populated */
	adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1);
	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);

	/* default tunables; stats accumulate less often on 1G parts */
	adapter->params.linkpoll_period = 10;
	adapter->params.stats_update_period = is_10G(adapter) ?
	    MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
	adapter->params.pci.vpd_cap_addr =
	    pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
	ret = get_vpd_params(adapter, &adapter->params.vpd);
	if (ret < 0)
		return ret;

	if (reset && t3_reset_adapter(adapter))
		return -1;

	t3_sge_prep(adapter, &adapter->params.sge);

	/* a zero VPD memory clock indicates no external memory --
	 * NOTE(review): assumed from the mclk guard; confirm */
	if (adapter->params.vpd.mclk) {
		struct tp_params *p = &adapter->params.tp;

		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");

		/* two TP channels only when both port channels are present */
		p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
		p->cm_size = t3_mc7_size(&adapter->cm);
		p->chan_rx_size = p->pmrx_size / 2;	/* RX memory is split in half */
		p->chan_tx_size = p->pmtx_size / p->nchan;
		p->rx_pg_size = 64 * 1024;
		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
		p->ntimer_qs = p->cm_size >= (128 << 20) ||
		    adapter->params.rev > 0 ? 12 : 6;
	}

	/* offload requires all three MC7 memories to be populated */
	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
				  t3_mc7_size(&adapter->pmtx) &&
				  t3_mc7_size(&adapter->cm);

	if (is_offload(adapter)) {
		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
		/* rev-0 parts get no filters */
		adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
		    DEFAULT_NFILTERS : 0;
		adapter->params.mc5.nroutes = 0;
		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);

		init_mtus(adapter->params.mtus);
		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
	}

	early_hw_init(adapter, ai);
	ret = init_parity(adapter);
	if (ret)
		return ret;

	for_each_port(adapter, i) {
		u8 hw_addr[6];
		const struct port_type_info *pti;
		struct port_info *p = adap2pinfo(adapter, i);

		/* skip VPD entries for unpopulated ports */
		while (!adapter->params.vpd.port_type[++j])
			;

		pti = &port_types[adapter->params.vpd.port_type[j]];
		if (!pti->phy_prep) {
			CH_ALERT(adapter, "Invalid port type index %d\n",
				 adapter->params.vpd.port_type[j]);
			return -EINVAL;
		}

		p->phy.mdio.dev = adapter->port[i];
		ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
				    ai->mdio_ops);
		if (ret)
			return ret;
		mac_prep(&p->mac, adapter, j);

		/*
		 * The VPD EEPROM stores the card's base Ethernet address;
		 * each port's address is derived by adding the port index
		 * to the low byte of the base.
		 */
		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;

		memcpy(adapter->port[i]->dev_addr, hw_addr,
		       ETH_ALEN);
		memcpy(adapter->port[i]->perm_addr, hw_addr,
		       ETH_ALEN);
		init_link_config(&p->link_config, p->phy.caps);
		/* PHYs start powered down until the port is brought up */
		p->phy.ops->power_down(&p->phy, 1);

		/*
		 * If the PHY cannot interrupt on link-status changes, clamp
		 * the link polling period so changes are noticed quickly.
		 */
		if (!(p->phy.caps & SUPPORTED_IRQ) &&
		    adapter->params.linkpoll_period > 10)
			adapter->params.linkpoll_period = 10;
	}

	return 0;
}
3932
/*
 * t3_led_ready - set GPIO0's output value
 * @adapter: the adapter
 *
 * Drives GPIO0 high.  NOTE(review): the function name suggests GPIO0 is
 * wired to a "ready" LED -- confirm against the board documentation.
 */
void t3_led_ready(struct adapter *adapter)
{
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
}
3938
3939int t3_replay_prep_adapter(struct adapter *adapter)
3940{
3941 const struct adapter_info *ai = adapter->params.info;
3942 unsigned int i, j = -1;
3943 int ret;
3944
3945 early_hw_init(adapter, ai);
3946 ret = init_parity(adapter);
3947 if (ret)
3948 return ret;
3949
3950 for_each_port(adapter, i) {
3951 const struct port_type_info *pti;
3952 struct port_info *p = adap2pinfo(adapter, i);
3953
3954 while (!adapter->params.vpd.port_type[++j])
3955 ;
3956
3957 pti = &port_types[adapter->params.vpd.port_type[j]];
3958 ret = pti->phy_prep(&p->phy, adapter, p->phy.mdio.prtad, NULL);
3959 if (ret)
3960 return ret;
3961 p->phy.ops->power_down(&p->phy, 1);
3962 }
3963
3964return 0;
3965}
3966
3967