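/*
 * cxgb2 -- main module for the Chelsio T1 (10G/1G Ethernet) adapter driver.
 *
 * This file implements the net_device and ethtool operations, periodic MAC
 * statistics accumulation, the elmer0 external-interrupt bottom half, T1B
 * clock programming, and PCI probe/remove; the SGE, MAC, PHY, TP and ESPI
 * blocks live in the sibling files included below.
 */
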
#include "common.h"
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/dma-mapping.h>
#include <linux/uaccess.h>

#include "cpl5_cmd.h"
#include "regs.h"
#include "gmac.h"
#include "cphy.h"
#include "sge.h"
#include "tp.h"
#include "espi.h"
#include "elmer0.h"

#include <linux/workqueue.h>

static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
{
	schedule_delayed_work(&ap->stats_update_task, secs * HZ);
}

static inline void cancel_mac_stats_update(struct adapter *ap)
{
	cancel_delayed_work(&ap->stats_update_task);
}

#define MAX_CMDQ_ENTRIES 16384
#define MAX_CMDQ1_ENTRIES 1024
#define MAX_RX_BUFFERS 16384
#define MAX_RX_JUMBO_BUFFERS 16384
#define MAX_TX_BUFFERS_HIGH	16384U
#define MAX_TX_BUFFERS_LOW	1536U
#define MAX_TX_BUFFERS		1460U
#define MIN_FL_ENTRIES 32

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/*
 * The EEPROM is actually bigger but only the first few bytes are used so we
 * only report those.
 */
#define EEPROM_SIZE 32

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("GPL");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");

#define HCLOCK 0x0
#define LCLOCK 0x1

/* T1 cards powersave mode */
static int t1_clock(struct adapter *adapter, int mode);
static int t1powersave = 1;	/* HW default is powersave mode. */

module_param(t1powersave, int, 0);
MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");

static int disable_msi = 0;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
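
/*
 * Usage sketch (illustrative only): the parameters above are set at module
 * load time, e.g.
 *
 *	modprobe cxgb t1powersave=0 disable_msi=1
 *
 * assuming the module built from this directory is named "cxgb" (DRV_NAME).
 */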

static void t1_set_rxmode(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	struct t1_rx_mode rm;

	rm.dev = dev;
	mac->ops->set_rx_mode(mac, &rm);
}

static void link_report(struct port_info *p)
{
	if (!netif_carrier_ok(p->dev))
		netdev_info(p->dev, "link down\n");
	else {
		const char *s = "10Mbps";

		switch (p->link_config.speed) {
		case SPEED_10000: s = "10Gbps"; break;
		case SPEED_1000:  s = "1000Mbps"; break;
		case SPEED_100:   s = "100Mbps"; break;
		}

		netdev_info(p->dev, "link up, %s, %s-duplex\n",
			    s, p->link_config.duplex == DUPLEX_FULL
			    ? "full" : "half");
	}
}

void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct port_info *p = &adapter->port[port_id];

	if (link_stat != netif_carrier_ok(p->dev)) {
		if (link_stat)
			netif_carrier_on(p->dev);
		else
			netif_carrier_off(p->dev);
		link_report(p);

		/* multi-ports: inform sge */
		if ((speed > 0) && (adapter->params.nports > 1)) {
			unsigned int sched_speed = 10;
			switch (speed) {
			case SPEED_1000:
				sched_speed = 1000;
				break;
			case SPEED_100:
				sched_speed = 100;
				break;
			case SPEED_10:
				sched_speed = 10;
				break;
			}
			t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
		}
	}
}

static void link_start(struct port_info *p)
{
	struct cmac *mac = p->mac;

	mac->ops->reset(mac);
	if (mac->ops->macaddress_set)
		mac->ops->macaddress_set(mac, p->dev->dev_addr);
	t1_set_rxmode(p->dev);
	t1_link_start(p->phy, mac, &p->link_config);
	mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static void enable_hw_csum(struct adapter *adapter)
{
	if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
		t1_tp_set_ip_checksum_offload(adapter->tp, 1);	/* for TSO only */
	t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
}

/*
 * Things to do upon first use of a card.
 * This must run with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adapter)
{
	int err = 0;

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = t1_init_hw_modules(adapter);
		if (err)
			goto out_err;

		enable_hw_csum(adapter);
		adapter->flags |= FULL_INIT_DONE;
	}

	t1_interrupts_clear(adapter);

	adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
	err = request_irq(adapter->pdev->irq, t1_interrupt,
			  adapter->params.has_msi ? 0 : IRQF_SHARED,
			  adapter->name, adapter);
	if (err) {
		if (adapter->params.has_msi)
			pci_disable_msi(adapter->pdev);

		goto out_err;
	}

	t1_sge_start(adapter->sge);
	t1_interrupts_enable(adapter);
out_err:
	return err;
}

/*
 * Release resources when all the ports have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t1_sge_stop(adapter->sge);
	t1_interrupts_disable(adapter);
	free_irq(adapter->pdev->irq, adapter);
	if (adapter->params.has_msi)
		pci_disable_msi(adapter->pdev);
}

static int cxgb_open(struct net_device *dev)
{
	int err;
	struct adapter *adapter = dev->ml_priv;
	int other_ports = adapter->open_device_map & PORT_MASK;

	napi_enable(&adapter->napi);
	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
		napi_disable(&adapter->napi);
		return err;
	}

	__set_bit(dev->if_port, &adapter->open_device_map);
	link_start(&adapter->port[dev->if_port]);
	netif_start_queue(dev);
	if (!other_ports && adapter->params.stats_update_period)
		schedule_mac_stats_update(adapter,
					  adapter->params.stats_update_period);

	t1_vlan_mode(adapter, dev->features);
	return 0;
}

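/*
 * open_device_map tracks which ports are up: the first open of any port
 * brings the whole adapter up (cxgb_up) and the last close tears it down
 * again (cxgb_down).
 */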
static int cxgb_close(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct cmac *mac = p->mac;

	netif_stop_queue(dev);
	napi_disable(&adapter->napi);
	mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
	netif_carrier_off(dev);

	clear_bit(dev->if_port, &adapter->open_device_map);
	if (adapter->params.stats_update_period &&
	    !(adapter->open_device_map & PORT_MASK)) {
		/* Stop statistics accumulation. */
		smp_mb__after_atomic();
		spin_lock(&adapter->work_lock);	/* sync with update task */
		spin_unlock(&adapter->work_lock);
		cancel_mac_stats_update(adapter);
	}

	if (!adapter->open_device_map)
		cxgb_down(adapter);
	return 0;
}

static struct net_device_stats *t1_get_stats(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct net_device_stats *ns = &dev->stats;
	const struct cmac_statistics *pstats;

	/* Do a full update of the MAC stats */
	pstats = p->mac->ops->statistics_update(p->mac,
						MAC_STATS_UPDATE_FULL);

	ns->tx_packets = pstats->TxUnicastFramesOK +
		pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;

	ns->rx_packets = pstats->RxUnicastFramesOK +
		pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;

	ns->tx_bytes = pstats->TxOctetsOK;
	ns->rx_bytes = pstats->RxOctetsOK;

	ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
		pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
	ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
		pstats->RxFCSErrors + pstats->RxAlignErrors +
		pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
		pstats->RxSymbolErrors + pstats->RxRuntErrors;

	ns->multicast = pstats->RxMulticastFramesOK;
	ns->collisions = pstats->TxTotalCollisions;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->RxFrameTooLongErrors +
		pstats->RxJabberErrors;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->RxFCSErrors;
	ns->rx_frame_errors = pstats->RxAlignErrors;
	ns->rx_fifo_errors = 0;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors = pstats->TxFramesAbortedDueToXSCollisions;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->TxUnderrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = pstats->TxLateCollisions;
	return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct adapter *adapter = dev->ml_priv;

	adapter->msg_enable = val;
}

static const char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK",
	"TxOctetsBad",
	"TxUnicastFramesOK",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames",
	"TxFramesWithDeferredXmissions",
	"TxLateCollisions",
	"TxTotalCollisions",
	"TxFramesAbortedDueToXSCollisions",
	"TxUnderrun",
	"TxLengthErrors",
	"TxInternalMACXmitError",
	"TxFramesWithExcessiveDeferral",
	"TxFCSErrors",
	"TxJumboFramesOk",
	"TxJumboOctetsOk",

	"RxOctetsOK",
	"RxOctetsBad",
	"RxUnicastFramesOK",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames",
	"RxFCSErrors",
	"RxAlignErrors",
	"RxSymbolErrors",
	"RxDataErrors",
	"RxSequenceErrors",
	"RxRuntErrors",
	"RxJabberErrors",
	"RxInternalMACRcvError",
	"RxInRangeLengthErrors",
	"RxOutOfRangeLengthField",
	"RxFrameTooLongErrors",
	"RxJumboFramesOk",
	"RxJumboOctetsOk",

	/* port stats */
	"RxCsumGood",
	"TxCsumOffload",
	"TxTso",
	"RxVlan",
	"TxVlan",
	"TxNeedHeadroom",

	/* interrupt stats */
	"rx drops",
	"pure_rsps",
	"unhandled irqs",
	"respQ_empty",
	"respQ_overflow",
	"freelistQ_empty",
	"pkt_too_big",
	"pkt_mismatch",
	"cmdQ_full0",
	"cmdQ_full1",

	"espi_DIP2ParityErr",
	"espi_DIP4Err",
	"espi_RxDrops",
	"espi_TxDrops",
	"espi_RxOvfl",
	"espi_ParityErr"
};

#define T2_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T2_REGMAP_SIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = dev->ml_priv;

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
}

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	const struct cmac_statistics *s;
	const struct sge_intr_counts *t;
	struct sge_port_stats ss;

	s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
	t = t1_sge_get_intr_counts(adapter->sge);
	t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);

	*data++ = s->TxOctetsOK;
	*data++ = s->TxOctetsBad;
	*data++ = s->TxUnicastFramesOK;
	*data++ = s->TxMulticastFramesOK;
	*data++ = s->TxBroadcastFramesOK;
	*data++ = s->TxPauseFrames;
	*data++ = s->TxFramesWithDeferredXmissions;
	*data++ = s->TxLateCollisions;
	*data++ = s->TxTotalCollisions;
	*data++ = s->TxFramesAbortedDueToXSCollisions;
	*data++ = s->TxUnderrun;
	*data++ = s->TxLengthErrors;
	*data++ = s->TxInternalMACXmitError;
	*data++ = s->TxFramesWithExcessiveDeferral;
	*data++ = s->TxFCSErrors;
	*data++ = s->TxJumboFramesOK;
	*data++ = s->TxJumboOctetsOK;

	*data++ = s->RxOctetsOK;
	*data++ = s->RxOctetsBad;
	*data++ = s->RxUnicastFramesOK;
	*data++ = s->RxMulticastFramesOK;
	*data++ = s->RxBroadcastFramesOK;
	*data++ = s->RxPauseFrames;
	*data++ = s->RxFCSErrors;
	*data++ = s->RxAlignErrors;
	*data++ = s->RxSymbolErrors;
	*data++ = s->RxDataErrors;
	*data++ = s->RxSequenceErrors;
	*data++ = s->RxRuntErrors;
	*data++ = s->RxJabberErrors;
	*data++ = s->RxInternalMACRcvError;
	*data++ = s->RxInRangeLengthErrors;
	*data++ = s->RxOutOfRangeLengthField;
	*data++ = s->RxFrameTooLongErrors;
	*data++ = s->RxJumboFramesOK;
	*data++ = s->RxJumboOctetsOK;

	*data++ = ss.rx_cso_good;
	*data++ = ss.tx_cso;
	*data++ = ss.tx_tso;
	*data++ = ss.vlan_xtract;
	*data++ = ss.vlan_insert;
	*data++ = ss.tx_need_hdrroom;

	*data++ = t->rx_drops;
	*data++ = t->pure_rsps;
	*data++ = t->unhandled_irqs;
	*data++ = t->respQ_empty;
	*data++ = t->respQ_overflow;
	*data++ = t->freelistQ_empty;
	*data++ = t->pkt_too_big;
	*data++ = t->pkt_mismatch;
	*data++ = t->cmdQ_full[0];
	*data++ = t->cmdQ_full[1];

	if (adapter->espi) {
		const struct espi_intr_counts *e;

		e = t1_espi_get_intr_counts(adapter->espi);
		*data++ = e->DIP2_parity_err;
		*data++ = e->DIP4_err;
		*data++ = e->rx_drops;
		*data++ = e->tx_drops;
		*data++ = e->rx_ovflw;
		*data++ = e->parity_err;
	}
}
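
/*
 * The sequence of *data++ assignments above must match stats_strings[]
 * entry for entry; keep the two in sync when adding counters.
 */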

static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for ( ; start <= end; start += sizeof(u32))
		*p++ = readl(ap->regs + start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct adapter *ap = dev->ml_priv;

	/*
	 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
	 */
	regs->version = 2;

	memset(buf, 0, T2_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
	reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
	reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
	reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
	reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
	reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
	reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
	reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
	reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
	reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
}

static int get_link_ksettings(struct net_device *dev,
			      struct ethtool_link_ksettings *cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	u32 supported, advertising;

	supported = p->link_config.supported;
	advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->base.speed = p->link_config.speed;
		cmd->base.duplex = p->link_config.duplex;
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->base.phy_address = p->phy->mdio.prtad;
	cmd->base.autoneg = p->link_config.autoneg;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}

static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
			 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
			 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
			 ADVERTISED_10000baseT_Full)

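/*
 * ADVERTISED_MASK restricts autonegotiation advertising to the link modes
 * this driver knows how to program; set_link_ksettings() below intersects
 * the user's request with it and with lc->supported.
 */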
static int set_link_ksettings(struct net_device *dev,
			      const struct ethtool_link_ksettings *cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;
	u32 advertising;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;	/* can't change speed/duplex */

	if (cmd->base.autoneg == AUTONEG_DISABLE) {
		u32 speed = cmd->base.speed;
		int cap = speed_duplex_to_caps(speed, cmd->base.duplex);

		if (!(lc->supported & cap) || (speed == SPEED_1000))
			return -EINVAL;
		lc->requested_speed = speed;
		lc->requested_duplex = cmd->base.duplex;
		lc->advertising = 0;
	} else {
		advertising &= ADVERTISED_MASK;
		/* if more than one mode is set, fall back to all we support */
		if (advertising & (advertising - 1))
			advertising = lc->supported;
		advertising &= lc->supported;
		if (!advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->base.autoneg;
	if (netif_running(dev))
		t1_link_start(p->phy, p->mac, lc);
	return 0;
}

static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t1_link_start(p->phy, p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
							 lc->fc);
	}
	return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct adapter *adapter = dev->ml_priv;
	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_CMDQ_ENTRIES;

	e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
	e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
	e->tx_pending = adapter->params.sge.cmdQ_size[0];
}
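
/*
 * Usage sketch (illustrative only): the limits above are what
 * "ethtool -g <iface>" reports and what "ethtool -G <iface> rx N tx M"
 * must respect; set_sge_param() below additionally rejects changes once
 * the hardware has been initialized (FULL_INIT_DONE).
 */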

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct adapter *adapter = dev->ml_priv;
	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_CMDQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
	adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
	adapter->params.sge.cmdQ_size[0] = e->tx_pending;
	adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
		MAX_CMDQ1_ENTRIES : e->tx_pending;
	return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->ml_priv;

	adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
	adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
	adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
	t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
	return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->ml_priv;

	c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
	c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
	c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
	return 0;
}

static int get_eeprom_len(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;

	return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
}

#define EEPROM_MAGIC(ap) \
	(PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i;
	u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
	struct adapter *adapter = dev->ml_priv;

	e->magic = EEPROM_MAGIC(adapter);
	/* Read whole 32-bit words that cover the requested byte range. */
	for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
		t1_seeprom_read(adapter, i, (__le32 *)&buf[i]);
	memcpy(data, buf + e->offset, e->len);
	return 0;
}

static const struct ethtool_ops t1_ethtool_ops = {
	.get_drvinfo		= get_drvinfo,
	.get_msglevel		= get_msglevel,
	.set_msglevel		= set_msglevel,
	.get_ringparam		= get_sge_param,
	.set_ringparam		= set_sge_param,
	.get_coalesce		= get_coalesce,
	.set_coalesce		= set_coalesce,
	.get_eeprom_len		= get_eeprom_len,
	.get_eeprom		= get_eeprom,
	.get_pauseparam		= get_pauseparam,
	.set_pauseparam		= set_pauseparam,
	.get_link		= ethtool_op_get_link,
	.get_strings		= get_strings,
	.get_sset_count		= get_sset_count,
	.get_ethtool_stats	= get_stats,
	.get_regs_len		= get_regs_len,
	.get_regs		= get_regs,
	.get_link_ksettings	= get_link_ksettings,
	.set_link_ksettings	= set_link_ksettings,
};
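
/*
 * Usage sketch (illustrative only): "ethtool -S <iface>" exercises
 * get_ethtool_stats above, "ethtool -g"/"-G" the ring-parameter hooks,
 * and "ethtool -d" the register dump.
 */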

static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct mdio_if_info *mdio = &adapter->port[dev->if_port].phy->mdio;

	return mdio_mii_ioctl(mdio, if_mii(req), cmd);
}

static int t1_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;

	if (!mac->ops->set_mtu)
		return -EOPNOTSUPP;
	if ((ret = mac->ops->set_mtu(mac, new_mtu)))
		return ret;
	dev->mtu = new_mtu;
	return 0;
}

static int t1_set_mac_addr(struct net_device *dev, void *p)
{
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	struct sockaddr *addr = p;

	if (!mac->ops->macaddress_set)
		return -EOPNOTSUPP;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	mac->ops->macaddress_set(mac, dev->dev_addr);
	return 0;
}

static netdev_features_t t1_fix_features(struct net_device *dev,
					 netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

static int t1_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;
	struct adapter *adapter = dev->ml_priv;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		t1_vlan_mode(adapter, features);

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void t1_netpoll(struct net_device *dev)
{
	unsigned long flags;
	struct adapter *adapter = dev->ml_priv;

	local_irq_save(flags);
	t1_interrupt(adapter->pdev->irq, adapter);
	local_irq_restore(flags);
}
#endif

/*
 * Periodic accumulation of MAC statistics.
 */
static void mac_stats_task(struct work_struct *work)
{
	int i;
	struct adapter *adapter =
		container_of(work, struct adapter, stats_update_task.work);

	for_each_port(adapter, i) {
		struct port_info *p = &adapter->port[i];

		if (netif_running(p->dev))
			p->mac->ops->statistics_update(p->mac,
						       MAC_STATS_UPDATE_FAST);
	}

	/* Schedule the next statistics update if any port is active. */
	spin_lock(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_mac_stats_update(adapter,
					  adapter->params.stats_update_period);
	spin_unlock(&adapter->work_lock);
}

/*
 * Processes elmer0 external interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter =
		container_of(work, struct adapter, ext_intr_handler_task);

	t1_elmer0_ext_intr_handler(adapter);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->async_lock);
	adapter->slow_intr_mask |= F_PL_INTR_EXT;
	writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
	       adapter->regs + A_PL_ENABLE);
	spin_unlock_irq(&adapter->async_lock);
}

/*
 * Interrupt-context handler for elmer0 external interrupts.
 */
void t1_elmer0_ext_intr(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as we require
	 * a process context.  We disable EXT interrupts in the interim
	 * and let the task reenable them when it's done.
	 */
	adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
	       adapter->regs + A_PL_ENABLE);
	schedule_work(&adapter->ext_intr_handler_task);
}

void t1_fatal_err(struct adapter *adapter)
{
	if (adapter->flags & FULL_INIT_DONE) {
		t1_sge_stop(adapter->sge);
		t1_interrupts_disable(adapter);
	}
	pr_alert("%s: encountered fatal error, operation suspended\n",
		 adapter->name);
}

static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open		= cxgb_open,
	.ndo_stop		= cxgb_close,
	.ndo_start_xmit		= t1_start_xmit,
	.ndo_get_stats		= t1_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= t1_set_rxmode,
	.ndo_do_ioctl		= t1_ioctl,
	.ndo_change_mtu		= t1_change_mtu,
	.ndo_set_mac_address	= t1_set_mac_addr,
	.ndo_fix_features	= t1_fix_features,
	.ndo_set_features	= t1_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= t1_netpoll,
#endif
};
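
/*
 * Probe flow, in brief: enable the PCI device, pick 64- or 32-bit DMA
 * masks, map BAR0, allocate one net_device per port (the adapter private
 * area hangs off the first), then register whichever ports succeed.
 */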

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct board_info *bi;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	pr_info_once("%s - version %s\n", DRV_DESCRIPTION, DRV_VERSION);

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		pr_err("%s: cannot find PCI device memory base address\n",
		       pci_name(pdev));
		err = -ENODEV;
		goto out_disable_pdev;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;

		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
			pr_err("%s: unable to obtain 64-bit DMA for "
			       "consistent allocations\n", pci_name(pdev));
			err = -ENODEV;
			goto out_disable_pdev;
		}

	} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		pr_err("%s: cannot obtain PCI resources\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	bi = t1_get_board_info(ent->driver_data);
	for (i = 0; i < bi->port_number; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		if (!adapter) {
			adapter = netdev_priv(netdev);
			adapter->pdev = pdev;
			adapter->port[0].dev = netdev;	/* so we don't leak it */

			adapter->regs = ioremap(mmio_start, mmio_len);
			if (!adapter->regs) {
				pr_err("%s: cannot map device registers\n",
				       pci_name(pdev));
				err = -ENOMEM;
				goto out_free_dev;
			}

			if (t1_get_board_rev(adapter, bi, &adapter->params)) {
				err = -ENODEV;	/* Can't handle this chip rev */
				goto out_free_dev;
			}

			adapter->name = pci_name(pdev);
			adapter->msg_enable = dflt_msg_enable;
			adapter->mmio_len = mmio_len;

			spin_lock_init(&adapter->tpi_lock);
			spin_lock_init(&adapter->work_lock);
			spin_lock_init(&adapter->async_lock);
			spin_lock_init(&adapter->mac_lock);

			INIT_WORK(&adapter->ext_intr_handler_task,
				  ext_intr_task);
			INIT_DELAYED_WORK(&adapter->stats_update_task,
					  mac_stats_task);

			pci_set_drvdata(pdev, netdev);
		}

		pi = &adapter->port[i];
		pi->dev = netdev;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->if_port = i;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->ml_priv = adapter;
		netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_RXCSUM;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_RXCSUM | NETIF_F_LLTX;

		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;
		if (vlan_tso_capable(adapter)) {
			netdev->features |=
				NETIF_F_HW_VLAN_CTAG_TX |
				NETIF_F_HW_VLAN_CTAG_RX;
			netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;

			/* T204: disable TSO */
			if (!(is_T2(adapter)) || bi->port_number != 4) {
				netdev->hw_features |= NETIF_F_TSO;
				netdev->features |= NETIF_F_TSO;
			}
		}

		netdev->netdev_ops = &cxgb_netdev_ops;
		netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ?
			sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);

		netif_napi_add(netdev, &adapter->napi, t1_poll, 64);

		netdev->ethtool_ops = &t1_ethtool_ops;

		switch (bi->board) {
		case CHBT_BOARD_CHT110:
		case CHBT_BOARD_N110:
		case CHBT_BOARD_N210:
		case CHBT_BOARD_CHT210:
			netdev->max_mtu = PM3393_MAX_FRAME_SIZE -
					  (ETH_HLEN + ETH_FCS_LEN);
			break;
		case CHBT_BOARD_CHN204:
			netdev->max_mtu = VSC7326_MAX_MTU;
			break;
		default:
			netdev->max_mtu = ETH_DATA_LEN;
			break;
		}
	}

	if (t1_init_sw_modules(adapter, bi) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for (i = 0; i < bi->port_number; ++i) {
		err = register_netdev(adapter->port[i].dev);
		if (err)
			pr_warn("%s: cannot register net device %s, skipping\n",
				pci_name(pdev), adapter->port[i].dev->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i].dev->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		pr_err("%s: could not register any net devices\n",
		       pci_name(pdev));
		goto out_release_adapter_res;
	}

	pr_info("%s: %s (rev %d), %s %dMHz/%d-bit\n",
		adapter->name, bi->desc, adapter->params.chip_revision,
		adapter->params.pci.is_pcix ? "PCIX" : "PCI",
		adapter->params.pci.speed, adapter->params.pci.width);

	/*
	 * Set the T1B ASIC and memory clocks.
	 */
	if (t1powersave)
		adapter->t1powersave = LCLOCK;	/* HW default is powersave mode. */
	else
		adapter->t1powersave = HCLOCK;
	if (t1_is_T1B(adapter))
		t1_clock(adapter, t1powersave);

	return 0;

out_release_adapter_res:
	t1_free_sw_modules(adapter);
out_free_dev:
	if (adapter) {
		if (adapter->regs)
			iounmap(adapter->regs);
		for (i = bi->port_number - 1; i >= 0; --i)
			if (adapter->port[i].dev)
				free_netdev(adapter->port[i].dev);
	}
	pci_release_regions(pdev);
out_disable_pdev:
	pci_disable_device(pdev);
	return err;
}

static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
{
	int data;
	int i;
	u32 val;

	enum {
		S_CLOCK = 1 << 3,
		S_DATA = 1 << 4
	};

	for (i = (nbits - 1); i > -1; i--) {

		udelay(50);

		data = ((bitdata >> i) & 0x1);
		__t1_tpi_read(adapter, A_ELMER0_GPO, &val);

		if (data)
			val |= S_DATA;
		else
			val &= ~S_DATA;

		udelay(50);

		/* Set SCLOCK low */
		val &= ~S_CLOCK;
		__t1_tpi_write(adapter, A_ELMER0_GPO, val);

		udelay(50);

		/* Write SCLOCK high */
		val |= S_CLOCK;
		__t1_tpi_write(adapter, A_ELMER0_GPO, val);

	}
}
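
/*
 * bit_bang() shifts 'nbits' of 'bitdata' MSB-first onto the ELMER0 GPO
 * data pin, pulsing the clock pin low then high around each bit;
 * t1_clock() below uses it to serially program the core and memory clock
 * synthesizers.
 */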

static int t1_clock(struct adapter *adapter, int mode)
{
	u32 val;
	int M_CORE_VAL;
	int M_MEM_VAL;

	enum {
		M_CORE_BITS	= 9,
		T_CORE_VAL	= 0,
		T_CORE_BITS	= 2,
		N_CORE_VAL	= 0,
		N_CORE_BITS	= 2,
		M_MEM_BITS	= 9,
		T_MEM_VAL	= 0,
		T_MEM_BITS	= 2,
		N_MEM_VAL	= 0,
		N_MEM_BITS	= 2,
		NP_LOAD		= 1 << 17,
		S_LOAD_MEM	= 1 << 5,
		S_LOAD_CORE	= 1 << 6,
		S_CLOCK		= 1 << 3
	};

	if (!t1_is_T1B(adapter))
		return -ENODEV;	/* Can't re-clock this chip. */

	if (mode & 2)
		return 0;	/* show current mode. */

	if ((adapter->t1powersave & 1) == (mode & 1))
		return -EALREADY;	/* ASIC already running in mode. */

	if ((mode & 1) == HCLOCK) {
		M_CORE_VAL = 0x14;
		M_MEM_VAL = 0x18;
		adapter->t1powersave = HCLOCK;	/* overclock */
	} else {
		M_CORE_VAL = 0xe;
		M_MEM_VAL = 0x10;
		adapter->t1powersave = LCLOCK;	/* underclock */
	}

	/* Don't interrupt this serial stream! */
	spin_lock(&adapter->tpi_lock);

	/* Initialize for ASIC core */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	val &= ~S_CLOCK;
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the ASIC clock synthesizer */
	bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
	bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
	bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
	udelay(50);

	/* Finish ASIC core */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Initialize for memory */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	val &= ~S_CLOCK;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the memory clock synthesizer */
	bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
	bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
	bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
	udelay(50);

	/* Finish memory */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);

	spin_unlock(&adapter->tpi_lock);

	return 0;
}
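
/*
 * Bouncing PM_CSR through power state D3hot (3) and back to D0 (0)
 * soft-resets the card.
 */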
static inline void t1_sw_reset(struct pci_dev *pdev)
{
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
}

static void remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct adapter *adapter = dev->ml_priv;
	int i;

	for_each_port(adapter, i) {
		if (test_bit(i, &adapter->registered_device_map))
			unregister_netdev(adapter->port[i].dev);
	}

	t1_free_sw_modules(adapter);
	iounmap(adapter->regs);

	while (--i >= 0) {
		if (adapter->port[i].dev)
			free_netdev(adapter->port[i].dev);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	t1_sw_reset(pdev);
}

static struct pci_driver cxgb_pci_driver = {
	.name	  = DRV_NAME,
	.id_table = t1_pci_tbl,
	.probe	  = init_one,
	.remove	  = remove_one,
};

module_pci_driver(cxgb_pci_driver);