/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};
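
/*
 * Note: these limits also bound what can be requested through the ethtool
 * ring-parameter path (set_sge_param()) and the CHELSIO_SET_QSET_PARAMS
 * ioctl further below.
 */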

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 0),
	CH_DEVICE(0x21, 1),
	CH_DEVICE(0x22, 2),
	CH_DEVICE(0x23, 3),
	CH_DEVICE(0x24, 1),
	CH_DEVICE(0x25, 3),
	CH_DEVICE(0x26, 2),
	CH_DEVICE(0x30, 2),
	CH_DEVICE(0x31, 3),
	CH_DEVICE(0x32, 1),
	CH_DEVICE(0x35, 6),
	CH_DEVICE(0x36, 3),
	CH_DEVICE(0x37, 7),
	{0,}
};
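
/*
 * The last CH_DEVICE() argument becomes pci_device_id.driver_data; the probe
 * routine uses it as an index selecting the adapter description that matches
 * the device ID.
 */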

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
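
/*
 * Example module load forcing legacy pin interrupts with offload disabled
 * (illustrative):
 *
 *	modprobe cxgb3 msi=0 ofld_disable=1
 */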

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will not run until our work element has completed, so we need to use a
 * separate workqueue.
 */
struct workqueue_struct *cxgb3_wq;

/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the port whose settings are to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		netdev_info(dev, "link up, %s, %s-duplex\n",
			    s, p->link_config.duplex == DUPLEX_FULL
			    ? "full" : "half");
	}
}

static void enable_tx_fifo_drain(struct adapter *adapter,
				 struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
			 F_ENDROPPKT);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
	t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
}

static void disable_tx_fifo_drain(struct adapter *adapter,
				  struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
			 F_ENDROPPKT, 0);
}
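
/*
 * enable_tx_fifo_drain() and disable_tx_fifo_drain() are used in pairs around
 * link-fault handling: while the link is down the MAC is told to drop (drain)
 * packets from the TX FIFO (F_ENDROPPKT) so it cannot back up, and drop mode
 * is switched off again once the link comes back.
 */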

void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	if (state == netif_carrier_ok(dev))
		return;

	if (state) {
		struct cmac *mac = &pi->mac;

		netif_carrier_on(dev);

		disable_tx_fifo_drain(adap, pi);

		/* Clear local faults */
		t3_xgm_intr_disable(adap, pi->port_id);
		t3_read_reg(adap, A_XGM_INT_STATUS +
			    pi->mac.offset);
		t3_write_reg(adap,
			     A_XGM_INT_CAUSE + pi->mac.offset,
			     F_XGM_INT);

		t3_set_reg_field(adap,
				 A_XGM_INT_ENABLE +
				 pi->mac.offset,
				 F_XGM_INT, F_XGM_INT);
		t3_xgm_intr_enable(adap, pi->port_id);

		t3_mac_enable(mac, MAC_DIRECTION_TX);
	} else {
		netif_carrier_off(dev);

		/* Flush TX FIFO */
		enable_tx_fifo_drain(adap, pi);
	}
	link_report(dev);
}

/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			disable_tx_fifo_drain(adapter, pi);

			t3_mac_enable(mac, MAC_DIRECTION_RX);

			/* Clear local faults */
			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS +
				    pi->mac.offset);
			t3_write_reg(adapter,
				     A_XGM_INT_CAUSE + pi->mac.offset,
				     F_XGM_INT);

			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, F_XGM_INT);
			t3_xgm_intr_enable(adapter, pi->port_id);

			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);

			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, 0);

			if (is_10G(adapter))
				pi->phy.ops->power_down(&pi->phy, 1);

			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);

			/* Flush TX FIFO */
			enable_tx_fifo_drain(adapter, pi);
		}

		link_report(dev);
	}
}

/**
 *	t3_os_phymod_changed - handle PHY module changes
 *	@adap: the adapter associated with the link change
 *	@port_id: the port index whose PHY module has changed
 *
 *	This is the OS-dependent handler for PHY module changes.  It is
 *	invoked when a PHY module is removed or inserted for any OS-specific
 *	processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->phy.modtype == phy_modtype_none)
		netdev_info(dev, "PHY module unplugged\n");
	else
		netdev_info(dev, "%s PHY module inserted\n",
			    mod_str[pi->phy.modtype]);
}

static void cxgb_set_rxmode(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	t3_mac_set_rx_mode(&pi->mac, dev);
}

/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	t3_mac_reset(mac);
	t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
	t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
	t3_mac_set_rx_mode(mac, dev);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s-%d", d->name, pi->first_qset + i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}

static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}
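
/*
 * On failure the loop above unwinds with free_irq() on every data vector that
 * was successfully requested.  Vector 0 is the async/slow-path interrupt and
 * is requested separately in cxgb_up().
 */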

static void free_irq_resources(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);
}

static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
			      unsigned long n)
{
	int attempts = 10;

	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
		if (!--attempts)
			return -ETIMEDOUT;
		msleep(10);
	}
	return 0;
}
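
/*
 * await_mgmt_replies() polls for roughly 10 attempts x 10 ms = 100 ms at most
 * for the expected number of management replies to show up on response
 * queue 0 before giving up with -ETIMEDOUT.
 */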

static int init_tp_parity(struct adapter *adap)
{
	int i;
	struct sk_buff *skb;
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = __skb_put_zero(skb, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->mtu_idx = NMTUS - 1;
		req->iff = i;
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = __skb_put_zero(skb, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = __skb_put_zero(skb, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		goto alloc_skb_fail;

	greq = __skb_put_zero(skb, sizeof(*greq));
	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = cpu_to_be64(1);
	t3_mgmt_tx(adap, skb);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	if (skb == adap->nofail_skb) {
		i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
		adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	}

	t3_tp_set_offload_mode(adap, 0);
	return i;

alloc_skb_fail:
	t3_tp_set_offload_mode(adap, 0);
	return -ENOMEM;
}
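
/*
 * The alloc_skb()/nofail_skb pattern above is used for management messages
 * that must not fail for lack of memory: if a fresh allocation fails, the
 * pre-allocated adap->nofail_skb is consumed instead and replenished once the
 * reply has been received.
 */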

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE + 1];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;		/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}
	rspq_map[RSS_TABLE_SIZE] = 0xffff;	/* terminator */

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
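
/*
 * The response-queue lookup table is split in half: the first half of
 * rspq_map spreads hash results over port 0's queue sets and the second half
 * over port 1's (offset by nq0).  A single-port adapter still programs both
 * halves, which is harmless.
 */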

static void ring_dbs(struct adapter *adap)
{
	int i, j;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			for (j = 0; j < SGE_TXQ_PER_SET; j++)
				t3_write_reg(adap, A_SG_KDOORBELL,
					     F_SELEGRCNTX |
					     V_EGRCNTX(qs->txq[j].cntxt_id));
	}
}

static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}

	/*
	 * netif_napi_add() can be called only once per napi_struct because it
	 * adds each new napi_struct to a list.  The NAPI_INIT flag keeps us
	 * from doing it again on subsequent cxgb_up() calls.
	 */
	adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
	int i;
	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
}

/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev,
				netdev_get_tx_queue(dev, j));
			if (err) {
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}

static ssize_t attr_show(struct device *d, char *buf,
			 ssize_t (*format)(struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format) (to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}

static ssize_t attr_store(struct device *d,
			  const char *buf, size_t len,
			  ssize_t (*set)(struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set) (to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, buf, format_##name); \
}
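
/*
 * For instance, CXGB3_SHOW(cam_size, t3_mc5_size(&adap->mc5)) below expands
 * to a format_cam_size() helper that prints the CAM size and a
 * show_cam_size() sysfs handler that invokes it under the RTNL via
 * attr_show().
 */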

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, 0444, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, 0644, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static const struct attribute_group cxgb3_attr_group = {
	.attrs = cxgb3_attrs,
};

static ssize_t tm_attr_show(struct device *d,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}

static ssize_t tm_attr_store(struct device *d,
			     const char *buf, size_t len, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int val;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, 0644, show_##name, store_##name)
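
/*
 * Each TM_ATTR(schedN, N) below creates a sysfs file exposing one TX
 * traffic-management scheduler.  Reading reports the configured rate;
 * writing sets a new rate in Kbps, e.g. (illustrative):
 *
 *	echo 100000 > /sys/class/net/<port0-iface>/sched0
 */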

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static const struct attribute_group offload_attr_group = {
	.attrs = offload_attrs,
};

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}

static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct port_info *pi = netdev_priv(adapter->port[idx]);
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = __skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;
	req->iff = idx;
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}

static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			     int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		return -ENOMEM;

	req = skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	ret = t3_mgmt_tx(adap, skb);
	if (skb == adap->nofail_skb) {
		adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
					     GFP_KERNEL);
		if (!adap->nofail_skb)
			ret = -ENOMEM;
	}

	return ret;
}

static int bind_qsets(struct adapter *adap)
{
	int i, j, err = 0;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j) {
			int ret = send_pktsched_cmd(adap, 1,
						    pi->first_qset + j, -1,
						    -1, i);
			if (ret)
				err = ret;
		}
	}

	return err;
}

#define FW_VERSION __stringify(FW_VERSION_MAJOR) "." \
	__stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
#define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "." \
	__stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
#define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
MODULE_FIRMWARE(FW_FNAME);
MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);

static inline const char *get_edc_fw_name(int edc_idx)
{
	const char *fw_name = NULL;

	switch (edc_idx) {
	case EDC_OPT_AEL2005:
		fw_name = AEL2005_OPT_EDC_NAME;
		break;
	case EDC_TWX_AEL2005:
		fw_name = AEL2005_TWX_EDC_NAME;
		break;
	case EDC_TWX_AEL2020:
		fw_name = AEL2020_TWX_EDC_NAME;
		break;
	}
	return fw_name;
}

int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
{
	struct adapter *adapter = phy->adapter;
	const struct firmware *fw;
	const char *fw_name;
	u32 csum;
	const __be32 *p;
	u16 *cache = phy->phy_cache;
	int i, ret = -EINVAL;

	fw_name = get_edc_fw_name(edc_idx);
	if (fw_name)
		ret = request_firmware(&fw, fw_name, &adapter->pdev->dev);
	if (ret < 0) {
		dev_err(&adapter->pdev->dev,
			"could not upgrade firmware: unable to load %s\n",
			fw_name);
		return ret;
	}

	/* check size, take checksum in account */
	if (fw->size > size + 4) {
		CH_ERR(adapter, "firmware image too large %u, expected %d\n",
		       (unsigned int)fw->size, size + 4);
		ret = -EINVAL;
	}

	/* compute checksum */
	p = (const __be32 *)fw->data;
	for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		ret = -EINVAL;
	}

	for (i = 0; i < size / 4; i++) {
		*cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
		*cache++ = be32_to_cpu(p[i]) & 0xffff;
	}

	release_firmware(fw);

	return ret;
}

static int upgrade_fw(struct adapter *adap)
{
	int ret;
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	ret = request_firmware(&fw, FW_FNAME, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			FW_FNAME);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);

	if (ret == 0)
		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
	else
		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

	return ret;
}

static inline char t3rev2char(struct adapter *adapter)
{
	char rev = 0;

	switch (adapter->params.rev) {
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	case T3_REV_C:
		rev = 'c';
		break;
	}
	return rev;
}

static int update_tpsram(struct adapter *adap)
{
	const struct firmware *tpsram;
	char buf[64];
	struct device *dev = &adap->pdev->dev;
	int ret;
	char rev;

	rev = t3rev2char(adap);
	if (!rev)
		return 0;

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);

	ret = request_firmware(&tpsram, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
			buf);
		return ret;
	}

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
	if (ret)
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
	if (ret == 0)
		dev_info(dev,
			 "successful update of protocol engine to %d.%d.%d\n",
			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	else
		dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	if (ret)
		dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
	release_firmware(tpsram);

	return ret;
}

/**
 *	t3_synchronize_rx - wait for current Rx processing on a port to complete
 *	@adap: the adapter
 *	@p: the port
 *
 *	Ensures that current Rx processing on any of the queues associated with
 *	the given port completes before returning.  We do this by acquiring and
 *	releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
	int i;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
		struct sge_rspq *q = &adap->sge.qs[i].rspq;

		spin_lock_irq(&q->lock);
		spin_unlock_irq(&q->lock);
	}
}
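
/*
 * Taking and releasing each response queue's lock acts as a barrier: any Rx
 * handler that was running on one of the port's queues when this was called
 * must have finished before the lock could be acquired here.
 */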

static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (adapter->params.rev > 0) {
		t3_set_vlan_accel(adapter, 1 << pi->port_id,
				  features & NETIF_F_HW_VLAN_CTAG_RX);
	} else {
		/* single control for all ports */
		unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX;

		for_each_port(adapter, i)
			have_vlans |=
				adapter->port[i]->features &
				NETIF_F_HW_VLAN_CTAG_RX;

		t3_set_vlan_accel(adapter, 1, have_vlans);
	}
	t3_synchronize_rx(adapter, pi);
}

/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int i, err;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap);
		if (err == -EINVAL) {
			err = upgrade_fw(adap);
			CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
				FW_VERSION_MAJOR, FW_VERSION_MINOR,
				FW_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		err = t3_check_tpsram_version(adap);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
				TP_VERSION_MAJOR, TP_VERSION_MINOR,
				TP_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		/*
		 * Clear interrupts now to catch errors if t3_init_hw fails.
		 * We clear them again later as initialization may trigger
		 * conditions that can interrupt.
		 */
		t3_intr_clear(adap);

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		for_each_port(adap, i)
			cxgb_vlan_mode(adap->port[i], adap->port[i]->features);

		setup_rss(adap);
		if (!(adap->flags & NAPI_INIT))
			init_napi(adap);

		t3_start_sge_timers(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_data_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				       0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	enable_all_napi(adap);
	t3_sge_start(adap);
	t3_intr_enable(adap);

	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
	    is_offload(adap) && init_tp_parity(adap) == 0)
		adap->flags |= TP_PARITY_INIT;

	if (adap->flags & TP_PARITY_INIT) {
		t3_write_reg(adap, A_TP_INT_CAUSE,
			     F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
	}

	if (!(adap->flags & QUEUES_BOUND)) {
		int ret = bind_qsets(adap);

		if (ret < 0) {
			CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
			t3_intr_disable(adap);
			free_irq_resources(adap);
			err = ret;
			goto out;
		}
		adap->flags |= QUEUES_BOUND;
	}

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter, int on_wq)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	free_irq_resources(adapter);
	quiesce_rx(adapter);
	t3_sge_stop(adapter);
	if (!on_wq)
		flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
}

static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
		(HZ * adap->params.linkpoll_period) / 10 :
		adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}
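
/*
 * linkpoll_period is in tenths of a second, hence the HZ * period / 10
 * conversion to jiffies; when link polling is off the check task runs at the
 * (whole-second) statistics update period instead.
 */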

static int offload_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		goto out;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
		dev_dbg(&dev->dev, "cannot create sysfs group\n");

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}

static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);
	struct t3c_data *td = T3C_DATA(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	/* Flush work scheduled while releasing TIDs */
	flush_work(&td->tid_release_task);

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter, 0);

	cxgb3_offload_deactivate(adapter);
	return 0;
}

static int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int other_ports = adapter->open_device_map & PORT_MASK;
	int err;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			pr_warn("Could not initialize offload capabilities\n");
	}

	netif_set_real_num_tx_queues(dev, pi->nqsets);
	err = netif_set_real_num_rx_queues(dev, pi->nqsets);
	if (err)
		return err;
	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_tx_start_all_queues(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
	return 0;
}

static int __cxgb_close(struct net_device *dev, int on_wq)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (!adapter->open_device_map)
		return 0;

	/* Stop link fault interrupts */
	t3_xgm_intr_disable(adapter, pi->port_id);
	t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

	t3_port_intr_disable(adapter, pi->port_id);
	netif_tx_stop_all_queues(dev);
	pi->phy.ops->power_down(&pi->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock_irq(&adapter->work_lock);
	clear_bit(pi->port_id, &adapter->open_device_map);
	spin_unlock_irq(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_delayed_work_sync(&adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter, on_wq);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
	return 0;
}

static int cxgb_close(struct net_device *dev)
{
	return __cxgb_close(dev, 0);
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &dev->stats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
		pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
		pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;
	return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->msg_enable = val;
}

static const char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK ",
	"TxFramesOK ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames ",
	"TxUnderrun ",
	"TxExtUnderrun ",

	"TxFrames64 ",
	"TxFrames65To127 ",
	"TxFrames128To255 ",
	"TxFrames256To511 ",
	"TxFrames512To1023 ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax ",

	"RxOctetsOK ",
	"RxFramesOK ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames ",
	"RxFCSErrors ",
	"RxSymbolErrors ",
	"RxShortErrors ",
	"RxJabberErrors ",
	"RxLengthErrors ",
	"RxFIFOoverflow ",

	"RxFrames64 ",
	"RxFrames65To127 ",
	"RxFrames128To255 ",
	"RxFrames256To511 ",
	"RxFrames512To1023 ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax ",

	"PhyFIFOErrors ",
	"TSO ",
	"VLANextractions ",
	"VLANinsertions ",
	"TxCsumOffload ",
	"RxCsumGood ",
	"LroAggregated ",
	"LroFlushed ",
	"LroNoDesc ",
	"RxDrops ",

	"CheckTXEnToggled ",
	"CheckResets ",

	"LinkFaults ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 fw_vers = 0;
	u32 tp_vers = 0;

	spin_lock(&adapter->stats_lock);
	t3_get_fw_version(adapter, &fw_vers);
	t3_get_tp_version(adapter, &tp_vers);
	spin_unlock(&adapter->stats_lock);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
	if (fw_vers)
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %u.%u.%u TP %u.%u.%u",
			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
			 G_FW_VERSION_MAJOR(fw_vers),
			 G_FW_VERSION_MINOR(fw_vers),
			 G_FW_VERSION_MICRO(fw_vers),
			 G_TP_VERSION_MAJOR(tp_vers),
			 G_TP_VERSION_MINOR(tp_vers),
			 G_TP_VERSION_MICRO(tp_vers));
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
					    struct port_info *p, int idx)
{
	int i;
	unsigned long tot = 0;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
		tot += adapter->sge.qs[i].port_stats[idx];
	return tot;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	*data++ = 0;
	*data++ = 0;
	*data++ = 0;
	*data++ = s->rx_cong_drops;

	*data++ = s->num_toggled;
	*data++ = s->num_resets;

	*data++ = s->link_faults;
}

static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for (; start <= end; start += sizeof(u32))
		*p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *ap = pi->adapter;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}

static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_config.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	p->phy.ops->autoneg_restart(&p->phy);
	return 0;
}

static int set_phys_id(struct net_device *dev,
		       enum ethtool_phys_id_state state)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;

	case ETHTOOL_ID_OFF:
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
		break;

	case ETHTOOL_ID_ON:
	case ETHTOOL_ID_INACTIVE:
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
				 F_GPIO0_OUT_VAL);
	}

	return 0;
}

static int get_link_ksettings(struct net_device *dev,
			      struct ethtool_link_ksettings *cmd)
{
	struct port_info *p = netdev_priv(dev);
	u32 supported;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						p->link_config.supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						p->link_config.advertising);

	if (netif_carrier_ok(dev)) {
		cmd->base.speed = p->link_config.speed;
		cmd->base.duplex = p->link_config.duplex;
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);

	cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->base.phy_address = p->phy.mdio.prtad;
	cmd->base.autoneg = p->link_config.autoneg;
	return 0;
}

static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)

static int set_link_ksettings(struct net_device *dev,
			      const struct ethtool_link_ksettings *cmd)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;
	u32 advertising;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if (!(lc->supported & SUPPORTED_Autoneg)) {
		/*
		 * PHY offers a single speed/duplex.  See if that's what's
		 * being requested.
		 */
		if (cmd->base.autoneg == AUTONEG_DISABLE) {
			u32 speed = cmd->base.speed;
			int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
			if (lc->supported & cap)
				return 0;
		}
		return -EINVAL;
	}

	if (cmd->base.autoneg == AUTONEG_DISABLE) {
		u32 speed = cmd->base.speed;
		int cap = speed_duplex_to_caps(speed, cmd->base.duplex);

		if (!(lc->supported & cap) || (speed == SPEED_1000))
			return -EINVAL;
		lc->requested_speed = speed;
		lc->requested_duplex = cmd->base.duplex;
		lc->advertising = 0;
	} else {
		advertising &= ADVERTISED_MASK;
		advertising &= lc->supported;
		if (!advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->base.autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}

static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = q->fl_size;
	e->rx_mini_pending = q->rspq_size;
	e->rx_jumbo_pending = q->jumbo_size;
	e->tx_pending = q->txq_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q;
	int i;

	if (e->rx_pending > MAX_RX_BUFFERS ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	q = &adapter->params.sge.qset[pi->first_qset];
	for (i = 0; i < pi->nqsets; ++i, ++q) {
		q->rspq_size = e->rx_mini_pending;
		q->fl_size = e->rx_pending;
		q->jumbo_size = e->rx_jumbo_pending;
		q->txq_size[0] = e->tx_pending;
		q->txq_size[1] = e->tx_pending;
		q->txq_size[2] = e->tx_pending;
	}
	return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *qsp;
	struct sge_qset *qs;
	int i;

	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
		return -EINVAL;

	for (i = 0; i < pi->nqsets; i++) {
		qsp = &adapter->params.sge.qset[i];
		qs = &adapter->sge.qs[i];
		qsp->coalesce_usecs = c->rx_coalesce_usecs;
		t3_update_qset_coalesce(qs, qsp);
	}

	return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q = adapter->params.sge.qset;

	c->rx_coalesce_usecs = q->coalesce_usecs;
	return 0;
}
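
/*
 * Note that get_coalesce() reports the setting of queue set 0 only, while
 * set_coalesce() applies the value to queue sets 0 through nqsets - 1
 * (indexed from 0, not from the port's first_qset), so the two normally
 * agree.
 */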

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i, err = 0;

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 aligned_offset, aligned_len;
	__le32 *p;
	u8 *buf;
	int err;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = t3_seeprom_read(adapter, aligned_offset, (__le32 *)buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (__le32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (__le32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		kfree(buf);
	return err;
}

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.set_phys_id = set_phys_id,
	.nway_reset = restart_autoneg,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.get_link_ksettings = get_link_ksettings,
	.set_link_ksettings = set_link_ksettings,
};

static int in_range(int val, int lo, int hi)
{
	return val < 0 || (val <= hi && val >= lo);
}
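
/*
 * in_range() treats a negative value as "parameter not supplied, leave
 * unchanged", which is why val < 0 passes the range check.
 */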
2139
2140static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2141{
2142 struct port_info *pi = netdev_priv(dev);
2143 struct adapter *adapter = pi->adapter;
2144 u32 cmd;
2145 int ret;
2146
2147 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
2148 return -EFAULT;
2149
2150 switch (cmd) {
2151 case CHELSIO_SET_QSET_PARAMS:{
2152 int i;
2153 struct qset_params *q;
2154 struct ch_qset_params t;
2155 int q1 = pi->first_qset;
2156 int nqsets = pi->nqsets;
2157
2158 if (!capable(CAP_NET_ADMIN))
2159 return -EPERM;
2160 if (copy_from_user(&t, useraddr, sizeof(t)))
2161 return -EFAULT;
2162 if (t.cmd != CHELSIO_SET_QSET_PARAMS)
2163 return -EINVAL;
2164 if (t.qset_idx >= SGE_QSETS)
2165 return -EINVAL;
2166 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
2167 !in_range(t.cong_thres, 0, 255) ||
2168 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
2169 MAX_TXQ_ENTRIES) ||
2170 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
2171 MAX_TXQ_ENTRIES) ||
2172 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2173 MAX_CTRL_TXQ_ENTRIES) ||
2174 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
2175 MAX_RX_BUFFERS) ||
2176 !in_range(t.fl_size[1], MIN_FL_ENTRIES,
2177 MAX_RX_JUMBO_BUFFERS) ||
2178 !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
2179 MAX_RSPQ_ENTRIES))
2180 return -EINVAL;
2181
2182 if ((adapter->flags & FULL_INIT_DONE) &&
2183 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
2184 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
2185 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
2186 t.polling >= 0 || t.cong_thres >= 0))
2187 return -EBUSY;
2188
2189
2190 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2191 q1 = 0;
2192 for_each_port(adapter, i) {
2193 pi = adap2pinfo(adapter, i);
2194 nqsets += pi->first_qset + pi->nqsets;
2195 }
2196 }
2197
2198 if (t.qset_idx < q1)
2199 return -EINVAL;
2200 if (t.qset_idx > q1 + nqsets - 1)
2201 return -EINVAL;

		q = &adapter->params.sge.qset[t.qset_idx];

		if (t.rspq_size >= 0)
			q->rspq_size = t.rspq_size;
		if (t.fl_size[0] >= 0)
			q->fl_size = t.fl_size[0];
		if (t.fl_size[1] >= 0)
			q->jumbo_size = t.fl_size[1];
		if (t.txq_size[0] >= 0)
			q->txq_size[0] = t.txq_size[0];
		if (t.txq_size[1] >= 0)
			q->txq_size[1] = t.txq_size[1];
		if (t.txq_size[2] >= 0)
			q->txq_size[2] = t.txq_size[2];
		if (t.cong_thres >= 0)
			q->cong_thres = t.cong_thres;
		if (t.intr_lat >= 0) {
			struct sge_qset *qs =
				&adapter->sge.qs[t.qset_idx];

			q->coalesce_usecs = t.intr_lat;
			t3_update_qset_coalesce(qs, q);
		}
		if (t.polling >= 0) {
			if (adapter->flags & USING_MSIX)
				q->polling = t.polling;
			else {
				/* No polling with INTx for T3A */
				if (adapter->params.rev == 0 &&
				    !(adapter->flags & USING_MSI))
					t.polling = 0;

				for (i = 0; i < SGE_QSETS; i++) {
					q = &adapter->params.sge.qset[i];
					q->polling = t.polling;
				}
			}
		}

		if (t.lro >= 0) {
			if (t.lro)
				dev->wanted_features |= NETIF_F_GRO;
			else
				dev->wanted_features &= ~NETIF_F_GRO;
			netdev_update_features(dev);
		}

		break;
	}
	case CHELSIO_GET_QSET_PARAMS: {
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;
		int i;

		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		if (t.cmd != CHELSIO_GET_QSET_PARAMS)
			return -EINVAL;

		/* Display qsets for all ports when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			q1 = 0;
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				nqsets = pi->first_qset + pi->nqsets;
			}
		}

		if (t.qset_idx >= nqsets)
			return -EINVAL;
		/* Clamp the index under speculation (Spectre v1 hardening) */
		t.qset_idx = array_index_nospec(t.qset_idx, nqsets);

		q = &adapter->params.sge.qset[q1 + t.qset_idx];
		t.rspq_size = q->rspq_size;
		t.txq_size[0] = q->txq_size[0];
		t.txq_size[1] = q->txq_size[1];
		t.txq_size[2] = q->txq_size[2];
		t.fl_size[0] = q->fl_size;
		t.fl_size[1] = q->jumbo_size;
		t.polling = q->polling;
		t.lro = !!(dev->features & NETIF_F_GRO);
		t.intr_lat = q->coalesce_usecs;
		t.cong_thres = q->cong_thres;
		t.qnum = q1;

		if (adapter->flags & USING_MSIX)
			t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
		else
			t.vector = adapter->pdev->irq;

		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_QSET_NUM: {
		struct ch_reg edata;
		unsigned int i, first_qset = 0, other_qsets = 0;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		if (edata.cmd != CHELSIO_SET_QSET_NUM)
			return -EINVAL;
		if (edata.val < 1 ||
		    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
			return -EINVAL;

		for_each_port(adapter, i)
			if (adapter->port[i] && adapter->port[i] != dev)
				other_qsets += adap2pinfo(adapter, i)->nqsets;

		if (edata.val + other_qsets > SGE_QSETS)
			return -EINVAL;

		pi->nqsets = edata.val;

		for_each_port(adapter, i)
			if (adapter->port[i]) {
				pi = adap2pinfo(adapter, i);
				pi->first_qset = first_qset;
				first_qset += pi->nqsets;
			}
		break;
	}
	case CHELSIO_GET_QSET_NUM: {
		struct ch_reg edata;

		memset(&edata, 0, sizeof(struct ch_reg));

		edata.cmd = CHELSIO_GET_QSET_NUM;
		edata.val = pi->nqsets;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;
	}
	case CHELSIO_LOAD_FW: {
		u8 *fw_data;
		struct ch_mem_range t;

		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.cmd != CHELSIO_LOAD_FW)
			return -EINVAL;

		/* Copy the image from user space; t3_load_fw() validates it */
		fw_data = memdup_user(useraddr + sizeof(t), t.len);
		if (IS_ERR(fw_data))
			return PTR_ERR(fw_data);

		ret = t3_load_fw(adapter, fw_data, t.len);
		kfree(fw_data);
		if (ret)
			return ret;
		break;
	}
	case CHELSIO_SETMTUTAB: {
		struct ch_mtus m;
		int i;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (offload_running(adapter))
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (m.cmd != CHELSIO_SETMTUTAB)
			return -EINVAL;
		if (m.nmtus != NMTUS)
			return -EINVAL;
		if (m.mtus[0] < 81)	/* accommodate SACK */
			return -EINVAL;

		/* MTUs must be in ascending order */
		for (i = 1; i < NMTUS; ++i)
			if (m.mtus[i] < m.mtus[i - 1])
				return -EINVAL;

		memcpy(adapter->params.mtus, m.mtus,
		       sizeof(adapter->params.mtus));
		break;
	}
	case CHELSIO_GET_PM: {
		struct tp_params *p = &adapter->params.tp;
		struct ch_pm m = {.cmd = CHELSIO_GET_PM };

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		m.tx_pg_sz = p->tx_pg_size;
		m.tx_num_pg = p->tx_num_pgs;
		m.rx_pg_sz = p->rx_pg_size;
		m.rx_num_pg = p->rx_num_pgs;
		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
		if (copy_to_user(useraddr, &m, sizeof(m)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_PM: {
		struct ch_pm m;
		struct tp_params *p = &adapter->params.tp;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (m.cmd != CHELSIO_SET_PM)
			return -EINVAL;
		if (!is_power_of_2(m.rx_pg_sz) ||
		    !is_power_of_2(m.tx_pg_sz))
			return -EINVAL;	/* not power of 2 */
		if (!(m.rx_pg_sz & 0x14000))
			return -EINVAL;	/* not 16KB or 64KB */
		if (!(m.tx_pg_sz & 0x1554000))
			return -EINVAL;	/* not a power of 4 from 16KB to 16MB */
		if (m.tx_num_pg == -1)
			m.tx_num_pg = p->tx_num_pgs;
		if (m.rx_num_pg == -1)
			m.rx_num_pg = p->rx_num_pgs;
		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
			return -EINVAL;	/* page counts must be multiples of 24 */
		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
		    m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
			return -EINVAL;
		p->rx_pg_size = m.rx_pg_sz;
		p->tx_pg_size = m.tx_pg_sz;
		p->rx_num_pgs = m.rx_num_pg;
		p->tx_num_pgs = m.tx_num_pg;
		break;
	}
	case CHELSIO_GET_MEM: {
		struct ch_mem_range t;
		struct mc7 *mem;
		u64 buf[32];

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!(adapter->flags & FULL_INIT_DONE))
			return -EIO;	/* need the memory controllers */
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.cmd != CHELSIO_GET_MEM)
			return -EINVAL;
		if ((t.addr & 7) || (t.len & 7))
			return -EINVAL;
		if (t.mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t.mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t.mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t.version = 3 | (adapter->params.rev << 10);
		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr += sizeof(t);	/* advance to start of buffer */
		while (t.len) {
			unsigned int chunk =
				min_t(unsigned int, t.len, sizeof(buf));

			ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
			if (ret)
				return ret;
			if (copy_to_user(useraddr, buf, chunk))
				return -EFAULT;
			useraddr += chunk;
			t.addr += chunk;
			t.len -= chunk;
		}
		break;
	}
	case CHELSIO_SET_TRACE_FILTER: {
		struct ch_trace t;
		const struct trace_params *tp;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!offload_running(adapter))
			return -EAGAIN;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.cmd != CHELSIO_SET_TRACE_FILTER)
			return -EINVAL;

		tp = (const struct trace_params *)&t.sip;
		if (t.config_tx)
			t3_config_trace_filter(adapter, tp, 0,
					       t.invert_match,
					       t.trace_tx);
		if (t.config_rx)
			t3_config_trace_filter(adapter, tp, 1,
					       t.invert_match,
					       t.trace_rx);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

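/*
 * Standard ioctl entry point: MII register access for the PHY plus the
 * Chelsio private SIOCCHIOCTL extension handled above.
 */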
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct mii_ioctl_data *data = if_mii(req);
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	switch (cmd) {
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		/* Convert phy_id from older PRTAD/DEVAD format */
		if (is_10G(adapter) &&
		    !mdio_phy_id_is_c45(data->phy_id) &&
		    (data->phy_id & 0x1f00) &&
		    !(data->phy_id & 0xe0e0))
			data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
						       data->phy_id & 0x1f);
		/* FALLTHRU */
	case SIOCGMIIPHY:
		return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
	case SIOCCHIOCTL:
		return cxgb_extension_ioctl(dev, req->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
}

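/*
 * Program the new MTU into the MAC and, on rev-0 offload adapters, reload
 * the per-port MTU table used by the offload engine.
 */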
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	ret = t3_mac_set_mtu(&pi->mac, new_mtu);
	if (ret)
		return ret;
	dev->mtu = new_mtu;
	init_port_mtus(adapter);
	if (adapter->params.rev == 0 && offload_running(adapter))
		t3_load_mtus(adapter, adapter->params.mtus,
			     adapter->params.a_wnd, adapter->params.b_wnd,
			     adapter->port[0]->mtu);
	return 0;
}

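/*
 * Install the new station address in the MAC and refresh the offload SMT
 * entry when the offload engine is running.
 */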
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
	if (offload_running(adapter))
		write_smt_entry(adapter, pi->port_id);
	return 0;
}

static netdev_features_t cxgb_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

static int cxgb_set_features(struct net_device *dev,
			     netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		cxgb_vlan_mode(dev, features);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int qidx;

	for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
		struct sge_qset *qs = &adapter->sge.qs[qidx];
		void *source;

		if (adapter->flags & USING_MSIX)
			source = qs;
		else
			source = adapter;

		t3_intr_handler(adapter, qs->rspq.polling)(0, source);
	}
}
#endif

/*
 * Periodic accumulation of MAC statistics.
 */
static void mac_stats_update(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (netif_running(dev)) {
			spin_lock(&adapter->stats_lock);
			t3_mac_update_stats(&p->mac);
			spin_unlock(&adapter->stats_lock);
		}
	}
}

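/*
 * Poll link state on ports whose PHY cannot interrupt, and service any
 * pending link-fault conditions recorded by the interrupt handler.
 */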
static void check_link_status(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int link_fault;

		spin_lock_irq(&adapter->work_lock);
		link_fault = p->link_fault;
		spin_unlock_irq(&adapter->work_lock);

		if (link_fault) {
			t3_link_fault(adapter, i);
			continue;
		}

		if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
			t3_xgm_intr_disable(adapter, i);
			t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);

			t3_link_changed(adapter, i);
			t3_xgm_intr_enable(adapter, i);
		}
	}
}

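/*
 * T3B2 MAC watchdog: runs the per-port MAC sanity check and, if the MAC
 * had to be reset, reprograms its state and re-enables its interrupts.
 */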
static void check_t3b2_mac(struct adapter *adapter)
{
	int i;

	if (!rtnl_trylock())	/* synchronize with ifdown */
		return;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int status;

		if (!netif_running(dev))
			continue;

		status = 0;
		if (netif_carrier_ok(dev))
			status = t3b2_mac_watchdog_task(&p->mac);
		if (status == 1)
			p->mac.stats.num_toggled++;
		else if (status == 2) {
			struct cmac *mac = &p->mac;

			t3_mac_set_mtu(mac, dev->mtu);
			t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
			cxgb_set_rxmode(dev);
			t3_link_start(&p->phy, mac, &p->link_config);
			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
			t3_port_intr_enable(adapter, p->port_id);
			p->mac.stats.num_resets++;
		}
	}
	rtnl_unlock();
}

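/*
 * Periodic adapter housekeeping: polls link state, accumulates MAC stats,
 * runs the T3B2 MAC watchdog, and clears latched error conditions before
 * rescheduling itself.
 */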
static void t3_adap_check_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       adap_check_task.work);
	const struct adapter_params *p = &adapter->params;
	int port;
	unsigned int v, status, reset;

	adapter->check_task_cnt++;

	check_link_status(adapter);

	/* Accumulate MAC stats if needed */
	if (!p->linkpoll_period ||
	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
	    p->stats_update_period) {
		mac_stats_update(adapter);
		adapter->check_task_cnt = 0;
	}

	if (p->rev == T3_REV_B2)
		check_t3b2_mac(adapter);

	/*
	 * Scan the XGMACs to check for various conditions which we want to
	 * monitor in a periodic polling manner rather than via an interrupt
	 * condition.  This is used for conditions which would otherwise flood
	 * the system with interrupts and we only really need to know that the
	 * conditions are "happening" ...  For each condition we count the
	 * detection of the condition and reset it for the next polling loop.
	 */
	for_each_port(adapter, port) {
		struct cmac *mac = &adap2pinfo(adapter, port)->mac;
		u32 cause;

		cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
		reset = 0;
		if (cause & F_RXFIFO_OVERFLOW) {
			mac->stats.rx_fifo_ovfl++;
			reset |= F_RXFIFO_OVERFLOW;
		}

		t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
	}

	/*
	 * We do the same as above for FL_EMPTY interrupts.
	 */
	status = t3_read_reg(adapter, A_SG_INT_CAUSE);
	reset = 0;

	if (status & F_FLEMPTY) {
		struct sge_qset *qs = &adapter->sge.qs[0];
		int i = 0;

		reset |= F_FLEMPTY;

		v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
		    0xffff;

		/* Walk the per-free-list empty bits, two FLs per qset */
		while (v) {
			qs->fl[i].empty += (v & 1);
			if (i)
				qs++;
			i ^= 1;
			v >>= 1;
		}
	}

	t3_write_reg(adapter, A_SG_INT_CAUSE, reset);

	/* Schedule the next check update if any port is active. */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_chk_task(adapter);
	spin_unlock_irq(&adapter->work_lock);
}

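/*
 * Doorbell-status work handlers: forward doorbell FIFO full/empty/drop
 * events from the interrupt handler to the offload driver.
 */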
static void db_full_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       db_full_task);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
}

static void db_empty_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       db_empty_task);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
}

static void db_drop_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       db_drop_task);
	unsigned long delay = 1000;
	unsigned short r;

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);

	/*
	 * Sleep a while before ringing the doorbells.
	 */
	get_random_bytes(&r, 2);
	delay += r & 1023;
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(usecs_to_jiffies(delay));
	ring_dbs(adapter);
}


/*
 * Processes external (PHY) interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       ext_intr_handler_task);
	int i;

	/* Disable link fault interrupts */
	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		t3_xgm_intr_disable(adapter, i);
		t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
	}

	/* Re-enable link fault interrupts */
	t3_phy_intr_handler(adapter);

	for_each_port(adapter, i)
		t3_xgm_intr_enable(adapter, i);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask |= F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
	}
	spin_unlock_irq(&adapter->work_lock);
}

/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
	}
	spin_unlock(&adapter->work_lock);
}

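/*
 * Record a link fault for the port; the periodic check task services it
 * in process context via t3_link_fault().
 */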
void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
{
	struct net_device *netdev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(netdev);

	spin_lock(&adapter->work_lock);
	pi->link_fault = 1;
	spin_unlock(&adapter->work_lock);
}

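/*
 * Bring the adapter down in response to a fatal or PCI error: notify the
 * offload driver, close all ports, stop the SGE timers, and optionally
 * reset the chip before disabling the PCI device.
 */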
static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
{
	int i, ret = 0;

	if (is_offload(adapter) &&
	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
		offload_close(&adapter->tdev);
	}

	/* Stop all ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev))
			__cxgb_close(netdev, on_wq);
	}

	/* Stop SGE timers */
	t3_stop_sge_timers(adapter);

	adapter->flags &= ~FULL_INIT_DONE;

	if (reset)
		ret = t3_reset_adapter(adapter);

	pci_disable_device(adapter->pdev);

	return ret;
}

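/*
 * Re-enable the PCI device after a reset, free the stale SGE resources,
 * and prepare the adapter for a replay of its initialization sequence.
 */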
static int t3_reenable_adapter(struct adapter *adapter)
{
	if (pci_enable_device(adapter->pdev)) {
		dev_err(&adapter->pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto err;
	}
	pci_set_master(adapter->pdev);
	pci_restore_state(adapter->pdev);
	pci_save_state(adapter->pdev);

	/* Free sge resources */
	t3_free_sge_resources(adapter);

	if (t3_replay_prep_adapter(adapter))
		goto err;

	return 0;
err:
	return -1;
}

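/*
 * Reopen the ports that were running before the error and tell the
 * offload driver that the adapter is back.
 */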
static void t3_resume_ports(struct adapter *adapter)
{
	int i;

	/* Restart the ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev)) {
			if (cxgb_open(netdev)) {
				dev_err(&adapter->pdev->dev,
					"can't bring device back up after reset\n");
				continue;
			}
		}
	}

	if (is_offload(adapter) && !ofld_disable)
		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
}

/*
 * Processes a fatal error.
 * Bring the ports down, reset the chip, bring the ports back up.
 */
static void fatal_error_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       fatal_error_handler_task);
	int err = 0;

	rtnl_lock();
	err = t3_adapter_error(adapter, 1, 1);
	if (!err)
		err = t3_reenable_adapter(adapter);
	if (!err)
		t3_resume_ports(adapter);

	CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
	rtnl_unlock();
}

void t3_fatal_err(struct adapter *adapter)
{
	unsigned int fw_status[4];

	if (adapter->flags & FULL_INIT_DONE) {
		t3_sge_stop(adapter);
		t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);

		spin_lock(&adapter->work_lock);
		t3_intr_disable(adapter);
		queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
		spin_unlock(&adapter->work_lock);
	}
	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
			 fw_status[0], fw_status[1],
			 fw_status[2], fw_status[3]);
}

/**
 * t3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	t3_adapter_error(adapter, 0, 0);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * t3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (!t3_reenable_adapter(adapter))
		return PCI_ERS_RESULT_RECOVERED;

	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * t3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void t3_io_resume(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
		 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	rtnl_lock();
	t3_resume_ports(adapter);
	rtnl_unlock();
}

static const struct pci_error_handlers t3_err_handler = {
	.error_detected = t3_io_error_detected,
	.slot_reset = t3_io_slot_reset,
	.resume = t3_io_resume,
};

/*
 * Set the number of qsets based on the number of CPUs and the number of
 * ports, not to exceed the number of available qsets, assuming there are
 * enough qsets per port in HW.
 */
static void set_nqsets(struct adapter *adap)
{
	int i, j = 0;
	int num_cpus = netif_get_num_default_rss_queues();
	int hwports = adap->params.nports;
	int nqsets = adap->msix_nvectors - 1;

	if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
		if (hwports == 2 &&
		    (hwports * nqsets > SGE_QSETS ||
		     num_cpus >= nqsets / hwports))
			nqsets /= hwports;
		if (nqsets > num_cpus)
			nqsets = num_cpus;
		if (nqsets < 1 || hwports == 4)
			nqsets = 1;
	} else {
		nqsets = 1;
	}

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = j;
		pi->nqsets = nqsets;
		j = pi->first_qset + nqsets;

		dev_info(&adap->pdev->dev,
			 "Port %d using %d queue sets.\n", i, nqsets);
	}
}

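/*
 * Request one MSI-X vector per queue set plus one for the slow path,
 * accepting as few as one vector per port plus one.
 */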
static int cxgb_enable_msix(struct adapter *adap)
{
	struct msix_entry entries[SGE_QSETS + 1];
	int vectors;
	int i;

	vectors = ARRAY_SIZE(entries);
	for (i = 0; i < vectors; ++i)
		entries[i].entry = i;

	vectors = pci_enable_msix_range(adap->pdev, entries,
					adap->params.nports + 1, vectors);
	if (vectors < 0)
		return vectors;

	for (i = 0; i < vectors; ++i)
		adap->msix_info[i].vec = entries[i].vector;
	adap->msix_nvectors = vectors;

	return 0;
}

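/*
 * Print a one-line summary for each registered port, plus the memory
 * configuration for the interface that carries the adapter's name.
 */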
static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		if (!test_bit(i, &adap->registered_device_map))
			continue;
		netdev_info(dev, "%s %s %sNIC (rev %d) %s%s\n",
			    ai->desc, pi->phy.desc,
			    is_offload(adap) ? "R" : "", adap->params.rev, buf,
			    (adap->flags & USING_MSIX) ? " MSI-X" :
			    (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name && adap->params.vpd.mclk)
			pr_info("%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
				adap->name, t3_mc7_size(&adap->cm) >> 20,
				t3_mc7_size(&adap->pmtx) >> 20,
				t3_mc7_size(&adap->pmrx) >> 20,
				adap->params.vpd.sn);
	}
}

static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open = cxgb_open,
	.ndo_stop = cxgb_close,
	.ndo_start_xmit = t3_eth_xmit,
	.ndo_get_stats = cxgb_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = cxgb_set_rxmode,
	.ndo_do_ioctl = cxgb_ioctl,
	.ndo_change_mtu = cxgb_change_mtu,
	.ndo_set_mac_address = cxgb_set_mac_addr,
	.ndo_fix_features = cxgb_fix_features,
	.ndo_set_features = cxgb_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cxgb_netpoll,
#endif
};

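/*
 * Derive the port's iSCSI MAC address from its LAN MAC: identical except
 * for the top bit of the fourth octet, keeping the two addresses distinct.
 */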
static void cxgb3_init_iscsi_mac(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
	pi->iscsic.mac_addr[3] |= 0x80;
}

#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int i, err, pci_using_dac = 0;
	resource_size_t mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);

	if (!cxgb3_wq) {
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			pr_err("cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		goto out_disable_device;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_release_regions;
		}
	} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_release_regions;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	ai = t3_get_adapter_info(ent->driver_data);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_release_regions;
	}

	adapter->nofail_skb =
		alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
	if (!adapter->nofail_skb) {
		dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	adapter->mmio_len = mmio_len;

	mutex_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->work_lock);
	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->adapter_list);
	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);

	INIT_WORK(&adapter->db_full_task, db_full_task);
	INIT_WORK(&adapter->db_empty_task, db_empty_task);
	INIT_WORK(&adapter->db_drop_task, db_drop_task);

	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

	for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
		netdev->features |= netdev->hw_features |
				    NETIF_F_HW_VLAN_CTAG_TX;
		netdev->vlan_features |= netdev->features & VLAN_FEAT;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->netdev_ops = &cxgb_netdev_ops;
		netdev->ethtool_ops = &cxgb_ethtool_ops;
		netdev->min_mtu = 81;
		netdev->max_mtu = ETH_MAX_MTU;
		netdev->dev_port = pi->port_id;
	}

	pci_set_drvdata(pdev, adapter);
	if (t3_prep_adapter(adapter, ai, 1) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		err = -ENODEV;
		goto out_free_dev;
	}

	for_each_port(adapter, i)
		cxgb3_init_iscsi_mac(adapter->port[i]);

	/* Driver's ready. Reflect it on LEDs */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	set_nqsets(adapter);

	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);
	if (err) {
		dev_err(&pdev->dev, "cannot create sysfs group\n");
		goto out_close_led;
	}

	print_port_info(adapter, ai);
	return 0;

out_close_led:
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);

out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter:
	kfree(adapter);

out_release_regions:
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out:
	return err;
}

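/*
 * Tear down everything init_one() set up: sysfs, offload state, net
 * devices, SGE resources, interrupts, and the PCI device itself.
 */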
static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (adapter) {
		int i;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->dev.kobj,
				   &cxgb3_attr_group);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		t3_stop_sge_timers(adapter);
		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		kfree_skb(adapter->nofail_skb);	/* kfree_skb() handles NULL */
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}

static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = remove_one,
	.err_handler = &t3_err_handler,
};

static int __init cxgb3_init_module(void)
{
	cxgb3_offload_init();

	return pci_register_driver(&driver);
}

static void __exit cxgb3_cleanup_module(void)
{
	pci_unregister_driver(&driver);
	if (cxgb3_wq)
		destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);