1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
62
63#include "tehuti.h"
64
/* PCI device IDs of supported Tehuti adapters; table is zero-terminated. */
static const struct pci_device_id bdx_pci_tbl[] = {
	{ PCI_VDEVICE(TEHUTI, 0x3009), },
	{ PCI_VDEVICE(TEHUTI, 0x3010), },
	{ PCI_VDEVICE(TEHUTI, 0x3014), },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bdx_pci_tbl);
73
74
/* Forward declarations: definitions appear later in this file. */

/* RX fifo refill and datapath */
static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f);
static void bdx_tx_cleanup(struct bdx_priv *priv);
static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget);

/* Pushes a raw descriptor into the TX fifo (used for firmware download) */
static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size);

/* Fifo/database construction */
static int bdx_tx_init(struct bdx_priv *priv);
static int bdx_rx_init(struct bdx_priv *priv);

/* Teardown */
static void bdx_rx_free(struct bdx_priv *priv);
static void bdx_tx_free(struct bdx_priv *priv);

/* ethtool hookup */
static void bdx_set_ethtool_ops(struct net_device *netdev);
92
93
94
95
96
/*
 * print_hw_id - log hardware identification: board name, port count,
 * SROM/FPGA versions and the negotiated PCI-E link parameters.
 * Called at probe time; reads device config space and MMIO version regs.
 */
static void print_hw_id(struct pci_dev *pdev)
{
	struct pci_nic *nic = pci_get_drvdata(pdev);
	u16 pci_link_status = 0;
	u16 pci_ctrl = 0;

	pci_read_config_word(pdev, PCI_LINK_STATUS_REG, &pci_link_status);
	pci_read_config_word(pdev, PCI_DEV_CTRL_REG, &pci_ctrl);

	pr_info("%s%s\n", BDX_NIC_NAME,
		nic->port_num == 1 ? "" : ", 2-Port");
	/* max_pl = max payload size, mrrs = max read request size */
	pr_info("srom 0x%x fpga %d build %u lane# %d max_pl 0x%x mrrs 0x%x\n",
		readl(nic->regs + SROM_VER), readl(nic->regs + FPGA_VER) & 0xFFF,
		readl(nic->regs + FPGA_SEED),
		GET_LINK_STATUS_LANES(pci_link_status),
		GET_DEV_CTRL_MAXPL(pci_ctrl), GET_DEV_CTRL_MRRS(pci_ctrl));
}
114
/* print_fw_id - log the firmware version read back from the chip. */
static void print_fw_id(struct pci_nic *nic)
{
	pr_info("fw 0x%x\n", readl(nic->regs + FW_VER));
}
119
120static void print_eth_id(struct net_device *ndev)
121{
122 netdev_info(ndev, "%s, Port %c\n",
123 BDX_NIC_NAME, (ndev->if_port == 0) ? 'A' : 'B');
124
125}
126
127
128
129
130
/* Unmask / mask all device interrupt sources via the interrupt mask reg. */
#define bdx_enable_interrupts(priv)	\
	do { WRITE_REG(priv, regIMR, IR_RUN); } while (0)
#define bdx_disable_interrupts(priv)	\
	do { WRITE_REG(priv, regIMR, 0); } while (0)
135
136
137
138
139
140
141
142
143
144
145
146
147
148
/*
 * bdx_fifo_init - allocate a DMA-coherent descriptor fifo and program its
 * base address and size into the chip.
 * @priv:     NIC private structure
 * @f:        generic fifo control structure to initialize
 * @fsz_type: size encoding — fifo holds FIFO_SIZE * 2^fsz_type bytes
 * @reg_*:    offsets of the chip registers controlling this fifo
 *
 * Returns 0 on success, -ENOMEM if the coherent allocation fails.
 */
static int
bdx_fifo_init(struct bdx_priv *priv, struct fifo *f, int fsz_type,
	      u16 reg_CFG0, u16 reg_CFG1, u16 reg_RPTR, u16 reg_WPTR)
{
	/* NOTE(review): memsz is u16 — overflows for fsz_type > 3; presumably
	 * callers never pass larger values — confirm against tehuti.h */
	u16 memsz = FIFO_SIZE * (1 << fsz_type);

	memset(f, 0, sizeof(struct fifo));

	/* FIFO_EXTRA_SPACE gives room for a descriptor that wraps the end */
	f->va = pci_alloc_consistent(priv->pdev,
				     memsz + FIFO_EXTRA_SPACE, &f->da);
	if (!f->va) {
		pr_err("pci_alloc_consistent failed\n");
		RET(-ENOMEM);
	}
	f->reg_CFG0 = reg_CFG0;
	f->reg_CFG1 = reg_CFG1;
	f->reg_RPTR = reg_RPTR;
	f->reg_WPTR = reg_WPTR;
	f->rptr = 0;
	f->wptr = 0;
	f->memsz = memsz;
	f->size_mask = memsz - 1;
	/* CFG0 carries the low base-address bits plus the size encoding;
	 * CFG1 carries the high 32 bits of the DMA address */
	WRITE_REG(priv, reg_CFG0, (u32) ((f->da & TX_RX_CFG0_BASE) | fsz_type));
	WRITE_REG(priv, reg_CFG1, H32_64(f->da));

	RET(0);
}
176
177
178
179
180
181
/*
 * bdx_fifo_free - release the DMA-coherent memory of a fifo allocated by
 * bdx_fifo_init. Safe to call on an already-freed fifo (f->va is NULLed).
 */
static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f)
{
	ENTER;
	if (f->va) {
		pci_free_consistent(priv->pdev,
				    f->memsz + FIFO_EXTRA_SPACE, f->va, f->da);
		f->va = NULL;
	}
	RET();
}
192
193
194
195
196
/*
 * bdx_link_changed - sync netif carrier/queue state with the MAC link
 * status register. Called from the ISR on a link-change interrupt and
 * once at HW start.
 */
static void bdx_link_changed(struct bdx_priv *priv)
{
	u32 link = READ_REG(priv, regMAC_LNK_STAT) & MAC_LINK_STAT;

	if (!link) {
		if (netif_carrier_ok(priv->ndev)) {
			netif_stop_queue(priv->ndev);
			netif_carrier_off(priv->ndev);
			/* NOTE(review): err level for a routine link event —
			 * presumably deliberate for visibility */
			netdev_err(priv->ndev, "Link Down\n");
		}
	} else {
		if (!netif_carrier_ok(priv->ndev)) {
			netif_wake_queue(priv->ndev);
			netif_carrier_on(priv->ndev);
			netdev_err(priv->ndev, "Link Up\n");
		}
	}
}
215
/*
 * bdx_isr_extra - handle the infrequent "extra" interrupt causes that do
 * not go through NAPI: RX-free-fifo refill, link change, PCI-E faults.
 * @isr: snapshot of the interrupt status register
 */
static void bdx_isr_extra(struct bdx_priv *priv, u32 isr)
{
	if (isr & IR_RX_FREE_0) {
		/* RX free fifo ran low — replenish it with fresh skbs */
		bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);
		DBG("RX_FREE_0\n");
	}

	if (isr & IR_LNKCHG0)
		bdx_link_changed(priv);

	if (isr & IR_PCIE_LINK)
		netdev_err(priv->ndev, "PCI-E Link Fault\n");

	if (isr & IR_PCIE_TOUT)
		netdev_err(priv->ndev, "PCI-E Time Out\n");

}
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
/*
 * bdx_isr_napi - interrupt service routine (NAPI mode).
 *
 * Reads the interrupt status (which, per the re-enable pattern below,
 * appears to mask further interrupts on read — TODO confirm against chip
 * docs), dispatches rare "extra" causes inline, and schedules NAPI for
 * rx/tx work. Interrupts are re-enabled here only when NAPI is not
 * scheduled; otherwise bdx_poll re-enables them on completion.
 */
static irqreturn_t bdx_isr_napi(int irq, void *dev)
{
	struct net_device *ndev = dev;
	struct bdx_priv *priv = netdev_priv(ndev);
	u32 isr;

	ENTER;
	isr = (READ_REG(priv, regISR) & IR_RUN);
	if (unlikely(!isr)) {
		/* not our interrupt */
		bdx_enable_interrupts(priv);
		return IRQ_NONE;
	}

	if (isr & IR_EXTRA)
		bdx_isr_extra(priv, isr);

	if (isr & (IR_RX_DESC_0 | IR_TX_FREE_0)) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			__napi_schedule(&priv->napi);
			/* interrupts stay disabled until bdx_poll finishes */
			RET(IRQ_HANDLED);
		} else {
			/* NAPI was already scheduled/running: an interrupt
			 * slipped in between bdx_poll's re-enable and its
			 * return. Poke the write-pointer registers so the
			 * chip raises a fresh interrupt for this work —
			 * NOTE(review): presumed chip behavior, confirm. */
			READ_REG(priv, regTXF_WPTR_0);
			READ_REG(priv, regRXD_WPTR_0);
		}
	}

	bdx_enable_interrupts(priv);
	RET(IRQ_HANDLED);
}
285
/*
 * bdx_poll - NAPI poll handler: reclaim completed TX descriptors, then
 * receive up to @budget packets. Exits polling mode (and re-enables
 * interrupts) when work is exhausted or after 30 consecutive full-budget
 * polls, to avoid staying in poll mode indefinitely under load.
 */
static int bdx_poll(struct napi_struct *napi, int budget)
{
	struct bdx_priv *priv = container_of(napi, struct bdx_priv, napi);
	int work_done;

	ENTER;
	bdx_tx_cleanup(priv);
	work_done = bdx_rx_receive(priv, &priv->rxd_fifo0, budget);
	if ((work_done < budget) ||
	    (priv->napi_stop++ >= 30)) {
		DBG("rx poll is done. backing to isr-driven\n");

		/* from time to time we exit to let NAPI layer release
		 * device lock and allow waiting tasks to run */
		priv->napi_stop = 0;

		napi_complete_done(napi, work_done);
		bdx_enable_interrupts(priv);
	}
	return work_done;
}
307
308
309
310
311
312
313
314
315
316
317
/*
 * bdx_fw_load - load firmware into the chip if it is not yet initialized.
 *
 * Uses regINIT_SEMAPHORE so that on multi-port boards only one port (the
 * "master") downloads the firmware; the other port just waits for
 * regINIT_STATUS to become non-zero. The image is pushed through the TX
 * descriptor fifo via bdx_tx_push_desc_safe.
 *
 * Returns 0 on success, -EIO on init timeout, or the request_firmware
 * error code.
 */
static int bdx_fw_load(struct bdx_priv *priv)
{
	const struct firmware *fw = NULL;
	int master, i;
	int rc;

	ENTER;
	master = READ_REG(priv, regINIT_SEMAPHORE);
	if (!READ_REG(priv, regINIT_STATUS) && master) {
		rc = request_firmware(&fw, "tehuti/bdx.bin", &priv->pdev->dev);
		if (rc)
			goto out;
		bdx_tx_push_desc_safe(priv, (char *)fw->data, fw->size);
		mdelay(100);
	}
	/* wait up to ~400ms for the firmware to signal readiness */
	for (i = 0; i < 200; i++) {
		if (READ_REG(priv, regINIT_STATUS)) {
			rc = 0;
			goto out;
		}
		mdelay(2);
	}
	rc = -EIO;
out:
	/* release the semaphore so the other port can proceed */
	if (master)
		WRITE_REG(priv, regINIT_SEMAPHORE, 1);

	release_firmware(fw);

	if (rc) {
		netdev_err(priv->ndev, "firmware loading failed\n");
		if (rc == -EIO)
			DBG("VPC = 0x%x VIC = 0x%x INIT_STATUS = 0x%x i=%d\n",
			    READ_REG(priv, regVPC),
			    READ_REG(priv, regVIC),
			    READ_REG(priv, regINIT_STATUS), i);
		RET(rc);
	} else {
		DBG("%s: firmware loading success\n", priv->ndev->name);
		RET(0);
	}
}
360
/*
 * bdx_restore_mac - program the netdev's MAC address into the chip's
 * three 16-bit unicast address registers (MAC2 holds the most significant
 * bytes, MAC0 the least significant).
 */
static void bdx_restore_mac(struct net_device *ndev, struct bdx_priv *priv)
{
	u32 val;

	ENTER;
	DBG("mac0=%x mac1=%x mac2=%x\n",
	    READ_REG(priv, regUNC_MAC0_A),
	    READ_REG(priv, regUNC_MAC1_A), READ_REG(priv, regUNC_MAC2_A));

	val = (ndev->dev_addr[0] << 8) | (ndev->dev_addr[1]);
	WRITE_REG(priv, regUNC_MAC2_A, val);
	val = (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]);
	WRITE_REG(priv, regUNC_MAC1_A, val);
	val = (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]);
	WRITE_REG(priv, regUNC_MAC0_A, val);

	DBG("mac0=%x mac1=%x mac2=%x\n",
	    READ_REG(priv, regUNC_MAC0_A),
	    READ_REG(priv, regUNC_MAC1_A), READ_REG(priv, regUNC_MAC2_A));
	RET();
}
382
383
384
385
386
/*
 * bdx_hw_start - bring the MAC out of reset: program frame/fifo/pause
 * parameters, interrupt coalescing, MAC address and RX filter, then
 * request the IRQ and unmask interrupts.
 *
 * Returns 0 on success or the request_irq error code.
 */
static int bdx_hw_start(struct bdx_priv *priv)
{
	int rc = -EIO;
	struct net_device *ndev = priv->ndev;

	ENTER;
	bdx_link_changed(priv);

	/* MAC frame-length / pause / fifo thresholds — values per vendor
	 * init sequence; meanings not derivable from this file */
	WRITE_REG(priv, regFRM_LENGTH, 0X3FE0);
	WRITE_REG(priv, regPAUSE_QUANT, 0x96);
	WRITE_REG(priv, regRX_FIFO_SECTION, 0x800010);
	WRITE_REG(priv, regTX_FIFO_SECTION, 0xE00010);
	WRITE_REG(priv, regRX_FULLNESS, 0);
	WRITE_REG(priv, regTX_FULLNESS, 0);
	WRITE_REG(priv, regCTRLST,
		  regCTRLST_BASE | regCTRLST_RX_ENA | regCTRLST_TX_ENA);

	WRITE_REG(priv, regVGLB, 0);
	WRITE_REG(priv, regMAX_FRAME_A,
		  priv->rxf_fifo0.m.pktsz & MAX_FRAME_AB_VAL);

	/* interrupt coalescing values computed at probe time */
	DBG("RDINTCM=%08x\n", priv->rdintcm);
	WRITE_REG(priv, regRDINTCM0, priv->rdintcm);
	WRITE_REG(priv, regRDINTCM2, 0);

	DBG("TDINTCM=%08x\n", priv->tdintcm);
	WRITE_REG(priv, regTDINTCM0, priv->tdintcm);

	/* MAC address may have been lost across reset — reprogram it */
	bdx_restore_mac(priv->ndev, priv);

	/* accept broadcasts, multicasts and "other-station" frames */
	WRITE_REG(priv, regGMAC_RXF_A, GMAC_RX_FILTER_OSEN |
		  GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB);

/* MSI interrupts are exclusive; legacy line interrupts must be shared */
#define BDX_IRQ_TYPE	((priv->nic->irq_type == IRQ_MSI) ? 0 : IRQF_SHARED)

	rc = request_irq(priv->pdev->irq, bdx_isr_napi, BDX_IRQ_TYPE,
			 ndev->name, ndev);
	if (rc)
		goto err_irq;
	bdx_enable_interrupts(priv);

	RET(0);

err_irq:
	RET(rc);
}
436
/*
 * bdx_hw_stop - mask interrupts, release the IRQ line and stop the
 * netdev queue/carrier. Counterpart of bdx_hw_start.
 */
static void bdx_hw_stop(struct bdx_priv *priv)
{
	ENTER;
	bdx_disable_interrupts(priv);
	free_irq(priv->pdev->irq, priv->ndev);

	netif_carrier_off(priv->ndev);
	netif_stop_queue(priv->ndev);

	RET();
}
448
/*
 * bdx_hw_reset_direct - full PLL soft reset using a raw register base
 * (for use before the bdx_priv structure exists). Pulses CLKPLL_SFTRST,
 * then waits up to 700ms for PLL lock.
 *
 * Returns 0 on success, 1 if the PLL never locked.
 */
static int bdx_hw_reset_direct(void __iomem *regs)
{
	u32 val, i;
	ENTER;

	/* assert soft reset; the "+ 0x8" mirrors bdx_hw_reset — meaning
	 * not derivable from this file, TODO confirm against chip docs */
	val = readl(regs + regCLKPLL);
	writel((val | CLKPLL_SFTRST) + 0x8, regs + regCLKPLL);
	udelay(50);
	val = readl(regs + regCLKPLL);
	writel(val & ~CLKPLL_SFTRST, regs + regCLKPLL);

	/* poll for PLL lock: 70 tries x 10ms */
	for (i = 0; i < 70; i++, mdelay(10))
		if ((readl(regs + regCLKPLL) & CLKPLL_LKD) == CLKPLL_LKD) {
			/* dummy read to latch the new state */
			readl(regs + regRXD_CFG0_0);
			return 0;
		}
	pr_err("HW reset failed\n");
	return 1;
}
471
/*
 * bdx_hw_reset - full PLL soft reset through the per-port register
 * accessors. Only port 0 pulses the PLL (it is shared); both ports wait
 * for lock.
 *
 * Returns 0 on success, 1 if the PLL never locked.
 */
static int bdx_hw_reset(struct bdx_priv *priv)
{
	u32 val, i;
	ENTER;

	if (priv->port == 0) {
		/* assert then deassert PLL soft reset */
		val = READ_REG(priv, regCLKPLL);
		WRITE_REG(priv, regCLKPLL, (val | CLKPLL_SFTRST) + 0x8);
		udelay(50);
		val = READ_REG(priv, regCLKPLL);
		WRITE_REG(priv, regCLKPLL, val & ~CLKPLL_SFTRST);
	}

	/* poll for PLL lock: 70 tries x 10ms */
	for (i = 0; i < 70; i++, mdelay(10))
		if ((READ_REG(priv, regCLKPLL) & CLKPLL_LKD) == CLKPLL_LKD) {
			/* dummy read to latch the new state */
			READ_REG(priv, regRXD_CFG0_0);
			return 0;
		}
	pr_err("HW reset failed\n");
	return 1;
}
495
/*
 * bdx_sw_reset - software port/queue reset sequence (used on chips other
 * than 0x3009). The ordered steps: stop the RX filter, disable port and
 * queues, wait for the port to quiesce, clear interrupt state, assert
 * queue/port reset, zero all TX/RX fifo pointer registers, then release
 * everything in reverse order. Always returns 0; a quiesce timeout is
 * only logged.
 */
static int bdx_sw_reset(struct bdx_priv *priv)
{
	int i;

	ENTER;

	/* 1. stop accepting frames */
	WRITE_REG(priv, regGMAC_RXF_A, 0);
	mdelay(100);
	/* 2. disable port */
	WRITE_REG(priv, regDIS_PORT, 1);
	/* 3. disable queues */
	WRITE_REG(priv, regDIS_QU, 1);
	/* 4. wait until hw is disabled (up to 500ms) */
	for (i = 0; i < 50; i++) {
		if (READ_REG(priv, regRST_PORT) & 1)
			break;
		mdelay(10);
	}
	if (i == 50)
		netdev_err(priv->ndev, "SW reset timeout. continuing anyway\n");

	/* 5. disable interrupts and drop any pending coalescing state */
	WRITE_REG(priv, regRDINTCM0, 0);
	WRITE_REG(priv, regTDINTCM0, 0);
	WRITE_REG(priv, regIMR, 0);
	READ_REG(priv, regISR);	/* read clears pending status */

	/* 6. reset queues */
	WRITE_REG(priv, regRST_QU, 1);
	/* 7. reset port */
	WRITE_REG(priv, regRST_PORT, 1);
	/* 8. zero all read/write fifo pointers (regs spaced 0x10 apart) */
	for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
		DBG("%x = %x\n", i, READ_REG(priv, i) & TXF_WPTR_WR_PTR);
	for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
		WRITE_REG(priv, i, 0);
	/* 9..12. release everything: port, queues, queue reset, port reset */
	WRITE_REG(priv, regDIS_PORT, 0);

	WRITE_REG(priv, regDIS_QU, 0);

	WRITE_REG(priv, regRST_QU, 0);

	WRITE_REG(priv, regRST_PORT, 0);

	/* pointers should all read back as zero now (debug only) */
	for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
		DBG("%x = %x\n", i, READ_REG(priv, i) & TXF_WPTR_WR_PTR);

	RET(0);
}
549
550
551static int bdx_reset(struct bdx_priv *priv)
552{
553 ENTER;
554 RET((priv->pdev->device == 0x3009)
555 ? bdx_hw_reset(priv)
556 : bdx_sw_reset(priv));
557}
558
559
560
561
562
563
564
565
566
567
568
569
/*
 * bdx_close - netdev ->ndo_stop: disable NAPI, reset the chip so DMA is
 * quiescent, stop the hardware/IRQ, then free RX and TX resources.
 * Also used as the error-unwind path of bdx_open. Always returns 0.
 */
static int bdx_close(struct net_device *ndev)
{
	struct bdx_priv *priv = NULL;

	ENTER;
	priv = netdev_priv(ndev);

	napi_disable(&priv->napi);

	/* reset before hw_stop so no DMA is in flight when buffers go away */
	bdx_reset(priv);
	bdx_hw_stop(priv);
	bdx_rx_free(priv);
	bdx_tx_free(priv);
	RET(0);
}
585
586
587
588
589
590
591
592
593
594
595
596
597
/*
 * bdx_open - netdev ->ndo_open: reset the chip, build TX/RX fifos and
 * databases, load firmware, prefill the RX free fifo, start the hardware
 * and enable NAPI. On any failure the partially-built state is torn down
 * via bdx_close.
 *
 * Returns 0 on success or a negative errno.
 */
static int bdx_open(struct net_device *ndev)
{
	struct bdx_priv *priv;
	int rc;

	ENTER;
	priv = netdev_priv(ndev);
	bdx_reset(priv);
	if (netif_running(ndev))
		netif_stop_queue(priv->ndev);

	/* short-circuits on the first failure */
	if ((rc = bdx_tx_init(priv)) ||
	    (rc = bdx_rx_init(priv)) ||
	    (rc = bdx_fw_load(priv)))
		goto err;

	bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);

	rc = bdx_hw_start(priv);
	if (rc)
		goto err;

	napi_enable(&priv->napi);

	print_fw_id(priv->nic);

	RET(0);

err:
	bdx_close(ndev);
	RET(rc);
}
630
631static int bdx_range_check(struct bdx_priv *priv, u32 offset)
632{
633 return (offset > (u32) (BDX_REGS_SIZE / priv->nic->port_num)) ?
634 -EINVAL : 0;
635}
636
/*
 * bdx_ioctl_priv - private ioctl backend: raw register read/write for
 * diagnostics. The user passes data[0]=op, data[1]=register offset,
 * data[2]=value (for writes) / result (for reads). Requires
 * CAP_SYS_RAWIO; offsets are bounds-checked via bdx_range_check.
 *
 * Returns 0 on success or a negative errno.
 */
static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
	struct bdx_priv *priv = netdev_priv(ndev);
	u32 data[3];
	int error;

	ENTER;

	DBG("jiffies=%ld cmd=%d\n", jiffies, cmd);
	/* bare SIOCDEVPRIVATE carries no argument block — reject it;
	 * SIOCDEVPRIVATE+1..+15 carry the three-word request in ifr_data */
	if (cmd != SIOCDEVPRIVATE) {
		error = copy_from_user(data, ifr->ifr_data, sizeof(data));
		if (error) {
			pr_err("can't copy from user\n");
			RET(-EFAULT);
		}
		DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]);
	} else {
		return -EOPNOTSUPP;
	}

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	switch (data[0]) {

	case BDX_OP_READ:
		error = bdx_range_check(priv, data[1]);
		if (error < 0)
			return error;
		data[2] = READ_REG(priv, data[1]);
		DBG("read_reg(0x%x)=0x%x (dec %d)\n", data[1], data[2],
		    data[2]);
		error = copy_to_user(ifr->ifr_data, data, sizeof(data));
		if (error)
			RET(-EFAULT);
		break;

	case BDX_OP_WRITE:
		error = bdx_range_check(priv, data[1]);
		if (error < 0)
			return error;
		WRITE_REG(priv, data[1], data[2]);
		DBG("write_reg(0x%x, 0x%x)\n", data[1], data[2]);
		break;

	default:
		RET(-EOPNOTSUPP);
	}
	return 0;
}
687
688static int bdx_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
689{
690 ENTER;
691 if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15))
692 RET(bdx_ioctl_priv(ndev, ifr, cmd));
693 else
694 RET(-EOPNOTSUPP);
695}
696
697
698
699
700
701
702
703
704
705static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
706{
707 struct bdx_priv *priv = netdev_priv(ndev);
708 u32 reg, bit, val;
709
710 ENTER;
711 DBG2("vid=%d value=%d\n", (int)vid, enable);
712 if (unlikely(vid >= 4096)) {
713 pr_err("invalid VID: %u (> 4096)\n", vid);
714 RET();
715 }
716 reg = regVLAN_0 + (vid / 32) * 4;
717 bit = 1 << vid % 32;
718 val = READ_REG(priv, reg);
719 DBG2("reg=%x, val=%x, bit=%d\n", reg, val, bit);
720 if (enable)
721 val |= bit;
722 else
723 val &= ~bit;
724 DBG2("new val %x\n", val);
725 WRITE_REG(priv, reg, val);
726 RET();
727}
728
729
730
731
732
733
/*
 * bdx_vlan_rx_add_vid - netdev ->ndo_vlan_rx_add_vid: start accepting
 * frames tagged with @vid. Always returns 0.
 */
static int bdx_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	__bdx_vlan_rx_vid(ndev, vid, 1);
	return 0;
}

/*
 * bdx_vlan_rx_kill_vid - netdev ->ndo_vlan_rx_kill_vid: stop accepting
 * frames tagged with @vid. Always returns 0.
 */
static int bdx_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	__bdx_vlan_rx_vid(ndev, vid, 0);
	return 0;
}
750
751
752
753
754
755
756
757
/*
 * bdx_change_mtu - netdev ->ndo_change_mtu: record the new MTU and, if
 * the interface is up, bounce it so RX buffers are re-sized (pktsz is
 * derived from mtu in bdx_rx_init). No local range validation here —
 * presumably the netdev core's min_mtu/max_mtu bounds apply; confirm
 * they are set at probe time. Always returns 0.
 */
static int bdx_change_mtu(struct net_device *ndev, int new_mtu)
{
	ENTER;

	ndev->mtu = new_mtu;
	if (netif_running(ndev)) {
		bdx_close(ndev);
		bdx_open(ndev);
	}
	RET(0);
}
769
/*
 * bdx_setmulti - netdev ->ndo_set_rx_mode: program the GMAC RX filter
 * according to the interface flags and multicast list.
 *
 * IFF_PROMISC: accept everything. IFF_ALLMULTI: fill the imperfect
 * multicast hash with all-ones. Otherwise: clear hash and exact-match
 * slots, then set one hash bit per multicast address (XOR of the six
 * address bytes selects the bit).
 */
static void bdx_setmulti(struct net_device *ndev)
{
	struct bdx_priv *priv = netdev_priv(ndev);

	u32 rxf_val =
	    GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB | GMAC_RX_FILTER_OSEN;
	int i;

	ENTER;

	/* IMF - imperfect (hash) rx multicast filter
	 * PMF - perfect (exact-match) rx multicast filter */
	if (ndev->flags & IFF_PROMISC) {
		rxf_val |= GMAC_RX_FILTER_PRM;
	} else if (ndev->flags & IFF_ALLMULTI) {
		/* set IMF to accept all multicast frames */
		for (i = 0; i < MAC_MCST_HASH_NUM; i++)
			WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0);
	} else if (!netdev_mc_empty(ndev)) {
		u8 hash;
		struct netdev_hw_addr *ha;
		u32 reg, val;

		/* clear the hash table */
		for (i = 0; i < MAC_MCST_HASH_NUM; i++)
			WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, 0);
		/* clear the perfect-match slots */
		for (i = 0; i < MAC_MCST_NUM; i++) {
			WRITE_REG(priv, regRX_MAC_MCST0 + i * 8, 0);
			WRITE_REG(priv, regRX_MAC_MCST1 + i * 8, 0);
		}

		/* hash each list entry into the 256-bit IMF table: the
		 * 8-bit hash's high 3 bits pick the register, the low 5
		 * bits pick the bit within it */
		netdev_for_each_mc_addr(ha, ndev) {
			hash = 0;
			for (i = 0; i < ETH_ALEN; i++)
				hash ^= ha->addr[i];
			reg = regRX_MCST_HASH0 + ((hash >> 5) << 2);
			val = READ_REG(priv, reg);
			val |= (1 << (hash % 32));
			WRITE_REG(priv, reg, val);
		}

	} else {
		/* no multicast addresses: broadcast-accept already set */
		DBG("only own mac %d\n", netdev_mc_count(ndev));
		rxf_val |= GMAC_RX_FILTER_AB;
	}
	WRITE_REG(priv, regGMAC_RXF_A, rxf_val);

	/* enable RX */
	RET();
}
827
/*
 * bdx_set_mac - netdev ->ndo_set_mac_address: copy the new address into
 * the netdev and reprogram it into the chip. NOTE(review): no
 * is_valid_ether_addr() check and no "interface must be down" guard —
 * confirm whether the core performs validation for this driver's ops.
 * Always returns 0.
 */
static int bdx_set_mac(struct net_device *ndev, void *p)
{
	struct bdx_priv *priv = netdev_priv(ndev);
	struct sockaddr *addr = p;

	ENTER;

	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	bdx_restore_mac(ndev, priv);
	RET(0);
}
842
/*
 * bdx_read_mac - read the factory MAC address out of the chip's three
 * 16-bit unicast address registers into ndev->dev_addr. Each register is
 * read twice — presumably a hardware latch quirk; confirm against chip
 * docs before simplifying. Always returns 0.
 */
static int bdx_read_mac(struct bdx_priv *priv)
{
	u16 macAddress[3], i;
	ENTER;

	macAddress[2] = READ_REG(priv, regUNC_MAC0_A);
	macAddress[2] = READ_REG(priv, regUNC_MAC0_A);
	macAddress[1] = READ_REG(priv, regUNC_MAC1_A);
	macAddress[1] = READ_REG(priv, regUNC_MAC1_A);
	macAddress[0] = READ_REG(priv, regUNC_MAC2_A);
	macAddress[0] = READ_REG(priv, regUNC_MAC2_A);
	/* unpack: MAC2 holds bytes 0-1, MAC1 bytes 2-3, MAC0 bytes 4-5 */
	for (i = 0; i < 3; i++) {
		priv->ndev->dev_addr[i * 2 + 1] = macAddress[i];
		priv->ndev->dev_addr[i * 2] = macAddress[i] >> 8;
	}
	RET(0);
}
860
861static u64 bdx_read_l2stat(struct bdx_priv *priv, int reg)
862{
863 u64 val;
864
865 val = READ_REG(priv, reg);
866 val |= ((u64) READ_REG(priv, reg + 8)) << 32;
867 return val;
868}
869
870
/* Record MAC frame statistics */
/*
 * bdx_update_stats - refresh priv->hw_stats from the chip's statistics
 * register banks. struct bdx_stats is treated as a flat u64 array whose
 * field order must match the register layout; the BDX_ASSERTs verify
 * that each bank's end address and the total field count line up.
 */
static void bdx_update_stats(struct bdx_priv *priv)
{
	struct bdx_stats *stats = &priv->hw_stats;
	u64 *stats_vector = (u64 *) stats;
	int i;
	int addr;

	/* bank 1: 12 counters at 0x7200..0x72B0 */
	addr = 0x7200;

	for (i = 0; i < 12; i++) {
		stats_vector[i] = bdx_read_l2stat(priv, addr);
		addr += 0x10;
	}
	BDX_ASSERT(addr != 0x72C0);
	/* bank 2: 4 counters at 0x72F0..0x7320 */
	addr = 0x72F0;
	for (; i < 16; i++) {
		stats_vector[i] = bdx_read_l2stat(priv, addr);
		addr += 0x10;
	}
	BDX_ASSERT(addr != 0x7330);
	/* bank 3: 3 counters at 0x7370..0x7390 */
	addr = 0x7370;
	for (; i < 19; i++) {
		stats_vector[i] = bdx_read_l2stat(priv, addr);
		addr += 0x10;
	}
	BDX_ASSERT(addr != 0x73A0);
	/* bank 4: 4 counters at 0x73C0..0x73F0 */
	addr = 0x73C0;
	for (; i < 23; i++) {
		stats_vector[i] = bdx_read_l2stat(priv, addr);
		addr += 0x10;
	}
	BDX_ASSERT(addr != 0x7400);
	BDX_ASSERT((sizeof(struct bdx_stats) / sizeof(u64)) != i);
}
909
/* Debug printers for RX descriptors, defined after the RX path below. */
static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
		       u16 rxd_vlan);
static void print_rxfd(struct rxf_desc *rxfd);
913
914
915
916
917
/* bdx_rxdb_destroy - free an rx database created by bdx_rxdb_create. */
static void bdx_rxdb_destroy(struct rxdb *db)
{
	vfree(db);
}
922
923static struct rxdb *bdx_rxdb_create(int nelem)
924{
925 struct rxdb *db;
926 int i;
927
928 db = vmalloc(sizeof(struct rxdb)
929 + (nelem * sizeof(int))
930 + (nelem * sizeof(struct rx_map)));
931 if (likely(db != NULL)) {
932 db->stack = (int *)(db + 1);
933 db->elems = (void *)(db->stack + nelem);
934 db->nelem = nelem;
935 db->top = nelem;
936 for (i = 0; i < nelem; i++)
937 db->stack[i] = nelem - i - 1;
938
939 }
940
941 return db;
942}
943
/* bdx_rxdb_alloc_elem - pop a free element index; caller must ensure one
 * is available (see bdx_rxdb_available). */
static inline int bdx_rxdb_alloc_elem(struct rxdb *db)
{
	BDX_ASSERT(db->top <= 0);
	return db->stack[--(db->top)];
}

/* bdx_rxdb_addr_elem - translate element index @n to its rx_map slot. */
static inline void *bdx_rxdb_addr_elem(struct rxdb *db, int n)
{
	BDX_ASSERT((n < 0) || (n >= db->nelem));
	return db->elems + n;
}

/* bdx_rxdb_available - number of free elements left on the stack. */
static inline int bdx_rxdb_available(struct rxdb *db)
{
	return db->top;
}

/* bdx_rxdb_free_elem - push element index @n back onto the free stack. */
static inline void bdx_rxdb_free_elem(struct rxdb *db, int n)
{
	BDX_ASSERT((n >= db->nelem) || (n < 0));
	db->stack[(db->top)++] = n;
}
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
/*
 * bdx_rx_init - create RX descriptor fifo, RX free fifo and the skb
 * database (one entry per rxf descriptor slot), and compute the RX
 * buffer size from the current MTU.
 *
 * Returns 0 on success, -ENOMEM on any allocation failure (partially
 * built fifos are cleaned up by bdx_rx_free via bdx_open's error path).
 */
static int bdx_rx_init(struct bdx_priv *priv)
{
	ENTER;

	if (bdx_fifo_init(priv, &priv->rxd_fifo0.m, priv->rxd_size,
			  regRXD_CFG0_0, regRXD_CFG1_0,
			  regRXD_RPTR_0, regRXD_WPTR_0))
		goto err_mem;
	if (bdx_fifo_init(priv, &priv->rxf_fifo0.m, priv->rxf_size,
			  regRXF_CFG0_0, regRXF_CFG1_0,
			  regRXF_RPTR_0, regRXF_WPTR_0))
		goto err_mem;
	priv->rxdb = bdx_rxdb_create(priv->rxf_fifo0.m.memsz /
				     sizeof(struct rxf_desc));
	if (!priv->rxdb)
		goto err_mem;

	/* buffer must hold a max-MTU frame plus the VLAN ethernet header */
	priv->rxf_fifo0.m.pktsz = priv->ndev->mtu + VLAN_ETH_HLEN;
	return 0;

err_mem:
	netdev_err(priv->ndev, "Rx init failed\n");
	return -ENOMEM;
}
1013
1014
1015
1016
1017
1018
/*
 * bdx_rx_free_skbs - release all skbs still held in the rx database.
 *
 * First drains the free stack, zeroing dma in each free element so it can
 * be distinguished from in-use entries; then walks every element and
 * unmaps/frees the ones whose dma is still set (i.e. skbs the hardware
 * still owned). Assumes DMA has been quiesced (chip reset) beforehand.
 */
static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
{
	struct rx_map *dm;
	struct rxdb *db = priv->rxdb;
	u16 i;

	ENTER;
	DBG("total=%d free=%d busy=%d\n", db->nelem, bdx_rxdb_available(db),
	    db->nelem - bdx_rxdb_available(db));
	while (bdx_rxdb_available(db) > 0) {
		i = bdx_rxdb_alloc_elem(db);
		dm = bdx_rxdb_addr_elem(db, i);
		dm->dma = 0;	/* mark as "no skb attached" */
	}
	for (i = 0; i < db->nelem; i++) {
		dm = bdx_rxdb_addr_elem(db, i);
		if (dm->dma) {
			pci_unmap_single(priv->pdev,
					 dm->dma, f->m.pktsz,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(dm->skb);
		}
	}
}
1043
1044
1045
1046
1047
1048
1049
/*
 * bdx_rx_free - release all RX resources: outstanding skbs, the rx
 * database, and both RX fifos. Safe on partially-initialized state
 * (bdx_fifo_free tolerates unallocated fifos).
 */
static void bdx_rx_free(struct bdx_priv *priv)
{
	ENTER;
	if (priv->rxdb) {
		bdx_rx_free_skbs(priv, &priv->rxf_fifo0);
		bdx_rxdb_destroy(priv->rxdb);
		priv->rxdb = NULL;
	}
	bdx_fifo_free(priv, &priv->rxf_fifo0.m);
	bdx_fifo_free(priv, &priv->rxd_fifo0.m);

	RET();
}
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
/*
 * bdx_rx_alloc_skbs - refill the RX free fifo: for each free database
 * slot (minus one), allocate an skb, DMA-map it, and write an rxf
 * descriptor referencing it. Finally publishes the new write pointer to
 * the chip. Stops early (without error) if skb allocation fails; the
 * next refill will retry.
 */
static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
{
	struct sk_buff *skb;
	struct rxf_desc *rxfd;
	struct rx_map *dm;
	int dno, delta, idx;
	struct rxdb *db = priv->rxdb;

	ENTER;
	/* keep one slot in reserve — NOTE(review): presumably to keep the
	 * fifo from appearing full/empty ambiguously; confirm */
	dno = bdx_rxdb_available(db) - 1;
	while (dno > 0) {
		skb = netdev_alloc_skb(priv->ndev, f->m.pktsz + NET_IP_ALIGN);
		if (!skb)
			break;

		skb_reserve(skb, NET_IP_ALIGN);

		idx = bdx_rxdb_alloc_elem(db);
		dm = bdx_rxdb_addr_elem(db, idx);
		dm->dma = pci_map_single(priv->pdev,
					 skb->data, f->m.pktsz,
					 PCI_DMA_FROMDEVICE);
		dm->skb = skb;
		rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
		rxfd->info = CPU_CHIP_SWAP32(0x10003);	/* INFO=1 BC=3 */
		rxfd->va_lo = idx;	/* database index rides in va_lo */
		rxfd->pa_lo = CPU_CHIP_SWAP32(L32_64(dm->dma));
		rxfd->pa_hi = CPU_CHIP_SWAP32(H32_64(dm->dma));
		rxfd->len = CPU_CHIP_SWAP32(f->m.pktsz);
		print_rxfd(rxfd);

		f->m.wptr += sizeof(struct rxf_desc);
		delta = f->m.wptr - f->m.memsz;
		if (unlikely(delta >= 0)) {
			/* descriptor wrapped past the fifo end: move the
			 * tail bytes (written into FIFO_EXTRA_SPACE) back
			 * to the fifo start */
			f->m.wptr = delta;
			if (delta > 0) {
				memcpy(f->m.va, f->m.va + f->m.memsz, delta);
				DBG("wrapped descriptor\n");
			}
		}
		dno--;
	}
	/* hand the new descriptors to the chip */
	WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
	RET();
}
1127
/*
 * NETIF_RX_MUX - hand a received skb to the stack, attaching the
 * hardware-stripped VLAN tag first when the descriptor says one was
 * present.
 */
static inline void
NETIF_RX_MUX(struct bdx_priv *priv, u32 rxd_val1, u16 rxd_vlan,
	     struct sk_buff *skb)
{
	ENTER;
	DBG("rxdd->flags.bits.vtag=%d\n", GET_RXD_VTAG(rxd_val1));
	if (GET_RXD_VTAG(rxd_val1)) {
		DBG("%s: vlan rcv vlan '%x' vtag '%x'\n",
		    priv->ndev->name,
		    GET_RXD_VLAN_ID(rxd_vlan),
		    GET_RXD_VTAG(rxd_val1));
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), GET_RXD_VLAN_TCI(rxd_vlan));
	}
	netif_receive_skb(skb);
}
1143
/*
 * bdx_recycle_skb - give an already-mapped rx buffer back to the chip
 * without freeing/reallocating it (used for error frames and for the
 * copybreak path). Writes a fresh rxf descriptor pointing at the same
 * DMA buffer; the write pointer is published later by the caller's path
 * (bdx_rx_receive → bdx_rx_alloc_skbs).
 */
static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
{
	struct rxf_desc *rxfd;
	struct rx_map *dm;
	struct rxf_fifo *f;
	struct rxdb *db;
	int delta;

	ENTER;
	DBG("priv=%p rxdd=%p\n", priv, rxdd);
	f = &priv->rxf_fifo0;
	db = priv->rxdb;
	DBG("db=%p f=%p\n", db, f);
	dm = bdx_rxdb_addr_elem(db, rxdd->va_lo);
	DBG("dm=%p\n", dm);
	rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
	rxfd->info = CPU_CHIP_SWAP32(0x10003);	/* INFO=1 BC=3 */
	rxfd->va_lo = rxdd->va_lo;	/* same database index as before */
	rxfd->pa_lo = CPU_CHIP_SWAP32(L32_64(dm->dma));
	rxfd->pa_hi = CPU_CHIP_SWAP32(H32_64(dm->dma));
	rxfd->len = CPU_CHIP_SWAP32(f->m.pktsz);
	print_rxfd(rxfd);

	f->m.wptr += sizeof(struct rxf_desc);
	delta = f->m.wptr - f->m.memsz;
	if (unlikely(delta >= 0)) {
		/* wrap: copy the overhang back to the fifo start */
		f->m.wptr = delta;
		if (delta > 0) {
			memcpy(f->m.va, f->m.va + f->m.memsz, delta);
			DBG("wrapped descriptor\n");
		}
	}
	RET();
}
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
/*
 * bdx_rx_receive - receive completed packets from the RX descriptor
 * fifo, up to @budget (NAPI).
 *
 * For each descriptor: error frames are counted and their buffer
 * recycled; small frames (< BDX_COPYBREAK) are copied into a fresh skb
 * and the original buffer recycled; large frames are unmapped and handed
 * up directly, releasing the database slot. Checksum status and VLAN tag
 * are taken from the descriptor. Finishes by publishing the read pointer
 * and refilling the free fifo.
 *
 * Returns the number of packets delivered.
 */
static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
{
	struct net_device *ndev = priv->ndev;
	struct sk_buff *skb, *skb2;
	struct rxd_desc *rxdd;
	struct rx_map *dm;
	struct rxf_fifo *rxf_fifo;
	int tmp_len, size;
	int done = 0;
	int max_done = BDX_MAX_RX_DONE;
	struct rxdb *db = NULL;
	/* fields decoded from the current descriptor */
	u32 rxd_val1;
	u16 len;
	u16 rxd_vlan;

	ENTER;
	max_done = budget;

	/* chip's current write pointer tells us how much is ready */
	f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_WR_PTR;

	size = f->m.wptr - f->m.rptr;
	if (size < 0)
		size = f->m.memsz + size;	/* wptr wrapped behind rptr */

	while (size > 0) {

		rxdd = (struct rxd_desc *)(f->m.va + f->m.rptr);
		rxd_val1 = CPU_CHIP_SWAP32(rxdd->rxd_val1);

		len = CPU_CHIP_SWAP16(rxdd->len);

		rxd_vlan = CPU_CHIP_SWAP16(rxdd->rxd_vlan);

		print_rxdd(rxdd, rxd_val1, len, rxd_vlan);

		/* descriptor length in bytes = buffer count * 8 */
		tmp_len = GET_RXD_BC(rxd_val1) << 3;
		BDX_ASSERT(tmp_len <= 0);
		size -= tmp_len;
		if (size < 0)	/* descriptor only partially arrived */
			break;

		f->m.rptr += tmp_len;

		tmp_len = f->m.rptr - f->m.memsz;
		if (unlikely(tmp_len >= 0)) {
			/* rptr wrapped: copy the head of the fifo into the
			 * extra space so the wrapped descriptor is
			 * contiguous for the code above */
			f->m.rptr = tmp_len;
			if (tmp_len > 0) {
				DBG("wrapped desc rptr=%d tmp_len=%d\n",
				    f->m.rptr, tmp_len);
				memcpy(f->m.va + f->m.memsz, f->m.va, tmp_len);
			}
		}

		if (unlikely(GET_RXD_ERR(rxd_val1))) {
			DBG("rxd_err = 0x%x\n", GET_RXD_ERR(rxd_val1));
			ndev->stats.rx_errors++;
			bdx_recycle_skb(priv, rxdd);
			continue;
		}

		rxf_fifo = &priv->rxf_fifo0;
		db = priv->rxdb;
		dm = bdx_rxdb_addr_elem(db, rxdd->va_lo);
		skb = dm->skb;

		/* copybreak: small packets are copied so the big mapped
		 * buffer can be recycled without a fresh allocation */
		if (len < BDX_COPYBREAK &&
		    (skb2 = netdev_alloc_skb(priv->ndev, len + NET_IP_ALIGN))) {
			skb_reserve(skb2, NET_IP_ALIGN);

			pci_dma_sync_single_for_cpu(priv->pdev,
						    dm->dma, rxf_fifo->m.pktsz,
						    PCI_DMA_FROMDEVICE);
			memcpy(skb2->data, skb->data, len);
			bdx_recycle_skb(priv, rxdd);
			skb = skb2;
		} else {
			pci_unmap_single(priv->pdev,
					 dm->dma, rxf_fifo->m.pktsz,
					 PCI_DMA_FROMDEVICE);
			bdx_rxdb_free_elem(db, rxdd->va_lo);
		}

		ndev->stats.rx_bytes += len;

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, ndev);

		/* PKT_ID == 0 means the chip did not recognize/verify the
		 * protocol, so no checksum claim can be made */
		if (GET_RXD_PKT_ID(rxd_val1) == 0)
			skb_checksum_none_assert(skb);
		else
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		NETIF_RX_MUX(priv, rxd_val1, rxd_vlan, skb);

		if (++done >= max_done)
			break;
	}

	ndev->stats.rx_packets += done;

	/* tell the chip how far we have consumed */
	WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);

	bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);

	RET(done);
}
1301
1302
1303
1304
/* print_rxdd - debug dump of a decoded RX descriptor (DBG builds only). */
static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
		       u16 rxd_vlan)
{
	DBG("ERROR: rxdd bc %d rxfq %d to %d type %d err %d rxp %d pkt_id %d vtag %d len %d vlan_id %d cfi %d prio %d va_lo %d va_hi %d\n",
	    GET_RXD_BC(rxd_val1), GET_RXD_RXFQ(rxd_val1), GET_RXD_TO(rxd_val1),
	    GET_RXD_TYPE(rxd_val1), GET_RXD_ERR(rxd_val1),
	    GET_RXD_RXP(rxd_val1), GET_RXD_PKT_ID(rxd_val1),
	    GET_RXD_VTAG(rxd_val1), len, GET_RXD_VLAN_ID(rxd_vlan),
	    GET_RXD_CFI(rxd_vlan), GET_RXD_PRIO(rxd_vlan), rxdd->va_lo,
	    rxdd->va_hi);
}
1316
/* print_rxfd - debug dump of an RX free descriptor in chip byte order. */
static void print_rxfd(struct rxf_desc *rxfd)
{
	DBG("=== RxF desc CHIP ORDER/ENDIANNESS =============\n"
	    "info 0x%x va_lo %u pa_lo 0x%x pa_hi 0x%x len 0x%x\n",
	    rxfd->info, rxfd->va_lo, rxfd->pa_lo, rxfd->pa_hi, rxfd->len);
}
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
/*
 * __bdx_tx_db_ptr_next - advance a tx database pointer (rptr or wptr) by
 * one element, wrapping at the end of the circular buffer. The asserts
 * verify the pointer being advanced belongs to @db and is in range.
 */
static inline void __bdx_tx_db_ptr_next(struct txdb *db, struct tx_map **pptr)
{
	BDX_ASSERT(db == NULL || pptr == NULL);	/* sanity */

	BDX_ASSERT(*pptr != db->rptr &&	/* expect either rptr */
		   *pptr != db->wptr);	/* or wptr */

	BDX_ASSERT(*pptr < db->start ||	/* pointer has to be */
		   *pptr >= db->end);	/* in range */

	++*pptr;
	if (unlikely(*pptr == db->end))
		*pptr = db->start;
}
1384
1385
1386
1387
1388
/* bdx_tx_db_inc_rptr - advance the read pointer; asserting the database
 * is not empty first. */
static inline void bdx_tx_db_inc_rptr(struct txdb *db)
{
	BDX_ASSERT(db->rptr == db->wptr);	/* can't read from empty db */
	__bdx_tx_db_ptr_next(db, &db->rptr);
}

/* bdx_tx_db_inc_wptr - advance the write pointer; asserting the database
 * did not just become "empty-looking" (i.e. overflow). */
static inline void bdx_tx_db_inc_wptr(struct txdb *db)
{
	__bdx_tx_db_ptr_next(db, &db->wptr);
	BDX_ASSERT(db->rptr == db->wptr);	/* must not overflow the db */

}
1405
1406
1407
1408
1409
1410
1411
1412
/*
 * bdx_tx_db_init - allocate the tx software database as a circular array
 * of tx_map entries, sized to match the tx fifo (sz_type uses the same
 * encoding as bdx_fifo_init; the +1 doubling is the vendor's sizing —
 * TODO confirm rationale).
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int bdx_tx_db_init(struct txdb *d, int sz_type)
{
	int memsz = FIFO_SIZE * (1 << (sz_type + 1));

	d->start = vmalloc(memsz);
	if (!d->start)
		return -ENOMEM;

	/* In order to differentiate between an empty db state and a full
	 * one, at least one element is always unused: size is the usable
	 * count, end points one past the last element. */
	d->size = memsz / sizeof(struct tx_map) - 1;
	d->end = d->start + d->size + 1;	/* just after last element */

	/* database is empty when rptr == wptr */
	d->rptr = d->start;
	d->wptr = d->start;

	return 0;
}
1435
1436
1437
1438
1439
/* bdx_tx_db_close - free the tx database memory (vfree(NULL) is a no-op,
 * so double close is safe). */
static void bdx_tx_db_close(struct txdb *d)
{
	BDX_ASSERT(d == NULL);

	vfree(d->start);
	d->start = NULL;
}
1447
1448
1449
1450
1451
1452
1453
/* Precomputed TxD descriptor sizes, indexed by number of skb fragments;
 * filled in once by init_txd_sizes(). */
static struct {
	u16 bytes;	/* descriptor size in bytes */
	u16 qwords;	/* same size expressed in 8-byte quadwords */
} txd_sizes[MAX_SKB_FRAGS + 1];
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
/*
 * bdx_tx_map_skb - DMA-map an skb (linear head plus all paged fragments)
 * and fill the descriptor's PBL (physical buffer list) entries, while
 * recording each mapping in the tx software database for later unmapping
 * in tx_cleanup. A final database entry carries the skb pointer itself,
 * with a negative len encoding the total descriptor size.
 */
static inline void
bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb,
	       struct txd_desc *txdd)
{
	struct txdb *db = &priv->txdb;
	struct pbl *pbl = &txdd->pbl[0];
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* map and record the linear part */
	db->wptr->len = skb_headlen(skb);
	db->wptr->addr.dma = pci_map_single(priv->pdev, skb->data,
					    db->wptr->len, PCI_DMA_TODEVICE);
	pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
	pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma));
	pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma));
	DBG("=== pbl   len: 0x%x ================\n", pbl->len);
	DBG("=== pbl pa_lo: 0x%x ================\n", pbl->pa_lo);
	DBG("=== pbl pa_hi: 0x%x ================\n", pbl->pa_hi);
	bdx_tx_db_inc_wptr(db);

	/* map and record each paged fragment */
	for (i = 0; i < nr_frags; i++) {
		const skb_frag_t *frag;

		frag = &skb_shinfo(skb)->frags[i];
		db->wptr->len = skb_frag_size(frag);
		db->wptr->addr.dma = skb_frag_dma_map(&priv->pdev->dev, frag,
						      0, skb_frag_size(frag),
						      DMA_TO_DEVICE);

		pbl++;
		pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
		pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma));
		pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma));
		bdx_tx_db_inc_wptr(db);
	}

	/* add skb clean-up info: negative len marks the terminating entry */
	db->wptr->len = -txd_sizes[nr_frags].bytes;
	db->wptr->addr.skb = skb;
	bdx_tx_db_inc_wptr(db);
}
1512
1513
1514
1515
1516static void __init init_txd_sizes(void)
1517{
1518 int i, lwords;
1519
1520
1521
1522 for (i = 0; i < MAX_SKB_FRAGS + 1; i++) {
1523 lwords = 7 + (i * 3);
1524 if (lwords & 1)
1525 lwords++;
1526 txd_sizes[i].qwords = lwords >> 1;
1527 txd_sizes[i].bytes = lwords << 2;
1528 }
1529}
1530
1531
1532
/*
 * bdx_tx_init - create TX descriptor fifo, TX free fifo and the tx
 * software database, and initialize the flow-control level.
 *
 * Returns 0 on success, -ENOMEM on any allocation failure (cleanup is
 * handled by bdx_tx_free via bdx_open's error path).
 */
static int bdx_tx_init(struct bdx_priv *priv)
{
	if (bdx_fifo_init(priv, &priv->txd_fifo0.m, priv->txd_size,
			  regTXD_CFG0_0,
			  regTXD_CFG1_0, regTXD_RPTR_0, regTXD_WPTR_0))
		goto err_mem;
	if (bdx_fifo_init(priv, &priv->txf_fifo0.m, priv->txf_size,
			  regTXF_CFG0_0,
			  regTXF_CFG1_0, regTXF_RPTR_0, regTXF_WPTR_0))
		goto err_mem;

	/* The txdb must be sized for the larger of the two fifos so it can
	 * never run out of entries before the fifo does */
	if (bdx_tx_db_init(&priv->txdb, max(priv->txd_size, priv->txf_size)))
		goto err_mem;

	priv->tx_level = BDX_MAX_TX_LEVEL;
#ifdef BDX_DELAY_WPTR
	priv->tx_update_mark = priv->tx_level - 1024;
#endif
	return 0;

err_mem:
	netdev_err(priv->ndev, "Tx init failed\n");
	return -ENOMEM;
}
1559
1560
1561
1562
1563
1564
1565
/*
 * bdx_tx_space - number of free bytes in the TX descriptor fifo,
 * computed from the chip's current read pointer versus our write
 * pointer (with wraparound).
 */
static inline int bdx_tx_space(struct bdx_priv *priv)
{
	struct txd_fifo *f = &priv->txd_fifo0;
	int fsize;

	f->m.rptr = READ_REG(priv, f->m.reg_RPTR) & TXF_WPTR_WR_PTR;
	fsize = f->m.rptr - f->m.wptr;
	if (fsize <= 0)
		fsize = f->m.memsz + fsize;	/* wptr is ahead of rptr */
	return fsize;
}
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
/* bdx_tx_transmit - send a packet to the NIC (.ndo_start_xmit).
 * @skb: packet to transmit
 * @ndev: network device
 *
 * Builds a TxD descriptor in place at the fifo write pointer (checksum
 * offload, TSO and VLAN fields included), maps the skb for DMA via
 * bdx_tx_map_skb(), advances the write pointer (handling wrap-around)
 * and kicks the hardware. Stops the queue when the fifo level drops
 * below BDX_MIN_TX_LEVEL. Always returns NETDEV_TX_OK.
 */
static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
				   struct net_device *ndev)
{
	struct bdx_priv *priv = netdev_priv(ndev);
	struct txd_fifo *f = &priv->txd_fifo0;
	int txd_checksum = 7;	/* cleared below if no csum offload requested */
	int txd_lgsnd = 0;
	int txd_vlan_id = 0;
	int txd_vtag = 0;
	int txd_mss = 0;

	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct txd_desc *txdd;
	int len;
	unsigned long flags;

	ENTER;
	/* irqs-off + plain spin_lock here; released together via
	 * spin_unlock_irqrestore() below -- equivalent to
	 * spin_lock_irqsave() */
	local_irq_save(flags);
	spin_lock(&priv->tx_lock);

	/* build the tx descriptor at the current write pointer */
	BDX_ASSERT(f->m.wptr >= f->m.memsz);	/* wptr must be in range */
	txdd = (struct txd_desc *)(f->m.va + f->m.wptr);
	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
		txd_checksum = 0;	/* stack didn't ask for csum offload */

	if (skb_shinfo(skb)->gso_size) {
		/* large-send (TSO) descriptor */
		txd_mss = skb_shinfo(skb)->gso_size;
		txd_lgsnd = 1;
		DBG("skb %p skb len %d gso size = %d\n", skb, skb->len,
		    txd_mss);
	}

	if (skb_vlan_tag_present(skb)) {
		/* VLAN id field is 12 bits wide */
		txd_vlan_id = skb_vlan_tag_get(skb) & BITS_MASK(12);
		txd_vtag = 1;
	}

	txdd->length = CPU_CHIP_SWAP16(skb->len);
	txdd->mss = CPU_CHIP_SWAP16(txd_mss);
	txdd->txd_val1 =
	    CPU_CHIP_SWAP32(TXD_W1_VAL
			    (txd_sizes[nr_frags].qwords, txd_checksum, txd_vtag,
			     txd_lgsnd, txd_vlan_id));
	DBG("=== TxD desc =====================\n");
	DBG("=== w1: 0x%x ================\n", txdd->txd_val1);
	DBG("=== w2: mss 0x%x len 0x%x\n", txdd->mss, txdd->length);

	/* DMA-map head and fragments; fills the pbl entries after txdd */
	bdx_tx_map_skb(priv, skb, txdd);

	/* advance the write pointer; if we ran past the end of the fifo,
	 * copy the overrun tail back to the start (the fifo allocation
	 * appears to include tail room for this -- see bdx_fifo_init) */
	f->m.wptr += txd_sizes[nr_frags].bytes;
	len = f->m.wptr - f->m.memsz;
	if (unlikely(len >= 0)) {
		f->m.wptr = len;
		if (len > 0) {
			BDX_ASSERT(len > f->m.memsz);
			memcpy(f->m.va, f->m.va + f->m.memsz, len);
		}
	}
	BDX_ASSERT(f->m.wptr >= f->m.memsz);

	/* account the descriptor against the free-space level */
	priv->tx_level -= txd_sizes[nr_frags].bytes;
	BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL);
#ifdef BDX_DELAY_WPTR
	if (priv->tx_level > priv->tx_update_mark) {
		/* plenty of room: kick hardware immediately */
		WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
	} else {
		/* batch wptr updates to reduce MMIO writes, but never
		 * defer more than BDX_NO_UPD_PACKETS packets */
		if (priv->tx_noupd++ > BDX_NO_UPD_PACKETS) {
			priv->tx_noupd = 0;
			WRITE_REG(priv, f->m.reg_WPTR,
				  f->m.wptr & TXF_WPTR_WR_PTR);
		}
	}
#else
	/* no wptr batching: always tell hardware about the new
	 * descriptor right away */
	WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);

#endif
#ifdef BDX_LLTX
	netif_trans_update(ndev);	/* driver-managed tx watchdog timestamp */
#endif
	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;

	if (priv->tx_level < BDX_MIN_TX_LEVEL) {
		DBG("%s: %s: TX Q STOP level %d\n",
		    BDX_DRV_NAME, ndev->name, priv->tx_level);
		netif_stop_queue(ndev);
	}

	spin_unlock_irqrestore(&priv->tx_lock, flags);
	return NETDEV_TX_OK;
}
1692
1693
1694
1695
1696
1697
1698
1699
/* bdx_tx_cleanup - reclaim completed tx descriptors.
 * @priv: NIC private structure
 *
 * Walks the TxF (completion) fifo from the current read pointer to the
 * hardware write pointer. For each completed packet it unmaps every
 * DMA mapping recorded in the tx dbl, frees the skb, credits the freed
 * bytes back to tx_level and wakes the queue if it was stopped.
 */
static void bdx_tx_cleanup(struct bdx_priv *priv)
{
	struct txf_fifo *f = &priv->txf_fifo0;
	struct txdb *db = &priv->txdb;
	int tx_level = 0;

	ENTER;
	f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_MASK;
	BDX_ASSERT(f->m.rptr >= f->m.memsz);	/* rptr must be in range */

	while (f->m.wptr != f->m.rptr) {
		f->m.rptr += BDX_TXF_DESC_SZ;
		f->m.rptr &= f->m.size_mask;

		/* unmap all dbl entries of this packet; a packet's entries
		 * are positive-len DMA mappings terminated by a marker
		 * entry whose len is negative (set in bdx_tx_map_skb) */
		BDX_ASSERT(db->rptr->len == 0);	/* len 0 = corrupted dbl */
		do {
			BDX_ASSERT(db->rptr->addr.dma == 0);
			pci_unmap_page(priv->pdev, db->rptr->addr.dma,
				       db->rptr->len, PCI_DMA_TODEVICE);
			bdx_tx_db_inc_rptr(db);
		} while (db->rptr->len > 0);
		/* marker len is -descriptor_bytes, so this adds bytes back */
		tx_level -= db->rptr->len;

		/* the marker entry also carries the skb pointer */
		dev_consume_skb_irq(db->rptr->addr.skb);
		bdx_tx_db_inc_rptr(db);
	}

	/* tell hardware how far we consumed the completion fifo */
	BDX_ASSERT((f->m.wptr & TXF_WPTR_WR_PTR) >= f->m.memsz);
	WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);

	/* credit the reclaimed space under the tx lock so it can't race
	 * with bdx_tx_transmit */
	spin_lock(&priv->tx_lock);
	priv->tx_level += tx_level;
	BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL);
#ifdef BDX_DELAY_WPTR
	if (priv->tx_noupd) {
		/* flush any deferred TxD write-pointer update */
		priv->tx_noupd = 0;
		WRITE_REG(priv, priv->txd_fifo0.m.reg_WPTR,
			  priv->txd_fifo0.m.wptr & TXF_WPTR_WR_PTR);
	}
#endif

	if (unlikely(netif_queue_stopped(priv->ndev) &&
		     netif_carrier_ok(priv->ndev) &&
		     (priv->tx_level >= BDX_MIN_TX_LEVEL))) {
		DBG("%s: %s: TX Q WAKE level %d\n",
		    BDX_DRV_NAME, priv->ndev->name, priv->tx_level);
		netif_wake_queue(priv->ndev);
	}
	spin_unlock(&priv->tx_lock);
}
1756
1757
1758
1759
1760
1761static void bdx_tx_free_skbs(struct bdx_priv *priv)
1762{
1763 struct txdb *db = &priv->txdb;
1764
1765 ENTER;
1766 while (db->rptr != db->wptr) {
1767 if (likely(db->rptr->len))
1768 pci_unmap_page(priv->pdev, db->rptr->addr.dma,
1769 db->rptr->len, PCI_DMA_TODEVICE);
1770 else
1771 dev_kfree_skb(db->rptr->addr.skb);
1772 bdx_tx_db_inc_rptr(db);
1773 }
1774 RET();
1775}
1776
1777
/* bdx_tx_free - release all Tx resources.
 * @priv: NIC private structure
 *
 * Frees pending skbs/DMA mappings, then both hardware fifos, then the
 * software tx dbl (reverse of bdx_tx_init's setup order).
 */
static void bdx_tx_free(struct bdx_priv *priv)
{
	ENTER;
	bdx_tx_free_skbs(priv);
	bdx_fifo_free(priv, &priv->txd_fifo0.m);
	bdx_fifo_free(priv, &priv->txf_fifo0.m);
	bdx_tx_db_close(&priv->txdb);
}
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798static void bdx_tx_push_desc(struct bdx_priv *priv, void *data, int size)
1799{
1800 struct txd_fifo *f = &priv->txd_fifo0;
1801 int i = f->m.memsz - f->m.wptr;
1802
1803 if (size == 0)
1804 return;
1805
1806 if (i > size) {
1807 memcpy(f->m.va + f->m.wptr, data, size);
1808 f->m.wptr += size;
1809 } else {
1810 memcpy(f->m.va + f->m.wptr, data, i);
1811 f->m.wptr = size - i;
1812 memcpy(f->m.va, data + i, f->m.wptr);
1813 }
1814 WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
1815}
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size)
1827{
1828 int timer = 0;
1829 ENTER;
1830
1831 while (size > 0) {
1832
1833
1834
1835 int avail = bdx_tx_space(priv) - 8;
1836 if (avail <= 0) {
1837 if (timer++ > 300) {
1838 DBG("timeout while writing desc to TxD fifo\n");
1839 break;
1840 }
1841 udelay(50);
1842 continue;
1843 }
1844 avail = min(avail, size);
1845 DBG("about to push %d bytes starting %p size %d\n", avail,
1846 data, size);
1847 bdx_tx_push_desc(priv, data, avail);
1848 size -= avail;
1849 data += avail;
1850 }
1851 RET();
1852}
1853
/* Netdev callbacks; the Tx fast path entry is bdx_tx_transmit() above. */
static const struct net_device_ops bdx_netdev_ops = {
	.ndo_open = bdx_open,
	.ndo_stop = bdx_close,
	.ndo_start_xmit = bdx_tx_transmit,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = bdx_ioctl,
	.ndo_set_rx_mode = bdx_setmulti,
	.ndo_change_mtu = bdx_change_mtu,
	.ndo_set_mac_address = bdx_set_mac,
	.ndo_vlan_rx_add_vid = bdx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = bdx_vlan_rx_kill_vid,
};
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884static int
1885bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1886{
1887 struct net_device *ndev;
1888 struct bdx_priv *priv;
1889 int err, pci_using_dac, port;
1890 unsigned long pciaddr;
1891 u32 regionSize;
1892 struct pci_nic *nic;
1893
1894 ENTER;
1895
1896 nic = vmalloc(sizeof(*nic));
1897 if (!nic)
1898 RET(-ENOMEM);
1899
1900
1901 err = pci_enable_device(pdev);
1902 if (err)
1903 goto err_pci;
1904
1905 if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) &&
1906 !(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) {
1907 pci_using_dac = 1;
1908 } else {
1909 if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) ||
1910 (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
1911 pr_err("No usable DMA configuration, aborting\n");
1912 goto err_dma;
1913 }
1914 pci_using_dac = 0;
1915 }
1916
1917 err = pci_request_regions(pdev, BDX_DRV_NAME);
1918 if (err)
1919 goto err_dma;
1920
1921 pci_set_master(pdev);
1922
1923 pciaddr = pci_resource_start(pdev, 0);
1924 if (!pciaddr) {
1925 err = -EIO;
1926 pr_err("no MMIO resource\n");
1927 goto err_out_res;
1928 }
1929 regionSize = pci_resource_len(pdev, 0);
1930 if (regionSize < BDX_REGS_SIZE) {
1931 err = -EIO;
1932 pr_err("MMIO resource (%x) too small\n", regionSize);
1933 goto err_out_res;
1934 }
1935
1936 nic->regs = ioremap(pciaddr, regionSize);
1937 if (!nic->regs) {
1938 err = -EIO;
1939 pr_err("ioremap failed\n");
1940 goto err_out_res;
1941 }
1942
1943 if (pdev->irq < 2) {
1944 err = -EIO;
1945 pr_err("invalid irq (%d)\n", pdev->irq);
1946 goto err_out_iomap;
1947 }
1948 pci_set_drvdata(pdev, nic);
1949
1950 if (pdev->device == 0x3014)
1951 nic->port_num = 2;
1952 else
1953 nic->port_num = 1;
1954
1955 print_hw_id(pdev);
1956
1957 bdx_hw_reset_direct(nic->regs);
1958
1959 nic->irq_type = IRQ_INTX;
1960#ifdef BDX_MSI
1961 if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) {
1962 err = pci_enable_msi(pdev);
1963 if (err)
1964 pr_err("Can't enable msi. error is %d\n", err);
1965 else
1966 nic->irq_type = IRQ_MSI;
1967 } else
1968 DBG("HW does not support MSI\n");
1969#endif
1970
1971
1972 for (port = 0; port < nic->port_num; port++) {
1973 ndev = alloc_etherdev(sizeof(struct bdx_priv));
1974 if (!ndev) {
1975 err = -ENOMEM;
1976 goto err_out_iomap;
1977 }
1978
1979 ndev->netdev_ops = &bdx_netdev_ops;
1980 ndev->tx_queue_len = BDX_NDEV_TXQ_LEN;
1981
1982 bdx_set_ethtool_ops(ndev);
1983
1984
1985
1986 ndev->if_port = port;
1987 ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
1988 | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
1989 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM
1990 ;
1991 ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
1992 NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX;
1993
1994 if (pci_using_dac)
1995 ndev->features |= NETIF_F_HIGHDMA;
1996
1997
1998 priv = nic->priv[port] = netdev_priv(ndev);
1999
2000 priv->pBdxRegs = nic->regs + port * 0x8000;
2001 priv->port = port;
2002 priv->pdev = pdev;
2003 priv->ndev = ndev;
2004 priv->nic = nic;
2005 priv->msg_enable = BDX_DEF_MSG_ENABLE;
2006
2007 netif_napi_add(ndev, &priv->napi, bdx_poll, 64);
2008
2009 if ((readl(nic->regs + FPGA_VER) & 0xFFF) == 308) {
2010 DBG("HW statistics not supported\n");
2011 priv->stats_flag = 0;
2012 } else {
2013 priv->stats_flag = 1;
2014 }
2015
2016
2017 priv->txd_size = 2;
2018 priv->txf_size = 2;
2019 priv->rxd_size = 2;
2020 priv->rxf_size = 3;
2021
2022
2023 priv->rdintcm = INT_REG_VAL(0x20, 1, 4, 12);
2024 priv->tdintcm = INT_REG_VAL(0x20, 1, 0, 12);
2025
2026
2027
2028
2029
2030
2031#ifdef BDX_LLTX
2032 ndev->features |= NETIF_F_LLTX;
2033#endif
2034
2035 ndev->min_mtu = ETH_ZLEN;
2036 ndev->max_mtu = BDX_MAX_MTU;
2037
2038 spin_lock_init(&priv->tx_lock);
2039
2040
2041 if (bdx_read_mac(priv)) {
2042 pr_err("load MAC address failed\n");
2043 goto err_out_iomap;
2044 }
2045 SET_NETDEV_DEV(ndev, &pdev->dev);
2046 err = register_netdev(ndev);
2047 if (err) {
2048 pr_err("register_netdev failed\n");
2049 goto err_out_free;
2050 }
2051 netif_carrier_off(ndev);
2052 netif_stop_queue(ndev);
2053
2054 print_eth_id(ndev);
2055 }
2056 RET(0);
2057
2058err_out_free:
2059 free_netdev(ndev);
2060err_out_iomap:
2061 iounmap(nic->regs);
2062err_out_res:
2063 pci_release_regions(pdev);
2064err_dma:
2065 pci_disable_device(pdev);
2066err_pci:
2067 vfree(nic);
2068
2069 RET(err);
2070}
2071
2072
2073
/* ethtool statistic names, in the exact order of the u64 counters in
 * struct bdx_stats -- bdx_get_sset_count() asserts the two stay in
 * sync. Do not reorder without updating that structure. */
static const char
 bdx_stat_names[][ETH_GSTRING_LEN] = {
	"InUCast",
	"InMCast",
	"InBCast",
	"InPkts",
	"InErrors",
	"InDropped",
	"FrameTooLong",
	"FrameSequenceErrors",
	"InVLAN",
	"InDroppedDFE",
	"InDroppedIntFull",
	"InFrameAlignErrors",



	"OutUCast",
	"OutMCast",
	"OutBCast",
	"OutPkts",



	"OutVLAN",
	"InUCastOctects",
	"OutUCastOctects",



	"InBCastOctects",
	"OutBCastOctects",
	"InOctects",
	"OutOctects",
};
2109
2110
2111
2112
2113
2114
2115static int bdx_get_link_ksettings(struct net_device *netdev,
2116 struct ethtool_link_ksettings *ecmd)
2117{
2118 ethtool_link_ksettings_zero_link_mode(ecmd, supported);
2119 ethtool_link_ksettings_add_link_mode(ecmd, supported,
2120 10000baseT_Full);
2121 ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
2122 ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
2123 ethtool_link_ksettings_add_link_mode(ecmd, advertising,
2124 10000baseT_Full);
2125 ethtool_link_ksettings_add_link_mode(ecmd, advertising, FIBRE);
2126
2127 ecmd->base.speed = SPEED_10000;
2128 ecmd->base.duplex = DUPLEX_FULL;
2129 ecmd->base.port = PORT_FIBRE;
2130 ecmd->base.autoneg = AUTONEG_DISABLE;
2131
2132 return 0;
2133}
2134
2135
2136
2137
2138
2139
2140static void
2141bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
2142{
2143 struct bdx_priv *priv = netdev_priv(netdev);
2144
2145 strlcpy(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver));
2146 strlcpy(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version));
2147 strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
2148 strlcpy(drvinfo->bus_info, pci_name(priv->pdev),
2149 sizeof(drvinfo->bus_info));
2150}
2151
2152
2153
2154
2155
2156
2157static int
2158bdx_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal)
2159{
2160 u32 rdintcm;
2161 u32 tdintcm;
2162 struct bdx_priv *priv = netdev_priv(netdev);
2163
2164 rdintcm = priv->rdintcm;
2165 tdintcm = priv->tdintcm;
2166
2167
2168
2169 ecoal->rx_coalesce_usecs = GET_INT_COAL(rdintcm) * INT_COAL_MULT;
2170 ecoal->rx_max_coalesced_frames =
2171 ((GET_PCK_TH(rdintcm) * PCK_TH_MULT) / sizeof(struct rxf_desc));
2172
2173 ecoal->tx_coalesce_usecs = GET_INT_COAL(tdintcm) * INT_COAL_MULT;
2174 ecoal->tx_max_coalesced_frames =
2175 ((GET_PCK_TH(tdintcm) * PCK_TH_MULT) / BDX_TXF_DESC_SZ);
2176
2177
2178 return 0;
2179}
2180
2181
2182
2183
2184
2185
2186static int
2187bdx_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal)
2188{
2189 u32 rdintcm;
2190 u32 tdintcm;
2191 struct bdx_priv *priv = netdev_priv(netdev);
2192 int rx_coal;
2193 int tx_coal;
2194 int rx_max_coal;
2195 int tx_max_coal;
2196
2197
2198 rx_coal = ecoal->rx_coalesce_usecs / INT_COAL_MULT;
2199 tx_coal = ecoal->tx_coalesce_usecs / INT_COAL_MULT;
2200 rx_max_coal = ecoal->rx_max_coalesced_frames;
2201 tx_max_coal = ecoal->tx_max_coalesced_frames;
2202
2203
2204 rx_max_coal =
2205 (((rx_max_coal * sizeof(struct rxf_desc)) + PCK_TH_MULT - 1)
2206 / PCK_TH_MULT);
2207 tx_max_coal =
2208 (((tx_max_coal * BDX_TXF_DESC_SZ) + PCK_TH_MULT - 1)
2209 / PCK_TH_MULT);
2210
2211 if ((rx_coal > 0x7FFF) || (tx_coal > 0x7FFF) ||
2212 (rx_max_coal > 0xF) || (tx_max_coal > 0xF))
2213 return -EINVAL;
2214
2215 rdintcm = INT_REG_VAL(rx_coal, GET_INT_COAL_RC(priv->rdintcm),
2216 GET_RXF_TH(priv->rdintcm), rx_max_coal);
2217 tdintcm = INT_REG_VAL(tx_coal, GET_INT_COAL_RC(priv->tdintcm), 0,
2218 tx_max_coal);
2219
2220 priv->rdintcm = rdintcm;
2221 priv->tdintcm = tdintcm;
2222
2223 WRITE_REG(priv, regRDINTCM0, rdintcm);
2224 WRITE_REG(priv, regTDINTCM0, tdintcm);
2225
2226 return 0;
2227}
2228
2229
2230static inline int bdx_rx_fifo_size_to_packets(int rx_size)
2231{
2232 return (FIFO_SIZE * (1 << rx_size)) / sizeof(struct rxf_desc);
2233}
2234
2235
2236static inline int bdx_tx_fifo_size_to_packets(int tx_size)
2237{
2238 return (FIFO_SIZE * (1 << tx_size)) / BDX_TXF_DESC_SZ;
2239}
2240
2241
2242
2243
2244
2245
2246static void
2247bdx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
2248{
2249 struct bdx_priv *priv = netdev_priv(netdev);
2250
2251
2252 ring->rx_max_pending = bdx_rx_fifo_size_to_packets(3);
2253 ring->tx_max_pending = bdx_tx_fifo_size_to_packets(3);
2254 ring->rx_pending = bdx_rx_fifo_size_to_packets(priv->rxf_size);
2255 ring->tx_pending = bdx_tx_fifo_size_to_packets(priv->txd_size);
2256}
2257
2258
2259
2260
2261
2262
2263static int
2264bdx_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
2265{
2266 struct bdx_priv *priv = netdev_priv(netdev);
2267 int rx_size = 0;
2268 int tx_size = 0;
2269
2270 for (; rx_size < 4; rx_size++) {
2271 if (bdx_rx_fifo_size_to_packets(rx_size) >= ring->rx_pending)
2272 break;
2273 }
2274 if (rx_size == 4)
2275 rx_size = 3;
2276
2277 for (; tx_size < 4; tx_size++) {
2278 if (bdx_tx_fifo_size_to_packets(tx_size) >= ring->tx_pending)
2279 break;
2280 }
2281 if (tx_size == 4)
2282 tx_size = 3;
2283
2284
2285 if ((rx_size == priv->rxf_size) &&
2286 (tx_size == priv->txd_size))
2287 return 0;
2288
2289 priv->rxf_size = rx_size;
2290 if (rx_size > 1)
2291 priv->rxd_size = rx_size - 1;
2292 else
2293 priv->rxd_size = rx_size;
2294
2295 priv->txf_size = priv->txd_size = tx_size;
2296
2297 if (netif_running(netdev)) {
2298 bdx_close(netdev);
2299 bdx_open(netdev);
2300 }
2301 return 0;
2302}
2303
2304
2305
2306
2307
2308
2309static void bdx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2310{
2311 switch (stringset) {
2312 case ETH_SS_STATS:
2313 memcpy(data, *bdx_stat_names, sizeof(bdx_stat_names));
2314 break;
2315 }
2316}
2317
2318
2319
2320
2321
2322static int bdx_get_sset_count(struct net_device *netdev, int stringset)
2323{
2324 struct bdx_priv *priv = netdev_priv(netdev);
2325
2326 switch (stringset) {
2327 case ETH_SS_STATS:
2328 BDX_ASSERT(ARRAY_SIZE(bdx_stat_names)
2329 != sizeof(struct bdx_stats) / sizeof(u64));
2330 return (priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0;
2331 }
2332
2333 return -EINVAL;
2334}
2335
2336
2337
2338
2339
2340
2341
2342static void bdx_get_ethtool_stats(struct net_device *netdev,
2343 struct ethtool_stats *stats, u64 *data)
2344{
2345 struct bdx_priv *priv = netdev_priv(netdev);
2346
2347 if (priv->stats_flag) {
2348
2349
2350 bdx_update_stats(priv);
2351
2352
2353 memcpy(data, &priv->hw_stats, sizeof(priv->hw_stats));
2354 }
2355}
2356
2357
2358
2359
2360
/* bdx_set_ethtool_ops - attach this driver's ethtool callbacks to @netdev. */
static void bdx_set_ethtool_ops(struct net_device *netdev)
{
	static const struct ethtool_ops bdx_ethtool_ops = {
		.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
					     ETHTOOL_COALESCE_MAX_FRAMES,
		.get_drvinfo = bdx_get_drvinfo,
		.get_link = ethtool_op_get_link,
		.get_coalesce = bdx_get_coalesce,
		.set_coalesce = bdx_set_coalesce,
		.get_ringparam = bdx_get_ringparam,
		.set_ringparam = bdx_set_ringparam,
		.get_strings = bdx_get_strings,
		.get_sset_count = bdx_get_sset_count,
		.get_ethtool_stats = bdx_get_ethtool_stats,
		.get_link_ksettings = bdx_get_link_ksettings,
	};

	netdev->ethtool_ops = &bdx_ethtool_ops;
}
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
/* bdx_remove - PCI remove callback: undo everything bdx_probe() did.
 * @pdev: PCI device being removed
 *
 * Unregisters and frees every port's netdev first, then tears down
 * MSI, the register mapping and the PCI resources in reverse order of
 * acquisition.
 */
static void bdx_remove(struct pci_dev *pdev)
{
	struct pci_nic *nic = pci_get_drvdata(pdev);
	struct net_device *ndev;
	int port;

	for (port = 0; port < nic->port_num; port++) {
		ndev = nic->priv[port]->ndev;
		unregister_netdev(ndev);
		free_netdev(ndev);
	}

#ifdef BDX_MSI
	if (nic->irq_type == IRQ_MSI)
		pci_disable_msi(pdev);
#endif

	iounmap(nic->regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	vfree(nic);

	RET();
}
2415
/* PCI driver glue: binds bdx_pci_tbl device IDs to probe/remove. */
static struct pci_driver bdx_pci_driver = {
	.name = BDX_DRV_NAME,
	.id_table = bdx_pci_tbl,
	.probe = bdx_probe,
	.remove = bdx_remove,
};
2422
2423
2424
2425
/* Print the load-time banner: driver description, version and options. */
static void __init print_driver_id(void)
{
	pr_info("%s, %s\n", BDX_DRV_DESC, BDX_DRV_VERSION);
	pr_info("Options: hw_csum %s\n", BDX_MSI_STRING);
}
2431
/* Module entry point: precompute descriptor sizes, print the banner
 * and register the PCI driver. */
static int __init bdx_module_init(void)
{
	ENTER;
	init_txd_sizes();	/* fill txd_sizes[] for all frag counts */
	print_driver_id();
	RET(pci_register_driver(&bdx_pci_driver));
}
2439
2440module_init(bdx_module_init);
2441
/* Module exit point: unregister the PCI driver (triggers bdx_remove). */
static void __exit bdx_module_exit(void)
{
	ENTER;
	pci_unregister_driver(&bdx_pci_driver);
	RET();
}
2448
2449module_exit(bdx_module_exit);
2450
2451MODULE_LICENSE("GPL");
2452MODULE_AUTHOR(DRIVER_AUTHOR);
2453MODULE_DESCRIPTION(BDX_DRV_DESC);
2454MODULE_FIRMWARE("tehuti/bdx.bin");
2455