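/*
 * Tehuti Networks(R) network driver ("bdx") for the TN30xx family of
 * 10 GbE adapters.  Implements PCI probe/remove, firmware load, the
 * NAPI RX/TX datapaths and a basic ethtool interface.
 */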
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "tehuti.h"

static const struct pci_device_id bdx_pci_tbl[] = {
	{ PCI_VDEVICE(TEHUTI, 0x3009), },
	{ PCI_VDEVICE(TEHUTI, 0x3010), },
	{ PCI_VDEVICE(TEHUTI, 0x3014), },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bdx_pci_tbl);

static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f);
static void bdx_tx_cleanup(struct bdx_priv *priv);
static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget);

static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size);

static int bdx_tx_init(struct bdx_priv *priv);
static int bdx_rx_init(struct bdx_priv *priv);

static void bdx_rx_free(struct bdx_priv *priv);
static void bdx_tx_free(struct bdx_priv *priv);

static void bdx_set_ethtool_ops(struct net_device *netdev);
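
/* print_hw_id - dump silicon/board identification and the PCIe link
 * parameters (lane count, max payload, MRRS) read from config space.
 */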
static void print_hw_id(struct pci_dev *pdev)
{
	struct pci_nic *nic = pci_get_drvdata(pdev);
	u16 pci_link_status = 0;
	u16 pci_ctrl = 0;

	pci_read_config_word(pdev, PCI_LINK_STATUS_REG, &pci_link_status);
	pci_read_config_word(pdev, PCI_DEV_CTRL_REG, &pci_ctrl);

	pr_info("%s%s\n", BDX_NIC_NAME,
		nic->port_num == 1 ? "" : ", 2-Port");
	pr_info("srom 0x%x fpga %d build %u lane# %d max_pl 0x%x mrrs 0x%x\n",
		readl(nic->regs + SROM_VER), readl(nic->regs + FPGA_VER) & 0xFFF,
		readl(nic->regs + FPGA_SEED),
		GET_LINK_STATUS_LANES(pci_link_status),
		GET_DEV_CTRL_MAXPL(pci_ctrl), GET_DEV_CTRL_MRRS(pci_ctrl));
}

static void print_fw_id(struct pci_nic *nic)
{
	pr_info("fw 0x%x\n", readl(nic->regs + FW_VER));
}

static void print_eth_id(struct net_device *ndev)
{
	netdev_info(ndev, "%s, Port %c\n",
		    BDX_NIC_NAME, (ndev->if_port == 0) ? 'A' : 'B');
}
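
/* Interrupts are (un)masked globally through the IMR register; IR_RUN
 * is the full set of interrupt sources this driver services.
 */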
#define bdx_enable_interrupts(priv)	\
	do { WRITE_REG(priv, regIMR, IR_RUN); } while (0)
#define bdx_disable_interrupts(priv)	\
	do { WRITE_REG(priv, regIMR, 0); } while (0)
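
/* bdx_fifo_init - allocate one DMA-coherent TX/RX fifo and program its
 * base address into the given CFG registers.  fsz_type encodes the fifo
 * size as FIFO_SIZE << fsz_type; FIFO_EXTRA_SPACE is tail room used when
 * a descriptor wraps around the end of the ring.  bdx_fifo_free() below
 * is the matching release path.
 */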
static int
bdx_fifo_init(struct bdx_priv *priv, struct fifo *f, int fsz_type,
	      u16 reg_CFG0, u16 reg_CFG1, u16 reg_RPTR, u16 reg_WPTR)
{
	u16 memsz = FIFO_SIZE * (1 << fsz_type);

	memset(f, 0, sizeof(struct fifo));

	f->va = pci_alloc_consistent(priv->pdev,
				     memsz + FIFO_EXTRA_SPACE, &f->da);
	if (!f->va) {
		pr_err("pci_alloc_consistent failed\n");
		RET(-ENOMEM);
	}
	f->reg_CFG0 = reg_CFG0;
	f->reg_CFG1 = reg_CFG1;
	f->reg_RPTR = reg_RPTR;
	f->reg_WPTR = reg_WPTR;
	f->rptr = 0;
	f->wptr = 0;
	f->memsz = memsz;
	f->size_mask = memsz - 1;
	WRITE_REG(priv, reg_CFG0, (u32) ((f->da & TX_RX_CFG0_BASE) | fsz_type));
	WRITE_REG(priv, reg_CFG1, H32_64(f->da));

	RET(0);
}

static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f)
{
	ENTER;
	if (f->va) {
		pci_free_consistent(priv->pdev,
				    f->memsz + FIFO_EXTRA_SPACE, f->va, f->da);
		f->va = NULL;
	}
	RET();
}
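
/* bdx_link_changed - reflect the MAC link state into the net_device:
 * stop the queue and drop the carrier on link loss, resume on link up.
 */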
static void bdx_link_changed(struct bdx_priv *priv)
{
	u32 link = READ_REG(priv, regMAC_LNK_STAT) & MAC_LINK_STAT;

	if (!link) {
		if (netif_carrier_ok(priv->ndev)) {
			netif_stop_queue(priv->ndev);
			netif_carrier_off(priv->ndev);
			netdev_err(priv->ndev, "Link Down\n");
		}
	} else {
		if (!netif_carrier_ok(priv->ndev)) {
			netif_wake_queue(priv->ndev);
			netif_carrier_on(priv->ndev);
			netdev_err(priv->ndev, "Link Up\n");
		}
	}
}

static void bdx_isr_extra(struct bdx_priv *priv, u32 isr)
{
	if (isr & IR_RX_FREE_0) {
		bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);
		DBG("RX_FREE_0\n");
	}

	if (isr & IR_LNKCHG0)
		bdx_link_changed(priv);

	if (isr & IR_PCIE_LINK)
		netdev_err(priv->ndev, "PCI-E Link Fault\n");

	if (isr & IR_PCIE_TOUT)
		netdev_err(priv->ndev, "PCI-E Time Out\n");
}
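
/* bdx_isr_napi - top-half interrupt handler.  Reading ISR masks all
 * interrupt sources, so they must be re-enabled on every exit path that
 * does not hand control to NAPI; when NAPI is scheduled, bdx_poll()
 * re-enables them once the poll completes.
 */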
static irqreturn_t bdx_isr_napi(int irq, void *dev)
{
	struct net_device *ndev = dev;
	struct bdx_priv *priv = netdev_priv(ndev);
	u32 isr;

	ENTER;
	isr = (READ_REG(priv, regISR) & IR_RUN);
	if (unlikely(!isr)) {
		bdx_enable_interrupts(priv);
		return IRQ_NONE;
	}

	if (isr & IR_EXTRA)
		bdx_isr_extra(priv, isr);

	if (isr & (IR_RX_DESC_0 | IR_TX_FREE_0)) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			__napi_schedule(&priv->napi);
			RET(IRQ_HANDLED);
		} else {
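			/*
			 * napi_schedule_prep() failed: the interrupt raced
			 * with a completing poll.  Interrupts are still
			 * masked (ISR was read above), so read back the
			 * WPTR registers; per the original driver design
			 * this nudges the chip into raising the interrupt
			 * again later.
			 */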
			READ_REG(priv, regTXF_WPTR_0);
			READ_REG(priv, regRXD_WPTR_0);
		}
	}

	bdx_enable_interrupts(priv);
	RET(IRQ_HANDLED);
}
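
/* bdx_poll - NAPI poll callback: reclaim completed TX descriptors, then
 * receive up to @budget packets.  The napi_stop counter bounds how many
 * consecutive full-budget polls are allowed before the driver forces a
 * fallback to interrupt-driven mode.
 */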
static int bdx_poll(struct napi_struct *napi, int budget)
{
	struct bdx_priv *priv = container_of(napi, struct bdx_priv, napi);
	int work_done;

	ENTER;
	bdx_tx_cleanup(priv);
	work_done = bdx_rx_receive(priv, &priv->rxd_fifo0, budget);
	if ((work_done < budget) ||
	    (priv->napi_stop++ >= 30)) {
		DBG("rx poll is done. backing to isr-driven\n");

		priv->napi_stop = 0;

		napi_complete_done(napi, work_done);
		bdx_enable_interrupts(priv);
	}
	return work_done;
}
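
/* bdx_fw_load - load chip firmware, if needed.  The first port to take
 * the INIT semaphore pushes the firmware image through the TxD fifo;
 * both ports then poll INIT_STATUS (up to ~400 ms) until the chip
 * reports it is initialized.
 */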
static int bdx_fw_load(struct bdx_priv *priv)
{
	const struct firmware *fw = NULL;
	int master, i;
	int rc;

	ENTER;
	master = READ_REG(priv, regINIT_SEMAPHORE);
	if (!READ_REG(priv, regINIT_STATUS) && master) {
		rc = request_firmware(&fw, "tehuti/bdx.bin", &priv->pdev->dev);
		if (rc)
			goto out;
		bdx_tx_push_desc_safe(priv, (char *)fw->data, fw->size);
		mdelay(100);
	}
	for (i = 0; i < 200; i++) {
		if (READ_REG(priv, regINIT_STATUS)) {
			rc = 0;
			goto out;
		}
		mdelay(2);
	}
	rc = -EIO;
out:
	if (master)
		WRITE_REG(priv, regINIT_SEMAPHORE, 1);

	release_firmware(fw);

	if (rc) {
		netdev_err(priv->ndev, "firmware loading failed\n");
		if (rc == -EIO)
			DBG("VPC = 0x%x VIC = 0x%x INIT_STATUS = 0x%x i=%d\n",
			    READ_REG(priv, regVPC),
			    READ_REG(priv, regVIC),
			    READ_REG(priv, regINIT_STATUS), i);
		RET(rc);
	} else {
		DBG("%s: firmware loading success\n", priv->ndev->name);
		RET(0);
	}
}
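
/* bdx_restore_mac - program the unicast MAC address into the three
 * 16-bit UNC_MAC registers (MAC2 holds the most significant word).
 */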
static void bdx_restore_mac(struct net_device *ndev, struct bdx_priv *priv)
{
	u32 val;

	ENTER;
	DBG("mac0=%x mac1=%x mac2=%x\n",
	    READ_REG(priv, regUNC_MAC0_A),
	    READ_REG(priv, regUNC_MAC1_A), READ_REG(priv, regUNC_MAC2_A));

	val = (ndev->dev_addr[0] << 8) | (ndev->dev_addr[1]);
	WRITE_REG(priv, regUNC_MAC2_A, val);
	val = (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]);
	WRITE_REG(priv, regUNC_MAC1_A, val);
	val = (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]);
	WRITE_REG(priv, regUNC_MAC0_A, val);

	DBG("mac0=%x mac1=%x mac2=%x\n",
	    READ_REG(priv, regUNC_MAC0_A),
	    READ_REG(priv, regUNC_MAC1_A), READ_REG(priv, regUNC_MAC2_A));
	RET();
}
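
/* bdx_hw_start - program MAC and fifo parameters, restore the MAC
 * address, request the IRQ and unmask interrupts.  Must be called after
 * the fifos are initialized and the firmware is loaded.
 */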
static int bdx_hw_start(struct bdx_priv *priv)
{
	int rc = -EIO;
	struct net_device *ndev = priv->ndev;

	ENTER;
	bdx_link_changed(priv);

	WRITE_REG(priv, regFRM_LENGTH, 0x3FE0);
	WRITE_REG(priv, regPAUSE_QUANT, 0x96);
	WRITE_REG(priv, regRX_FIFO_SECTION, 0x800010);
	WRITE_REG(priv, regTX_FIFO_SECTION, 0xE00010);
	WRITE_REG(priv, regRX_FULLNESS, 0);
	WRITE_REG(priv, regTX_FULLNESS, 0);
	WRITE_REG(priv, regCTRLST,
		  regCTRLST_BASE | regCTRLST_RX_ENA | regCTRLST_TX_ENA);

	WRITE_REG(priv, regVGLB, 0);
	WRITE_REG(priv, regMAX_FRAME_A,
		  priv->rxf_fifo0.m.pktsz & MAX_FRAME_AB_VAL);

	DBG("RDINTCM=%08x\n", priv->rdintcm);
	WRITE_REG(priv, regRDINTCM0, priv->rdintcm);
	WRITE_REG(priv, regRDINTCM2, 0);

	DBG("TDINTCM=%08x\n", priv->tdintcm);
	WRITE_REG(priv, regTDINTCM0, priv->tdintcm);

	bdx_restore_mac(priv->ndev, priv);

	WRITE_REG(priv, regGMAC_RXF_A, GMAC_RX_FILTER_OSEN |
		  GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB);

#define BDX_IRQ_TYPE	((priv->nic->irq_type == IRQ_MSI) ? 0 : IRQF_SHARED)

	rc = request_irq(priv->pdev->irq, bdx_isr_napi, BDX_IRQ_TYPE,
			 ndev->name, ndev);
	if (rc)
		goto err_irq;
	bdx_enable_interrupts(priv);

	RET(0);

err_irq:
	RET(rc);
}

static void bdx_hw_stop(struct bdx_priv *priv)
{
	ENTER;
	bdx_disable_interrupts(priv);
	free_irq(priv->pdev->irq, priv->ndev);

	netif_carrier_off(priv->ndev);
	netif_stop_queue(priv->ndev);

	RET();
}
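
/* Hard reset: pulse CLKPLL_SFTRST and wait for the PLL to report lock.
 * bdx_hw_reset_direct() works on a bare register mapping (used at probe
 * time, before bdx_priv exists); bdx_hw_reset() is the same sequence
 * through READ_REG/WRITE_REG for an initialized port.
 */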
static int bdx_hw_reset_direct(void __iomem *regs)
{
	u32 val, i;
	ENTER;

	val = readl(regs + regCLKPLL);
	writel((val | CLKPLL_SFTRST) + 0x8, regs + regCLKPLL);
	udelay(50);
	val = readl(regs + regCLKPLL);
	writel(val & ~CLKPLL_SFTRST, regs + regCLKPLL);

	for (i = 0; i < 70; i++, mdelay(10))
		if ((readl(regs + regCLKPLL) & CLKPLL_LKD) == CLKPLL_LKD) {
			readl(regs + regRXD_CFG0_0);
			return 0;
		}
	pr_err("HW reset failed\n");
	return 1;
}

static int bdx_hw_reset(struct bdx_priv *priv)
{
	u32 val, i;
	ENTER;

	if (priv->port == 0) {
		val = READ_REG(priv, regCLKPLL);
		WRITE_REG(priv, regCLKPLL, (val | CLKPLL_SFTRST) + 0x8);
		udelay(50);
		val = READ_REG(priv, regCLKPLL);
		WRITE_REG(priv, regCLKPLL, val & ~CLKPLL_SFTRST);
	}

	for (i = 0; i < 70; i++, mdelay(10))
		if ((READ_REG(priv, regCLKPLL) & CLKPLL_LKD) == CLKPLL_LKD) {
			READ_REG(priv, regRXD_CFG0_0);
			return 0;
		}
	pr_err("HW reset failed\n");
	return 1;
}
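
/* bdx_sw_reset - soft per-port reset, used where the PLL-based hard
 * reset is not taken: drain the RX filter, disable the port and queues,
 * wait for the port to quiesce, clear interrupt state and the per-fifo
 * pointers, then re-enable everything.
 */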
static int bdx_sw_reset(struct bdx_priv *priv)
{
	int i;

	ENTER;

	WRITE_REG(priv, regGMAC_RXF_A, 0);
	mdelay(100);

	WRITE_REG(priv, regDIS_PORT, 1);

	WRITE_REG(priv, regDIS_QU, 1);

	for (i = 0; i < 50; i++) {
		if (READ_REG(priv, regRST_PORT) & 1)
			break;
		mdelay(10);
	}
	if (i == 50)
		netdev_err(priv->ndev, "SW reset timeout. continuing anyway\n");

	WRITE_REG(priv, regRDINTCM0, 0);
	WRITE_REG(priv, regTDINTCM0, 0);
	WRITE_REG(priv, regIMR, 0);
	READ_REG(priv, regISR);

	WRITE_REG(priv, regRST_QU, 1);

	WRITE_REG(priv, regRST_PORT, 1);

	for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
		DBG("%x = %x\n", i, READ_REG(priv, i) & TXF_WPTR_WR_PTR);
	for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
		WRITE_REG(priv, i, 0);

	WRITE_REG(priv, regDIS_PORT, 0);

	WRITE_REG(priv, regDIS_QU, 0);

	WRITE_REG(priv, regRST_QU, 0);

	WRITE_REG(priv, regRST_PORT, 0);

	for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
		DBG("%x = %x\n", i, READ_REG(priv, i) & TXF_WPTR_WR_PTR);

	RET(0);
}
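
/* Only the 0x3009 device takes the full PLL-based hardware reset; the
 * other chips use the per-port software reset instead.
 */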
static int bdx_reset(struct bdx_priv *priv)
{
	ENTER;
	RET((priv->pdev->device == 0x3009)
	    ? bdx_hw_reset(priv)
	    : bdx_sw_reset(priv));
}
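
/* bdx_close - net_device stop callback: disable NAPI, reset the chip,
 * release the IRQ and free the RX/TX rings and their buffers.
 */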
static int bdx_close(struct net_device *ndev)
{
	struct bdx_priv *priv = NULL;

	ENTER;
	priv = netdev_priv(ndev);

	napi_disable(&priv->napi);

	bdx_reset(priv);
	bdx_hw_stop(priv);
	bdx_rx_free(priv);
	bdx_tx_free(priv);
	RET(0);
}
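
/* bdx_open - net_device open callback: reset the chip, allocate the TX
 * and RX rings, load the firmware, pre-fill the RX fifo with skbs and
 * start the datapath.  On any failure bdx_close() unwinds the partial
 * setup.
 */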
static int bdx_open(struct net_device *ndev)
{
	struct bdx_priv *priv;
	int rc;

	ENTER;
	priv = netdev_priv(ndev);
	bdx_reset(priv);
	if (netif_running(ndev))
		netif_stop_queue(priv->ndev);

	if ((rc = bdx_tx_init(priv)) ||
	    (rc = bdx_rx_init(priv)) ||
	    (rc = bdx_fw_load(priv)))
		goto err;

	bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);

	rc = bdx_hw_start(priv);
	if (rc)
		goto err;

	napi_enable(&priv->napi);

	print_fw_id(priv->nic);

	RET(0);

err:
	bdx_close(ndev);
	RET(rc);
}

static int bdx_range_check(struct bdx_priv *priv, u32 offset)
{
	return (offset > (u32) (BDX_REGS_SIZE / priv->nic->port_num)) ?
		-EINVAL : 0;
}
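
/* Private ioctls (SIOCDEVPRIVATE..+15) expose raw, range-checked
 * register reads and writes for diagnostics; they require
 * CAP_SYS_RAWIO.  The request is a triple of u32s:
 * {opcode, register offset, value}.
 */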
static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
	struct bdx_priv *priv = netdev_priv(ndev);
	u32 data[3];
	int error;

	ENTER;

	DBG("jiffies=%ld cmd=%d\n", jiffies, cmd);
	if (cmd != SIOCDEVPRIVATE) {
		error = copy_from_user(data, ifr->ifr_data, sizeof(data));
		if (error) {
			pr_err("can't copy from user\n");
			RET(-EFAULT);
		}
		DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]);
	}

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	switch (data[0]) {

	case BDX_OP_READ:
		error = bdx_range_check(priv, data[1]);
		if (error < 0)
			return error;
		data[2] = READ_REG(priv, data[1]);
		DBG("read_reg(0x%x)=0x%x (dec %d)\n", data[1], data[2],
		    data[2]);
		error = copy_to_user(ifr->ifr_data, data, sizeof(data));
		if (error)
			RET(-EFAULT);
		break;

	case BDX_OP_WRITE:
		error = bdx_range_check(priv, data[1]);
		if (error < 0)
			return error;
		WRITE_REG(priv, data[1], data[2]);
		DBG("write_reg(0x%x, 0x%x)\n", data[1], data[2]);
		break;

	default:
		RET(-EOPNOTSUPP);
	}
	return 0;
}

static int bdx_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
	ENTER;
	if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15))
		RET(bdx_ioctl_priv(ndev, ifr, cmd));
	else
		RET(-EOPNOTSUPP);
}
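
/* __bdx_vlan_rx_vid - set or clear one bit in the 4096-bit VLAN filter
 * table, which is laid out as consecutive 32-bit registers starting at
 * regVLAN_0.
 */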
static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
{
	struct bdx_priv *priv = netdev_priv(ndev);
	u32 reg, bit, val;

	ENTER;
	DBG2("vid=%d value=%d\n", (int)vid, enable);
	if (unlikely(vid >= 4096)) {
		pr_err("invalid VID: %u (>= 4096)\n", vid);
		RET();
	}
	reg = regVLAN_0 + (vid / 32) * 4;
	bit = 1 << vid % 32;
	val = READ_REG(priv, reg);
	DBG2("reg=%x, val=%x, bit=%d\n", reg, val, bit);
	if (enable)
		val |= bit;
	else
		val &= ~bit;
	DBG2("new val %x\n", val);
	WRITE_REG(priv, reg, val);
	RET();
}

static int bdx_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	__bdx_vlan_rx_vid(ndev, vid, 1);
	return 0;
}

static int bdx_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	__bdx_vlan_rx_vid(ndev, vid, 0);
	return 0;
}
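
/* bdx_change_mtu - the MTU only affects the RX buffer size, which is
 * fixed at ring-fill time, so a running interface is simply closed and
 * reopened with the new value.
 */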
static int bdx_change_mtu(struct net_device *ndev, int new_mtu)
{
	ENTER;

	ndev->mtu = new_mtu;
	if (netif_running(ndev)) {
		bdx_close(ndev);
		bdx_open(ndev);
	}
	RET(0);
}
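
/* bdx_setmulti - program the RX filter: promiscuous mode passes all
 * traffic, IFF_ALLMULTI saturates the multicast hash, and otherwise
 * each multicast address is reduced to a byte-XOR hash whose bit is set
 * in the hash table (the exact-match unicast slots are left cleared).
 */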
static void bdx_setmulti(struct net_device *ndev)
{
	struct bdx_priv *priv = netdev_priv(ndev);

	u32 rxf_val =
	    GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB | GMAC_RX_FILTER_OSEN;
	int i;

	ENTER;

	if (ndev->flags & IFF_PROMISC) {
		rxf_val |= GMAC_RX_FILTER_PRM;
	} else if (ndev->flags & IFF_ALLMULTI) {
		for (i = 0; i < MAC_MCST_HASH_NUM; i++)
			WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0);
	} else if (!netdev_mc_empty(ndev)) {
		u8 hash;
		struct netdev_hw_addr *ha;
		u32 reg, val;

		for (i = 0; i < MAC_MCST_HASH_NUM; i++)
			WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, 0);

		for (i = 0; i < MAC_MCST_NUM; i++) {
			WRITE_REG(priv, regRX_MAC_MCST0 + i * 8, 0);
			WRITE_REG(priv, regRX_MAC_MCST1 + i * 8, 0);
		}

		netdev_for_each_mc_addr(ha, ndev) {
			hash = 0;
			for (i = 0; i < ETH_ALEN; i++)
				hash ^= ha->addr[i];
			reg = regRX_MCST_HASH0 + ((hash >> 5) << 2);
			val = READ_REG(priv, reg);
			val |= (1 << (hash % 32));
			WRITE_REG(priv, reg, val);
		}

	} else {
		DBG("only own mac %d\n", netdev_mc_count(ndev));
		rxf_val |= GMAC_RX_FILTER_AB;
	}
	WRITE_REG(priv, regGMAC_RXF_A, rxf_val);

	RET();
}
static int bdx_set_mac(struct net_device *ndev, void *p)
{
	struct bdx_priv *priv = netdev_priv(ndev);
	struct sockaddr *addr = p;

	ENTER;

	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	bdx_restore_mac(ndev, priv);
	RET(0);
}

static int bdx_read_mac(struct bdx_priv *priv)
{
	u16 macAddress[3], i;
	ENTER;
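
	/* Each UNC_MAC register is read twice; the double read is kept
	 * from the original driver, where it presumably works around a
	 * stale first read after reset.
	 */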
	macAddress[2] = READ_REG(priv, regUNC_MAC0_A);
	macAddress[2] = READ_REG(priv, regUNC_MAC0_A);
	macAddress[1] = READ_REG(priv, regUNC_MAC1_A);
	macAddress[1] = READ_REG(priv, regUNC_MAC1_A);
	macAddress[0] = READ_REG(priv, regUNC_MAC2_A);
	macAddress[0] = READ_REG(priv, regUNC_MAC2_A);
	for (i = 0; i < 3; i++) {
		priv->ndev->dev_addr[i * 2 + 1] = macAddress[i];
		priv->ndev->dev_addr[i * 2] = macAddress[i] >> 8;
	}
	RET(0);
}
static u64 bdx_read_l2stat(struct bdx_priv *priv, int reg)
{
	u64 val;

	val = READ_REG(priv, reg);
	val |= ((u64) READ_REG(priv, reg + 8)) << 32;
	return val;
}
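
/* bdx_update_stats - snapshot the hardware MAC counters into
 * priv->hw_stats.  The counters live in four non-contiguous register
 * blocks, so the copy walks them range by range; the BDX_ASSERTs keep
 * the register map and struct bdx_stats in sync.
 */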
static void bdx_update_stats(struct bdx_priv *priv)
{
	struct bdx_stats *stats = &priv->hw_stats;
	u64 *stats_vector = (u64 *) stats;
	int i;
	int addr;

	addr = 0x7200;
	for (i = 0; i < 12; i++) {
		stats_vector[i] = bdx_read_l2stat(priv, addr);
		addr += 0x10;
	}
	BDX_ASSERT(addr != 0x72C0);

	addr = 0x72F0;
	for (; i < 16; i++) {
		stats_vector[i] = bdx_read_l2stat(priv, addr);
		addr += 0x10;
	}
	BDX_ASSERT(addr != 0x7330);

	addr = 0x7370;
	for (; i < 19; i++) {
		stats_vector[i] = bdx_read_l2stat(priv, addr);
		addr += 0x10;
	}
	BDX_ASSERT(addr != 0x73A0);

	addr = 0x73C0;
	for (; i < 23; i++) {
		stats_vector[i] = bdx_read_l2stat(priv, addr);
		addr += 0x10;
	}
	BDX_ASSERT(addr != 0x7400);
	BDX_ASSERT((sizeof(struct bdx_stats) / sizeof(u64)) != i);
}

static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
		       u16 rxd_vlan);
static void print_rxfd(struct rxf_desc *rxfd);
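
/* rxdb: a simple free-index stack used to recycle RX buffer slots.
 * Each element pairs an skb with its DMA mapping; free slot indices are
 * kept on the stack, so allocation and release are O(1).
 */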
static void bdx_rxdb_destroy(struct rxdb *db)
{
	vfree(db);
}

static struct rxdb *bdx_rxdb_create(int nelem)
{
	struct rxdb *db;
	int i;

	db = vmalloc(sizeof(struct rxdb)
		     + (nelem * sizeof(int))
		     + (nelem * sizeof(struct rx_map)));
	if (likely(db != NULL)) {
		db->stack = (int *)(db + 1);
		db->elems = (void *)(db->stack + nelem);
		db->nelem = nelem;
		db->top = nelem;
		for (i = 0; i < nelem; i++)
			db->stack[i] = nelem - i - 1;
	}

	return db;
}

static inline int bdx_rxdb_alloc_elem(struct rxdb *db)
{
	BDX_ASSERT(db->top <= 0);
	return db->stack[--(db->top)];
}

static inline void *bdx_rxdb_addr_elem(struct rxdb *db, int n)
{
	BDX_ASSERT((n < 0) || (n >= db->nelem));
	return db->elems + n;
}

static inline int bdx_rxdb_available(struct rxdb *db)
{
	return db->top;
}

static inline void bdx_rxdb_free_elem(struct rxdb *db, int n)
{
	BDX_ASSERT((n >= db->nelem) || (n < 0));
	db->stack[(db->top)++] = n;
}
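
/* bdx_rx_init - create the RX descriptor (RXD) and RX free (RXF) fifos
 * and the rxdb that tracks the skbs backing RXF entries; sets the RX
 * buffer size from the current MTU.
 */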
static int bdx_rx_init(struct bdx_priv *priv)
{
	ENTER;

	if (bdx_fifo_init(priv, &priv->rxd_fifo0.m, priv->rxd_size,
			  regRXD_CFG0_0, regRXD_CFG1_0,
			  regRXD_RPTR_0, regRXD_WPTR_0))
		goto err_mem;
	if (bdx_fifo_init(priv, &priv->rxf_fifo0.m, priv->rxf_size,
			  regRXF_CFG0_0, regRXF_CFG1_0,
			  regRXF_RPTR_0, regRXF_WPTR_0))
		goto err_mem;
	priv->rxdb = bdx_rxdb_create(priv->rxf_fifo0.m.memsz /
				     sizeof(struct rxf_desc));
	if (!priv->rxdb)
		goto err_mem;

	priv->rxf_fifo0.m.pktsz = priv->ndev->mtu + VLAN_ETH_HLEN;
	return 0;

err_mem:
	netdev_err(priv->ndev, "Rx init failed\n");
	return -ENOMEM;
}

static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
{
	struct rx_map *dm;
	struct rxdb *db = priv->rxdb;
	u16 i;

	ENTER;
	DBG("total=%d free=%d busy=%d\n", db->nelem, bdx_rxdb_available(db),
	    db->nelem - bdx_rxdb_available(db));
	while (bdx_rxdb_available(db) > 0) {
		i = bdx_rxdb_alloc_elem(db);
		dm = bdx_rxdb_addr_elem(db, i);
		dm->dma = 0;
	}
	for (i = 0; i < db->nelem; i++) {
		dm = bdx_rxdb_addr_elem(db, i);
		if (dm->dma) {
			pci_unmap_single(priv->pdev,
					 dm->dma, f->m.pktsz,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(dm->skb);
		}
	}
}

static void bdx_rx_free(struct bdx_priv *priv)
{
	ENTER;
	if (priv->rxdb) {
		bdx_rx_free_skbs(priv, &priv->rxf_fifo0);
		bdx_rxdb_destroy(priv->rxdb);
		priv->rxdb = NULL;
	}
	bdx_fifo_free(priv, &priv->rxf_fifo0.m);
	bdx_fifo_free(priv, &priv->rxd_fifo0.m);

	RET();
}
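
/* bdx_rx_alloc_skbs - refill the RXF fifo: for every free rxdb slot
 * (one slot is kept back), allocate and DMA-map an skb, build an RXF
 * descriptor pointing at it, and finally publish the new write pointer
 * to the chip.
 */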
static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
{
	struct sk_buff *skb;
	struct rxf_desc *rxfd;
	struct rx_map *dm;
	int dno, delta, idx;
	struct rxdb *db = priv->rxdb;

	ENTER;
	dno = bdx_rxdb_available(db) - 1;
	while (dno > 0) {
		skb = netdev_alloc_skb(priv->ndev, f->m.pktsz + NET_IP_ALIGN);
		if (!skb)
			break;

		skb_reserve(skb, NET_IP_ALIGN);

		idx = bdx_rxdb_alloc_elem(db);
		dm = bdx_rxdb_addr_elem(db, idx);
		dm->dma = pci_map_single(priv->pdev,
					 skb->data, f->m.pktsz,
					 PCI_DMA_FROMDEVICE);
		dm->skb = skb;
		rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
		rxfd->info = CPU_CHIP_SWAP32(0x10003);
		rxfd->va_lo = idx;
		rxfd->pa_lo = CPU_CHIP_SWAP32(L32_64(dm->dma));
		rxfd->pa_hi = CPU_CHIP_SWAP32(H32_64(dm->dma));
		rxfd->len = CPU_CHIP_SWAP32(f->m.pktsz);
		print_rxfd(rxfd);

		f->m.wptr += sizeof(struct rxf_desc);
		delta = f->m.wptr - f->m.memsz;
		if (unlikely(delta >= 0)) {
			f->m.wptr = delta;
			if (delta > 0) {
				memcpy(f->m.va, f->m.va + f->m.memsz, delta);
				DBG("wrapped descriptor\n");
			}
		}
		dno--;
	}

	WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
	RET();
}
static inline void
NETIF_RX_MUX(struct bdx_priv *priv, u32 rxd_val1, u16 rxd_vlan,
	     struct sk_buff *skb)
{
	ENTER;
	DBG("rxdd->flags.bits.vtag=%d\n", GET_RXD_VTAG(rxd_val1));
	if (GET_RXD_VTAG(rxd_val1)) {
		DBG("%s: vlan rcv vlan '%x' vtag '%x'\n",
		    priv->ndev->name,
		    GET_RXD_VLAN_ID(rxd_vlan),
		    GET_RXD_VTAG(rxd_val1));
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       GET_RXD_VLAN_TCI(rxd_vlan));
	}
	netif_receive_skb(skb);
}

static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
{
	struct rxf_desc *rxfd;
	struct rx_map *dm;
	struct rxf_fifo *f;
	struct rxdb *db;
	struct sk_buff *skb;
	int delta;

	ENTER;
	DBG("priv=%p rxdd=%p\n", priv, rxdd);
	f = &priv->rxf_fifo0;
	db = priv->rxdb;
	DBG("db=%p f=%p\n", db, f);
	dm = bdx_rxdb_addr_elem(db, rxdd->va_lo);
	DBG("dm=%p\n", dm);
	skb = dm->skb;
	rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
	rxfd->info = CPU_CHIP_SWAP32(0x10003);
	rxfd->va_lo = rxdd->va_lo;
	rxfd->pa_lo = CPU_CHIP_SWAP32(L32_64(dm->dma));
	rxfd->pa_hi = CPU_CHIP_SWAP32(H32_64(dm->dma));
	rxfd->len = CPU_CHIP_SWAP32(f->m.pktsz);
	print_rxfd(rxfd);

	f->m.wptr += sizeof(struct rxf_desc);
	delta = f->m.wptr - f->m.memsz;
	if (unlikely(delta >= 0)) {
		f->m.wptr = delta;
		if (delta > 0) {
			memcpy(f->m.va, f->m.va + f->m.memsz, delta);
			DBG("wrapped descriptor\n");
		}
	}
	RET();
}
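
/* bdx_rx_receive - NAPI RX: walk the RXD fifo between the cached read
 * pointer and the chip's write pointer.  Errored packets have their
 * buffers recycled back into the RXF fifo; short packets (under
 * BDX_COPYBREAK) are copied into a fresh skb so the large mapped buffer
 * can be recycled too.  Returns the number of packets delivered.
 */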
static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
{
	struct net_device *ndev = priv->ndev;
	struct sk_buff *skb, *skb2;
	struct rxd_desc *rxdd;
	struct rx_map *dm;
	struct rxf_fifo *rxf_fifo;
	int tmp_len, size;
	int done = 0;
	int max_done = BDX_MAX_RX_DONE;
	struct rxdb *db = NULL;
	u32 rxd_val1;
	u16 len;
	u16 rxd_vlan;

	ENTER;
	max_done = budget;

	f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_WR_PTR;

	size = f->m.wptr - f->m.rptr;
	if (size < 0)
		size = f->m.memsz + size;

	while (size > 0) {
		rxdd = (struct rxd_desc *)(f->m.va + f->m.rptr);
		rxd_val1 = CPU_CHIP_SWAP32(rxdd->rxd_val1);
		len = CPU_CHIP_SWAP16(rxdd->len);
		rxd_vlan = CPU_CHIP_SWAP16(rxdd->rxd_vlan);

		print_rxdd(rxdd, rxd_val1, len, rxd_vlan);

		tmp_len = GET_RXD_BC(rxd_val1) << 3;
		BDX_ASSERT(tmp_len <= 0);
		size -= tmp_len;
		if (size < 0)
			break;

		f->m.rptr += tmp_len;

		tmp_len = f->m.rptr - f->m.memsz;
		if (unlikely(tmp_len >= 0)) {
			f->m.rptr = tmp_len;
			if (tmp_len > 0) {
				DBG("wrapped desc rptr=%d tmp_len=%d\n",
				    f->m.rptr, tmp_len);
				memcpy(f->m.va + f->m.memsz, f->m.va, tmp_len);
			}
		}

		if (unlikely(GET_RXD_ERR(rxd_val1))) {
			DBG("rxd_err = 0x%x\n", GET_RXD_ERR(rxd_val1));
			ndev->stats.rx_errors++;
			bdx_recycle_skb(priv, rxdd);
			continue;
		}

		rxf_fifo = &priv->rxf_fifo0;
		db = priv->rxdb;
		dm = bdx_rxdb_addr_elem(db, rxdd->va_lo);
		skb = dm->skb;

		if (len < BDX_COPYBREAK &&
		    (skb2 = netdev_alloc_skb(priv->ndev, len + NET_IP_ALIGN))) {
			skb_reserve(skb2, NET_IP_ALIGN);
			pci_dma_sync_single_for_cpu(priv->pdev,
						    dm->dma, rxf_fifo->m.pktsz,
						    PCI_DMA_FROMDEVICE);
			memcpy(skb2->data, skb->data, len);
			bdx_recycle_skb(priv, rxdd);
			skb = skb2;
		} else {
			pci_unmap_single(priv->pdev,
					 dm->dma, rxf_fifo->m.pktsz,
					 PCI_DMA_FROMDEVICE);
			bdx_rxdb_free_elem(db, rxdd->va_lo);
		}

		ndev->stats.rx_bytes += len;

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, ndev);

		if (GET_RXD_PKT_ID(rxd_val1) == 0)
			skb_checksum_none_assert(skb);
		else
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		NETIF_RX_MUX(priv, rxd_val1, rxd_vlan, skb);

		if (++done >= max_done)
			break;
	}

	ndev->stats.rx_packets += done;

	WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);

	bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);

	RET(done);
}

static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
		       u16 rxd_vlan)
{
	DBG("ERROR: rxdd bc %d rxfq %d to %d type %d err %d rxp %d pkt_id %d vtag %d len %d vlan_id %d cfi %d prio %d va_lo %d va_hi %d\n",
	    GET_RXD_BC(rxd_val1), GET_RXD_RXFQ(rxd_val1), GET_RXD_TO(rxd_val1),
	    GET_RXD_TYPE(rxd_val1), GET_RXD_ERR(rxd_val1),
	    GET_RXD_RXP(rxd_val1), GET_RXD_PKT_ID(rxd_val1),
	    GET_RXD_VTAG(rxd_val1), len, GET_RXD_VLAN_ID(rxd_vlan),
	    GET_RXD_CFI(rxd_vlan), GET_RXD_PRIO(rxd_vlan), rxdd->va_lo,
	    rxdd->va_hi);
}

static void print_rxfd(struct rxf_desc *rxfd)
{
	DBG("=== RxF desc CHIP ORDER/ENDIANNESS =============\n"
	    "info 0x%x va_lo %u pa_lo 0x%x pa_hi 0x%x len 0x%x\n",
	    rxfd->info, rxfd->va_lo, rxfd->pa_lo, rxfd->pa_hi, rxfd->len);
}
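
/* txdb: a circular array of tx_map entries mirroring the TxD fifo.  For
 * each transmitted skb it holds one entry per DMA-mapped piece (head
 * plus fragments, len > 0) followed by a terminating entry that stores
 * the skb pointer and, as a negative len, the byte size of the whole
 * descriptor, so bdx_tx_cleanup() can unmap and free in order.
 */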
static inline int bdx_tx_db_size(struct txdb *db)
{
	int taken = db->wptr - db->rptr;
	if (taken < 0)
		taken = db->size + 1 + taken;

	return db->size - taken;
}

static inline void __bdx_tx_db_ptr_next(struct txdb *db, struct tx_map **pptr)
{
	BDX_ASSERT(db == NULL || pptr == NULL);

	BDX_ASSERT(*pptr != db->rptr &&
		   *pptr != db->wptr);

	BDX_ASSERT(*pptr < db->start ||
		   *pptr >= db->end);

	++*pptr;
	if (unlikely(*pptr == db->end))
		*pptr = db->start;
}

static inline void bdx_tx_db_inc_rptr(struct txdb *db)
{
	BDX_ASSERT(db->rptr == db->wptr);
	__bdx_tx_db_ptr_next(db, &db->rptr);
}

static inline void bdx_tx_db_inc_wptr(struct txdb *db)
{
	__bdx_tx_db_ptr_next(db, &db->wptr);
	BDX_ASSERT(db->rptr == db->wptr);
}

static int bdx_tx_db_init(struct txdb *d, int sz_type)
{
	int memsz = FIFO_SIZE * (1 << (sz_type + 1));

	d->start = vmalloc(memsz);
	if (!d->start)
		return -ENOMEM;

	d->size = memsz / sizeof(struct tx_map) - 1;
	d->end = d->start + d->size + 1;

	d->rptr = d->start;
	d->wptr = d->start;

	return 0;
}

static void bdx_tx_db_close(struct txdb *d)
{
	BDX_ASSERT(d == NULL);

	vfree(d->start);
	d->start = NULL;
}
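
/* Per-fragment-count descriptor sizes, precomputed at module load by
 * init_txd_sizes(): a TxD descriptor is a fixed header plus one 3-lword
 * PBL (len/pa_lo/pa_hi) for the head and one per fragment, rounded up
 * to a whole number of qwords.
 */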
static struct {
	u16 bytes;
	u16 qwords;
} txd_sizes[MAX_SKB_FRAGS + 1];
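
/* bdx_tx_map_skb - DMA-map the skb head and all page fragments, filling
 * the descriptor's PBL entries and recording each mapping in the txdb;
 * the final txdb entry stores the skb pointer for later freeing.
 */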
static inline void
bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb,
	       struct txd_desc *txdd)
{
	struct txdb *db = &priv->txdb;
	struct pbl *pbl = &txdd->pbl[0];
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;

	db->wptr->len = skb_headlen(skb);
	db->wptr->addr.dma = pci_map_single(priv->pdev, skb->data,
					    db->wptr->len, PCI_DMA_TODEVICE);
	pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
	pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma));
	pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma));
	DBG("=== pbl len: 0x%x ================\n", pbl->len);
	DBG("=== pbl pa_lo: 0x%x ================\n", pbl->pa_lo);
	DBG("=== pbl pa_hi: 0x%x ================\n", pbl->pa_hi);
	bdx_tx_db_inc_wptr(db);

	for (i = 0; i < nr_frags; i++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[i];
		db->wptr->len = skb_frag_size(frag);
		db->wptr->addr.dma = skb_frag_dma_map(&priv->pdev->dev, frag,
						      0, skb_frag_size(frag),
						      DMA_TO_DEVICE);

		pbl++;
		pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
		pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma));
		pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma));
		bdx_tx_db_inc_wptr(db);
	}

	db->wptr->len = -txd_sizes[nr_frags].bytes;
	db->wptr->addr.skb = skb;
	bdx_tx_db_inc_wptr(db);
}
static void __init init_txd_sizes(void)
{
	int i, lwords;

	for (i = 0; i < MAX_SKB_FRAGS + 1; i++) {
		lwords = 7 + (i * 3);
		if (lwords & 1)
			lwords++;
		txd_sizes[i].qwords = lwords >> 1;
		txd_sizes[i].bytes = lwords << 2;
	}
}
static int bdx_tx_init(struct bdx_priv *priv)
{
	if (bdx_fifo_init(priv, &priv->txd_fifo0.m, priv->txd_size,
			  regTXD_CFG0_0,
			  regTXD_CFG1_0, regTXD_RPTR_0, regTXD_WPTR_0))
		goto err_mem;
	if (bdx_fifo_init(priv, &priv->txf_fifo0.m, priv->txf_size,
			  regTXF_CFG0_0,
			  regTXF_CFG1_0, regTXF_RPTR_0, regTXF_WPTR_0))
		goto err_mem;

	if (bdx_tx_db_init(&priv->txdb, max(priv->txd_size, priv->txf_size)))
		goto err_mem;

	priv->tx_level = BDX_MAX_TX_LEVEL;
#ifdef BDX_DELAY_WPTR
	priv->tx_update_mark = priv->tx_level - 1024;
#endif
	return 0;

err_mem:
	netdev_err(priv->ndev, "Tx init failed\n");
	return -ENOMEM;
}

static inline int bdx_tx_space(struct bdx_priv *priv)
{
	struct txd_fifo *f = &priv->txd_fifo0;
	int fsize;

	f->m.rptr = READ_REG(priv, f->m.reg_RPTR) & TXF_WPTR_WR_PTR;
	fsize = f->m.rptr - f->m.wptr;
	if (fsize <= 0)
		fsize = f->m.memsz + fsize;
	return fsize;
}
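
/* bdx_tx_transmit - ndo_start_xmit: build a TxD descriptor (checksum
 * offload, TSO mss and VLAN tag flags in word 1), map the skb, advance
 * the fifo write pointer and account the consumed bytes against
 * tx_level; the queue is stopped when tx_level drops below
 * BDX_MIN_TX_LEVEL.  With BDX_DELAY_WPTR the doorbell write is batched.
 */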
static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
				   struct net_device *ndev)
{
	struct bdx_priv *priv = netdev_priv(ndev);
	struct txd_fifo *f = &priv->txd_fifo0;
	int txd_checksum = 7;
	int txd_lgsnd = 0;
	int txd_vlan_id = 0;
	int txd_vtag = 0;
	int txd_mss = 0;

	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct txd_desc *txdd;
	int len;
	unsigned long flags;

	ENTER;
	local_irq_save(flags);
	spin_lock(&priv->tx_lock);

	BDX_ASSERT(f->m.wptr >= f->m.memsz);
	txdd = (struct txd_desc *)(f->m.va + f->m.wptr);
	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
		txd_checksum = 0;

	if (skb_shinfo(skb)->gso_size) {
		txd_mss = skb_shinfo(skb)->gso_size;
		txd_lgsnd = 1;
		DBG("skb %p skb len %d gso size = %d\n", skb, skb->len,
		    txd_mss);
	}

	if (skb_vlan_tag_present(skb)) {
		txd_vlan_id = skb_vlan_tag_get(skb) & BITS_MASK(12);
		txd_vtag = 1;
	}

	txdd->length = CPU_CHIP_SWAP16(skb->len);
	txdd->mss = CPU_CHIP_SWAP16(txd_mss);
	txdd->txd_val1 =
	    CPU_CHIP_SWAP32(TXD_W1_VAL
			    (txd_sizes[nr_frags].qwords, txd_checksum, txd_vtag,
			     txd_lgsnd, txd_vlan_id));
	DBG("=== TxD desc =====================\n");
	DBG("=== w1: 0x%x ================\n", txdd->txd_val1);
	DBG("=== w2: mss 0x%x len 0x%x\n", txdd->mss, txdd->length);

	bdx_tx_map_skb(priv, skb, txdd);

	f->m.wptr += txd_sizes[nr_frags].bytes;
	len = f->m.wptr - f->m.memsz;
	if (unlikely(len >= 0)) {
		f->m.wptr = len;
		if (len > 0) {
			BDX_ASSERT(len > f->m.memsz);
			memcpy(f->m.va, f->m.va + f->m.memsz, len);
		}
	}
	BDX_ASSERT(f->m.wptr >= f->m.memsz);

	priv->tx_level -= txd_sizes[nr_frags].bytes;
	BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL);
#ifdef BDX_DELAY_WPTR
	if (priv->tx_level > priv->tx_update_mark) {
		WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
	} else {
		if (priv->tx_noupd++ > BDX_NO_UPD_PACKETS) {
			priv->tx_noupd = 0;
			WRITE_REG(priv, f->m.reg_WPTR,
				  f->m.wptr & TXF_WPTR_WR_PTR);
		}
	}
#else
	WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
#endif
#ifdef BDX_LLTX
	netif_trans_update(ndev);
#endif
	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;

	if (priv->tx_level < BDX_MIN_TX_LEVEL) {
		DBG("%s: %s: TX Q STOP level %d\n",
		    BDX_DRV_NAME, ndev->name, priv->tx_level);
		netif_stop_queue(ndev);
	}

	spin_unlock_irqrestore(&priv->tx_lock, flags);
	return NETDEV_TX_OK;
}
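
/* bdx_tx_cleanup - reclaim completed descriptors reported through the
 * TXF fifo: unmap every DMA piece recorded in the txdb, free the skb,
 * return the reclaimed bytes to tx_level and wake the queue if it was
 * stopped for lack of space.
 */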
static void bdx_tx_cleanup(struct bdx_priv *priv)
{
	struct txf_fifo *f = &priv->txf_fifo0;
	struct txdb *db = &priv->txdb;
	int tx_level = 0;

	ENTER;
	f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_MASK;
	BDX_ASSERT(f->m.rptr >= f->m.memsz);

	while (f->m.wptr != f->m.rptr) {
		f->m.rptr += BDX_TXF_DESC_SZ;
		f->m.rptr &= f->m.size_mask;

		BDX_ASSERT(db->rptr->len == 0);
		do {
			BDX_ASSERT(db->rptr->addr.dma == 0);
			pci_unmap_page(priv->pdev, db->rptr->addr.dma,
				       db->rptr->len, PCI_DMA_TODEVICE);
			bdx_tx_db_inc_rptr(db);
		} while (db->rptr->len > 0);
		tx_level -= db->rptr->len;

		dev_kfree_skb_irq(db->rptr->addr.skb);
		bdx_tx_db_inc_rptr(db);
	}

	BDX_ASSERT((f->m.wptr & TXF_WPTR_WR_PTR) >= f->m.memsz);
	WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);

	spin_lock(&priv->tx_lock);
	priv->tx_level += tx_level;
	BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL);
#ifdef BDX_DELAY_WPTR
	if (priv->tx_noupd) {
		priv->tx_noupd = 0;
		WRITE_REG(priv, priv->txd_fifo0.m.reg_WPTR,
			  priv->txd_fifo0.m.wptr & TXF_WPTR_WR_PTR);
	}
#endif

	if (unlikely(netif_queue_stopped(priv->ndev) &&
		     netif_carrier_ok(priv->ndev) &&
		     (priv->tx_level >= BDX_MIN_TX_LEVEL))) {
		DBG("%s: %s: TX Q WAKE level %d\n",
		    BDX_DRV_NAME, priv->ndev->name, priv->tx_level);
		netif_wake_queue(priv->ndev);
	}
	spin_unlock(&priv->tx_lock);
}

static void bdx_tx_free_skbs(struct bdx_priv *priv)
{
	struct txdb *db = &priv->txdb;

	ENTER;
	while (db->rptr != db->wptr) {
		if (likely(db->rptr->len))
			pci_unmap_page(priv->pdev, db->rptr->addr.dma,
				       db->rptr->len, PCI_DMA_TODEVICE);
		else
			dev_kfree_skb(db->rptr->addr.skb);
		bdx_tx_db_inc_rptr(db);
	}
	RET();
}

static void bdx_tx_free(struct bdx_priv *priv)
{
	ENTER;
	bdx_tx_free_skbs(priv);
	bdx_fifo_free(priv, &priv->txd_fifo0.m);
	bdx_fifo_free(priv, &priv->txf_fifo0.m);
	bdx_tx_db_close(&priv->txdb);
}
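
/* bdx_tx_push_desc - copy a raw, pre-built descriptor (used for the
 * firmware image) straight into the TxD fifo, handling wrap-around, and
 * ring the doorbell.  bdx_tx_push_desc_safe() below is the slow variant
 * that waits for fifo space and is safe to call outside the tx path.
 */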
static void bdx_tx_push_desc(struct bdx_priv *priv, void *data, int size)
{
	struct txd_fifo *f = &priv->txd_fifo0;
	int i = f->m.memsz - f->m.wptr;

	if (size == 0)
		return;

	if (i > size) {
		memcpy(f->m.va + f->m.wptr, data, size);
		f->m.wptr += size;
	} else {
		memcpy(f->m.va + f->m.wptr, data, i);
		f->m.wptr = size - i;
		memcpy(f->m.va, data + i, f->m.wptr);
	}
	WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
}

static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size)
{
	int timer = 0;
	ENTER;

	while (size > 0) {
		int avail = bdx_tx_space(priv) - 8;
		if (avail <= 0) {
			if (timer++ > 300) {
				DBG("timeout while writing desc to TxD fifo\n");
				break;
			}
			udelay(50);
			continue;
		}
		avail = min(avail, size);
		DBG("about to push %d bytes starting %p size %d\n", avail,
		    data, size);
		bdx_tx_push_desc(priv, data, avail);
		size -= avail;
		data += avail;
	}
	RET();
}
static const struct net_device_ops bdx_netdev_ops = {
	.ndo_open		= bdx_open,
	.ndo_stop		= bdx_close,
	.ndo_start_xmit		= bdx_tx_transmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bdx_ioctl,
	.ndo_set_rx_mode	= bdx_setmulti,
	.ndo_change_mtu		= bdx_change_mtu,
	.ndo_set_mac_address	= bdx_set_mac,
	.ndo_vlan_rx_add_vid	= bdx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= bdx_vlan_rx_kill_vid,
};
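
/* bdx_probe - PCI probe: enable the device, pick a 64- or 32-bit DMA
 * mask, map BAR0, reset the chip and, for each port on the NIC,
 * allocate and register a net_device with its per-port register window,
 * fifo sizes and interrupt moderation defaults.
 */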
static int
bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *ndev;
	struct bdx_priv *priv;
	int err, pci_using_dac, port;
	unsigned long pciaddr;
	u32 regionSize;
	struct pci_nic *nic;

	ENTER;

	nic = vmalloc(sizeof(*nic));
	if (!nic)
		RET(-ENOMEM);

	err = pci_enable_device(pdev);
	if (err)
		goto err_pci;

	if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) &&
	    !(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) {
		pci_using_dac = 1;
	} else {
		if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) ||
		    (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
			pr_err("No usable DMA configuration, aborting\n");
			goto err_dma;
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, BDX_DRV_NAME);
	if (err)
		goto err_dma;

	pci_set_master(pdev);

	pciaddr = pci_resource_start(pdev, 0);
	if (!pciaddr) {
		err = -EIO;
		pr_err("no MMIO resource\n");
		goto err_out_res;
	}
	regionSize = pci_resource_len(pdev, 0);
	if (regionSize < BDX_REGS_SIZE) {
		err = -EIO;
		pr_err("MMIO resource (%x) too small\n", regionSize);
		goto err_out_res;
	}

	nic->regs = ioremap(pciaddr, regionSize);
	if (!nic->regs) {
		err = -EIO;
		pr_err("ioremap failed\n");
		goto err_out_res;
	}

	if (pdev->irq < 2) {
		err = -EIO;
		pr_err("invalid irq (%d)\n", pdev->irq);
		goto err_out_iomap;
	}
	pci_set_drvdata(pdev, nic);

	if (pdev->device == 0x3014)
		nic->port_num = 2;
	else
		nic->port_num = 1;

	print_hw_id(pdev);

	bdx_hw_reset_direct(nic->regs);

	nic->irq_type = IRQ_INTX;
#ifdef BDX_MSI
	if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) {
		err = pci_enable_msi(pdev);
		if (err)
			pr_err("Can't enable msi. error is %d\n", err);
		else
			nic->irq_type = IRQ_MSI;
	} else
		DBG("HW does not support MSI\n");
#endif

	for (port = 0; port < nic->port_num; port++) {
		ndev = alloc_etherdev(sizeof(struct bdx_priv));
		if (!ndev) {
			err = -ENOMEM;
			goto err_out_iomap;
		}

		ndev->netdev_ops = &bdx_netdev_ops;
		ndev->tx_queue_len = BDX_NDEV_TXQ_LEN;

		bdx_set_ethtool_ops(ndev);

		ndev->if_port = port;
		ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
		    | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
		    NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM;
		ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
		    NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX;

		if (pci_using_dac)
			ndev->features |= NETIF_F_HIGHDMA;

		priv = nic->priv[port] = netdev_priv(ndev);

		priv->pBdxRegs = nic->regs + port * 0x8000;
		priv->port = port;
		priv->pdev = pdev;
		priv->ndev = ndev;
		priv->nic = nic;
		priv->msg_enable = BDX_DEF_MSG_ENABLE;

		netif_napi_add(ndev, &priv->napi, bdx_poll, 64);

		if ((readl(nic->regs + FPGA_VER) & 0xFFF) == 308) {
			DBG("HW statistics not supported\n");
			priv->stats_flag = 0;
		} else {
			priv->stats_flag = 1;
		}

		priv->txd_size = 2;
		priv->txf_size = 2;
		priv->rxd_size = 2;
		priv->rxf_size = 3;

		priv->rdintcm = INT_REG_VAL(0x20, 1, 4, 12);
		priv->tdintcm = INT_REG_VAL(0x20, 1, 0, 12);

#ifdef BDX_LLTX
		ndev->features |= NETIF_F_LLTX;
#endif

		ndev->min_mtu = ETH_ZLEN;
		ndev->max_mtu = BDX_MAX_MTU;

		spin_lock_init(&priv->tx_lock);

		if (bdx_read_mac(priv)) {
			pr_err("load MAC address failed\n");
			goto err_out_iomap;
		}
		SET_NETDEV_DEV(ndev, &pdev->dev);
		err = register_netdev(ndev);
		if (err) {
			pr_err("register_netdev failed\n");
			goto err_out_free;
		}
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);

		print_eth_id(ndev);
	}
	RET(0);

err_out_free:
	free_netdev(ndev);
err_out_iomap:
	iounmap(nic->regs);
err_out_res:
	pci_release_regions(pdev);
err_dma:
	pci_disable_device(pdev);
err_pci:
	vfree(nic);

	RET(err);
}
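
/* ethtool statistics names, in the exact order of struct bdx_stats (and
 * therefore of the hardware counter blocks read by bdx_update_stats()).
 */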
static const char
 bdx_stat_names[][ETH_GSTRING_LEN] = {
	"InUCast",
	"InMCast",
	"InBCast",
	"InPkts",
	"InErrors",
	"InDropped",
	"FrameTooLong",
	"FrameSequenceErrors",
	"InVLAN",
	"InDroppedDFE",
	"InDroppedIntFull",
	"InFrameAlignErrors",

	"OutUCast",
	"OutMCast",
	"OutBCast",
	"OutPkts",

	"OutVLAN",
	"InUCastOctects",
	"OutUCastOctects",

	"InBCastOctects",
	"OutBCastOctects",
	"InOctects",
	"OutOctects",
};
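
/* bdx_get_settings - report the fixed 10 Gb/s full-duplex fibre link
 * and translate the interrupt moderation registers into the legacy
 * maxtxpkt/maxrxpkt fields.
 */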
static int bdx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
	u32 rdintcm;
	u32 tdintcm;
	struct bdx_priv *priv = netdev_priv(netdev);

	rdintcm = priv->rdintcm;
	tdintcm = priv->tdintcm;

	ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
	ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
	ethtool_cmd_speed_set(ecmd, SPEED_10000);
	ecmd->duplex = DUPLEX_FULL;
	ecmd->port = PORT_FIBRE;
	ecmd->transceiver = XCVR_EXTERNAL;
	ecmd->autoneg = AUTONEG_DISABLE;

	ecmd->maxtxpkt =
	    ((GET_PCK_TH(tdintcm) * PCK_TH_MULT) / BDX_TXF_DESC_SZ);
	ecmd->maxrxpkt =
	    ((GET_PCK_TH(rdintcm) * PCK_TH_MULT) / sizeof(struct rxf_desc));

	return 0;
}

static void
bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct bdx_priv *priv = netdev_priv(netdev);

	strlcpy(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, pci_name(priv->pdev),
		sizeof(drvinfo->bus_info));
}

static int
bdx_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal)
{
	u32 rdintcm;
	u32 tdintcm;
	struct bdx_priv *priv = netdev_priv(netdev);

	rdintcm = priv->rdintcm;
	tdintcm = priv->tdintcm;

	ecoal->rx_coalesce_usecs = GET_INT_COAL(rdintcm) * INT_COAL_MULT;
	ecoal->rx_max_coalesced_frames =
	    ((GET_PCK_TH(rdintcm) * PCK_TH_MULT) / sizeof(struct rxf_desc));

	ecoal->tx_coalesce_usecs = GET_INT_COAL(tdintcm) * INT_COAL_MULT;
	ecoal->tx_max_coalesced_frames =
	    ((GET_PCK_TH(tdintcm) * PCK_TH_MULT) / BDX_TXF_DESC_SZ);

	return 0;
}
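
/* bdx_set_coalesce - convert the requested usecs/frames into the packed
 * RDINTCM/TDINTCM register format (INT_REG_VAL), preserving the current
 * reset-coalescing and RXF-threshold fields, and program both registers.
 */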
static int
bdx_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal)
{
	u32 rdintcm;
	u32 tdintcm;
	struct bdx_priv *priv = netdev_priv(netdev);
	int rx_coal;
	int tx_coal;
	int rx_max_coal;
	int tx_max_coal;

	rx_coal = ecoal->rx_coalesce_usecs / INT_COAL_MULT;
	tx_coal = ecoal->tx_coalesce_usecs / INT_COAL_MULT;
	rx_max_coal = ecoal->rx_max_coalesced_frames;
	tx_max_coal = ecoal->tx_max_coalesced_frames;

	rx_max_coal =
	    (((rx_max_coal * sizeof(struct rxf_desc)) + PCK_TH_MULT - 1)
	     / PCK_TH_MULT);
	tx_max_coal =
	    (((tx_max_coal * BDX_TXF_DESC_SZ) + PCK_TH_MULT - 1)
	     / PCK_TH_MULT);

	if ((rx_coal > 0x7FFF) || (tx_coal > 0x7FFF) ||
	    (rx_max_coal > 0xF) || (tx_max_coal > 0xF))
		return -EINVAL;

	rdintcm = INT_REG_VAL(rx_coal, GET_INT_COAL_RC(priv->rdintcm),
			      GET_RXF_TH(priv->rdintcm), rx_max_coal);
	tdintcm = INT_REG_VAL(tx_coal, GET_INT_COAL_RC(priv->tdintcm), 0,
			      tx_max_coal);

	priv->rdintcm = rdintcm;
	priv->tdintcm = tdintcm;

	WRITE_REG(priv, regRDINTCM0, rdintcm);
	WRITE_REG(priv, regTDINTCM0, tdintcm);

	return 0;
}

static inline int bdx_rx_fifo_size_to_packets(int rx_size)
{
	return (FIFO_SIZE * (1 << rx_size)) / sizeof(struct rxf_desc);
}

static inline int bdx_tx_fifo_size_to_packets(int tx_size)
{
	return (FIFO_SIZE * (1 << tx_size)) / BDX_TXF_DESC_SZ;
}

static void
bdx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
	struct bdx_priv *priv = netdev_priv(netdev);

	ring->rx_max_pending = bdx_rx_fifo_size_to_packets(3);
	ring->tx_max_pending = bdx_tx_fifo_size_to_packets(3);
	ring->rx_pending = bdx_rx_fifo_size_to_packets(priv->rxf_size);
	ring->tx_pending = bdx_tx_fifo_size_to_packets(priv->txd_size);
}

static int
bdx_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
	struct bdx_priv *priv = netdev_priv(netdev);
	int rx_size = 0;
	int tx_size = 0;

	for (; rx_size < 4; rx_size++) {
		if (bdx_rx_fifo_size_to_packets(rx_size) >= ring->rx_pending)
			break;
	}
	if (rx_size == 4)
		rx_size = 3;

	for (; tx_size < 4; tx_size++) {
		if (bdx_tx_fifo_size_to_packets(tx_size) >= ring->tx_pending)
			break;
	}
	if (tx_size == 4)
		tx_size = 3;

	if ((rx_size == priv->rxf_size) &&
	    (tx_size == priv->txd_size))
		return 0;

	priv->rxf_size = rx_size;
	if (rx_size > 1)
		priv->rxd_size = rx_size - 1;
	else
		priv->rxd_size = rx_size;

	priv->txf_size = priv->txd_size = tx_size;

	if (netif_running(netdev)) {
		bdx_close(netdev);
		bdx_open(netdev);
	}
	return 0;
}

static void bdx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *bdx_stat_names, sizeof(bdx_stat_names));
		break;
	}
}

static int bdx_get_sset_count(struct net_device *netdev, int stringset)
{
	struct bdx_priv *priv = netdev_priv(netdev);

	switch (stringset) {
	case ETH_SS_STATS:
		BDX_ASSERT(ARRAY_SIZE(bdx_stat_names)
			   != sizeof(struct bdx_stats) / sizeof(u64));
		return (priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0;
	}

	return -EINVAL;
}

static void bdx_get_ethtool_stats(struct net_device *netdev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct bdx_priv *priv = netdev_priv(netdev);

	if (priv->stats_flag) {
		bdx_update_stats(priv);

		memcpy(data, &priv->hw_stats, sizeof(priv->hw_stats));
	}
}

static void bdx_set_ethtool_ops(struct net_device *netdev)
{
	static const struct ethtool_ops bdx_ethtool_ops = {
		.get_settings = bdx_get_settings,
		.get_drvinfo = bdx_get_drvinfo,
		.get_link = ethtool_op_get_link,
		.get_coalesce = bdx_get_coalesce,
		.set_coalesce = bdx_set_coalesce,
		.get_ringparam = bdx_get_ringparam,
		.set_ringparam = bdx_set_ringparam,
		.get_strings = bdx_get_strings,
		.get_sset_count = bdx_get_sset_count,
		.get_ethtool_stats = bdx_get_ethtool_stats,
	};

	netdev->ethtool_ops = &bdx_ethtool_ops;
}
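
/* bdx_remove - PCI remove: unregister and free every port's net_device,
 * disable MSI if it was enabled, unmap the registers and release the
 * PCI resources in reverse probe order.
 */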
static void bdx_remove(struct pci_dev *pdev)
{
	struct pci_nic *nic = pci_get_drvdata(pdev);
	struct net_device *ndev;
	int port;

	for (port = 0; port < nic->port_num; port++) {
		ndev = nic->priv[port]->ndev;
		unregister_netdev(ndev);
		free_netdev(ndev);
	}

#ifdef BDX_MSI
	if (nic->irq_type == IRQ_MSI)
		pci_disable_msi(pdev);
#endif

	iounmap(nic->regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	vfree(nic);

	RET();
}

static struct pci_driver bdx_pci_driver = {
	.name		= BDX_DRV_NAME,
	.id_table	= bdx_pci_tbl,
	.probe		= bdx_probe,
	.remove		= bdx_remove,
};

static void __init print_driver_id(void)
{
	pr_info("%s, %s\n", BDX_DRV_DESC, BDX_DRV_VERSION);
	pr_info("Options: hw_csum %s\n", BDX_MSI_STRING);
}

static int __init bdx_module_init(void)
{
	ENTER;
	init_txd_sizes();
	print_driver_id();
	RET(pci_register_driver(&bdx_pci_driver));
}

module_init(bdx_module_init);

static void __exit bdx_module_exit(void)
{
	ENTER;
	pci_unregister_driver(&bdx_pci_driver);
	RET();
}

module_exit(bdx_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(BDX_DRV_DESC);
MODULE_FIRMWARE("tehuti/bdx.bin");