#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "tehuti.h"

static const struct pci_device_id bdx_pci_tbl[] = {
	{ PCI_VDEVICE(TEHUTI, 0x3009), },
	{ PCI_VDEVICE(TEHUTI, 0x3010), },
	{ PCI_VDEVICE(TEHUTI, 0x3014), },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bdx_pci_tbl);

static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f);
static void bdx_tx_cleanup(struct bdx_priv *priv);
static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget);

static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size);

static int bdx_tx_init(struct bdx_priv *priv);
static int bdx_rx_init(struct bdx_priv *priv);

static void bdx_rx_free(struct bdx_priv *priv);
static void bdx_tx_free(struct bdx_priv *priv);

static void bdx_set_ethtool_ops(struct net_device *netdev);

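/* Print the adapter's identification: name, port count, on-chip version
 * registers (SROM, FPGA version and seed) and the negotiated PCIe link
 * parameters (lane count, max payload size, max read request size).
 */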
static void print_hw_id(struct pci_dev *pdev)
{
	struct pci_nic *nic = pci_get_drvdata(pdev);
	u16 pci_link_status = 0;
	u16 pci_ctrl = 0;

	pci_read_config_word(pdev, PCI_LINK_STATUS_REG, &pci_link_status);
	pci_read_config_word(pdev, PCI_DEV_CTRL_REG, &pci_ctrl);

	pr_info("%s%s\n", BDX_NIC_NAME,
		nic->port_num == 1 ? "" : ", 2-Port");
	pr_info("srom 0x%x fpga %d build %u lane# %d max_pl 0x%x mrrs 0x%x\n",
		readl(nic->regs + SROM_VER), readl(nic->regs + FPGA_VER) & 0xFFF,
		readl(nic->regs + FPGA_SEED),
		GET_LINK_STATUS_LANES(pci_link_status),
		GET_DEV_CTRL_MAXPL(pci_ctrl), GET_DEV_CTRL_MRRS(pci_ctrl));
}

static void print_fw_id(struct pci_nic *nic)
{
	pr_info("fw 0x%x\n", readl(nic->regs + FW_VER));
}

static void print_eth_id(struct net_device *ndev)
{
	netdev_info(ndev, "%s, Port %c\n",
		    BDX_NIC_NAME, (ndev->if_port == 0) ? 'A' : 'B');
}

#define bdx_enable_interrupts(priv)	\
	do { WRITE_REG(priv, regIMR, IR_RUN); } while (0)
#define bdx_disable_interrupts(priv)	\
	do { WRITE_REG(priv, regIMR, 0); } while (0)

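/* bdx_fifo_init - create a map for a fifo and program its registers.
 * @priv: NIC private structure
 * @f: fifo to initialize
 * @fsz_type: fifo size type; the fifo holds FIFO_SIZE * (1 << fsz_type)
 *            bytes
 * @reg_CFG0, @reg_CFG1, @reg_RPTR, @reg_WPTR: offsets of this fifo's
 *            configuration and read/write pointer registers
 *
 * FIFO_EXTRA_SPACE bytes are allocated past the end of the fifo so that
 * a descriptor wrapping the fifo edge can be fixed up with a simple
 * memcpy (see the wrap handling in the rx/tx paths below).
 * Returns 0 on success, a negative value on failure.
 */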
static int
bdx_fifo_init(struct bdx_priv *priv, struct fifo *f, int fsz_type,
	      u16 reg_CFG0, u16 reg_CFG1, u16 reg_RPTR, u16 reg_WPTR)
{
	u16 memsz = FIFO_SIZE * (1 << fsz_type);

	memset(f, 0, sizeof(struct fifo));

	f->va = pci_alloc_consistent(priv->pdev,
				     memsz + FIFO_EXTRA_SPACE, &f->da);
	if (!f->va) {
		pr_err("pci_alloc_consistent failed\n");
		RET(-ENOMEM);
	}
	f->reg_CFG0 = reg_CFG0;
	f->reg_CFG1 = reg_CFG1;
	f->reg_RPTR = reg_RPTR;
	f->reg_WPTR = reg_WPTR;
	f->rptr = 0;
	f->wptr = 0;
	f->memsz = memsz;
	f->size_mask = memsz - 1;
	WRITE_REG(priv, reg_CFG0, (u32) ((f->da & TX_RX_CFG0_BASE) | fsz_type));
	WRITE_REG(priv, reg_CFG1, H32_64(f->da));

	RET(0);
}

static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f)
{
	ENTER;
	if (f->va) {
		pci_free_consistent(priv->pdev,
				    f->memsz + FIFO_EXTRA_SPACE, f->va, f->da);
		f->va = NULL;
	}
	RET();
}

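/* bdx_link_changed - reflect the MAC link state in the net_device: stop
 * the queue and drop the carrier on link loss, wake the queue and raise
 * the carrier when the link comes back.
 */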
static void bdx_link_changed(struct bdx_priv *priv)
{
	u32 link = READ_REG(priv, regMAC_LNK_STAT) & MAC_LINK_STAT;

	if (!link) {
		if (netif_carrier_ok(priv->ndev)) {
			netif_stop_queue(priv->ndev);
			netif_carrier_off(priv->ndev);
			netdev_err(priv->ndev, "Link Down\n");
		}
	} else {
		if (!netif_carrier_ok(priv->ndev)) {
			netif_wake_queue(priv->ndev);
			netif_carrier_on(priv->ndev);
			netdev_err(priv->ndev, "Link Up\n");
		}
	}
}

static void bdx_isr_extra(struct bdx_priv *priv, u32 isr)
{
	if (isr & IR_RX_FREE_0) {
		bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);
		DBG("RX_FREE_0\n");
	}

	if (isr & IR_LNKCHG0)
		bdx_link_changed(priv);

	if (isr & IR_PCIE_LINK)
		netdev_err(priv->ndev, "PCI-E Link Fault\n");

	if (isr & IR_PCIE_TOUT)
		netdev_err(priv->ndev, "PCI-E Time Out\n");
}

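/* bdx_isr_napi - interrupt service routine for the NIC.
 *
 * Reading regISR latches the pending interrupt reasons and masks further
 * interrupts until regIMR is rewritten, which is why every exit path
 * that does not hand work off to NAPI re-enables interrupts explicitly.
 * Housekeeping sources (rx-free, link change, PCIe faults) are handled
 * inline via bdx_isr_extra(); rx/tx completion work is deferred to NAPI,
 * and bdx_poll() re-enables interrupts when polling is done.
 */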
static irqreturn_t bdx_isr_napi(int irq, void *dev)
{
	struct net_device *ndev = dev;
	struct bdx_priv *priv = netdev_priv(ndev);
	u32 isr;

	ENTER;
	isr = (READ_REG(priv, regISR) & IR_RUN);
	if (unlikely(!isr)) {
		bdx_enable_interrupts(priv);
		return IRQ_NONE;
	}

	if (isr & IR_EXTRA)
		bdx_isr_extra(priv, isr);

	if (isr & (IR_RX_DESC_0 | IR_TX_FREE_0)) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			__napi_schedule(&priv->napi);
			RET(IRQ_HANDLED);
		} else {
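			/* NAPI was already running, so the schedule failed:
			 * the interrupt appears to have slipped into the
			 * window between napi_complete_done() and
			 * bdx_enable_interrupts() in bdx_poll(). Touch the
			 * pointer registers to nudge the chip into raising
			 * the interrupt again later.
			 */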
			READ_REG(priv, regTXF_WPTR_0);
			READ_REG(priv, regRXD_WPTR_0);
		}
	}

	bdx_enable_interrupts(priv);
	RET(IRQ_HANDLED);
}

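/* bdx_poll - NAPI poll callback: reclaim completed tx descriptors, then
 * receive up to @budget packets. After 30 consecutive full-budget polls
 * the routine completes NAPI anyway, so the driver periodically falls
 * back to interrupt-driven operation.
 */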
static int bdx_poll(struct napi_struct *napi, int budget)
{
	struct bdx_priv *priv = container_of(napi, struct bdx_priv, napi);
	int work_done;

	ENTER;
	bdx_tx_cleanup(priv);
	work_done = bdx_rx_receive(priv, &priv->rxd_fifo0, budget);
	if ((work_done < budget) ||
	    (priv->napi_stop++ >= 30)) {
		DBG("rx poll is done. backing to isr-driven\n");

		priv->napi_stop = 0;

		napi_complete_done(napi, work_done);
		bdx_enable_interrupts(priv);
	}
	return work_done;
}

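/* bdx_fw_load - load the firmware to the NIC.
 *
 * The image is pushed through the TxD fifo. The regINIT_SEMAPHORE read
 * decides which caller actually pushes the image (relevant on dual-port
 * boards where both ports share one chip); everyone then polls
 * regINIT_STATUS for up to ~400 ms to see the firmware come alive.
 * Returns 0 on success, -EIO on timeout.
 */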
static int bdx_fw_load(struct bdx_priv *priv)
{
	const struct firmware *fw = NULL;
	int master, i;
	int rc;

	ENTER;
	master = READ_REG(priv, regINIT_SEMAPHORE);
	if (!READ_REG(priv, regINIT_STATUS) && master) {
		rc = request_firmware(&fw, "tehuti/bdx.bin", &priv->pdev->dev);
		if (rc)
			goto out;
		bdx_tx_push_desc_safe(priv, (char *)fw->data, fw->size);
		mdelay(100);
	}
	for (i = 0; i < 200; i++) {
		if (READ_REG(priv, regINIT_STATUS)) {
			rc = 0;
			goto out;
		}
		mdelay(2);
	}
	rc = -EIO;
out:
	if (master)
		WRITE_REG(priv, regINIT_SEMAPHORE, 1);

	release_firmware(fw);

	if (rc) {
		netdev_err(priv->ndev, "firmware loading failed\n");
		if (rc == -EIO)
			DBG("VPC = 0x%x VIC = 0x%x INIT_STATUS = 0x%x i=%d\n",
			    READ_REG(priv, regVPC),
			    READ_REG(priv, regVIC),
			    READ_REG(priv, regINIT_STATUS), i);
		RET(rc);
	} else {
		DBG("%s: firmware loading succeeded\n", priv->ndev->name);
		RET(0);
	}
}

static void bdx_restore_mac(struct net_device *ndev, struct bdx_priv *priv)
{
	u32 val;

	ENTER;
	DBG("mac0=%x mac1=%x mac2=%x\n",
	    READ_REG(priv, regUNC_MAC0_A),
	    READ_REG(priv, regUNC_MAC1_A), READ_REG(priv, regUNC_MAC2_A));

	val = (ndev->dev_addr[0] << 8) | (ndev->dev_addr[1]);
	WRITE_REG(priv, regUNC_MAC2_A, val);
	val = (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]);
	WRITE_REG(priv, regUNC_MAC1_A, val);
	val = (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]);
	WRITE_REG(priv, regUNC_MAC0_A, val);

	DBG("mac0=%x mac1=%x mac2=%x\n",
	    READ_REG(priv, regUNC_MAC0_A),
	    READ_REG(priv, regUNC_MAC1_A), READ_REG(priv, regUNC_MAC2_A));
	RET();
}

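/* bdx_hw_start - program frame length, pause quanta, fifo thresholds and
 * interrupt coalescing, restore the unicast MAC filter, request the IRQ
 * and unmask interrupts. Returns 0 on success, a negative value on
 * failure.
 */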
static int bdx_hw_start(struct bdx_priv *priv)
{
	int rc = -EIO;
	struct net_device *ndev = priv->ndev;

	ENTER;
	bdx_link_changed(priv);

	WRITE_REG(priv, regFRM_LENGTH, 0x3FE0);
	WRITE_REG(priv, regPAUSE_QUANT, 0x96);
	WRITE_REG(priv, regRX_FIFO_SECTION, 0x800010);
	WRITE_REG(priv, regTX_FIFO_SECTION, 0xE00010);
	WRITE_REG(priv, regRX_FULLNESS, 0);
	WRITE_REG(priv, regTX_FULLNESS, 0);
	WRITE_REG(priv, regCTRLST,
		  regCTRLST_BASE | regCTRLST_RX_ENA | regCTRLST_TX_ENA);

	WRITE_REG(priv, regVGLB, 0);
	WRITE_REG(priv, regMAX_FRAME_A,
		  priv->rxf_fifo0.m.pktsz & MAX_FRAME_AB_VAL);

	DBG("RDINTCM=%08x\n", priv->rdintcm);
	WRITE_REG(priv, regRDINTCM0, priv->rdintcm);
	WRITE_REG(priv, regRDINTCM2, 0);

	DBG("TDINTCM=%08x\n", priv->tdintcm);
	WRITE_REG(priv, regTDINTCM0, priv->tdintcm);

	bdx_restore_mac(priv->ndev, priv);

	WRITE_REG(priv, regGMAC_RXF_A, GMAC_RX_FILTER_OSEN |
		  GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB);

#define BDX_IRQ_TYPE	((priv->nic->irq_type == IRQ_MSI) ? 0 : IRQF_SHARED)

	rc = request_irq(priv->pdev->irq, bdx_isr_napi, BDX_IRQ_TYPE,
			 ndev->name, ndev);
	if (rc)
		goto err_irq;
	bdx_enable_interrupts(priv);

	RET(0);

err_irq:
	RET(rc);
}

static void bdx_hw_stop(struct bdx_priv *priv)
{
	ENTER;
	bdx_disable_interrupts(priv);
	free_irq(priv->pdev->irq, priv->ndev);

	netif_carrier_off(priv->ndev);
	netif_stop_queue(priv->ndev);

	RET();
}

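/* The two flavours of the PLL soft reset. bdx_hw_reset_direct() operates
 * on a bare register window before the private structure exists (probe
 * time); bdx_hw_reset() is the post-probe variant and pulses the PLL
 * reset only from port 0, since the PLL is shared between ports. Both
 * then poll the CLKPLL_LKD lock bit for up to 700 ms and issue a dummy
 * PCI-E read transaction before returning. Returns 0 on success, 1 on
 * failure.
 */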
static int bdx_hw_reset_direct(void __iomem *regs)
{
	u32 val, i;
	ENTER;

	val = readl(regs + regCLKPLL);
	writel((val | CLKPLL_SFTRST) + 0x8, regs + regCLKPLL);
	udelay(50);
	val = readl(regs + regCLKPLL);
	writel(val & ~CLKPLL_SFTRST, regs + regCLKPLL);

	for (i = 0; i < 70; i++, mdelay(10))
		if ((readl(regs + regCLKPLL) & CLKPLL_LKD) == CLKPLL_LKD) {
			readl(regs + regRXD_CFG0_0);
			return 0;
		}
	pr_err("HW reset failed\n");
	return 1;
}

static int bdx_hw_reset(struct bdx_priv *priv)
{
	u32 val, i;
	ENTER;

	if (priv->port == 0) {
		val = READ_REG(priv, regCLKPLL);
		WRITE_REG(priv, regCLKPLL, (val | CLKPLL_SFTRST) + 0x8);
		udelay(50);
		val = READ_REG(priv, regCLKPLL);
		WRITE_REG(priv, regCLKPLL, val & ~CLKPLL_SFTRST);
	}

	for (i = 0; i < 70; i++, mdelay(10))
		if ((READ_REG(priv, regCLKPLL) & CLKPLL_LKD) == CLKPLL_LKD) {
			READ_REG(priv, regRXD_CFG0_0);
			return 0;
		}
	pr_err("HW reset failed\n");
	return 1;
}

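/* bdx_sw_reset - graceful software reset, used on chips where the PLL
 * reset is not taken (see bdx_reset below): stop the Rx MAC, disable the
 * port and queues, wait for the port-disable handshake, clear interrupt
 * coalescing and masks, pulse the queue and port reset bits, zero all
 * TxD/TxF read/write pointers, then clear the disable and reset bits.
 */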
static int bdx_sw_reset(struct bdx_priv *priv)
{
	int i;

	ENTER;

	WRITE_REG(priv, regGMAC_RXF_A, 0);
	mdelay(100);

	WRITE_REG(priv, regDIS_PORT, 1);
	WRITE_REG(priv, regDIS_QU, 1);

	for (i = 0; i < 50; i++) {
		if (READ_REG(priv, regRST_PORT) & 1)
			break;
		mdelay(10);
	}
	if (i == 50)
		netdev_err(priv->ndev, "SW reset timeout. continuing anyway\n");

	WRITE_REG(priv, regRDINTCM0, 0);
	WRITE_REG(priv, regTDINTCM0, 0);
	WRITE_REG(priv, regIMR, 0);
	READ_REG(priv, regISR);

	WRITE_REG(priv, regRST_QU, 1);
	WRITE_REG(priv, regRST_PORT, 1);

	for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
		DBG("%x = %x\n", i, READ_REG(priv, i) & TXF_WPTR_WR_PTR);
	for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
		WRITE_REG(priv, i, 0);

	WRITE_REG(priv, regDIS_PORT, 0);
	WRITE_REG(priv, regDIS_QU, 0);
	WRITE_REG(priv, regRST_QU, 0);
	WRITE_REG(priv, regRST_PORT, 0);

	for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
		DBG("%x = %x\n", i, READ_REG(priv, i) & TXF_WPTR_WR_PTR);

	RET(0);
}

static int bdx_reset(struct bdx_priv *priv)
{
	ENTER;
	RET((priv->pdev->device == 0x3009)
	    ? bdx_hw_reset(priv)
	    : bdx_sw_reset(priv));
}

static int bdx_close(struct net_device *ndev)
{
	struct bdx_priv *priv = NULL;

	ENTER;
	priv = netdev_priv(ndev);

	napi_disable(&priv->napi);

	bdx_reset(priv);
	bdx_hw_stop(priv);
	bdx_rx_free(priv);
	bdx_tx_free(priv);
	RET(0);
}

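/* bdx_open - called when the interface is brought up.
 *
 * Resets the chip, allocates the tx/rx fifos and the rx buffer pool,
 * loads the firmware if needed, pre-fills the RxF fifo with skbs and
 * starts the hardware. On any failure the whole sequence is unwound
 * via bdx_close(). Returns 0 on success, a negative value on failure.
 */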
static int bdx_open(struct net_device *ndev)
{
	struct bdx_priv *priv;
	int rc;

	ENTER;
	priv = netdev_priv(ndev);
	bdx_reset(priv);
	if (netif_running(ndev))
		netif_stop_queue(priv->ndev);

	if ((rc = bdx_tx_init(priv)) ||
	    (rc = bdx_rx_init(priv)) ||
	    (rc = bdx_fw_load(priv)))
		goto err;

	bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);

	rc = bdx_hw_start(priv);
	if (rc)
		goto err;

	napi_enable(&priv->napi);

	print_fw_id(priv->nic);

	RET(0);

err:
	bdx_close(ndev);
	RET(rc);
}

static int bdx_range_check(struct bdx_priv *priv, u32 offset)
{
	return (offset > (u32) (BDX_REGS_SIZE / priv->nic->port_num)) ?
		-EINVAL : 0;
}

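/* bdx_ioctl_priv - private register-access ioctl. Userspace passes three
 * u32 words through ifr_data: data[0] is the opcode (BDX_OP_READ or
 * BDX_OP_WRITE), data[1] the register offset, data[2] the value read or
 * to be written. The offset is range-checked against this port's slice
 * of the register window, and CAP_SYS_RAWIO is required.
 */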
static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
	struct bdx_priv *priv = netdev_priv(ndev);
	u32 data[3];
	int error;

	ENTER;

	DBG("jiffies=%ld cmd=%d\n", jiffies, cmd);
	if (cmd != SIOCDEVPRIVATE) {
		error = copy_from_user(data, ifr->ifr_data, sizeof(data));
		if (error) {
			pr_err("can't copy from user\n");
			RET(-EFAULT);
		}
		DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]);
	} else {
		return -EOPNOTSUPP;
	}

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	switch (data[0]) {

	case BDX_OP_READ:
		error = bdx_range_check(priv, data[1]);
		if (error < 0)
			return error;
		data[2] = READ_REG(priv, data[1]);
		DBG("read_reg(0x%x)=0x%x (dec %d)\n", data[1], data[2],
		    data[2]);
		error = copy_to_user(ifr->ifr_data, data, sizeof(data));
		if (error)
			RET(-EFAULT);
		break;

	case BDX_OP_WRITE:
		error = bdx_range_check(priv, data[1]);
		if (error < 0)
			return error;
		WRITE_REG(priv, data[1], data[2]);
		DBG("write_reg(0x%x, 0x%x)\n", data[1], data[2]);
		break;

	default:
		RET(-EOPNOTSUPP);
	}
	return 0;
}

static int bdx_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
	ENTER;
	if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15))
		RET(bdx_ioctl_priv(ndev, ifr, cmd));
	else
		RET(-EOPNOTSUPP);
}

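/* __bdx_vlan_rx_vid - private helper for adding/killing a VLAN vid.
 * The chip keeps a 4096-bit bitmask, one bit per vid, spread across the
 * regVLAN_0 register array; this sets or clears the bit for @vid
 * according to @enable.
 */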
static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
{
	struct bdx_priv *priv = netdev_priv(ndev);
	u32 reg, bit, val;

	ENTER;
	DBG2("vid=%d value=%d\n", (int)vid, enable);
	if (unlikely(vid >= 4096)) {
		pr_err("invalid VID: %u (>= 4096)\n", vid);
		RET();
	}
	reg = regVLAN_0 + (vid / 32) * 4;
	bit = 1 << vid % 32;
	val = READ_REG(priv, reg);
	DBG2("reg=%x, val=%x, bit=%d\n", reg, val, bit);
	if (enable)
		val |= bit;
	else
		val &= ~bit;
	DBG2("new val %x\n", val);
	WRITE_REG(priv, reg, val);
	RET();
}

static int bdx_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	__bdx_vlan_rx_vid(ndev, vid, 1);
	return 0;
}

static int bdx_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	__bdx_vlan_rx_vid(ndev, vid, 0);
	return 0;
}

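/* bdx_change_mtu - store the new MTU and, if the interface is running,
 * bounce it (close + open) so that the rx buffer size and the hardware
 * max-frame register are recomputed for the new value.
 */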
static int bdx_change_mtu(struct net_device *ndev, int new_mtu)
{
	ENTER;

	ndev->mtu = new_mtu;
	if (netif_running(ndev)) {
		bdx_close(ndev);
		bdx_open(ndev);
	}
	RET(0);
}

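/* bdx_setmulti - program the rx filter for the current promiscuity and
 * multicast state. IFF_PROMISC turns on the promiscuous bit; IFF_ALLMULTI
 * fills the multicast hash with ones; otherwise the hash is rebuilt from
 * the device's multicast list, each address hashed by XOR-ing its six
 * bytes together.
 */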
static void bdx_setmulti(struct net_device *ndev)
{
	struct bdx_priv *priv = netdev_priv(ndev);

	u32 rxf_val =
	    GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB | GMAC_RX_FILTER_OSEN;
	int i;

	ENTER;

	if (ndev->flags & IFF_PROMISC) {
		rxf_val |= GMAC_RX_FILTER_PRM;
	} else if (ndev->flags & IFF_ALLMULTI) {
		for (i = 0; i < MAC_MCST_HASH_NUM; i++)
			WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0);
	} else if (!netdev_mc_empty(ndev)) {
		u8 hash;
		struct netdev_hw_addr *ha;
		u32 reg, val;

		for (i = 0; i < MAC_MCST_HASH_NUM; i++)
			WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, 0);

		for (i = 0; i < MAC_MCST_NUM; i++) {
			WRITE_REG(priv, regRX_MAC_MCST0 + i * 8, 0);
			WRITE_REG(priv, regRX_MAC_MCST1 + i * 8, 0);
		}

		netdev_for_each_mc_addr(ha, ndev) {
			hash = 0;
			for (i = 0; i < ETH_ALEN; i++)
				hash ^= ha->addr[i];
			reg = regRX_MCST_HASH0 + ((hash >> 5) << 2);
			val = READ_REG(priv, reg);
			val |= (1 << (hash % 32));
			WRITE_REG(priv, reg, val);
		}
	} else {
		DBG("only own mac %d\n", netdev_mc_count(ndev));
		rxf_val |= GMAC_RX_FILTER_AB;
	}
	WRITE_REG(priv, regGMAC_RXF_A, rxf_val);

	RET();
}

static int bdx_set_mac(struct net_device *ndev, void *p)
{
	struct bdx_priv *priv = netdev_priv(ndev);
	struct sockaddr *addr = p;

	ENTER;

	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	bdx_restore_mac(ndev, priv);
	RET(0);
}

static int bdx_read_mac(struct bdx_priv *priv)
{
	u16 macAddress[3], i;
	ENTER;

	macAddress[2] = READ_REG(priv, regUNC_MAC0_A);
	macAddress[2] = READ_REG(priv, regUNC_MAC0_A);
	macAddress[1] = READ_REG(priv, regUNC_MAC1_A);
	macAddress[1] = READ_REG(priv, regUNC_MAC1_A);
	macAddress[0] = READ_REG(priv, regUNC_MAC2_A);
	macAddress[0] = READ_REG(priv, regUNC_MAC2_A);
	for (i = 0; i < 3; i++) {
		priv->ndev->dev_addr[i * 2 + 1] = macAddress[i];
		priv->ndev->dev_addr[i * 2] = macAddress[i] >> 8;
	}
	RET(0);
}

static u64 bdx_read_l2stat(struct bdx_priv *priv, int reg)
{
	u64 val;

	val = READ_REG(priv, reg);
	val |= ((u64) READ_REG(priv, reg + 8)) << 32;
	return val;
}

static void bdx_update_stats(struct bdx_priv *priv)
{
	struct bdx_stats *stats = &priv->hw_stats;
	u64 *stats_vector = (u64 *) stats;
	int i;
	int addr;

	addr = 0x7200;
	for (i = 0; i < 12; i++) {
		stats_vector[i] = bdx_read_l2stat(priv, addr);
		addr += 0x10;
	}
	BDX_ASSERT(addr != 0x72C0);

	addr = 0x72F0;
	for (; i < 16; i++) {
		stats_vector[i] = bdx_read_l2stat(priv, addr);
		addr += 0x10;
	}
	BDX_ASSERT(addr != 0x7330);

	addr = 0x7370;
	for (; i < 19; i++) {
		stats_vector[i] = bdx_read_l2stat(priv, addr);
		addr += 0x10;
	}
	BDX_ASSERT(addr != 0x73A0);

	addr = 0x73C0;
	for (; i < 23; i++) {
		stats_vector[i] = bdx_read_l2stat(priv, addr);
		addr += 0x10;
	}
	BDX_ASSERT(addr != 0x7400);
	BDX_ASSERT((sizeof(struct bdx_stats) / sizeof(u64)) != i);
}

static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
		       u16 rxd_vlan);
static void print_rxfd(struct rxf_desc *rxfd);

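/* Rx DB: a simple free-list of rx buffer slots, kept as a stack of free
 * indices on top of an array of rx_map elements. A slot index travels to
 * the hardware in the rxf descriptor's va_lo field and comes back in the
 * matching rxd descriptor, letting the rx path find the skb and the DMA
 * mapping for a completed buffer in O(1).
 */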
static void bdx_rxdb_destroy(struct rxdb *db)
{
	vfree(db);
}

static struct rxdb *bdx_rxdb_create(int nelem)
{
	struct rxdb *db;
	int i;

	db = vmalloc(sizeof(struct rxdb)
		     + (nelem * sizeof(int))
		     + (nelem * sizeof(struct rx_map)));
	if (likely(db != NULL)) {
		db->stack = (int *)(db + 1);
		db->elems = (void *)(db->stack + nelem);
		db->nelem = nelem;
		db->top = nelem;
		for (i = 0; i < nelem; i++)
			db->stack[i] = nelem - i - 1;
	}

	return db;
}

static inline int bdx_rxdb_alloc_elem(struct rxdb *db)
{
	BDX_ASSERT(db->top <= 0);
	return db->stack[--(db->top)];
}

static inline void *bdx_rxdb_addr_elem(struct rxdb *db, int n)
{
	BDX_ASSERT((n < 0) || (n >= db->nelem));
	return db->elems + n;
}

static inline int bdx_rxdb_available(struct rxdb *db)
{
	return db->top;
}

static inline void bdx_rxdb_free_elem(struct rxdb *db, int n)
{
	BDX_ASSERT((n >= db->nelem) || (n < 0));
	db->stack[(db->top)++] = n;
}

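/* bdx_rx_init - initialize the RXD and RXF fifos and the rx buffer
 * database (sized to the number of rxf descriptors the fifo can hold),
 * and compute the rx buffer size for the current MTU. Returns 0 on
 * success, -ENOMEM on failure.
 */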
static int bdx_rx_init(struct bdx_priv *priv)
{
	ENTER;

	if (bdx_fifo_init(priv, &priv->rxd_fifo0.m, priv->rxd_size,
			  regRXD_CFG0_0, regRXD_CFG1_0,
			  regRXD_RPTR_0, regRXD_WPTR_0))
		goto err_mem;
	if (bdx_fifo_init(priv, &priv->rxf_fifo0.m, priv->rxf_size,
			  regRXF_CFG0_0, regRXF_CFG1_0,
			  regRXF_RPTR_0, regRXF_WPTR_0))
		goto err_mem;
	priv->rxdb = bdx_rxdb_create(priv->rxf_fifo0.m.memsz /
				     sizeof(struct rxf_desc));
	if (!priv->rxdb)
		goto err_mem;

	priv->rxf_fifo0.m.pktsz = priv->ndev->mtu + VLAN_ETH_HLEN;
	return 0;

err_mem:
	netdev_err(priv->ndev, "Rx init failed\n");
	return -ENOMEM;
}

static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
{
	struct rx_map *dm;
	struct rxdb *db = priv->rxdb;
	u16 i;

	ENTER;
	DBG("total=%d free=%d busy=%d\n", db->nelem, bdx_rxdb_available(db),
	    db->nelem - bdx_rxdb_available(db));
	while (bdx_rxdb_available(db) > 0) {
		i = bdx_rxdb_alloc_elem(db);
		dm = bdx_rxdb_addr_elem(db, i);
		dm->dma = 0;
	}
	for (i = 0; i < db->nelem; i++) {
		dm = bdx_rxdb_addr_elem(db, i);
		if (dm->dma) {
			pci_unmap_single(priv->pdev,
					 dm->dma, f->m.pktsz,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(dm->skb);
		}
	}
}

static void bdx_rx_free(struct bdx_priv *priv)
{
	ENTER;
	if (priv->rxdb) {
		bdx_rx_free_skbs(priv, &priv->rxf_fifo0);
		bdx_rxdb_destroy(priv->rxdb);
		priv->rxdb = NULL;
	}
	bdx_fifo_free(priv, &priv->rxf_fifo0.m);
	bdx_fifo_free(priv, &priv->rxd_fifo0.m);

	RET();
}

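/* bdx_rx_alloc_skbs - refill the RxF fifo with fresh buffers.
 *
 * For every free slot in the rx database (minus one, so the free-list is
 * never fully drained) an skb is allocated, mapped for DMA, and
 * described to the chip by an rxf descriptor carrying the slot index in
 * va_lo. The fifo write pointer is published to the hardware once at
 * the end.
 */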
static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
{
	struct sk_buff *skb;
	struct rxf_desc *rxfd;
	struct rx_map *dm;
	int dno, delta, idx;
	struct rxdb *db = priv->rxdb;

	ENTER;
	dno = bdx_rxdb_available(db) - 1;
	while (dno > 0) {
		skb = netdev_alloc_skb(priv->ndev, f->m.pktsz + NET_IP_ALIGN);
		if (!skb)
			break;

		skb_reserve(skb, NET_IP_ALIGN);

		idx = bdx_rxdb_alloc_elem(db);
		dm = bdx_rxdb_addr_elem(db, idx);
		dm->dma = pci_map_single(priv->pdev,
					 skb->data, f->m.pktsz,
					 PCI_DMA_FROMDEVICE);
		dm->skb = skb;
		rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
		rxfd->info = CPU_CHIP_SWAP32(0x10003);
		rxfd->va_lo = idx;
		rxfd->pa_lo = CPU_CHIP_SWAP32(L32_64(dm->dma));
		rxfd->pa_hi = CPU_CHIP_SWAP32(H32_64(dm->dma));
		rxfd->len = CPU_CHIP_SWAP32(f->m.pktsz);
		print_rxfd(rxfd);

		f->m.wptr += sizeof(struct rxf_desc);
		delta = f->m.wptr - f->m.memsz;
		if (unlikely(delta >= 0)) {
			f->m.wptr = delta;
			if (delta > 0) {
				memcpy(f->m.va, f->m.va + f->m.memsz, delta);
				DBG("wrapped descriptor\n");
			}
		}
		dno--;
	}

	WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
	RET();
}

static inline void
NETIF_RX_MUX(struct bdx_priv *priv, u32 rxd_val1, u16 rxd_vlan,
	     struct sk_buff *skb)
{
	ENTER;
	DBG("rxdd->flags.bits.vtag=%d\n", GET_RXD_VTAG(rxd_val1));
	if (GET_RXD_VTAG(rxd_val1)) {
		DBG("%s: vlan rcv vlan '%x' vtag '%x'\n",
		    priv->ndev->name,
		    GET_RXD_VLAN_ID(rxd_vlan),
		    GET_RXD_VTAG(rxd_val1));
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       GET_RXD_VLAN_TCI(rxd_vlan));
	}
	netif_receive_skb(skb);
}

static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
{
	struct rxf_desc *rxfd;
	struct rx_map *dm;
	struct rxf_fifo *f;
	struct rxdb *db;
	int delta;

	ENTER;
	DBG("priv=%p rxdd=%p\n", priv, rxdd);
	f = &priv->rxf_fifo0;
	db = priv->rxdb;
	DBG("db=%p f=%p\n", db, f);
	dm = bdx_rxdb_addr_elem(db, rxdd->va_lo);
	DBG("dm=%p\n", dm);
	rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
	rxfd->info = CPU_CHIP_SWAP32(0x10003);
	rxfd->va_lo = rxdd->va_lo;
	rxfd->pa_lo = CPU_CHIP_SWAP32(L32_64(dm->dma));
	rxfd->pa_hi = CPU_CHIP_SWAP32(H32_64(dm->dma));
	rxfd->len = CPU_CHIP_SWAP32(f->m.pktsz);
	print_rxfd(rxfd);

	f->m.wptr += sizeof(struct rxf_desc);
	delta = f->m.wptr - f->m.memsz;
	if (unlikely(delta >= 0)) {
		f->m.wptr = delta;
		if (delta > 0) {
			memcpy(f->m.va, f->m.va + f->m.memsz, delta);
			DBG("wrapped descriptor\n");
		}
	}
	RET();
}

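/* bdx_rx_receive - receive and process up to @budget packets from the
 * RXD fifo.
 *
 * For each completed descriptor: errored packets get their buffer
 * recycled back to the RxF fifo; short packets (below BDX_COPYBREAK) are
 * copied into a fresh small skb so the large mapped buffer can be
 * recycled too; everything else is unmapped and handed up the stack via
 * NETIF_RX_MUX() with checksum and VLAN information taken from the
 * descriptor. Returns the number of packets delivered.
 */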
static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
{
	struct net_device *ndev = priv->ndev;
	struct sk_buff *skb, *skb2;
	struct rxd_desc *rxdd;
	struct rx_map *dm;
	struct rxf_fifo *rxf_fifo;
	int tmp_len, size;
	int done = 0;
	int max_done = BDX_MAX_RX_DONE;
	struct rxdb *db = NULL;
	u32 rxd_val1;
	u16 len;
	u16 rxd_vlan;

	ENTER;
	max_done = budget;

	f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_WR_PTR;

	size = f->m.wptr - f->m.rptr;
	if (size < 0)
		size = f->m.memsz + size;

	while (size > 0) {

		rxdd = (struct rxd_desc *)(f->m.va + f->m.rptr);
		rxd_val1 = CPU_CHIP_SWAP32(rxdd->rxd_val1);
		len = CPU_CHIP_SWAP16(rxdd->len);
		rxd_vlan = CPU_CHIP_SWAP16(rxdd->rxd_vlan);

		print_rxdd(rxdd, rxd_val1, len, rxd_vlan);

		tmp_len = GET_RXD_BC(rxd_val1) << 3;
		BDX_ASSERT(tmp_len <= 0);
		size -= tmp_len;
		if (size < 0)
			break;

		f->m.rptr += tmp_len;

		tmp_len = f->m.rptr - f->m.memsz;
		if (unlikely(tmp_len >= 0)) {
			f->m.rptr = tmp_len;
			if (tmp_len > 0) {
				DBG("wrapped desc rptr=%d tmp_len=%d\n",
				    f->m.rptr, tmp_len);
				memcpy(f->m.va + f->m.memsz, f->m.va, tmp_len);
			}
		}

		if (unlikely(GET_RXD_ERR(rxd_val1))) {
			DBG("rxd_err = 0x%x\n", GET_RXD_ERR(rxd_val1));
			ndev->stats.rx_errors++;
			bdx_recycle_skb(priv, rxdd);
			continue;
		}

		rxf_fifo = &priv->rxf_fifo0;
		db = priv->rxdb;
		dm = bdx_rxdb_addr_elem(db, rxdd->va_lo);
		skb = dm->skb;

		if (len < BDX_COPYBREAK &&
		    (skb2 = netdev_alloc_skb(priv->ndev, len + NET_IP_ALIGN))) {
			skb_reserve(skb2, NET_IP_ALIGN);

			pci_dma_sync_single_for_cpu(priv->pdev,
						    dm->dma, rxf_fifo->m.pktsz,
						    PCI_DMA_FROMDEVICE);
			memcpy(skb2->data, skb->data, len);
			bdx_recycle_skb(priv, rxdd);
			skb = skb2;
		} else {
			pci_unmap_single(priv->pdev,
					 dm->dma, rxf_fifo->m.pktsz,
					 PCI_DMA_FROMDEVICE);
			bdx_rxdb_free_elem(db, rxdd->va_lo);
		}

		ndev->stats.rx_bytes += len;

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, ndev);

		if (GET_RXD_PKT_ID(rxd_val1) == 0)
			skb_checksum_none_assert(skb);
		else
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		NETIF_RX_MUX(priv, rxd_val1, rxd_vlan, skb);

		if (++done >= max_done)
			break;
	}

	ndev->stats.rx_packets += done;

	WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);

	bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);

	RET(done);
}

static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
		       u16 rxd_vlan)
{
	DBG("ERROR: rxdd bc %d rxfq %d to %d type %d err %d rxp %d pkt_id %d vtag %d len %d vlan_id %d cfi %d prio %d va_lo %d va_hi %d\n",
	    GET_RXD_BC(rxd_val1), GET_RXD_RXFQ(rxd_val1), GET_RXD_TO(rxd_val1),
	    GET_RXD_TYPE(rxd_val1), GET_RXD_ERR(rxd_val1),
	    GET_RXD_RXP(rxd_val1), GET_RXD_PKT_ID(rxd_val1),
	    GET_RXD_VTAG(rxd_val1), len, GET_RXD_VLAN_ID(rxd_vlan),
	    GET_RXD_CFI(rxd_vlan), GET_RXD_PRIO(rxd_vlan), rxdd->va_lo,
	    rxdd->va_hi);
}

static void print_rxfd(struct rxf_desc *rxfd)
{
	DBG("=== RxF desc CHIP ORDER/ENDIANNESS =============\n"
	    "info 0x%x va_lo %u pa_lo 0x%x pa_hi 0x%x len 0x%x\n",
	    rxfd->info, rxfd->va_lo, rxfd->pa_lo, rxfd->pa_hi, rxfd->len);
}

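/* Tx DB: a circular fifo of tx_map records that shadows the TxD fifo.
 * For every transmitted skb the driver queues one record per DMA-mapped
 * fragment (len > 0) followed by a terminating record holding the skb
 * pointer (len <= 0), so bdx_tx_cleanup() can unmap and free everything
 * once the chip reports the descriptor done.
 *
 * bdx_tx_db_size - returns the number of free elements. One element is
 * always kept unused so that rptr == wptr unambiguously means "empty".
 */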
static inline int bdx_tx_db_size(struct txdb *db)
{
	int taken = db->wptr - db->rptr;
	if (taken < 0)
		taken = db->size + 1 + taken;

	return db->size - taken;
}

static inline void __bdx_tx_db_ptr_next(struct txdb *db, struct tx_map **pptr)
{
	BDX_ASSERT(db == NULL || pptr == NULL);

	BDX_ASSERT(*pptr != db->rptr &&
		   *pptr != db->wptr);

	BDX_ASSERT(*pptr < db->start ||
		   *pptr >= db->end);

	++*pptr;
	if (unlikely(*pptr == db->end))
		*pptr = db->start;
}

static inline void bdx_tx_db_inc_rptr(struct txdb *db)
{
	BDX_ASSERT(db->rptr == db->wptr);
	__bdx_tx_db_ptr_next(db, &db->rptr);
}

static inline void bdx_tx_db_inc_wptr(struct txdb *db)
{
	__bdx_tx_db_ptr_next(db, &db->wptr);
	BDX_ASSERT(db->rptr == db->wptr);
}

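/* bdx_tx_db_init - allocate and initialize the tx db, sized one order
 * above the TxD fifo it shadows. At least one element is always left
 * empty so that "rptr == wptr" means empty rather than full.
 * Returns 0 on success, -ENOMEM on failure.
 */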
static int bdx_tx_db_init(struct txdb *d, int sz_type)
{
	int memsz = FIFO_SIZE * (1 << (sz_type + 1));

	d->start = vmalloc(memsz);
	if (!d->start)
		return -ENOMEM;

	d->size = memsz / sizeof(struct tx_map) - 1;
	d->end = d->start + d->size + 1;

	d->rptr = d->start;
	d->wptr = d->start;

	return 0;
}

static void bdx_tx_db_close(struct txdb *d)
{
	BDX_ASSERT(d == NULL);

	vfree(d->start);
	d->start = NULL;
}

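/* Sizes of tx descriptors, in bytes and quad-words, as a function of the
 * number of skb fragments; precomputed once at module init by
 * init_txd_sizes().
 */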
static struct {
	u16 bytes;
	u16 qwords;
} txd_sizes[MAX_SKB_FRAGS + 1];

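/* bdx_tx_map_skb - map an skb's head and fragments for DMA and build the
 * descriptor's physical-buffer-list (PBL) entries. Each mapping is also
 * recorded in the tx db, and a final record carrying the skb pointer and
 * a negative length marks the end of the packet for bdx_tx_cleanup().
 */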
static inline void
bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb,
	       struct txd_desc *txdd)
{
	struct txdb *db = &priv->txdb;
	struct pbl *pbl = &txdd->pbl[0];
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;

	db->wptr->len = skb_headlen(skb);
	db->wptr->addr.dma = pci_map_single(priv->pdev, skb->data,
					    db->wptr->len, PCI_DMA_TODEVICE);
	pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
	pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma));
	pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma));
	DBG("=== pbl len: 0x%x ================\n", pbl->len);
	DBG("=== pbl pa_lo: 0x%x ================\n", pbl->pa_lo);
	DBG("=== pbl pa_hi: 0x%x ================\n", pbl->pa_hi);
	bdx_tx_db_inc_wptr(db);

	for (i = 0; i < nr_frags; i++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[i];
		db->wptr->len = skb_frag_size(frag);
		db->wptr->addr.dma = skb_frag_dma_map(&priv->pdev->dev, frag,
						      0, skb_frag_size(frag),
						      DMA_TO_DEVICE);

		pbl++;
		pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
		pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma));
		pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma));
		bdx_tx_db_inc_wptr(db);
	}

	db->wptr->len = -txd_sizes[nr_frags].bytes;
	db->wptr->addr.skb = skb;
	bdx_tx_db_inc_wptr(db);
}

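/* init_txd_sizes - precompute the txd_sizes[] table. A descriptor with
 * one physical buffer takes 7 32-bit words, and each additional fragment
 * adds 3 more; the total is rounded up to a whole number of quad-words.
 */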
static void __init init_txd_sizes(void)
{
	int i, lwords;

	for (i = 0; i < MAX_SKB_FRAGS + 1; i++) {
		lwords = 7 + (i * 3);
		if (lwords & 1)
			lwords++;
		txd_sizes[i].qwords = lwords >> 1;
		txd_sizes[i].bytes = lwords << 2;
	}
}

static int bdx_tx_init(struct bdx_priv *priv)
{
	if (bdx_fifo_init(priv, &priv->txd_fifo0.m, priv->txd_size,
			  regTXD_CFG0_0,
			  regTXD_CFG1_0, regTXD_RPTR_0, regTXD_WPTR_0))
		goto err_mem;
	if (bdx_fifo_init(priv, &priv->txf_fifo0.m, priv->txf_size,
			  regTXF_CFG0_0,
			  regTXF_CFG1_0, regTXF_RPTR_0, regTXF_WPTR_0))
		goto err_mem;

	if (bdx_tx_db_init(&priv->txdb, max(priv->txd_size, priv->txf_size)))
		goto err_mem;

	priv->tx_level = BDX_MAX_TX_LEVEL;
#ifdef BDX_DELAY_WPTR
	priv->tx_update_mark = priv->tx_level - 1024;
#endif
	return 0;

err_mem:
	netdev_err(priv->ndev, "Tx init failed\n");
	return -ENOMEM;
}

static inline int bdx_tx_space(struct bdx_priv *priv)
{
	struct txd_fifo *f = &priv->txd_fifo0;
	int fsize;

	f->m.rptr = READ_REG(priv, f->m.reg_RPTR) & TXF_WPTR_WR_PTR;
	fsize = f->m.rptr - f->m.wptr;
	if (fsize <= 0)
		fsize = f->m.memsz + fsize;
	return fsize;
}

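/* bdx_tx_transmit - send a packet to the NIC.
 *
 * Builds a tx descriptor in place in the TxD fifo (checksum offload, TSO
 * mss and VLAN tag included), maps the skb via bdx_tx_map_skb(), handles
 * the fifo-edge wrap and publishes the new write pointer. The software
 * tx_level counter tracks free fifo bytes; when it drops below
 * BDX_MIN_TX_LEVEL the queue is stopped until bdx_tx_cleanup() frees
 * space. With BDX_DELAY_WPTR the write pointer update may be deferred
 * for a few packets to save MMIO writes.
 */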
static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
				   struct net_device *ndev)
{
	struct bdx_priv *priv = netdev_priv(ndev);
	struct txd_fifo *f = &priv->txd_fifo0;
	int txd_checksum = 7;
	int txd_lgsnd = 0;
	int txd_vlan_id = 0;
	int txd_vtag = 0;
	int txd_mss = 0;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct txd_desc *txdd;
	int len;
	unsigned long flags;

	ENTER;
	local_irq_save(flags);
	spin_lock(&priv->tx_lock);

	BDX_ASSERT(f->m.wptr >= f->m.memsz);
	txdd = (struct txd_desc *)(f->m.va + f->m.wptr);
	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
		txd_checksum = 0;

	if (skb_shinfo(skb)->gso_size) {
		txd_mss = skb_shinfo(skb)->gso_size;
		txd_lgsnd = 1;
		DBG("skb %p skb len %d gso size = %d\n", skb, skb->len,
		    txd_mss);
	}

	if (skb_vlan_tag_present(skb)) {
		txd_vlan_id = skb_vlan_tag_get(skb) & BITS_MASK(12);
		txd_vtag = 1;
	}

	txdd->length = CPU_CHIP_SWAP16(skb->len);
	txdd->mss = CPU_CHIP_SWAP16(txd_mss);
	txdd->txd_val1 =
	    CPU_CHIP_SWAP32(TXD_W1_VAL
			    (txd_sizes[nr_frags].qwords, txd_checksum, txd_vtag,
			     txd_lgsnd, txd_vlan_id));
	DBG("=== TxD desc =====================\n");
	DBG("=== w1: 0x%x ================\n", txdd->txd_val1);
	DBG("=== w2: mss 0x%x len 0x%x\n", txdd->mss, txdd->length);

	bdx_tx_map_skb(priv, skb, txdd);

	f->m.wptr += txd_sizes[nr_frags].bytes;
	len = f->m.wptr - f->m.memsz;
	if (unlikely(len >= 0)) {
		f->m.wptr = len;
		if (len > 0) {
			BDX_ASSERT(len > f->m.memsz);
			memcpy(f->m.va, f->m.va + f->m.memsz, len);
		}
	}
	BDX_ASSERT(f->m.wptr >= f->m.memsz);

	priv->tx_level -= txd_sizes[nr_frags].bytes;
	BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL);
#ifdef BDX_DELAY_WPTR
	if (priv->tx_level > priv->tx_update_mark) {
		WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
	} else {
		if (priv->tx_noupd++ > BDX_NO_UPD_PACKETS) {
			priv->tx_noupd = 0;
			WRITE_REG(priv, f->m.reg_WPTR,
				  f->m.wptr & TXF_WPTR_WR_PTR);
		}
	}
#else
	WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
#endif
#ifdef BDX_LLTX
	netif_trans_update(ndev);
#endif
	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;

	if (priv->tx_level < BDX_MIN_TX_LEVEL) {
		DBG("%s: %s: TX Q STOP level %d\n",
		    BDX_DRV_NAME, ndev->name, priv->tx_level);
		netif_stop_queue(ndev);
	}

	spin_unlock_irqrestore(&priv->tx_lock, flags);
	return NETDEV_TX_OK;
}

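/* bdx_tx_cleanup - reclaim tx descriptors that the chip reports done in
 * the TxF fifo: walk the tx db unmapping every DMA fragment of each
 * completed packet, free the skb from the terminating record, return the
 * freed bytes to tx_level and wake the queue if it was flow-stopped.
 */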
static void bdx_tx_cleanup(struct bdx_priv *priv)
{
	struct txf_fifo *f = &priv->txf_fifo0;
	struct txdb *db = &priv->txdb;
	int tx_level = 0;

	ENTER;
	f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_MASK;
	BDX_ASSERT(f->m.rptr >= f->m.memsz);

	while (f->m.wptr != f->m.rptr) {
		f->m.rptr += BDX_TXF_DESC_SZ;
		f->m.rptr &= f->m.size_mask;

		BDX_ASSERT(db->rptr->len == 0);
		do {
			BDX_ASSERT(db->rptr->addr.dma == 0);
			pci_unmap_page(priv->pdev, db->rptr->addr.dma,
				       db->rptr->len, PCI_DMA_TODEVICE);
			bdx_tx_db_inc_rptr(db);
		} while (db->rptr->len > 0);
		tx_level -= db->rptr->len;

		dev_kfree_skb_irq(db->rptr->addr.skb);
		bdx_tx_db_inc_rptr(db);
	}

	BDX_ASSERT((f->m.wptr & TXF_WPTR_WR_PTR) >= f->m.memsz);
	WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);

	spin_lock(&priv->tx_lock);
	priv->tx_level += tx_level;
	BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL);
#ifdef BDX_DELAY_WPTR
	if (priv->tx_noupd) {
		priv->tx_noupd = 0;
		WRITE_REG(priv, priv->txd_fifo0.m.reg_WPTR,
			  priv->txd_fifo0.m.wptr & TXF_WPTR_WR_PTR);
	}
#endif

	if (unlikely(netif_queue_stopped(priv->ndev) &&
		     netif_carrier_ok(priv->ndev) &&
		     (priv->tx_level >= BDX_MIN_TX_LEVEL))) {
		DBG("%s: %s: TX Q WAKE level %d\n",
		    BDX_DRV_NAME, priv->ndev->name, priv->tx_level);
		netif_wake_queue(priv->ndev);
	}
	spin_unlock(&priv->tx_lock);
}

static void bdx_tx_free_skbs(struct bdx_priv *priv)
{
	struct txdb *db = &priv->txdb;

	ENTER;
	while (db->rptr != db->wptr) {
		if (likely(db->rptr->len))
			pci_unmap_page(priv->pdev, db->rptr->addr.dma,
				       db->rptr->len, PCI_DMA_TODEVICE);
		else
			dev_kfree_skb(db->rptr->addr.skb);
		bdx_tx_db_inc_rptr(db);
	}
	RET();
}

static void bdx_tx_free(struct bdx_priv *priv)
{
	ENTER;
	bdx_tx_free_skbs(priv);
	bdx_fifo_free(priv, &priv->txd_fifo0.m);
	bdx_fifo_free(priv, &priv->txf_fifo0.m);
	bdx_tx_db_close(&priv->txdb);
}

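/* bdx_tx_push_desc - push a raw descriptor (used for the firmware image)
 * into the TxD fifo, copying in two pieces when it wraps the fifo edge,
 * and update the write pointer. The caller must guarantee there is
 * enough free space in the fifo.
 */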
static void bdx_tx_push_desc(struct bdx_priv *priv, void *data, int size)
{
	struct txd_fifo *f = &priv->txd_fifo0;
	int i = f->m.memsz - f->m.wptr;

	if (size == 0)
		return;

	if (i > size) {
		memcpy(f->m.va + f->m.wptr, data, size);
		f->m.wptr += size;
	} else {
		memcpy(f->m.va + f->m.wptr, data, i);
		f->m.wptr = size - i;
		memcpy(f->m.va, data + i, f->m.wptr);
	}
	WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
}

1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
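/* bdx_tx_push_desc_safe - push data of arbitrary size to the TxD fifo in
 * chunks, waiting (up to ~15 ms per chunk) for the chip to drain enough
 * space before each piece; 8 bytes are kept in reserve so the fifo never
 * looks completely full to the hardware.
 */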
static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size)
{
	int timer = 0;
	ENTER;

	while (size > 0) {
		int avail = bdx_tx_space(priv) - 8;
		if (avail <= 0) {
			if (timer++ > 300) {
				DBG("timeout while writing desc to TxD fifo\n");
				break;
			}
			udelay(50);
			continue;
		}
		avail = min(avail, size);
		DBG("about to push %d bytes starting %p size %d\n", avail,
		    data, size);
		bdx_tx_push_desc(priv, data, avail);
		size -= avail;
		data += avail;
	}
	RET();
}

static const struct net_device_ops bdx_netdev_ops = {
	.ndo_open		= bdx_open,
	.ndo_stop		= bdx_close,
	.ndo_start_xmit		= bdx_tx_transmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bdx_ioctl,
	.ndo_set_rx_mode	= bdx_setmulti,
	.ndo_change_mtu		= bdx_change_mtu,
	.ndo_set_mac_address	= bdx_set_mac,
	.ndo_vlan_rx_add_vid	= bdx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= bdx_vlan_rx_kill_vid,
};

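/* bdx_probe - device initialization routine.
 *
 * Enables the PCI device, sets up the DMA mask (64-bit when possible),
 * maps the register window, resets the chip and then, for each port on
 * the board, allocates and registers a net_device with its feature
 * flags, fifo sizes, default interrupt coalescing values and the MAC
 * address read from the hardware. Returns 0 on success, a negative
 * errno on failure.
 */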
static int
bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *ndev;
	struct bdx_priv *priv;
	int err, pci_using_dac, port;
	unsigned long pciaddr;
	u32 regionSize;
	struct pci_nic *nic;

	ENTER;

	nic = vmalloc(sizeof(*nic));
	if (!nic)
		RET(-ENOMEM);

	err = pci_enable_device(pdev);
	if (err)
		goto err_pci;

	if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) &&
	    !(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) {
		pci_using_dac = 1;
	} else {
		if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) ||
		    (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
			pr_err("No usable DMA configuration, aborting\n");
			goto err_dma;
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, BDX_DRV_NAME);
	if (err)
		goto err_dma;

	pci_set_master(pdev);

	pciaddr = pci_resource_start(pdev, 0);
	if (!pciaddr) {
		err = -EIO;
		pr_err("no MMIO resource\n");
		goto err_out_res;
	}
	regionSize = pci_resource_len(pdev, 0);
	if (regionSize < BDX_REGS_SIZE) {
		err = -EIO;
		pr_err("MMIO resource (%x) too small\n", regionSize);
		goto err_out_res;
	}

	nic->regs = ioremap(pciaddr, regionSize);
	if (!nic->regs) {
		err = -EIO;
		pr_err("ioremap failed\n");
		goto err_out_res;
	}

	if (pdev->irq < 2) {
		err = -EIO;
		pr_err("invalid irq (%d)\n", pdev->irq);
		goto err_out_iomap;
	}
	pci_set_drvdata(pdev, nic);

	if (pdev->device == 0x3014)
		nic->port_num = 2;
	else
		nic->port_num = 1;

	print_hw_id(pdev);

	bdx_hw_reset_direct(nic->regs);

	nic->irq_type = IRQ_INTX;
#ifdef BDX_MSI
	if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) {
		err = pci_enable_msi(pdev);
		if (err)
			pr_err("Can't enable msi. error is %d\n", err);
		else
			nic->irq_type = IRQ_MSI;
	} else
		DBG("HW does not support MSI\n");
#endif

	for (port = 0; port < nic->port_num; port++) {
		ndev = alloc_etherdev(sizeof(struct bdx_priv));
		if (!ndev) {
			err = -ENOMEM;
			goto err_out_iomap;
		}

		ndev->netdev_ops = &bdx_netdev_ops;
		ndev->tx_queue_len = BDX_NDEV_TXQ_LEN;

		bdx_set_ethtool_ops(ndev);

		ndev->if_port = port;
		ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
		    | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
		    NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM;
		ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
		    NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX;

		if (pci_using_dac)
			ndev->features |= NETIF_F_HIGHDMA;

		priv = nic->priv[port] = netdev_priv(ndev);

		priv->pBdxRegs = nic->regs + port * 0x8000;
		priv->port = port;
		priv->pdev = pdev;
		priv->ndev = ndev;
		priv->nic = nic;
		priv->msg_enable = BDX_DEF_MSG_ENABLE;

		netif_napi_add(ndev, &priv->napi, bdx_poll, 64);

		if ((readl(nic->regs + FPGA_VER) & 0xFFF) == 308) {
			DBG("HW statistics not supported\n");
			priv->stats_flag = 0;
		} else {
			priv->stats_flag = 1;
		}

		priv->txd_size = 2;
		priv->txf_size = 2;
		priv->rxd_size = 2;
		priv->rxf_size = 3;

		priv->rdintcm = INT_REG_VAL(0x20, 1, 4, 12);
		priv->tdintcm = INT_REG_VAL(0x20, 1, 0, 12);

#ifdef BDX_LLTX
		ndev->features |= NETIF_F_LLTX;
#endif

		ndev->min_mtu = ETH_ZLEN;
		ndev->max_mtu = BDX_MAX_MTU;

		spin_lock_init(&priv->tx_lock);

		if (bdx_read_mac(priv)) {
			pr_err("load MAC address failed\n");
			err = -EFAULT;
			goto err_out_free;
		}
		SET_NETDEV_DEV(ndev, &pdev->dev);
		err = register_netdev(ndev);
		if (err) {
			pr_err("register_netdev failed\n");
			goto err_out_free;
		}
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);

		print_eth_id(ndev);
	}
	RET(0);

err_out_free:
	free_netdev(ndev);
err_out_iomap:
	iounmap(nic->regs);
err_out_res:
	pci_release_regions(pdev);
err_dma:
	pci_disable_device(pdev);
err_pci:
	vfree(nic);

	RET(err);
}

static const char bdx_stat_names[][ETH_GSTRING_LEN] = {
	"InUCast",
	"InMCast",
	"InBCast",
	"InPkts",
	"InErrors",
	"InDropped",
	"FrameTooLong",
	"FrameSequenceErrors",
	"InVLAN",
	"InDroppedDFE",
	"InDroppedIntFull",
	"InFrameAlignErrors",

	"OutUCast",
	"OutMCast",
	"OutBCast",
	"OutPkts",

	"OutVLAN",
	"InUCastOctects",
	"OutUCastOctects",

	"InBCastOctects",
	"OutBCastOctects",
	"InOctects",
	"OutOctects",
};

static int bdx_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *ecmd)
{
	ethtool_link_ksettings_zero_link_mode(ecmd, supported);
	ethtool_link_ksettings_add_link_mode(ecmd, supported,
					     10000baseT_Full);
	ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
	ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
	ethtool_link_ksettings_add_link_mode(ecmd, advertising,
					     10000baseT_Full);
	ethtool_link_ksettings_add_link_mode(ecmd, advertising, FIBRE);

	ecmd->base.speed = SPEED_10000;
	ecmd->base.duplex = DUPLEX_FULL;
	ecmd->base.port = PORT_FIBRE;
	ecmd->base.autoneg = AUTONEG_DISABLE;

	return 0;
}

static void
bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct bdx_priv *priv = netdev_priv(netdev);

	strlcpy(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, pci_name(priv->pdev),
		sizeof(drvinfo->bus_info));
}

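/* Interrupt coalescing helpers. The rdintcm/tdintcm register values pack
 * an interval (converted to microseconds via INT_COAL_MULT) and a packet
 * threshold expressed in PCK_TH_MULT-byte fifo units; the get/set
 * callbacks below translate between those units and the usec/frame
 * numbers that ethtool expects.
 */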
static int
bdx_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal)
{
	u32 rdintcm;
	u32 tdintcm;
	struct bdx_priv *priv = netdev_priv(netdev);

	rdintcm = priv->rdintcm;
	tdintcm = priv->tdintcm;

	ecoal->rx_coalesce_usecs = GET_INT_COAL(rdintcm) * INT_COAL_MULT;
	ecoal->rx_max_coalesced_frames =
	    ((GET_PCK_TH(rdintcm) * PCK_TH_MULT) / sizeof(struct rxf_desc));

	ecoal->tx_coalesce_usecs = GET_INT_COAL(tdintcm) * INT_COAL_MULT;
	ecoal->tx_max_coalesced_frames =
	    ((GET_PCK_TH(tdintcm) * PCK_TH_MULT) / BDX_TXF_DESC_SZ);

	return 0;
}

static int
bdx_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal)
{
	u32 rdintcm;
	u32 tdintcm;
	struct bdx_priv *priv = netdev_priv(netdev);
	int rx_coal;
	int tx_coal;
	int rx_max_coal;
	int tx_max_coal;

	rx_coal = ecoal->rx_coalesce_usecs / INT_COAL_MULT;
	tx_coal = ecoal->tx_coalesce_usecs / INT_COAL_MULT;
	rx_max_coal = ecoal->rx_max_coalesced_frames;
	tx_max_coal = ecoal->tx_max_coalesced_frames;

	rx_max_coal =
	    (((rx_max_coal * sizeof(struct rxf_desc)) + PCK_TH_MULT - 1)
	     / PCK_TH_MULT);
	tx_max_coal =
	    (((tx_max_coal * BDX_TXF_DESC_SZ) + PCK_TH_MULT - 1)
	     / PCK_TH_MULT);

	if ((rx_coal > 0x7FFF) || (tx_coal > 0x7FFF) ||
	    (rx_max_coal > 0xF) || (tx_max_coal > 0xF))
		return -EINVAL;

	rdintcm = INT_REG_VAL(rx_coal, GET_INT_COAL_RC(priv->rdintcm),
			      GET_RXF_TH(priv->rdintcm), rx_max_coal);
	tdintcm = INT_REG_VAL(tx_coal, GET_INT_COAL_RC(priv->tdintcm), 0,
			      tx_max_coal);

	priv->rdintcm = rdintcm;
	priv->tdintcm = tdintcm;

	WRITE_REG(priv, regRDINTCM0, rdintcm);
	WRITE_REG(priv, regTDINTCM0, tdintcm);

	return 0;
}

static inline int bdx_rx_fifo_size_to_packets(int rx_size)
{
	return (FIFO_SIZE * (1 << rx_size)) / sizeof(struct rxf_desc);
}

static inline int bdx_tx_fifo_size_to_packets(int tx_size)
{
	return (FIFO_SIZE * (1 << tx_size)) / BDX_TXF_DESC_SZ;
}

static void
bdx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
	struct bdx_priv *priv = netdev_priv(netdev);

	ring->rx_max_pending = bdx_rx_fifo_size_to_packets(3);
	ring->tx_max_pending = bdx_tx_fifo_size_to_packets(3);
	ring->rx_pending = bdx_rx_fifo_size_to_packets(priv->rxf_size);
	ring->tx_pending = bdx_tx_fifo_size_to_packets(priv->txd_size);
}

static int
bdx_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
	struct bdx_priv *priv = netdev_priv(netdev);
	int rx_size = 0;
	int tx_size = 0;

	for (; rx_size < 4; rx_size++) {
		if (bdx_rx_fifo_size_to_packets(rx_size) >= ring->rx_pending)
			break;
	}
	if (rx_size == 4)
		rx_size = 3;

	for (; tx_size < 4; tx_size++) {
		if (bdx_tx_fifo_size_to_packets(tx_size) >= ring->tx_pending)
			break;
	}
	if (tx_size == 4)
		tx_size = 3;

	if ((rx_size == priv->rxf_size) &&
	    (tx_size == priv->txd_size))
		return 0;

	priv->rxf_size = rx_size;
	if (rx_size > 1)
		priv->rxd_size = rx_size - 1;
	else
		priv->rxd_size = rx_size;

	priv->txf_size = priv->txd_size = tx_size;

	if (netif_running(netdev)) {
		bdx_close(netdev);
		bdx_open(netdev);
	}
	return 0;
}

static void bdx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *bdx_stat_names, sizeof(bdx_stat_names));
		break;
	}
}

static int bdx_get_sset_count(struct net_device *netdev, int stringset)
{
	struct bdx_priv *priv = netdev_priv(netdev);

	switch (stringset) {
	case ETH_SS_STATS:
		BDX_ASSERT(ARRAY_SIZE(bdx_stat_names)
			   != sizeof(struct bdx_stats) / sizeof(u64));
		return (priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0;
	}

	return -EINVAL;
}

static void bdx_get_ethtool_stats(struct net_device *netdev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct bdx_priv *priv = netdev_priv(netdev);

	if (priv->stats_flag) {
		bdx_update_stats(priv);

		memcpy(data, &priv->hw_stats, sizeof(priv->hw_stats));
	}
}

static void bdx_set_ethtool_ops(struct net_device *netdev)
{
	static const struct ethtool_ops bdx_ethtool_ops = {
		.get_drvinfo = bdx_get_drvinfo,
		.get_link = ethtool_op_get_link,
		.get_coalesce = bdx_get_coalesce,
		.set_coalesce = bdx_set_coalesce,
		.get_ringparam = bdx_get_ringparam,
		.set_ringparam = bdx_set_ringparam,
		.get_strings = bdx_get_strings,
		.get_sset_count = bdx_get_sset_count,
		.get_ethtool_stats = bdx_get_ethtool_stats,
		.get_link_ksettings = bdx_get_link_ksettings,
	};

	netdev->ethtool_ops = &bdx_ethtool_ops;
}

static void bdx_remove(struct pci_dev *pdev)
{
	struct pci_nic *nic = pci_get_drvdata(pdev);
	struct net_device *ndev;
	int port;

	for (port = 0; port < nic->port_num; port++) {
		ndev = nic->priv[port]->ndev;
		unregister_netdev(ndev);
		free_netdev(ndev);
	}

#ifdef BDX_MSI
	if (nic->irq_type == IRQ_MSI)
		pci_disable_msi(pdev);
#endif

	iounmap(nic->regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	vfree(nic);

	RET();
}

static struct pci_driver bdx_pci_driver = {
	.name = BDX_DRV_NAME,
	.id_table = bdx_pci_tbl,
	.probe = bdx_probe,
	.remove = bdx_remove,
};

static void __init print_driver_id(void)
{
	pr_info("%s, %s\n", BDX_DRV_DESC, BDX_DRV_VERSION);
	pr_info("Options: hw_csum %s\n", BDX_MSI_STRING);
}

static int __init bdx_module_init(void)
{
	ENTER;
	init_txd_sizes();
	print_driver_id();
	RET(pci_register_driver(&bdx_pci_driver));
}

module_init(bdx_module_init);

static void __exit bdx_module_exit(void)
{
	ENTER;
	pci_unregister_driver(&bdx_pci_driver);
	RET();
}

module_exit(bdx_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(BDX_DRV_DESC);
MODULE_FIRMWARE("tehuti/bdx.bin");