1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
/* Prefix every pr_*() message from this module with its name. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "tehuti.h"

/* PCI IDs of the Tehuti NICs this driver binds to; zero entry terminates. */
static const struct pci_device_id bdx_pci_tbl[] = {
	{ PCI_VDEVICE(TEHUTI, 0x3009), },
	{ PCI_VDEVICE(TEHUTI, 0x3010), },
	{ PCI_VDEVICE(TEHUTI, 0x3014), },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bdx_pci_tbl);
73
74
75static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f);
76static void bdx_tx_cleanup(struct bdx_priv *priv);
77static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget);
78
79
80static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size);
81
82
83static int bdx_tx_init(struct bdx_priv *priv);
84static int bdx_rx_init(struct bdx_priv *priv);
85
86
87static void bdx_rx_free(struct bdx_priv *priv);
88static void bdx_tx_free(struct bdx_priv *priv);
89
90
91static void bdx_set_ethtool_ops(struct net_device *netdev);
92
93
94
95
96
/* print_hw_id - log hardware identification for the whole PCI function:
 * board name/port count, SROM and FPGA versions, and the negotiated
 * PCI-E lane count / max payload / max read request sizes read from
 * the device's config space.
 */
static void print_hw_id(struct pci_dev *pdev)
{
	struct pci_nic *nic = pci_get_drvdata(pdev);
	u16 pci_link_status = 0;
	u16 pci_ctrl = 0;

	pci_read_config_word(pdev, PCI_LINK_STATUS_REG, &pci_link_status);
	pci_read_config_word(pdev, PCI_DEV_CTRL_REG, &pci_ctrl);

	pr_info("%s%s\n", BDX_NIC_NAME,
		nic->port_num == 1 ? "" : ", 2-Port");
	pr_info("srom 0x%x fpga %d build %u lane# %d max_pl 0x%x mrrs 0x%x\n",
		readl(nic->regs + SROM_VER), readl(nic->regs + FPGA_VER) & 0xFFF,
		readl(nic->regs + FPGA_SEED),
		GET_LINK_STATUS_LANES(pci_link_status),
		GET_DEV_CTRL_MAXPL(pci_ctrl), GET_DEV_CTRL_MRRS(pci_ctrl));
}
114
/* print_fw_id - log the firmware version register of the NIC. */
static void print_fw_id(struct pci_nic *nic)
{
	pr_info("fw 0x%x\n", readl(nic->regs + FW_VER));
}
119
/* print_eth_id - log which physical port ('A' or 'B') this netdev drives. */
static void print_eth_id(struct net_device *ndev)
{
	netdev_info(ndev, "%s, Port %c\n",
		    BDX_NIC_NAME, (ndev->if_port == 0) ? 'A' : 'B');

}
126
127
128
129
130
/* Unmask / mask all interrupt sources used by the driver via regIMR. */
#define bdx_enable_interrupts(priv)	\
	do { WRITE_REG(priv, regIMR, IR_RUN); } while (0)
#define bdx_disable_interrupts(priv)	\
	do { WRITE_REG(priv, regIMR, 0); } while (0)
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
/* bdx_fifo_init - allocate a coherent DMA ring and program its base into
 * the chip.
 * @priv:     NIC private structure
 * @f:        fifo descriptor to initialise (zeroed here)
 * @fsz_type: fifo size encoding; ring holds FIFO_SIZE * 2^fsz_type bytes
 * @reg_*:    chip register offsets for this fifo's CFG0/CFG1/RPTR/WPTR
 *
 * FIFO_EXTRA_SPACE extra bytes are allocated past the ring so descriptors
 * that wrap the ring end can be handled contiguously (see rx/tx paths).
 * CFG0 carries the low base-address bits plus the size code; CFG1 the
 * high 32 bits.  Returns 0 or -ENOMEM.
 */
static int
bdx_fifo_init(struct bdx_priv *priv, struct fifo *f, int fsz_type,
	      u16 reg_CFG0, u16 reg_CFG1, u16 reg_RPTR, u16 reg_WPTR)
{
	u16 memsz = FIFO_SIZE * (1 << fsz_type);

	memset(f, 0, sizeof(struct fifo));

	/* GFP_ATOMIC: may be called from contexts that cannot sleep */
	f->va = dma_alloc_coherent(&priv->pdev->dev, memsz + FIFO_EXTRA_SPACE,
				   &f->da, GFP_ATOMIC);
	if (!f->va) {
		pr_err("dma_alloc_coherent failed\n");
		RET(-ENOMEM);
	}
	f->reg_CFG0 = reg_CFG0;
	f->reg_CFG1 = reg_CFG1;
	f->reg_RPTR = reg_RPTR;
	f->reg_WPTR = reg_WPTR;
	f->rptr = 0;
	f->wptr = 0;
	f->memsz = memsz;
	f->size_mask = memsz - 1;	/* memsz is a power of two */
	WRITE_REG(priv, reg_CFG0, (u32) ((f->da & TX_RX_CFG0_BASE) | fsz_type));
	WRITE_REG(priv, reg_CFG1, H32_64(f->da));

	RET(0);
}
179
180
181
182
183
184
/* bdx_fifo_free - release the coherent DMA ring allocated by bdx_fifo_init.
 * Safe to call on a fifo that was never (or already) initialised: f->va
 * is checked and cleared.
 */
static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f)
{
	ENTER;
	if (f->va) {
		dma_free_coherent(&priv->pdev->dev,
				  f->memsz + FIFO_EXTRA_SPACE, f->va, f->da);
		f->va = NULL;
	}
	RET();
}
195
196
197
198
199
200static void bdx_link_changed(struct bdx_priv *priv)
201{
202 u32 link = READ_REG(priv, regMAC_LNK_STAT) & MAC_LINK_STAT;
203
204 if (!link) {
205 if (netif_carrier_ok(priv->ndev)) {
206 netif_stop_queue(priv->ndev);
207 netif_carrier_off(priv->ndev);
208 netdev_err(priv->ndev, "Link Down\n");
209 }
210 } else {
211 if (!netif_carrier_ok(priv->ndev)) {
212 netif_wake_queue(priv->ndev);
213 netif_carrier_on(priv->ndev);
214 netdev_err(priv->ndev, "Link Up\n");
215 }
216 }
217}
218
/* bdx_isr_extra - handle the non-datapath interrupt causes: refill of
 * the RX free fifo, link state change, and PCI-E fault/timeout (the
 * latter two are only logged).
 */
static void bdx_isr_extra(struct bdx_priv *priv, u32 isr)
{
	if (isr & IR_RX_FREE_0) {
		bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);
		DBG("RX_FREE_0\n");
	}

	if (isr & IR_LNKCHG0)
		bdx_link_changed(priv);

	if (isr & IR_PCIE_LINK)
		netdev_err(priv->ndev, "PCI-E Link Fault\n");

	if (isr & IR_PCIE_TOUT)
		netdev_err(priv->ndev, "PCI-E Time Out\n");

}
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
/* bdx_isr_napi - top-half interrupt handler.
 *
 * Reads the masked interrupt status; a zero value means the interrupt
 * was not ours (shared line), so interrupts are re-enabled and IRQ_NONE
 * returned.  "Extra" causes are handled inline; RX/TX work is deferred
 * to NAPI.  Note: when NAPI is scheduled, interrupts stay DISABLED and
 * are re-enabled by bdx_poll() once polling completes.
 */
static irqreturn_t bdx_isr_napi(int irq, void *dev)
{
	struct net_device *ndev = dev;
	struct bdx_priv *priv = netdev_priv(ndev);
	u32 isr;

	ENTER;
	isr = (READ_REG(priv, regISR) & IR_RUN);
	if (unlikely(!isr)) {
		bdx_enable_interrupts(priv);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (isr & IR_EXTRA)
		bdx_isr_extra(priv, isr);

	if (isr & (IR_RX_DESC_0 | IR_TX_FREE_0)) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			__napi_schedule(&priv->napi);
			RET(IRQ_HANDLED);
		} else {
			/* NAPI was already scheduled; reading the WPTR
			 * registers acknowledges the RX/TX causes so the
			 * line is not re-raised for work the poller will
			 * pick up anyway.
			 */
			READ_REG(priv, regTXF_WPTR_0);
			READ_REG(priv, regRXD_WPTR_0);
		}
	}

	bdx_enable_interrupts(priv);
	RET(IRQ_HANDLED);
}
288
/* bdx_poll - NAPI poll callback: reclaim completed TX, then receive up
 * to @budget RX packets.  Polling stops (and interrupts are re-enabled)
 * when less than a full budget was consumed, or after 30 consecutive
 * full-budget polls (napi_stop) to bound time spent in polled mode.
 * Returns the number of RX packets processed.
 */
static int bdx_poll(struct napi_struct *napi, int budget)
{
	struct bdx_priv *priv = container_of(napi, struct bdx_priv, napi);
	int work_done;

	ENTER;
	bdx_tx_cleanup(priv);
	work_done = bdx_rx_receive(priv, &priv->rxd_fifo0, budget);
	if ((work_done < budget) ||
	    (priv->napi_stop++ >= 30)) {
		DBG("rx poll is done. backing to isr-driven\n");

		priv->napi_stop = 0;

		napi_complete_done(napi, work_done);
		bdx_enable_interrupts(priv);
	}
	return work_done;
}
310
311
312
313
314
315
316
317
318
319
320
/* bdx_fw_load - load chip firmware through the TX descriptor path.
 *
 * Only the caller that obtains the init semaphore while the chip is not
 * yet initialised pushes the firmware image; all callers then poll
 * regINIT_STATUS (up to 200 x 2 ms) for completion.
 * Returns 0 on success or a negative errno.
 */
static int bdx_fw_load(struct bdx_priv *priv)
{
	const struct firmware *fw = NULL;
	int master, i;
	int rc;

	ENTER;
	master = READ_REG(priv, regINIT_SEMAPHORE);
	if (!READ_REG(priv, regINIT_STATUS) && master) {
		rc = request_firmware(&fw, "tehuti/bdx.bin", &priv->pdev->dev);
		if (rc)
			goto out;
		/* image is fed to the chip via the TX fifo */
		bdx_tx_push_desc_safe(priv, (char *)fw->data, fw->size);
		mdelay(100);
	}
	for (i = 0; i < 200; i++) {
		if (READ_REG(priv, regINIT_STATUS)) {
			rc = 0;
			goto out;
		}
		mdelay(2);
	}
	rc = -EIO;
out:
	if (master)
		WRITE_REG(priv, regINIT_SEMAPHORE, 1);	/* release semaphore */

	release_firmware(fw);	/* NULL-safe */

	if (rc) {
		netdev_err(priv->ndev, "firmware loading failed\n");
		if (rc == -EIO)
			DBG("VPC = 0x%x VIC = 0x%x INIT_STATUS = 0x%x i=%d\n",
			    READ_REG(priv, regVPC),
			    READ_REG(priv, regVIC),
			    READ_REG(priv, regINIT_STATUS), i);
		RET(rc);
	} else {
		DBG("%s: firmware loading success\n", priv->ndev->name);
		RET(0);
	}
}
363
/* bdx_restore_mac - program ndev->dev_addr into the chip's three 16-bit
 * unicast MAC registers.  Bytes are packed big-end-first into each
 * register, with MAC2 holding the most significant pair.
 */
static void bdx_restore_mac(struct net_device *ndev, struct bdx_priv *priv)
{
	u32 val;

	ENTER;
	DBG("mac0=%x mac1=%x mac2=%x\n",
	    READ_REG(priv, regUNC_MAC0_A),
	    READ_REG(priv, regUNC_MAC1_A), READ_REG(priv, regUNC_MAC2_A));

	val = (ndev->dev_addr[0] << 8) | (ndev->dev_addr[1]);
	WRITE_REG(priv, regUNC_MAC2_A, val);
	val = (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]);
	WRITE_REG(priv, regUNC_MAC1_A, val);
	val = (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]);
	WRITE_REG(priv, regUNC_MAC0_A, val);

	DBG("mac0=%x mac1=%x mac2=%x\n",
	    READ_REG(priv, regUNC_MAC0_A),
	    READ_REG(priv, regUNC_MAC1_A), READ_REG(priv, regUNC_MAC2_A));
	RET();
}
385
386
387
388
389
/* bdx_hw_start - bring the datapath up: program MAC/fifo/interrupt-
 * coalescing registers, restore the MAC address, set the default RX
 * filter, request the IRQ and unmask interrupts.
 * Returns 0 or the error from request_irq().
 */
static int bdx_hw_start(struct bdx_priv *priv)
{
	int rc = -EIO;
	struct net_device *ndev = priv->ndev;

	ENTER;
	bdx_link_changed(priv);

	/* MAC and internal-fifo thresholds (values per vendor datasheet;
	 * magic numbers are hardware-specific — TODO confirm against docs) */
	WRITE_REG(priv, regFRM_LENGTH, 0X3FE0);
	WRITE_REG(priv, regPAUSE_QUANT, 0x96);
	WRITE_REG(priv, regRX_FIFO_SECTION, 0x800010);
	WRITE_REG(priv, regTX_FIFO_SECTION, 0xE00010);
	WRITE_REG(priv, regRX_FULLNESS, 0);
	WRITE_REG(priv, regTX_FULLNESS, 0);
	WRITE_REG(priv, regCTRLST,
		  regCTRLST_BASE | regCTRLST_RX_ENA | regCTRLST_TX_ENA);

	WRITE_REG(priv, regVGLB, 0);
	WRITE_REG(priv, regMAX_FRAME_A,
		  priv->rxf_fifo0.m.pktsz & MAX_FRAME_AB_VAL);

	/* interrupt coalescing */
	DBG("RDINTCM=%08x\n", priv->rdintcm);
	WRITE_REG(priv, regRDINTCM0, priv->rdintcm);
	WRITE_REG(priv, regRDINTCM2, 0);

	DBG("TDINTCM=%08x\n", priv->tdintcm);
	WRITE_REG(priv, regTDINTCM0, priv->tdintcm);

	bdx_restore_mac(priv->ndev, priv);

	/* default filter: accept broadcast + multicast, OS-enable */
	WRITE_REG(priv, regGMAC_RXF_A, GMAC_RX_FILTER_OSEN |
		  GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB);

/* line must be shared unless we got a dedicated MSI vector */
#define BDX_IRQ_TYPE ((priv->nic->irq_type == IRQ_MSI) ? 0 : IRQF_SHARED)

	rc = request_irq(priv->pdev->irq, bdx_isr_napi, BDX_IRQ_TYPE,
			 ndev->name, ndev);
	if (rc)
		goto err_irq;
	bdx_enable_interrupts(priv);

	RET(0);

err_irq:
	RET(rc);
}
439
/* bdx_hw_stop - quiesce the device: mask interrupts, free the IRQ and
 * mark the interface down for the network stack.
 */
static void bdx_hw_stop(struct bdx_priv *priv)
{
	ENTER;
	bdx_disable_interrupts(priv);	/* before free_irq: no late ISR work */
	free_irq(priv->pdev->irq, priv->ndev);

	netif_carrier_off(priv->ndev);
	netif_stop_queue(priv->ndev);

	RET();
}
451
/* bdx_hw_reset_direct - hardware reset via raw register access (used
 * before a bdx_priv exists).  Pulses CLKPLL_SFTRST, then waits up to
 * 700 ms for PLL lock (CLKPLL_LKD).  Returns 0 on success, 1 on timeout.
 */
static int bdx_hw_reset_direct(void __iomem *regs)
{
	u32 val, i;
	ENTER;

	val = readl(regs + regCLKPLL);
	/* the +0x8 is applied alongside SFTRST — hardware-specific,
	 * presumably a required companion bit; TODO confirm vs datasheet */
	writel((val | CLKPLL_SFTRST) + 0x8, regs + regCLKPLL);
	udelay(50);
	val = readl(regs + regCLKPLL);
	writel(val & ~CLKPLL_SFTRST, regs + regCLKPLL);

	for (i = 0; i < 70; i++, mdelay(10))
		if ((readl(regs + regCLKPLL) & CLKPLL_LKD) == CLKPLL_LKD) {
			/* dummy read to flush posted writes */
			readl(regs + regRXD_CFG0_0);
			return 0;
		}
	pr_err("HW reset failed\n");
	return 1;
}
474
/* bdx_hw_reset - same PLL soft-reset sequence as bdx_hw_reset_direct()
 * but through the priv accessors; the reset pulse itself is only issued
 * from port 0 (it resets the whole chip), every port then waits for
 * PLL lock.  Returns 0 on success, 1 on timeout.
 */
static int bdx_hw_reset(struct bdx_priv *priv)
{
	u32 val, i;
	ENTER;

	if (priv->port == 0) {
		val = READ_REG(priv, regCLKPLL);
		WRITE_REG(priv, regCLKPLL, (val | CLKPLL_SFTRST) + 0x8);
		udelay(50);
		val = READ_REG(priv, regCLKPLL);
		WRITE_REG(priv, regCLKPLL, val & ~CLKPLL_SFTRST);
	}

	for (i = 0; i < 70; i++, mdelay(10))
		if ((READ_REG(priv, regCLKPLL) & CLKPLL_LKD) == CLKPLL_LKD) {
			/* dummy read to flush posted writes */
			READ_REG(priv, regRXD_CFG0_0);
			return 0;
		}
	pr_err("HW reset failed\n");
	return 1;
}
498
/* bdx_sw_reset - software reset sequence for chips without the PLL
 * reset path (non-0x3009 devices, see bdx_reset()).  Order matters:
 * stop RX filtering, disable port and queues, wait for the port to
 * report stopped, clear coalescing/IMR, assert queue+port reset, zero
 * all fifo pointer registers, then deassert everything in reverse.
 * Always returns 0 (continues with a warning on stop-timeout).
 */
static int bdx_sw_reset(struct bdx_priv *priv)
{
	int i;

	ENTER;

	/* 1. stop accepting packets */
	WRITE_REG(priv, regGMAC_RXF_A, 0);
	mdelay(100);
	/* 2. disable port and queues */
	WRITE_REG(priv, regDIS_PORT, 1);
	WRITE_REG(priv, regDIS_QU, 1);
	/* 3. wait (up to 500 ms) for the port to report stopped */
	for (i = 0; i < 50; i++) {
		if (READ_REG(priv, regRST_PORT) & 1)
			break;
		mdelay(10);
	}
	if (i == 50)
		netdev_err(priv->ndev, "SW reset timeout. continuing anyway\n");

	/* 4. disable coalescing and mask interrupts; read ISR to ack */
	WRITE_REG(priv, regRDINTCM0, 0);
	WRITE_REG(priv, regTDINTCM0, 0);
	WRITE_REG(priv, regIMR, 0);
	READ_REG(priv, regISR);

	/* 5. assert reset on queues and port */
	WRITE_REG(priv, regRST_QU, 1);
	WRITE_REG(priv, regRST_PORT, 1);

	/* 6. zero every TX/RX fifo pointer register (0x10 apart) */
	for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
		DBG("%x = %x\n", i, READ_REG(priv, i) & TXF_WPTR_WR_PTR);
	for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
		WRITE_REG(priv, i, 0);

	/* 7. release everything in reverse order */
	WRITE_REG(priv, regDIS_PORT, 0);
	WRITE_REG(priv, regDIS_QU, 0);
	WRITE_REG(priv, regRST_QU, 0);
	WRITE_REG(priv, regRST_PORT, 0);

	for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
		DBG("%x = %x\n", i, READ_REG(priv, i) & TXF_WPTR_WR_PTR);

	RET(0);
}
552
553
554static int bdx_reset(struct bdx_priv *priv)
555{
556 ENTER;
557 RET((priv->pdev->device == 0x3009)
558 ? bdx_hw_reset(priv)
559 : bdx_sw_reset(priv));
560}
561
562
563
564
565
566
567
568
569
570
571
572
/* bdx_close - ndo_stop callback: stop NAPI, reset the chip, release the
 * IRQ and free all RX/TX fifo resources.  Always returns 0.
 */
static int bdx_close(struct net_device *ndev)
{
	struct bdx_priv *priv = NULL;

	ENTER;
	priv = netdev_priv(ndev);

	napi_disable(&priv->napi);

	bdx_reset(priv);	/* quiesce DMA before freeing the rings */
	bdx_hw_stop(priv);
	bdx_rx_free(priv);
	bdx_tx_free(priv);
	RET(0);
}
588
589
590
591
592
593
594
595
596
597
598
599
600
/* bdx_open - ndo_open callback: reset the chip, allocate TX/RX rings,
 * load firmware, prime the RX free fifo with skbs, start the hardware
 * and enable NAPI.  On any failure the partially-initialised device is
 * torn down via bdx_close().  Returns 0 or a negative errno.
 */
static int bdx_open(struct net_device *ndev)
{
	struct bdx_priv *priv;
	int rc;

	ENTER;
	priv = netdev_priv(ndev);
	bdx_reset(priv);
	if (netif_running(ndev))
		netif_stop_queue(priv->ndev);

	if ((rc = bdx_tx_init(priv)) ||
	    (rc = bdx_rx_init(priv)) ||
	    (rc = bdx_fw_load(priv)))
		goto err;

	bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);

	rc = bdx_hw_start(priv);
	if (rc)
		goto err;

	napi_enable(&priv->napi);

	print_fw_id(priv->nic);

	RET(0);

err:
	bdx_close(ndev);	/* frees whatever was set up above */
	RET(rc);
}
633
634static int bdx_range_check(struct bdx_priv *priv, u32 offset)
635{
636 return (offset > (u32) (BDX_REGS_SIZE / priv->nic->port_num)) ?
637 -EINVAL : 0;
638}
639
/* bdx_siocdevprivate - debug register access via private ioctls.
 *
 * User passes u32 data[3]: data[0] = op (BDX_OP_READ/BDX_OP_WRITE),
 * data[1] = register offset, data[2] = value (out for read, in for
 * write).  Bare SIOCDEVPRIVATE (cmd == SIOCDEVPRIVATE) is rejected;
 * only the numbered private cmds carry a payload.  Raw register access
 * requires CAP_SYS_RAWIO, and offsets are bounds-checked.
 */
static int bdx_siocdevprivate(struct net_device *ndev, struct ifreq *ifr,
			      void __user *udata, int cmd)
{
	struct bdx_priv *priv = netdev_priv(ndev);
	u32 data[3];
	int error;

	ENTER;

	DBG("jiffies=%ld cmd=%d\n", jiffies, cmd);
	if (cmd != SIOCDEVPRIVATE) {
		error = copy_from_user(data, udata, sizeof(data));
		if (error) {
			pr_err("can't copy from user\n");
			RET(-EFAULT);
		}
		DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]);
	} else {
		return -EOPNOTSUPP;
	}

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	switch (data[0]) {

	case BDX_OP_READ:
		error = bdx_range_check(priv, data[1]);
		if (error < 0)
			return error;
		data[2] = READ_REG(priv, data[1]);
		DBG("read_reg(0x%x)=0x%x (dec %d)\n", data[1], data[2],
		    data[2]);
		error = copy_to_user(udata, data, sizeof(data));
		if (error)
			RET(-EFAULT);
		break;

	case BDX_OP_WRITE:
		error = bdx_range_check(priv, data[1]);
		if (error < 0)
			return error;
		WRITE_REG(priv, data[1], data[2]);
		DBG("write_reg(0x%x, 0x%x)\n", data[1], data[2]);
		break;

	default:
		RET(-EOPNOTSUPP);
	}
	return 0;
}
691
692
693
694
695
696
697
698
699
700static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
701{
702 struct bdx_priv *priv = netdev_priv(ndev);
703 u32 reg, bit, val;
704
705 ENTER;
706 DBG2("vid=%d value=%d\n", (int)vid, enable);
707 if (unlikely(vid >= 4096)) {
708 pr_err("invalid VID: %u (> 4096)\n", vid);
709 RET();
710 }
711 reg = regVLAN_0 + (vid / 32) * 4;
712 bit = 1 << vid % 32;
713 val = READ_REG(priv, reg);
714 DBG2("reg=%x, val=%x, bit=%d\n", reg, val, bit);
715 if (enable)
716 val |= bit;
717 else
718 val &= ~bit;
719 DBG2("new val %x\n", val);
720 WRITE_REG(priv, reg, val);
721 RET();
722}
723
724
725
726
727
728
729
/* bdx_vlan_rx_add_vid - ndo callback: enable RX of VLAN @vid. */
static int bdx_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	__bdx_vlan_rx_vid(ndev, vid, 1);
	return 0;
}
735
736
737
738
739
740
741
/* bdx_vlan_rx_kill_vid - ndo callback: disable RX of VLAN @vid. */
static int bdx_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	__bdx_vlan_rx_vid(ndev, vid, 0);
	return 0;
}
747
748
749
750
751
752
753
754
/* bdx_change_mtu - ndo callback: record the new MTU; if the interface
 * is up, bounce it (close + open) so RX buffers and the chip's frame
 * length registers are re-sized.  Always returns 0.
 */
static int bdx_change_mtu(struct net_device *ndev, int new_mtu)
{
	ENTER;

	ndev->mtu = new_mtu;
	if (netif_running(ndev)) {
		bdx_close(ndev);
		bdx_open(ndev);
	}
	RET(0);
}
766
/* bdx_setmulti - ndo_set_rx_mode callback: program the RX filter.
 * Promiscuous sets PRM; allmulti fills the multicast hash with ones;
 * otherwise the hash is rebuilt from the device's mc list (exact-match
 * registers are cleared and only the imperfect hash is used).
 */
static void bdx_setmulti(struct net_device *ndev)
{
	struct bdx_priv *priv = netdev_priv(ndev);

	/* base filter: broadcast + multicast(hash) + OS enable */
	u32 rxf_val =
	    GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB | GMAC_RX_FILTER_OSEN;
	int i;

	ENTER;

	if (ndev->flags & IFF_PROMISC) {
		rxf_val |= GMAC_RX_FILTER_PRM;
	} else if (ndev->flags & IFF_ALLMULTI) {
		/* set all bits of the multicast hash: accept everything */
		for (i = 0; i < MAC_MCST_HASH_NUM; i++)
			WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0);
	} else if (!netdev_mc_empty(ndev)) {
		u8 hash;
		struct netdev_hw_addr *ha;
		u32 reg, val;

		/* clear the hash table ... */
		for (i = 0; i < MAC_MCST_HASH_NUM; i++)
			WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, 0);
		/* ... and the exact-match multicast registers */
		for (i = 0; i < MAC_MCST_NUM; i++) {
			WRITE_REG(priv, regRX_MAC_MCST0 + i * 8, 0);
			WRITE_REG(priv, regRX_MAC_MCST1 + i * 8, 0);
		}

		/* simple XOR-of-bytes hash selects one bit in the
		 * MAC_MCST_HASH_NUM x 32-bit hash table per address */
		netdev_for_each_mc_addr(ha, ndev) {
			hash = 0;
			for (i = 0; i < ETH_ALEN; i++)
				hash ^= ha->addr[i];
			reg = regRX_MCST_HASH0 + ((hash >> 5) << 2);
			val = READ_REG(priv, reg);
			val |= (1 << (hash % 32));
			WRITE_REG(priv, reg, val);
		}

	} else {
		DBG("only own mac %d\n", netdev_mc_count(ndev));
		rxf_val |= GMAC_RX_FILTER_AB;	/* already set above; no-op */
	}
	WRITE_REG(priv, regGMAC_RXF_A, rxf_val);

	RET();
}
824
/* bdx_set_mac - ndo_set_mac_address callback: copy the new address into
 * the netdev and program it into the chip.  Always returns 0; the
 * address is not validated here.
 */
static int bdx_set_mac(struct net_device *ndev, void *p)
{
	struct bdx_priv *priv = netdev_priv(ndev);
	struct sockaddr *addr = p;

	ENTER;

	eth_hw_addr_set(ndev, addr->sa_data);
	bdx_restore_mac(ndev, priv);
	RET(0);
}
839
/* bdx_read_mac - read the permanent MAC address out of the chip's three
 * 16-bit unicast registers into ndev->dev_addr.  Each register is read
 * twice back to back — presumably a hardware read quirk requiring a
 * second read for a stable value; TODO confirm against the datasheet.
 * Always returns 0.
 */
static int bdx_read_mac(struct bdx_priv *priv)
{
	u16 macAddress[3], i;
	u8 addr[ETH_ALEN];
	ENTER;

	macAddress[2] = READ_REG(priv, regUNC_MAC0_A);
	macAddress[2] = READ_REG(priv, regUNC_MAC0_A);
	macAddress[1] = READ_REG(priv, regUNC_MAC1_A);
	macAddress[1] = READ_REG(priv, regUNC_MAC1_A);
	macAddress[0] = READ_REG(priv, regUNC_MAC2_A);
	macAddress[0] = READ_REG(priv, regUNC_MAC2_A);
	for (i = 0; i < 3; i++) {
		/* high byte of each register is the earlier address octet */
		addr[i * 2 + 1] = macAddress[i];
		addr[i * 2] = macAddress[i] >> 8;
	}
	eth_hw_addr_set(priv->ndev, addr);
	RET(0);
}
859
860static u64 bdx_read_l2stat(struct bdx_priv *priv, int reg)
861{
862 u64 val;
863
864 val = READ_REG(priv, reg);
865 val |= ((u64) READ_REG(priv, reg + 8)) << 32;
866 return val;
867}
868
869
/* bdx_update_stats - refresh priv->hw_stats from the chip's L2 counter
 * banks.  struct bdx_stats is treated as a flat array of u64 filled in
 * register order; counters live in four non-contiguous banks, and the
 * BDX_ASSERTs pin each bank's expected end address and the struct size
 * so a struct/layout mismatch is caught at runtime.
 */
static void bdx_update_stats(struct bdx_priv *priv)
{
	struct bdx_stats *stats = &priv->hw_stats;
	u64 *stats_vector = (u64 *) stats;	/* struct-as-array fill */
	int i;
	int addr;

	/* bank 1: 12 counters at 0x7200..0x72B0 */
	addr = 0x7200;
	for (i = 0; i < 12; i++) {
		stats_vector[i] = bdx_read_l2stat(priv, addr);
		addr += 0x10;
	}
	BDX_ASSERT(addr != 0x72C0);

	/* bank 2: 4 counters at 0x72F0..0x7320 */
	addr = 0x72F0;
	for (; i < 16; i++) {
		stats_vector[i] = bdx_read_l2stat(priv, addr);
		addr += 0x10;
	}
	BDX_ASSERT(addr != 0x7330);

	/* bank 3: 3 counters at 0x7370..0x7390 */
	addr = 0x7370;
	for (; i < 19; i++) {
		stats_vector[i] = bdx_read_l2stat(priv, addr);
		addr += 0x10;
	}
	BDX_ASSERT(addr != 0x73A0);

	/* bank 4: 4 counters at 0x73C0..0x73F0 */
	addr = 0x73C0;
	for (; i < 23; i++) {
		stats_vector[i] = bdx_read_l2stat(priv, addr);
		addr += 0x10;
	}
	BDX_ASSERT(addr != 0x7400);
	BDX_ASSERT((sizeof(struct bdx_stats) / sizeof(u64)) != i);
}
908
909static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
910 u16 rxd_vlan);
911static void print_rxfd(struct rxf_desc *rxfd);
912
913
914
915
916
/* bdx_rxdb_destroy - free an rx database allocated by bdx_rxdb_create(). */
static void bdx_rxdb_destroy(struct rxdb *db)
{
	vfree(db);
}
921
922static struct rxdb *bdx_rxdb_create(int nelem)
923{
924 struct rxdb *db;
925 int i;
926
927 db = vmalloc(sizeof(struct rxdb)
928 + (nelem * sizeof(int))
929 + (nelem * sizeof(struct rx_map)));
930 if (likely(db != NULL)) {
931 db->stack = (int *)(db + 1);
932 db->elems = (void *)(db->stack + nelem);
933 db->nelem = nelem;
934 db->top = nelem;
935 for (i = 0; i < nelem; i++)
936 db->stack[i] = nelem - i - 1;
937
938 }
939
940 return db;
941}
942
/* bdx_rxdb_alloc_elem - pop a free element index; caller must ensure
 * the stack is non-empty (asserted).
 */
static inline int bdx_rxdb_alloc_elem(struct rxdb *db)
{
	BDX_ASSERT(db->top <= 0);
	return db->stack[--(db->top)];
}
948
/* bdx_rxdb_addr_elem - return the address of element @n (bounds asserted). */
static inline void *bdx_rxdb_addr_elem(struct rxdb *db, int n)
{
	BDX_ASSERT((n < 0) || (n >= db->nelem));
	return db->elems + n;
}
954
/* bdx_rxdb_available - number of free (unallocated) elements. */
static inline int bdx_rxdb_available(struct rxdb *db)
{
	return db->top;
}
959
/* bdx_rxdb_free_elem - push index @n back on the free stack (bounds asserted). */
static inline void bdx_rxdb_free_elem(struct rxdb *db, int n)
{
	BDX_ASSERT((n >= db->nelem) || (n < 0));
	db->stack[(db->top)++] = n;
}
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
/* bdx_rx_init - set up the RX path: the RXD (descriptor) and RXF (free
 * buffer) fifos, plus the rxdb sized to the number of rxf descriptors
 * the fifo can hold.  pktsz is the per-buffer size: MTU + VLAN header.
 * Returns 0 or -ENOMEM (partial allocations are freed by bdx_rx_free()
 * via the caller's error path).
 */
static int bdx_rx_init(struct bdx_priv *priv)
{
	ENTER;

	if (bdx_fifo_init(priv, &priv->rxd_fifo0.m, priv->rxd_size,
			  regRXD_CFG0_0, regRXD_CFG1_0,
			  regRXD_RPTR_0, regRXD_WPTR_0))
		goto err_mem;
	if (bdx_fifo_init(priv, &priv->rxf_fifo0.m, priv->rxf_size,
			  regRXF_CFG0_0, regRXF_CFG1_0,
			  regRXF_RPTR_0, regRXF_WPTR_0))
		goto err_mem;
	priv->rxdb = bdx_rxdb_create(priv->rxf_fifo0.m.memsz /
				     sizeof(struct rxf_desc));
	if (!priv->rxdb)
		goto err_mem;

	priv->rxf_fifo0.m.pktsz = priv->ndev->mtu + VLAN_ETH_HLEN;
	return 0;

err_mem:
	netdev_err(priv->ndev, "Rx init failed\n");
	return -ENOMEM;
}
1012
1013
1014
1015
1016
1017
/* bdx_rx_free_skbs - release every skb still owned by the RX path.
 * First drains the free stack, zeroing dma on those (unused) entries so
 * the second pass can tell used from free; then unmaps and frees the
 * skb of every entry that still has a dma mapping.
 */
static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
{
	struct rx_map *dm;
	struct rxdb *db = priv->rxdb;
	u16 i;

	ENTER;
	DBG("total=%d free=%d busy=%d\n", db->nelem, bdx_rxdb_available(db),
	    db->nelem - bdx_rxdb_available(db));
	while (bdx_rxdb_available(db) > 0) {
		i = bdx_rxdb_alloc_elem(db);
		dm = bdx_rxdb_addr_elem(db, i);
		dm->dma = 0;	/* mark as "no skb attached" */
	}
	for (i = 0; i < db->nelem; i++) {
		dm = bdx_rxdb_addr_elem(db, i);
		if (dm->dma) {
			dma_unmap_single(&priv->pdev->dev, dm->dma,
					 f->m.pktsz, DMA_FROM_DEVICE);
			dev_kfree_skb(dm->skb);
		}
	}
}
1041
1042
1043
1044
1045
1046
1047
/* bdx_rx_free - tear down the RX path: free posted skbs, destroy the
 * rxdb, then release both RX fifos (each free is NULL/va-safe, so this
 * works after a partial bdx_rx_init()).
 */
static void bdx_rx_free(struct bdx_priv *priv)
{
	ENTER;
	if (priv->rxdb) {
		bdx_rx_free_skbs(priv, &priv->rxf_fifo0);
		bdx_rxdb_destroy(priv->rxdb);
		priv->rxdb = NULL;
	}
	bdx_fifo_free(priv, &priv->rxf_fifo0.m);
	bdx_fifo_free(priv, &priv->rxd_fifo0.m);

	RET();
}
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
/* bdx_rx_alloc_skbs - refill the RX free fifo: for each free rxdb slot
 * (minus one, keeping the fifo from appearing empty == full) allocate
 * an skb, DMA-map it and post an rxf descriptor.  Finally the new write
 * pointer is published to the chip.
 *
 * NOTE(review): dma_map_single() result is not checked with
 * dma_mapping_error() — a failed mapping would be handed to hardware.
 */
static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
{
	struct sk_buff *skb;
	struct rxf_desc *rxfd;
	struct rx_map *dm;
	int dno, delta, idx;
	struct rxdb *db = priv->rxdb;

	ENTER;
	dno = bdx_rxdb_available(db) - 1;
	while (dno > 0) {
		skb = netdev_alloc_skb(priv->ndev, f->m.pktsz + NET_IP_ALIGN);
		if (!skb)
			break;	/* retry on next refill */

		skb_reserve(skb, NET_IP_ALIGN);

		idx = bdx_rxdb_alloc_elem(db);
		dm = bdx_rxdb_addr_elem(db, idx);
		dm->dma = dma_map_single(&priv->pdev->dev, skb->data,
					 f->m.pktsz, DMA_FROM_DEVICE);
		dm->skb = skb;
		rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
		rxfd->info = CPU_CHIP_SWAP32(0x10003);	/* INFO=1 BC=3 */
		rxfd->va_lo = idx;	/* rxdb index rides in va_lo */
		rxfd->pa_lo = CPU_CHIP_SWAP32(L32_64(dm->dma));
		rxfd->pa_hi = CPU_CHIP_SWAP32(H32_64(dm->dma));
		rxfd->len = CPU_CHIP_SWAP32(f->m.pktsz);
		print_rxfd(rxfd);

		f->m.wptr += sizeof(struct rxf_desc);
		delta = f->m.wptr - f->m.memsz;
		if (unlikely(delta >= 0)) {
			/* wptr wrapped: move the part written into the
			 * FIFO_EXTRA_SPACE tail back to the ring start */
			f->m.wptr = delta;
			if (delta > 0) {
				memcpy(f->m.va, f->m.va + f->m.memsz, delta);
				DBG("wrapped descriptor\n");
			}
		}
		dno--;
	}

	/* publish the new write pointer to the chip */
	WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
	RET();
}
1124
/* NETIF_RX_MUX - hand a received skb to the stack, attaching the VLAN
 * tag first when the descriptor says one was stripped by hardware.
 */
static inline void
NETIF_RX_MUX(struct bdx_priv *priv, u32 rxd_val1, u16 rxd_vlan,
	     struct sk_buff *skb)
{
	ENTER;
	DBG("rxdd->flags.bits.vtag=%d\n", GET_RXD_VTAG(rxd_val1));
	if (GET_RXD_VTAG(rxd_val1)) {
		DBG("%s: vlan rcv vlan '%x' vtag '%x'\n",
		    priv->ndev->name,
		    GET_RXD_VLAN_ID(rxd_vlan),
		    GET_RXD_VTAG(rxd_val1));
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), GET_RXD_VLAN_TCI(rxd_vlan));
	}
	netif_receive_skb(skb);
}
1140
/* bdx_recycle_skb - repost an already-mapped RX buffer (identified by
 * the rxdb index carried in rxdd->va_lo) back to the RX free fifo
 * without reallocating the skb — used for error frames and after
 * copybreak.  Does NOT write the chip WPTR; the caller's refill pass
 * publishes it.
 */
static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
{
	struct rxf_desc *rxfd;
	struct rx_map *dm;
	struct rxf_fifo *f;
	struct rxdb *db;
	int delta;

	ENTER;
	DBG("priv=%p rxdd=%p\n", priv, rxdd);
	f = &priv->rxf_fifo0;
	db = priv->rxdb;
	DBG("db=%p f=%p\n", db, f);
	dm = bdx_rxdb_addr_elem(db, rxdd->va_lo);
	DBG("dm=%p\n", dm);
	rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
	rxfd->info = CPU_CHIP_SWAP32(0x10003);	/* INFO=1 BC=3 */
	rxfd->va_lo = rxdd->va_lo;	/* keep same rxdb index */
	rxfd->pa_lo = CPU_CHIP_SWAP32(L32_64(dm->dma));
	rxfd->pa_hi = CPU_CHIP_SWAP32(H32_64(dm->dma));
	rxfd->len = CPU_CHIP_SWAP32(f->m.pktsz);
	print_rxfd(rxfd);

	f->m.wptr += sizeof(struct rxf_desc);
	delta = f->m.wptr - f->m.memsz;
	if (unlikely(delta >= 0)) {
		/* wptr wrapped: copy extra-space tail back to ring start */
		f->m.wptr = delta;
		if (delta > 0) {
			memcpy(f->m.va, f->m.va + f->m.memsz, delta);
			DBG("wrapped descriptor\n");
		}
	}
	RET();
}
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
/* bdx_rx_receive - NAPI RX: walk the RXD fifo from rptr to the chip's
 * wptr, up to @budget packets.
 *
 * Per descriptor: error frames are counted and their buffer recycled;
 * short frames (< BDX_COPYBREAK) are copied into a fresh small skb and
 * the original buffer recycled; otherwise the buffer is unmapped and
 * its rxdb slot freed.  Checksum is marked UNNECESSARY unless the chip
 * reports an unknown packet type.  Finally the rptr is published and
 * the free fifo refilled.  Returns the number of packets delivered.
 */
static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
{
	struct net_device *ndev = priv->ndev;
	struct sk_buff *skb, *skb2;
	struct rxd_desc *rxdd;
	struct rx_map *dm;
	struct rxf_fifo *rxf_fifo;
	int tmp_len, size;
	int done = 0;
	int max_done = BDX_MAX_RX_DONE;
	struct rxdb *db = NULL;

	u32 rxd_val1;
	u16 len;
	u16 rxd_vlan;

	ENTER;
	max_done = budget;

	f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_WR_PTR;

	size = f->m.wptr - f->m.rptr;
	if (size < 0)
		size = f->m.memsz + size;	/* wptr wrapped around */

	while (size > 0) {

		rxdd = (struct rxd_desc *)(f->m.va + f->m.rptr);
		rxd_val1 = CPU_CHIP_SWAP32(rxdd->rxd_val1);

		len = CPU_CHIP_SWAP16(rxdd->len);

		rxd_vlan = CPU_CHIP_SWAP16(rxdd->rxd_vlan);

		print_rxdd(rxdd, rxd_val1, len, rxd_vlan);

		/* descriptor size in bytes: BC field is in 8-byte units */
		tmp_len = GET_RXD_BC(rxd_val1) << 3;
		BDX_ASSERT(tmp_len <= 0);
		size -= tmp_len;
		if (size < 0)	/* partially written descriptor: stop */
			break;

		f->m.rptr += tmp_len;

		tmp_len = f->m.rptr - f->m.memsz;
		if (unlikely(tmp_len >= 0)) {
			/* rptr wrapped: copy the ring start into the
			 * extra space so the wrapped descriptor that was
			 * just consumed reads contiguously next time */
			f->m.rptr = tmp_len;
			if (tmp_len > 0) {
				DBG("wrapped desc rptr=%d tmp_len=%d\n",
				    f->m.rptr, tmp_len);
				memcpy(f->m.va + f->m.memsz, f->m.va, tmp_len);
			}
		}

		if (unlikely(GET_RXD_ERR(rxd_val1))) {
			DBG("rxd_err = 0x%x\n", GET_RXD_ERR(rxd_val1));
			ndev->stats.rx_errors++;
			bdx_recycle_skb(priv, rxdd);
			continue;
		}

		rxf_fifo = &priv->rxf_fifo0;
		db = priv->rxdb;
		dm = bdx_rxdb_addr_elem(db, rxdd->va_lo);
		skb = dm->skb;

		if (len < BDX_COPYBREAK &&
		    (skb2 = netdev_alloc_skb(priv->ndev, len + NET_IP_ALIGN))) {
			/* copybreak: hand up a small copy, keep the big
			 * mapped buffer for reuse */
			skb_reserve(skb2, NET_IP_ALIGN);

			dma_sync_single_for_cpu(&priv->pdev->dev, dm->dma,
						rxf_fifo->m.pktsz,
						DMA_FROM_DEVICE);
			memcpy(skb2->data, skb->data, len);
			bdx_recycle_skb(priv, rxdd);
			skb = skb2;
		} else {
			dma_unmap_single(&priv->pdev->dev, dm->dma,
					 rxf_fifo->m.pktsz, DMA_FROM_DEVICE);
			bdx_rxdb_free_elem(db, rxdd->va_lo);
		}

		ndev->stats.rx_bytes += len;

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, ndev);

		/* pkt_id 0 == unknown protocol: no hw checksum available */
		if (GET_RXD_PKT_ID(rxd_val1) == 0)
			skb_checksum_none_assert(skb);
		else
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		NETIF_RX_MUX(priv, rxd_val1, rxd_vlan, skb);

		if (++done >= max_done)
			break;
	}

	ndev->stats.rx_packets += done;

	/* publish the new read pointer to the chip */
	WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);

	bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);

	RET(done);
}
1297
1298
1299
1300
/* print_rxdd - debug dump of a decoded RX descriptor (DBG-only). */
static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
		       u16 rxd_vlan)
{
	DBG("ERROR: rxdd bc %d rxfq %d to %d type %d err %d rxp %d pkt_id %d vtag %d len %d vlan_id %d cfi %d prio %d va_lo %d va_hi %d\n",
	    GET_RXD_BC(rxd_val1), GET_RXD_RXFQ(rxd_val1), GET_RXD_TO(rxd_val1),
	    GET_RXD_TYPE(rxd_val1), GET_RXD_ERR(rxd_val1),
	    GET_RXD_RXP(rxd_val1), GET_RXD_PKT_ID(rxd_val1),
	    GET_RXD_VTAG(rxd_val1), len, GET_RXD_VLAN_ID(rxd_vlan),
	    GET_RXD_CFI(rxd_vlan), GET_RXD_PRIO(rxd_vlan), rxdd->va_lo,
	    rxdd->va_hi);
}
1312
/* print_rxfd - debug dump of an RX free descriptor in chip byte order. */
static void print_rxfd(struct rxf_desc *rxfd)
{
	DBG("=== RxF desc CHIP ORDER/ENDIANNESS =============\n"
	    "info 0x%x va_lo %u pa_lo 0x%x pa_hi 0x%x len 0x%x\n",
	    rxfd->info, rxfd->va_lo, rxfd->pa_lo, rxfd->pa_hi, rxfd->len);
}
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
/* __bdx_tx_db_ptr_next - advance a tx-db pointer (rptr or wptr) one
 * slot, wrapping at the end of the circular buffer.  The asserts check
 * the pointer is one of the db's own pointers and inside its storage.
 */
static inline void __bdx_tx_db_ptr_next(struct txdb *db, struct tx_map **pptr)
{
	BDX_ASSERT(db == NULL || pptr == NULL);

	BDX_ASSERT(*pptr != db->rptr &&
		   *pptr != db->wptr);

	BDX_ASSERT(*pptr < db->start ||
		   *pptr >= db->end);

	++*pptr;
	if (unlikely(*pptr == db->end))
		*pptr = db->start;	/* wrap around */
}
1380
1381
1382
1383
1384
/* bdx_tx_db_inc_rptr - consume one tx-db entry (asserts db not empty). */
static inline void bdx_tx_db_inc_rptr(struct txdb *db)
{
	BDX_ASSERT(db->rptr == db->wptr);	/* can't read from empty db */
	__bdx_tx_db_ptr_next(db, &db->rptr);
}
1390
1391
1392
1393
1394
/* bdx_tx_db_inc_wptr - commit one tx-db entry (asserts db not full). */
static inline void bdx_tx_db_inc_wptr(struct txdb *db)
{
	__bdx_tx_db_ptr_next(db, &db->wptr);
	BDX_ASSERT(db->rptr == db->wptr);	/* must not overwrite rptr */

}
1401
1402
1403
1404
1405
1406
1407
1408
/* bdx_tx_db_init - allocate the software tx database mirroring the TX
 * fifo (twice the fifo's size code, so one tx_map per possible
 * descriptor fragment).  One slot is sacrificed so that rptr == wptr
 * unambiguously means "empty".  Returns 0 or -ENOMEM.
 */
static int bdx_tx_db_init(struct txdb *d, int sz_type)
{
	int memsz = FIFO_SIZE * (1 << (sz_type + 1));

	d->start = vmalloc(memsz);
	if (!d->start)
		return -ENOMEM;

	/* size = capacity - 1: full is (wptr + 1 == rptr) */
	d->size = memsz / sizeof(struct tx_map) - 1;
	d->end = d->start + d->size + 1;	/* one past last element */

	/* empty db */
	d->rptr = d->start;
	d->wptr = d->start;

	return 0;
}
1431
1432
1433
1434
1435
/* bdx_tx_db_close - free the tx database storage (vfree is NULL-safe). */
static void bdx_tx_db_close(struct txdb *d)
{
	BDX_ASSERT(d == NULL);

	vfree(d->start);
	d->start = NULL;
}
1443
1444
1445
1446
1447
1448
1449
/* Precomputed TX descriptor sizes, indexed by number of skb fragments;
 * filled once at module init by init_txd_sizes().
 */
static struct {
	u16 bytes;
	u16 qwords;	/* qword = 64 bit */
} txd_sizes[MAX_SKB_FRAGS + 1];
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
/* bdx_tx_map_skb - DMA-map an skb (linear part + page fragments) and
 * fill the descriptor's PBL array; every mapping is also recorded in
 * the tx db so bdx_tx_cleanup() can unmap it later.  The final db entry
 * stores the skb pointer itself, with a negative len encoding the total
 * descriptor size for this fragment count.
 *
 * NOTE(review): dma mapping results are not checked with
 * dma_mapping_error().
 */
static inline void
bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb,
	       struct txd_desc *txdd)
{
	struct txdb *db = &priv->txdb;
	struct pbl *pbl = &txdd->pbl[0];
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* linear part of the skb */
	db->wptr->len = skb_headlen(skb);
	db->wptr->addr.dma = dma_map_single(&priv->pdev->dev, skb->data,
					    db->wptr->len, DMA_TO_DEVICE);
	pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
	pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma));
	pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma));
	DBG("=== pbl len: 0x%x ================\n", pbl->len);
	DBG("=== pbl pa_lo: 0x%x ================\n", pbl->pa_lo);
	DBG("=== pbl pa_hi: 0x%x ================\n", pbl->pa_hi);
	bdx_tx_db_inc_wptr(db);

	/* one PBL entry per page fragment */
	for (i = 0; i < nr_frags; i++) {
		const skb_frag_t *frag;

		frag = &skb_shinfo(skb)->frags[i];
		db->wptr->len = skb_frag_size(frag);
		db->wptr->addr.dma = skb_frag_dma_map(&priv->pdev->dev, frag,
						      0, skb_frag_size(frag),
						      DMA_TO_DEVICE);

		pbl++;
		pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
		pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma));
		pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma));
		bdx_tx_db_inc_wptr(db);
	}

	/* terminal entry: negative len marks it, and holds the skb */
	db->wptr->len = -txd_sizes[nr_frags].bytes;
	db->wptr->addr.skb = skb;
	bdx_tx_db_inc_wptr(db);
}
1508
1509
1510
1511
1512static void __init init_txd_sizes(void)
1513{
1514 int i, lwords;
1515
1516
1517
1518 for (i = 0; i < MAX_SKB_FRAGS + 1; i++) {
1519 lwords = 7 + (i * 3);
1520 if (lwords & 1)
1521 lwords++;
1522 txd_sizes[i].qwords = lwords >> 1;
1523 txd_sizes[i].bytes = lwords << 2;
1524 }
1525}
1526
1527
1528
/* bdx_tx_init - set up the TX path: TXD (descriptor) and TXF (free/
 * completion) fifos plus the software tx db, and initialise the
 * available-space accounting.  Returns 0 or -ENOMEM (partial
 * allocations are released by the caller's error path via bdx_tx_free).
 */
static int bdx_tx_init(struct bdx_priv *priv)
{
	if (bdx_fifo_init(priv, &priv->txd_fifo0.m, priv->txd_size,
			  regTXD_CFG0_0,
			  regTXD_CFG1_0, regTXD_RPTR_0, regTXD_WPTR_0))
		goto err_mem;
	if (bdx_fifo_init(priv, &priv->txf_fifo0.m, priv->txf_size,
			  regTXF_CFG0_0,
			  regTXF_CFG1_0, regTXF_RPTR_0, regTXF_WPTR_0))
		goto err_mem;

	/* db sized for the bigger of the two fifos */
	if (bdx_tx_db_init(&priv->txdb, max(priv->txd_size, priv->txf_size)))
		goto err_mem;

	priv->tx_level = BDX_MAX_TX_LEVEL;
#ifdef BDX_DELAY_WPTR
	priv->tx_update_mark = priv->tx_level - 1024;
#endif
	return 0;

err_mem:
	netdev_err(priv->ndev, "Tx init failed\n");
	return -ENOMEM;
}
1555
1556
1557
1558
1559
1560
1561
/* bdx_tx_space - bytes currently free in the TX descriptor fifo,
 * computed from the chip's read pointer vs our write pointer (wrapped
 * when rptr <= wptr).
 */
static inline int bdx_tx_space(struct bdx_priv *priv)
{
	struct txd_fifo *f = &priv->txd_fifo0;
	int fsize;

	f->m.rptr = READ_REG(priv, f->m.reg_RPTR) & TXF_WPTR_WR_PTR;
	fsize = f->m.rptr - f->m.wptr;
	if (fsize <= 0)
		fsize = f->m.memsz + fsize;	/* wrapped */
	return fsize;
}
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
/**
 * bdx_tx_transmit - send a packet to the NIC
 * @skb: packet to send
 * @ndev: network device assigned to the NIC
 *
 * Builds a Tx descriptor with checksum-offload, TSO and VLAN flags,
 * maps the skb for DMA via bdx_tx_map_skb(), advances the TxD fifo
 * write pointer (handling wrap-around), and kicks the hardware.  The
 * queue is stopped when the software tx_level accounting falls below
 * BDX_MIN_TX_LEVEL; bdx_tx_cleanup() wakes it again.
 *
 * Returns NETDEV_TX_OK; the packet is always consumed.
 */
static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
				   struct net_device *ndev)
{
	struct bdx_priv *priv = netdev_priv(ndev);
	struct txd_fifo *f = &priv->txd_fifo0;
	int txd_checksum = 7;	/* full checksum offload by default */
	int txd_lgsnd = 0;
	int txd_vlan_id = 0;
	int txd_vtag = 0;
	int txd_mss = 0;

	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct txd_desc *txdd;
	int len;
	unsigned long flags;

	ENTER;
	/* local_irq_save() + spin_lock() together are equivalent to
	 * spin_lock_irqsave(); released as a pair at the end */
	local_irq_save(flags);
	spin_lock(&priv->tx_lock);

	/* build tx descriptor in place at the current write pointer */
	BDX_ASSERT(f->m.wptr >= f->m.memsz);	/* started with valid wptr */
	txdd = (struct txd_desc *)(f->m.va + f->m.wptr);
	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
		txd_checksum = 0;

	if (skb_shinfo(skb)->gso_size) {
		/* large-send (TSO) packet */
		txd_mss = skb_shinfo(skb)->gso_size;
		txd_lgsnd = 1;
		DBG("skb %p skb len %d gso size = %d\n", skb, skb->len,
		    txd_mss);
	}

	if (skb_vlan_tag_present(skb)) {
		/* hardware inserts the tag; VLAN ID is 12 bits */
		txd_vlan_id = skb_vlan_tag_get(skb) & BITS_MASK(12);
		txd_vtag = 1;
	}

	txdd->length = CPU_CHIP_SWAP16(skb->len);
	txdd->mss = CPU_CHIP_SWAP16(txd_mss);
	txdd->txd_val1 =
	    CPU_CHIP_SWAP32(TXD_W1_VAL
			    (txd_sizes[nr_frags].qwords, txd_checksum, txd_vtag,
			     txd_lgsnd, txd_vlan_id));
	DBG("=== TxD desc =====================\n");
	DBG("=== w1: 0x%x ================\n", txdd->txd_val1);
	DBG("=== w2: mss 0x%x len 0x%x\n", txdd->mss, txdd->length);

	bdx_tx_map_skb(priv, skb, txdd);

	/* advance the write pointer; if the descriptor ran past the end
	 * of the fifo, copy the overflow back to the fifo start
	 * (the fifo vbuffer has extra room after memsz for this) */
	f->m.wptr += txd_sizes[nr_frags].bytes;
	len = f->m.wptr - f->m.memsz;
	if (unlikely(len >= 0)) {
		f->m.wptr = len;
		if (len > 0) {
			BDX_ASSERT(len > f->m.memsz);
			memcpy(f->m.va, f->m.va + f->m.memsz, len);
		}
	}
	BDX_ASSERT(f->m.wptr >= f->m.memsz);	/* finished with valid wptr */

	priv->tx_level -= txd_sizes[nr_frags].bytes;
	BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL);
#ifdef BDX_DELAY_WPTR
	if (priv->tx_level > priv->tx_update_mark) {
		/* plenty of space left: tell HW about the new descriptor
		 * immediately */
		WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
	} else {
		/* getting low on space: batch WPTR updates to reduce
		 * register writes */
		if (priv->tx_noupd++ > BDX_NO_UPD_PACKETS) {
			priv->tx_noupd = 0;
			WRITE_REG(priv, f->m.reg_WPTR,
				  f->m.wptr & TXF_WPTR_WR_PTR);
		}
	}
#else
	/* kick the hardware for every packet */
	WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);

#endif
#ifdef BDX_LLTX
	netif_trans_update(ndev);	/* LLTX driver: update trans_start ourselves */
#endif
	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;

	if (priv->tx_level < BDX_MIN_TX_LEVEL) {
		DBG("%s: %s: TX Q STOP level %d\n",
		    BDX_DRV_NAME, ndev->name, priv->tx_level);
		netif_stop_queue(ndev);
	}

	spin_unlock_irqrestore(&priv->tx_lock, flags);
	return NETDEV_TX_OK;
}
1688
1689
1690
1691
1692
1693
1694
1695
/**
 * bdx_tx_cleanup - reclaim completed Tx descriptors
 * @priv: NIC private structure
 *
 * Walks the TxF (completion) fifo written by hardware.  For each
 * completed packet, db entries with len > 0 are DMA fragments to
 * unmap; the terminating entry carries the negated descriptor size
 * (written by bdx_tx_map_skb()), followed by the owning skb pointer.
 * Finally updates the hardware read pointer and wakes the Tx queue if
 * enough space was reclaimed.
 */
static void bdx_tx_cleanup(struct bdx_priv *priv)
{
	struct txf_fifo *f = &priv->txf_fifo0;
	struct txdb *db = &priv->txdb;
	int tx_level = 0;

	ENTER;
	f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_MASK;
	BDX_ASSERT(f->m.rptr >= f->m.memsz);	/* started with valid rptr */

	while (f->m.wptr != f->m.rptr) {
		f->m.rptr += BDX_TXF_DESC_SZ;
		f->m.rptr &= f->m.size_mask;

		/* unmap all the fragments of this packet; the first db
		 * entry of a packet is never the zero/negative marker */
		BDX_ASSERT(db->rptr->len == 0);
		do {
			BDX_ASSERT(db->rptr->addr.dma == 0);
			dma_unmap_page(&priv->pdev->dev, db->rptr->addr.dma,
				       db->rptr->len, DMA_TO_DEVICE);
			bdx_tx_db_inc_rptr(db);
		} while (db->rptr->len > 0);
		/* marker len is negative, so this adds the descriptor
		 * size back to the reclaimed byte count */
		tx_level -= db->rptr->len;

		/* now comes the skb pointer - free it */
		dev_consume_skb_irq(db->rptr->addr.skb);
		bdx_tx_db_inc_rptr(db);
	}

	/* let HW know which TxF descriptors were cleaned */
	BDX_ASSERT((f->m.wptr & TXF_WPTR_WR_PTR) >= f->m.memsz);
	WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);

	/* we reclaimed resources; if the queue was stopped by the xmit
	 * callback, resume transmission, using tx_lock to synchronize
	 * with bdx_tx_transmit() */
	spin_lock(&priv->tx_lock);
	priv->tx_level += tx_level;
	BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL);
#ifdef BDX_DELAY_WPTR
	if (priv->tx_noupd) {
		/* flush any WPTR update deferred by the xmit path */
		priv->tx_noupd = 0;
		WRITE_REG(priv, priv->txd_fifo0.m.reg_WPTR,
			  priv->txd_fifo0.m.wptr & TXF_WPTR_WR_PTR);
	}
#endif

	if (unlikely(netif_queue_stopped(priv->ndev) &&
		     netif_carrier_ok(priv->ndev) &&
		     (priv->tx_level >= BDX_MIN_TX_LEVEL))) {
		DBG("%s: %s: TX Q WAKE level %d\n",
		    BDX_DRV_NAME, priv->ndev->name, priv->tx_level);
		netif_wake_queue(priv->ndev);
	}
	spin_unlock(&priv->tx_lock);
}
1752
1753
1754
1755
1756
1757
1758
1759static void bdx_tx_free_skbs(struct bdx_priv *priv)
1760{
1761 struct txdb *db = &priv->txdb;
1762
1763 ENTER;
1764 while (db->rptr != db->wptr) {
1765 if (likely(db->rptr->len))
1766 dma_unmap_page(&priv->pdev->dev, db->rptr->addr.dma,
1767 db->rptr->len, DMA_TO_DEVICE);
1768 else
1769 dev_kfree_skb(db->rptr->addr.skb);
1770 bdx_tx_db_inc_rptr(db);
1771 }
1772 RET();
1773}
1774
1775
/* bdx_tx_free - frees all Tx resources: skbs first, then the two
 * fifos, then the software descriptor db */
static void bdx_tx_free(struct bdx_priv *priv)
{
	ENTER;
	bdx_tx_free_skbs(priv);
	bdx_fifo_free(priv, &priv->txd_fifo0.m);
	bdx_fifo_free(priv, &priv->txf_fifo0.m);
	bdx_tx_db_close(&priv->txdb);
}
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796static void bdx_tx_push_desc(struct bdx_priv *priv, void *data, int size)
1797{
1798 struct txd_fifo *f = &priv->txd_fifo0;
1799 int i = f->m.memsz - f->m.wptr;
1800
1801 if (size == 0)
1802 return;
1803
1804 if (i > size) {
1805 memcpy(f->m.va + f->m.wptr, data, size);
1806 f->m.wptr += size;
1807 } else {
1808 memcpy(f->m.va + f->m.wptr, data, i);
1809 f->m.wptr = size - i;
1810 memcpy(f->m.va, data + i, f->m.wptr);
1811 }
1812 WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
1813}
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size)
1825{
1826 int timer = 0;
1827 ENTER;
1828
1829 while (size > 0) {
1830
1831
1832
1833 int avail = bdx_tx_space(priv) - 8;
1834 if (avail <= 0) {
1835 if (timer++ > 300) {
1836 DBG("timeout while writing desc to TxD fifo\n");
1837 break;
1838 }
1839 udelay(50);
1840 continue;
1841 }
1842 avail = min(avail, size);
1843 DBG("about to push %d bytes starting %p size %d\n", avail,
1844 data, size);
1845 bdx_tx_push_desc(priv, data, avail);
1846 size -= avail;
1847 data += avail;
1848 }
1849 RET();
1850}
1851
/* net_device callbacks for every port of this NIC */
static const struct net_device_ops bdx_netdev_ops = {
	.ndo_open = bdx_open,
	.ndo_stop = bdx_close,
	.ndo_start_xmit = bdx_tx_transmit,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_siocdevprivate = bdx_siocdevprivate,
	.ndo_set_rx_mode = bdx_setmulti,
	.ndo_change_mtu = bdx_change_mtu,
	.ndo_set_mac_address = bdx_set_mac,
	.ndo_vlan_rx_add_vid = bdx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = bdx_vlan_rx_kill_vid,
};
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882static int
1883bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1884{
1885 struct net_device *ndev;
1886 struct bdx_priv *priv;
1887 int err, pci_using_dac, port;
1888 unsigned long pciaddr;
1889 u32 regionSize;
1890 struct pci_nic *nic;
1891
1892 ENTER;
1893
1894 nic = vmalloc(sizeof(*nic));
1895 if (!nic)
1896 RET(-ENOMEM);
1897
1898
1899 err = pci_enable_device(pdev);
1900 if (err)
1901 goto err_pci;
1902
1903 if (!(err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) &&
1904 !(err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)))) {
1905 pci_using_dac = 1;
1906 } else {
1907 if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) ||
1908 (err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))) {
1909 pr_err("No usable DMA configuration, aborting\n");
1910 goto err_dma;
1911 }
1912 pci_using_dac = 0;
1913 }
1914
1915 err = pci_request_regions(pdev, BDX_DRV_NAME);
1916 if (err)
1917 goto err_dma;
1918
1919 pci_set_master(pdev);
1920
1921 pciaddr = pci_resource_start(pdev, 0);
1922 if (!pciaddr) {
1923 err = -EIO;
1924 pr_err("no MMIO resource\n");
1925 goto err_out_res;
1926 }
1927 regionSize = pci_resource_len(pdev, 0);
1928 if (regionSize < BDX_REGS_SIZE) {
1929 err = -EIO;
1930 pr_err("MMIO resource (%x) too small\n", regionSize);
1931 goto err_out_res;
1932 }
1933
1934 nic->regs = ioremap(pciaddr, regionSize);
1935 if (!nic->regs) {
1936 err = -EIO;
1937 pr_err("ioremap failed\n");
1938 goto err_out_res;
1939 }
1940
1941 if (pdev->irq < 2) {
1942 err = -EIO;
1943 pr_err("invalid irq (%d)\n", pdev->irq);
1944 goto err_out_iomap;
1945 }
1946 pci_set_drvdata(pdev, nic);
1947
1948 if (pdev->device == 0x3014)
1949 nic->port_num = 2;
1950 else
1951 nic->port_num = 1;
1952
1953 print_hw_id(pdev);
1954
1955 bdx_hw_reset_direct(nic->regs);
1956
1957 nic->irq_type = IRQ_INTX;
1958#ifdef BDX_MSI
1959 if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) {
1960 err = pci_enable_msi(pdev);
1961 if (err)
1962 pr_err("Can't enable msi. error is %d\n", err);
1963 else
1964 nic->irq_type = IRQ_MSI;
1965 } else
1966 DBG("HW does not support MSI\n");
1967#endif
1968
1969
1970 for (port = 0; port < nic->port_num; port++) {
1971 ndev = alloc_etherdev(sizeof(struct bdx_priv));
1972 if (!ndev) {
1973 err = -ENOMEM;
1974 goto err_out_iomap;
1975 }
1976
1977 ndev->netdev_ops = &bdx_netdev_ops;
1978 ndev->tx_queue_len = BDX_NDEV_TXQ_LEN;
1979
1980 bdx_set_ethtool_ops(ndev);
1981
1982
1983
1984 ndev->if_port = port;
1985 ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
1986 | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
1987 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM
1988 ;
1989 ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
1990 NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX;
1991
1992 if (pci_using_dac)
1993 ndev->features |= NETIF_F_HIGHDMA;
1994
1995
1996 priv = nic->priv[port] = netdev_priv(ndev);
1997
1998 priv->pBdxRegs = nic->regs + port * 0x8000;
1999 priv->port = port;
2000 priv->pdev = pdev;
2001 priv->ndev = ndev;
2002 priv->nic = nic;
2003 priv->msg_enable = BDX_DEF_MSG_ENABLE;
2004
2005 netif_napi_add(ndev, &priv->napi, bdx_poll, 64);
2006
2007 if ((readl(nic->regs + FPGA_VER) & 0xFFF) == 308) {
2008 DBG("HW statistics not supported\n");
2009 priv->stats_flag = 0;
2010 } else {
2011 priv->stats_flag = 1;
2012 }
2013
2014
2015 priv->txd_size = 2;
2016 priv->txf_size = 2;
2017 priv->rxd_size = 2;
2018 priv->rxf_size = 3;
2019
2020
2021 priv->rdintcm = INT_REG_VAL(0x20, 1, 4, 12);
2022 priv->tdintcm = INT_REG_VAL(0x20, 1, 0, 12);
2023
2024
2025
2026
2027
2028
2029#ifdef BDX_LLTX
2030 ndev->features |= NETIF_F_LLTX;
2031#endif
2032
2033 ndev->min_mtu = ETH_ZLEN;
2034 ndev->max_mtu = BDX_MAX_MTU;
2035
2036 spin_lock_init(&priv->tx_lock);
2037
2038
2039 if (bdx_read_mac(priv)) {
2040 pr_err("load MAC address failed\n");
2041 err = -EFAULT;
2042 goto err_out_iomap;
2043 }
2044 SET_NETDEV_DEV(ndev, &pdev->dev);
2045 err = register_netdev(ndev);
2046 if (err) {
2047 pr_err("register_netdev failed\n");
2048 goto err_out_free;
2049 }
2050 netif_carrier_off(ndev);
2051 netif_stop_queue(ndev);
2052
2053 print_eth_id(ndev);
2054 }
2055 RET(0);
2056
2057err_out_free:
2058 free_netdev(ndev);
2059err_out_iomap:
2060 iounmap(nic->regs);
2061err_out_res:
2062 pci_release_regions(pdev);
2063err_dma:
2064 pci_disable_device(pdev);
2065err_pci:
2066 vfree(nic);
2067
2068 RET(err);
2069}
2070
2071
2072
/* Names reported by ethtool -S.  The order and count must exactly
 * match the u64 fields of struct bdx_stats (bdx_get_sset_count()
 * asserts this), since bdx_get_ethtool_stats() memcpy's the whole
 * struct. */
static const char
 bdx_stat_names[][ETH_GSTRING_LEN] = {
	"InUCast",
	"InMCast",
	"InBCast",
	"InPkts",
	"InErrors",
	"InDropped",
	"FrameTooLong",
	"FrameSequenceErrors",
	"InVLAN",
	"InDroppedDFE",
	"InDroppedIntFull",
	"InFrameAlignErrors",

	"OutUCast",
	"OutMCast",
	"OutBCast",
	"OutPkts",

	"OutVLAN",
	"InUCastOctects",
	"OutUCastOctects",

	"InBCastOctects",
	"OutBCastOctects",
	"InOctects",
	"OutOctects",
};
2108
2109
2110
2111
2112
2113
2114static int bdx_get_link_ksettings(struct net_device *netdev,
2115 struct ethtool_link_ksettings *ecmd)
2116{
2117 ethtool_link_ksettings_zero_link_mode(ecmd, supported);
2118 ethtool_link_ksettings_add_link_mode(ecmd, supported,
2119 10000baseT_Full);
2120 ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
2121 ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
2122 ethtool_link_ksettings_add_link_mode(ecmd, advertising,
2123 10000baseT_Full);
2124 ethtool_link_ksettings_add_link_mode(ecmd, advertising, FIBRE);
2125
2126 ecmd->base.speed = SPEED_10000;
2127 ecmd->base.duplex = DUPLEX_FULL;
2128 ecmd->base.port = PORT_FIBRE;
2129 ecmd->base.autoneg = AUTONEG_DISABLE;
2130
2131 return 0;
2132}
2133
2134
2135
2136
2137
2138
/**
 * bdx_get_drvinfo - report driver name, version and bus information
 * @netdev: network interface device structure
 * @drvinfo: ethtool driver info to fill in
 */
static void
bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct bdx_priv *priv = netdev_priv(netdev);

	strlcpy(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, pci_name(priv->pdev),
		sizeof(drvinfo->bus_info));
}
2150
2151
2152
2153
2154
2155
/**
 * bdx_get_coalesce - get interrupt coalescing parameters
 * @netdev: network interface device structure
 * @ecoal: coalescing params to fill in
 * @kernel_coal: unused
 * @extack: unused
 *
 * Decodes the cached rdintcm/tdintcm register values into
 * microseconds and max-coalesced-frames units.  Always returns 0.
 */
static int bdx_get_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ecoal,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	u32 rdintcm;
	u32 tdintcm;
	struct bdx_priv *priv = netdev_priv(netdev);

	rdintcm = priv->rdintcm;
	tdintcm = priv->tdintcm;

	/* PCK_TH measures in multiples of FIFO bytes; convert it to
	 * packets using the descriptor size */
	ecoal->rx_coalesce_usecs = GET_INT_COAL(rdintcm) * INT_COAL_MULT;
	ecoal->rx_max_coalesced_frames =
	    ((GET_PCK_TH(rdintcm) * PCK_TH_MULT) / sizeof(struct rxf_desc));

	ecoal->tx_coalesce_usecs = GET_INT_COAL(tdintcm) * INT_COAL_MULT;
	ecoal->tx_max_coalesced_frames =
	    ((GET_PCK_TH(tdintcm) * PCK_TH_MULT) / BDX_TXF_DESC_SZ);

	/* adaptive parameters ignored */
	return 0;
}
2181
2182
2183
2184
2185
2186
/**
 * bdx_set_coalesce - set interrupt coalescing parameters
 * @netdev: network interface device structure
 * @ecoal: requested coalescing params
 * @kernel_coal: unused
 * @extack: unused
 *
 * Converts the requested usecs and max-frames values into the
 * hardware's register encoding, validates the field widths (15-bit
 * interval, 4-bit packet threshold) and writes the new values to the
 * RDINTCM0/TDINTCM0 registers.
 *
 * Returns 0 on success, -EINVAL if a value is out of range.
 */
static int bdx_set_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ecoal,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	u32 rdintcm;
	u32 tdintcm;
	struct bdx_priv *priv = netdev_priv(netdev);
	int rx_coal;
	int tx_coal;
	int rx_max_coal;
	int tx_max_coal;

	/* Check for valid input */
	rx_coal = ecoal->rx_coalesce_usecs / INT_COAL_MULT;
	tx_coal = ecoal->tx_coalesce_usecs / INT_COAL_MULT;
	rx_max_coal = ecoal->rx_max_coalesced_frames;
	tx_max_coal = ecoal->tx_max_coalesced_frames;

	/* Translate from packets to multiples of FIFO bytes,
	 * rounding up */
	rx_max_coal =
	    (((rx_max_coal * sizeof(struct rxf_desc)) + PCK_TH_MULT - 1)
	     / PCK_TH_MULT);
	tx_max_coal =
	    (((tx_max_coal * BDX_TXF_DESC_SZ) + PCK_TH_MULT - 1)
	     / PCK_TH_MULT);

	if ((rx_coal > 0x7FFF) || (tx_coal > 0x7FFF) ||
	    (rx_max_coal > 0xF) || (tx_max_coal > 0xF))
		return -EINVAL;

	rdintcm = INT_REG_VAL(rx_coal, GET_INT_COAL_RC(priv->rdintcm),
			      GET_RXF_TH(priv->rdintcm), rx_max_coal);
	tdintcm = INT_REG_VAL(tx_coal, GET_INT_COAL_RC(priv->tdintcm), 0,
			      tx_max_coal);

	priv->rdintcm = rdintcm;
	priv->tdintcm = tdintcm;

	WRITE_REG(priv, regRDINTCM0, rdintcm);
	WRITE_REG(priv, regTDINTCM0, tdintcm);

	return 0;
}
2231
2232
2233static inline int bdx_rx_fifo_size_to_packets(int rx_size)
2234{
2235 return (FIFO_SIZE * (1 << rx_size)) / sizeof(struct rxf_desc);
2236}
2237
2238
2239static inline int bdx_tx_fifo_size_to_packets(int tx_size)
2240{
2241 return (FIFO_SIZE * (1 << tx_size)) / BDX_TXF_DESC_SZ;
2242}
2243
2244
2245
2246
2247
2248
/**
 * bdx_get_ringparam - report ring sizes in packets
 * @netdev: network interface device structure
 * @ring: ethtool ring params to fill in
 *
 * Max pending corresponds to the largest fifo size code (3); current
 * pending is derived from the configured rxf/txd size codes.
 */
static void
bdx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
	struct bdx_priv *priv = netdev_priv(netdev);

	/* max number of packets that one fifo of a given size can hold */
	ring->rx_max_pending = bdx_rx_fifo_size_to_packets(3);
	ring->tx_max_pending = bdx_tx_fifo_size_to_packets(3);
	ring->rx_pending = bdx_rx_fifo_size_to_packets(priv->rxf_size);
	ring->tx_pending = bdx_tx_fifo_size_to_packets(priv->txd_size);
}
2260
2261
2262
2263
2264
2265
2266static int
2267bdx_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
2268{
2269 struct bdx_priv *priv = netdev_priv(netdev);
2270 int rx_size = 0;
2271 int tx_size = 0;
2272
2273 for (; rx_size < 4; rx_size++) {
2274 if (bdx_rx_fifo_size_to_packets(rx_size) >= ring->rx_pending)
2275 break;
2276 }
2277 if (rx_size == 4)
2278 rx_size = 3;
2279
2280 for (; tx_size < 4; tx_size++) {
2281 if (bdx_tx_fifo_size_to_packets(tx_size) >= ring->tx_pending)
2282 break;
2283 }
2284 if (tx_size == 4)
2285 tx_size = 3;
2286
2287
2288 if ((rx_size == priv->rxf_size) &&
2289 (tx_size == priv->txd_size))
2290 return 0;
2291
2292 priv->rxf_size = rx_size;
2293 if (rx_size > 1)
2294 priv->rxd_size = rx_size - 1;
2295 else
2296 priv->rxd_size = rx_size;
2297
2298 priv->txf_size = priv->txd_size = tx_size;
2299
2300 if (netif_running(netdev)) {
2301 bdx_close(netdev);
2302 bdx_open(netdev);
2303 }
2304 return 0;
2305}
2306
2307
2308
2309
2310
2311
/* bdx_get_strings - copy the statistics name table for ETH_SS_STATS;
 * other string sets are ignored */
static void bdx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *bdx_stat_names, sizeof(bdx_stat_names));
		break;
	}
}
2320
2321
2322
2323
2324
/**
 * bdx_get_sset_count - number of strings in a string set
 * @netdev: network interface device structure
 * @stringset: set id (only ETH_SS_STATS supported)
 *
 * Returns the statistics count for ETH_SS_STATS (0 when the HW
 * revision exposes no statistics block), -EINVAL otherwise.
 */
static int bdx_get_sset_count(struct net_device *netdev, int stringset)
{
	struct bdx_priv *priv = netdev_priv(netdev);

	switch (stringset) {
	case ETH_SS_STATS:
		/* name table must stay in sync with struct bdx_stats */
		BDX_ASSERT(ARRAY_SIZE(bdx_stat_names)
			   != sizeof(struct bdx_stats) / sizeof(u64));
		return (priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names)	: 0;
	}

	return -EINVAL;
}
2338
2339
2340
2341
2342
2343
2344
/**
 * bdx_get_ethtool_stats - return device's hardware statistics
 * @netdev: network interface device structure
 * @stats: ethtool stats request (unused)
 * @data: u64 array to fill, one slot per bdx_stat_names entry
 *
 * Refreshes the cached hardware counters and copies them out; does
 * nothing when the HW revision has no statistics block.
 */
static void bdx_get_ethtool_stats(struct net_device *netdev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct bdx_priv *priv = netdev_priv(netdev);

	if (priv->stats_flag) {

		/* Update stats from HW */
		bdx_update_stats(priv);

		/* Copy data to user buffer */
		memcpy(data, &priv->hw_stats, sizeof(priv->hw_stats));
	}
}
2359
2360
2361
2362
2363
/**
 * bdx_set_ethtool_ops - attach the driver's ethtool callbacks
 * @netdev: network interface device structure
 */
static void bdx_set_ethtool_ops(struct net_device *netdev)
{
	static const struct ethtool_ops bdx_ethtool_ops = {
		.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
					     ETHTOOL_COALESCE_MAX_FRAMES,
		.get_drvinfo = bdx_get_drvinfo,
		.get_link = ethtool_op_get_link,
		.get_coalesce = bdx_get_coalesce,
		.set_coalesce = bdx_set_coalesce,
		.get_ringparam = bdx_get_ringparam,
		.set_ringparam = bdx_set_ringparam,
		.get_strings = bdx_get_strings,
		.get_sset_count = bdx_get_sset_count,
		.get_ethtool_stats = bdx_get_ethtool_stats,
		.get_link_ksettings = bdx_get_link_ksettings,
	};

	netdev->ethtool_ops = &bdx_ethtool_ops;
}
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
/**
 * bdx_remove - device removal routine
 * @pdev: PCI device information struct
 *
 * Unregisters and frees every port's net_device, disables MSI if it
 * was enabled, unmaps the registers and releases all PCI resources.
 */
static void bdx_remove(struct pci_dev *pdev)
{
	struct pci_nic *nic = pci_get_drvdata(pdev);
	struct net_device *ndev;
	int port;

	for (port = 0; port < nic->port_num; port++) {
		ndev = nic->priv[port]->ndev;
		unregister_netdev(ndev);
		free_netdev(ndev);
	}

	/*bdx_hw_reset_direct(nic->regs); */
#ifdef BDX_MSI
	if (nic->irq_type == IRQ_MSI)
		pci_disable_msi(pdev);
#endif

	iounmap(nic->regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	vfree(nic);

	RET();
}
2418
/* PCI driver registration glue */
static struct pci_driver bdx_pci_driver = {
	.name = BDX_DRV_NAME,
	.id_table = bdx_pci_tbl,
	.probe = bdx_probe,
	.remove = bdx_remove,
};
2425
2426
2427
2428
/* Print driver name/version and build options at module load.
 * NOTE(review): the "hw_csum" label prints BDX_MSI_STRING - looks
 * like a leftover label; confirm against the header's definition. */
static void __init print_driver_id(void)
{
	pr_info("%s, %s\n", BDX_DRV_DESC, BDX_DRV_VERSION);
	pr_info("Options: hw_csum %s\n", BDX_MSI_STRING);
}
2434
/* Module entry point: precompute descriptor sizes, announce the
 * driver and register with the PCI core. */
static int __init bdx_module_init(void)
{
	ENTER;
	init_txd_sizes();
	print_driver_id();
	RET(pci_register_driver(&bdx_pci_driver));
}

module_init(bdx_module_init);
2444
/* Module exit point: unregister from the PCI core (which triggers
 * bdx_remove() for every bound device). */
static void __exit bdx_module_exit(void)
{
	ENTER;
	pci_unregister_driver(&bdx_pci_driver);
	RET();
}

module_exit(bdx_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(BDX_DRV_DESC);
MODULE_FIRMWARE("tehuti/bdx.bin");
2458