1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65#include "tehuti.h"
66
/* PCI IDs this driver binds to (vendor 0x1FC9 = Tehuti Networks).
 * Table is zero-terminated as required by the PCI core. */
static struct pci_device_id __devinitdata bdx_pci_tbl[] = {
	{0x1FC9, 0x3009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x1FC9, 0x3010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x1FC9, 0x3014, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0}
};

/* export the table so udev/modprobe can autoload on device match */
MODULE_DEVICE_TABLE(pci, bdx_pci_tbl);
75
76
/* datapath helpers */
static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f);
static void bdx_tx_cleanup(struct bdx_priv *priv);
static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget);

/* pushes raw data (e.g. firmware image) through the TX descriptor fifo */
static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size);

/* fifo setup */
static int bdx_tx_init(struct bdx_priv *priv);
static int bdx_rx_init(struct bdx_priv *priv);

/* fifo teardown */
static void bdx_rx_free(struct bdx_priv *priv);
static void bdx_tx_free(struct bdx_priv *priv);

/* ethtool hookup */
static void bdx_ethtool_ops(struct net_device *netdev);
94
95
96
97
98
/* Print chip identification at probe time: NIC name and port count,
 * SROM/FPGA versions and build seed from the register file, plus the
 * PCI-E link parameters (lane count, max payload size, max read
 * request size) read from PCI config space. */
static void print_hw_id(struct pci_dev *pdev)
{
	struct pci_nic *nic = pci_get_drvdata(pdev);
	u16 pci_link_status = 0;
	u16 pci_ctrl = 0;

	pci_read_config_word(pdev, PCI_LINK_STATUS_REG, &pci_link_status);
	pci_read_config_word(pdev, PCI_DEV_CTRL_REG, &pci_ctrl);

	printk(KERN_INFO "tehuti: %s%s\n", BDX_NIC_NAME,
	       nic->port_num == 1 ? "" : ", 2-Port");
	printk(KERN_INFO
	       "tehuti: srom 0x%x fpga %d build %u lane# %d"
	       " max_pl 0x%x mrrs 0x%x\n",
	       readl(nic->regs + SROM_VER), readl(nic->regs + FPGA_VER) & 0xFFF,
	       readl(nic->regs + FPGA_SEED),
	       GET_LINK_STATUS_LANES(pci_link_status),
	       GET_DEV_CTRL_MAXPL(pci_ctrl), GET_DEV_CTRL_MRRS(pci_ctrl));
}
118
119static void print_fw_id(struct pci_nic *nic)
120{
121 printk(KERN_INFO "tehuti: fw 0x%x\n", readl(nic->regs + FW_VER));
122}
123
124static void print_eth_id(struct net_device *ndev)
125{
126 printk(KERN_INFO "%s: %s, Port %c\n", ndev->name, BDX_NIC_NAME,
127 (ndev->if_port == 0) ? 'A' : 'B');
128
129}
130
131
132
133
134
/* Unmask all interrupt sources we service (IR_RUN is the full set),
 * or mask everything; regIMR is the interrupt mask register. */
#define bdx_enable_interrupts(priv)	\
	do { WRITE_REG(priv, regIMR, IR_RUN); } while (0)
#define bdx_disable_interrupts(priv)	\
	do { WRITE_REG(priv, regIMR, 0); } while (0)
139
140
141
142
143
144
145
146
147
148
149
150
151
/**
 * bdx_fifo_init - allocate a DMA-coherent ring and program its base regs
 * @priv: NIC private structure
 * @f: fifo bookkeeping structure to fill in
 * @fsz_type: encoded size; the ring holds FIFO_SIZE * 2^fsz_type bytes
 * @reg_CFG0, @reg_CFG1, @reg_RPTR, @reg_WPTR: this fifo's register set
 *
 * FIFO_EXTRA_SPACE additional bytes are allocated past the ring end;
 * the rx path uses that slack to linearize descriptors that wrap
 * around the ring boundary (see the memcpy in bdx_rx_alloc_skbs()).
 *
 * Returns 0 on success, -ENOMEM if the coherent allocation fails.
 */
static int
bdx_fifo_init(struct bdx_priv *priv, struct fifo *f, int fsz_type,
	      u16 reg_CFG0, u16 reg_CFG1, u16 reg_RPTR, u16 reg_WPTR)
{
	u16 memsz = FIFO_SIZE * (1 << fsz_type);

	memset(f, 0, sizeof(struct fifo));

	/* one coherent allocation covers ring + wrap slack */
	f->va = pci_alloc_consistent(priv->pdev,
				     memsz + FIFO_EXTRA_SPACE, &f->da);
	if (!f->va) {
		ERR("pci_alloc_consistent failed\n");
		RET(-ENOMEM);
	}
	f->reg_CFG0 = reg_CFG0;
	f->reg_CFG1 = reg_CFG1;
	f->reg_RPTR = reg_RPTR;
	f->reg_WPTR = reg_WPTR;
	f->rptr = 0;
	f->wptr = 0;
	f->memsz = memsz;
	/* assumes FIFO_SIZE is a power of two, so memsz-1 is a valid
	 * wrap mask - TODO confirm against tehuti.h */
	f->size_mask = memsz - 1;
	/* CFG0 carries the low DMA address bits plus the size code;
	 * CFG1 takes the high 32 bits of the DMA address */
	WRITE_REG(priv, reg_CFG0, (u32) ((f->da & TX_RX_CFG0_BASE) | fsz_type));
	WRITE_REG(priv, reg_CFG1, H32_64(f->da));

	RET(0);
}
179
180
181
182
183
184static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f)
185{
186 ENTER;
187 if (f->va) {
188 pci_free_consistent(priv->pdev,
189 f->memsz + FIFO_EXTRA_SPACE, f->va, f->da);
190 f->va = NULL;
191 }
192 RET();
193}
194
195
196
197
198
199static void bdx_link_changed(struct bdx_priv *priv)
200{
201 u32 link = READ_REG(priv, regMAC_LNK_STAT) & MAC_LINK_STAT;
202
203 if (!link) {
204 if (netif_carrier_ok(priv->ndev)) {
205 netif_stop_queue(priv->ndev);
206 netif_carrier_off(priv->ndev);
207 ERR("%s: Link Down\n", priv->ndev->name);
208 }
209 } else {
210 if (!netif_carrier_ok(priv->ndev)) {
211 netif_wake_queue(priv->ndev);
212 netif_carrier_on(priv->ndev);
213 ERR("%s: Link Up\n", priv->ndev->name);
214 }
215 }
216}
217
/* Handle the non-datapath ("extra") interrupt causes: RX free fifo
 * refill requests, link state changes and PCI-E fault reports. */
static void bdx_isr_extra(struct bdx_priv *priv, u32 isr)
{
	if (isr & IR_RX_FREE_0) {
		/* hardware ran low on free RX buffers - replenish fifo 0 */
		bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);
		DBG("RX_FREE_0\n");
	}

	if (isr & IR_LNKCHG0)
		bdx_link_changed(priv);

	if (isr & IR_PCIE_LINK)
		ERR("%s: PCI-E Link Fault\n", priv->ndev->name);

	if (isr & IR_PCIE_TOUT)
		ERR("%s: PCI-E Time Out\n", priv->ndev->name);

}
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
/**
 * bdx_isr_napi - interrupt service routine
 * @irq: interrupt line
 * @dev: the net_device passed to request_irq()
 *
 * Reads the interrupt status (masked to the sources we run with),
 * dispatches non-datapath causes inline, and schedules NAPI for RX/TX
 * work. When NAPI is scheduled we return WITHOUT re-arming IMR;
 * bdx_poll() calls bdx_enable_interrupts() once the work is drained.
 */
static irqreturn_t bdx_isr_napi(int irq, void *dev)
{
	struct net_device *ndev = dev;
	struct bdx_priv *priv = netdev_priv(ndev);
	u32 isr;

	ENTER;
	isr = (READ_REG(priv, regISR) & IR_RUN);
	if (unlikely(!isr)) {
		/* not our interrupt (possibly a shared line) - re-arm, bail */
		bdx_enable_interrupts(priv);
		return IRQ_NONE;
	}

	if (isr & IR_EXTRA)
		bdx_isr_extra(priv, isr);

	if (isr & (IR_RX_DESC_0 | IR_TX_FREE_0)) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* poll loop will re-enable interrupts when done */
			__napi_schedule(&priv->napi);
			RET(IRQ_HANDLED);
		} else {
			/* NAPI already scheduled elsewhere.
			 * NOTE(review): these reads appear to acknowledge the
			 * pending pointer events so the interrupt is not
			 * immediately re-raised - confirm with chip docs. */
			READ_REG(priv, regTXF_WPTR_0);
			READ_REG(priv, regRXD_WPTR_0);
		}
	}

	bdx_enable_interrupts(priv);
	RET(IRQ_HANDLED);
}
287
/**
 * bdx_poll - NAPI poll callback
 * @napi: napi context embedded in bdx_priv
 * @budget: maximum RX packets to process this poll
 *
 * Reclaims completed TX descriptors, then handles up to @budget RX
 * packets. We leave NAPI mode (napi_complete + interrupt re-arm) when
 * a poll used less than its full budget, or after 30 consecutive
 * full-budget polls - the napi_stop counter guarantees we periodically
 * drop back to interrupt-driven operation.
 *
 * Returns the number of RX packets processed.
 */
static int bdx_poll(struct napi_struct *napi, int budget)
{
	struct bdx_priv *priv = container_of(napi, struct bdx_priv, napi);
	int work_done;

	ENTER;
	bdx_tx_cleanup(priv);
	work_done = bdx_rx_receive(priv, &priv->rxd_fifo0, budget);
	if ((work_done < budget) ||
	    (priv->napi_stop++ >= 30)) {
		DBG("rx poll is done. backing to isr-driven\n");

		/* reset the forced-exit counter for the next NAPI episode */
		priv->napi_stop = 0;

		napi_complete(napi);
		bdx_enable_interrupts(priv);
	}
	return work_done;
}
309
310
311
312
313
314
315
316
317
/**
 * bdx_fw_load - load chip firmware unless another port already has
 * @priv: NIC private structure
 *
 * regINIT_SEMAPHORE arbitrates which port performs the load and
 * regINIT_STATUS reads non-zero once the chip's firmware is running.
 * The image is streamed to the chip through the TX descriptor fifo,
 * then we poll up to 200 * 2ms for the chip to come alive.
 *
 * Returns 0 on success, -EIO on init timeout, or the error from
 * request_firmware().
 */
static int bdx_fw_load(struct bdx_priv *priv)
{
	const struct firmware *fw = NULL;
	int master, i;
	int rc;

	ENTER;
	master = READ_REG(priv, regINIT_SEMAPHORE);
	if (!READ_REG(priv, regINIT_STATUS) && master) {
		rc = request_firmware(&fw, "tehuti/firmware.bin", &priv->pdev->dev);
		if (rc)
			goto out;
		/* push the image through the tx fifo, then let it boot */
		bdx_tx_push_desc_safe(priv, (char *)fw->data, fw->size);
		mdelay(100);
	}
	for (i = 0; i < 200; i++) {
		if (READ_REG(priv, regINIT_STATUS)) {
			rc = 0;
			goto out;
		}
		mdelay(2);
	}
	rc = -EIO;
out:
	/* drop the semaphore only if we were the port holding it */
	if (master)
		WRITE_REG(priv, regINIT_SEMAPHORE, 1);
	if (fw)
		release_firmware(fw);

	if (rc) {
		ERR("%s: firmware loading failed\n", priv->ndev->name);
		if (rc == -EIO)
			DBG("VPC = 0x%x VIC = 0x%x INIT_STATUS = 0x%x i=%d\n",
			    READ_REG(priv, regVPC),
			    READ_REG(priv, regVIC),
			    READ_REG(priv, regINIT_STATUS), i);
		RET(rc);
	} else {
		DBG("%s: firmware loading success\n", priv->ndev->name);
		RET(0);
	}
}
360
/* Re-program the unicast MAC address registers from ndev->dev_addr.
 * The 48-bit address is written as three 16-bit halves: MAC2 takes
 * dev_addr[0..1] (most significant), MAC0 takes dev_addr[4..5]. */
static void bdx_restore_mac(struct net_device *ndev, struct bdx_priv *priv)
{
	u32 val;

	ENTER;
	DBG("mac0=%x mac1=%x mac2=%x\n",
	    READ_REG(priv, regUNC_MAC0_A),
	    READ_REG(priv, regUNC_MAC1_A), READ_REG(priv, regUNC_MAC2_A));

	val = (ndev->dev_addr[0] << 8) | (ndev->dev_addr[1]);
	WRITE_REG(priv, regUNC_MAC2_A, val);
	val = (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]);
	WRITE_REG(priv, regUNC_MAC1_A, val);
	val = (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]);
	WRITE_REG(priv, regUNC_MAC0_A, val);

	DBG("mac0=%x mac1=%x mac2=%x\n",
	    READ_REG(priv, regUNC_MAC0_A),
	    READ_REG(priv, regUNC_MAC1_A), READ_REG(priv, regUNC_MAC2_A));
	RET();
}
382
383
384
385
386static int bdx_hw_start(struct bdx_priv *priv)
387{
388 int rc = -EIO;
389 struct net_device *ndev = priv->ndev;
390
391 ENTER;
392 bdx_link_changed(priv);
393
394
395 WRITE_REG(priv, regFRM_LENGTH, 0X3FE0);
396 WRITE_REG(priv, regPAUSE_QUANT, 0x96);
397 WRITE_REG(priv, regRX_FIFO_SECTION, 0x800010);
398 WRITE_REG(priv, regTX_FIFO_SECTION, 0xE00010);
399 WRITE_REG(priv, regRX_FULLNESS, 0);
400 WRITE_REG(priv, regTX_FULLNESS, 0);
401 WRITE_REG(priv, regCTRLST,
402 regCTRLST_BASE | regCTRLST_RX_ENA | regCTRLST_TX_ENA);
403
404 WRITE_REG(priv, regVGLB, 0);
405 WRITE_REG(priv, regMAX_FRAME_A,
406 priv->rxf_fifo0.m.pktsz & MAX_FRAME_AB_VAL);
407
408 DBG("RDINTCM=%08x\n", priv->rdintcm);
409 WRITE_REG(priv, regRDINTCM0, priv->rdintcm);
410 WRITE_REG(priv, regRDINTCM2, 0);
411
412 DBG("TDINTCM=%08x\n", priv->tdintcm);
413 WRITE_REG(priv, regTDINTCM0, priv->tdintcm);
414
415
416
417 bdx_restore_mac(priv->ndev, priv);
418
419 WRITE_REG(priv, regGMAC_RXF_A, GMAC_RX_FILTER_OSEN |
420 GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB);
421
422#define BDX_IRQ_TYPE ((priv->nic->irq_type == IRQ_MSI)?0:IRQF_SHARED)
423 if ((rc = request_irq(priv->pdev->irq, &bdx_isr_napi, BDX_IRQ_TYPE,
424 ndev->name, ndev)))
425 goto err_irq;
426 bdx_enable_interrupts(priv);
427
428 RET(0);
429
430err_irq:
431 RET(rc);
432}
433
434static void bdx_hw_stop(struct bdx_priv *priv)
435{
436 ENTER;
437 bdx_disable_interrupts(priv);
438 free_irq(priv->pdev->irq, priv->ndev);
439
440 netif_carrier_off(priv->ndev);
441 netif_stop_queue(priv->ndev);
442
443 RET();
444}
445
/**
 * bdx_hw_reset_direct - full chip reset via a bare register pointer
 * @regs: ioremapped register base
 *
 * Variant of bdx_hw_reset() usable at probe time, before a bdx_priv
 * exists. Pulses the PLL soft-reset bit, then waits up to 70 * 10ms
 * for the PLL lock bit (CLKPLL_LKD).
 *
 * Returns 0 on success, 1 on lock timeout.
 */
static int bdx_hw_reset_direct(void __iomem *regs)
{
	u32 val, i;
	ENTER;

	/* assert PLL soft reset; the "+ 0x8" presumably sets an extra
	 * control bit alongside SFTRST - TODO confirm with chip docs */
	val = readl(regs + regCLKPLL);
	writel((val | CLKPLL_SFTRST) + 0x8, regs + regCLKPLL);
	udelay(50);
	val = readl(regs + regCLKPLL);
	writel(val & ~CLKPLL_SFTRST, regs + regCLKPLL);

	/* wait for the PLL to relock */
	for (i = 0; i < 70; i++, mdelay(10))
		if ((readl(regs + regCLKPLL) & CLKPLL_LKD) == CLKPLL_LKD) {
			/* NOTE(review): discarded read looks like a posted
			 * write flush / wake-up access - confirm */
			readl(regs + regRXD_CFG0_0);
			return 0;
		}
	ERR("tehuti: HW reset failed\n");
	return 1;
}
468
/**
 * bdx_hw_reset - full chip reset through a live bdx_priv
 * @priv: NIC private structure
 *
 * Only port 0 actually pulses the (chip-global) PLL soft reset; every
 * port then waits for PLL lock. Mirrors bdx_hw_reset_direct().
 *
 * Returns 0 on success, 1 on lock timeout.
 */
static int bdx_hw_reset(struct bdx_priv *priv)
{
	u32 val, i;
	ENTER;

	if (priv->port == 0) {
		/* pulse PLL soft reset (see bdx_hw_reset_direct for the
		 * note about the "+ 0x8") */
		val = READ_REG(priv, regCLKPLL);
		WRITE_REG(priv, regCLKPLL, (val | CLKPLL_SFTRST) + 0x8);
		udelay(50);
		val = READ_REG(priv, regCLKPLL);
		WRITE_REG(priv, regCLKPLL, val & ~CLKPLL_SFTRST);
	}

	/* wait for the PLL to relock */
	for (i = 0; i < 70; i++, mdelay(10))
		if ((READ_REG(priv, regCLKPLL) & CLKPLL_LKD) == CLKPLL_LKD) {
			/* NOTE(review): discarded read looks like a posted
			 * write flush / wake-up access - confirm */
			READ_REG(priv, regRXD_CFG0_0);
			return 0;
		}
	ERR("tehuti: HW reset failed\n");
	return 1;
}
492
/**
 * bdx_sw_reset - per-port software reset (used on non-0x3009 devices)
 * @priv: NIC private structure
 *
 * Sequence: close the RX filter, disable port and queues, wait for
 * the port's reset-ready bit, mask/flush interrupts, pulse the
 * queue/port reset bits, clear all TX/RX fifo pointer registers, then
 * re-enable everything in reverse order.
 *
 * Always returns 0; a reset-ready timeout is only logged.
 */
static int bdx_sw_reset(struct bdx_priv *priv)
{
	int i;

	ENTER;

	/* 1. stop accepting any RX traffic and let the pipes drain */
	WRITE_REG(priv, regGMAC_RXF_A, 0);
	mdelay(100);
	/* 2. disable the port ... */
	WRITE_REG(priv, regDIS_PORT, 1);
	/* 3. ... and its queues */
	WRITE_REG(priv, regDIS_QU, 1);
	/* 4. wait for the port to report it is ready to be reset */
	for (i = 0; i < 50; i++) {
		if (READ_REG(priv, regRST_PORT) & 1)
			break;
		mdelay(10);
	}
	if (i == 50)
		ERR("%s: SW reset timeout. continuing anyway\n",
		    priv->ndev->name);

	/* 5. disable coalescing and mask interrupts; the ISR read
	 * discards any pending status */
	WRITE_REG(priv, regRDINTCM0, 0);
	WRITE_REG(priv, regTDINTCM0, 0);
	WRITE_REG(priv, regIMR, 0);
	READ_REG(priv, regISR);

	/* 6. assert queue reset ... */
	WRITE_REG(priv, regRST_QU, 1);
	/* 7. ... and port reset */
	WRITE_REG(priv, regRST_PORT, 1);
	/* 8. dump, then zero, every TX/RX fifo pointer register */
	for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
		DBG("%x = %x\n", i, READ_REG(priv, i) & TXF_WPTR_WR_PTR);
	for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
		WRITE_REG(priv, i, 0);
	/* 9. bring the port and queues back in reverse order */
	WRITE_REG(priv, regDIS_PORT, 0);

	WRITE_REG(priv, regDIS_QU, 0);

	WRITE_REG(priv, regRST_QU, 0);

	WRITE_REG(priv, regRST_PORT, 0);

	/* pointer registers should all read zero now */
	for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
		DBG("%x = %x\n", i, READ_REG(priv, i) & TXF_WPTR_WR_PTR);

	RET(0);
}
547
548
549static int bdx_reset(struct bdx_priv *priv)
550{
551 ENTER;
552 RET((priv->pdev->device == 0x3009)
553 ? bdx_hw_reset(priv)
554 : bdx_sw_reset(priv));
555}
556
557
558
559
560
561
562
563
564
565
566
567
568static int bdx_close(struct net_device *ndev)
569{
570 struct bdx_priv *priv = NULL;
571
572 ENTER;
573 priv = netdev_priv(ndev);
574
575 napi_disable(&priv->napi);
576
577 bdx_reset(priv);
578 bdx_hw_stop(priv);
579 bdx_rx_free(priv);
580 bdx_tx_free(priv);
581 RET(0);
582}
583
584
585
586
587
588
589
590
591
592
593
594
595
/**
 * bdx_open - ndo_open callback: bring the interface up
 * @ndev: network device being opened
 *
 * Resets the chip, allocates the TX then RX fifos, loads firmware if
 * this port is first, pre-posts RX buffers, starts the hardware and
 * finally enables NAPI. Any failure unwinds through bdx_close().
 *
 * Returns 0 on success or the first negative error encountered.
 */
static int bdx_open(struct net_device *ndev)
{
	struct bdx_priv *priv;
	int rc;

	ENTER;
	priv = netdev_priv(ndev);
	bdx_reset(priv);
	if (netif_running(ndev))
		netif_stop_queue(priv->ndev);

	if ((rc = bdx_tx_init(priv)))
		goto err;

	if ((rc = bdx_rx_init(priv)))
		goto err;

	if ((rc = bdx_fw_load(priv)))
		goto err;

	/* give the NIC buffers to receive into before starting it */
	bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);

	if ((rc = bdx_hw_start(priv)))
		goto err;

	napi_enable(&priv->napi);

	print_fw_id(priv->nic);

	RET(0);

err:
	bdx_close(ndev);
	RET(rc);
}
631
632static int bdx_range_check(struct bdx_priv *priv, u32 offset)
633{
634 return (offset > (u32) (BDX_REGS_SIZE / priv->nic->port_num)) ?
635 -EINVAL : 0;
636}
637
638static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
639{
640 struct bdx_priv *priv = netdev_priv(ndev);
641 u32 data[3];
642 int error;
643
644 ENTER;
645
646 DBG("jiffies=%ld cmd=%d\n", jiffies, cmd);
647 if (cmd != SIOCDEVPRIVATE) {
648 error = copy_from_user(data, ifr->ifr_data, sizeof(data));
649 if (error) {
650 ERR("cant copy from user\n");
651 RET(error);
652 }
653 DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]);
654 }
655
656 if (!capable(CAP_SYS_RAWIO))
657 return -EPERM;
658
659 switch (data[0]) {
660
661 case BDX_OP_READ:
662 error = bdx_range_check(priv, data[1]);
663 if (error < 0)
664 return error;
665 data[2] = READ_REG(priv, data[1]);
666 DBG("read_reg(0x%x)=0x%x (dec %d)\n", data[1], data[2],
667 data[2]);
668 error = copy_to_user(ifr->ifr_data, data, sizeof(data));
669 if (error)
670 RET(error);
671 break;
672
673 case BDX_OP_WRITE:
674 error = bdx_range_check(priv, data[1]);
675 if (error < 0)
676 return error;
677 WRITE_REG(priv, data[1], data[2]);
678 DBG("write_reg(0x%x, 0x%x)\n", data[1], data[2]);
679 break;
680
681 default:
682 RET(-EOPNOTSUPP);
683 }
684 return 0;
685}
686
687static int bdx_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
688{
689 ENTER;
690 if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15))
691 RET(bdx_ioctl_priv(ndev, ifr, cmd));
692 else
693 RET(-EOPNOTSUPP);
694}
695
696
697
698
699
700
701
702
703static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
704{
705 struct bdx_priv *priv = netdev_priv(ndev);
706 u32 reg, bit, val;
707
708 ENTER;
709 DBG2("vid=%d value=%d\n", (int)vid, enable);
710 if (unlikely(vid >= 4096)) {
711 ERR("tehuti: invalid VID: %u (> 4096)\n", vid);
712 RET();
713 }
714 reg = regVLAN_0 + (vid / 32) * 4;
715 bit = 1 << vid % 32;
716 val = READ_REG(priv, reg);
717 DBG2("reg=%x, val=%x, bit=%d\n", reg, val, bit);
718 if (enable)
719 val |= bit;
720 else
721 val &= ~bit;
722 DBG2("new val %x\n", val);
723 WRITE_REG(priv, reg, val);
724 RET();
725}
726
727
728
729
730
731
/* ndo callback: start accepting frames tagged with @vid. */
static void bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
{
	__bdx_vlan_rx_vid(ndev, vid, 1);
}




/* ndo callback: stop accepting frames tagged with @vid. */
static void bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
{
	__bdx_vlan_rx_vid(ndev, vid, 0);
}
746
747
748
749
750
751
/* ndo callback: remember the vlan group so the RX path
 * (NETIF_RX_MUX) can hand tagged frames to the vlan layer. */
static void
bdx_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
{
	struct bdx_priv *priv = netdev_priv(ndev);

	ENTER;
	DBG("device='%s', group='%p'\n", ndev->name, grp);
	priv->vlgrp = grp;
	RET();
}
762
763
764
765
766
767
768
769
770static int bdx_change_mtu(struct net_device *ndev, int new_mtu)
771{
772 ENTER;
773
774 if (new_mtu == ndev->mtu)
775 RET(0);
776
777
778 if (new_mtu < ETH_ZLEN) {
779 ERR("%s: %s mtu %d is less then minimal %d\n",
780 BDX_DRV_NAME, ndev->name, new_mtu, ETH_ZLEN);
781 RET(-EINVAL);
782 }
783
784 ndev->mtu = new_mtu;
785 if (netif_running(ndev)) {
786 bdx_close(ndev);
787 bdx_open(ndev);
788 }
789 RET(0);
790}
791
/**
 * bdx_setmulti - ndo_set_multicast_list callback: program RX filtering
 * @ndev: network device
 *
 * Promiscuous mode sets the PRM filter bit; IFF_ALLMULTI fills the
 * multicast hash with all-ones; otherwise the hash is rebuilt from the
 * device's multicast list, one bit per address (hash = XOR of the six
 * address bytes). The exact-match multicast registers are cleared and
 * not used - only the imperfect hash filter is populated.
 */
static void bdx_setmulti(struct net_device *ndev)
{
	struct bdx_priv *priv = netdev_priv(ndev);

	u32 rxf_val =
	    GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB | GMAC_RX_FILTER_OSEN;
	int i;

	ENTER;

	if (ndev->flags & IFF_PROMISC) {
		rxf_val |= GMAC_RX_FILTER_PRM;
	} else if (ndev->flags & IFF_ALLMULTI) {
		/* accept every multicast: saturate the hash table */
		for (i = 0; i < MAC_MCST_HASH_NUM; i++)
			WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0);
	} else if (ndev->mc_count) {
		u8 hash;
		struct dev_mc_list *mclist;
		u32 reg, val;

		/* start from a clean hash table ... */
		for (i = 0; i < MAC_MCST_HASH_NUM; i++)
			WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, 0);
		/* ... and clean exact-match registers (unused below) */
		for (i = 0; i < MAC_MCST_NUM; i++) {
			WRITE_REG(priv, regRX_MAC_MCST0 + i * 8, 0);
			WRITE_REG(priv, regRX_MAC_MCST1 + i * 8, 0);
		}

		/* set one hash bit per subscribed multicast address;
		 * high 3 hash bits select the register, low 5 the bit */
		mclist = ndev->mc_list;


		for (; mclist; mclist = mclist->next) {
			hash = 0;
			for (i = 0; i < ETH_ALEN; i++)
				hash ^= mclist->dmi_addr[i];
			reg = regRX_MCST_HASH0 + ((hash >> 5) << 2);
			val = READ_REG(priv, reg);
			val |= (1 << (hash % 32));
			WRITE_REG(priv, reg, val);
		}

	} else {
		DBG("only own mac %d\n", ndev->mc_count);
		rxf_val |= GMAC_RX_FILTER_AB;
	}
	WRITE_REG(priv, regGMAC_RXF_A, rxf_val);

	/* hash writes above take effect on their own; rxf_val only
	 * carries the filter mode bits */
	RET();
}
851
852static int bdx_set_mac(struct net_device *ndev, void *p)
853{
854 struct bdx_priv *priv = netdev_priv(ndev);
855 struct sockaddr *addr = p;
856
857 ENTER;
858
859
860
861
862 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
863 bdx_restore_mac(ndev, priv);
864 RET(0);
865}
866
/* Read the MAC address out of the unicast address registers into
 * ndev->dev_addr (MAC2 holds the most significant bytes).
 * NOTE(review): each register is read twice and only the second value
 * kept - looks like a deliberate read-stability workaround; confirm
 * before "simplifying" the duplicate reads away. */
static int bdx_read_mac(struct bdx_priv *priv)
{
	u16 macAddress[3], i;
	ENTER;

	macAddress[2] = READ_REG(priv, regUNC_MAC0_A);
	macAddress[2] = READ_REG(priv, regUNC_MAC0_A);
	macAddress[1] = READ_REG(priv, regUNC_MAC1_A);
	macAddress[1] = READ_REG(priv, regUNC_MAC1_A);
	macAddress[0] = READ_REG(priv, regUNC_MAC2_A);
	macAddress[0] = READ_REG(priv, regUNC_MAC2_A);
	/* unpack each 16-bit half, high byte first */
	for (i = 0; i < 3; i++) {
		priv->ndev->dev_addr[i * 2 + 1] = macAddress[i];
		priv->ndev->dev_addr[i * 2] = macAddress[i] >> 8;
	}
	RET(0);
}
884
885static u64 bdx_read_l2stat(struct bdx_priv *priv, int reg)
886{
887 u64 val;
888
889 val = READ_REG(priv, reg);
890 val |= ((u64) READ_REG(priv, reg + 8)) << 32;
891 return val;
892}
893
894
/* Refresh priv->hw_stats from the chip's statistics counters. The
 * struct is treated as a flat u64 vector filled from four
 * discontiguous register ranges; the BDX_ASSERTs pin the expected
 * register map boundaries and that the vector length matches
 * struct bdx_stats exactly. Keep the field order of bdx_stats in sync
 * with the register layout. */
static void bdx_update_stats(struct bdx_priv *priv)
{
	struct bdx_stats *stats = &priv->hw_stats;
	u64 *stats_vector = (u64 *) stats;
	int i;
	int addr;

	/* counters 0..11 live at 0x7200..0x72B0 */
	addr = 0x7200;

	for (i = 0; i < 12; i++) {
		stats_vector[i] = bdx_read_l2stat(priv, addr);
		addr += 0x10;
	}
	BDX_ASSERT(addr != 0x72C0);
	/* counters 12..15 live at 0x72F0..0x7320 */
	addr = 0x72F0;
	for (; i < 16; i++) {
		stats_vector[i] = bdx_read_l2stat(priv, addr);
		addr += 0x10;
	}
	BDX_ASSERT(addr != 0x7330);
	/* counters 16..18 live at 0x7370..0x7390 */
	addr = 0x7370;
	for (; i < 19; i++) {
		stats_vector[i] = bdx_read_l2stat(priv, addr);
		addr += 0x10;
	}
	BDX_ASSERT(addr != 0x73A0);
	/* counters 19..22 live at 0x73C0..0x73F0 */
	addr = 0x73C0;
	for (; i < 23; i++) {
		stats_vector[i] = bdx_read_l2stat(priv, addr);
		addr += 0x10;
	}
	BDX_ASSERT(addr != 0x7400);
	BDX_ASSERT((sizeof(struct bdx_stats) / sizeof(u64)) != i);
}
933
934static struct net_device_stats *bdx_get_stats(struct net_device *ndev)
935{
936 struct bdx_priv *priv = netdev_priv(ndev);
937 struct net_device_stats *net_stat = &priv->net_stats;
938 return net_stat;
939}
940
/* debug dump helpers, defined near the bottom of the RX section */
static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
		       u16 rxd_vlan);
static void print_rxfd(struct rxf_desc *rxfd);
944
945
946
947
948
949static void bdx_rxdb_destroy(struct rxdb *db)
950{
951 vfree(db);
952}
953
954static struct rxdb *bdx_rxdb_create(int nelem)
955{
956 struct rxdb *db;
957 int i;
958
959 db = vmalloc(sizeof(struct rxdb)
960 + (nelem * sizeof(int))
961 + (nelem * sizeof(struct rx_map)));
962 if (likely(db != NULL)) {
963 db->stack = (int *)(db + 1);
964 db->elems = (void *)(db->stack + nelem);
965 db->nelem = nelem;
966 db->top = nelem;
967 for (i = 0; i < nelem; i++)
968 db->stack[i] = nelem - i - 1;
969
970 }
971
972 return db;
973}
974
975static inline int bdx_rxdb_alloc_elem(struct rxdb *db)
976{
977 BDX_ASSERT(db->top <= 0);
978 return db->stack[--(db->top)];
979}
980
981static inline void *bdx_rxdb_addr_elem(struct rxdb *db, int n)
982{
983 BDX_ASSERT((n < 0) || (n >= db->nelem));
984 return db->elems + n;
985}
986
987static inline int bdx_rxdb_available(struct rxdb *db)
988{
989 return db->top;
990}
991
992static inline void bdx_rxdb_free_elem(struct rxdb *db, int n)
993{
994 BDX_ASSERT((n >= db->nelem) || (n < 0));
995 db->stack[(db->top)++] = n;
996}
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019static int bdx_rx_init(struct bdx_priv *priv)
1020{
1021 ENTER;
1022
1023 if (bdx_fifo_init(priv, &priv->rxd_fifo0.m, priv->rxd_size,
1024 regRXD_CFG0_0, regRXD_CFG1_0,
1025 regRXD_RPTR_0, regRXD_WPTR_0))
1026 goto err_mem;
1027 if (bdx_fifo_init(priv, &priv->rxf_fifo0.m, priv->rxf_size,
1028 regRXF_CFG0_0, regRXF_CFG1_0,
1029 regRXF_RPTR_0, regRXF_WPTR_0))
1030 goto err_mem;
1031 if (!
1032 (priv->rxdb =
1033 bdx_rxdb_create(priv->rxf_fifo0.m.memsz /
1034 sizeof(struct rxf_desc))))
1035 goto err_mem;
1036
1037 priv->rxf_fifo0.m.pktsz = priv->ndev->mtu + VLAN_ETH_HLEN;
1038 return 0;
1039
1040err_mem:
1041 ERR("%s: %s: Rx init failed\n", BDX_DRV_NAME, priv->ndev->name);
1042 return -ENOMEM;
1043}
1044
1045
1046
1047
1048
/* Release every skb still owned by the rxdb: first pop all free
 * elements marking them unused (dma == 0), then walk the whole pool
 * and unmap/free the skbs of the remaining (in-flight) elements. */
static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
{
	struct rx_map *dm;
	struct rxdb *db = priv->rxdb;
	u16 i;

	ENTER;
	DBG("total=%d free=%d busy=%d\n", db->nelem, bdx_rxdb_available(db),
	    db->nelem - bdx_rxdb_available(db));
	/* drain the free stack so the loop below can skip those slots */
	while (bdx_rxdb_available(db) > 0) {
		i = bdx_rxdb_alloc_elem(db);
		dm = bdx_rxdb_addr_elem(db, i);
		dm->dma = 0;
	}
	/* any slot with a live DMA mapping still holds an skb */
	for (i = 0; i < db->nelem; i++) {
		dm = bdx_rxdb_addr_elem(db, i);
		if (dm->dma) {
			pci_unmap_single(priv->pdev,
					 dm->dma, f->m.pktsz,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(dm->skb);
		}
	}
}
1073
1074
1075
1076
1077
/* Tear down the RX side: free pending skbs and the rxdb, then release
 * both RX fifos. Tolerates partially-initialized state (rxdb may be
 * NULL; bdx_fifo_free skips unallocated fifos). */
static void bdx_rx_free(struct bdx_priv *priv)
{
	ENTER;
	if (priv->rxdb) {
		bdx_rx_free_skbs(priv, &priv->rxf_fifo0);
		bdx_rxdb_destroy(priv->rxdb);
		priv->rxdb = NULL;
	}
	bdx_fifo_free(priv, &priv->rxf_fifo0.m);
	bdx_fifo_free(priv, &priv->rxd_fifo0.m);

	RET();
}
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
/**
 * bdx_rx_alloc_skbs - replenish the RX free fifo with fresh skbs
 * @priv: NIC private structure
 * @f: the rxf (free-buffer) fifo to refill
 *
 * For each free rxdb element (one element is deliberately left
 * unused - presumably to distinguish full from empty; confirm),
 * allocates an skb of the fifo's packet size, maps it for DMA and
 * writes a matching rxf descriptor carrying the element index in
 * va_lo. Finally publishes the new write pointer to the NIC.
 */
static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
{
	struct sk_buff *skb;
	struct rxf_desc *rxfd;
	struct rx_map *dm;
	int dno, delta, idx;
	struct rxdb *db = priv->rxdb;

	ENTER;
	dno = bdx_rxdb_available(db) - 1;
	while (dno > 0) {
		if (!(skb = dev_alloc_skb(f->m.pktsz + NET_IP_ALIGN))) {
			ERR("NO MEM: dev_alloc_skb failed\n");
			break;	/* post what we have; retry on next refill */
		}
		skb->dev = priv->ndev;
		skb_reserve(skb, NET_IP_ALIGN);	/* align the IP header */

		idx = bdx_rxdb_alloc_elem(db);
		dm = bdx_rxdb_addr_elem(db, idx);
		dm->dma = pci_map_single(priv->pdev,
					 skb->data, f->m.pktsz,
					 PCI_DMA_FROMDEVICE);
		dm->skb = skb;
		rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
		rxfd->info = CPU_CHIP_SWAP32(0x10003);	/* magic descriptor info word */
		rxfd->va_lo = idx;	/* element index travels with the buffer */
		rxfd->pa_lo = CPU_CHIP_SWAP32(L32_64(dm->dma));
		rxfd->pa_hi = CPU_CHIP_SWAP32(H32_64(dm->dma));
		rxfd->len = CPU_CHIP_SWAP32(f->m.pktsz);
		print_rxfd(rxfd);

		f->m.wptr += sizeof(struct rxf_desc);
		delta = f->m.wptr - f->m.memsz;
		if (unlikely(delta >= 0)) {
			/* descriptor spilled into FIFO_EXTRA_SPACE: copy the
			 * overrun bytes back to the ring start and wrap */
			f->m.wptr = delta;
			if (delta > 0) {
				memcpy(f->m.va, f->m.va + f->m.memsz, delta);
				DBG("wrapped descriptor\n");
			}
		}
		dno--;
	}
	/* publish the new write pointer so the NIC sees the buffers */
	WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
	RET();
}
1154
/* Hand a received skb up the stack: via the VLAN acceleration path
 * when a vlan group is registered and the descriptor carries a tag,
 * plain netif_receive_skb() otherwise. */
static inline void
NETIF_RX_MUX(struct bdx_priv *priv, u32 rxd_val1, u16 rxd_vlan,
	     struct sk_buff *skb)
{
	ENTER;
	DBG("rxdd->flags.bits.vtag=%d vlgrp=%p\n", GET_RXD_VTAG(rxd_val1),
	    priv->vlgrp);
	if (priv->vlgrp && GET_RXD_VTAG(rxd_val1)) {
		DBG("%s: vlan rcv vlan '%x' vtag '%x', device name '%s'\n",
		    priv->ndev->name,
		    GET_RXD_VLAN_ID(rxd_vlan),
		    GET_RXD_VTAG(rxd_val1),
		    vlan_group_get_device(priv->vlgrp,
					  GET_RXD_VLAN_ID(rxd_vlan))->name);
		/* strip the tag in hardware-accelerated fashion */
		vlan_hwaccel_receive_skb(skb, priv->vlgrp,
					 GET_RXD_VLAN_TCI(rxd_vlan));
	} else {
		netif_receive_skb(skb);
	}
}
1176
/* Re-post an already-mapped RX buffer (identified by the completed
 * descriptor's va_lo element index) back onto the free fifo without
 * allocating or remapping - used for errored packets and for buffers
 * whose payload was copybroken into a smaller skb. Note: the write
 * pointer register is not touched here; the caller's refill pass
 * publishes it. */
static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
{
	struct rxf_desc *rxfd;
	struct rx_map *dm;
	struct rxf_fifo *f;
	struct rxdb *db;
	struct sk_buff *skb;
	int delta;

	ENTER;
	DBG("priv=%p rxdd=%p\n", priv, rxdd);
	f = &priv->rxf_fifo0;
	db = priv->rxdb;
	DBG("db=%p f=%p\n", db, f);
	dm = bdx_rxdb_addr_elem(db, rxdd->va_lo);
	DBG("dm=%p\n", dm);
	skb = dm->skb;
	/* rebuild a free-fifo descriptor for the same mapping */
	rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
	rxfd->info = CPU_CHIP_SWAP32(0x10003);	/* magic descriptor info word */
	rxfd->va_lo = rxdd->va_lo;
	rxfd->pa_lo = CPU_CHIP_SWAP32(L32_64(dm->dma));
	rxfd->pa_hi = CPU_CHIP_SWAP32(H32_64(dm->dma));
	rxfd->len = CPU_CHIP_SWAP32(f->m.pktsz);
	print_rxfd(rxfd);

	f->m.wptr += sizeof(struct rxf_desc);
	delta = f->m.wptr - f->m.memsz;
	if (unlikely(delta >= 0)) {
		/* wrapped into the extra space - copy back and wrap */
		f->m.wptr = delta;
		if (delta > 0) {
			memcpy(f->m.va, f->m.va + f->m.memsz, delta);
			DBG("wrapped descriptor\n");
		}
	}
	RET();
}
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
/**
 * bdx_rx_receive - NAPI RX processing for one descriptor fifo
 * @priv: NIC private structure
 * @f: RX descriptor fifo to drain
 * @budget: maximum packets to deliver to the stack
 *
 * Walks the descriptors completed between our read pointer and the
 * hardware write pointer: errored packets get their buffer recycled;
 * small packets (< BDX_COPYBREAK) are copied into a fresh small skb
 * and the original buffer recycled; large packets are unmapped and
 * handed up directly. Finally publishes the new read pointer and
 * refills the free fifo.
 *
 * Returns the number of packets delivered.
 */
static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
{
	struct sk_buff *skb, *skb2;
	struct rxd_desc *rxdd;
	struct rx_map *dm;
	struct rxf_fifo *rxf_fifo;
	int tmp_len, size;
	int done = 0;
	int max_done = BDX_MAX_RX_DONE;
	struct rxdb *db = NULL;

	u32 rxd_val1;
	u16 len;
	u16 rxd_vlan;

	ENTER;
	max_done = budget;

	/* latch the hardware write pointer for this pass */
	f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_WR_PTR;

	size = f->m.wptr - f->m.rptr;
	if (size < 0)
		size = f->m.memsz + size;	/* wptr wrapped around */

	while (size > 0) {

		rxdd = (struct rxd_desc *)(f->m.va + f->m.rptr);
		rxd_val1 = CPU_CHIP_SWAP32(rxdd->rxd_val1);

		len = CPU_CHIP_SWAP16(rxdd->len);

		rxd_vlan = CPU_CHIP_SWAP16(rxdd->rxd_vlan);

		print_rxdd(rxdd, rxd_val1, len, rxd_vlan);

		/* BC field is the descriptor size in 8-byte units */
		tmp_len = GET_RXD_BC(rxd_val1) << 3;
		BDX_ASSERT(tmp_len <= 0);
		size -= tmp_len;
		if (size < 0)	/* partially written descriptor - stop here */
			break;

		f->m.rptr += tmp_len;

		tmp_len = f->m.rptr - f->m.memsz;
		if (unlikely(tmp_len >= 0)) {
			f->m.rptr = tmp_len;
			if (tmp_len > 0) {
				/* NOTE(review): mirrors ring start into the
				 * extra tail space so a descriptor spanning
				 * the wrap reads linearly - confirm */
				DBG("wrapped desc rptr=%d tmp_len=%d\n",
				    f->m.rptr, tmp_len);
				memcpy(f->m.va + f->m.memsz, f->m.va, tmp_len);
			}
		}

		if (unlikely(GET_RXD_ERR(rxd_val1))) {
			/* bad packet: count it and re-post the buffer */
			DBG("rxd_err = 0x%x\n", GET_RXD_ERR(rxd_val1));
			priv->net_stats.rx_errors++;
			bdx_recycle_skb(priv, rxdd);
			continue;
		}

		rxf_fifo = &priv->rxf_fifo0;
		db = priv->rxdb;
		dm = bdx_rxdb_addr_elem(db, rxdd->va_lo);
		skb = dm->skb;

		if (len < BDX_COPYBREAK &&
		    (skb2 = dev_alloc_skb(len + NET_IP_ALIGN))) {
			/* copybreak: small packet, copy out and recycle
			 * the big mapped buffer */
			skb_reserve(skb2, NET_IP_ALIGN);
			pci_dma_sync_single_for_cpu(priv->pdev,
						    dm->dma, rxf_fifo->m.pktsz,
						    PCI_DMA_FROMDEVICE);
			memcpy(skb2->data, skb->data, len);
			bdx_recycle_skb(priv, rxdd);
			skb = skb2;
		} else {
			/* hand the mapped buffer itself up the stack */
			pci_unmap_single(priv->pdev,
					 dm->dma, rxf_fifo->m.pktsz,
					 PCI_DMA_FROMDEVICE);
			bdx_rxdb_free_elem(db, rxdd->va_lo);
		}

		priv->net_stats.rx_bytes += len;

		skb_put(skb, len);
		skb->dev = priv->ndev;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->protocol = eth_type_trans(skb, priv->ndev);

		/* PKT_ID 0: hardware did not verify the checksum */
		if (GET_RXD_PKT_ID(rxd_val1) == 0)
			skb->ip_summed = CHECKSUM_NONE;

		NETIF_RX_MUX(priv, rxd_val1, rxd_vlan, skb);

		if (++done >= max_done)
			break;
	}

	priv->net_stats.rx_packets += done;

	/* tell the NIC how far we have consumed */
	WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);

	bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);

	RET(done);
}
1333
1334
1335
1336
/* Debug dump of a completed RX descriptor's decoded fields. */
static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
		       u16 rxd_vlan)
{
	DBG("ERROR: rxdd bc %d rxfq %d to %d type %d err %d rxp %d "
	    "pkt_id %d vtag %d len %d vlan_id %d cfi %d prio %d "
	    "va_lo %d va_hi %d\n",
	    GET_RXD_BC(rxd_val1), GET_RXD_RXFQ(rxd_val1), GET_RXD_TO(rxd_val1),
	    GET_RXD_TYPE(rxd_val1), GET_RXD_ERR(rxd_val1),
	    GET_RXD_RXP(rxd_val1), GET_RXD_PKT_ID(rxd_val1),
	    GET_RXD_VTAG(rxd_val1), len, GET_RXD_VLAN_ID(rxd_vlan),
	    GET_RXD_CFI(rxd_vlan), GET_RXD_PRIO(rxd_vlan), rxdd->va_lo,
	    rxdd->va_hi);
}
1350
/* Debug dump of an RX free-fifo descriptor as written to the chip. */
static void print_rxfd(struct rxf_desc *rxfd)
{
	DBG("=== RxF desc CHIP ORDER/ENDIANESS =============\n"
	    "info 0x%x va_lo %u pa_lo 0x%x pa_hi 0x%x len 0x%x\n",
	    rxfd->info, rxfd->va_lo, rxfd->pa_lo, rxfd->pa_hi, rxfd->len);
}
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402static inline int bdx_tx_db_size(struct txdb *db)
1403{
1404 int taken = db->wptr - db->rptr;
1405 if (taken < 0)
1406 taken = db->size + 1 + taken;
1407
1408 return db->size - taken;
1409}
1410
1411
1412
1413
1414
/* Advance one of the txdb's own cursors cyclically within
 * [db->start, db->end). The asserts (which fire when their condition
 * is true) require non-NULL args, that the pointer being advanced is
 * the db's rptr or wptr itself, and that it is inside the ring. */
static inline void __bdx_tx_db_ptr_next(struct txdb *db, struct tx_map **pptr)
{
	BDX_ASSERT(db == NULL || pptr == NULL);	/* bad args */

	BDX_ASSERT(*pptr != db->rptr &&	/* expect either rptr */
		   *pptr != db->wptr);	/* or wptr */

	BDX_ASSERT(*pptr < db->start ||	/* pointer out of range */
		   *pptr >= db->end);

	++*pptr;
	if (unlikely(*pptr == db->end))
		*pptr = db->start;	/* wrap to ring start */
}




/* Consume one element: advance the read cursor.
 * Asserts the ring is not empty (rptr catching up to wptr). */
static inline void bdx_tx_db_inc_rptr(struct txdb *db)
{
	BDX_ASSERT(db->rptr == db->wptr);	/* can't get from empty db */
	__bdx_tx_db_ptr_next(db, &db->rptr);
}




/* Produce one element: advance the write cursor.
 * Asserts the ring did not just become "full" into rptr. */
static inline void bdx_tx_db_inc_wptr(struct txdb *db)
{
	__bdx_tx_db_ptr_next(db, &db->wptr);
	BDX_ASSERT(db->rptr == db->wptr);	/* must not overwrite */

}
1448
1449
1450
1451
1452
1453
/**
 * bdx_tx_db_init - allocate the software TX bookkeeping ring
 * @d: txdb to initialize
 * @sz_type: same size code as the hardware fifo; the ring gets
 *           FIFO_SIZE * 2^(sz_type + 1) bytes
 *
 * d->size is the usable element count: one extra element always
 * separates wptr from rptr so that "full" and "empty" states are
 * distinguishable (see bdx_tx_db_size()). d->end is a one-past-the-end
 * sentinel used for wrapping.
 *
 * Returns 0 on success, -ENOMEM if the vmalloc fails.
 */
static int bdx_tx_db_init(struct txdb *d, int sz_type)
{
	int memsz = FIFO_SIZE * (1 << (sz_type + 1));

	d->start = vmalloc(memsz);
	if (!d->start)
		return -ENOMEM;

	/* element count minus the separator element */
	d->size = memsz / sizeof(struct tx_map) - 1;
	d->end = d->start + d->size + 1;	/* just beyond the ring */

	/* ring starts empty: both cursors at the beginning */
	d->rptr = d->start;
	d->wptr = d->start;

	return 0;
}
1476
1477
1478
1479
/* Free the TX bookkeeping ring. The contents are simply dropped, not
 * walked - callers must have reclaimed all in-flight skbs first. */
static void bdx_tx_db_close(struct txdb *d)
{
	BDX_ASSERT(d == NULL);	/* fires on a NULL txdb */

	vfree(d->start);
	d->start = NULL;
}
1487
1488
1489
1490
1491
1492
1493
/* Per-fragment-count TX descriptor sizes (bytes and 64-bit qwords),
 * including the PBL array; precomputed by init_txd_sizes() and indexed
 * by skb_shinfo(skb)->nr_frags. */
static struct {
	u16 bytes;
	u16 qwords;	/* qword = 64 bit */
} txd_sizes[MAX_SKB_FRAGS + 1];
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
/**
 * bdx_tx_map_skb - DMA-map an skb and build its descriptor PBL entries
 * @priv: NIC private structure
 * @skb: packet to map (head + page fragments)
 * @txdd: descriptor whose PBL array receives the mappings
 *
 * Maps the linear head and every page fragment, recording each
 * mapping's length and DMA address in the txdb (for later unmapping in
 * tx cleanup) and mirroring it into a PBL entry of the descriptor.
 * A final txdb record stores the skb pointer with a negative len
 * (-descriptor size) - presumably the marker tx cleanup uses to tell
 * the skb-owning record from mapping records; confirm in
 * bdx_tx_cleanup().
 */
static inline void
bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb,
	       struct txd_desc *txdd)
{
	struct txdb *db = &priv->txdb;
	struct pbl *pbl = &txdd->pbl[0];
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* linear head first */
	db->wptr->len = skb->len - skb->data_len;
	db->wptr->addr.dma = pci_map_single(priv->pdev, skb->data,
					    db->wptr->len, PCI_DMA_TODEVICE);
	pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
	pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma));
	pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma));
	DBG("=== pbl   len: 0x%x ================\n", pbl->len);
	DBG("=== pbl pa_lo: 0x%x ================\n", pbl->pa_lo);
	DBG("=== pbl pa_hi: 0x%x ================\n", pbl->pa_hi);
	bdx_tx_db_inc_wptr(db);

	/* then one PBL entry per page fragment */
	for (i = 0; i < nr_frags; i++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[i];
		db->wptr->len = frag->size;
		db->wptr->addr.dma =
		    pci_map_page(priv->pdev, frag->page, frag->page_offset,
				 frag->size, PCI_DMA_TODEVICE);

		pbl++;
		pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
		pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma));
		pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma));
		bdx_tx_db_inc_wptr(db);
	}

	/* terminal record: negative total descriptor size + the skb */
	db->wptr->len = -txd_sizes[nr_frags].bytes;
	db->wptr->addr.skb = skb;
	bdx_tx_db_inc_wptr(db);
}
1550
1551
1552
1553
1554static void __init init_txd_sizes(void)
1555{
1556 int i, lwords;
1557
1558
1559
1560 for (i = 0; i < MAX_SKB_FRAGS + 1; i++) {
1561 lwords = 7 + (i * 3);
1562 if (lwords & 1)
1563 lwords++;
1564 txd_sizes[i].qwords = lwords >> 1;
1565 txd_sizes[i].bytes = lwords << 2;
1566 }
1567}
1568
1569
1570
1571static int bdx_tx_init(struct bdx_priv *priv)
1572{
1573 if (bdx_fifo_init(priv, &priv->txd_fifo0.m, priv->txd_size,
1574 regTXD_CFG0_0,
1575 regTXD_CFG1_0, regTXD_RPTR_0, regTXD_WPTR_0))
1576 goto err_mem;
1577 if (bdx_fifo_init(priv, &priv->txf_fifo0.m, priv->txf_size,
1578 regTXF_CFG0_0,
1579 regTXF_CFG1_0, regTXF_RPTR_0, regTXF_WPTR_0))
1580 goto err_mem;
1581
1582
1583
1584 if (bdx_tx_db_init(&priv->txdb, max(priv->txd_size, priv->txf_size)))
1585 goto err_mem;
1586
1587 priv->tx_level = BDX_MAX_TX_LEVEL;
1588#ifdef BDX_DELAY_WPTR
1589 priv->tx_update_mark = priv->tx_level - 1024;
1590#endif
1591 return 0;
1592
1593err_mem:
1594 ERR("tehuti: %s: Tx init failed\n", priv->ndev->name);
1595 return -ENOMEM;
1596}
1597
1598
1599
1600
1601
1602
1603static inline int bdx_tx_space(struct bdx_priv *priv)
1604{
1605 struct txd_fifo *f = &priv->txd_fifo0;
1606 int fsize;
1607
1608 f->m.rptr = READ_REG(priv, f->m.reg_RPTR) & TXF_WPTR_WR_PTR;
1609 fsize = f->m.rptr - f->m.wptr;
1610 if (fsize <= 0)
1611 fsize = f->m.memsz + fsize;
1612 return (fsize);
1613}
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
/*
 * bdx_tx_transmit - send a packet to the NIC (ndo_start_xmit)
 * @skb: packet to send
 * @ndev: network device
 *
 * Builds a TxD descriptor directly in the TxD fifo, maps the skb via
 * bdx_tx_map_skb(), advances the fifo write pointer (handling the wrap
 * at the end of fifo memory) and notifies the hardware.  Locking is
 * LLTX-style: a trylock on priv->tx_lock with local irqs disabled,
 * returning NETDEV_TX_LOCKED on contention so the stack retries.
 *
 * Returns NETDEV_TX_OK on success or NETDEV_TX_LOCKED on contention.
 */
static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
				   struct net_device *ndev)
{
	struct bdx_priv *priv = netdev_priv(ndev);
	struct txd_fifo *f = &priv->txd_fifo0;
	int txd_checksum = 7;	/* checksum offload enabled by default */
	int txd_lgsnd = 0;	/* large-send (TSO) flag */
	int txd_vlan_id = 0;
	int txd_vtag = 0;
	int txd_mss = 0;

	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct txd_desc *txdd;
	int len;
	unsigned long flags;

	ENTER;
	local_irq_save(flags);
	if (!spin_trylock(&priv->tx_lock)) {
		local_irq_restore(flags);
		DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n",
		    BDX_DRV_NAME, ndev->name);
		return NETDEV_TX_LOCKED;
	}

	/* build tx descriptor in place at the current write position */
	BDX_ASSERT(f->m.wptr >= f->m.memsz);	/* started with valid wptr */
	txdd = (struct txd_desc *)(f->m.va + f->m.wptr);
	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
		txd_checksum = 0;

	if (skb_shinfo(skb)->gso_size) {
		txd_mss = skb_shinfo(skb)->gso_size;
		txd_lgsnd = 1;
		DBG("skb %p skb len %d gso size = %d\n", skb, skb->len,
		    txd_mss);
	}

	if (vlan_tx_tag_present(skb)) {
		/* the descriptor carries only the 12-bit VLAN id */
		txd_vlan_id = vlan_tx_tag_get(skb) & BITS_MASK(12);
		txd_vtag = 1;
	}

	txdd->length = CPU_CHIP_SWAP16(skb->len);
	txdd->mss = CPU_CHIP_SWAP16(txd_mss);
	txdd->txd_val1 =
	    CPU_CHIP_SWAP32(TXD_W1_VAL
			    (txd_sizes[nr_frags].qwords, txd_checksum, txd_vtag,
			     txd_lgsnd, txd_vlan_id));
	DBG("=== TxD desc =====================\n");
	DBG("=== w1: 0x%x ================\n", txdd->txd_val1);
	DBG("=== w2: mss 0x%x len 0x%x\n", txdd->mss, txdd->length);

	bdx_tx_map_skb(priv, skb, txdd);

	/* advance the write pointer; if the descriptor straddled the end
	 * of fifo memory, copy the overflowing tail back to the start of
	 * the fifo */
	f->m.wptr += txd_sizes[nr_frags].bytes;
	len = f->m.wptr - f->m.memsz;
	if (unlikely(len >= 0)) {
		f->m.wptr = len;
		if (len > 0) {
			BDX_ASSERT(len > f->m.memsz);
			memcpy(f->m.va, f->m.va + f->m.memsz, len);
		}
	}
	BDX_ASSERT(f->m.wptr >= f->m.memsz);	/* finished with valid wptr */

	priv->tx_level -= txd_sizes[nr_frags].bytes;
	BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL);
#ifdef BDX_DELAY_WPTR
	/* defer the doorbell write while the fifo is mostly empty;
	 * bdx_tx_cleanup() flushes any pending update */
	if (priv->tx_level > priv->tx_update_mark) {
		WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
	} else {
		if (priv->tx_noupd++ > BDX_NO_UPD_PACKETS) {
			priv->tx_noupd = 0;
			WRITE_REG(priv, f->m.reg_WPTR,
				  f->m.wptr & TXF_WPTR_WR_PTR);
		}
	}
#else
	/* kick the hardware on every packet */
	WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);

#endif
#ifdef BDX_LLTX
	ndev->trans_start = jiffies;	/* LLTX drivers update this themselves */
#endif
	priv->net_stats.tx_packets++;
	priv->net_stats.tx_bytes += skb->len;

	if (priv->tx_level < BDX_MIN_TX_LEVEL) {
		DBG("%s: %s: TX Q STOP level %d\n",
		    BDX_DRV_NAME, ndev->name, priv->tx_level);
		netif_stop_queue(ndev);
	}

	spin_unlock_irqrestore(&priv->tx_lock, flags);
	return NETDEV_TX_OK;
}
1734
1735
1736
1737
1738
1739
/*
 * bdx_tx_cleanup - reclaim TxF descriptors completed by the NIC
 * @priv: NIC private structure
 *
 * Walks the TxF fifo from the driver read pointer up to the hardware
 * write pointer.  For each completed packet it unmaps every DMA
 * mapping recorded in the tx database (positive-len entries), frees
 * the skb held in the trailing negative-len marker entry and credits
 * the marker's magnitude back to the tx level.  Finally acknowledges
 * the hardware and, if enough fifo space was freed, wakes the queue.
 */
static void bdx_tx_cleanup(struct bdx_priv *priv)
{
	struct txf_fifo *f = &priv->txf_fifo0;
	struct txdb *db = &priv->txdb;
	int tx_level = 0;	/* bytes of fifo space reclaimed */

	ENTER;
	f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_MASK;
	BDX_ASSERT(f->m.rptr >= f->m.memsz);	/* started with valid rptr */

	while (f->m.wptr != f->m.rptr) {
		f->m.rptr += BDX_TXF_DESC_SZ;
		f->m.rptr &= f->m.size_mask;

		/* unmap every DMA mapping of this packet; the database
		 * entries are in the order bdx_tx_map_skb() created them */
		BDX_ASSERT(db->rptr->len == 0);
		do {
			BDX_ASSERT(db->rptr->addr.dma == 0);
			pci_unmap_page(priv->pdev, db->rptr->addr.dma,
				       db->rptr->len, PCI_DMA_TODEVICE);
			bdx_tx_db_inc_rptr(db);
		} while (db->rptr->len > 0);
		tx_level -= db->rptr->len;	/* marker len is negative */

		/* db->rptr is now the end-of-packet marker holding the skb */
		dev_kfree_skb_irq(db->rptr->addr.skb);
		bdx_tx_db_inc_rptr(db);
	}

	/* tell the NIC which TxF descriptors were consumed */
	BDX_ASSERT((f->m.wptr & TXF_WPTR_WR_PTR) >= f->m.memsz);
	WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);

	/* tx_level and the queue state are shared with bdx_tx_transmit() */
	spin_lock(&priv->tx_lock);
	priv->tx_level += tx_level;
	BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL);
#ifdef BDX_DELAY_WPTR
	/* flush any doorbell update bdx_tx_transmit() deferred */
	if (priv->tx_noupd) {
		priv->tx_noupd = 0;
		WRITE_REG(priv, priv->txd_fifo0.m.reg_WPTR,
			  priv->txd_fifo0.m.wptr & TXF_WPTR_WR_PTR);
	}
#endif

	if (unlikely(netif_queue_stopped(priv->ndev)
		     && netif_carrier_ok(priv->ndev)
		     && (priv->tx_level >= BDX_MIN_TX_LEVEL))) {
		DBG("%s: %s: TX Q WAKE level %d\n",
		    BDX_DRV_NAME, priv->ndev->name, priv->tx_level);
		netif_wake_queue(priv->ndev);
	}
	spin_unlock(&priv->tx_lock);
}
1796
1797
1798
1799
1800static void bdx_tx_free_skbs(struct bdx_priv *priv)
1801{
1802 struct txdb *db = &priv->txdb;
1803
1804 ENTER;
1805 while (db->rptr != db->wptr) {
1806 if (likely(db->rptr->len))
1807 pci_unmap_page(priv->pdev, db->rptr->addr.dma,
1808 db->rptr->len, PCI_DMA_TODEVICE);
1809 else
1810 dev_kfree_skb(db->rptr->addr.skb);
1811 bdx_tx_db_inc_rptr(db);
1812 }
1813 RET();
1814}
1815
1816
/* bdx_tx_free - release all tx resources: pending skbs/mappings, the
 * TxD and TxF fifos and the tx database.  Counterpart of bdx_tx_init(). */
static void bdx_tx_free(struct bdx_priv *priv)
{
	ENTER;
	bdx_tx_free_skbs(priv);
	bdx_fifo_free(priv, &priv->txd_fifo0.m);
	bdx_fifo_free(priv, &priv->txf_fifo0.m);
	bdx_tx_db_close(&priv->txdb);
}
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836static void bdx_tx_push_desc(struct bdx_priv *priv, void *data, int size)
1837{
1838 struct txd_fifo *f = &priv->txd_fifo0;
1839 int i = f->m.memsz - f->m.wptr;
1840
1841 if (size == 0)
1842 return;
1843
1844 if (i > size) {
1845 memcpy(f->m.va + f->m.wptr, data, size);
1846 f->m.wptr += size;
1847 } else {
1848 memcpy(f->m.va + f->m.wptr, data, i);
1849 f->m.wptr = size - i;
1850 memcpy(f->m.va, data + i, f->m.wptr);
1851 }
1852 WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
1853}
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size)
1864{
1865 int timer = 0;
1866 ENTER;
1867
1868 while (size > 0) {
1869
1870
1871
1872 int avail = bdx_tx_space(priv) - 8;
1873 if (avail <= 0) {
1874 if (timer++ > 300) {
1875 DBG("timeout while writing desc to TxD fifo\n");
1876 break;
1877 }
1878 udelay(50);
1879 continue;
1880 }
1881 avail = MIN(avail, size);
1882 DBG("about to push %d bytes starting %p size %d\n", avail,
1883 data, size);
1884 bdx_tx_push_desc(priv, data, avail);
1885 size -= avail;
1886 data += avail;
1887 }
1888 RET();
1889}
1890
/* net_device_ops: entry points the network stack uses for this driver */
static const struct net_device_ops bdx_netdev_ops = {
	.ndo_open = bdx_open,
	.ndo_stop = bdx_close,
	.ndo_start_xmit = bdx_tx_transmit,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = bdx_ioctl,
	.ndo_set_multicast_list = bdx_setmulti,
	.ndo_get_stats = bdx_get_stats,
	.ndo_change_mtu = bdx_change_mtu,
	.ndo_set_mac_address = bdx_set_mac,
	.ndo_vlan_rx_register = bdx_vlan_rx_register,
	.ndo_vlan_rx_add_vid = bdx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = bdx_vlan_rx_kill_vid,
};
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923static int __devinit
1924bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1925{
1926 struct net_device *ndev;
1927 struct bdx_priv *priv;
1928 int err, pci_using_dac, port;
1929 unsigned long pciaddr;
1930 u32 regionSize;
1931 struct pci_nic *nic;
1932
1933 ENTER;
1934
1935 nic = vmalloc(sizeof(*nic));
1936 if (!nic)
1937 RET(-ENOMEM);
1938
1939
1940 if ((err = pci_enable_device(pdev)))
1941 goto err_pci;
1942
1943 if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) &&
1944 !(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) {
1945 pci_using_dac = 1;
1946 } else {
1947 if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) ||
1948 (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
1949 printk(KERN_ERR "tehuti: No usable DMA configuration"
1950 ", aborting\n");
1951 goto err_dma;
1952 }
1953 pci_using_dac = 0;
1954 }
1955
1956 if ((err = pci_request_regions(pdev, BDX_DRV_NAME)))
1957 goto err_dma;
1958
1959 pci_set_master(pdev);
1960
1961 pciaddr = pci_resource_start(pdev, 0);
1962 if (!pciaddr) {
1963 err = -EIO;
1964 ERR("tehuti: no MMIO resource\n");
1965 goto err_out_res;
1966 }
1967 if ((regionSize = pci_resource_len(pdev, 0)) < BDX_REGS_SIZE) {
1968 err = -EIO;
1969 ERR("tehuti: MMIO resource (%x) too small\n", regionSize);
1970 goto err_out_res;
1971 }
1972
1973 nic->regs = ioremap(pciaddr, regionSize);
1974 if (!nic->regs) {
1975 err = -EIO;
1976 ERR("tehuti: ioremap failed\n");
1977 goto err_out_res;
1978 }
1979
1980 if (pdev->irq < 2) {
1981 err = -EIO;
1982 ERR("tehuti: invalid irq (%d)\n", pdev->irq);
1983 goto err_out_iomap;
1984 }
1985 pci_set_drvdata(pdev, nic);
1986
1987 if (pdev->device == 0x3014)
1988 nic->port_num = 2;
1989 else
1990 nic->port_num = 1;
1991
1992 print_hw_id(pdev);
1993
1994 bdx_hw_reset_direct(nic->regs);
1995
1996 nic->irq_type = IRQ_INTX;
1997#ifdef BDX_MSI
1998 if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) {
1999 if ((err = pci_enable_msi(pdev)))
2000 ERR("Tehuti: Can't eneble msi. error is %d\n", err);
2001 else
2002 nic->irq_type = IRQ_MSI;
2003 } else
2004 DBG("HW does not support MSI\n");
2005#endif
2006
2007
2008 for (port = 0; port < nic->port_num; port++) {
2009 if (!(ndev = alloc_etherdev(sizeof(struct bdx_priv)))) {
2010 err = -ENOMEM;
2011 printk(KERN_ERR "tehuti: alloc_etherdev failed\n");
2012 goto err_out_iomap;
2013 }
2014
2015 ndev->netdev_ops = &bdx_netdev_ops;
2016 ndev->tx_queue_len = BDX_NDEV_TXQ_LEN;
2017
2018 bdx_ethtool_ops(ndev);
2019
2020
2021
2022 ndev->if_port = port;
2023 ndev->base_addr = pciaddr;
2024 ndev->mem_start = pciaddr;
2025 ndev->mem_end = pciaddr + regionSize;
2026 ndev->irq = pdev->irq;
2027 ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
2028 | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
2029 NETIF_F_HW_VLAN_FILTER
2030
2031 ;
2032
2033 if (pci_using_dac)
2034 ndev->features |= NETIF_F_HIGHDMA;
2035
2036
2037 priv = nic->priv[port] = netdev_priv(ndev);
2038
2039 memset(priv, 0, sizeof(struct bdx_priv));
2040 priv->pBdxRegs = nic->regs + port * 0x8000;
2041 priv->port = port;
2042 priv->pdev = pdev;
2043 priv->ndev = ndev;
2044 priv->nic = nic;
2045 priv->msg_enable = BDX_DEF_MSG_ENABLE;
2046
2047 netif_napi_add(ndev, &priv->napi, bdx_poll, 64);
2048
2049 if ((readl(nic->regs + FPGA_VER) & 0xFFF) == 308) {
2050 DBG("HW statistics not supported\n");
2051 priv->stats_flag = 0;
2052 } else {
2053 priv->stats_flag = 1;
2054 }
2055
2056
2057 priv->txd_size = 2;
2058 priv->txf_size = 2;
2059 priv->rxd_size = 2;
2060 priv->rxf_size = 3;
2061
2062
2063 priv->rdintcm = INT_REG_VAL(0x20, 1, 4, 12);
2064 priv->tdintcm = INT_REG_VAL(0x20, 1, 0, 12);
2065
2066
2067
2068
2069
2070
2071#ifdef BDX_LLTX
2072 ndev->features |= NETIF_F_LLTX;
2073#endif
2074 spin_lock_init(&priv->tx_lock);
2075
2076
2077 if (bdx_read_mac(priv)) {
2078 printk(KERN_ERR "tehuti: load MAC address failed\n");
2079 goto err_out_iomap;
2080 }
2081 SET_NETDEV_DEV(ndev, &pdev->dev);
2082 if ((err = register_netdev(ndev))) {
2083 printk(KERN_ERR "tehuti: register_netdev failed\n");
2084 goto err_out_free;
2085 }
2086 netif_carrier_off(ndev);
2087 netif_stop_queue(ndev);
2088
2089 print_eth_id(ndev);
2090 }
2091 RET(0);
2092
2093err_out_free:
2094 free_netdev(ndev);
2095err_out_iomap:
2096 iounmap(nic->regs);
2097err_out_res:
2098 pci_release_regions(pdev);
2099err_dma:
2100 pci_disable_device(pdev);
2101err_pci:
2102 vfree(nic);
2103
2104 RET(err);
2105}
2106
2107
2108
/* ethtool self-test names (no self tests are implemented) */
static const char
 bdx_test_names[][ETH_GSTRING_LEN] = {
	"No tests defined"
};
2113
2114
/* Statistics counter names reported via ethtool -S; order and count
 * must match struct bdx_stats (checked by the BDX_ASSERT in
 * bdx_get_stats_count()) */
static const char
 bdx_stat_names[][ETH_GSTRING_LEN] = {
	/* receive-side counters */
	"InUCast",
	"InMCast",
	"InBCast",
	"InPkts",
	"InErrors",
	"InDropped",
	"FrameTooLong",
	"FrameSequenceErrors",
	"InVLAN",
	"InDroppedDFE",
	"InDroppedIntFull",
	"InFrameAlignErrors",

	/* transmit-side counters */
	"OutUCast",
	"OutMCast",
	"OutBCast",
	"OutPkts",

	"OutVLAN",
	"InUCastOctects",
	"OutUCastOctects",

	"InBCastOctects",
	"OutBCastOctects",
	"InOctects",
	"OutOctects",
};
2150
2151
2152
2153
2154
2155
2156static int bdx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
2157{
2158 u32 rdintcm;
2159 u32 tdintcm;
2160 struct bdx_priv *priv = netdev_priv(netdev);
2161
2162 rdintcm = priv->rdintcm;
2163 tdintcm = priv->tdintcm;
2164
2165 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
2166 ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
2167 ecmd->speed = SPEED_10000;
2168 ecmd->duplex = DUPLEX_FULL;
2169 ecmd->port = PORT_FIBRE;
2170 ecmd->transceiver = XCVR_EXTERNAL;
2171 ecmd->autoneg = AUTONEG_DISABLE;
2172
2173
2174
2175 ecmd->maxtxpkt =
2176 ((GET_PCK_TH(tdintcm) * PCK_TH_MULT) / BDX_TXF_DESC_SZ);
2177 ecmd->maxrxpkt =
2178 ((GET_PCK_TH(rdintcm) * PCK_TH_MULT) / sizeof(struct rxf_desc));
2179
2180 return 0;
2181}
2182
2183
2184
2185
2186
2187
2188static void
2189bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
2190{
2191 struct bdx_priv *priv = netdev_priv(netdev);
2192
2193 strlcat(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver));
2194 strlcat(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version));
2195 strlcat(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
2196 strlcat(drvinfo->bus_info, pci_name(priv->pdev),
2197 sizeof(drvinfo->bus_info));
2198
2199 drvinfo->n_stats = ((priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0);
2200 drvinfo->testinfo_len = 0;
2201 drvinfo->regdump_len = 0;
2202 drvinfo->eedump_len = 0;
2203}
2204
2205
2206
2207
2208
/*
 * bdx_get_rx_csum - ethtool get_rx_csum callback
 * @netdev: network interface device structure
 *
 * Always reports rx checksumming as enabled (no set_rx_csum hook is
 * offered, so it cannot be turned off).
 */
static u32 bdx_get_rx_csum(struct net_device *netdev)
{
	return 1;		/* always on */
}
2213
2214
2215
2216
2217
2218static u32 bdx_get_tx_csum(struct net_device *netdev)
2219{
2220 return (netdev->features & NETIF_F_IP_CSUM) != 0;
2221}
2222
2223
2224
2225
2226
2227
2228static int
2229bdx_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal)
2230{
2231 u32 rdintcm;
2232 u32 tdintcm;
2233 struct bdx_priv *priv = netdev_priv(netdev);
2234
2235 rdintcm = priv->rdintcm;
2236 tdintcm = priv->tdintcm;
2237
2238
2239
2240 ecoal->rx_coalesce_usecs = GET_INT_COAL(rdintcm) * INT_COAL_MULT;
2241 ecoal->rx_max_coalesced_frames =
2242 ((GET_PCK_TH(rdintcm) * PCK_TH_MULT) / sizeof(struct rxf_desc));
2243
2244 ecoal->tx_coalesce_usecs = GET_INT_COAL(tdintcm) * INT_COAL_MULT;
2245 ecoal->tx_max_coalesced_frames =
2246 ((GET_PCK_TH(tdintcm) * PCK_TH_MULT) / BDX_TXF_DESC_SZ);
2247
2248
2249 return 0;
2250}
2251
2252
2253
2254
2255
2256
2257static int
2258bdx_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal)
2259{
2260 u32 rdintcm;
2261 u32 tdintcm;
2262 struct bdx_priv *priv = netdev_priv(netdev);
2263 int rx_coal;
2264 int tx_coal;
2265 int rx_max_coal;
2266 int tx_max_coal;
2267
2268
2269 rx_coal = ecoal->rx_coalesce_usecs / INT_COAL_MULT;
2270 tx_coal = ecoal->tx_coalesce_usecs / INT_COAL_MULT;
2271 rx_max_coal = ecoal->rx_max_coalesced_frames;
2272 tx_max_coal = ecoal->tx_max_coalesced_frames;
2273
2274
2275 rx_max_coal =
2276 (((rx_max_coal * sizeof(struct rxf_desc)) + PCK_TH_MULT - 1)
2277 / PCK_TH_MULT);
2278 tx_max_coal =
2279 (((tx_max_coal * BDX_TXF_DESC_SZ) + PCK_TH_MULT - 1)
2280 / PCK_TH_MULT);
2281
2282 if ((rx_coal > 0x7FFF) || (tx_coal > 0x7FFF)
2283 || (rx_max_coal > 0xF) || (tx_max_coal > 0xF))
2284 return -EINVAL;
2285
2286 rdintcm = INT_REG_VAL(rx_coal, GET_INT_COAL_RC(priv->rdintcm),
2287 GET_RXF_TH(priv->rdintcm), rx_max_coal);
2288 tdintcm = INT_REG_VAL(tx_coal, GET_INT_COAL_RC(priv->tdintcm), 0,
2289 tx_max_coal);
2290
2291 priv->rdintcm = rdintcm;
2292 priv->tdintcm = tdintcm;
2293
2294 WRITE_REG(priv, regRDINTCM0, rdintcm);
2295 WRITE_REG(priv, regTDINTCM0, tdintcm);
2296
2297 return 0;
2298}
2299
2300
2301static inline int bdx_rx_fifo_size_to_packets(int rx_size)
2302{
2303 return ((FIFO_SIZE * (1 << rx_size)) / sizeof(struct rxf_desc));
2304}
2305
2306
2307static inline int bdx_tx_fifo_size_to_packets(int tx_size)
2308{
2309 return ((FIFO_SIZE * (1 << tx_size)) / BDX_TXF_DESC_SZ);
2310}
2311
2312
2313
2314
2315
2316
2317static void
2318bdx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
2319{
2320 struct bdx_priv *priv = netdev_priv(netdev);
2321
2322
2323 ring->rx_max_pending = bdx_rx_fifo_size_to_packets(3);
2324 ring->tx_max_pending = bdx_tx_fifo_size_to_packets(3);
2325 ring->rx_pending = bdx_rx_fifo_size_to_packets(priv->rxf_size);
2326 ring->tx_pending = bdx_tx_fifo_size_to_packets(priv->txd_size);
2327}
2328
2329
2330
2331
2332
2333
2334static int
2335bdx_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
2336{
2337 struct bdx_priv *priv = netdev_priv(netdev);
2338 int rx_size = 0;
2339 int tx_size = 0;
2340
2341 for (; rx_size < 4; rx_size++) {
2342 if (bdx_rx_fifo_size_to_packets(rx_size) >= ring->rx_pending)
2343 break;
2344 }
2345 if (rx_size == 4)
2346 rx_size = 3;
2347
2348 for (; tx_size < 4; tx_size++) {
2349 if (bdx_tx_fifo_size_to_packets(tx_size) >= ring->tx_pending)
2350 break;
2351 }
2352 if (tx_size == 4)
2353 tx_size = 3;
2354
2355
2356 if ((rx_size == priv->rxf_size)
2357 && (tx_size == priv->txd_size))
2358 return 0;
2359
2360 priv->rxf_size = rx_size;
2361 if (rx_size > 1)
2362 priv->rxd_size = rx_size - 1;
2363 else
2364 priv->rxd_size = rx_size;
2365
2366 priv->txf_size = priv->txd_size = tx_size;
2367
2368 if (netif_running(netdev)) {
2369 bdx_close(netdev);
2370 bdx_open(netdev);
2371 }
2372 return 0;
2373}
2374
2375
2376
2377
2378
2379
2380static void bdx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2381{
2382 switch (stringset) {
2383 case ETH_SS_TEST:
2384 memcpy(data, *bdx_test_names, sizeof(bdx_test_names));
2385 break;
2386 case ETH_SS_STATS:
2387 memcpy(data, *bdx_stat_names, sizeof(bdx_stat_names));
2388 break;
2389 }
2390}
2391
2392
2393
2394
2395
2396static int bdx_get_stats_count(struct net_device *netdev)
2397{
2398 struct bdx_priv *priv = netdev_priv(netdev);
2399 BDX_ASSERT(ARRAY_SIZE(bdx_stat_names)
2400 != sizeof(struct bdx_stats) / sizeof(u64));
2401 return ((priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0);
2402}
2403
2404
2405
2406
2407
2408
2409
2410static void bdx_get_ethtool_stats(struct net_device *netdev,
2411 struct ethtool_stats *stats, u64 *data)
2412{
2413 struct bdx_priv *priv = netdev_priv(netdev);
2414
2415 if (priv->stats_flag) {
2416
2417
2418 bdx_update_stats(priv);
2419
2420
2421 memcpy(data, &priv->hw_stats, sizeof(priv->hw_stats));
2422 }
2423}
2424
2425
2426
2427
2428
/*
 * bdx_ethtool_ops - install the driver's ethtool callbacks on @netdev
 * @netdev: net device to attach the (shared, static) ops table to
 */
static void bdx_ethtool_ops(struct net_device *netdev)
{
	static const struct ethtool_ops bdx_ethtool_ops = {
		.get_settings = bdx_get_settings,
		.get_drvinfo = bdx_get_drvinfo,
		.get_link = ethtool_op_get_link,
		.get_coalesce = bdx_get_coalesce,
		.set_coalesce = bdx_set_coalesce,
		.get_ringparam = bdx_get_ringparam,
		.set_ringparam = bdx_set_ringparam,
		.get_rx_csum = bdx_get_rx_csum,
		.get_tx_csum = bdx_get_tx_csum,
		.get_sg = ethtool_op_get_sg,
		.get_tso = ethtool_op_get_tso,
		.get_strings = bdx_get_strings,
		.get_stats_count = bdx_get_stats_count,
		.get_ethtool_stats = bdx_get_ethtool_stats,
	};

	SET_ETHTOOL_OPS(netdev, &bdx_ethtool_ops);
}
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
/*
 * bdx_remove - PCI remove callback: undo everything bdx_probe() did
 * @pdev: PCI device being removed
 *
 * Unregisters and frees every port's net_device, disables MSI if it
 * was enabled, unmaps the register BAR and releases/disables the PCI
 * device before freeing the per-board nic structure.
 */
static void __devexit bdx_remove(struct pci_dev *pdev)
{
	struct pci_nic *nic = pci_get_drvdata(pdev);
	struct net_device *ndev;
	int port;

	for (port = 0; port < nic->port_num; port++) {
		ndev = nic->priv[port]->ndev;
		unregister_netdev(ndev);
		free_netdev(ndev);
	}

#ifdef BDX_MSI
	if (nic->irq_type == IRQ_MSI)
		pci_disable_msi(pdev);
#endif

	iounmap(nic->regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	vfree(nic);

	RET();
}
2486
/* PCI driver glue: probe/remove entry points and the device id table */
static struct pci_driver bdx_pci_driver = {
	.name = BDX_DRV_NAME,
	.id_table = bdx_pci_tbl,
	.probe = bdx_probe,
	.remove = __devexit_p(bdx_remove),
};
2493
2494
2495
2496
/* print_driver_id - log driver name, description, version and options */
static void __init print_driver_id(void)
{
	printk(KERN_INFO "%s: %s, %s\n", BDX_DRV_NAME, BDX_DRV_DESC,
	       BDX_DRV_VERSION);
	printk(KERN_INFO "%s: Options: hw_csum %s\n", BDX_DRV_NAME,
	       BDX_MSI_STRING);
}
2504
/* bdx_module_init - module entry point: precompute the txd size table,
 * announce the driver and register with the PCI core */
static int __init bdx_module_init(void)
{
	ENTER;
	init_txd_sizes();
	print_driver_id();
	RET(pci_register_driver(&bdx_pci_driver));
}
2512
2513module_init(bdx_module_init);
2514
/* bdx_module_exit - module exit point: unregister from the PCI core,
 * which in turn invokes bdx_remove() for every bound device */
static void __exit bdx_module_exit(void)
{
	ENTER;
	pci_unregister_driver(&bdx_pci_driver);
	RET();
}
2521
2522module_exit(bdx_module_exit);
2523
2524MODULE_LICENSE("GPL");
2525MODULE_AUTHOR(DRIVER_AUTHOR);
2526MODULE_DESCRIPTION(BDX_DRV_DESC);
2527MODULE_FIRMWARE("tehuti/firmware.bin");
2528