// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for SGI's IOC3 based Ethernet cards as found in the PCI card.
 */
#define IOC3_NAME "ioc3-eth"
#define IOC3_VERSION "2.6.3-4"

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/gfp.h>

#ifdef CONFIG_SERIAL_8250
#include <linux/serial_core.h>
#include <linux/serial_8250.h>
#include <linux/serial_reg.h>
#endif

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/dma-direct.h>

#include <net/ip.h>

#include <asm/byteorder.h>
#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/sn/types.h>
#include <asm/sn/ioc3.h>
#include <asm/pci/bridge.h>

/* Number of RX buffers to keep posted to the chip */
#define RX_BUFFS 64
#define RX_RING_ENTRIES 512		/* fixed in hardware */
#define RX_RING_MASK (RX_RING_ENTRIES - 1)
#define RX_RING_SIZE (RX_RING_ENTRIES * sizeof(u64))

/* 128 TX buffers (not tunable) */
#define TX_RING_ENTRIES 128
#define TX_RING_MASK (TX_RING_ENTRIES - 1)
#define TX_RING_SIZE (TX_RING_ENTRIES * sizeof(struct ioc3_etxd))

/* IOC3 does dma transfers in 128 byte blocks */
#define IOC3_DMA_XFER_LEN 128UL

/* Every RX buffer starts with 8 byte descriptor data */
#define RX_OFFSET (sizeof(struct ioc3_erxbuf) + NET_IP_ALIGN)
#define RX_BUF_SIZE (13 * IOC3_DMA_XFER_LEN)

#define ETCSR_FD ((21 << ETCSR_IPGR2_SHIFT) | (21 << ETCSR_IPGR1_SHIFT) | 21)
#define ETCSR_HD ((17 << ETCSR_IPGR2_SHIFT) | (11 << ETCSR_IPGR1_SHIFT) | 21)

/* Private per NIC data of the driver */
struct ioc3_private {
	struct ioc3_ethregs *regs;
	struct ioc3 *all_regs;
	struct device *dma_dev;
	u32 *ssram;
	unsigned long *rxr;		/* pointer to receiver ring */
	struct ioc3_etxd *txr;
	dma_addr_t rxr_dma;
	dma_addr_t txr_dma;
	struct sk_buff *rx_skbs[RX_RING_ENTRIES];
	struct sk_buff *tx_skbs[TX_RING_ENTRIES];
	int rx_ci;			/* RX consumer index */
	int rx_pi;			/* RX producer index */
	int tx_ci;			/* TX consumer index */
	int tx_pi;			/* TX producer index */
	int txqlen;
	u32 emcr, ehar_h, ehar_l;
	spinlock_t ioc3_lock;
	struct mii_if_info mii;

	struct net_device *dev;
	struct pci_dev *pdev;

	/* Members used by autonegotiation */
	struct timer_list ioc3_timer;
};

static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void ioc3_set_multicast_list(struct net_device *dev);
static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void ioc3_timeout(struct net_device *dev);
static inline unsigned int ioc3_hash(const unsigned char *addr);
static void ioc3_start(struct ioc3_private *ip);
static inline void ioc3_stop(struct ioc3_private *ip);
static void ioc3_init(struct net_device *dev);
static int ioc3_alloc_rx_bufs(struct net_device *dev);
static void ioc3_free_rx_bufs(struct ioc3_private *ip);
static inline void ioc3_clean_tx_ring(struct ioc3_private *ip);

static const char ioc3_str[] = "IOC3 Ethernet";
static const struct ethtool_ops ioc3_ethtool_ops;

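/* The IOC3 transfers RX data in IOC3_DMA_XFER_LEN (128 byte) chunks, so
 * receive buffers have to start on a 128 byte boundary.  This helper
 * returns how far an skb's data pointer must be advanced to reach the
 * next such boundary (0 if it is already aligned).
 */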
static inline unsigned long aligned_rx_skb_addr(unsigned long addr)
{
	return (~addr + 1) & (IOC3_DMA_XFER_LEN - 1UL);
}

static inline int ioc3_alloc_skb(struct ioc3_private *ip, struct sk_buff **skb,
				 struct ioc3_erxbuf **rxb, dma_addr_t *rxb_dma)
{
	struct sk_buff *new_skb;
	dma_addr_t d;
	int offset;

	new_skb = alloc_skb(RX_BUF_SIZE + IOC3_DMA_XFER_LEN - 1, GFP_ATOMIC);
	if (!new_skb)
		return -ENOMEM;

	/* ensure buffer is aligned to IOC3_DMA_XFER_LEN */
	offset = aligned_rx_skb_addr((unsigned long)new_skb->data);
	if (offset)
		skb_reserve(new_skb, offset);

	d = dma_map_single(ip->dma_dev, new_skb->data,
			   RX_BUF_SIZE, DMA_FROM_DEVICE);

	if (dma_mapping_error(ip->dma_dev, d)) {
		dev_kfree_skb_any(new_skb);
		return -ENOMEM;
	}
	*rxb_dma = d;
	*rxb = (struct ioc3_erxbuf *)new_skb->data;
	skb_reserve(new_skb, RX_OFFSET);
	*skb = new_skb;

	return 0;
}

#ifdef CONFIG_PCI_XTALK_BRIDGE
static inline unsigned long ioc3_map(dma_addr_t addr, unsigned long attr)
{
	return (addr & ~PCI64_ATTR_BAR) | attr;
}

#define ERBAR_VAL (ERBAR_BARRIER_BIT << ERBAR_RXBARR_SHIFT)
#else
static inline unsigned long ioc3_map(dma_addr_t addr, unsigned long attr)
{
	return addr;
}

#define ERBAR_VAL 0
#endif

#define IOC3_SIZE 0x100000

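/* The NIC (Number-In-a-Can) chip attached to the IOC3 is a DS1981U or
 * compatible one-wire device.  Bus cycles are driven through the MCR
 * register: mcr_pack() encodes the low pulse width and the sample point
 * of a cycle (units per the one-wire timing of the chip), nic_wait()
 * spins until the cycle completes and returns the sampled bit.
 */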
static inline u32 mcr_pack(u32 pulse, u32 sample)
{
	return (pulse << 10) | (sample << 2);
}

static int nic_wait(u32 __iomem *mcr)
{
	u32 m;

	do {
		m = readl(mcr);
	} while (!(m & 2));

	return m & 1;
}

static int nic_reset(u32 __iomem *mcr)
{
	int presence;

	writel(mcr_pack(500, 65), mcr);
	presence = nic_wait(mcr);

	writel(mcr_pack(0, 500), mcr);
	nic_wait(mcr);

	return presence;
}

static inline int nic_read_bit(u32 __iomem *mcr)
{
	int result;

	writel(mcr_pack(6, 13), mcr);
	result = nic_wait(mcr);
	writel(mcr_pack(0, 100), mcr);
	nic_wait(mcr);

	return result;
}

static inline void nic_write_bit(u32 __iomem *mcr, int bit)
{
	if (bit)
		writel(mcr_pack(6, 110), mcr);
	else
		writel(mcr_pack(80, 30), mcr);

	nic_wait(mcr);
}

/* Read a byte from an iButton device, least significant bit first */
static u32 nic_read_byte(u32 __iomem *mcr)
{
	u32 result = 0;
	int i;

	for (i = 0; i < 8; i++)
		result = (result >> 1) | (nic_read_bit(mcr) << 7);

	return result;
}

/* Write a byte to an iButton device, least significant bit first */
static void nic_write_byte(u32 __iomem *mcr, int byte)
{
	int i, bit;

	for (i = 8; i; i--) {
		bit = byte & 1;
		byte >>= 1;

		nic_write_bit(mcr, bit);
	}
}

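/* Standard one-wire "search ROM" (command 0xf0) walk: for each of the 64
 * address bits the device sends the bit and its complement, and the
 * master writes back the branch to follow.  *last remembers the last bit
 * position with an unexplored branch, so repeated calls enumerate all
 * devices on the bus.
 */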
static u64 nic_find(u32 __iomem *mcr, int *last)
{
	int a, b, index, disc;
	u64 address = 0;

	nic_reset(mcr);
	/* Search ROM. */
	nic_write_byte(mcr, 0xf0);

	for (index = 0, disc = 0; index < 64; index++) {
		a = nic_read_bit(mcr);
		b = nic_read_bit(mcr);

		if (a && b) {
			pr_warn("NIC search failed (not fatal).\n");
			*last = 0;
			return 0;
		}

		if (!a && !b) {
			if (index == *last) {
				address |= 1UL << index;
			} else if (index > *last) {
				address &= ~(1UL << index);
				disc = index;
			} else if ((address & (1UL << index)) == 0) {
				disc = index;
			}
			nic_write_bit(mcr, address & (1UL << index));
			continue;
		} else {
			if (a)
				address |= 1UL << index;
			else
				address &= ~(1UL << index);
			nic_write_bit(mcr, a);
			continue;
		}
	}

	*last = disc;

	return address;
}

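/* Enumerate devices on the one-wire bus until we find the DS1981U that
 * holds the MAC address, then issue a "match ROM" (0x55) so that only
 * this device answers the memory reads that follow.
 */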
static int nic_init(u32 __iomem *mcr)
{
	const char *unknown = "unknown";
	const char *type = unknown;
	u8 crc;
	u8 serial[6];
	int save = 0, i;

	while (1) {
		u64 reg;

		reg = nic_find(mcr, &save);

		switch (reg & 0xff) {
		case 0x91:
			type = "DS1981U";
			break;
		default:
			if (save == 0) {
				/* Let the caller try again. */
				return -1;
			}
			continue;
		}

		nic_reset(mcr);

		/* Match ROM. */
		nic_write_byte(mcr, 0x55);
		for (i = 0; i < 8; i++)
			nic_write_byte(mcr, (reg >> (i << 3)) & 0xff);

		reg >>= 8;	/* Shift out type byte */
		for (i = 0; i < 6; i++) {
			serial[i] = reg & 0xff;
			reg >>= 8;
		}
		crc = reg & 0xff;
		break;
	}

	pr_info("Found %s NIC", type);
	if (type != unknown)
		pr_cont(" registration number %pM, CRC %02x", serial, crc);
	pr_cont(".\n");

	return 0;
}

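/* Read the NIC (Number-In-a-Can) device that carries the board's MAC
 * address: select it via nic_init(), issue a "read memory" starting at
 * offset 0 and pull in the 14 byte blob that contains the address.
 */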
static void ioc3_get_eaddr_nic(struct ioc3_private *ip)
{
	u32 __iomem *mcr = &ip->all_regs->mcr;
	int tries = 2;	/* There may be some problem with the battery? */
	u8 nic[14];
	int i;

	writel(1 << 21, &ip->all_regs->gpcr_s);

	while (tries--) {
		if (!nic_init(mcr))
			break;
		udelay(500);
	}

	if (tries < 0) {
		pr_err("Failed to read MAC address\n");
		return;
	}

	/* Read Memory, starting at offset 0. */
	nic_write_byte(mcr, 0xf0);
	nic_write_byte(mcr, 0x00);
	nic_write_byte(mcr, 0x00);

	for (i = 13; i >= 0; i--)
		nic[i] = nic_read_byte(mcr);

	for (i = 2; i < 8; i++)
		ip->dev->dev_addr[i - 2] = nic[i];
}

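/* Obtain the station's MAC address from the NIC chip and log it. */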
static void ioc3_get_eaddr(struct ioc3_private *ip)
{
	ioc3_get_eaddr_nic(ip);

	pr_info("Ethernet address is %pM.\n", ip->dev->dev_addr);
}

static void __ioc3_set_mac_address(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);

	writel((dev->dev_addr[5] << 8) |
	       dev->dev_addr[4],
	       &ip->regs->emar_h);
	writel((dev->dev_addr[3] << 24) |
	       (dev->dev_addr[2] << 16) |
	       (dev->dev_addr[1] << 8) |
	       dev->dev_addr[0],
	       &ip->regs->emar_l);
}

static int ioc3_set_mac_address(struct net_device *dev, void *addr)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct sockaddr *sa = addr;

	memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);

	spin_lock_irq(&ip->ioc3_lock);
	__ioc3_set_mac_address(dev);
	spin_unlock_irq(&ip->ioc3_lock);

	return 0;
}

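/* Caller must hold the ioc3_lock ever for MII readers.  This is also
 * used to protect the transmitter side but it's low contention.
 */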
static int ioc3_mdio_read(struct net_device *dev, int phy, int reg)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3_ethregs *regs = ip->regs;

	while (readl(&regs->micr) & MICR_BUSY)
		;
	writel((phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG,
	       &regs->micr);
	while (readl(&regs->micr) & MICR_BUSY)
		;

	return readl(&regs->midr_r) & MIDR_DATA_MASK;
}

static void ioc3_mdio_write(struct net_device *dev, int phy, int reg, int data)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3_ethregs *regs = ip->regs;

	while (readl(&regs->micr) & MICR_BUSY)
		;
	writel(data, &regs->midr_w);
	writel((phy << MICR_PHYADDR_SHIFT) | reg, &regs->micr);
	while (readl(&regs->micr) & MICR_BUSY)
		;
}

static int ioc3_mii_init(struct ioc3_private *ip);

static struct net_device_stats *ioc3_get_stats(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3_ethregs *regs = ip->regs;

	dev->stats.collisions += readl(&regs->etcdc) & ETCDC_COLLCNT_MASK;
	return &dev->stats;
}

static void ioc3_tcpudp_checksum(struct sk_buff *skb, u32 hwsum, int len)
{
	struct ethhdr *eh = eth_hdr(skb);
	unsigned int proto;
	unsigned char *cp;
	struct iphdr *ih;
	u32 csum, ehsum;
	u16 *ew;

	/* Did hardware handle the checksum at all?  The cases we can handle
	 * are:
	 *
	 * - TCP and UDP checksums of IPv4 only.
	 * - IPv6 would be doable, but we keep that for later ...
	 * - Only unfragmented packets.  Did somebody already tell you
	 *   fragmentation is evil?
	 * - don't care about packet size.  Worst case when processing a
	 *   malformed packet we'll try to access the packet at ip header +
	 *   64 bits which is still in the skb.  Not good, but we could live
	 *   with it - we'd need to do some sanity checks.
	 */
	if (eh->h_proto != htons(ETH_P_IP))
		return;

	ih = (struct iphdr *)((char *)eh + ETH_HLEN);
	if (ip_is_fragment(ih))
		return;

	proto = ih->protocol;
	if (proto != IPPROTO_TCP && proto != IPPROTO_UDP)
		return;

	/* Same as tx - compute csum of pseudo header */
	csum = hwsum +
	       (ih->tot_len - (ih->ihl << 2)) +
	       htons((u16)ih->protocol) +
	       (ih->saddr >> 16) + (ih->saddr & 0xffff) +
	       (ih->daddr >> 16) + (ih->daddr & 0xffff);

	/* Sum up ethernet dest addr, src addr and protocol */
	ew = (u16 *)eh;
	ehsum = ew[0] + ew[1] + ew[2] + ew[3] + ew[4] + ew[5] + ew[6];

	ehsum = (ehsum & 0xffff) + (ehsum >> 16);
	ehsum = (ehsum & 0xffff) + (ehsum >> 16);

	csum += 0xffff ^ ehsum;

	/* In the next step we also subtract the 1's complement
	 * checksum of the trailing ethernet CRC.
	 */
	cp = (char *)eh + len;	/* points at trailing CRC */
	if (len & 1) {
		csum += 0xffff ^ (u16)((cp[1] << 8) | cp[0]);
		csum += 0xffff ^ (u16)((cp[3] << 8) | cp[2]);
	} else {
		csum += 0xffff ^ (u16)((cp[0] << 8) | cp[1]);
		csum += 0xffff ^ (u16)((cp[2] << 8) | cp[3]);
	}

	csum = (csum & 0xffff) + (csum >> 16);
	csum = (csum & 0xffff) + (csum >> 16);

	if (csum == 0xffff)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static inline void ioc3_rx(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct sk_buff *skb, *new_skb;
	int rx_entry, n_entry, len;
	struct ioc3_erxbuf *rxb;
	unsigned long *rxr;
	dma_addr_t d;
	u32 w0, err;

	rxr = ip->rxr;		/* Rx ring base */
	rx_entry = ip->rx_ci;	/* RX consume index */
	n_entry = ip->rx_pi;

	skb = ip->rx_skbs[rx_entry];
	rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET);
	w0 = be32_to_cpu(rxb->w0);

	while (w0 & ERXBUF_V) {		/* Valid entry ? */
		err = be32_to_cpu(rxb->err);
		if (err & ERXBUF_GOODPKT) {
			len = ((w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff) - 4;
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, dev);

			if (ioc3_alloc_skb(ip, &new_skb, &rxb, &d)) {
				/* Ouch, drop packet and just recycle packet
				 * to keep the ring filled.
				 */
				dev->stats.rx_dropped++;
				new_skb = skb;
				d = rxr[rx_entry];
				goto next;
			}

			if (likely(dev->features & NETIF_F_RXCSUM))
				ioc3_tcpudp_checksum(skb,
						     w0 & ERXBUF_IPCKSUM_MASK,
						     len);

			dma_unmap_single(ip->dma_dev, rxr[rx_entry],
					 RX_BUF_SIZE, DMA_FROM_DEVICE);

			netif_rx(skb);

			ip->rx_skbs[rx_entry] = NULL;	/* Poison */

			dev->stats.rx_packets++;	/* Statistics */
			dev->stats.rx_bytes += len;
		} else {
			/* The frame is invalid and the skb never
			 * reached the network layer so we can just
			 * recycle it.
			 */
			new_skb = skb;
			d = rxr[rx_entry];
			dev->stats.rx_errors++;
		}
		if (err & ERXBUF_CRCERR)	/* Statistics */
			dev->stats.rx_crc_errors++;
		if (err & ERXBUF_FRAMERR)
			dev->stats.rx_frame_errors++;

next:
		ip->rx_skbs[n_entry] = new_skb;
		rxr[n_entry] = cpu_to_be64(ioc3_map(d, PCI64_ATTR_BAR));
		rxb->w0 = 0;				/* Clear valid flag */
		n_entry = (n_entry + 1) & RX_RING_MASK;	/* Update erpir */

		/* Now go on to the next ring entry. */
		rx_entry = (rx_entry + 1) & RX_RING_MASK;
		skb = ip->rx_skbs[rx_entry];
		rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET);
		w0 = be32_to_cpu(rxb->w0);
	}
	writel((n_entry << 3) | ERPIR_ARM, &ip->regs->erpir);
	ip->rx_pi = n_entry;
	ip->rx_ci = rx_entry;
}


static inline void ioc3_tx(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3_ethregs *regs = ip->regs;
	unsigned long packets, bytes;
	int tx_entry, o_entry;
	struct sk_buff *skb;
	u32 etcir;

	spin_lock(&ip->ioc3_lock);
	etcir = readl(&regs->etcir);

	tx_entry = (etcir >> 7) & TX_RING_MASK;
	o_entry = ip->tx_ci;
	packets = 0;
	bytes = 0;

	while (o_entry != tx_entry) {
		packets++;
		skb = ip->tx_skbs[o_entry];
		bytes += skb->len;
		dev_consume_skb_irq(skb);
		ip->tx_skbs[o_entry] = NULL;

		o_entry = (o_entry + 1) & TX_RING_MASK;	/* Next */

		etcir = readl(&regs->etcir);		/* More pkts sent? */
		tx_entry = (etcir >> 7) & TX_RING_MASK;
	}

	dev->stats.tx_packets += packets;
	dev->stats.tx_bytes += bytes;
	ip->txqlen -= packets;

	if (netif_queue_stopped(dev) && ip->txqlen < TX_RING_ENTRIES)
		netif_wake_queue(dev);

	ip->tx_ci = o_entry;
	spin_unlock(&ip->ioc3_lock);
}

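/* Deal with fatal IOC3 errors.  The recovery strategy is to completely
 * reinitialize the interface: stop the chip, throw away all pending RX
 * buffers and in-flight TX packets, then bring everything back up again.
 */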
static void ioc3_error(struct net_device *dev, u32 eisr)
{
	struct ioc3_private *ip = netdev_priv(dev);

	spin_lock(&ip->ioc3_lock);

	if (eisr & EISR_RXOFLO)
		net_err_ratelimited("%s: RX overflow.\n", dev->name);
	if (eisr & EISR_RXBUFOFLO)
		net_err_ratelimited("%s: RX buffer overflow.\n", dev->name);
	if (eisr & EISR_RXMEMERR)
		net_err_ratelimited("%s: RX PCI error.\n", dev->name);
	if (eisr & EISR_RXPARERR)
		net_err_ratelimited("%s: RX SSRAM parity error.\n", dev->name);
	if (eisr & EISR_TXBUFUFLO)
		net_err_ratelimited("%s: TX buffer underflow.\n", dev->name);
	if (eisr & EISR_TXMEMERR)
		net_err_ratelimited("%s: TX PCI error.\n", dev->name);

	ioc3_stop(ip);
	ioc3_free_rx_bufs(ip);
	ioc3_clean_tx_ring(ip);

	ioc3_init(dev);
	if (ioc3_alloc_rx_bufs(dev)) {
		netdev_err(dev, "%s: rx buffer allocation failed\n", __func__);
		spin_unlock(&ip->ioc3_lock);
		return;
	}
	ioc3_start(ip);
	ioc3_mii_init(ip);

	netif_wake_queue(dev);

	spin_unlock(&ip->ioc3_lock);
}

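/* The interrupt handler does all of the Rx thread work and cleans up
 * after the Tx thread.
 */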
static irqreturn_t ioc3_interrupt(int irq, void *dev_id)
{
	struct ioc3_private *ip = netdev_priv(dev_id);
	struct ioc3_ethregs *regs = ip->regs;
	u32 eisr;

	eisr = readl(&regs->eisr);
	writel(eisr, &regs->eisr);
	readl(&regs->eisr);			/* Flush */

	if (eisr & (EISR_RXOFLO | EISR_RXBUFOFLO | EISR_RXMEMERR |
		    EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR))
		ioc3_error(dev_id, eisr);
	if (eisr & EISR_RXTIMERINT)
		ioc3_rx(dev_id);
	if (eisr & EISR_TXEXPLICIT)
		ioc3_tx(dev_id);

	return IRQ_HANDLED;
}

static inline void ioc3_setup_duplex(struct ioc3_private *ip)
{
	struct ioc3_ethregs *regs = ip->regs;

	spin_lock_irq(&ip->ioc3_lock);

	if (ip->mii.full_duplex) {
		writel(ETCSR_FD, &regs->etcsr);
		ip->emcr |= EMCR_DUPLEX;
	} else {
		writel(ETCSR_HD, &regs->etcsr);
		ip->emcr &= ~EMCR_DUPLEX;
	}
	writel(ip->emcr, &regs->emcr);

	spin_unlock_irq(&ip->ioc3_lock);
}

static void ioc3_timer(struct timer_list *t)
{
	struct ioc3_private *ip = from_timer(ip, t, ioc3_timer);

	/* Print the link status if it has changed */
	mii_check_media(&ip->mii, 1, 0);
	ioc3_setup_duplex(ip);

	ip->ioc3_timer.expires = jiffies + ((12 * HZ) / 10); /* 1.2 sec. */
	add_timer(&ip->ioc3_timer);
}

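/* Try to find a PHY.  There is no apparent relation between the MII
 * addresses in the SGI documentation and what is found in reality, so we
 * simply probe all 32 addresses.  IOC3 PHYs usually live on address 31;
 * some cards have a quirky PHY that probing fails to see, hence the
 * workaround of simply assuming address 31 when the scan comes up empty.
 */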
static int ioc3_mii_init(struct ioc3_private *ip)
{
	int ioc3_phy_workaround = 1;
	int i, found = 0, res = 0;
	u16 word;

	for (i = 0; i < 32; i++) {
		word = ioc3_mdio_read(ip->dev, i, MII_PHYSID1);

		if (word != 0xffff && word != 0x0000) {
			found = 1;
			break;			/* Found a PHY */
		}
	}

	if (!found) {
		if (ioc3_phy_workaround) {
			i = 31;
		} else {
			ip->mii.phy_id = -1;
			res = -ENODEV;
			goto out;
		}
	}

	ip->mii.phy_id = i;

out:
	return res;
}

static void ioc3_mii_start(struct ioc3_private *ip)
{
	ip->ioc3_timer.expires = jiffies + (12 * HZ) / 10; /* 1.2 sec. */
	add_timer(&ip->ioc3_timer);
}

static inline void ioc3_tx_unmap(struct ioc3_private *ip, int entry)
{
	struct ioc3_etxd *desc;
	u32 cmd, bufcnt, len;

	desc = &ip->txr[entry];
	cmd = be32_to_cpu(desc->cmd);
	bufcnt = be32_to_cpu(desc->bufcnt);
	if (cmd & ETXD_B1V) {
		len = (bufcnt & ETXD_B1CNT_MASK) >> ETXD_B1CNT_SHIFT;
		dma_unmap_single(ip->dma_dev, be64_to_cpu(desc->p1),
				 len, DMA_TO_DEVICE);
	}
	if (cmd & ETXD_B2V) {
		len = (bufcnt & ETXD_B2CNT_MASK) >> ETXD_B2CNT_SHIFT;
		dma_unmap_single(ip->dma_dev, be64_to_cpu(desc->p2),
				 len, DMA_TO_DEVICE);
	}
}

static inline void ioc3_clean_tx_ring(struct ioc3_private *ip)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < TX_RING_ENTRIES; i++) {
		skb = ip->tx_skbs[i];
		if (skb) {
			ioc3_tx_unmap(ip, i);
			ip->tx_skbs[i] = NULL;
			dev_kfree_skb_any(skb);
		}
		ip->txr[i].cmd = 0;
	}
	ip->tx_pi = 0;
	ip->tx_ci = 0;
}

static void ioc3_free_rx_bufs(struct ioc3_private *ip)
{
	int rx_entry, n_entry;
	struct sk_buff *skb;

	n_entry = ip->rx_ci;
	rx_entry = ip->rx_pi;

	while (n_entry != rx_entry) {
		skb = ip->rx_skbs[n_entry];
		if (skb) {
			dma_unmap_single(ip->dma_dev,
					 be64_to_cpu(ip->rxr[n_entry]),
					 RX_BUF_SIZE, DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
		n_entry = (n_entry + 1) & RX_RING_MASK;
	}
}

static int ioc3_alloc_rx_bufs(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3_erxbuf *rxb;
	dma_addr_t d;
	int i;

	/* Now the rx buffers.  The RX ring may be larger, but we only
	 * allocate RX_BUFFS of the entries for now.
	 */
	for (i = 0; i < RX_BUFFS; i++) {
		if (ioc3_alloc_skb(ip, &ip->rx_skbs[i], &rxb, &d))
			return -ENOMEM;

		rxb->w0 = 0;			/* Clear valid flag */
		ip->rxr[i] = cpu_to_be64(ioc3_map(d, PCI64_ATTR_BAR));
	}
	ip->rx_ci = 0;
	ip->rx_pi = RX_BUFFS;

	return 0;
}

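/* Figure out how big the IOC3's local SSRAM packet buffer is: write a
 * test pattern at two locations that alias each other if only the small
 * (64 kbyte) buffer is fitted, then read the pattern back.
 */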
static inline void ioc3_ssram_disc(struct ioc3_private *ip)
{
	struct ioc3_ethregs *regs = ip->regs;
	u32 *ssram0 = &ip->ssram[0x0000];
	u32 *ssram1 = &ip->ssram[0x4000];
	u32 pattern = 0x5555;

	/* Assume the larger size SSRAM and enable parity checking */
	writel(readl(&regs->emcr) | (EMCR_BUFSIZ | EMCR_RAMPAR), &regs->emcr);
	readl(&regs->emcr); /* Flush */

	writel(pattern, ssram0);
	writel(~pattern & IOC3_SSRAM_DM, ssram1);

	if ((readl(ssram0) & IOC3_SSRAM_DM) != pattern ||
	    (readl(ssram1) & IOC3_SSRAM_DM) != (~pattern & IOC3_SSRAM_DM)) {
		/* set ssram size to 64 KB */
		ip->emcr |= EMCR_RAMPAR;
		writel(readl(&regs->emcr) & ~EMCR_BUFSIZ, &regs->emcr);
	} else {
		ip->emcr |= EMCR_BUFSIZ | EMCR_RAMPAR;
	}
}

static void ioc3_init(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3_ethregs *regs = ip->regs;

	del_timer_sync(&ip->ioc3_timer);	/* Kill if running */

	writel(EMCR_RST, &regs->emcr);		/* Reset */
	readl(&regs->emcr);			/* Flush WB */
	udelay(4);				/* Give it time ... */
	writel(0, &regs->emcr);
	readl(&regs->emcr);

	/* Misc registers */
	writel(ERBAR_VAL, &regs->erbar);
	readl(&regs->etcdc);			/* Clear on read */
	writel(15, &regs->ercsr);		/* RX low watermark */
	writel(0, &regs->ertr);			/* Interrupt immediately */
	__ioc3_set_mac_address(dev);
	writel(ip->ehar_h, &regs->ehar_h);
	writel(ip->ehar_l, &regs->ehar_l);
	writel(42, &regs->ersr);		/* XXX should be random */
}

static void ioc3_start(struct ioc3_private *ip)
{
	struct ioc3_ethregs *regs = ip->regs;
	unsigned long ring;

	/* Now the rx ring base, consume & produce registers. */
	ring = ioc3_map(ip->rxr_dma, PCI64_ATTR_PREC);
	writel(ring >> 32, &regs->erbr_h);
	writel(ring & 0xffffffff, &regs->erbr_l);
	writel(ip->rx_ci << 3, &regs->ercir);
	writel((ip->rx_pi << 3) | ERPIR_ARM, &regs->erpir);

	ring = ioc3_map(ip->txr_dma, PCI64_ATTR_PREC);

	ip->txqlen = 0;				/* nothing queued */

	/* Now the tx ring base, consume & produce registers. */
	writel(ring >> 32, &regs->etbr_h);
	writel(ring & 0xffffffff, &regs->etbr_l);
	writel(ip->tx_pi << 7, &regs->etpir);
	writel(ip->tx_ci << 7, &regs->etcir);
	readl(&regs->etcir);			/* Flush */

	ip->emcr |= ((RX_OFFSET / 2) << EMCR_RXOFF_SHIFT) | EMCR_TXDMAEN |
		    EMCR_TXEN | EMCR_RXDMAEN | EMCR_RXEN | EMCR_PADEN;
	writel(ip->emcr, &regs->emcr);
	writel(EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
	       EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
	       EISR_TXEXPLICIT | EISR_TXMEMERR, &regs->eier);
	readl(&regs->eier);
}

static inline void ioc3_stop(struct ioc3_private *ip)
{
	struct ioc3_ethregs *regs = ip->regs;

	writel(0, &regs->emcr);			/* Shutup */
	writel(0, &regs->eier);			/* Disable interrupts */
	readl(&regs->eier);			/* Flush */
}

static int ioc3_open(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);

	if (request_irq(dev->irq, ioc3_interrupt, IRQF_SHARED, ioc3_str, dev)) {
		netdev_err(dev, "Can't get irq %d\n", dev->irq);

		return -EAGAIN;
	}

	ip->ehar_h = 0;
	ip->ehar_l = 0;

	ioc3_init(dev);
	if (ioc3_alloc_rx_bufs(dev)) {
		netdev_err(dev, "%s: rx buffer allocation failed\n", __func__);
		return -ENOMEM;
	}
	ioc3_start(ip);
	ioc3_mii_start(ip);

	netif_start_queue(dev);
	return 0;
}

static int ioc3_close(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);

	del_timer_sync(&ip->ioc3_timer);

	netif_stop_queue(dev);

	ioc3_stop(ip);
	free_irq(dev->irq, dev);

	ioc3_free_rx_bufs(ip);
	ioc3_clean_tx_ring(ip);

	return 0;
}
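/* MENET cards have four IOC3 chips.  We treat the board as an MENET if
 * PCI slots 0, 1 and 2 of a master PCI bus all carry an IOC3; the fourth
 * IOC3 (slot 3) then has no SuperIO attached, so its serial ports must
 * be left alone.
 */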
static int ioc3_adjacent_is_ioc3(struct pci_dev *pdev, int slot)
{
	struct pci_dev *dev = pci_get_slot(pdev->bus, PCI_DEVFN(slot, 0));
	int ret = 0;

	if (dev) {
		if (dev->vendor == PCI_VENDOR_ID_SGI &&
		    dev->device == PCI_DEVICE_ID_SGI_IOC3)
			ret = 1;
		pci_dev_put(dev);
	}

	return ret;
}

static int ioc3_is_menet(struct pci_dev *pdev)
{
	return !pdev->bus->parent &&
	       ioc3_adjacent_is_ioc3(pdev, 0) &&
	       ioc3_adjacent_is_ioc3(pdev, 1) &&
	       ioc3_adjacent_is_ioc3(pdev, 2);
}

#ifdef CONFIG_SERIAL_8250
/* Notes on the serial ports:
 *
 * The IOC3 serials use a 22 MHz clock rate with an additional divider
 * which can be programmed in the SCR register if the DLAB bit is set.
 *
 * Register to interrupt zero because we share the interrupt with
 * the serial driver which we don't properly support yet.
 *
 * Can't use UPF_IOREMAP as the whole of IOC3 resources have already been
 * registered.
 */
static void ioc3_8250_register(struct ioc3_uartregs __iomem *uart)
{
#define COSMISC_CONSTANT 6

	struct uart_8250_port port = {
		.port = {
			.irq		= 0,
			.flags		= UPF_SKIP_TEST | UPF_BOOT_AUTOCONF,
			.iotype		= UPIO_MEM,
			.regshift	= 0,
			.uartclk	= (22000000 << 1) / COSMISC_CONSTANT,

			.membase	= (unsigned char __iomem *)uart,
			.mapbase	= (unsigned long)uart,
		}
	};
	unsigned char lcr;

	lcr = readb(&uart->iu_lcr);
	writeb(lcr | UART_LCR_DLAB, &uart->iu_lcr);
	writeb(COSMISC_CONSTANT, &uart->iu_scr);
	writeb(lcr, &uart->iu_lcr);
	readb(&uart->iu_lcr);
	serial8250_register_8250_port(&port);
}

static void ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3)
{
	u32 sio_iec;

	/* We need to recognise and treat the fourth MENET serial as it
	 * does not have a SuperIO chip attached to it, therefore attempting
	 * to access it will result in bus errors.  We call something an
	 * MENET if PCI slots 0, 1, 2 and 3 of a master PCI bus all have an
	 * IOC3 by definition of what a MENET card is, but ugly ...
	 */
	if (ioc3_is_menet(pdev) && PCI_SLOT(pdev->devfn) == 3)
		return;

	/* Set gpio pins for RS232/RS422 mode selection and disable
	 * serial DMA before handing the ports to the 8250 driver.
	 */
	writel(GPCR_UARTA_MODESEL | GPCR_UARTB_MODESEL, &ioc3->gpcr_s);
	readl(&ioc3->gpcr_s);
	writel(0, &ioc3->gppr[6]);
	readl(&ioc3->gppr[6]);
	writel(0, &ioc3->gppr[7]);
	readl(&ioc3->gppr[7]);
	writel(readl(&ioc3->port_a.sscr) & ~SSCR_DMA_EN, &ioc3->port_a.sscr);
	readl(&ioc3->port_a.sscr);
	writel(readl(&ioc3->port_b.sscr) & ~SSCR_DMA_EN, &ioc3->port_b.sscr);
	readl(&ioc3->port_b.sscr);

	/* Disable all SA/B interrupts except for SA/B_INT in SIO_IEC. */
	sio_iec = readl(&ioc3->sio_iec);
	sio_iec &= ~(SIO_IR_SA_TX_MT | SIO_IR_SA_RX_FULL |
		     SIO_IR_SA_RX_HIGH | SIO_IR_SA_RX_TIMER |
		     SIO_IR_SA_DELTA_DCD | SIO_IR_SA_DELTA_CTS |
		     SIO_IR_SA_TX_EXPLICIT | SIO_IR_SA_MEMERR);
	sio_iec |= SIO_IR_SA_INT;
	sio_iec &= ~(SIO_IR_SB_TX_MT | SIO_IR_SB_RX_FULL |
		     SIO_IR_SB_RX_HIGH | SIO_IR_SB_RX_TIMER |
		     SIO_IR_SB_DELTA_DCD | SIO_IR_SB_DELTA_CTS |
		     SIO_IR_SB_TX_EXPLICIT | SIO_IR_SB_MEMERR);
	sio_iec |= SIO_IR_SB_INT;
	writel(sio_iec, &ioc3->sio_iec);
	writel(0, &ioc3->port_a.sscr);
	writel(0, &ioc3->port_b.sscr);

	ioc3_8250_register(&ioc3->sregs.uarta);
	ioc3_8250_register(&ioc3->sregs.uartb);
}
#endif

static const struct net_device_ops ioc3_netdev_ops = {
	.ndo_open		= ioc3_open,
	.ndo_stop		= ioc3_close,
	.ndo_start_xmit		= ioc3_start_xmit,
	.ndo_tx_timeout		= ioc3_timeout,
	.ndo_get_stats		= ioc3_get_stats,
	.ndo_set_rx_mode	= ioc3_set_multicast_list,
	.ndo_do_ioctl		= ioc3_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ioc3_set_mac_address,
};

static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned int sw_physid1, sw_physid2;
	struct net_device *dev = NULL;
	struct ioc3_private *ip;
	struct ioc3 *ioc3;
	unsigned long ioc3_base, ioc3_size;
	u32 vendor, model, rev;
	int err, pci_using_dac;

	/* Configure DMA attributes. */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err < 0) {
			pr_err("%s: Unable to obtain 64 bit DMA for consistent allocations\n",
			       pci_name(pdev));
			goto out;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("%s: No usable DMA configuration, aborting.\n",
			       pci_name(pdev));
			goto out;
		}
		pci_using_dac = 0;
	}

	if (pci_enable_device(pdev))
		return -ENODEV;

	dev = alloc_etherdev(sizeof(struct ioc3_private));
	if (!dev) {
		err = -ENOMEM;
		goto out_disable;
	}

	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;

	err = pci_request_regions(pdev, "ioc3");
	if (err)
		goto out_free;

	SET_NETDEV_DEV(dev, &pdev->dev);
	pci_set_drvdata(pdev, dev);	/* needed by ioc3_remove_one() */

	ip = netdev_priv(dev);
	ip->dev = dev;
	ip->dma_dev = &pdev->dev;

	dev->irq = pdev->irq;

	ioc3_base = pci_resource_start(pdev, 0);
	ioc3_size = pci_resource_len(pdev, 0);
	ioc3 = (struct ioc3 *)ioremap(ioc3_base, ioc3_size);
	if (!ioc3) {
		pr_err("ioc3eth(%s): ioremap failed, goodbye.\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto out_res;
	}
	ip->regs = &ioc3->eth;
	ip->ssram = ioc3->ssram;
	ip->all_regs = ioc3;

#ifdef CONFIG_SERIAL_8250
	ioc3_serial_probe(pdev, ioc3);
#endif

	spin_lock_init(&ip->ioc3_lock);
	timer_setup(&ip->ioc3_timer, ioc3_timer, 0);

	ioc3_stop(ip);

	/* Allocate rx ring.  4kb = 512 entries, must be 4kb aligned */
	ip->rxr = dma_direct_alloc_pages(ip->dma_dev, RX_RING_SIZE,
					 &ip->rxr_dma, GFP_ATOMIC, 0);
	if (!ip->rxr) {
		pr_err("ioc3-eth: rx ring allocation failed\n");
		err = -ENOMEM;
		goto out_stop;
	}

	/* Allocate tx ring.  16kb = 128 bufs, must be 16kb aligned */
	ip->txr = dma_direct_alloc_pages(ip->dma_dev, TX_RING_SIZE,
					 &ip->txr_dma,
					 GFP_KERNEL | __GFP_ZERO, 0);
	if (!ip->txr) {
		pr_err("ioc3-eth: tx ring allocation failed\n");
		err = -ENOMEM;
		goto out_stop;
	}

	ioc3_init(dev);

	ip->pdev = pdev;

	ip->mii.phy_id_mask = 0x1f;
	ip->mii.reg_num_mask = 0x1f;
	ip->mii.dev = dev;
	ip->mii.mdio_read = ioc3_mdio_read;
	ip->mii.mdio_write = ioc3_mdio_write;

	ioc3_mii_init(ip);

	if (ip->mii.phy_id == -1) {
		pr_err("ioc3-eth(%s): Didn't find a PHY, goodbye.\n",
		       pci_name(pdev));
		err = -ENODEV;
		goto out_stop;
	}

	ioc3_mii_start(ip);
	ioc3_ssram_disc(ip);
	ioc3_get_eaddr(ip);

	/* The IOC3-specific entries in the device structure. */
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &ioc3_netdev_ops;
	dev->ethtool_ops = &ioc3_ethtool_ops;
	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
	dev->features = NETIF_F_IP_CSUM;

	sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1);
	sw_physid2 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID2);

	err = register_netdev(dev);
	if (err)
		goto out_stop;

	mii_check_media(&ip->mii, 1, 1);
	ioc3_setup_duplex(ip);

	vendor = (sw_physid1 << 12) | (sw_physid2 >> 4);
	model = (sw_physid2 >> 4) & 0x3f;
	rev = sw_physid2 & 0xf;
	netdev_info(dev, "Using PHY %d, vendor 0x%x, model %d, rev %d.\n",
		    ip->mii.phy_id, vendor, model, rev);
	netdev_info(dev, "IOC3 SSRAM has %d kbyte.\n",
		    ip->emcr & EMCR_BUFSIZ ? 128 : 64);

	return 0;

out_stop:
	del_timer_sync(&ip->ioc3_timer);
	if (ip->rxr)
		dma_direct_free_pages(ip->dma_dev, RX_RING_SIZE, ip->rxr,
				      ip->rxr_dma, 0);
	if (ip->txr)
		dma_direct_free_pages(ip->dma_dev, TX_RING_SIZE, ip->txr,
				      ip->txr_dma, 0);
out_res:
	pci_release_regions(pdev);
out_free:
	free_netdev(dev);
out_disable:
	/* We should call pci_disable_device(pdev); here if the IOC3 wasn't
	 * such a weird device ...
	 */
out:
	return err;
}

static void ioc3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct ioc3_private *ip = netdev_priv(dev);

	dma_direct_free_pages(ip->dma_dev, RX_RING_SIZE, ip->rxr,
			      ip->rxr_dma, 0);
	dma_direct_free_pages(ip->dma_dev, TX_RING_SIZE, ip->txr,
			      ip->txr_dma, 0);

	unregister_netdev(dev);
	del_timer_sync(&ip->ioc3_timer);

	iounmap(ip->all_regs);
	pci_release_regions(pdev);
	free_netdev(dev);
	/* Leave the device enabled: the serial part of the IOC3 may
	 * still be using it.
	 */
}

static const struct pci_device_id ioc3_pci_tbl[] = {
	{ PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, ioc3_pci_tbl);

static struct pci_driver ioc3_driver = {
	.name		= "ioc3-eth",
	.id_table	= ioc3_pci_tbl,
	.probe		= ioc3_probe,
	.remove		= ioc3_remove_one,
};

static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3_etxd *desc;
	unsigned long data;
	unsigned int len;
	int produce;
	u32 w0 = 0;

	/* The IOC3 has fairly simple minded checksumming hardware which
	 * simply adds up the 1's complement checksum of the entire packet
	 * and inserts it at a programmable offset.  We therefore have to
	 * compensate for the MAC header, which must not be summed, and for
	 * the TCP/UDP pseudo header, which the hardware knows nothing about.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const struct iphdr *ih = ip_hdr(skb);
		const int proto = ntohs(ih->protocol);
		unsigned int csoff;
		u32 csum, ehsum;
		u16 *eh;

		/* The MAC header.  skb->mac seems the logical approach
		 * to find the MAC header - except it's a NULL pointer ...
		 */
		eh = (u16 *)skb->data;

		/* Sum up dest addr, src addr and protocol */
		ehsum = eh[0] + eh[1] + eh[2] + eh[3] + eh[4] + eh[5] + eh[6];

		/* Skip IP header; its sum is always zero and was
		 * already taken into account in the checksum.
		 */
		csum = csum_tcpudp_nofold(ih->saddr, ih->daddr,
					  ih->tot_len - (ih->ihl << 2),
					  proto, csum_fold(ehsum));

		csum = (csum & 0xffff) + (csum >> 16);	/* Fold again */
		csum = (csum & 0xffff) + (csum >> 16);

		csoff = ETH_HLEN + (ih->ihl << 2);
		if (proto == IPPROTO_UDP) {
			csoff += offsetof(struct udphdr, check);
			udp_hdr(skb)->check = csum;
		}
		if (proto == IPPROTO_TCP) {
			csoff += offsetof(struct tcphdr, check);
			tcp_hdr(skb)->check = csum;
		}

		w0 = ETXD_DOCHECKSUM | (csoff << ETXD_CHKOFF_SHIFT);
	}

	spin_lock_irq(&ip->ioc3_lock);

	data = (unsigned long)skb->data;
	len = skb->len;

	produce = ip->tx_pi;
	desc = &ip->txr[produce];

	if (len <= 104) {
		/* Short packet, let's copy it directly into the ring. */
		skb_copy_from_linear_data(skb, desc->data, skb->len);
		if (len < ETH_ZLEN) {
			/* Very short packet, pad with zeros at the end. */
			memset(desc->data + len, 0, ETH_ZLEN - len);
			len = ETH_ZLEN;
		}
		desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_D0V | w0);
		desc->bufcnt = cpu_to_be32(len);
	} else if ((data ^ (data + len - 1)) & 0x4000) {
		unsigned long b2 = (data | 0x3fffUL) + 1UL;
		unsigned long s1 = b2 - data;
		unsigned long s2 = data + len - b2;
		dma_addr_t d1, d2;

		/* The packet straddles a 16 kB boundary, which a single
		 * descriptor buffer must not cross, so split it in two
		 * at the boundary.
		 */
		desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE |
					ETXD_B1V | ETXD_B2V | w0);
		desc->bufcnt = cpu_to_be32((s1 << ETXD_B1CNT_SHIFT) |
					   (s2 << ETXD_B2CNT_SHIFT));
		d1 = dma_map_single(ip->dma_dev, skb->data, s1, DMA_TO_DEVICE);
		if (dma_mapping_error(ip->dma_dev, d1))
			goto drop_packet;
		d2 = dma_map_single(ip->dma_dev, (void *)b2, s2, DMA_TO_DEVICE);
		if (dma_mapping_error(ip->dma_dev, d2)) {
			dma_unmap_single(ip->dma_dev, d1, s1, DMA_TO_DEVICE);
			goto drop_packet;
		}
		desc->p1 = cpu_to_be64(ioc3_map(d1, PCI64_ATTR_PREF));
		desc->p2 = cpu_to_be64(ioc3_map(d2, PCI64_ATTR_PREF));
	} else {
		dma_addr_t d;

		/* Normal sized packet that doesn't cross a page boundary. */
		desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_B1V | w0);
		desc->bufcnt = cpu_to_be32(len << ETXD_B1CNT_SHIFT);
		d = dma_map_single(ip->dma_dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(ip->dma_dev, d))
			goto drop_packet;
		desc->p1 = cpu_to_be64(ioc3_map(d, PCI64_ATTR_PREF));
	}

	mb(); /* make sure all descriptor changes are visible */

	ip->tx_skbs[produce] = skb;
	produce = (produce + 1) & TX_RING_MASK;
	ip->tx_pi = produce;
	writel(produce << 7, &ip->regs->etpir);	/* Fire ... */

	ip->txqlen++;

	if (ip->txqlen >= (TX_RING_ENTRIES - 1))
		netif_stop_queue(dev);

	spin_unlock_irq(&ip->ioc3_lock);

	return NETDEV_TX_OK;

drop_packet:
	dev_kfree_skb_any(skb);
	dev->stats.tx_dropped++;

	spin_unlock_irq(&ip->ioc3_lock);

	return NETDEV_TX_OK;
}

static void ioc3_timeout(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);

	netdev_err(dev, "transmit timed out, resetting\n");

	spin_lock_irq(&ip->ioc3_lock);

	ioc3_stop(ip);
	ioc3_free_rx_bufs(ip);
	ioc3_clean_tx_ring(ip);

	ioc3_init(dev);
	if (ioc3_alloc_rx_bufs(dev)) {
		netdev_err(dev, "%s: rx buffer allocation failed\n", __func__);
		spin_unlock_irq(&ip->ioc3_lock);
		return;
	}
	ioc3_start(ip);
	ioc3_mii_init(ip);
	ioc3_mii_start(ip);

	spin_unlock_irq(&ip->ioc3_lock);

	netif_wake_queue(dev);
}

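/* Given a multicast ethernet address, this routine calculates the
 * address's bit index in the logical address filter mask.
 */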
static inline unsigned int ioc3_hash(const unsigned char *addr)
{
	unsigned int temp = 0;
	int bits;
	u32 crc;

	crc = ether_crc_le(ETH_ALEN, addr);

	crc &= 0x3f;	/* bit reverse lowest 6 bits for hash index */
	for (bits = 6; --bits >= 0; ) {
		temp <<= 1;
		temp |= (crc & 0x1);
		crc >>= 1;
	}

	return temp;
}

static void ioc3_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct ioc3_private *ip = netdev_priv(dev);

	strlcpy(info->driver, IOC3_NAME, sizeof(info->driver));
	strlcpy(info->version, IOC3_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(ip->pdev), sizeof(info->bus_info));
}

static int ioc3_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *cmd)
{
	struct ioc3_private *ip = netdev_priv(dev);

	spin_lock_irq(&ip->ioc3_lock);
	mii_ethtool_get_link_ksettings(&ip->mii, cmd);
	spin_unlock_irq(&ip->ioc3_lock);

	return 0;
}

static int ioc3_set_link_ksettings(struct net_device *dev,
				   const struct ethtool_link_ksettings *cmd)
{
	struct ioc3_private *ip = netdev_priv(dev);
	int rc;

	spin_lock_irq(&ip->ioc3_lock);
	rc = mii_ethtool_set_link_ksettings(&ip->mii, cmd);
	spin_unlock_irq(&ip->ioc3_lock);

	return rc;
}

static int ioc3_nway_reset(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	int rc;

	spin_lock_irq(&ip->ioc3_lock);
	rc = mii_nway_restart(&ip->mii);
	spin_unlock_irq(&ip->ioc3_lock);

	return rc;
}

static u32 ioc3_get_link(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	int rc;

	spin_lock_irq(&ip->ioc3_lock);
	rc = mii_link_ok(&ip->mii);
	spin_unlock_irq(&ip->ioc3_lock);

	return rc;
}

static const struct ethtool_ops ioc3_ethtool_ops = {
	.get_drvinfo		= ioc3_get_drvinfo,
	.nway_reset		= ioc3_nway_reset,
	.get_link		= ioc3_get_link,
	.get_link_ksettings	= ioc3_get_link_ksettings,
	.set_link_ksettings	= ioc3_set_link_ksettings,
};

static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct ioc3_private *ip = netdev_priv(dev);
	int rc;

	spin_lock_irq(&ip->ioc3_lock);
	rc = generic_mii_ioctl(&ip->mii, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&ip->ioc3_lock);

	return rc;
}

static void ioc3_set_multicast_list(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3_ethregs *regs = ip->regs;
	struct netdev_hw_addr *ha;
	u64 ehar = 0;

	spin_lock_irq(&ip->ioc3_lock);

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		ip->emcr |= EMCR_PROMISC;
		writel(ip->emcr, &regs->emcr);
		readl(&regs->emcr);
	} else {
		ip->emcr &= ~EMCR_PROMISC;
		writel(ip->emcr, &regs->emcr);	/* Clear promiscuous. */
		readl(&regs->emcr);

		if ((dev->flags & IFF_ALLMULTI) ||
		    (netdev_mc_count(dev) > 64)) {
			/* Too many for hashing to make sense or we want all
			 * multicast packets anyway, so skip computing all the
			 * hashes and just accept all packets.
			 */
			ip->ehar_h = 0xffffffff;
			ip->ehar_l = 0xffffffff;
		} else {
			netdev_for_each_mc_addr(ha, dev) {
				ehar |= (1UL << ioc3_hash(ha->addr));
			}
			ip->ehar_h = ehar >> 32;
			ip->ehar_l = ehar & 0xffffffff;
		}
		writel(ip->ehar_h, &regs->ehar_h);
		writel(ip->ehar_l, &regs->ehar_l);
	}

	spin_unlock_irq(&ip->ioc3_lock);
}

module_pci_driver(ioc3_driver);
MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
MODULE_DESCRIPTION("SGI IOC3 Ethernet driver");
MODULE_LICENSE("GPL");