/*
 * SGI IOC3 Ethernet driver (see MODULE_DESCRIPTION / MODULE_AUTHOR below).
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.
 */

#define IOC3_NAME "ioc3-eth"
#define IOC3_VERSION "2.6.3-4"

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

#ifdef CONFIG_SERIAL_8250
#include <linux/serial_core.h>
#include <linux/serial_8250.h>
#include <linux/serial_reg.h>
#endif

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <net/ip.h>

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/sn/types.h>
#include <asm/sn/ioc3.h>
#include <asm/pci/bridge.h>

/* Number of RX buffers posted to the hardware at initialization time. */
#define RX_BUFFS 64

/* Inter-packet gap timing for full and half duplex operation. */
#define ETCSR_FD ((17<<ETCSR_IPGR2_SHIFT) | (11<<ETCSR_IPGR1_SHIFT) | 21)
#define ETCSR_HD ((21<<ETCSR_IPGR2_SHIFT) | (21<<ETCSR_IPGR1_SHIFT) | 21)

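/*
 * Private per-interface state.  The RX ring has 512 entries and the TX
 * ring 128; the producer/consumer indices below track both rings.
 */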
struct ioc3_private {
	struct ioc3 *regs;
	unsigned long *rxr;
	struct ioc3_etxd *txr;
	struct sk_buff *rx_skbs[512];
	struct sk_buff *tx_skbs[128];
	int rx_ci;			/* RX consumer index */
	int rx_pi;			/* RX producer index */
	int tx_ci;			/* TX consumer index */
	int tx_pi;			/* TX producer index */
	int txqlen;
	u32 emcr, ehar_h, ehar_l;
	spinlock_t ioc3_lock;
	struct mii_if_info mii;

	struct pci_dev *pdev;

	/* Timer used to poll the PHY for link and duplex changes.  */
	struct timer_list ioc3_timer;
};

/*
 * Recover the struct net_device from its private area; alloc_etherdev()
 * places the private data right after the 32-byte aligned net_device.
 */
static inline struct net_device *priv_netdev(struct ioc3_private *dev)
{
	return (void *)dev - ((sizeof(struct net_device) + 31) & ~31);
}

static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void ioc3_set_multicast_list(struct net_device *dev);
static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void ioc3_timeout(struct net_device *dev);
static inline unsigned int ioc3_hash(const unsigned char *addr);
static inline void ioc3_stop(struct ioc3_private *ip);
static void ioc3_init(struct net_device *dev);

static const char ioc3_str[] = "IOC3 Ethernet";
static const struct ethtool_ops ioc3_ethtool_ops;

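/*
 * RX buffers handed to the chip must start on an IOC3 cacheline (128 byte)
 * boundary; ioc3_alloc_skb() over-allocates and reserves the slack.
 */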
#define IOC3_CACHELINE 128UL

static inline unsigned long aligned_rx_skb_addr(unsigned long addr)
{
	return (~addr + 1) & (IOC3_CACHELINE - 1UL);
}

static inline struct sk_buff *ioc3_alloc_skb(unsigned long length,
					     unsigned int gfp_mask)
{
	struct sk_buff *skb;

	skb = alloc_skb(length + IOC3_CACHELINE - 1, gfp_mask);
	if (likely(skb)) {
		int offset = aligned_rx_skb_addr((unsigned long) skb->data);
		if (offset)
			skb_reserve(skb, offset);
	}

	return skb;
}

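/*
 * Map a kernel virtual address to a bus address the IOC3 can DMA to/from.
 * On SGI IP27 this builds a 64-bit direct-mapped PCI address with the
 * prefetch attribute; elsewhere fall back to virt_to_bus().
 */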
static inline unsigned long ioc3_map(void *ptr, unsigned long vdev)
{
#ifdef CONFIG_SGI_IP27
	vdev <<= 57;

	return vdev | (0xaUL << PCI64_ATTR_TARG_SHFT) | PCI64_ATTR_PREF |
	       ((unsigned long)ptr & TO_PHYS_MASK);
#else
	return virt_to_bus(ptr);
#endif
}

/* Offset of packet data within an RX buffer; also programmed into the MAC
   via EMCR_RXOFF (in halfwords).  */
#define RX_OFFSET 10
#define RX_BUF_ALLOC_SIZE (1664 + RX_OFFSET + IOC3_CACHELINE)

/* Order descriptor and buffer stores before telling the chip about them. */
#define BARRIER() \
	__asm__("sync" ::: "memory")

#define IOC3_SIZE 0x100000

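/*
 * Accessor macros for the memory-mapped IOC3 registers.  The IOC3 is a
 * big-endian chip, so every access goes through cpu_to_be32()/
 * be32_to_cpu().  They all expect a local variable named "ioc3" pointing
 * at the register block.
 */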
#define ioc3_r_mcr()		be32_to_cpu(ioc3->mcr)
#define ioc3_w_mcr(v)		do { ioc3->mcr = cpu_to_be32(v); } while (0)
#define ioc3_w_gpcr_s(v)	do { ioc3->gpcr_s = cpu_to_be32(v); } while (0)
#define ioc3_r_emcr()		be32_to_cpu(ioc3->emcr)
#define ioc3_w_emcr(v)		do { ioc3->emcr = cpu_to_be32(v); } while (0)
#define ioc3_r_eisr()		be32_to_cpu(ioc3->eisr)
#define ioc3_w_eisr(v)		do { ioc3->eisr = cpu_to_be32(v); } while (0)
#define ioc3_r_eier()		be32_to_cpu(ioc3->eier)
#define ioc3_w_eier(v)		do { ioc3->eier = cpu_to_be32(v); } while (0)
#define ioc3_r_ercsr()		be32_to_cpu(ioc3->ercsr)
#define ioc3_w_ercsr(v)		do { ioc3->ercsr = cpu_to_be32(v); } while (0)
#define ioc3_r_erbr_h()		be32_to_cpu(ioc3->erbr_h)
#define ioc3_w_erbr_h(v)	do { ioc3->erbr_h = cpu_to_be32(v); } while (0)
#define ioc3_r_erbr_l()		be32_to_cpu(ioc3->erbr_l)
#define ioc3_w_erbr_l(v)	do { ioc3->erbr_l = cpu_to_be32(v); } while (0)
#define ioc3_r_erbar()		be32_to_cpu(ioc3->erbar)
#define ioc3_w_erbar(v)		do { ioc3->erbar = cpu_to_be32(v); } while (0)
#define ioc3_r_ercir()		be32_to_cpu(ioc3->ercir)
#define ioc3_w_ercir(v)		do { ioc3->ercir = cpu_to_be32(v); } while (0)
#define ioc3_r_erpir()		be32_to_cpu(ioc3->erpir)
#define ioc3_w_erpir(v)		do { ioc3->erpir = cpu_to_be32(v); } while (0)
#define ioc3_r_ertr()		be32_to_cpu(ioc3->ertr)
#define ioc3_w_ertr(v)		do { ioc3->ertr = cpu_to_be32(v); } while (0)
#define ioc3_r_etcsr()		be32_to_cpu(ioc3->etcsr)
#define ioc3_w_etcsr(v)		do { ioc3->etcsr = cpu_to_be32(v); } while (0)
#define ioc3_r_ersr()		be32_to_cpu(ioc3->ersr)
#define ioc3_w_ersr(v)		do { ioc3->ersr = cpu_to_be32(v); } while (0)
#define ioc3_r_etcdc()		be32_to_cpu(ioc3->etcdc)
#define ioc3_w_etcdc(v)		do { ioc3->etcdc = cpu_to_be32(v); } while (0)
#define ioc3_r_ebir()		be32_to_cpu(ioc3->ebir)
#define ioc3_w_ebir(v)		do { ioc3->ebir = cpu_to_be32(v); } while (0)
#define ioc3_r_etbr_h()		be32_to_cpu(ioc3->etbr_h)
#define ioc3_w_etbr_h(v)	do { ioc3->etbr_h = cpu_to_be32(v); } while (0)
#define ioc3_r_etbr_l()		be32_to_cpu(ioc3->etbr_l)
#define ioc3_w_etbr_l(v)	do { ioc3->etbr_l = cpu_to_be32(v); } while (0)
#define ioc3_r_etcir()		be32_to_cpu(ioc3->etcir)
#define ioc3_w_etcir(v)		do { ioc3->etcir = cpu_to_be32(v); } while (0)
#define ioc3_r_etpir()		be32_to_cpu(ioc3->etpir)
#define ioc3_w_etpir(v)		do { ioc3->etpir = cpu_to_be32(v); } while (0)
#define ioc3_r_emar_h()		be32_to_cpu(ioc3->emar_h)
#define ioc3_w_emar_h(v)	do { ioc3->emar_h = cpu_to_be32(v); } while (0)
#define ioc3_r_emar_l()		be32_to_cpu(ioc3->emar_l)
#define ioc3_w_emar_l(v)	do { ioc3->emar_l = cpu_to_be32(v); } while (0)
#define ioc3_r_ehar_h()		be32_to_cpu(ioc3->ehar_h)
#define ioc3_w_ehar_h(v)	do { ioc3->ehar_h = cpu_to_be32(v); } while (0)
#define ioc3_r_ehar_l()		be32_to_cpu(ioc3->ehar_l)
#define ioc3_w_ehar_l(v)	do { ioc3->ehar_l = cpu_to_be32(v); } while (0)
#define ioc3_r_micr()		be32_to_cpu(ioc3->micr)
#define ioc3_w_micr(v)		do { ioc3->micr = cpu_to_be32(v); } while (0)
#define ioc3_r_midr_r()		be32_to_cpu(ioc3->midr_r)
#define ioc3_w_midr_r(v)	do { ioc3->midr_r = cpu_to_be32(v); } while (0)
#define ioc3_r_midr_w()		be32_to_cpu(ioc3->midr_w)
#define ioc3_w_midr_w(v)	do { ioc3->midr_w = cpu_to_be32(v); } while (0)

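/*
 * The IOC3 "NIC" interface, driven through the MCR register, talks to the
 * Dallas one-wire serial number chip that stores the board's MAC address.
 * mcr_pack() encodes the pulse and sample times of one bus transaction.
 */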
static inline u32 mcr_pack(u32 pulse, u32 sample)
{
	return (pulse << 10) | (sample << 2);
}

static int nic_wait(struct ioc3 *ioc3)
{
	u32 mcr;

	do {
		mcr = ioc3_r_mcr();
	} while (!(mcr & 2));

	return mcr & 1;
}

static int nic_reset(struct ioc3 *ioc3)
{
	int presence;

	ioc3_w_mcr(mcr_pack(500, 65));
	presence = nic_wait(ioc3);

	ioc3_w_mcr(mcr_pack(0, 500));
	nic_wait(ioc3);

	return presence;
}

static inline int nic_read_bit(struct ioc3 *ioc3)
{
	int result;

	ioc3_w_mcr(mcr_pack(6, 13));
	result = nic_wait(ioc3);
	ioc3_w_mcr(mcr_pack(0, 100));
	nic_wait(ioc3);

	return result;
}

static inline void nic_write_bit(struct ioc3 *ioc3, int bit)
{
	if (bit)
		ioc3_w_mcr(mcr_pack(6, 110));
	else
		ioc3_w_mcr(mcr_pack(80, 30));

	nic_wait(ioc3);
}

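/* Read a byte from the NIC, least significant bit first. */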
static u32 nic_read_byte(struct ioc3 *ioc3)
{
	u32 result = 0;
	int i;

	for (i = 0; i < 8; i++)
		result = (result >> 1) | (nic_read_bit(ioc3) << 7);

	return result;
}

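/* Write a byte to the NIC, least significant bit first. */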
static void nic_write_byte(struct ioc3 *ioc3, int byte)
{
	int i, bit;

	for (i = 8; i; i--) {
		bit = byte & 1;
		byte >>= 1;

		nic_write_bit(ioc3, bit);
	}
}

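/*
 * One pass of the one-wire "Search ROM" (0xf0) algorithm: read each of the
 * 64 ROM bits and its complement, resolve conflicts using the last
 * discrepancy position in *last, and return the 64-bit ROM address found.
 */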
static u64 nic_find(struct ioc3 *ioc3, int *last)
{
	int a, b, index, disc;
	u64 address = 0;

	nic_reset(ioc3);

	/* Search ROM.  */
	nic_write_byte(ioc3, 0xf0);

	for (index = 0, disc = 0; index < 64; index++) {
		a = nic_read_bit(ioc3);
		b = nic_read_bit(ioc3);

		if (a && b) {
			printk("NIC search failed (not fatal).\n");
			*last = 0;
			return 0;
		}

		if (!a && !b) {
			if (index == *last) {
				address |= 1UL << index;
			} else if (index > *last) {
				address &= ~(1UL << index);
				disc = index;
			} else if ((address & (1UL << index)) == 0)
				disc = index;
			nic_write_bit(ioc3, address & (1UL << index));
			continue;
		} else {
			if (a)
				address |= 1UL << index;
			else
				address &= ~(1UL << index);
			nic_write_bit(ioc3, a);
			continue;
		}
	}

	*last = disc;

	return address;
}

static int nic_init(struct ioc3 *ioc3)
{
	const char *unknown = "unknown";
	const char *type = unknown;
	u8 crc;
	u8 serial[6];
	int save = 0, i;

	while (1) {
		u64 reg;
		reg = nic_find(ioc3, &save);

		switch (reg & 0xff) {
		case 0x91:
			type = "DS1981U";
			break;
		default:
			if (save == 0) {
				/* Let the caller try again.  */
				return -1;
			}
			continue;
		}

		nic_reset(ioc3);

		/* Match ROM (0x55): address the device found above.  */
		nic_write_byte(ioc3, 0x55);
		for (i = 0; i < 8; i++)
			nic_write_byte(ioc3, (reg >> (i << 3)) & 0xff);

		reg >>= 8;	/* Skip the family code byte.  */
		for (i = 0; i < 6; i++) {
			serial[i] = reg & 0xff;
			reg >>= 8;
		}
		crc = reg & 0xff;
		break;
	}

	printk("Found %s NIC", type);
	if (type != unknown)
		printk(" registration number %pM, CRC %02x", serial, crc);
	printk(".\n");

	return 0;
}

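/*
 * Read the hardware Ethernet address out of the NIC chip; bytes 2..7 of
 * the returned dump hold the six-byte station address.
 */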
static void ioc3_get_eaddr_nic(struct ioc3_private *ip)
{
	struct ioc3 *ioc3 = ip->regs;
	u8 nic[14];
	int tries = 2;
	int i;

	ioc3_w_gpcr_s(1 << 21);

	while (tries--) {
		if (!nic_init(ioc3))
			break;
		udelay(500);
	}

	if (tries < 0) {
		printk("Failed to read MAC address\n");
		return;
	}

	/* Read Memory (0xf0) starting at offset 0.  */
	nic_write_byte(ioc3, 0xf0);
	nic_write_byte(ioc3, 0x00);
	nic_write_byte(ioc3, 0x00);

	for (i = 13; i >= 0; i--)
		nic[i] = nic_read_byte(ioc3);

	for (i = 2; i < 8; i++)
		priv_netdev(ip)->dev_addr[i - 2] = nic[i];
}

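/*
 * Obtain the Ethernet address from the NIC and report it.
 */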
static void ioc3_get_eaddr(struct ioc3_private *ip)
{
	ioc3_get_eaddr_nic(ip);

	printk("Ethernet address is %pM.\n", priv_netdev(ip)->dev_addr);
}

static void __ioc3_set_mac_address(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3 *ioc3 = ip->regs;

	ioc3_w_emar_h((dev->dev_addr[5] << 8) | dev->dev_addr[4]);
	ioc3_w_emar_l((dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) |
		      (dev->dev_addr[1] << 8) | dev->dev_addr[0]);
}

static int ioc3_set_mac_address(struct net_device *dev, void *addr)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct sockaddr *sa = addr;

	memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);

	spin_lock_irq(&ip->ioc3_lock);
	__ioc3_set_mac_address(dev);
	spin_unlock_irq(&ip->ioc3_lock);

	return 0;
}

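/*
 * MII management access through the MICR/MIDR registers; each operation
 * busy-waits for the previous one to complete.
 */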
static int ioc3_mdio_read(struct net_device *dev, int phy, int reg)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3 *ioc3 = ip->regs;

	while (ioc3_r_micr() & MICR_BUSY);
	ioc3_w_micr((phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG);
	while (ioc3_r_micr() & MICR_BUSY);

	return ioc3_r_midr_r() & MIDR_DATA_MASK;
}

static void ioc3_mdio_write(struct net_device *dev, int phy, int reg, int data)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3 *ioc3 = ip->regs;

	while (ioc3_r_micr() & MICR_BUSY);
	ioc3_w_midr_w(data);
	ioc3_w_micr((phy << MICR_PHYADDR_SHIFT) | reg);
	while (ioc3_r_micr() & MICR_BUSY);
}

static int ioc3_mii_init(struct ioc3_private *ip);

static struct net_device_stats *ioc3_get_stats(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3 *ioc3 = ip->regs;

	dev->stats.collisions += (ioc3_r_etcdc() & ETCDC_COLLCNT_MASK);
	return &dev->stats;
}

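/*
 * The IOC3 hands us a ones-complement sum over the entire received frame,
 * Ethernet header and trailing CRC included.  To verify the TCP/UDP
 * checksum we add the pseudo-header to that sum and then back out the
 * contributions of the Ethernet header and of the trailing CRC.  If the
 * result folds to 0xffff the checksum is good and the skb can be marked
 * CHECKSUM_UNNECESSARY.
 */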
static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len)
{
	struct ethhdr *eh = eth_hdr(skb);
	uint32_t csum, ehsum;
	unsigned int proto;
	struct iphdr *ih;
	uint16_t *ew;
	unsigned char *cp;

	if (eh->h_proto != htons(ETH_P_IP))
		return;

	ih = (struct iphdr *) ((char *)eh + ETH_HLEN);
	if (ip_is_fragment(ih))
		return;

	proto = ih->protocol;
	if (proto != IPPROTO_TCP && proto != IPPROTO_UDP)
		return;

	/* Same as on TX: add the pseudo header to the hardware sum.  */
	csum = hwsum +
	       (ih->tot_len - (ih->ihl << 2)) +
	       htons((uint16_t)ih->protocol) +
	       (ih->saddr >> 16) + (ih->saddr & 0xffff) +
	       (ih->daddr >> 16) + (ih->daddr & 0xffff);

	/* Sum up the Ethernet dest addr, src addr and protocol field.  */
	ew = (uint16_t *) eh;
	ehsum = ew[0] + ew[1] + ew[2] + ew[3] + ew[4] + ew[5] + ew[6];

	ehsum = (ehsum & 0xffff) + (ehsum >> 16);
	ehsum = (ehsum & 0xffff) + (ehsum >> 16);

	csum += 0xffff ^ ehsum;

	/* Also back out the ones-complement sum of the trailing CRC.  */
	cp = (char *)eh + len;		/* points at the trailing CRC */
	if (len & 1) {
		csum += 0xffff ^ (uint16_t) ((cp[1] << 8) | cp[0]);
		csum += 0xffff ^ (uint16_t) ((cp[3] << 8) | cp[2]);
	} else {
		csum += 0xffff ^ (uint16_t) ((cp[0] << 8) | cp[1]);
		csum += 0xffff ^ (uint16_t) ((cp[2] << 8) | cp[3]);
	}

	csum = (csum & 0xffff) + (csum >> 16);
	csum = (csum & 0xffff) + (csum >> 16);

	if (csum == 0xffff)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

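/*
 * Receive path: drain valid entries from the RX ring, hand good frames to
 * the stack and refill the ring with fresh buffers.
 */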
static inline void ioc3_rx(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct sk_buff *skb, *new_skb;
	struct ioc3 *ioc3 = ip->regs;
	int rx_entry, n_entry, len;
	struct ioc3_erxbuf *rxb;
	unsigned long *rxr;
	u32 w0, err;

	rxr = ip->rxr;
	rx_entry = ip->rx_ci;
	n_entry = ip->rx_pi;

	skb = ip->rx_skbs[rx_entry];
	rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
	w0 = be32_to_cpu(rxb->w0);

	while (w0 & ERXBUF_V) {		/* Valid entry */
		err = be32_to_cpu(rxb->err);
		if (err & ERXBUF_GOODPKT) {
			len = ((w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff) - 4;
			skb_trim(skb, len);
			skb->protocol = eth_type_trans(skb, dev);

			new_skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
			if (!new_skb) {
				/* Allocation failed: drop the packet and
				   recycle the buffer to keep the ring full. */
				dev->stats.rx_dropped++;
				new_skb = skb;
				goto next;
			}

			if (likely(dev->features & NETIF_F_RXCSUM))
				ioc3_tcpudp_checksum(skb,
					w0 & ERXBUF_IPCKSUM_MASK, len);

			netif_rx(skb);

			ip->rx_skbs[rx_entry] = NULL;

			/* Set up the replacement buffer; packet data starts
			   RX_OFFSET into it. */
			skb_put(new_skb, (1664 + RX_OFFSET));
			rxb = (struct ioc3_erxbuf *) new_skb->data;
			skb_reserve(new_skb, RX_OFFSET);

			dev->stats.rx_packets++;
			dev->stats.rx_bytes += len;
		} else {
			/* Bad frame: never reached the network layer, so
			   just recycle the buffer and count the error. */
			new_skb = skb;
			dev->stats.rx_errors++;
		}
		if (err & ERXBUF_CRCERR)
			dev->stats.rx_crc_errors++;
		if (err & ERXBUF_FRAMERR)
			dev->stats.rx_frame_errors++;
next:
		ip->rx_skbs[n_entry] = new_skb;
		rxr[n_entry] = cpu_to_be64(ioc3_map(rxb, 1));
		rxb->w0 = 0;				/* Clear valid flag */
		n_entry = (n_entry + 1) & 511;

		rx_entry = (rx_entry + 1) & 511;
		skb = ip->rx_skbs[rx_entry];
		rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
		w0 = be32_to_cpu(rxb->w0);
	}
	ioc3_w_erpir((n_entry << 3) | ERPIR_ARM);
	ip->rx_pi = n_entry;
	ip->rx_ci = rx_entry;
}

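/*
 * Transmit completion: free the skbs the chip has finished with and
 * update the statistics.
 */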
static inline void ioc3_tx(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	unsigned long packets, bytes;
	struct ioc3 *ioc3 = ip->regs;
	int tx_entry, o_entry;
	struct sk_buff *skb;
	u32 etcir;

	spin_lock(&ip->ioc3_lock);
	etcir = ioc3_r_etcir();

	tx_entry = (etcir >> 7) & 127;
	o_entry = ip->tx_ci;
	packets = 0;
	bytes = 0;

	while (o_entry != tx_entry) {
		packets++;
		skb = ip->tx_skbs[o_entry];
		bytes += skb->len;
		dev_kfree_skb_irq(skb);
		ip->tx_skbs[o_entry] = NULL;

		o_entry = (o_entry + 1) & 127;

		etcir = ioc3_r_etcir();
		tx_entry = (etcir >> 7) & 127;
	}

	dev->stats.tx_packets += packets;
	dev->stats.tx_bytes += bytes;
	ip->txqlen -= packets;

	if (ip->txqlen < 128)
		netif_wake_queue(dev);

	ip->tx_ci = o_entry;
	spin_unlock(&ip->ioc3_lock);
}

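/*
 * Deal with fatal IOC3 errors.  The chip is stopped and fully
 * reinitialized; whatever was in the rings is lost, but that beats a
 * hung interface.
 */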
static void ioc3_error(struct net_device *dev, u32 eisr)
{
	struct ioc3_private *ip = netdev_priv(dev);
	unsigned char *iface = dev->name;

	spin_lock(&ip->ioc3_lock);

	if (eisr & EISR_RXOFLO)
		printk(KERN_ERR "%s: RX overflow.\n", iface);
	if (eisr & EISR_RXBUFOFLO)
		printk(KERN_ERR "%s: RX buffer overflow.\n", iface);
	if (eisr & EISR_RXMEMERR)
		printk(KERN_ERR "%s: RX PCI error.\n", iface);
	if (eisr & EISR_RXPARERR)
		printk(KERN_ERR "%s: RX SSRAM parity error.\n", iface);
	if (eisr & EISR_TXBUFUFLO)
		printk(KERN_ERR "%s: TX buffer underflow.\n", iface);
	if (eisr & EISR_TXMEMERR)
		printk(KERN_ERR "%s: TX PCI error.\n", iface);

	ioc3_stop(ip);
	ioc3_init(dev);
	ioc3_mii_init(ip);

	netif_wake_queue(dev);

	spin_unlock(&ip->ioc3_lock);
}

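/* The interrupt handler does all the RX work and cleans up after the TX. */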
static irqreturn_t ioc3_interrupt(int irq, void *_dev)
{
	struct net_device *dev = (struct net_device *)_dev;
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3 *ioc3 = ip->regs;
	const u32 enabled = EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
			    EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
			    EISR_TXEXPLICIT | EISR_TXMEMERR;
	u32 eisr;

	eisr = ioc3_r_eisr() & enabled;

	ioc3_w_eisr(eisr);
	(void) ioc3_r_eisr();			/* Flush the acknowledge */

	if (eisr & (EISR_RXOFLO | EISR_RXBUFOFLO | EISR_RXMEMERR |
		    EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR))
		ioc3_error(dev, eisr);
	if (eisr & EISR_RXTIMERINT)
		ioc3_rx(dev);
	if (eisr & EISR_TXEXPLICIT)
		ioc3_tx(dev);

	return IRQ_HANDLED;
}

static inline void ioc3_setup_duplex(struct ioc3_private *ip)
{
	struct ioc3 *ioc3 = ip->regs;

	if (ip->mii.full_duplex) {
		ioc3_w_etcsr(ETCSR_FD);
		ip->emcr |= EMCR_DUPLEX;
	} else {
		ioc3_w_etcsr(ETCSR_HD);
		ip->emcr &= ~EMCR_DUPLEX;
	}
	ioc3_w_emcr(ip->emcr);
}

static void ioc3_timer(unsigned long data)
{
	struct ioc3_private *ip = (struct ioc3_private *) data;

	/* Print the link status if it has changed */
	mii_check_media(&ip->mii, 1, 0);
	ioc3_setup_duplex(ip);

	ip->ioc3_timer.expires = jiffies + ((12 * HZ)/10);	/* 1.2 sec. */
	add_timer(&ip->ioc3_timer);
}

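/*
 * Try to find a PHY by probing all 32 MII addresses for a valid PHY ID.
 * If nothing answers, fall back to address 31 as a workaround for boards
 * whose PHY does not respond to the ID registers.
 */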
static int ioc3_mii_init(struct ioc3_private *ip)
{
	struct net_device *dev = priv_netdev(ip);
	int i, found = 0, res = 0;
	int ioc3_phy_workaround = 1;
	u16 word;

	for (i = 0; i < 32; i++) {
		word = ioc3_mdio_read(dev, i, MII_PHYSID1);

		if (word != 0xffff && word != 0x0000) {
			found = 1;
			break;
		}
	}

	if (!found) {
		if (ioc3_phy_workaround)
			i = 31;
		else {
			ip->mii.phy_id = -1;
			res = -ENODEV;
			goto out;
		}
	}

	ip->mii.phy_id = i;

out:
	return res;
}

static void ioc3_mii_start(struct ioc3_private *ip)
{
	ip->ioc3_timer.expires = jiffies + (12 * HZ)/10;	/* 1.2 sec. */
	ip->ioc3_timer.data = (unsigned long) ip;
	ip->ioc3_timer.function = ioc3_timer;
	add_timer(&ip->ioc3_timer);
}

static inline void ioc3_clean_rx_ring(struct ioc3_private *ip)
{
	struct sk_buff *skb;
	int i;

	/* Shift any not-yet-consumed entries forward until the consumer
	   index is 16-aligned.  */
	for (i = ip->rx_ci; i & 15; i++) {
		ip->rx_skbs[ip->rx_pi] = ip->rx_skbs[ip->rx_ci];
		ip->rxr[ip->rx_pi++] = ip->rxr[ip->rx_ci++];
	}
	ip->rx_pi &= 511;
	ip->rx_ci &= 511;

	for (i = ip->rx_ci; i != ip->rx_pi; i = (i+1) & 511) {
		struct ioc3_erxbuf *rxb;
		skb = ip->rx_skbs[i];
		rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
		rxb->w0 = 0;				/* Clear valid flag */
	}
}

static inline void ioc3_clean_tx_ring(struct ioc3_private *ip)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < 128; i++) {
		skb = ip->tx_skbs[i];
		if (skb) {
			ip->tx_skbs[i] = NULL;
			dev_kfree_skb_any(skb);
		}
		ip->txr[i].cmd = 0;
	}
	ip->tx_pi = 0;
	ip->tx_ci = 0;
}

static void ioc3_free_rings(struct ioc3_private *ip)
{
	struct sk_buff *skb;
	int rx_entry, n_entry;

	if (ip->txr) {
		ioc3_clean_tx_ring(ip);
		free_pages((unsigned long)ip->txr, 2);
		ip->txr = NULL;
	}

	if (ip->rxr) {
		n_entry = ip->rx_ci;
		rx_entry = ip->rx_pi;

		while (n_entry != rx_entry) {
			skb = ip->rx_skbs[n_entry];
			if (skb)
				dev_kfree_skb_any(skb);

			n_entry = (n_entry + 1) & 511;
		}
		free_page((unsigned long)ip->rxr);
		ip->rxr = NULL;
	}
}

static void ioc3_alloc_rings(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3_erxbuf *rxb;
	unsigned long *rxr;
	int i;

	if (ip->rxr == NULL) {
		/* Allocate and initialize the RX ring: one page holds all
		   512 eight-byte entries.  */
		ip->rxr = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		rxr = ip->rxr;
		if (!rxr)
			printk("ioc3_alloc_rings(): get_zeroed_page() failed!\n");

		/* Now the RX buffers: post RX_BUFFS of the 512 ring
		   entries for now.  */
		for (i = 0; i < RX_BUFFS; i++) {
			struct sk_buff *skb;

			skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
			if (!skb) {
				show_free_areas(0);
				continue;
			}

			ip->rx_skbs[i] = skb;

			/* The descriptor points at the front of the buffer;
			   packet data starts RX_OFFSET further in.  */
			skb_put(skb, (1664 + RX_OFFSET));
			rxb = (struct ioc3_erxbuf *) skb->data;
			rxr[i] = cpu_to_be64(ioc3_map(rxb, 1));
			skb_reserve(skb, RX_OFFSET);
		}
		ip->rx_ci = 0;
		ip->rx_pi = RX_BUFFS;
	}

	if (ip->txr == NULL) {
		/* Allocate and initialize the TX ring: 128 descriptors in
		   an order-2 page allocation.  */
		ip->txr = (struct ioc3_etxd *)__get_free_pages(GFP_KERNEL, 2);
		if (!ip->txr)
			printk("ioc3_alloc_rings(): __get_free_pages() failed!\n");
		ip->tx_pi = 0;
		ip->tx_ci = 0;
	}
}

static void ioc3_init_rings(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3 *ioc3 = ip->regs;
	unsigned long ring;

	ioc3_free_rings(ip);
	ioc3_alloc_rings(dev);

	ioc3_clean_rx_ring(ip);
	ioc3_clean_tx_ring(ip);

	/* Now the RX ring base, consume and produce registers.  */
	ring = ioc3_map(ip->rxr, 0);
	ioc3_w_erbr_h(ring >> 32);
	ioc3_w_erbr_l(ring & 0xffffffff);
	ioc3_w_ercir(ip->rx_ci << 3);
	ioc3_w_erpir((ip->rx_pi << 3) | ERPIR_ARM);

	ring = ioc3_map(ip->txr, 0);

	ip->txqlen = 0;					/* nothing queued */

	/* Now the TX ring base, consume and produce registers.  */
	ioc3_w_etbr_h(ring >> 32);
	ioc3_w_etbr_l(ring & 0xffffffff);
	ioc3_w_etpir(ip->tx_pi << 7);
	ioc3_w_etcir(ip->tx_ci << 7);
	(void) ioc3_r_etcir();				/* Flush */
}

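/*
 * Probe the size of the on-board SSRAM packet buffer: write a test pattern
 * assuming the large (128 KB) configuration and fall back to the small
 * (64 KB) one if it does not read back.
 */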
static inline void ioc3_ssram_disc(struct ioc3_private *ip)
{
	struct ioc3 *ioc3 = ip->regs;
	volatile u32 *ssram0 = &ioc3->ssram[0x0000];
	volatile u32 *ssram1 = &ioc3->ssram[0x4000];
	unsigned int pattern = 0x5555;

	/* Assume the larger size SSRAM and enable parity checking.  */
	ioc3_w_emcr(ioc3_r_emcr() | (EMCR_BUFSIZ | EMCR_RAMPAR));

	*ssram0 = pattern;
	*ssram1 = ~pattern & IOC3_SSRAM_DM;

	if ((*ssram0 & IOC3_SSRAM_DM) != pattern ||
	    (*ssram1 & IOC3_SSRAM_DM) != (~pattern & IOC3_SSRAM_DM)) {
		/* set SSRAM size to 64 KB */
		ip->emcr = EMCR_RAMPAR;
		ioc3_w_emcr(ioc3_r_emcr() & ~EMCR_BUFSIZ);
	} else
		ip->emcr = EMCR_BUFSIZ | EMCR_RAMPAR;
}

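/*
 * Reset the chip and bring it into a fully working state: program the DMA
 * attributes, MAC address and hash filter, set up the rings, and finally
 * enable RX, TX and the interrupt sources we care about.
 */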
static void ioc3_init(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3 *ioc3 = ip->regs;

	del_timer_sync(&ip->ioc3_timer);	/* Kill if running	*/

	ioc3_w_emcr(EMCR_RST);			/* Reset		*/
	(void) ioc3_r_emcr();			/* Flush WB		*/
	udelay(4);				/* Give it time ...	*/
	ioc3_w_emcr(0);
	(void) ioc3_r_emcr();

	/* Misc registers  */
#ifdef CONFIG_SGI_IP27
	ioc3_w_erbar(PCI64_ATTR_BAR >> 32);
#else
	ioc3_w_erbar(0);
#endif
	(void) ioc3_r_etcdc();
	ioc3_w_ercsr(15);
	ioc3_w_ertr(0);
	__ioc3_set_mac_address(dev);
	ioc3_w_ehar_h(ip->ehar_h);
	ioc3_w_ehar_l(ip->ehar_l);
	ioc3_w_ersr(42);

	ioc3_init_rings(dev);

	ip->emcr |= ((RX_OFFSET / 2) << EMCR_RXOFF_SHIFT) | EMCR_TXDMAEN |
		    EMCR_TXEN | EMCR_RXDMAEN | EMCR_RXEN | EMCR_PADEN;
	ioc3_w_emcr(ip->emcr);
	ioc3_w_eier(EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
		    EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
		    EISR_TXEXPLICIT | EISR_TXMEMERR);
	(void) ioc3_r_eier();
}

static inline void ioc3_stop(struct ioc3_private *ip)
{
	struct ioc3 *ioc3 = ip->regs;

	ioc3_w_emcr(0);				/* Shut up the MAC */
	ioc3_w_eier(0);				/* Disable interrupts */
	(void) ioc3_r_eier();			/* Flush */
}

static int ioc3_open(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);

	if (request_irq(dev->irq, ioc3_interrupt, IRQF_SHARED, ioc3_str, dev)) {
		printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq);

		return -EAGAIN;
	}

	ip->ehar_h = 0;
	ip->ehar_l = 0;
	ioc3_init(dev);
	ioc3_mii_start(ip);

	netif_start_queue(dev);
	return 0;
}

static int ioc3_close(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);

	del_timer_sync(&ip->ioc3_timer);

	netif_stop_queue(dev);

	ioc3_stop(ip);
	free_irq(dev->irq, dev);

	ioc3_free_rings(ip);
	return 0;
}

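/*
 * A MENET board carries several IOC3s behind one bridge; detect it by
 * checking whether adjacent PCI slots 0-2 all contain IOC3 devices.
 */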
static int ioc3_adjacent_is_ioc3(struct pci_dev *pdev, int slot)
{
	struct pci_dev *dev = pci_get_slot(pdev->bus, PCI_DEVFN(slot, 0));
	int ret = 0;

	if (dev) {
		if (dev->vendor == PCI_VENDOR_ID_SGI &&
		    dev->device == PCI_DEVICE_ID_SGI_IOC3)
			ret = 1;
		pci_dev_put(dev);
	}

	return ret;
}

static int ioc3_is_menet(struct pci_dev *pdev)
{
	return pdev->bus->parent == NULL &&
	       ioc3_adjacent_is_ioc3(pdev, 0) &&
	       ioc3_adjacent_is_ioc3(pdev, 1) &&
	       ioc3_adjacent_is_ioc3(pdev, 2);
}

#ifdef CONFIG_SERIAL_8250

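/*
 * Register one of the IOC3's two on-chip 16550-style UARTs with the 8250
 * core.  The UART input clock is derived from a 22 MHz source; the
 * COSMISC_CONSTANT prescaler value is stashed in the scratch register so
 * that later console setup code can presumably recover the divisor used.
 */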
static void ioc3_8250_register(struct ioc3_uartregs __iomem *uart)
{
#define COSMISC_CONSTANT 6

	struct uart_8250_port port = {
		.port = {
			.irq		= 0,
			.flags		= UPF_SKIP_TEST | UPF_BOOT_AUTOCONF,
			.iotype		= UPIO_MEM,
			.regshift	= 0,
			.uartclk	= (22000000 << 1) / COSMISC_CONSTANT,

			.membase	= (unsigned char __iomem *) uart,
			.mapbase	= (unsigned long) uart,
		}
	};
	unsigned char lcr;

	lcr = uart->iu_lcr;
	uart->iu_lcr = lcr | UART_LCR_DLAB;
	uart->iu_scr = COSMISC_CONSTANT;
	uart->iu_lcr = lcr;
	uart->iu_lcr;			/* Read back to flush the write */
	serial8250_register_8250_port(&port);
}

static void ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3)
{
	/*
	 * On MENET boards the fourth IOC3 (PCI slot 3) does not bring out
	 * usable serial ports, so skip it.
	 */
	if (ioc3_is_menet(pdev) && PCI_SLOT(pdev->devfn) == 3)
		return;

	/*
	 * Switch the GPIO pins into UART mode and disable serial DMA; the
	 * 8250 core drives the ports in PIO mode.  Each write is followed
	 * by a read back to flush it out to the chip.
	 */
	ioc3->gpcr_s = GPCR_UARTA_MODESEL | GPCR_UARTB_MODESEL;
	ioc3->gpcr_s;
	ioc3->gppr_6 = 0;
	ioc3->gppr_6;
	ioc3->gppr_7 = 0;
	ioc3->gppr_7;
	ioc3->sscr_a = ioc3->sscr_a & ~SSCR_DMA_EN;
	ioc3->sscr_a;
	ioc3->sscr_b = ioc3->sscr_b & ~SSCR_DMA_EN;
	ioc3->sscr_b;

	/* Set up the SIO interrupt enables for both ports before handing
	   the UARTs to the 8250 core.  */
	ioc3->sio_iec &= ~(SIO_IR_SA_TX_MT | SIO_IR_SA_RX_FULL |
			   SIO_IR_SA_RX_HIGH | SIO_IR_SA_RX_TIMER |
			   SIO_IR_SA_DELTA_DCD | SIO_IR_SA_DELTA_CTS |
			   SIO_IR_SA_TX_EXPLICIT | SIO_IR_SA_MEMERR);
	ioc3->sio_iec |= SIO_IR_SA_INT;
	ioc3->sscr_a = 0;
	ioc3->sio_iec &= ~(SIO_IR_SB_TX_MT | SIO_IR_SB_RX_FULL |
			   SIO_IR_SB_RX_HIGH | SIO_IR_SB_RX_TIMER |
			   SIO_IR_SB_DELTA_DCD | SIO_IR_SB_DELTA_CTS |
			   SIO_IR_SB_TX_EXPLICIT | SIO_IR_SB_MEMERR);
	ioc3->sio_iec |= SIO_IR_SB_INT;
	ioc3->sscr_b = 0;

	ioc3_8250_register(&ioc3->sregs.uarta);
	ioc3_8250_register(&ioc3->sregs.uartb);
}
#endif

static const struct net_device_ops ioc3_netdev_ops = {
	.ndo_open		= ioc3_open,
	.ndo_stop		= ioc3_close,
	.ndo_start_xmit		= ioc3_start_xmit,
	.ndo_tx_timeout		= ioc3_timeout,
	.ndo_get_stats		= ioc3_get_stats,
	.ndo_set_rx_mode	= ioc3_set_multicast_list,
	.ndo_do_ioctl		= ioc3_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ioc3_set_mac_address,
	.ndo_change_mtu		= eth_change_mtu,
};

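/*
 * Probe one IOC3 device: set up 64-bit (or fall back to 32-bit) DMA masks,
 * map the register space, probe the serial ports, PHY and SSRAM, read the
 * MAC address and finally register the netdev.
 */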
static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned int sw_physid1, sw_physid2;
	struct net_device *dev = NULL;
	struct ioc3_private *ip;
	struct ioc3 *ioc3;
	unsigned long ioc3_base, ioc3_size;
	u32 vendor, model, rev;
	int err, pci_using_dac;

	/* Configure DMA attributes.  */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err < 0) {
			printk(KERN_ERR "%s: Unable to obtain 64 bit DMA "
			       "for consistent allocations\n", pci_name(pdev));
			goto out;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			printk(KERN_ERR "%s: No usable DMA configuration, "
			       "aborting.\n", pci_name(pdev));
			goto out;
		}
		pci_using_dac = 0;
	}

	if (pci_enable_device(pdev))
		return -ENODEV;

	dev = alloc_etherdev(sizeof(struct ioc3_private));
	if (!dev) {
		err = -ENOMEM;
		goto out_disable;
	}

	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;

	err = pci_request_regions(pdev, "ioc3");
	if (err)
		goto out_free;

	SET_NETDEV_DEV(dev, &pdev->dev);

	ip = netdev_priv(dev);

	dev->irq = pdev->irq;

	ioc3_base = pci_resource_start(pdev, 0);
	ioc3_size = pci_resource_len(pdev, 0);
	ioc3 = (struct ioc3 *) ioremap(ioc3_base, ioc3_size);
	if (!ioc3) {
		printk(KERN_CRIT "ioc3eth(%s): ioremap failed, goodbye.\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto out_res;
	}
	ip->regs = ioc3;

#ifdef CONFIG_SERIAL_8250
	ioc3_serial_probe(pdev, ioc3);
#endif

	spin_lock_init(&ip->ioc3_lock);
	init_timer(&ip->ioc3_timer);

	ioc3_stop(ip);
	ioc3_init(dev);

	ip->pdev = pdev;

	ip->mii.phy_id_mask = 0x1f;
	ip->mii.reg_num_mask = 0x1f;
	ip->mii.dev = dev;
	ip->mii.mdio_read = ioc3_mdio_read;
	ip->mii.mdio_write = ioc3_mdio_write;

	ioc3_mii_init(ip);

	if (ip->mii.phy_id == -1) {
		printk(KERN_CRIT "ioc3-eth(%s): Didn't find a PHY, goodbye.\n",
		       pci_name(pdev));
		err = -ENODEV;
		goto out_stop;
	}

	ioc3_mii_start(ip);
	ioc3_ssram_disc(ip);
	ioc3_get_eaddr(ip);

	/* The IOC3-specific entries in the device structure.  */
	dev->watchdog_timeo	= 5 * HZ;
	dev->netdev_ops		= &ioc3_netdev_ops;
	dev->ethtool_ops	= &ioc3_ethtool_ops;
	dev->hw_features	= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
	dev->features		= NETIF_F_IP_CSUM;

	sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1);
	sw_physid2 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID2);

	err = register_netdev(dev);
	if (err)
		goto out_stop;

	mii_check_media(&ip->mii, 1, 1);
	ioc3_setup_duplex(ip);

	vendor = (sw_physid1 << 12) | (sw_physid2 >> 4);
	model  = (sw_physid2 >> 4) & 0x3f;
	rev    = sw_physid2 & 0xf;
	printk(KERN_INFO "%s: Using PHY %d, vendor 0x%x, model %d, "
	       "rev %d.\n", dev->name, ip->mii.phy_id, vendor, model, rev);
	printk(KERN_INFO "%s: IOC3 SSRAM has %d kbyte.\n", dev->name,
	       ip->emcr & EMCR_BUFSIZ ? 128 : 64);

	return 0;

out_stop:
	ioc3_stop(ip);
	del_timer_sync(&ip->ioc3_timer);
	ioc3_free_rings(ip);
out_res:
	pci_release_regions(pdev);
out_free:
	free_netdev(dev);
out_disable:
	/*
	 * pci_disable_device() is deliberately not called here; other
	 * functions of the multi-function IOC3 may still be in use.
	 */
out:
	return err;
}


static void ioc3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3 *ioc3 = ip->regs;

	unregister_netdev(dev);
	del_timer_sync(&ip->ioc3_timer);

	iounmap(ioc3);
	pci_release_regions(pdev);
	free_netdev(dev);
	/*
	 * As in ioc3_probe(), pci_disable_device() is deliberately not
	 * called here because other IOC3 functions may still be in use.
	 */
}

static const struct pci_device_id ioc3_pci_tbl[] = {
	{ PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, ioc3_pci_tbl);

static struct pci_driver ioc3_driver = {
	.name		= "ioc3-eth",
	.id_table	= ioc3_pci_tbl,
	.probe		= ioc3_probe,
	.remove		= ioc3_remove_one,
};

static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned long data;
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3 *ioc3 = ip->regs;
	unsigned int len;
	struct ioc3_etxd *desc;
	uint32_t w0 = 0;
	int produce;

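	/*
	 * The IOC3 checksum hardware simply adds up the ones-complement sum
	 * of the entire frame and inserts it at a descriptor-specified
	 * offset.  For TX offload we therefore precompute the pseudo-header
	 * sum here, fold in the negated Ethernet-header sum so the
	 * hardware's contribution for those bytes cancels out, and store
	 * the partial checksum in the TCP/UDP header for the chip to
	 * finish.
	 */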
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const struct iphdr *ih = ip_hdr(skb);
		const int proto = ih->protocol;
		unsigned int csoff;
		uint32_t csum, ehsum;
		uint16_t *eh;

		/* skb->data points at the Ethernet header here.  */
		eh = (uint16_t *) skb->data;

		/* Sum the 7 halfwords of dest addr, src addr and type.  */
		ehsum = eh[0] + eh[1] + eh[2] + eh[3] + eh[4] + eh[5] + eh[6];

		/* Fold; csum_fold() can't be used as it also negates.  */
		ehsum = (ehsum & 0xffff) + (ehsum >> 16);
		ehsum = (ehsum & 0xffff) + (ehsum >> 16);

		/* The IP header checksum is already filled in, so only the
		   pseudo header needs to be added.  */
		csum = csum_tcpudp_nofold(ih->saddr, ih->daddr,
					  ih->tot_len - (ih->ihl << 2),
					  proto, 0xffff ^ ehsum);

		csum = (csum & 0xffff) + (csum >> 16);	/* Fold again */
		csum = (csum & 0xffff) + (csum >> 16);

		csoff = ETH_HLEN + (ih->ihl << 2);
		if (proto == IPPROTO_UDP) {
			csoff += offsetof(struct udphdr, check);
			udp_hdr(skb)->check = csum;
		}
		if (proto == IPPROTO_TCP) {
			csoff += offsetof(struct tcphdr, check);
			tcp_hdr(skb)->check = csum;
		}

		w0 = ETXD_DOCHECKSUM | (csoff << ETXD_CHKOFF_SHIFT);
	}

	spin_lock_irq(&ip->ioc3_lock);

	data = (unsigned long) skb->data;
	len = skb->len;

	produce = ip->tx_pi;
	desc = &ip->txr[produce];

	if (len <= 104) {
		/* Short packet: copy it directly into the descriptor.  */
		skb_copy_from_linear_data(skb, desc->data, skb->len);
		if (len < ETH_ZLEN) {
			/* Very short packet: pad with zeroes to ETH_ZLEN.  */
			memset(desc->data + len, 0, ETH_ZLEN - len);
			len = ETH_ZLEN;
		}
		desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_D0V | w0);
		desc->bufcnt = cpu_to_be32(len);
	} else if ((data ^ (data + len - 1)) & 0x4000) {
		/* Buffer straddles a 16 KB boundary: use two pointers.  */
		unsigned long b2 = (data | 0x3fffUL) + 1UL;
		unsigned long s1 = b2 - data;
		unsigned long s2 = data + len - b2;

		desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE |
					ETXD_B1V | ETXD_B2V | w0);
		desc->bufcnt = cpu_to_be32((s1 << ETXD_B1CNT_SHIFT) |
					   (s2 << ETXD_B2CNT_SHIFT));
		desc->p1 = cpu_to_be64(ioc3_map(skb->data, 1));
		desc->p2 = cpu_to_be64(ioc3_map((void *) b2, 1));
	} else {
		/* Normal sized packet that doesn't cross a 16 KB boundary. */
		desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_B1V | w0);
		desc->bufcnt = cpu_to_be32(len << ETXD_B1CNT_SHIFT);
		desc->p1 = cpu_to_be64(ioc3_map(skb->data, 1));
	}

	BARRIER();

	ip->tx_skbs[produce] = skb;			/* Remember skb */
	produce = (produce + 1) & 127;
	ip->tx_pi = produce;
	ioc3_w_etpir(produce << 7);			/* Fire ... */

	ip->txqlen++;

	if (ip->txqlen >= 127)
		netif_stop_queue(dev);

	spin_unlock_irq(&ip->ioc3_lock);

	return NETDEV_TX_OK;
}

static void ioc3_timeout(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);

	spin_lock_irq(&ip->ioc3_lock);

	ioc3_stop(ip);
	ioc3_init(dev);
	ioc3_mii_init(ip);
	ioc3_mii_start(ip);

	spin_unlock_irq(&ip->ioc3_lock);

	netif_wake_queue(dev);
}

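/*
 * Given an Ethernet address, compute the index into the 64-bit multicast
 * hash filter: the low six bits of the little-endian CRC, bit-reversed.
 */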
static inline unsigned int ioc3_hash(const unsigned char *addr)
{
	unsigned int temp = 0;
	u32 crc;
	int bits;

	crc = ether_crc_le(ETH_ALEN, addr);

	crc &= 0x3f;
	for (bits = 6; --bits >= 0; ) {
		temp <<= 1;
		temp |= (crc & 0x1);
		crc >>= 1;
	}

	return temp;
}

static void ioc3_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct ioc3_private *ip = netdev_priv(dev);

	strlcpy(info->driver, IOC3_NAME, sizeof(info->driver));
	strlcpy(info->version, IOC3_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(ip->pdev), sizeof(info->bus_info));
}

static int ioc3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct ioc3_private *ip = netdev_priv(dev);
	int rc;

	spin_lock_irq(&ip->ioc3_lock);
	rc = mii_ethtool_gset(&ip->mii, cmd);
	spin_unlock_irq(&ip->ioc3_lock);

	return rc;
}

static int ioc3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct ioc3_private *ip = netdev_priv(dev);
	int rc;

	spin_lock_irq(&ip->ioc3_lock);
	rc = mii_ethtool_sset(&ip->mii, cmd);
	spin_unlock_irq(&ip->ioc3_lock);

	return rc;
}

static int ioc3_nway_reset(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	int rc;

	spin_lock_irq(&ip->ioc3_lock);
	rc = mii_nway_restart(&ip->mii);
	spin_unlock_irq(&ip->ioc3_lock);

	return rc;
}

static u32 ioc3_get_link(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	int rc;

	spin_lock_irq(&ip->ioc3_lock);
	rc = mii_link_ok(&ip->mii);
	spin_unlock_irq(&ip->ioc3_lock);

	return rc;
}

static const struct ethtool_ops ioc3_ethtool_ops = {
	.get_drvinfo		= ioc3_get_drvinfo,
	.get_settings		= ioc3_get_settings,
	.set_settings		= ioc3_set_settings,
	.nway_reset		= ioc3_nway_reset,
	.get_link		= ioc3_get_link,
};

static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct ioc3_private *ip = netdev_priv(dev);
	int rc;

	spin_lock_irq(&ip->ioc3_lock);
	rc = generic_mii_ioctl(&ip->mii, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&ip->ioc3_lock);

	return rc;
}

static void ioc3_set_multicast_list(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3 *ioc3 = ip->regs;
	u64 ehar = 0;

	netif_stop_queue(dev);				/* Lock out others. */

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous.  */
		ip->emcr |= EMCR_PROMISC;
		ioc3_w_emcr(ip->emcr);
		(void) ioc3_r_emcr();
	} else {
		ip->emcr &= ~EMCR_PROMISC;
		ioc3_w_emcr(ip->emcr);			/* Clear promiscuous. */
		(void) ioc3_r_emcr();

		if ((dev->flags & IFF_ALLMULTI) ||
		    (netdev_mc_count(dev) > 64)) {
			/* Too many addresses for hashing to make sense, or
			   we want all multicast packets anyway, so accept
			   everything.  */
			ip->ehar_h = 0xffffffff;
			ip->ehar_l = 0xffffffff;
		} else {
			netdev_for_each_mc_addr(ha, dev) {
				ehar |= (1UL << ioc3_hash(ha->addr));
			}
			ip->ehar_h = ehar >> 32;
			ip->ehar_l = ehar & 0xffffffff;
		}
		ioc3_w_ehar_h(ip->ehar_h);
		ioc3_w_ehar_l(ip->ehar_l);
	}

	netif_wake_queue(dev);			/* Let us get going again. */
}

module_pci_driver(ioc3_driver);
MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
MODULE_DESCRIPTION("SGI IOC3 Ethernet driver");
MODULE_LICENSE("GPL");