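/*
 * ioc3-eth - driver for the Ethernet part of SGI's IOC3 multi-function
 * chip, as found on IOC3 PCI cards and MENET boards (see MODULE_AUTHOR
 * and MODULE_DESCRIPTION at the end of this file).
 */
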
#define IOC3_NAME	"ioc3-eth"
#define IOC3_VERSION	"2.6.3-4"

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

#ifdef CONFIG_SERIAL_8250
#include <linux/serial_core.h>
#include <linux/serial_8250.h>
#include <linux/serial_reg.h>
#endif

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <net/ip.h>

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/sn/types.h>
#include <asm/sn/ioc3.h>
#include <asm/pci/bridge.h>

#define RX_BUFFS 64

#define ETCSR_FD	((17<<ETCSR_IPGR2_SHIFT) | (11<<ETCSR_IPGR1_SHIFT) | 21)
#define ETCSR_HD	((21<<ETCSR_IPGR2_SHIFT) | (21<<ETCSR_IPGR1_SHIFT) | 21)

struct ioc3_private {
	struct ioc3 *regs;
	unsigned long *rxr;
	struct ioc3_etxd *txr;
	struct sk_buff *rx_skbs[512];
	struct sk_buff *tx_skbs[128];
	int rx_ci;			/* RX consumer index */
	int rx_pi;			/* RX producer index */
	int tx_ci;			/* TX consumer index */
	int tx_pi;			/* TX producer index */
	int txqlen;
	u32 emcr, ehar_h, ehar_l;
	spinlock_t ioc3_lock;		/* protects rings and indices */
	struct mii_if_info mii;

	struct net_device *dev;
	struct pci_dev *pdev;

	struct timer_list ioc3_timer;
};

static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void ioc3_set_multicast_list(struct net_device *dev);
static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void ioc3_timeout(struct net_device *dev);
static inline unsigned int ioc3_hash(const unsigned char *addr);
static inline void ioc3_stop(struct ioc3_private *ip);
static void ioc3_init(struct net_device *dev);

static const char ioc3_str[] = "IOC3 Ethernet";
static const struct ethtool_ops ioc3_ethtool_ops;

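/*
 * RX buffer allocation.  Receive buffers apparently need to start on an
 * IOC3_CACHELINE boundary, so ioc3_alloc_skb() over-allocates by one
 * cacheline and skb_reserve()s up to the next aligned address;
 * aligned_rx_skb_addr() returns how many bytes short of alignment the
 * given address is (0 if already aligned).
 */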
#define IOC3_CACHELINE	128UL

static inline unsigned long aligned_rx_skb_addr(unsigned long addr)
{
	return (~addr + 1) & (IOC3_CACHELINE - 1UL);
}

static inline struct sk_buff *ioc3_alloc_skb(unsigned long length,
					     unsigned int gfp_mask)
{
	struct sk_buff *skb;

	skb = alloc_skb(length + IOC3_CACHELINE - 1, gfp_mask);
	if (likely(skb)) {
		int offset = aligned_rx_skb_addr((unsigned long) skb->data);
		if (offset)
			skb_reserve(skb, offset);
	}

	return skb;
}

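/*
 * ioc3_map() converts a kernel virtual address into a DMA address for the
 * chip.  On SGI IP27 the 64-bit PCI address is built by hand: the top bits
 * carry the bridge virtual device number and the prefetch/target
 * attributes, the low bits the physical address.  On other platforms the
 * legacy virt_to_bus() mapping is used.
 */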
static inline unsigned long ioc3_map(void *ptr, unsigned long vdev)
{
#ifdef CONFIG_SGI_IP27
	vdev <<= 57;

	return vdev | (0xaUL << PCI64_ATTR_TARG_SHFT) | PCI64_ATTR_PREF |
	       ((unsigned long)ptr & TO_PHYS_MASK);
#else
	return virt_to_bus(ptr);
#endif
}

#define RX_OFFSET		10
#define RX_BUF_ALLOC_SIZE	(1664 + RX_OFFSET + IOC3_CACHELINE)

#define BARRIER() \
	__asm__("sync" ::: "memory")

#define IOC3_SIZE 0x100000

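/*
 * Register accessors.  The IOC3 Ethernet registers are 32 bits wide and
 * big-endian, so all accesses go through cpu_to_be32()/be32_to_cpu().
 * The macros expect a local variable named "ioc3" that points to the
 * mapped register block.
 */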
#define ioc3_r_mcr()		be32_to_cpu(ioc3->mcr)
#define ioc3_w_mcr(v)		do { ioc3->mcr = cpu_to_be32(v); } while (0)
#define ioc3_w_gpcr_s(v)	do { ioc3->gpcr_s = cpu_to_be32(v); } while (0)
#define ioc3_r_emcr()		be32_to_cpu(ioc3->emcr)
#define ioc3_w_emcr(v)		do { ioc3->emcr = cpu_to_be32(v); } while (0)
#define ioc3_r_eisr()		be32_to_cpu(ioc3->eisr)
#define ioc3_w_eisr(v)		do { ioc3->eisr = cpu_to_be32(v); } while (0)
#define ioc3_r_eier()		be32_to_cpu(ioc3->eier)
#define ioc3_w_eier(v)		do { ioc3->eier = cpu_to_be32(v); } while (0)
#define ioc3_r_ercsr()		be32_to_cpu(ioc3->ercsr)
#define ioc3_w_ercsr(v)		do { ioc3->ercsr = cpu_to_be32(v); } while (0)
#define ioc3_r_erbr_h()		be32_to_cpu(ioc3->erbr_h)
#define ioc3_w_erbr_h(v)	do { ioc3->erbr_h = cpu_to_be32(v); } while (0)
#define ioc3_r_erbr_l()		be32_to_cpu(ioc3->erbr_l)
#define ioc3_w_erbr_l(v)	do { ioc3->erbr_l = cpu_to_be32(v); } while (0)
#define ioc3_r_erbar()		be32_to_cpu(ioc3->erbar)
#define ioc3_w_erbar(v)		do { ioc3->erbar = cpu_to_be32(v); } while (0)
#define ioc3_r_ercir()		be32_to_cpu(ioc3->ercir)
#define ioc3_w_ercir(v)		do { ioc3->ercir = cpu_to_be32(v); } while (0)
#define ioc3_r_erpir()		be32_to_cpu(ioc3->erpir)
#define ioc3_w_erpir(v)		do { ioc3->erpir = cpu_to_be32(v); } while (0)
#define ioc3_r_ertr()		be32_to_cpu(ioc3->ertr)
#define ioc3_w_ertr(v)		do { ioc3->ertr = cpu_to_be32(v); } while (0)
#define ioc3_r_etcsr()		be32_to_cpu(ioc3->etcsr)
#define ioc3_w_etcsr(v)		do { ioc3->etcsr = cpu_to_be32(v); } while (0)
#define ioc3_r_ersr()		be32_to_cpu(ioc3->ersr)
#define ioc3_w_ersr(v)		do { ioc3->ersr = cpu_to_be32(v); } while (0)
#define ioc3_r_etcdc()		be32_to_cpu(ioc3->etcdc)
#define ioc3_w_etcdc(v)		do { ioc3->etcdc = cpu_to_be32(v); } while (0)
#define ioc3_r_ebir()		be32_to_cpu(ioc3->ebir)
#define ioc3_w_ebir(v)		do { ioc3->ebir = cpu_to_be32(v); } while (0)
#define ioc3_r_etbr_h()		be32_to_cpu(ioc3->etbr_h)
#define ioc3_w_etbr_h(v)	do { ioc3->etbr_h = cpu_to_be32(v); } while (0)
#define ioc3_r_etbr_l()		be32_to_cpu(ioc3->etbr_l)
#define ioc3_w_etbr_l(v)	do { ioc3->etbr_l = cpu_to_be32(v); } while (0)
#define ioc3_r_etcir()		be32_to_cpu(ioc3->etcir)
#define ioc3_w_etcir(v)		do { ioc3->etcir = cpu_to_be32(v); } while (0)
#define ioc3_r_etpir()		be32_to_cpu(ioc3->etpir)
#define ioc3_w_etpir(v)		do { ioc3->etpir = cpu_to_be32(v); } while (0)
#define ioc3_r_emar_h()		be32_to_cpu(ioc3->emar_h)
#define ioc3_w_emar_h(v)	do { ioc3->emar_h = cpu_to_be32(v); } while (0)
#define ioc3_r_emar_l()		be32_to_cpu(ioc3->emar_l)
#define ioc3_w_emar_l(v)	do { ioc3->emar_l = cpu_to_be32(v); } while (0)
#define ioc3_r_ehar_h()		be32_to_cpu(ioc3->ehar_h)
#define ioc3_w_ehar_h(v)	do { ioc3->ehar_h = cpu_to_be32(v); } while (0)
#define ioc3_r_ehar_l()		be32_to_cpu(ioc3->ehar_l)
#define ioc3_w_ehar_l(v)	do { ioc3->ehar_l = cpu_to_be32(v); } while (0)
#define ioc3_r_micr()		be32_to_cpu(ioc3->micr)
#define ioc3_w_micr(v)		do { ioc3->micr = cpu_to_be32(v); } while (0)
#define ioc3_r_midr_r()		be32_to_cpu(ioc3->midr_r)
#define ioc3_w_midr_r(v)	do { ioc3->midr_r = cpu_to_be32(v); } while (0)
#define ioc3_r_midr_w()		be32_to_cpu(ioc3->midr_w)
#define ioc3_w_midr_w(v)	do { ioc3->midr_w = cpu_to_be32(v); } while (0)

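/*
 * Bit-banged access to the serial-number chip ("NIC") through the MCR
 * register.  mcr_pack() encodes the pulse and sample times of one bus
 * cycle, nic_wait() spins until the cycle completes and returns the
 * sampled bit.  The bit and byte helpers below build a Dallas 1-Wire
 * style protocol on top of this.
 */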
static inline u32 mcr_pack(u32 pulse, u32 sample)
{
	return (pulse << 10) | (sample << 2);
}

static int nic_wait(struct ioc3 *ioc3)
{
	u32 mcr;

	do {
		mcr = ioc3_r_mcr();
	} while (!(mcr & 2));

	return mcr & 1;
}

static int nic_reset(struct ioc3 *ioc3)
{
	int presence;

	ioc3_w_mcr(mcr_pack(500, 65));
	presence = nic_wait(ioc3);

	ioc3_w_mcr(mcr_pack(0, 500));
	nic_wait(ioc3);

	return presence;
}

static inline int nic_read_bit(struct ioc3 *ioc3)
{
	int result;

	ioc3_w_mcr(mcr_pack(6, 13));
	result = nic_wait(ioc3);
	ioc3_w_mcr(mcr_pack(0, 100));
	nic_wait(ioc3);

	return result;
}

static inline void nic_write_bit(struct ioc3 *ioc3, int bit)
{
	if (bit)
		ioc3_w_mcr(mcr_pack(6, 110));
	else
		ioc3_w_mcr(mcr_pack(80, 30));

	nic_wait(ioc3);
}

static u32 nic_read_byte(struct ioc3 *ioc3)
{
	u32 result = 0;
	int i;

	for (i = 0; i < 8; i++)
		result = (result >> 1) | (nic_read_bit(ioc3) << 7);

	return result;
}

static void nic_write_byte(struct ioc3 *ioc3, int byte)
{
	int i, bit;

	for (i = 8; i; i--) {
		bit = byte & 1;
		byte >>= 1;

		nic_write_bit(ioc3, bit);
	}
}

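/*
 * nic_find() implements the usual 1-Wire Search ROM algorithm (command
 * 0xf0): for each of the 64 ROM bits it reads the bit and its complement,
 * resolves discrepancies using *last, and writes the chosen bit back to
 * deselect all other devices.  Returns the 64-bit ROM code found, or 0 if
 * the search failed.
 */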
static u64 nic_find(struct ioc3 *ioc3, int *last)
{
	int a, b, index, disc;
	u64 address = 0;

	nic_reset(ioc3);

	nic_write_byte(ioc3, 0xf0);

	for (index = 0, disc = 0; index < 64; index++) {
		a = nic_read_bit(ioc3);
		b = nic_read_bit(ioc3);

		if (a && b) {
			printk("NIC search failed (not fatal).\n");
			*last = 0;
			return 0;
		}

		if (!a && !b) {
			if (index == *last) {
				address |= 1UL << index;
			} else if (index > *last) {
				address &= ~(1UL << index);
				disc = index;
			} else if ((address & (1UL << index)) == 0)
				disc = index;
			nic_write_bit(ioc3, address & (1UL << index));
			continue;
		} else {
			if (a)
				address |= 1UL << index;
			else
				address &= ~(1UL << index);
			nic_write_bit(ioc3, a);
			continue;
		}
	}

	*last = disc;

	return address;
}

static int nic_init(struct ioc3 *ioc3)
{
	const char *unknown = "unknown";
	const char *type = unknown;
	u8 crc;
	u8 serial[6];
	int save = 0, i;

	while (1) {
		u64 reg;
		reg = nic_find(ioc3, &save);

		switch (reg & 0xff) {
		case 0x91:
			type = "DS1981U";
			break;
		default:
			if (save == 0) {
				return -1;
			}
			continue;
		}

		nic_reset(ioc3);

		nic_write_byte(ioc3, 0x55);
		for (i = 0; i < 8; i++)
			nic_write_byte(ioc3, (reg >> (i << 3)) & 0xff);

		reg >>= 8;
		for (i = 0; i < 6; i++) {
			serial[i] = reg & 0xff;
			reg >>= 8;
		}
		crc = reg & 0xff;
		break;
	}

	printk("Found %s NIC", type);
	if (type != unknown)
		printk(" registration number %pM, CRC %02x", serial, crc);
	printk(".\n");

	return 0;
}

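/*
 * Read the MAC address from the NIC chip: select the part via nic_init(),
 * issue a Read Memory command (0xf0) at offset 0x0000 and pull in 14
 * bytes; bytes 2..7 of that record hold the Ethernet address.
 */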
static void ioc3_get_eaddr_nic(struct ioc3_private *ip)
{
	struct ioc3 *ioc3 = ip->regs;
	u8 nic[14];
	int tries = 2;
	int i;

	ioc3_w_gpcr_s(1 << 21);

	while (tries--) {
		if (!nic_init(ioc3))
			break;
		udelay(500);
	}

	if (tries < 0) {
		printk("Failed to read MAC address\n");
		return;
	}

	nic_write_byte(ioc3, 0xf0);
	nic_write_byte(ioc3, 0x00);
	nic_write_byte(ioc3, 0x00);

	for (i = 13; i >= 0; i--)
		nic[i] = nic_read_byte(ioc3);

	for (i = 2; i < 8; i++)
		ip->dev->dev_addr[i - 2] = nic[i];
}

static void ioc3_get_eaddr(struct ioc3_private *ip)
{
	ioc3_get_eaddr_nic(ip);

	printk("Ethernet address is %pM.\n", ip->dev->dev_addr);
}

static void __ioc3_set_mac_address(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3 *ioc3 = ip->regs;

	ioc3_w_emar_h((dev->dev_addr[5] << 8) | dev->dev_addr[4]);
	ioc3_w_emar_l((dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) |
		      (dev->dev_addr[1] << 8) | dev->dev_addr[0]);
}

static int ioc3_set_mac_address(struct net_device *dev, void *addr)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct sockaddr *sa = addr;

	memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);

	spin_lock_irq(&ip->ioc3_lock);
	__ioc3_set_mac_address(dev);
	spin_unlock_irq(&ip->ioc3_lock);

	return 0;
}

static int ioc3_mdio_read(struct net_device *dev, int phy, int reg)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3 *ioc3 = ip->regs;

	while (ioc3_r_micr() & MICR_BUSY);
	ioc3_w_micr((phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG);
	while (ioc3_r_micr() & MICR_BUSY);

	return ioc3_r_midr_r() & MIDR_DATA_MASK;
}

static void ioc3_mdio_write(struct net_device *dev, int phy, int reg, int data)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3 *ioc3 = ip->regs;

	while (ioc3_r_micr() & MICR_BUSY);
	ioc3_w_midr_w(data);
	ioc3_w_micr((phy << MICR_PHYADDR_SHIFT) | reg);
	while (ioc3_r_micr() & MICR_BUSY);
}

static int ioc3_mii_init(struct ioc3_private *ip);

static struct net_device_stats *ioc3_get_stats(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3 *ioc3 = ip->regs;

	dev->stats.collisions += (ioc3_r_etcdc() & ETCDC_COLLCNT_MASK);
	return &dev->stats;
}

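/*
 * RX checksum fixup.  The hardware hands us a ones-complement sum taken
 * over the whole received frame, Ethernet header and trailing bytes
 * included.  To validate TCP/UDP packets we add the pseudo-header, back
 * out the Ethernet header sum and the bytes following the payload (which
 * the chip also summed), and fold the result; 0xffff means the checksum
 * is good and CHECKSUM_UNNECESSARY can be set.  Fragments and anything
 * that isn't TCP or UDP are left to the stack.
 */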
static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len)
{
	struct ethhdr *eh = eth_hdr(skb);
	uint32_t csum, ehsum;
	unsigned int proto;
	struct iphdr *ih;
	uint16_t *ew;
	unsigned char *cp;

	if (eh->h_proto != htons(ETH_P_IP))
		return;

	ih = (struct iphdr *) ((char *)eh + ETH_HLEN);
	if (ip_is_fragment(ih))
		return;

	proto = ih->protocol;
	if (proto != IPPROTO_TCP && proto != IPPROTO_UDP)
		return;

	csum = hwsum +
	       (ih->tot_len - (ih->ihl << 2)) +
	       htons((uint16_t)ih->protocol) +
	       (ih->saddr >> 16) + (ih->saddr & 0xffff) +
	       (ih->daddr >> 16) + (ih->daddr & 0xffff);

	ew = (uint16_t *) eh;
	ehsum = ew[0] + ew[1] + ew[2] + ew[3] + ew[4] + ew[5] + ew[6];

	ehsum = (ehsum & 0xffff) + (ehsum >> 16);
	ehsum = (ehsum & 0xffff) + (ehsum >> 16);

	csum += 0xffff ^ ehsum;

	cp = (char *)eh + len;
	if (len & 1) {
		csum += 0xffff ^ (uint16_t) ((cp[1] << 8) | cp[0]);
		csum += 0xffff ^ (uint16_t) ((cp[3] << 8) | cp[2]);
	} else {
		csum += 0xffff ^ (uint16_t) ((cp[0] << 8) | cp[1]);
		csum += 0xffff ^ (uint16_t) ((cp[2] << 8) | cp[3]);
	}

	csum = (csum & 0xffff) + (csum >> 16);
	csum = (csum & 0xffff) + (csum >> 16);

	if (csum == 0xffff)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static inline void ioc3_rx(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct sk_buff *skb, *new_skb;
	struct ioc3 *ioc3 = ip->regs;
	int rx_entry, n_entry, len;
	struct ioc3_erxbuf *rxb;
	unsigned long *rxr;
	u32 w0, err;

	rxr = ip->rxr;
	rx_entry = ip->rx_ci;
	n_entry = ip->rx_pi;

	skb = ip->rx_skbs[rx_entry];
	rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
	w0 = be32_to_cpu(rxb->w0);

	while (w0 & ERXBUF_V) {
		err = be32_to_cpu(rxb->err);
		if (err & ERXBUF_GOODPKT) {
			len = ((w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff) - 4;
			skb_trim(skb, len);
			skb->protocol = eth_type_trans(skb, dev);

			new_skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
			if (!new_skb) {
				dev->stats.rx_dropped++;
				new_skb = skb;
				goto next;
			}

			if (likely(dev->features & NETIF_F_RXCSUM))
				ioc3_tcpudp_checksum(skb,
						     w0 & ERXBUF_IPCKSUM_MASK, len);

			netif_rx(skb);

			ip->rx_skbs[rx_entry] = NULL;

			skb_put(new_skb, (1664 + RX_OFFSET));
			rxb = (struct ioc3_erxbuf *) new_skb->data;
			skb_reserve(new_skb, RX_OFFSET);

			dev->stats.rx_packets++;
			dev->stats.rx_bytes += len;
		} else {
			new_skb = skb;
			dev->stats.rx_errors++;
		}
		if (err & ERXBUF_CRCERR)
			dev->stats.rx_crc_errors++;
		if (err & ERXBUF_FRAMERR)
			dev->stats.rx_frame_errors++;
next:
		ip->rx_skbs[n_entry] = new_skb;
		rxr[n_entry] = cpu_to_be64(ioc3_map(rxb, 1));
		rxb->w0 = 0;
		n_entry = (n_entry + 1) & 511;

		rx_entry = (rx_entry + 1) & 511;
		skb = ip->rx_skbs[rx_entry];
		rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
		w0 = be32_to_cpu(rxb->w0);
	}
	ioc3_w_erpir((n_entry << 3) | ERPIR_ARM);
	ip->rx_pi = n_entry;
	ip->rx_ci = rx_entry;
}

static inline void ioc3_tx(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	unsigned long packets, bytes;
	struct ioc3 *ioc3 = ip->regs;
	int tx_entry, o_entry;
	struct sk_buff *skb;
	u32 etcir;

	spin_lock(&ip->ioc3_lock);
	etcir = ioc3_r_etcir();

	tx_entry = (etcir >> 7) & 127;
	o_entry = ip->tx_ci;
	packets = 0;
	bytes = 0;

	while (o_entry != tx_entry) {
		packets++;
		skb = ip->tx_skbs[o_entry];
		bytes += skb->len;
		dev_consume_skb_irq(skb);
		ip->tx_skbs[o_entry] = NULL;

		o_entry = (o_entry + 1) & 127;

		etcir = ioc3_r_etcir();
		tx_entry = (etcir >> 7) & 127;
	}

	dev->stats.tx_packets += packets;
	dev->stats.tx_bytes += bytes;
	ip->txqlen -= packets;

	if (ip->txqlen < 128)
		netif_wake_queue(dev);

	ip->tx_ci = o_entry;
	spin_unlock(&ip->ioc3_lock);
}

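/*
 * ioc3_error() - recover from a fatal interface error.  All we can really
 * do is report which error bits were set and then reset and reinitialize
 * the interface.  Called from the interrupt handler, which does not hold
 * ip->ioc3_lock; this function takes the lock itself.
 */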
static void ioc3_error(struct net_device *dev, u32 eisr)
{
	struct ioc3_private *ip = netdev_priv(dev);
	unsigned char *iface = dev->name;

	spin_lock(&ip->ioc3_lock);

	if (eisr & EISR_RXOFLO)
		printk(KERN_ERR "%s: RX overflow.\n", iface);
	if (eisr & EISR_RXBUFOFLO)
		printk(KERN_ERR "%s: RX buffer overflow.\n", iface);
	if (eisr & EISR_RXMEMERR)
		printk(KERN_ERR "%s: RX PCI error.\n", iface);
	if (eisr & EISR_RXPARERR)
		printk(KERN_ERR "%s: RX SSRAM parity error.\n", iface);
	if (eisr & EISR_TXBUFUFLO)
		printk(KERN_ERR "%s: TX buffer underflow.\n", iface);
	if (eisr & EISR_TXMEMERR)
		printk(KERN_ERR "%s: TX PCI error.\n", iface);

	ioc3_stop(ip);
	ioc3_init(dev);
	ioc3_mii_init(ip);

	netif_wake_queue(dev);

	spin_unlock(&ip->ioc3_lock);
}

static irqreturn_t ioc3_interrupt(int irq, void *_dev)
{
	struct net_device *dev = (struct net_device *)_dev;
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3 *ioc3 = ip->regs;
	const u32 enabled = EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
			    EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
			    EISR_TXEXPLICIT | EISR_TXMEMERR;
	u32 eisr;

	eisr = ioc3_r_eisr() & enabled;

	ioc3_w_eisr(eisr);
	(void) ioc3_r_eisr();

	if (eisr & (EISR_RXOFLO | EISR_RXBUFOFLO | EISR_RXMEMERR |
		    EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR))
		ioc3_error(dev, eisr);
	if (eisr & EISR_RXTIMERINT)
		ioc3_rx(dev);
	if (eisr & EISR_TXEXPLICIT)
		ioc3_tx(dev);

	return IRQ_HANDLED;
}

static inline void ioc3_setup_duplex(struct ioc3_private *ip)
{
	struct ioc3 *ioc3 = ip->regs;

	if (ip->mii.full_duplex) {
		ioc3_w_etcsr(ETCSR_FD);
		ip->emcr |= EMCR_DUPLEX;
	} else {
		ioc3_w_etcsr(ETCSR_HD);
		ip->emcr &= ~EMCR_DUPLEX;
	}
	ioc3_w_emcr(ip->emcr);
}

static void ioc3_timer(struct timer_list *t)
{
	struct ioc3_private *ip = from_timer(ip, t, ioc3_timer);

	mii_check_media(&ip->mii, 1, 0);
	ioc3_setup_duplex(ip);

	ip->ioc3_timer.expires = jiffies + ((12 * HZ)/10);
	add_timer(&ip->ioc3_timer);
}

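/*
 * Probe all 32 MII addresses for a PHY; a PHY is assumed present when
 * MII_PHYSID1 reads back as neither 0x0000 nor 0xffff.  Some boards
 * apparently do not probe cleanly, so with ioc3_phy_workaround set we fall
 * back to address 31 instead of failing with -ENODEV.
 */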
static int ioc3_mii_init(struct ioc3_private *ip)
{
	int i, found = 0, res = 0;
	int ioc3_phy_workaround = 1;
	u16 word;

	for (i = 0; i < 32; i++) {
		word = ioc3_mdio_read(ip->dev, i, MII_PHYSID1);

		if (word != 0xffff && word != 0x0000) {
			found = 1;
			break;
		}
	}

	if (!found) {
		if (ioc3_phy_workaround)
			i = 31;
		else {
			ip->mii.phy_id = -1;
			res = -ENODEV;
			goto out;
		}
	}

	ip->mii.phy_id = i;

out:
	return res;
}

static void ioc3_mii_start(struct ioc3_private *ip)
{
	ip->ioc3_timer.expires = jiffies + (12 * HZ)/10;
	add_timer(&ip->ioc3_timer);
}

static inline void ioc3_clean_rx_ring(struct ioc3_private *ip)
{
	struct sk_buff *skb;
	int i;

	for (i = ip->rx_ci; i & 15; i++) {
		ip->rx_skbs[ip->rx_pi] = ip->rx_skbs[ip->rx_ci];
		ip->rxr[ip->rx_pi++] = ip->rxr[ip->rx_ci++];
	}
	ip->rx_pi &= 511;
	ip->rx_ci &= 511;

	for (i = ip->rx_ci; i != ip->rx_pi; i = (i+1) & 511) {
		struct ioc3_erxbuf *rxb;
		skb = ip->rx_skbs[i];
		rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
		rxb->w0 = 0;
	}
}

static inline void ioc3_clean_tx_ring(struct ioc3_private *ip)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < 128; i++) {
		skb = ip->tx_skbs[i];
		if (skb) {
			ip->tx_skbs[i] = NULL;
			dev_kfree_skb_any(skb);
		}
		ip->txr[i].cmd = 0;
	}
	ip->tx_pi = 0;
	ip->tx_ci = 0;
}

static void ioc3_free_rings(struct ioc3_private *ip)
{
	struct sk_buff *skb;
	int rx_entry, n_entry;

	if (ip->txr) {
		ioc3_clean_tx_ring(ip);
		free_pages((unsigned long)ip->txr, 2);
		ip->txr = NULL;
	}

	if (ip->rxr) {
		n_entry = ip->rx_ci;
		rx_entry = ip->rx_pi;

		while (n_entry != rx_entry) {
			skb = ip->rx_skbs[n_entry];
			if (skb)
				dev_kfree_skb_any(skb);

			n_entry = (n_entry + 1) & 511;
		}
		free_page((unsigned long)ip->rxr);
		ip->rxr = NULL;
	}
}

static void ioc3_alloc_rings(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3_erxbuf *rxb;
	unsigned long *rxr;
	int i;

	if (ip->rxr == NULL) {
		ip->rxr = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		rxr = ip->rxr;
		if (!rxr)
			printk("ioc3_alloc_rings(): get_zeroed_page() failed!\n");

		for (i = 0; i < RX_BUFFS; i++) {
			struct sk_buff *skb;

			skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
			if (!skb) {
				show_free_areas(0, NULL);
				continue;
			}

			ip->rx_skbs[i] = skb;

			skb_put(skb, (1664 + RX_OFFSET));
			rxb = (struct ioc3_erxbuf *) skb->data;
			rxr[i] = cpu_to_be64(ioc3_map(rxb, 1));
			skb_reserve(skb, RX_OFFSET);
		}
		ip->rx_ci = 0;
		ip->rx_pi = RX_BUFFS;
	}

	if (ip->txr == NULL) {
		ip->txr = (struct ioc3_etxd *)__get_free_pages(GFP_KERNEL, 2);
		if (!ip->txr)
			printk("ioc3_alloc_rings(): __get_free_pages() failed!\n");
		ip->tx_pi = 0;
		ip->tx_ci = 0;
	}
}

static void ioc3_init_rings(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3 *ioc3 = ip->regs;
	unsigned long ring;

	ioc3_free_rings(ip);
	ioc3_alloc_rings(dev);

	ioc3_clean_rx_ring(ip);
	ioc3_clean_tx_ring(ip);

	ring = ioc3_map(ip->rxr, 0);
	ioc3_w_erbr_h(ring >> 32);
	ioc3_w_erbr_l(ring & 0xffffffff);
	ioc3_w_ercir(ip->rx_ci << 3);
	ioc3_w_erpir((ip->rx_pi << 3) | ERPIR_ARM);

	ring = ioc3_map(ip->txr, 0);

	ip->txqlen = 0;

	ioc3_w_etbr_h(ring >> 32);
	ioc3_w_etbr_l(ring & 0xffffffff);
	ioc3_w_etpir(ip->tx_pi << 7);
	ioc3_w_etcir(ip->tx_ci << 7);
	(void) ioc3_r_etcir();
}

static inline void ioc3_ssram_disc(struct ioc3_private *ip)
{
	struct ioc3 *ioc3 = ip->regs;
	volatile u32 *ssram0 = &ioc3->ssram[0x0000];
	volatile u32 *ssram1 = &ioc3->ssram[0x4000];
	unsigned int pattern = 0x5555;

	ioc3_w_emcr(ioc3_r_emcr() | (EMCR_BUFSIZ | EMCR_RAMPAR));

	*ssram0 = pattern;
	*ssram1 = ~pattern & IOC3_SSRAM_DM;

	if ((*ssram0 & IOC3_SSRAM_DM) != pattern ||
	    (*ssram1 & IOC3_SSRAM_DM) != (~pattern & IOC3_SSRAM_DM)) {
		ip->emcr = EMCR_RAMPAR;
		ioc3_w_emcr(ioc3_r_emcr() & ~EMCR_BUFSIZ);
	} else
		ip->emcr = EMCR_BUFSIZ | EMCR_RAMPAR;
}

static void ioc3_init(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3 *ioc3 = ip->regs;

	del_timer_sync(&ip->ioc3_timer);

	ioc3_w_emcr(EMCR_RST);
	(void) ioc3_r_emcr();
	udelay(4);
	ioc3_w_emcr(0);
	(void) ioc3_r_emcr();

#ifdef CONFIG_SGI_IP27
	ioc3_w_erbar(PCI64_ATTR_BAR >> 32);
#else
	ioc3_w_erbar(0);
#endif
	(void) ioc3_r_etcdc();
	ioc3_w_ercsr(15);
	ioc3_w_ertr(0);
	__ioc3_set_mac_address(dev);
	ioc3_w_ehar_h(ip->ehar_h);
	ioc3_w_ehar_l(ip->ehar_l);
	ioc3_w_ersr(42);

	ioc3_init_rings(dev);

	ip->emcr |= ((RX_OFFSET / 2) << EMCR_RXOFF_SHIFT) | EMCR_TXDMAEN |
		    EMCR_TXEN | EMCR_RXDMAEN | EMCR_RXEN | EMCR_PADEN;
	ioc3_w_emcr(ip->emcr);
	ioc3_w_eier(EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
		    EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
		    EISR_TXEXPLICIT | EISR_TXMEMERR);
	(void) ioc3_r_eier();
}

static inline void ioc3_stop(struct ioc3_private *ip)
{
	struct ioc3 *ioc3 = ip->regs;

	ioc3_w_emcr(0);
	ioc3_w_eier(0);
	(void) ioc3_r_eier();
}

static int ioc3_open(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);

	if (request_irq(dev->irq, ioc3_interrupt, IRQF_SHARED, ioc3_str, dev)) {
		printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq);
		return -EAGAIN;
	}

	ip->ehar_h = 0;
	ip->ehar_l = 0;
	ioc3_init(dev);
	ioc3_mii_start(ip);

	netif_start_queue(dev);
	return 0;
}

static int ioc3_close(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);

	del_timer_sync(&ip->ioc3_timer);

	netif_stop_queue(dev);

	ioc3_stop(ip);
	free_irq(dev->irq, dev);

	ioc3_free_rings(ip);
	return 0;
}

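/*
 * MENET detection.  An SGI MENET board carries four IOC3s in slots 0-3 of
 * the root PCI bus.  ioc3_adjacent_is_ioc3() reports whether a given slot
 * on the same bus holds an IOC3; ioc3_is_menet() assumes a MENET when the
 * bus has no parent and slots 0, 1 and 2 all contain IOC3s.
 */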
static int ioc3_adjacent_is_ioc3(struct pci_dev *pdev, int slot)
{
	struct pci_dev *dev = pci_get_slot(pdev->bus, PCI_DEVFN(slot, 0));
	int ret = 0;

	if (dev) {
		if (dev->vendor == PCI_VENDOR_ID_SGI &&
		    dev->device == PCI_DEVICE_ID_SGI_IOC3)
			ret = 1;
		pci_dev_put(dev);
	}

	return ret;
}

static int ioc3_is_menet(struct pci_dev *pdev)
{
	return pdev->bus->parent == NULL &&
	       ioc3_adjacent_is_ioc3(pdev, 0) &&
	       ioc3_adjacent_is_ioc3(pdev, 1) &&
	       ioc3_adjacent_is_ioc3(pdev, 2);
}

#ifdef CONFIG_SERIAL_8250

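/*
 * Hand one IOC3 UART to the 8250 core.  The serial clock appears to be
 * 22 MHz with an extra prescaler that is programmed through the scratch
 * register while DLAB is set, which is why COSMISC_CONSTANT is written to
 * iu_scr below and also folded into .uartclk.
 */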
static void ioc3_8250_register(struct ioc3_uartregs __iomem *uart)
{
#define COSMISC_CONSTANT 6

	struct uart_8250_port port = {
		.port = {
			.irq		= 0,
			.flags		= UPF_SKIP_TEST | UPF_BOOT_AUTOCONF,
			.iotype		= UPIO_MEM,
			.regshift	= 0,
			.uartclk	= (22000000 << 1) / COSMISC_CONSTANT,

			.membase	= (unsigned char __iomem *) uart,
			.mapbase	= (unsigned long) uart,
		}
	};
	unsigned char lcr;

	lcr = uart->iu_lcr;
	uart->iu_lcr = lcr | UART_LCR_DLAB;
	uart->iu_scr = COSMISC_CONSTANT;
	uart->iu_lcr = lcr;
	uart->iu_lcr;			/* read back to flush the write */
	serial8250_register_8250_port(&port);
}

static void ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3)
{
	/*
	 * The fourth IOC3 on an MENET board apparently has nothing attached
	 * to its serial side, so leave its serial registers alone.
	 */
	if (ioc3_is_menet(pdev) && PCI_SLOT(pdev->devfn) == 3)
		return;

	/*
	 * Switch the UARTA/UARTB pins from GPIO to UART mode and disable
	 * serial DMA.  Each write is followed by a dummy read to flush the
	 * posted write out to the chip.
	 */
	ioc3->gpcr_s = GPCR_UARTA_MODESEL | GPCR_UARTB_MODESEL;
	ioc3->gpcr_s;
	ioc3->gppr_6 = 0;
	ioc3->gppr_6;
	ioc3->gppr_7 = 0;
	ioc3->gppr_7;
	ioc3->sscr_a = ioc3->sscr_a & ~SSCR_DMA_EN;
	ioc3->sscr_a;
	ioc3->sscr_b = ioc3->sscr_b & ~SSCR_DMA_EN;
	ioc3->sscr_b;

	/* Mask the per-event serial interrupts, leaving only SA/SB_INT. */
	ioc3->sio_iec &= ~ (SIO_IR_SA_TX_MT | SIO_IR_SA_RX_FULL |
			    SIO_IR_SA_RX_HIGH | SIO_IR_SA_RX_TIMER |
			    SIO_IR_SA_DELTA_DCD | SIO_IR_SA_DELTA_CTS |
			    SIO_IR_SA_TX_EXPLICIT | SIO_IR_SA_MEMERR);
	ioc3->sio_iec |= SIO_IR_SA_INT;
	ioc3->sscr_a = 0;
	ioc3->sio_iec &= ~ (SIO_IR_SB_TX_MT | SIO_IR_SB_RX_FULL |
			    SIO_IR_SB_RX_HIGH | SIO_IR_SB_RX_TIMER |
			    SIO_IR_SB_DELTA_DCD | SIO_IR_SB_DELTA_CTS |
			    SIO_IR_SB_TX_EXPLICIT | SIO_IR_SB_MEMERR);
	ioc3->sio_iec |= SIO_IR_SB_INT;
	ioc3->sscr_b = 0;

	ioc3_8250_register(&ioc3->sregs.uarta);
	ioc3_8250_register(&ioc3->sregs.uartb);
}
#endif

static const struct net_device_ops ioc3_netdev_ops = {
	.ndo_open		= ioc3_open,
	.ndo_stop		= ioc3_close,
	.ndo_start_xmit		= ioc3_start_xmit,
	.ndo_tx_timeout		= ioc3_timeout,
	.ndo_get_stats		= ioc3_get_stats,
	.ndo_set_rx_mode	= ioc3_set_multicast_list,
	.ndo_do_ioctl		= ioc3_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ioc3_set_mac_address,
};

static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned int sw_physid1, sw_physid2;
	struct net_device *dev = NULL;
	struct ioc3_private *ip;
	struct ioc3 *ioc3;
	unsigned long ioc3_base, ioc3_size;
	u32 vendor, model, rev;
	int err, pci_using_dac;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err < 0) {
			printk(KERN_ERR "%s: Unable to obtain 64 bit DMA "
			       "for consistent allocations\n", pci_name(pdev));
			goto out;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			printk(KERN_ERR "%s: No usable DMA configuration, "
			       "aborting.\n", pci_name(pdev));
			goto out;
		}
		pci_using_dac = 0;
	}

	if (pci_enable_device(pdev))
		return -ENODEV;

	dev = alloc_etherdev(sizeof(struct ioc3_private));
	if (!dev) {
		err = -ENOMEM;
		goto out_disable;
	}

	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;

	err = pci_request_regions(pdev, "ioc3");
	if (err)
		goto out_free;

	SET_NETDEV_DEV(dev, &pdev->dev);

	ip = netdev_priv(dev);
	ip->dev = dev;

	dev->irq = pdev->irq;

	ioc3_base = pci_resource_start(pdev, 0);
	ioc3_size = pci_resource_len(pdev, 0);
	ioc3 = (struct ioc3 *) ioremap(ioc3_base, ioc3_size);
	if (!ioc3) {
		printk(KERN_CRIT "ioc3eth(%s): ioremap failed, goodbye.\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto out_res;
	}
	ip->regs = ioc3;

#ifdef CONFIG_SERIAL_8250
	ioc3_serial_probe(pdev, ioc3);
#endif

	spin_lock_init(&ip->ioc3_lock);
	timer_setup(&ip->ioc3_timer, ioc3_timer, 0);

	ioc3_stop(ip);
	ioc3_init(dev);

	ip->pdev = pdev;

	ip->mii.phy_id_mask = 0x1f;
	ip->mii.reg_num_mask = 0x1f;
	ip->mii.dev = dev;
	ip->mii.mdio_read = ioc3_mdio_read;
	ip->mii.mdio_write = ioc3_mdio_write;

	ioc3_mii_init(ip);

	if (ip->mii.phy_id == -1) {
		printk(KERN_CRIT "ioc3-eth(%s): Didn't find a PHY, goodbye.\n",
		       pci_name(pdev));
		err = -ENODEV;
		goto out_stop;
	}

	ioc3_mii_start(ip);
	ioc3_ssram_disc(ip);
	ioc3_get_eaddr(ip);

	dev->watchdog_timeo	= 5 * HZ;
	dev->netdev_ops		= &ioc3_netdev_ops;
	dev->ethtool_ops	= &ioc3_ethtool_ops;
	dev->hw_features	= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
	dev->features		= NETIF_F_IP_CSUM;

	sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1);
	sw_physid2 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID2);

	/* ioc3_remove_one() looks the netdev up via pci_get_drvdata(). */
	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err)
		goto out_stop;

	mii_check_media(&ip->mii, 1, 1);
	ioc3_setup_duplex(ip);

	vendor = (sw_physid1 << 12) | (sw_physid2 >> 4);
	model = (sw_physid2 >> 4) & 0x3f;
	rev = sw_physid2 & 0xf;
	printk(KERN_INFO "%s: Using PHY %d, vendor 0x%x, model %d, "
	       "rev %d.\n", dev->name, ip->mii.phy_id, vendor, model, rev);
	printk(KERN_INFO "%s: IOC3 SSRAM has %d kbyte.\n", dev->name,
	       ip->emcr & EMCR_BUFSIZ ? 128 : 64);

	return 0;

out_stop:
	ioc3_stop(ip);
	del_timer_sync(&ip->ioc3_timer);
	ioc3_free_rings(ip);
out_res:
	pci_release_regions(pdev);
out_free:
	free_netdev(dev);
out_disable:
	/*
	 * pci_disable_device(pdev) is not called here: the IOC3 is a
	 * multi-function chip and its other parts (serial, etc.) may still
	 * be in use.
	 */
out:
	return err;
}

static void ioc3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3 *ioc3 = ip->regs;

	unregister_netdev(dev);
	del_timer_sync(&ip->ioc3_timer);

	iounmap(ioc3);
	pci_release_regions(pdev);
	free_netdev(dev);
}

static const struct pci_device_id ioc3_pci_tbl[] = {
	{ PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, ioc3_pci_tbl);

static struct pci_driver ioc3_driver = {
	.name		= "ioc3-eth",
	.id_table	= ioc3_pci_tbl,
	.probe		= ioc3_probe,
	.remove		= ioc3_remove_one,
};

static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned long data;
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3 *ioc3 = ip->regs;
	unsigned int len;
	struct ioc3_etxd *desc;
	uint32_t w0 = 0;
	int produce;

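	/*
	 * TX checksum offload: the IOC3 can insert a ones-complement sum of
	 * the outgoing frame at a given offset, but it apparently sums the
	 * whole frame starting at the Ethernet header and knows nothing
	 * about pseudo headers.  For CHECKSUM_PARTIAL packets we therefore
	 * seed the TCP/UDP checksum field with the pseudo-header sum minus
	 * the Ethernet header's contribution and let the chip add in the
	 * rest (ETXD_DOCHECKSUM plus the checksum offset in the descriptor).
	 */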
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const struct iphdr *ih = ip_hdr(skb);
		const int proto = ih->protocol;
		unsigned int csoff;
		uint32_t csum, ehsum;
		uint16_t *eh;

		eh = (uint16_t *) skb->data;

		ehsum = eh[0] + eh[1] + eh[2] + eh[3] + eh[4] + eh[5] + eh[6];

		ehsum = (ehsum & 0xffff) + (ehsum >> 16);
		ehsum = (ehsum & 0xffff) + (ehsum >> 16);

		csum = csum_tcpudp_nofold(ih->saddr, ih->daddr,
					  ih->tot_len - (ih->ihl << 2),
					  proto, 0xffff ^ ehsum);

		csum = (csum & 0xffff) + (csum >> 16);
		csum = (csum & 0xffff) + (csum >> 16);

		csoff = ETH_HLEN + (ih->ihl << 2);
		if (proto == IPPROTO_UDP) {
			csoff += offsetof(struct udphdr, check);
			udp_hdr(skb)->check = csum;
		}
		if (proto == IPPROTO_TCP) {
			csoff += offsetof(struct tcphdr, check);
			tcp_hdr(skb)->check = csum;
		}

		w0 = ETXD_DOCHECKSUM | (csoff << ETXD_CHKOFF_SHIFT);
	}

	spin_lock_irq(&ip->ioc3_lock);

	data = (unsigned long) skb->data;
	len = skb->len;

	produce = ip->tx_pi;
	desc = &ip->txr[produce];

	if (len <= 104) {
		skb_copy_from_linear_data(skb, desc->data, skb->len);
		if (len < ETH_ZLEN) {
			memset(desc->data + len, 0, ETH_ZLEN - len);
			len = ETH_ZLEN;
		}
		desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_D0V | w0);
		desc->bufcnt = cpu_to_be32(len);
	} else if ((data ^ (data + len - 1)) & 0x4000) {
		unsigned long b2 = (data | 0x3fffUL) + 1UL;
		unsigned long s1 = b2 - data;
		unsigned long s2 = data + len - b2;

		desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE |
					ETXD_B1V | ETXD_B2V | w0);
		desc->bufcnt = cpu_to_be32((s1 << ETXD_B1CNT_SHIFT) |
					   (s2 << ETXD_B2CNT_SHIFT));
		desc->p1 = cpu_to_be64(ioc3_map(skb->data, 1));
		desc->p2 = cpu_to_be64(ioc3_map((void *) b2, 1));
	} else {
		desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_B1V | w0);
		desc->bufcnt = cpu_to_be32(len << ETXD_B1CNT_SHIFT);
		desc->p1 = cpu_to_be64(ioc3_map(skb->data, 1));
	}

	BARRIER();

	ip->tx_skbs[produce] = skb;
	produce = (produce + 1) & 127;
	ip->tx_pi = produce;
	ioc3_w_etpir(produce << 7);

	ip->txqlen++;

	if (ip->txqlen >= 127)
		netif_stop_queue(dev);

	spin_unlock_irq(&ip->ioc3_lock);

	return NETDEV_TX_OK;
}

static void ioc3_timeout(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);

	spin_lock_irq(&ip->ioc3_lock);

	ioc3_stop(ip);
	ioc3_init(dev);
	ioc3_mii_init(ip);
	ioc3_mii_start(ip);

	spin_unlock_irq(&ip->ioc3_lock);

	netif_wake_queue(dev);
}

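/*
 * ioc3_hash() - map a multicast address to its bit index in the 64-bit
 * EHAR logical address filter: take the low six bits of the little-endian
 * CRC-32 of the address and reverse their bit order.
 */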
static inline unsigned int ioc3_hash(const unsigned char *addr)
{
	unsigned int temp = 0;
	u32 crc;
	int bits;

	crc = ether_crc_le(ETH_ALEN, addr);

	crc &= 0x3f;
	for (bits = 6; --bits >= 0; ) {
		temp <<= 1;
		temp |= (crc & 0x1);
		crc >>= 1;
	}

	return temp;
}

static void ioc3_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct ioc3_private *ip = netdev_priv(dev);

	strlcpy(info->driver, IOC3_NAME, sizeof(info->driver));
	strlcpy(info->version, IOC3_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(ip->pdev), sizeof(info->bus_info));
}

static int ioc3_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *cmd)
{
	struct ioc3_private *ip = netdev_priv(dev);

	spin_lock_irq(&ip->ioc3_lock);
	mii_ethtool_get_link_ksettings(&ip->mii, cmd);
	spin_unlock_irq(&ip->ioc3_lock);

	return 0;
}

static int ioc3_set_link_ksettings(struct net_device *dev,
				   const struct ethtool_link_ksettings *cmd)
{
	struct ioc3_private *ip = netdev_priv(dev);
	int rc;

	spin_lock_irq(&ip->ioc3_lock);
	rc = mii_ethtool_set_link_ksettings(&ip->mii, cmd);
	spin_unlock_irq(&ip->ioc3_lock);

	return rc;
}

static int ioc3_nway_reset(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	int rc;

	spin_lock_irq(&ip->ioc3_lock);
	rc = mii_nway_restart(&ip->mii);
	spin_unlock_irq(&ip->ioc3_lock);

	return rc;
}

static u32 ioc3_get_link(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	int rc;

	spin_lock_irq(&ip->ioc3_lock);
	rc = mii_link_ok(&ip->mii);
	spin_unlock_irq(&ip->ioc3_lock);

	return rc;
}

static const struct ethtool_ops ioc3_ethtool_ops = {
	.get_drvinfo		= ioc3_get_drvinfo,
	.nway_reset		= ioc3_nway_reset,
	.get_link		= ioc3_get_link,
	.get_link_ksettings	= ioc3_get_link_ksettings,
	.set_link_ksettings	= ioc3_set_link_ksettings,
};

static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct ioc3_private *ip = netdev_priv(dev);
	int rc;

	spin_lock_irq(&ip->ioc3_lock);
	rc = generic_mii_ioctl(&ip->mii, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&ip->ioc3_lock);

	return rc;
}

static void ioc3_set_multicast_list(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3 *ioc3 = ip->regs;
	u64 ehar = 0;

	netif_stop_queue(dev);

	if (dev->flags & IFF_PROMISC) {
		ip->emcr |= EMCR_PROMISC;
		ioc3_w_emcr(ip->emcr);
		(void) ioc3_r_emcr();
	} else {
		ip->emcr &= ~EMCR_PROMISC;
		ioc3_w_emcr(ip->emcr);
		(void) ioc3_r_emcr();

		if ((dev->flags & IFF_ALLMULTI) ||
		    (netdev_mc_count(dev) > 64)) {
			ip->ehar_h = 0xffffffff;
			ip->ehar_l = 0xffffffff;
		} else {
			netdev_for_each_mc_addr(ha, dev) {
				ehar |= (1UL << ioc3_hash(ha->addr));
			}
			ip->ehar_h = ehar >> 32;
			ip->ehar_l = ehar & 0xffffffff;
		}
		ioc3_w_ehar_h(ip->ehar_h);
		ioc3_w_ehar_l(ip->ehar_l);
	}

	netif_wake_queue(dev);
}

module_pci_driver(ioc3_driver);
MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
MODULE_DESCRIPTION("SGI IOC3 Ethernet driver");
MODULE_LICENSE("GPL");