/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Driver for SGI's IOC3 based Ethernet cards as found in the PCI card.
 *
 * Author: Ralf Baechle <ralf@linux-mips.org>
 */

30#define IOC3_NAME "ioc3-eth"
31#define IOC3_VERSION "2.6.3-4"
32
33#include <linux/delay.h>
34#include <linux/kernel.h>
35#include <linux/mm.h>
36#include <linux/errno.h>
37#include <linux/module.h>
38#include <linux/pci.h>
39#include <linux/crc32.h>
40#include <linux/mii.h>
41#include <linux/in.h>
42#include <linux/ip.h>
43#include <linux/tcp.h>
44#include <linux/udp.h>
45#include <linux/dma-mapping.h>
46#include <linux/gfp.h>
47
48#ifdef CONFIG_SERIAL_8250
49#include <linux/serial_core.h>
50#include <linux/serial_8250.h>
51#include <linux/serial_reg.h>
52#endif
53
54#include <linux/netdevice.h>
55#include <linux/etherdevice.h>
56#include <linux/ethtool.h>
57#include <linux/skbuff.h>
58#include <net/ip.h>
59
60#include <asm/byteorder.h>
61#include <asm/io.h>
62#include <asm/pgtable.h>
63#include <linux/uaccess.h>
64#include <asm/sn/types.h>
65#include <asm/sn/ioc3.h>
66#include <asm/pci/bridge.h>
67
68
69
70
71
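/*
 * Number of receive buffers initially posted to the 512-entry RX ring.
 */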
72#define RX_BUFFS 64
73
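/*
 * Transmit control register (ETCSR) settings for full and half duplex;
 * ioc3_setup_duplex() writes one of these depending on the negotiated
 * duplex mode.
 */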
74#define ETCSR_FD ((17<<ETCSR_IPGR2_SHIFT) | (11<<ETCSR_IPGR1_SHIFT) | 21)
75#define ETCSR_HD ((21<<ETCSR_IPGR2_SHIFT) | (21<<ETCSR_IPGR1_SHIFT) | 21)
76
77
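/*
 * Per-adapter private state: the 512-entry RX and 128-entry TX rings, their
 * consumer/producer indices and the MII bookkeeping.
 */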
78struct ioc3_private {
79 struct ioc3 *regs;
80 unsigned long *rxr;
81 struct ioc3_etxd *txr;
82 struct sk_buff *rx_skbs[512];
83 struct sk_buff *tx_skbs[128];
84 int rx_ci;
85 int rx_pi;
86 int tx_ci;
87 int tx_pi;
88 int txqlen;
89 u32 emcr, ehar_h, ehar_l;
90 spinlock_t ioc3_lock;
91 struct mii_if_info mii;
92
93 struct net_device *dev;
94 struct pci_dev *pdev;
95
96
97 struct timer_list ioc3_timer;
98};
99
100static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
101static void ioc3_set_multicast_list(struct net_device *dev);
102static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
103static void ioc3_timeout(struct net_device *dev);
104static inline unsigned int ioc3_hash(const unsigned char *addr);
105static inline void ioc3_stop(struct ioc3_private *ip);
106static void ioc3_init(struct net_device *dev);
107
108static const char ioc3_str[] = "IOC3 Ethernet";
109static const struct ethtool_ops ioc3_ethtool_ops;
110
111
112
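/*
 * The chip DMAs directly into receive buffers, so ioc3_alloc_skb() reserves
 * up to IOC3_CACHELINE - 1 bytes of headroom to align skb->data on an IOC3
 * cacheline boundary.
 */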
113#define IOC3_CACHELINE 128UL
114
115static inline unsigned long aligned_rx_skb_addr(unsigned long addr)
116{
117 return (~addr + 1) & (IOC3_CACHELINE - 1UL);
118}
119
static inline struct sk_buff *ioc3_alloc_skb(unsigned long length,
					     unsigned int gfp_mask)
122{
123 struct sk_buff *skb;
124
125 skb = alloc_skb(length + IOC3_CACHELINE - 1, gfp_mask);
126 if (likely(skb)) {
127 int offset = aligned_rx_skb_addr((unsigned long) skb->data);
128 if (offset)
129 skb_reserve(skb, offset);
130 }
131
132 return skb;
133}
134
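/*
 * Turn a kernel virtual address into a bus address the chip can DMA to or
 * from.  On IP27 this builds a 64-bit direct-mapped, prefetchable PCI
 * address; other platforms fall back to virt_to_bus().
 */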
135static inline unsigned long ioc3_map(void *ptr, unsigned long vdev)
136{
137#ifdef CONFIG_SGI_IP27
138 vdev <<= 57;
139
140 return vdev | (0xaUL << PCI64_ATTR_TARG_SHFT) | PCI64_ATTR_PREF |
141 ((unsigned long)ptr & TO_PHYS_MASK);
142#else
143 return virt_to_bus(ptr);
144#endif
145}
146
147
148
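/*
 * Each receive buffer holds a full 1664-byte frame plus RX_OFFSET bytes of
 * receive status header (struct ioc3_erxbuf) in front of the packet data
 * and cacheline alignment slack.
 */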
149#define RX_OFFSET 10
150#define RX_BUF_ALLOC_SIZE (1664 + RX_OFFSET + IOC3_CACHELINE)
151
152
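/* Make sure descriptor updates are visible to the chip before kicking it. */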
153#define BARRIER() \
154 __asm__("sync" ::: "memory")
155
156
157#define IOC3_SIZE 0x100000
158
159
160
161
162
163
164
165
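/*
 * All IOC3 registers are big endian; these accessors hide the byte swapping
 * from the rest of the driver.
 */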
166#define ioc3_r_mcr() be32_to_cpu(ioc3->mcr)
167#define ioc3_w_mcr(v) do { ioc3->mcr = cpu_to_be32(v); } while (0)
168#define ioc3_w_gpcr_s(v) do { ioc3->gpcr_s = cpu_to_be32(v); } while (0)
169#define ioc3_r_emcr() be32_to_cpu(ioc3->emcr)
170#define ioc3_w_emcr(v) do { ioc3->emcr = cpu_to_be32(v); } while (0)
171#define ioc3_r_eisr() be32_to_cpu(ioc3->eisr)
172#define ioc3_w_eisr(v) do { ioc3->eisr = cpu_to_be32(v); } while (0)
173#define ioc3_r_eier() be32_to_cpu(ioc3->eier)
174#define ioc3_w_eier(v) do { ioc3->eier = cpu_to_be32(v); } while (0)
175#define ioc3_r_ercsr() be32_to_cpu(ioc3->ercsr)
176#define ioc3_w_ercsr(v) do { ioc3->ercsr = cpu_to_be32(v); } while (0)
177#define ioc3_r_erbr_h() be32_to_cpu(ioc3->erbr_h)
178#define ioc3_w_erbr_h(v) do { ioc3->erbr_h = cpu_to_be32(v); } while (0)
179#define ioc3_r_erbr_l() be32_to_cpu(ioc3->erbr_l)
180#define ioc3_w_erbr_l(v) do { ioc3->erbr_l = cpu_to_be32(v); } while (0)
181#define ioc3_r_erbar() be32_to_cpu(ioc3->erbar)
182#define ioc3_w_erbar(v) do { ioc3->erbar = cpu_to_be32(v); } while (0)
183#define ioc3_r_ercir() be32_to_cpu(ioc3->ercir)
184#define ioc3_w_ercir(v) do { ioc3->ercir = cpu_to_be32(v); } while (0)
185#define ioc3_r_erpir() be32_to_cpu(ioc3->erpir)
186#define ioc3_w_erpir(v) do { ioc3->erpir = cpu_to_be32(v); } while (0)
187#define ioc3_r_ertr() be32_to_cpu(ioc3->ertr)
188#define ioc3_w_ertr(v) do { ioc3->ertr = cpu_to_be32(v); } while (0)
189#define ioc3_r_etcsr() be32_to_cpu(ioc3->etcsr)
190#define ioc3_w_etcsr(v) do { ioc3->etcsr = cpu_to_be32(v); } while (0)
191#define ioc3_r_ersr() be32_to_cpu(ioc3->ersr)
192#define ioc3_w_ersr(v) do { ioc3->ersr = cpu_to_be32(v); } while (0)
193#define ioc3_r_etcdc() be32_to_cpu(ioc3->etcdc)
194#define ioc3_w_etcdc(v) do { ioc3->etcdc = cpu_to_be32(v); } while (0)
195#define ioc3_r_ebir() be32_to_cpu(ioc3->ebir)
196#define ioc3_w_ebir(v) do { ioc3->ebir = cpu_to_be32(v); } while (0)
197#define ioc3_r_etbr_h() be32_to_cpu(ioc3->etbr_h)
198#define ioc3_w_etbr_h(v) do { ioc3->etbr_h = cpu_to_be32(v); } while (0)
199#define ioc3_r_etbr_l() be32_to_cpu(ioc3->etbr_l)
200#define ioc3_w_etbr_l(v) do { ioc3->etbr_l = cpu_to_be32(v); } while (0)
201#define ioc3_r_etcir() be32_to_cpu(ioc3->etcir)
202#define ioc3_w_etcir(v) do { ioc3->etcir = cpu_to_be32(v); } while (0)
203#define ioc3_r_etpir() be32_to_cpu(ioc3->etpir)
204#define ioc3_w_etpir(v) do { ioc3->etpir = cpu_to_be32(v); } while (0)
205#define ioc3_r_emar_h() be32_to_cpu(ioc3->emar_h)
206#define ioc3_w_emar_h(v) do { ioc3->emar_h = cpu_to_be32(v); } while (0)
207#define ioc3_r_emar_l() be32_to_cpu(ioc3->emar_l)
208#define ioc3_w_emar_l(v) do { ioc3->emar_l = cpu_to_be32(v); } while (0)
209#define ioc3_r_ehar_h() be32_to_cpu(ioc3->ehar_h)
210#define ioc3_w_ehar_h(v) do { ioc3->ehar_h = cpu_to_be32(v); } while (0)
211#define ioc3_r_ehar_l() be32_to_cpu(ioc3->ehar_l)
212#define ioc3_w_ehar_l(v) do { ioc3->ehar_l = cpu_to_be32(v); } while (0)
213#define ioc3_r_micr() be32_to_cpu(ioc3->micr)
214#define ioc3_w_micr(v) do { ioc3->micr = cpu_to_be32(v); } while (0)
215#define ioc3_r_midr_r() be32_to_cpu(ioc3->midr_r)
216#define ioc3_w_midr_r(v) do { ioc3->midr_r = cpu_to_be32(v); } while (0)
217#define ioc3_r_midr_w() be32_to_cpu(ioc3->midr_w)
218#define ioc3_w_midr_w(v) do { ioc3->midr_w = cpu_to_be32(v); } while (0)
219
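/*
 * The MAC address is stored in a Number-In-a-Can style one-wire EEPROM
 * (the driver recognizes the DS1981U) attached to the IOC3's MCR interface.
 * The routines below bit-bang the one-wire protocol through MCR.
 */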
220static inline u32 mcr_pack(u32 pulse, u32 sample)
221{
222 return (pulse << 10) | (sample << 2);
223}
224
225static int nic_wait(struct ioc3 *ioc3)
226{
227 u32 mcr;
228
229 do {
230 mcr = ioc3_r_mcr();
231 } while (!(mcr & 2));
232
233 return mcr & 1;
234}
235
236static int nic_reset(struct ioc3 *ioc3)
237{
238 int presence;
239
240 ioc3_w_mcr(mcr_pack(500, 65));
241 presence = nic_wait(ioc3);
242
243 ioc3_w_mcr(mcr_pack(0, 500));
244 nic_wait(ioc3);
245
246 return presence;
247}
248
249static inline int nic_read_bit(struct ioc3 *ioc3)
250{
251 int result;
252
253 ioc3_w_mcr(mcr_pack(6, 13));
254 result = nic_wait(ioc3);
255 ioc3_w_mcr(mcr_pack(0, 100));
256 nic_wait(ioc3);
257
258 return result;
259}
260
261static inline void nic_write_bit(struct ioc3 *ioc3, int bit)
262{
263 if (bit)
264 ioc3_w_mcr(mcr_pack(6, 110));
265 else
266 ioc3_w_mcr(mcr_pack(80, 30));
267
268 nic_wait(ioc3);
269}
270
271
272
273
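/* Read a byte, least significant bit first, from the one-wire device. */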
274static u32 nic_read_byte(struct ioc3 *ioc3)
275{
276 u32 result = 0;
277 int i;
278
279 for (i = 0; i < 8; i++)
280 result = (result >> 1) | (nic_read_bit(ioc3) << 7);
281
282 return result;
283}
284
285
286
287
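/* Write a byte, least significant bit first, to the one-wire device. */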
288static void nic_write_byte(struct ioc3 *ioc3, int byte)
289{
290 int i, bit;
291
292 for (i = 8; i; i--) {
293 bit = byte & 1;
294 byte >>= 1;
295
296 nic_write_bit(ioc3, bit);
297 }
298}
299
300static u64 nic_find(struct ioc3 *ioc3, int *last)
301{
302 int a, b, index, disc;
303 u64 address = 0;
304
305 nic_reset(ioc3);
306
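	/* Search ROM command. */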
307 nic_write_byte(ioc3, 0xf0);
308
309
310 for (index = 0, disc = 0; index < 64; index++) {
311 a = nic_read_bit(ioc3);
312 b = nic_read_bit(ioc3);
313
314 if (a && b) {
315 printk("NIC search failed (not fatal).\n");
316 *last = 0;
317 return 0;
318 }
319
320 if (!a && !b) {
321 if (index == *last) {
322 address |= 1UL << index;
323 } else if (index > *last) {
324 address &= ~(1UL << index);
325 disc = index;
326 } else if ((address & (1UL << index)) == 0)
327 disc = index;
328 nic_write_bit(ioc3, address & (1UL << index));
329 continue;
330 } else {
331 if (a)
332 address |= 1UL << index;
333 else
334 address &= ~(1UL << index);
335 nic_write_bit(ioc3, a);
336 continue;
337 }
338 }
339
340 *last = disc;
341
342 return address;
343}
344
345static int nic_init(struct ioc3 *ioc3)
346{
347 const char *unknown = "unknown";
348 const char *type = unknown;
349 u8 crc;
350 u8 serial[6];
351 int save = 0, i;
352
353 while (1) {
354 u64 reg;
355 reg = nic_find(ioc3, &save);
356
357 switch (reg & 0xff) {
358 case 0x91:
359 type = "DS1981U";
360 break;
361 default:
362 if (save == 0) {
363
364 return -1;
365 }
366 continue;
367 }
368
369 nic_reset(ioc3);
370
371
372 nic_write_byte(ioc3, 0x55);
373 for (i = 0; i < 8; i++)
374 nic_write_byte(ioc3, (reg >> (i << 3)) & 0xff);
375
376 reg >>= 8;
377 for (i = 0; i < 6; i++) {
378 serial[i] = reg & 0xff;
379 reg >>= 8;
380 }
381 crc = reg & 0xff;
382 break;
383 }
384
385 printk("Found %s NIC", type);
386 if (type != unknown)
387 printk (" registration number %pM, CRC %02x", serial, crc);
388 printk(".\n");
389
390 return 0;
391}
392
393
394
395
396
397static void ioc3_get_eaddr_nic(struct ioc3_private *ip)
398{
399 struct ioc3 *ioc3 = ip->regs;
400 u8 nic[14];
401 int tries = 2;
402 int i;
403
404 ioc3_w_gpcr_s(1 << 21);
405
406 while (tries--) {
407 if (!nic_init(ioc3))
408 break;
409 udelay(500);
410 }
411
412 if (tries < 0) {
413 printk("Failed to read MAC address\n");
414 return;
415 }
416
417
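	/* Read Memory command with a two byte start address of 0. */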
418 nic_write_byte(ioc3, 0xf0);
419 nic_write_byte(ioc3, 0x00);
420 nic_write_byte(ioc3, 0x00);
421
422 for (i = 13; i >= 0; i--)
423 nic[i] = nic_read_byte(ioc3);
424
425 for (i = 2; i < 8; i++)
426 ip->dev->dev_addr[i - 2] = nic[i];
427}
428
429
430
431
432
433
434static void ioc3_get_eaddr(struct ioc3_private *ip)
435{
436 ioc3_get_eaddr_nic(ip);
437
438 printk("Ethernet address is %pM.\n", ip->dev->dev_addr);
439}
440
441static void __ioc3_set_mac_address(struct net_device *dev)
442{
443 struct ioc3_private *ip = netdev_priv(dev);
444 struct ioc3 *ioc3 = ip->regs;
445
446 ioc3_w_emar_h((dev->dev_addr[5] << 8) | dev->dev_addr[4]);
447 ioc3_w_emar_l((dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) |
448 (dev->dev_addr[1] << 8) | dev->dev_addr[0]);
449}
450
451static int ioc3_set_mac_address(struct net_device *dev, void *addr)
452{
453 struct ioc3_private *ip = netdev_priv(dev);
454 struct sockaddr *sa = addr;
455
456 memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
457
458 spin_lock_irq(&ip->ioc3_lock);
459 __ioc3_set_mac_address(dev);
460 spin_unlock_irq(&ip->ioc3_lock);
461
462 return 0;
463}
464
465
466
467
468
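/*
 * MII (MDIO) access goes through the MICR/MIDR registers; each transaction
 * simply spins on the MICR busy bit.
 */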
469static int ioc3_mdio_read(struct net_device *dev, int phy, int reg)
470{
471 struct ioc3_private *ip = netdev_priv(dev);
472 struct ioc3 *ioc3 = ip->regs;
473
474 while (ioc3_r_micr() & MICR_BUSY);
475 ioc3_w_micr((phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG);
476 while (ioc3_r_micr() & MICR_BUSY);
477
478 return ioc3_r_midr_r() & MIDR_DATA_MASK;
479}
480
481static void ioc3_mdio_write(struct net_device *dev, int phy, int reg, int data)
482{
483 struct ioc3_private *ip = netdev_priv(dev);
484 struct ioc3 *ioc3 = ip->regs;
485
486 while (ioc3_r_micr() & MICR_BUSY);
487 ioc3_w_midr_w(data);
488 ioc3_w_micr((phy << MICR_PHYADDR_SHIFT) | reg);
489 while (ioc3_r_micr() & MICR_BUSY);
490}
491
492static int ioc3_mii_init(struct ioc3_private *ip);
493
494static struct net_device_stats *ioc3_get_stats(struct net_device *dev)
495{
496 struct ioc3_private *ip = netdev_priv(dev);
497 struct ioc3 *ioc3 = ip->regs;
498
499 dev->stats.collisions += (ioc3_r_etcdc() & ETCDC_COLLCNT_MASK);
500 return &dev->stats;
501}
502
503static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len)
504{
505 struct ethhdr *eh = eth_hdr(skb);
506 uint32_t csum, ehsum;
507 unsigned int proto;
508 struct iphdr *ih;
509 uint16_t *ew;
510 unsigned char *cp;
511
	/*
	 * The hardware sums the whole frame as raw 16-bit words, including
	 * the Ethernet header and the bytes it reads past the end of the
	 * packet.  To verify the embedded TCP/UDP checksum we add the
	 * pseudo header back in, cancel out the Ethernet header and the
	 * trailing bytes by adding their one's complement, and check that
	 * the result folds to 0xffff.  Only unfragmented IPv4 TCP and UDP
	 * packets are handled here; everything else is left to the stack.
	 */
526 if (eh->h_proto != htons(ETH_P_IP))
527 return;
528
529 ih = (struct iphdr *) ((char *)eh + ETH_HLEN);
530 if (ip_is_fragment(ih))
531 return;
532
533 proto = ih->protocol;
534 if (proto != IPPROTO_TCP && proto != IPPROTO_UDP)
535 return;
536
537
538 csum = hwsum +
539 (ih->tot_len - (ih->ihl << 2)) +
540 htons((uint16_t)ih->protocol) +
541 (ih->saddr >> 16) + (ih->saddr & 0xffff) +
542 (ih->daddr >> 16) + (ih->daddr & 0xffff);
543
544
545 ew = (uint16_t *) eh;
546 ehsum = ew[0] + ew[1] + ew[2] + ew[3] + ew[4] + ew[5] + ew[6];
547
548 ehsum = (ehsum & 0xffff) + (ehsum >> 16);
549 ehsum = (ehsum & 0xffff) + (ehsum >> 16);
550
551 csum += 0xffff ^ ehsum;
552
553
554
555 cp = (char *)eh + len;
556 if (len & 1) {
557 csum += 0xffff ^ (uint16_t) ((cp[1] << 8) | cp[0]);
558 csum += 0xffff ^ (uint16_t) ((cp[3] << 8) | cp[2]);
559 } else {
560 csum += 0xffff ^ (uint16_t) ((cp[0] << 8) | cp[1]);
561 csum += 0xffff ^ (uint16_t) ((cp[2] << 8) | cp[3]);
562 }
563
564 csum = (csum & 0xffff) + (csum >> 16);
565 csum = (csum & 0xffff) + (csum >> 16);
566
567 if (csum == 0xffff)
568 skb->ip_summed = CHECKSUM_UNNECESSARY;
569}
570
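/* Harvest completed receive descriptors and repost fresh buffers. */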
571static inline void ioc3_rx(struct net_device *dev)
572{
573 struct ioc3_private *ip = netdev_priv(dev);
574 struct sk_buff *skb, *new_skb;
575 struct ioc3 *ioc3 = ip->regs;
576 int rx_entry, n_entry, len;
577 struct ioc3_erxbuf *rxb;
578 unsigned long *rxr;
579 u32 w0, err;
580
581 rxr = ip->rxr;
582 rx_entry = ip->rx_ci;
583 n_entry = ip->rx_pi;
584
585 skb = ip->rx_skbs[rx_entry];
586 rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
587 w0 = be32_to_cpu(rxb->w0);
588
589 while (w0 & ERXBUF_V) {
590 err = be32_to_cpu(rxb->err);
591 if (err & ERXBUF_GOODPKT) {
592 len = ((w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff) - 4;
593 skb_trim(skb, len);
594 skb->protocol = eth_type_trans(skb, dev);
595
596 new_skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
597 if (!new_skb) {
				/* Out of memory: drop the packet and recycle
				 * its buffer to keep the ring full. */
600 dev->stats.rx_dropped++;
601 new_skb = skb;
602 goto next;
603 }
604
605 if (likely(dev->features & NETIF_F_RXCSUM))
606 ioc3_tcpudp_checksum(skb,
607 w0 & ERXBUF_IPCKSUM_MASK, len);
608
609 netif_rx(skb);
610
611 ip->rx_skbs[rx_entry] = NULL;
612
613
614 skb_put(new_skb, (1664 + RX_OFFSET));
615 rxb = (struct ioc3_erxbuf *) new_skb->data;
616 skb_reserve(new_skb, RX_OFFSET);
617
618 dev->stats.rx_packets++;
619 dev->stats.rx_bytes += len;
620 } else {
			/* Bad frame: the skb never reached the stack, so
			 * just recycle it. */
624 new_skb = skb;
625 dev->stats.rx_errors++;
626 }
627 if (err & ERXBUF_CRCERR)
628 dev->stats.rx_crc_errors++;
629 if (err & ERXBUF_FRAMERR)
630 dev->stats.rx_frame_errors++;
631next:
632 ip->rx_skbs[n_entry] = new_skb;
633 rxr[n_entry] = cpu_to_be64(ioc3_map(rxb, 1));
634 rxb->w0 = 0;
635 n_entry = (n_entry + 1) & 511;
636
637
638 rx_entry = (rx_entry + 1) & 511;
639 skb = ip->rx_skbs[rx_entry];
640 rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
641 w0 = be32_to_cpu(rxb->w0);
642 }
643 ioc3_w_erpir((n_entry << 3) | ERPIR_ARM);
644 ip->rx_pi = n_entry;
645 ip->rx_ci = rx_entry;
646}
647
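/* Reclaim transmitted skbs, update stats and wake the queue as it drains. */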
648static inline void ioc3_tx(struct net_device *dev)
649{
650 struct ioc3_private *ip = netdev_priv(dev);
651 unsigned long packets, bytes;
652 struct ioc3 *ioc3 = ip->regs;
653 int tx_entry, o_entry;
654 struct sk_buff *skb;
655 u32 etcir;
656
657 spin_lock(&ip->ioc3_lock);
658 etcir = ioc3_r_etcir();
659
660 tx_entry = (etcir >> 7) & 127;
661 o_entry = ip->tx_ci;
662 packets = 0;
663 bytes = 0;
664
665 while (o_entry != tx_entry) {
666 packets++;
667 skb = ip->tx_skbs[o_entry];
668 bytes += skb->len;
669 dev_kfree_skb_irq(skb);
670 ip->tx_skbs[o_entry] = NULL;
671
672 o_entry = (o_entry + 1) & 127;
673
674 etcir = ioc3_r_etcir();
675 tx_entry = (etcir >> 7) & 127;
676 }
677
678 dev->stats.tx_packets += packets;
679 dev->stats.tx_bytes += bytes;
680 ip->txqlen -= packets;
681
682 if (ip->txqlen < 128)
683 netif_wake_queue(dev);
684
685 ip->tx_ci = o_entry;
686 spin_unlock(&ip->ioc3_lock);
687}
688
/*
 * Deal with fatal IOC3 errors.  For now we simply log the condition, reset
 * the chip and bring the interface back up; a flood of such errors would
 * deserve more graceful handling.
 */
696static void ioc3_error(struct net_device *dev, u32 eisr)
697{
698 struct ioc3_private *ip = netdev_priv(dev);
	const char *iface = dev->name;
700
701 spin_lock(&ip->ioc3_lock);
702
703 if (eisr & EISR_RXOFLO)
704 printk(KERN_ERR "%s: RX overflow.\n", iface);
705 if (eisr & EISR_RXBUFOFLO)
706 printk(KERN_ERR "%s: RX buffer overflow.\n", iface);
707 if (eisr & EISR_RXMEMERR)
708 printk(KERN_ERR "%s: RX PCI error.\n", iface);
709 if (eisr & EISR_RXPARERR)
710 printk(KERN_ERR "%s: RX SSRAM parity error.\n", iface);
711 if (eisr & EISR_TXBUFUFLO)
712 printk(KERN_ERR "%s: TX buffer underflow.\n", iface);
713 if (eisr & EISR_TXMEMERR)
714 printk(KERN_ERR "%s: TX PCI error.\n", iface);
715
716 ioc3_stop(ip);
717 ioc3_init(dev);
718 ioc3_mii_init(ip);
719
720 netif_wake_queue(dev);
721
722 spin_unlock(&ip->ioc3_lock);
723}
724
725
726
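/*
 * One interrupt handles RX completion, TX completion and the various error
 * conditions signalled through EISR.
 */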
727static irqreturn_t ioc3_interrupt(int irq, void *_dev)
728{
729 struct net_device *dev = (struct net_device *)_dev;
730 struct ioc3_private *ip = netdev_priv(dev);
731 struct ioc3 *ioc3 = ip->regs;
732 const u32 enabled = EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
733 EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
734 EISR_TXEXPLICIT | EISR_TXMEMERR;
735 u32 eisr;
736
737 eisr = ioc3_r_eisr() & enabled;
738
739 ioc3_w_eisr(eisr);
740 (void) ioc3_r_eisr();
741
742 if (eisr & (EISR_RXOFLO | EISR_RXBUFOFLO | EISR_RXMEMERR |
743 EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR))
744 ioc3_error(dev, eisr);
745 if (eisr & EISR_RXTIMERINT)
746 ioc3_rx(dev);
747 if (eisr & EISR_TXEXPLICIT)
748 ioc3_tx(dev);
749
750 return IRQ_HANDLED;
751}
752
753static inline void ioc3_setup_duplex(struct ioc3_private *ip)
754{
755 struct ioc3 *ioc3 = ip->regs;
756
757 if (ip->mii.full_duplex) {
758 ioc3_w_etcsr(ETCSR_FD);
759 ip->emcr |= EMCR_DUPLEX;
760 } else {
761 ioc3_w_etcsr(ETCSR_HD);
762 ip->emcr &= ~EMCR_DUPLEX;
763 }
764 ioc3_w_emcr(ip->emcr);
765}
766
767static void ioc3_timer(unsigned long data)
768{
769 struct ioc3_private *ip = (struct ioc3_private *) data;
770
771
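	/* Poll the PHY and adjust the duplex setting if the link changed;
	 * re-armed every 1.2 seconds. */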
772 mii_check_media(&ip->mii, 1, 0);
773 ioc3_setup_duplex(ip);
774
775 ip->ioc3_timer.expires = jiffies + ((12 * HZ)/10);
776 add_timer(&ip->ioc3_timer);
777}
778
779
780
781
782
783
784
785
786
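/*
 * Probe all 32 MII addresses for a PHY; if nothing answers, the workaround
 * below falls back to address 31 rather than failing outright.
 */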
787static int ioc3_mii_init(struct ioc3_private *ip)
788{
789 int i, found = 0, res = 0;
790 int ioc3_phy_workaround = 1;
791 u16 word;
792
793 for (i = 0; i < 32; i++) {
794 word = ioc3_mdio_read(ip->dev, i, MII_PHYSID1);
795
796 if (word != 0xffff && word != 0x0000) {
797 found = 1;
798 break;
799 }
800 }
801
802 if (!found) {
803 if (ioc3_phy_workaround)
804 i = 31;
805 else {
806 ip->mii.phy_id = -1;
807 res = -ENODEV;
808 goto out;
809 }
810 }
811
812 ip->mii.phy_id = i;
813
814out:
815 return res;
816}
817
818static void ioc3_mii_start(struct ioc3_private *ip)
819{
820 ip->ioc3_timer.expires = jiffies + (12 * HZ)/10;
821 ip->ioc3_timer.data = (unsigned long) ip;
822 ip->ioc3_timer.function = ioc3_timer;
823 add_timer(&ip->ioc3_timer);
824}
825
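/*
 * Move posted RX entries to the producer end until the consumer index is a
 * multiple of 16, then clear the status word of every posted buffer.
 */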
826static inline void ioc3_clean_rx_ring(struct ioc3_private *ip)
827{
828 struct sk_buff *skb;
829 int i;
830
831 for (i = ip->rx_ci; i & 15; i++) {
832 ip->rx_skbs[ip->rx_pi] = ip->rx_skbs[ip->rx_ci];
833 ip->rxr[ip->rx_pi++] = ip->rxr[ip->rx_ci++];
834 }
835 ip->rx_pi &= 511;
836 ip->rx_ci &= 511;
837
838 for (i = ip->rx_ci; i != ip->rx_pi; i = (i+1) & 511) {
839 struct ioc3_erxbuf *rxb;
840 skb = ip->rx_skbs[i];
841 rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
842 rxb->w0 = 0;
843 }
844}
845
846static inline void ioc3_clean_tx_ring(struct ioc3_private *ip)
847{
848 struct sk_buff *skb;
849 int i;
850
851 for (i=0; i < 128; i++) {
852 skb = ip->tx_skbs[i];
853 if (skb) {
854 ip->tx_skbs[i] = NULL;
855 dev_kfree_skb_any(skb);
856 }
857 ip->txr[i].cmd = 0;
858 }
859 ip->tx_pi = 0;
860 ip->tx_ci = 0;
861}
862
863static void ioc3_free_rings(struct ioc3_private *ip)
864{
865 struct sk_buff *skb;
866 int rx_entry, n_entry;
867
868 if (ip->txr) {
869 ioc3_clean_tx_ring(ip);
870 free_pages((unsigned long)ip->txr, 2);
871 ip->txr = NULL;
872 }
873
874 if (ip->rxr) {
875 n_entry = ip->rx_ci;
876 rx_entry = ip->rx_pi;
877
878 while (n_entry != rx_entry) {
879 skb = ip->rx_skbs[n_entry];
880 if (skb)
881 dev_kfree_skb_any(skb);
882
883 n_entry = (n_entry + 1) & 511;
884 }
885 free_page((unsigned long)ip->rxr);
886 ip->rxr = NULL;
887 }
888}
889
890static void ioc3_alloc_rings(struct net_device *dev)
891{
892 struct ioc3_private *ip = netdev_priv(dev);
893 struct ioc3_erxbuf *rxb;
894 unsigned long *rxr;
895 int i;
896
897 if (ip->rxr == NULL) {
898
899 ip->rxr = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
900 rxr = ip->rxr;
901 if (!rxr)
902 printk("ioc3_alloc_rings(): get_zeroed_page() failed!\n");
903
904
905
906
907 for (i = 0; i < RX_BUFFS; i++) {
908 struct sk_buff *skb;
909
910 skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
911 if (!skb) {
912 show_free_areas(0, NULL);
913 continue;
914 }
915
916 ip->rx_skbs[i] = skb;
917
918
919 skb_put(skb, (1664 + RX_OFFSET));
920 rxb = (struct ioc3_erxbuf *) skb->data;
921 rxr[i] = cpu_to_be64(ioc3_map(rxb, 1));
922 skb_reserve(skb, RX_OFFSET);
923 }
924 ip->rx_ci = 0;
925 ip->rx_pi = RX_BUFFS;
926 }
927
928 if (ip->txr == NULL) {
929
930 ip->txr = (struct ioc3_etxd *)__get_free_pages(GFP_KERNEL, 2);
931 if (!ip->txr)
932 printk("ioc3_alloc_rings(): __get_free_pages() failed!\n");
933 ip->tx_pi = 0;
934 ip->tx_ci = 0;
935 }
936}
937
938static void ioc3_init_rings(struct net_device *dev)
939{
940 struct ioc3_private *ip = netdev_priv(dev);
941 struct ioc3 *ioc3 = ip->regs;
942 unsigned long ring;
943
944 ioc3_free_rings(ip);
945 ioc3_alloc_rings(dev);
946
947 ioc3_clean_rx_ring(ip);
948 ioc3_clean_tx_ring(ip);
949
950
951 ring = ioc3_map(ip->rxr, 0);
952 ioc3_w_erbr_h(ring >> 32);
953 ioc3_w_erbr_l(ring & 0xffffffff);
954 ioc3_w_ercir(ip->rx_ci << 3);
955 ioc3_w_erpir((ip->rx_pi << 3) | ERPIR_ARM);
956
957 ring = ioc3_map(ip->txr, 0);
958
959 ip->txqlen = 0;
960
961
962 ioc3_w_etbr_h(ring >> 32);
963 ioc3_w_etbr_l(ring & 0xffffffff);
964 ioc3_w_etpir(ip->tx_pi << 7);
965 ioc3_w_etcir(ip->tx_ci << 7);
966 (void) ioc3_r_etcir();
967}
968
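/*
 * Discover how much SSRAM the board has: enable the large buffer mode,
 * write test patterns into both halves and check whether they read back.
 * ip->emcr ends up with the matching buffer size setting.
 */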
969static inline void ioc3_ssram_disc(struct ioc3_private *ip)
970{
971 struct ioc3 *ioc3 = ip->regs;
972 volatile u32 *ssram0 = &ioc3->ssram[0x0000];
973 volatile u32 *ssram1 = &ioc3->ssram[0x4000];
974 unsigned int pattern = 0x5555;
975
976
977 ioc3_w_emcr(ioc3_r_emcr() | (EMCR_BUFSIZ | EMCR_RAMPAR));
978
979 *ssram0 = pattern;
980 *ssram1 = ~pattern & IOC3_SSRAM_DM;
981
982 if ((*ssram0 & IOC3_SSRAM_DM) != pattern ||
983 (*ssram1 & IOC3_SSRAM_DM) != (~pattern & IOC3_SSRAM_DM)) {
984
985 ip->emcr = EMCR_RAMPAR;
986 ioc3_w_emcr(ioc3_r_emcr() & ~EMCR_BUFSIZ);
987 } else
988 ip->emcr = EMCR_BUFSIZ | EMCR_RAMPAR;
989}
990
991static void ioc3_init(struct net_device *dev)
992{
993 struct ioc3_private *ip = netdev_priv(dev);
994 struct ioc3 *ioc3 = ip->regs;
995
996 del_timer_sync(&ip->ioc3_timer);
997
998 ioc3_w_emcr(EMCR_RST);
999 (void) ioc3_r_emcr();
1000 udelay(4);
1001 ioc3_w_emcr(0);
1002 (void) ioc3_r_emcr();
1003
1004
1005#ifdef CONFIG_SGI_IP27
1006 ioc3_w_erbar(PCI64_ATTR_BAR >> 32);
1007#else
1008 ioc3_w_erbar(0);
1009#endif
1010 (void) ioc3_r_etcdc();
1011 ioc3_w_ercsr(15);
1012 ioc3_w_ertr(0);
1013 __ioc3_set_mac_address(dev);
1014 ioc3_w_ehar_h(ip->ehar_h);
1015 ioc3_w_ehar_l(ip->ehar_l);
1016 ioc3_w_ersr(42);
1017
1018 ioc3_init_rings(dev);
1019
1020 ip->emcr |= ((RX_OFFSET / 2) << EMCR_RXOFF_SHIFT) | EMCR_TXDMAEN |
1021 EMCR_TXEN | EMCR_RXDMAEN | EMCR_RXEN | EMCR_PADEN;
1022 ioc3_w_emcr(ip->emcr);
1023 ioc3_w_eier(EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
1024 EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
1025 EISR_TXEXPLICIT | EISR_TXMEMERR);
1026 (void) ioc3_r_eier();
1027}
1028
1029static inline void ioc3_stop(struct ioc3_private *ip)
1030{
1031 struct ioc3 *ioc3 = ip->regs;
1032
1033 ioc3_w_emcr(0);
1034 ioc3_w_eier(0);
1035 (void) ioc3_r_eier();
1036}
1037
1038static int ioc3_open(struct net_device *dev)
1039{
1040 struct ioc3_private *ip = netdev_priv(dev);
1041
1042 if (request_irq(dev->irq, ioc3_interrupt, IRQF_SHARED, ioc3_str, dev)) {
1043 printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq);
1044
1045 return -EAGAIN;
1046 }
1047
1048 ip->ehar_h = 0;
1049 ip->ehar_l = 0;
1050 ioc3_init(dev);
1051 ioc3_mii_start(ip);
1052
1053 netif_start_queue(dev);
1054 return 0;
1055}
1056
1057static int ioc3_close(struct net_device *dev)
1058{
1059 struct ioc3_private *ip = netdev_priv(dev);
1060
1061 del_timer_sync(&ip->ioc3_timer);
1062
1063 netif_stop_queue(dev);
1064
1065 ioc3_stop(ip);
1066 free_irq(dev->irq, dev);
1067
1068 ioc3_free_rings(ip);
1069 return 0;
1070}
1071
/*
 * MENET boards carry four IOC3 chips.  We call a card MENET when PCI slots
 * 0, 1 and 2 of its root bus all contain IOC3s; the serial probing below
 * uses this to skip registering UARTs on the fourth IOC3.
 */
1083static int ioc3_adjacent_is_ioc3(struct pci_dev *pdev, int slot)
1084{
1085 struct pci_dev *dev = pci_get_slot(pdev->bus, PCI_DEVFN(slot, 0));
1086 int ret = 0;
1087
1088 if (dev) {
1089 if (dev->vendor == PCI_VENDOR_ID_SGI &&
1090 dev->device == PCI_DEVICE_ID_SGI_IOC3)
1091 ret = 1;
1092 pci_dev_put(dev);
1093 }
1094
1095 return ret;
1096}
1097
1098static int ioc3_is_menet(struct pci_dev *pdev)
1099{
1100 return pdev->bus->parent == NULL &&
1101 ioc3_adjacent_is_ioc3(pdev, 0) &&
1102 ioc3_adjacent_is_ioc3(pdev, 1) &&
1103 ioc3_adjacent_is_ioc3(pdev, 2);
1104}
1105
1106#ifdef CONFIG_SERIAL_8250
/*
 * Serial port support: the IOC3 UARTs are fed from a 22 MHz clock through a
 * programmable prescaler.  ioc3_8250_register() programs the prescaler to
 * COSMISC_CONSTANT and reports the resulting base clock of
 * (22000000 << 1) / COSMISC_CONSTANT to the 8250 core, which derives the
 * usual baud rate divisors from it.
 */
1140static void ioc3_8250_register(struct ioc3_uartregs __iomem *uart)
1141{
1142#define COSMISC_CONSTANT 6
1143
1144 struct uart_8250_port port = {
1145 .port = {
1146 .irq = 0,
1147 .flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF,
1148 .iotype = UPIO_MEM,
1149 .regshift = 0,
1150 .uartclk = (22000000 << 1) / COSMISC_CONSTANT,
1151
1152 .membase = (unsigned char __iomem *) uart,
1153 .mapbase = (unsigned long) uart,
1154 }
1155 };
1156 unsigned char lcr;
1157
	lcr = uart->iu_lcr;
	/* Program COSMISC_CONSTANT into the scratch register with the
	 * divisor latch enabled; the uartclk value above is derived from
	 * the same constant. */
	uart->iu_lcr = lcr | UART_LCR_DLAB;
	uart->iu_scr = COSMISC_CONSTANT;
	uart->iu_lcr = lcr;
	uart->iu_lcr;			/* dummy read to flush posted writes */
1163 serial8250_register_8250_port(&port);
1164}
1165
1166static void ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3)
1167{
	/*
	 * Skip the fourth IOC3 on MENET boards; only the first three have
	 * usable serial ports.
	 */
1177 if (ioc3_is_menet(pdev) && PCI_SLOT(pdev->devfn) == 3)
1178 return;
1179
	/*
	 * Switch the GPIO pins to UART mode and disable serial DMA, then
	 * adjust the serial interrupt enables before handing both ports to
	 * the 8250 driver.
	 */
1184 ioc3->gpcr_s = GPCR_UARTA_MODESEL | GPCR_UARTB_MODESEL;
1185 ioc3->gpcr_s;
1186 ioc3->gppr_6 = 0;
1187 ioc3->gppr_6;
1188 ioc3->gppr_7 = 0;
1189 ioc3->gppr_7;
1190 ioc3->sscr_a = ioc3->sscr_a & ~SSCR_DMA_EN;
1191 ioc3->sscr_a;
1192 ioc3->sscr_b = ioc3->sscr_b & ~SSCR_DMA_EN;
1193 ioc3->sscr_b;
1194
1195 ioc3->sio_iec &= ~ (SIO_IR_SA_TX_MT | SIO_IR_SA_RX_FULL |
1196 SIO_IR_SA_RX_HIGH | SIO_IR_SA_RX_TIMER |
1197 SIO_IR_SA_DELTA_DCD | SIO_IR_SA_DELTA_CTS |
1198 SIO_IR_SA_TX_EXPLICIT | SIO_IR_SA_MEMERR);
1199 ioc3->sio_iec |= SIO_IR_SA_INT;
1200 ioc3->sscr_a = 0;
1201 ioc3->sio_iec &= ~ (SIO_IR_SB_TX_MT | SIO_IR_SB_RX_FULL |
1202 SIO_IR_SB_RX_HIGH | SIO_IR_SB_RX_TIMER |
1203 SIO_IR_SB_DELTA_DCD | SIO_IR_SB_DELTA_CTS |
1204 SIO_IR_SB_TX_EXPLICIT | SIO_IR_SB_MEMERR);
1205 ioc3->sio_iec |= SIO_IR_SB_INT;
1206 ioc3->sscr_b = 0;
1207
1208 ioc3_8250_register(&ioc3->sregs.uarta);
1209 ioc3_8250_register(&ioc3->sregs.uartb);
1210}
1211#endif
1212
1213static const struct net_device_ops ioc3_netdev_ops = {
1214 .ndo_open = ioc3_open,
1215 .ndo_stop = ioc3_close,
1216 .ndo_start_xmit = ioc3_start_xmit,
1217 .ndo_tx_timeout = ioc3_timeout,
1218 .ndo_get_stats = ioc3_get_stats,
1219 .ndo_set_rx_mode = ioc3_set_multicast_list,
1220 .ndo_do_ioctl = ioc3_ioctl,
1221 .ndo_validate_addr = eth_validate_addr,
1222 .ndo_set_mac_address = ioc3_set_mac_address,
1223};
1224
1225static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1226{
1227 unsigned int sw_physid1, sw_physid2;
1228 struct net_device *dev = NULL;
1229 struct ioc3_private *ip;
1230 struct ioc3 *ioc3;
1231 unsigned long ioc3_base, ioc3_size;
1232 u32 vendor, model, rev;
1233 int err, pci_using_dac;
1234
1235
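	/* Configure DMA attributes: try 64-bit DMA first, fall back to 32-bit. */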
1236 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1237 if (!err) {
1238 pci_using_dac = 1;
1239 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1240 if (err < 0) {
1241 printk(KERN_ERR "%s: Unable to obtain 64 bit DMA "
1242 "for consistent allocations\n", pci_name(pdev));
1243 goto out;
1244 }
1245 } else {
1246 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1247 if (err) {
1248 printk(KERN_ERR "%s: No usable DMA configuration, "
1249 "aborting.\n", pci_name(pdev));
1250 goto out;
1251 }
1252 pci_using_dac = 0;
1253 }
1254
1255 if (pci_enable_device(pdev))
1256 return -ENODEV;
1257
1258 dev = alloc_etherdev(sizeof(struct ioc3_private));
1259 if (!dev) {
1260 err = -ENOMEM;
1261 goto out_disable;
1262 }
1263
1264 if (pci_using_dac)
1265 dev->features |= NETIF_F_HIGHDMA;
1266
1267 err = pci_request_regions(pdev, "ioc3");
1268 if (err)
1269 goto out_free;
1270
1271 SET_NETDEV_DEV(dev, &pdev->dev);
1272
1273 ip = netdev_priv(dev);
1274 ip->dev = dev;
1275
1276 dev->irq = pdev->irq;
1277
1278 ioc3_base = pci_resource_start(pdev, 0);
1279 ioc3_size = pci_resource_len(pdev, 0);
1280 ioc3 = (struct ioc3 *) ioremap(ioc3_base, ioc3_size);
1281 if (!ioc3) {
1282 printk(KERN_CRIT "ioc3eth(%s): ioremap failed, goodbye.\n",
1283 pci_name(pdev));
1284 err = -ENOMEM;
1285 goto out_res;
1286 }
1287 ip->regs = ioc3;
1288
1289#ifdef CONFIG_SERIAL_8250
1290 ioc3_serial_probe(pdev, ioc3);
1291#endif
1292
1293 spin_lock_init(&ip->ioc3_lock);
1294 init_timer(&ip->ioc3_timer);
1295
1296 ioc3_stop(ip);
1297 ioc3_init(dev);
1298
1299 ip->pdev = pdev;
1300
1301 ip->mii.phy_id_mask = 0x1f;
1302 ip->mii.reg_num_mask = 0x1f;
1303 ip->mii.dev = dev;
1304 ip->mii.mdio_read = ioc3_mdio_read;
1305 ip->mii.mdio_write = ioc3_mdio_write;
1306
1307 ioc3_mii_init(ip);
1308
1309 if (ip->mii.phy_id == -1) {
1310 printk(KERN_CRIT "ioc3-eth(%s): Didn't find a PHY, goodbye.\n",
1311 pci_name(pdev));
1312 err = -ENODEV;
1313 goto out_stop;
1314 }
1315
1316 ioc3_mii_start(ip);
1317 ioc3_ssram_disc(ip);
1318 ioc3_get_eaddr(ip);
1319
1320
1321 dev->watchdog_timeo = 5 * HZ;
1322 dev->netdev_ops = &ioc3_netdev_ops;
1323 dev->ethtool_ops = &ioc3_ethtool_ops;
1324 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
1325 dev->features = NETIF_F_IP_CSUM;
1326
1327 sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1);
1328 sw_physid2 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID2);
1329
1330 err = register_netdev(dev);
1331 if (err)
1332 goto out_stop;
1333
1334 mii_check_media(&ip->mii, 1, 1);
1335 ioc3_setup_duplex(ip);
1336
1337 vendor = (sw_physid1 << 12) | (sw_physid2 >> 4);
1338 model = (sw_physid2 >> 4) & 0x3f;
1339 rev = sw_physid2 & 0xf;
1340 printk(KERN_INFO "%s: Using PHY %d, vendor 0x%x, model %d, "
1341 "rev %d.\n", dev->name, ip->mii.phy_id, vendor, model, rev);
1342 printk(KERN_INFO "%s: IOC3 SSRAM has %d kbyte.\n", dev->name,
1343 ip->emcr & EMCR_BUFSIZ ? 128 : 64);
1344
1345 return 0;
1346
1347out_stop:
1348 ioc3_stop(ip);
1349 del_timer_sync(&ip->ioc3_timer);
1350 ioc3_free_rings(ip);
1351out_res:
1352 pci_release_regions(pdev);
1353out_free:
1354 free_netdev(dev);
1355out_disable:
	/*
	 * pci_disable_device() is deliberately not called here; other
	 * functions of the multifunction IOC3 may still be in use.
	 */
1360out:
1361 return err;
1362}
1363
1364static void ioc3_remove_one(struct pci_dev *pdev)
1365{
1366 struct net_device *dev = pci_get_drvdata(pdev);
1367 struct ioc3_private *ip = netdev_priv(dev);
1368 struct ioc3 *ioc3 = ip->regs;
1369
1370 unregister_netdev(dev);
1371 del_timer_sync(&ip->ioc3_timer);
1372
1373 iounmap(ioc3);
1374 pci_release_regions(pdev);
1375 free_netdev(dev);
1376
1377
1378
1379
1380}
1381
1382static const struct pci_device_id ioc3_pci_tbl[] = {
1383 { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID },
1384 { 0 }
1385};
1386MODULE_DEVICE_TABLE(pci, ioc3_pci_tbl);
1387
1388static struct pci_driver ioc3_driver = {
1389 .name = "ioc3-eth",
1390 .id_table = ioc3_pci_tbl,
1391 .probe = ioc3_probe,
1392 .remove = ioc3_remove_one,
1393};
1394
1395static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
1396{
1397 unsigned long data;
1398 struct ioc3_private *ip = netdev_priv(dev);
1399 struct ioc3 *ioc3 = ip->regs;
1400 unsigned int len;
1401 struct ioc3_etxd *desc;
1402 uint32_t w0 = 0;
1403 int produce;
	/*
	 * The IOC3 checksum hardware simply sums the entire frame, including
	 * the Ethernet header, into a fixed offset within the packet.  To
	 * end up with a correct TCP/UDP checksum we therefore preload the
	 * checksum field with the pseudo-header sum plus the one's
	 * complement of the Ethernet header sum, so that the header's
	 * contribution cancels out of the hardware's result.
	 */
1413 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1414 const struct iphdr *ih = ip_hdr(skb);
		const int proto = ih->protocol;	/* a single byte, no ntohs() needed */
1416 unsigned int csoff;
1417 uint32_t csum, ehsum;
1418 uint16_t *eh;
1419
1420
1421
1422 eh = (uint16_t *) skb->data;
1423
1424
1425 ehsum = eh[0] + eh[1] + eh[2] + eh[3] + eh[4] + eh[5] + eh[6];
1426
1427
1428 ehsum = (ehsum & 0xffff) + (ehsum >> 16);
1429 ehsum = (ehsum & 0xffff) + (ehsum >> 16);
1430
1431
1432
1433 csum = csum_tcpudp_nofold(ih->saddr, ih->daddr,
1434 ih->tot_len - (ih->ihl << 2),
1435 proto, 0xffff ^ ehsum);
1436
1437 csum = (csum & 0xffff) + (csum >> 16);
1438 csum = (csum & 0xffff) + (csum >> 16);
1439
1440 csoff = ETH_HLEN + (ih->ihl << 2);
1441 if (proto == IPPROTO_UDP) {
1442 csoff += offsetof(struct udphdr, check);
1443 udp_hdr(skb)->check = csum;
1444 }
1445 if (proto == IPPROTO_TCP) {
1446 csoff += offsetof(struct tcphdr, check);
1447 tcp_hdr(skb)->check = csum;
1448 }
1449
1450 w0 = ETXD_DOCHECKSUM | (csoff << ETXD_CHKOFF_SHIFT);
1451 }
1452
1453 spin_lock_irq(&ip->ioc3_lock);
1454
1455 data = (unsigned long) skb->data;
1456 len = skb->len;
1457
1458 produce = ip->tx_pi;
1459 desc = &ip->txr[produce];
1460
1461 if (len <= 104) {
1462
1463 skb_copy_from_linear_data(skb, desc->data, skb->len);
1464 if (len < ETH_ZLEN) {
1465
1466 memset(desc->data + len, 0, ETH_ZLEN - len);
1467 len = ETH_ZLEN;
1468 }
1469 desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_D0V | w0);
1470 desc->bufcnt = cpu_to_be32(len);
1471 } else if ((data ^ (data + len - 1)) & 0x4000) {
1472 unsigned long b2 = (data | 0x3fffUL) + 1UL;
1473 unsigned long s1 = b2 - data;
1474 unsigned long s2 = data + len - b2;
1475
1476 desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE |
1477 ETXD_B1V | ETXD_B2V | w0);
1478 desc->bufcnt = cpu_to_be32((s1 << ETXD_B1CNT_SHIFT) |
1479 (s2 << ETXD_B2CNT_SHIFT));
1480 desc->p1 = cpu_to_be64(ioc3_map(skb->data, 1));
1481 desc->p2 = cpu_to_be64(ioc3_map((void *) b2, 1));
1482 } else {
1483
1484 desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_B1V | w0);
1485 desc->bufcnt = cpu_to_be32(len << ETXD_B1CNT_SHIFT);
1486 desc->p1 = cpu_to_be64(ioc3_map(skb->data, 1));
1487 }
1488
1489 BARRIER();
1490
1491 ip->tx_skbs[produce] = skb;
1492 produce = (produce + 1) & 127;
1493 ip->tx_pi = produce;
1494 ioc3_w_etpir(produce << 7);
1495
1496 ip->txqlen++;
1497
1498 if (ip->txqlen >= 127)
1499 netif_stop_queue(dev);
1500
1501 spin_unlock_irq(&ip->ioc3_lock);
1502
1503 return NETDEV_TX_OK;
1504}
1505
1506static void ioc3_timeout(struct net_device *dev)
1507{
1508 struct ioc3_private *ip = netdev_priv(dev);
1509
1510 printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
1511
1512 spin_lock_irq(&ip->ioc3_lock);
1513
1514 ioc3_stop(ip);
1515 ioc3_init(dev);
1516 ioc3_mii_init(ip);
1517 ioc3_mii_start(ip);
1518
1519 spin_unlock_irq(&ip->ioc3_lock);
1520
1521 netif_wake_queue(dev);
1522}
1523
1524
1525
1526
1527
1528
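/*
 * Map a multicast Ethernet address onto its bit position in the 64-bit EHAR
 * hash filter.
 */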
1529static inline unsigned int ioc3_hash(const unsigned char *addr)
1530{
1531 unsigned int temp = 0;
1532 u32 crc;
1533 int bits;
1534
1535 crc = ether_crc_le(ETH_ALEN, addr);
1536
1537 crc &= 0x3f;
1538 for (bits = 6; --bits >= 0; ) {
1539 temp <<= 1;
1540 temp |= (crc & 0x1);
1541 crc >>= 1;
1542 }
1543
1544 return temp;
1545}
1546
1547static void ioc3_get_drvinfo (struct net_device *dev,
1548 struct ethtool_drvinfo *info)
1549{
1550 struct ioc3_private *ip = netdev_priv(dev);
1551
1552 strlcpy(info->driver, IOC3_NAME, sizeof(info->driver));
1553 strlcpy(info->version, IOC3_VERSION, sizeof(info->version));
1554 strlcpy(info->bus_info, pci_name(ip->pdev), sizeof(info->bus_info));
1555}
1556
1557static int ioc3_get_link_ksettings(struct net_device *dev,
1558 struct ethtool_link_ksettings *cmd)
1559{
1560 struct ioc3_private *ip = netdev_priv(dev);
1561
1562 spin_lock_irq(&ip->ioc3_lock);
1563 mii_ethtool_get_link_ksettings(&ip->mii, cmd);
1564 spin_unlock_irq(&ip->ioc3_lock);
1565
1566 return 0;
1567}
1568
1569static int ioc3_set_link_ksettings(struct net_device *dev,
1570 const struct ethtool_link_ksettings *cmd)
1571{
1572 struct ioc3_private *ip = netdev_priv(dev);
1573 int rc;
1574
1575 spin_lock_irq(&ip->ioc3_lock);
1576 rc = mii_ethtool_set_link_ksettings(&ip->mii, cmd);
1577 spin_unlock_irq(&ip->ioc3_lock);
1578
1579 return rc;
1580}
1581
1582static int ioc3_nway_reset(struct net_device *dev)
1583{
1584 struct ioc3_private *ip = netdev_priv(dev);
1585 int rc;
1586
1587 spin_lock_irq(&ip->ioc3_lock);
1588 rc = mii_nway_restart(&ip->mii);
1589 spin_unlock_irq(&ip->ioc3_lock);
1590
1591 return rc;
1592}
1593
1594static u32 ioc3_get_link(struct net_device *dev)
1595{
1596 struct ioc3_private *ip = netdev_priv(dev);
1597 int rc;
1598
1599 spin_lock_irq(&ip->ioc3_lock);
1600 rc = mii_link_ok(&ip->mii);
1601 spin_unlock_irq(&ip->ioc3_lock);
1602
1603 return rc;
1604}
1605
1606static const struct ethtool_ops ioc3_ethtool_ops = {
1607 .get_drvinfo = ioc3_get_drvinfo,
1608 .nway_reset = ioc3_nway_reset,
1609 .get_link = ioc3_get_link,
1610 .get_link_ksettings = ioc3_get_link_ksettings,
1611 .set_link_ksettings = ioc3_set_link_ksettings,
1612};
1613
1614static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1615{
1616 struct ioc3_private *ip = netdev_priv(dev);
1617 int rc;
1618
1619 spin_lock_irq(&ip->ioc3_lock);
1620 rc = generic_mii_ioctl(&ip->mii, if_mii(rq), cmd, NULL);
1621 spin_unlock_irq(&ip->ioc3_lock);
1622
1623 return rc;
1624}
1625
1626static void ioc3_set_multicast_list(struct net_device *dev)
1627{
1628 struct netdev_hw_addr *ha;
1629 struct ioc3_private *ip = netdev_priv(dev);
1630 struct ioc3 *ioc3 = ip->regs;
1631 u64 ehar = 0;
1632
1633 netif_stop_queue(dev);
1634
1635 if (dev->flags & IFF_PROMISC) {
1636 ip->emcr |= EMCR_PROMISC;
1637 ioc3_w_emcr(ip->emcr);
1638 (void) ioc3_r_emcr();
1639 } else {
1640 ip->emcr &= ~EMCR_PROMISC;
1641 ioc3_w_emcr(ip->emcr);
1642 (void) ioc3_r_emcr();
1643
1644 if ((dev->flags & IFF_ALLMULTI) ||
1645 (netdev_mc_count(dev) > 64)) {
			/*
			 * Too many addresses to hash (or all-multicast was
			 * requested), so just set every bit in the filter.
			 */
1649 ip->ehar_h = 0xffffffff;
1650 ip->ehar_l = 0xffffffff;
1651 } else {
1652 netdev_for_each_mc_addr(ha, dev) {
1653 ehar |= (1UL << ioc3_hash(ha->addr));
1654 }
1655 ip->ehar_h = ehar >> 32;
1656 ip->ehar_l = ehar & 0xffffffff;
1657 }
1658 ioc3_w_ehar_h(ip->ehar_h);
1659 ioc3_w_ehar_l(ip->ehar_l);
1660 }
1661
1662 netif_wake_queue(dev);
1663}
1664
1665module_pci_driver(ioc3_driver);
1666MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
1667MODULE_DESCRIPTION("SGI IOC3 Ethernet driver");
1668MODULE_LICENSE("GPL");
1669