/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
 *
 * Author: Adrian Sun (asun@darksunrising.com)
 * Released under the GNU General Public License (see MODULE_LICENSE
 * below).
 */
70#include <linux/module.h>
71#include <linux/kernel.h>
72#include <linux/types.h>
73#include <linux/compiler.h>
74#include <linux/slab.h>
75#include <linux/delay.h>
76#include <linux/init.h>
77#include <linux/vmalloc.h>
78#include <linux/ioport.h>
79#include <linux/pci.h>
80#include <linux/mm.h>
81#include <linux/highmem.h>
82#include <linux/list.h>
83#include <linux/dma-mapping.h>
84
85#include <linux/netdevice.h>
86#include <linux/etherdevice.h>
87#include <linux/skbuff.h>
88#include <linux/ethtool.h>
89#include <linux/crc32.h>
90#include <linux/random.h>
91#include <linux/mii.h>
92#include <linux/ip.h>
93#include <linux/tcp.h>
94#include <linux/mutex.h>
95#include <linux/firmware.h>
96
97#include <net/checksum.h>
98
99#include <asm/atomic.h>
100#include <asm/system.h>
101#include <asm/io.h>
102#include <asm/byteorder.h>
103#include <asm/uaccess.h>
104
105#define cas_page_map(x) kmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
106#define cas_page_unmap(x) kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
107#define CAS_NCPUS num_online_cpus()
108
109#if defined(CONFIG_CASSINI_NAPI) && defined(HAVE_NETDEV_POLL)
110#define USE_NAPI
111#define cas_skb_release(x) netif_receive_skb(x)
112#else
113#define cas_skb_release(x) netif_rx(x)
114#endif
115
116
117#define USE_HP_WORKAROUND
118#define HP_WORKAROUND_DEFAULT
119#define CAS_HP_ALT_FIRMWARE cas_prog_null
120
121#include "cassini.h"
122
123#define USE_TX_COMPWB
124#define USE_CSMA_CD_PROTO
125#define USE_RX_BLANK
126#undef USE_ENTROPY_DEV
127
128
129
130
131#undef USE_PCI_INTB
132#undef USE_PCI_INTC
133#undef USE_PCI_INTD
134#undef USE_QOS
135
136#undef USE_VPD_DEBUG
137
138
139#define USE_PAGE_ORDER
140#define RX_DONT_BATCH 0
141#define RX_COPY_ALWAYS 0
142#define RX_COPY_MIN 64
143#undef RX_COUNT_BUFFERS
144
145#define DRV_MODULE_NAME "cassini"
146#define PFX DRV_MODULE_NAME ": "
147#define DRV_MODULE_VERSION "1.6"
148#define DRV_MODULE_RELDATE "21 May 2008"
149
150#define CAS_DEF_MSG_ENABLE \
151 (NETIF_MSG_DRV | \
152 NETIF_MSG_PROBE | \
153 NETIF_MSG_LINK | \
154 NETIF_MSG_TIMER | \
155 NETIF_MSG_IFDOWN | \
156 NETIF_MSG_IFUP | \
157 NETIF_MSG_RX_ERR | \
158 NETIF_MSG_TX_ERR)
159
160
161
162
163#define CAS_TX_TIMEOUT (HZ)
164#define CAS_LINK_TIMEOUT (22*HZ/10)
165#define CAS_LINK_FAST_TIMEOUT (1)
166
167
168
169
170#define STOP_TRIES_PHY 1000
171#define STOP_TRIES 5000
172
173
174
175
176
177#define CAS_MIN_FRAME 97
178#define CAS_1000MB_MIN_FRAME 255
179#define CAS_MIN_MTU 60
180#define CAS_MAX_MTU min(((cp->page_size << 1) - 0x50), 9000)
181
182#if 1
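/* The CAS_RESET_* codes below are compiled out: rather than encoding a
 * single reset reason into reset_task_pending, the driver keeps separate
 * atomic counters (reset_task_pending plus reset_task_pending_all/_spare)
 * so that concurrent reset requests do not overwrite one another.
 */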
183
184
185
186
187#else
188#define CAS_RESET_MTU 1
189#define CAS_RESET_ALL 2
190#define CAS_RESET_SPARE 3
191#endif
192
193static char version[] __devinitdata =
194 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
195
196static int cassini_debug = -1;
197static int link_mode;
198
199MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
200MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
201MODULE_LICENSE("GPL");
202MODULE_FIRMWARE("sun/cassini.bin");
203module_param(cassini_debug, int, 0);
204MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
205module_param(link_mode, int, 0);
206MODULE_PARM_DESC(link_mode, "default link mode");
207
208
209
210
211
212#define DEFAULT_LINKDOWN_TIMEOUT 5
213
214
215
216static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
217module_param(linkdown_timeout, int, 0);
218MODULE_PARM_DESC(linkdown_timeout,
219"min reset interval in sec. for PCS linkdown issue; disabled if not positive");
220
221
222
223
224
225
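/* Working copy of the PCS link-down reset interval used by
 * cas_pcs_link_check(); presumably initialized from the linkdown_timeout
 * parameter during probe.  Zero leaves the workaround disabled.
 */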
226static int link_transition_timeout;
227
228
229
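/* BMCR settings selected by the link_mode module parameter:
 * 0: autoneg, 1: 10bt half duplex, 2: 100bt half duplex,
 * 3: 10bt full duplex, 4: 100bt full duplex, 5: 1000bt full duplex.
 */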
230static u16 link_modes[] __devinitdata = {
231 BMCR_ANENABLE,
232 0,
233 BMCR_SPEED100,
234 BMCR_FULLDPLX,
235 BMCR_SPEED100|BMCR_FULLDPLX,
236 CAS_BMCR_SPEED1000|BMCR_FULLDPLX
237};
238
239static struct pci_device_id cas_pci_tbl[] __devinitdata = {
240 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
241 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242 { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
243 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
244 { 0, }
245};
246
247MODULE_DEVICE_TABLE(pci, cas_pci_tbl);
248
249static void cas_set_link_modes(struct cas *cp);
250
251static inline void cas_lock_tx(struct cas *cp)
252{
253 int i;
254
255 for (i = 0; i < N_TX_RINGS; i++)
256 spin_lock(&cp->tx_lock[i]);
257}
258
259static inline void cas_lock_all(struct cas *cp)
260{
261 spin_lock_irq(&cp->lock);
262 cas_lock_tx(cp);
263}
264
265
266
267
268
269
270
271
272
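/* cas_lock_all_save() must remain a macro (rather than an inline
 * function) so that spin_lock_irqsave() can store the saved interrupt
 * state directly into the caller's 'flags' variable.
 */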
273#define cas_lock_all_save(cp, flags) \
274do { \
275 struct cas *xxxcp = (cp); \
276 spin_lock_irqsave(&xxxcp->lock, flags); \
277 cas_lock_tx(xxxcp); \
278} while (0)
279
280static inline void cas_unlock_tx(struct cas *cp)
281{
282 int i;
283
284 for (i = N_TX_RINGS; i > 0; i--)
285 spin_unlock(&cp->tx_lock[i - 1]);
286}
287
288static inline void cas_unlock_all(struct cas *cp)
289{
290 cas_unlock_tx(cp);
291 spin_unlock_irq(&cp->lock);
292}
293
294#define cas_unlock_all_restore(cp, flags) \
295do { \
296 struct cas *xxxcp = (cp); \
297 cas_unlock_tx(xxxcp); \
298 spin_unlock_irqrestore(&xxxcp->lock, flags); \
299} while (0)
300
301static void cas_disable_irq(struct cas *cp, const int ring)
302{
303
304 if (ring == 0) {
305 writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
306 return;
307 }
308
309
310 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
311 switch (ring) {
312#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
313#ifdef USE_PCI_INTB
314 case 1:
315#endif
316#ifdef USE_PCI_INTC
317 case 2:
318#endif
319#ifdef USE_PCI_INTD
320 case 3:
321#endif
322 writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
323 cp->regs + REG_PLUS_INTRN_MASK(ring));
324 break;
325#endif
326 default:
327 writel(INTRN_MASK_CLEAR_ALL, cp->regs +
328 REG_PLUS_INTRN_MASK(ring));
329 break;
330 }
331 }
332}
333
334static inline void cas_mask_intr(struct cas *cp)
335{
336 int i;
337
338 for (i = 0; i < N_RX_COMP_RINGS; i++)
339 cas_disable_irq(cp, i);
340}
341
342static void cas_enable_irq(struct cas *cp, const int ring)
343{
344 if (ring == 0) {
345 writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
346 return;
347 }
348
349 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
350 switch (ring) {
351#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
352#ifdef USE_PCI_INTB
353 case 1:
354#endif
355#ifdef USE_PCI_INTC
356 case 2:
357#endif
358#ifdef USE_PCI_INTD
359 case 3:
360#endif
361 writel(INTRN_MASK_RX_EN, cp->regs +
362 REG_PLUS_INTRN_MASK(ring));
363 break;
364#endif
365 default:
366 break;
367 }
368 }
369}
370
371static inline void cas_unmask_intr(struct cas *cp)
372{
373 int i;
374
375 for (i = 0; i < N_RX_COMP_RINGS; i++)
376 cas_enable_irq(cp, i);
377}
378
379static inline void cas_entropy_gather(struct cas *cp)
380{
381#ifdef USE_ENTROPY_DEV
382 if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
383 return;
384
385 batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
386 readl(cp->regs + REG_ENTROPY_IV),
387 sizeof(uint64_t)*8);
388#endif
389}
390
391static inline void cas_entropy_reset(struct cas *cp)
392{
393#ifdef USE_ENTROPY_DEV
394 if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
395 return;
396
397 writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
398 cp->regs + REG_BIM_LOCAL_DEV_EN);
399 writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
400 writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);
401
402
403 if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
404 cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
405#endif
406}
407
408
409
410
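/* MIF-based PHY register access: post a frame to REG_MIF_FRAME and poll
 * the turn-around LSB until the PHY completes the transaction.  Reads
 * return 0xFFFF and writes return -1 if the operation times out.
 */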
411static u16 cas_phy_read(struct cas *cp, int reg)
412{
413 u32 cmd;
414 int limit = STOP_TRIES_PHY;
415
416 cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
417 cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
418 cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
419 cmd |= MIF_FRAME_TURN_AROUND_MSB;
420 writel(cmd, cp->regs + REG_MIF_FRAME);
421
422
423 while (limit-- > 0) {
424 udelay(10);
425 cmd = readl(cp->regs + REG_MIF_FRAME);
426 if (cmd & MIF_FRAME_TURN_AROUND_LSB)
427 return (cmd & MIF_FRAME_DATA_MASK);
428 }
429 return 0xFFFF;
430}
431
432static int cas_phy_write(struct cas *cp, int reg, u16 val)
433{
434 int limit = STOP_TRIES_PHY;
435 u32 cmd;
436
437 cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
438 cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
439 cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
440 cmd |= MIF_FRAME_TURN_AROUND_MSB;
441 cmd |= val & MIF_FRAME_DATA_MASK;
442 writel(cmd, cp->regs + REG_MIF_FRAME);
443
444
445 while (limit-- > 0) {
446 udelay(10);
447 cmd = readl(cp->regs + REG_MIF_FRAME);
448 if (cmd & MIF_FRAME_TURN_AROUND_LSB)
449 return 0;
450 }
451 return -1;
452}
453
454static void cas_phy_powerup(struct cas *cp)
455{
456 u16 ctl = cas_phy_read(cp, MII_BMCR);
457
458 if ((ctl & BMCR_PDOWN) == 0)
459 return;
460 ctl &= ~BMCR_PDOWN;
461 cas_phy_write(cp, MII_BMCR, ctl);
462}
463
464static void cas_phy_powerdown(struct cas *cp)
465{
466 u16 ctl = cas_phy_read(cp, MII_BMCR);
467
468 if (ctl & BMCR_PDOWN)
469 return;
470 ctl |= BMCR_PDOWN;
471 cas_phy_write(cp, MII_BMCR, ctl);
472}
473
474
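/* Local page bookkeeping for receive buffers: each cas_page_t wraps a
 * (possibly compound) page of order cp->page_order that is DMA-mapped
 * for the RX descriptor rings.
 */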
475static int cas_page_free(struct cas *cp, cas_page_t *page)
476{
477 pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
478 PCI_DMA_FROMDEVICE);
479 __free_pages(page->buffer, cp->page_order);
480 kfree(page);
481 return 0;
482}
483
484#ifdef RX_COUNT_BUFFERS
485#define RX_USED_ADD(x, y) ((x)->used += (y))
486#define RX_USED_SET(x, y) ((x)->used = (y))
487#else
488#define RX_USED_ADD(x, y)
489#define RX_USED_SET(x, y)
490#endif
491
492
493
494
495static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
496{
497 cas_page_t *page;
498
499 page = kmalloc(sizeof(cas_page_t), flags);
500 if (!page)
501 return NULL;
502
503 INIT_LIST_HEAD(&page->list);
504 RX_USED_SET(page, 0);
505 page->buffer = alloc_pages(flags, cp->page_order);
506 if (!page->buffer)
507 goto page_err;
508 page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
509 cp->page_size, PCI_DMA_FROMDEVICE);
510 return page;
511
512page_err:
513 kfree(page);
514 return NULL;
515}
516
517
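/* Initialize the spare/in-use buffer lists; the pages themselves are
 * allocated on demand by cas_spare_recover().
 */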
518static void cas_spare_init(struct cas *cp)
519{
520 spin_lock(&cp->rx_inuse_lock);
521 INIT_LIST_HEAD(&cp->rx_inuse_list);
522 spin_unlock(&cp->rx_inuse_lock);
523
524 spin_lock(&cp->rx_spare_lock);
525 INIT_LIST_HEAD(&cp->rx_spare_list);
526 cp->rx_spares_needed = RX_SPARE_COUNT;
527 spin_unlock(&cp->rx_spare_lock);
528}
529
530
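/* Free every page currently sitting on the spare and in-use lists. */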
531static void cas_spare_free(struct cas *cp)
532{
533 struct list_head list, *elem, *tmp;
534
535
536 INIT_LIST_HEAD(&list);
537 spin_lock(&cp->rx_spare_lock);
538 list_splice_init(&cp->rx_spare_list, &list);
539 spin_unlock(&cp->rx_spare_lock);
540 list_for_each_safe(elem, tmp, &list) {
541 cas_page_free(cp, list_entry(elem, cas_page_t, list));
542 }
543
544 INIT_LIST_HEAD(&list);
545#if 1
546
547
548
549
550 spin_lock(&cp->rx_inuse_lock);
551 list_splice_init(&cp->rx_inuse_list, &list);
552 spin_unlock(&cp->rx_inuse_lock);
553#else
554 spin_lock(&cp->rx_spare_lock);
555 list_splice_init(&cp->rx_inuse_list, &list);
556 spin_unlock(&cp->rx_spare_lock);
557#endif
558 list_for_each_safe(elem, tmp, &list) {
559 cas_page_free(cp, list_entry(elem, cas_page_t, list));
560 }
561}
562
563
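/* Replenish the spare pool: reclaim in-use pages whose page_count has
 * dropped back to 1 (no skb fragment references them any more), then
 * allocate fresh pages for whatever is still needed.
 */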
564static void cas_spare_recover(struct cas *cp, const gfp_t flags)
565{
566 struct list_head list, *elem, *tmp;
567 int needed, i;
568
569
570
571
572
573
574 INIT_LIST_HEAD(&list);
575 spin_lock(&cp->rx_inuse_lock);
576 list_splice_init(&cp->rx_inuse_list, &list);
577 spin_unlock(&cp->rx_inuse_lock);
578
579 list_for_each_safe(elem, tmp, &list) {
580 cas_page_t *page = list_entry(elem, cas_page_t, list);
581
582
583
584
585
586
587
588
589
590
591
592
593
594 if (page_count(page->buffer) > 1)
595 continue;
596
597 list_del(elem);
598 spin_lock(&cp->rx_spare_lock);
599 if (cp->rx_spares_needed > 0) {
600 list_add(elem, &cp->rx_spare_list);
601 cp->rx_spares_needed--;
602 spin_unlock(&cp->rx_spare_lock);
603 } else {
604 spin_unlock(&cp->rx_spare_lock);
605 cas_page_free(cp, page);
606 }
607 }
608
609
610 if (!list_empty(&list)) {
611 spin_lock(&cp->rx_inuse_lock);
612 list_splice(&list, &cp->rx_inuse_list);
613 spin_unlock(&cp->rx_inuse_lock);
614 }
615
616 spin_lock(&cp->rx_spare_lock);
617 needed = cp->rx_spares_needed;
618 spin_unlock(&cp->rx_spare_lock);
619 if (!needed)
620 return;
621
622
623 INIT_LIST_HEAD(&list);
624 i = 0;
625 while (i < needed) {
626 cas_page_t *spare = cas_page_alloc(cp, flags);
627 if (!spare)
628 break;
629 list_add(&spare->list, &list);
630 i++;
631 }
632
633 spin_lock(&cp->rx_spare_lock);
634 list_splice(&list, &cp->rx_spare_list);
635 cp->rx_spares_needed -= i;
636 spin_unlock(&cp->rx_spare_lock);
637}
638
639
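/* Pull a page off the spare list, refilling the list atomically if it
 * is empty.  Every RX_SPARE_RECOVER_VAL dequeues a spare-recovery reset
 * task is scheduled to replenish the pool in the background.
 */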
640static cas_page_t *cas_page_dequeue(struct cas *cp)
641{
642 struct list_head *entry;
643 int recover;
644
645 spin_lock(&cp->rx_spare_lock);
646 if (list_empty(&cp->rx_spare_list)) {
647
648 spin_unlock(&cp->rx_spare_lock);
649 cas_spare_recover(cp, GFP_ATOMIC);
650 spin_lock(&cp->rx_spare_lock);
651 if (list_empty(&cp->rx_spare_list)) {
652 if (netif_msg_rx_err(cp))
653 printk(KERN_ERR "%s: no spare buffers "
654 "available.\n", cp->dev->name);
655 spin_unlock(&cp->rx_spare_lock);
656 return NULL;
657 }
658 }
659
660 entry = cp->rx_spare_list.next;
661 list_del(entry);
662 recover = ++cp->rx_spares_needed;
663 spin_unlock(&cp->rx_spare_lock);
664
665
666 if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
667#if 1
668 atomic_inc(&cp->reset_task_pending);
669 atomic_inc(&cp->reset_task_pending_spare);
670 schedule_work(&cp->reset_task);
671#else
672 atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
673 schedule_work(&cp->reset_task);
674#endif
675 }
676 return list_entry(entry, cas_page_t, list);
677}
678
679
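/* Enable or disable MIF auto-polling of the PHY's BMSR register so that
 * link changes are reported through the MIF interrupt instead of manual
 * reads.
 */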
680static void cas_mif_poll(struct cas *cp, const int enable)
681{
682 u32 cfg;
683
684 cfg = readl(cp->regs + REG_MIF_CFG);
685 cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);
686
687 if (cp->phy_type & CAS_PHY_MII_MDIO1)
688 cfg |= MIF_CFG_PHY_SELECT;
689
690
691 if (enable) {
692 cfg |= MIF_CFG_POLL_EN;
693 cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
694 cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
695 }
696 writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
697 cp->regs + REG_MIF_MASK);
698 writel(cfg, cp->regs + REG_MIF_CFG);
699}
700
701
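/* Start (or restart) link bring-up according to cp->link_cntl.  Link
 * state is modified without further locking here, so the caller is
 * expected to hold cp->lock.
 */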
702static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
703{
704 u16 ctl;
705#if 1
706 int lcntl;
707 int changed = 0;
708 int oldstate = cp->lstate;
709 int link_was_not_down = !(oldstate == link_down);
710#endif
711
712 if (!ep)
713 goto start_aneg;
714 lcntl = cp->link_cntl;
715 if (ep->autoneg == AUTONEG_ENABLE)
716 cp->link_cntl = BMCR_ANENABLE;
717 else {
718 cp->link_cntl = 0;
719 if (ep->speed == SPEED_100)
720 cp->link_cntl |= BMCR_SPEED100;
721 else if (ep->speed == SPEED_1000)
722 cp->link_cntl |= CAS_BMCR_SPEED1000;
723 if (ep->duplex == DUPLEX_FULL)
724 cp->link_cntl |= BMCR_FULLDPLX;
725 }
726#if 1
727 changed = (lcntl != cp->link_cntl);
728#endif
729start_aneg:
730 if (cp->lstate == link_up) {
731 printk(KERN_INFO "%s: PCS link down.\n",
732 cp->dev->name);
733 } else {
734 if (changed) {
735 printk(KERN_INFO "%s: link configuration changed\n",
736 cp->dev->name);
737 }
738 }
739 cp->lstate = link_down;
740 cp->link_transition = LINK_TRANSITION_LINK_DOWN;
741 if (!cp->hw_running)
742 return;
743#if 1
744
745
746
747
748
749 if (oldstate == link_up)
750 netif_carrier_off(cp->dev);
751 if (changed && link_was_not_down) {
752
753
754
755
756
757 atomic_inc(&cp->reset_task_pending);
758 atomic_inc(&cp->reset_task_pending_all);
759 schedule_work(&cp->reset_task);
760 cp->timer_ticks = 0;
761 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
762 return;
763 }
764#endif
765 if (cp->phy_type & CAS_PHY_SERDES) {
766 u32 val = readl(cp->regs + REG_PCS_MII_CTRL);
767
768 if (cp->link_cntl & BMCR_ANENABLE) {
769 val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
770 cp->lstate = link_aneg;
771 } else {
772 if (cp->link_cntl & BMCR_FULLDPLX)
773 val |= PCS_MII_CTRL_DUPLEX;
774 val &= ~PCS_MII_AUTONEG_EN;
775 cp->lstate = link_force_ok;
776 }
777 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
778 writel(val, cp->regs + REG_PCS_MII_CTRL);
779
780 } else {
781 cas_mif_poll(cp, 0);
782 ctl = cas_phy_read(cp, MII_BMCR);
783 ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
784 CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
785 ctl |= cp->link_cntl;
786 if (ctl & BMCR_ANENABLE) {
787 ctl |= BMCR_ANRESTART;
788 cp->lstate = link_aneg;
789 } else {
790 cp->lstate = link_force_ok;
791 }
792 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
793 cas_phy_write(cp, MII_BMCR, ctl);
794 cas_mif_poll(cp, 1);
795 }
796
797 cp->timer_ticks = 0;
798 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
799}
800
801
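/* Reset the MII PHY via BMCR_RESET and poll until the bit self-clears;
 * returns non-zero on timeout.
 */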
802static int cas_reset_mii_phy(struct cas *cp)
803{
804 int limit = STOP_TRIES_PHY;
805 u16 val;
806
807 cas_phy_write(cp, MII_BMCR, BMCR_RESET);
808 udelay(100);
809 while (--limit) {
810 val = cas_phy_read(cp, MII_BMCR);
811 if ((val & BMCR_RESET) == 0)
812 break;
813 udelay(10);
814 }
815 return (limit <= 0);
816}
817
818static int cas_saturn_firmware_init(struct cas *cp)
819{
820 const struct firmware *fw;
821 const char fw_name[] = "sun/cassini.bin";
822 int err;
823
824 if (PHY_NS_DP83065 != cp->phy_id)
825 return 0;
826
827 err = request_firmware(&fw, fw_name, &cp->pdev->dev);
828 if (err) {
829 printk(KERN_ERR "cassini: Failed to load firmware \"%s\"\n",
830 fw_name);
831 return err;
832 }
833 if (fw->size < 2) {
834 printk(KERN_ERR "cassini: bogus length %zu in \"%s\"\n",
835 fw->size, fw_name);
836 err = -EINVAL;
837 goto out;
838 }
	cp->fw_load_addr = fw->data[1] << 8 | fw->data[0];
840 cp->fw_size = fw->size - 2;
841 cp->fw_data = vmalloc(cp->fw_size);
842 if (!cp->fw_data) {
843 err = -ENOMEM;
844 printk(KERN_ERR "cassini: \"%s\" Failed %d\n", fw_name, err);
845 goto out;
846 }
847 memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
848out:
849 release_firmware(fw);
850 return err;
851}
852
853static void cas_saturn_firmware_load(struct cas *cp)
854{
855 int i;
856
857 cas_phy_powerdown(cp);
858
859
860 cas_phy_write(cp, DP83065_MII_MEM, 0x0);
861
862
863 cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
864 cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
865 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
866 cas_phy_write(cp, DP83065_MII_REGD, 0x82);
867 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
868 cas_phy_write(cp, DP83065_MII_REGD, 0x0);
869 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
870 cas_phy_write(cp, DP83065_MII_REGD, 0x39);
871
872
873 cas_phy_write(cp, DP83065_MII_MEM, 0x1);
874 cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
875 for (i = 0; i < cp->fw_size; i++)
876 cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);
877
878
879 cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
880 cas_phy_write(cp, DP83065_MII_REGD, 0x1);
881}
882
883
884
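/* PHY/PCS bring-up: MII PHYs get vendor-specific workarounds plus
 * autonegotiation advertisement programming, while SERDES links go
 * through the PCS reset and advertisement sequence instead.
 */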
885static void cas_phy_init(struct cas *cp)
886{
887 u16 val;
888
889
890 if (CAS_PHY_MII(cp->phy_type)) {
891 writel(PCS_DATAPATH_MODE_MII,
892 cp->regs + REG_PCS_DATAPATH_MODE);
893
894 cas_mif_poll(cp, 0);
895 cas_reset_mii_phy(cp);
896
897 if (PHY_LUCENT_B0 == cp->phy_id) {
898
899 cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
900 cas_phy_write(cp, MII_BMCR, 0x00f1);
901 cas_phy_write(cp, LUCENT_MII_REG, 0x0);
902
903 } else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
904
905 cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
906 cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
907 cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
908 cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
909 cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
910 cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
911 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
912 cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
913 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
914 cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
915 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);
916
917 } else if (PHY_BROADCOM_5411 == cp->phy_id) {
918 val = cas_phy_read(cp, BROADCOM_MII_REG4);
919 val = cas_phy_read(cp, BROADCOM_MII_REG4);
920 if (val & 0x0080) {
921
922 cas_phy_write(cp, BROADCOM_MII_REG4,
923 val & ~0x0080);
924 }
925
926 } else if (cp->cas_flags & CAS_FLAG_SATURN) {
927 writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
928 SATURN_PCFG_FSI : 0x0,
929 cp->regs + REG_SATURN_PCFG);
930
931
932
933
934
935 if (PHY_NS_DP83065 == cp->phy_id) {
936 cas_saturn_firmware_load(cp);
937 }
938 cas_phy_powerup(cp);
939 }
940
941
942 val = cas_phy_read(cp, MII_BMCR);
943 val &= ~BMCR_ANENABLE;
944 cas_phy_write(cp, MII_BMCR, val);
945 udelay(10);
946
947 cas_phy_write(cp, MII_ADVERTISE,
948 cas_phy_read(cp, MII_ADVERTISE) |
949 (ADVERTISE_10HALF | ADVERTISE_10FULL |
950 ADVERTISE_100HALF | ADVERTISE_100FULL |
951 CAS_ADVERTISE_PAUSE |
952 CAS_ADVERTISE_ASYM_PAUSE));
953
954 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
955
956
957
958 val = cas_phy_read(cp, CAS_MII_1000_CTRL);
959 val &= ~CAS_ADVERTISE_1000HALF;
960 val |= CAS_ADVERTISE_1000FULL;
961 cas_phy_write(cp, CAS_MII_1000_CTRL, val);
962 }
963
964 } else {
965
966 u32 val;
967 int limit;
968
969 writel(PCS_DATAPATH_MODE_SERDES,
970 cp->regs + REG_PCS_DATAPATH_MODE);
971
972
973 if (cp->cas_flags & CAS_FLAG_SATURN)
974 writel(0, cp->regs + REG_SATURN_PCFG);
975
976
977 val = readl(cp->regs + REG_PCS_MII_CTRL);
978 val |= PCS_MII_RESET;
979 writel(val, cp->regs + REG_PCS_MII_CTRL);
980
981 limit = STOP_TRIES;
982 while (--limit > 0) {
983 udelay(10);
984 if ((readl(cp->regs + REG_PCS_MII_CTRL) &
985 PCS_MII_RESET) == 0)
986 break;
987 }
988 if (limit <= 0)
989 printk(KERN_WARNING "%s: PCS reset bit would not "
990 "clear [%08x].\n", cp->dev->name,
991 readl(cp->regs + REG_PCS_STATE_MACHINE));
992
993
994
995
996 writel(0x0, cp->regs + REG_PCS_CFG);
997
998
999 val = readl(cp->regs + REG_PCS_MII_ADVERT);
1000 val &= ~PCS_MII_ADVERT_HD;
1001 val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
1002 PCS_MII_ADVERT_ASYM_PAUSE);
1003 writel(val, cp->regs + REG_PCS_MII_ADVERT);
1004
1005
1006 writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);
1007
1008
1009 writel(PCS_SERDES_CTRL_SYNCD_EN,
1010 cp->regs + REG_PCS_SERDES_CTRL);
1011 }
1012}
1013
1014
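/* Check the PCS link state.  Returns non-zero when the caller should
 * schedule a chip reset as part of the PCS link-down workaround.
 */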
1015static int cas_pcs_link_check(struct cas *cp)
1016{
1017 u32 stat, state_machine;
1018 int retval = 0;
1019
1020
1021
1022
1023
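	/* The link status bit latches on zero, so read the register a
	 * second time in that case to see a link-up transition.
	 */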
1024 stat = readl(cp->regs + REG_PCS_MII_STATUS);
1025 if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
1026 stat = readl(cp->regs + REG_PCS_MII_STATUS);
1027
1028
1029
1030
1031 if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
1032 PCS_MII_STATUS_REMOTE_FAULT)) ==
1033 (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT)) {
1034 if (netif_msg_link(cp))
1035 printk(KERN_INFO "%s: PCS RemoteFault\n",
1036 cp->dev->name);
1037 }
1038
1039
1040
1041
1042 state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
1043 if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
1044 stat &= ~PCS_MII_STATUS_LINK_STATUS;
1045 } else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
1046 stat |= PCS_MII_STATUS_LINK_STATUS;
1047 }
1048
1049 if (stat & PCS_MII_STATUS_LINK_STATUS) {
1050 if (cp->lstate != link_up) {
1051 if (cp->opened) {
1052 cp->lstate = link_up;
1053 cp->link_transition = LINK_TRANSITION_LINK_UP;
1054
1055 cas_set_link_modes(cp);
1056 netif_carrier_on(cp->dev);
1057 }
1058 }
1059 } else if (cp->lstate == link_up) {
1060 cp->lstate = link_down;
1061 if (link_transition_timeout != 0 &&
1062 cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
1063 !cp->link_transition_jiffies_valid) {
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076 retval = 1;
1077 cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
1078 cp->link_transition_jiffies = jiffies;
1079 cp->link_transition_jiffies_valid = 1;
1080 } else {
1081 cp->link_transition = LINK_TRANSITION_ON_FAILURE;
1082 }
1083 netif_carrier_off(cp->dev);
1084 if (cp->opened && netif_msg_link(cp)) {
1085 printk(KERN_INFO "%s: PCS link down.\n",
1086 cp->dev->name);
1087 }
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097 if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
1098
1099 stat = readl(cp->regs + REG_PCS_SERDES_STATE);
1100 if (stat == 0x03)
1101 return 1;
1102 }
1103 } else if (cp->lstate == link_down) {
1104 if (link_transition_timeout != 0 &&
1105 cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
1106 !cp->link_transition_jiffies_valid) {
1107
1108
1109
1110
1111
1112 retval = 1;
1113 cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
1114 cp->link_transition_jiffies = jiffies;
1115 cp->link_transition_jiffies_valid = 1;
1116 } else {
1117 cp->link_transition = LINK_TRANSITION_STILL_FAILED;
1118 }
1119 }
1120
1121 return retval;
1122}
1123
1124static int cas_pcs_interrupt(struct net_device *dev,
1125 struct cas *cp, u32 status)
1126{
1127 u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);
1128
1129 if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
1130 return 0;
1131 return cas_pcs_link_check(cp);
1132}
1133
1134static int cas_txmac_interrupt(struct net_device *dev,
1135 struct cas *cp, u32 status)
1136{
1137 u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);
1138
1139 if (!txmac_stat)
1140 return 0;
1141
1142 if (netif_msg_intr(cp))
1143 printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
1144 cp->dev->name, txmac_stat);
1145
1146
1147
1148
1149 if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
1150 !(txmac_stat & ~MAC_TX_DEFER_TIMER))
1151 return 0;
1152
1153 spin_lock(&cp->stat_lock[0]);
1154 if (txmac_stat & MAC_TX_UNDERRUN) {
1155 printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
1156 dev->name);
1157 cp->net_stats[0].tx_fifo_errors++;
1158 }
1159
1160 if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
1161 printk(KERN_ERR "%s: TX MAC max packet size error.\n",
1162 dev->name);
1163 cp->net_stats[0].tx_errors++;
1164 }
1165
1166
1167
1168
1169 if (txmac_stat & MAC_TX_COLL_NORMAL)
1170 cp->net_stats[0].collisions += 0x10000;
1171
1172 if (txmac_stat & MAC_TX_COLL_EXCESS) {
1173 cp->net_stats[0].tx_aborted_errors += 0x10000;
1174 cp->net_stats[0].collisions += 0x10000;
1175 }
1176
1177 if (txmac_stat & MAC_TX_COLL_LATE) {
1178 cp->net_stats[0].tx_aborted_errors += 0x10000;
1179 cp->net_stats[0].collisions += 0x10000;
1180 }
1181 spin_unlock(&cp->stat_lock[0]);
1182
1183
1184
1185
1186 return 0;
1187}
1188
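/* Load a header-parser (HP) instruction sequence into the chip's
 * instruction RAM, one entry at a time until the terminating entry
 * whose note field is NULL.
 */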
1189static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
1190{
1191 cas_hp_inst_t *inst;
1192 u32 val;
1193 int i;
1194
1195 i = 0;
1196 while ((inst = firmware) && inst->note) {
1197 writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);
1198
1199 val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
1200 val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
1201 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);
1202
1203 val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
1204 val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
1205 val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
1206 val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
1207 val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
1208 val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
1209 val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
1210 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);
1211
1212 val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
1213 val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
1214 val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
1215 val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
1216 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
1217 ++firmware;
1218 ++i;
1219 }
1220}
1221
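/* Program the RX DMA engine: descriptor and completion ring base
 * addresses, pause thresholds, interrupt blanking, page sizing and,
 * when a header-parser program is in use, the HP configuration.
 */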
1222static void cas_init_rx_dma(struct cas *cp)
1223{
1224 u64 desc_dma = cp->block_dvma;
1225 u32 val;
1226 int i, size;
1227
1228
1229 val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
1230 val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
1231 val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
1232 if ((N_RX_DESC_RINGS > 1) &&
1233 (cp->cas_flags & CAS_FLAG_REG_PLUS))
1234 val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
1235 writel(val, cp->regs + REG_RX_CFG);
1236
1237 val = (unsigned long) cp->init_rxds[0] -
1238 (unsigned long) cp->init_block;
1239 writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
1240 writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
1241 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
1242
1243 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1244
1245
1246
1247 val = (unsigned long) cp->init_rxds[1] -
1248 (unsigned long) cp->init_block;
1249 writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
1250 writel((desc_dma + val) & 0xffffffff, cp->regs +
1251 REG_PLUS_RX_DB1_LOW);
1252 writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
1253 REG_PLUS_RX_KICK1);
1254 }
1255
1256
1257 val = (unsigned long) cp->init_rxcs[0] -
1258 (unsigned long) cp->init_block;
1259 writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
1260 writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);
1261
1262 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1263
1264 for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
1265 val = (unsigned long) cp->init_rxcs[i] -
1266 (unsigned long) cp->init_block;
1267 writel((desc_dma + val) >> 32, cp->regs +
1268 REG_PLUS_RX_CBN_HI(i));
1269 writel((desc_dma + val) & 0xffffffff, cp->regs +
1270 REG_PLUS_RX_CBN_LOW(i));
1271 }
1272 }
1273
1274
1275
1276
1277
1278 readl(cp->regs + REG_INTR_STATUS_ALIAS);
1279 writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
1280 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1281 for (i = 1; i < N_RX_COMP_RINGS; i++)
1282 readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));
1283
1284
1285 if (N_RX_COMP_RINGS > 1)
1286 writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
1287 cp->regs + REG_PLUS_ALIASN_CLEAR(1));
1288
1289 for (i = 2; i < N_RX_COMP_RINGS; i++)
1290 writel(INTR_RX_DONE_ALT,
1291 cp->regs + REG_PLUS_ALIASN_CLEAR(i));
1292 }
1293
1294
1295 val = CAS_BASE(RX_PAUSE_THRESH_OFF,
1296 cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
1297 val |= CAS_BASE(RX_PAUSE_THRESH_ON,
1298 cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
1299 writel(val, cp->regs + REG_RX_PAUSE_THRESH);
1300
1301
1302 for (i = 0; i < 64; i++) {
1303 writel(i, cp->regs + REG_RX_TABLE_ADDR);
1304 writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
1305 writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
1306 writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
1307 }
1308
1309
1310 writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
1311 writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);
1312
1313
1314#ifdef USE_RX_BLANK
1315 val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
1316 val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
1317 writel(val, cp->regs + REG_RX_BLANK);
1318#else
1319 writel(0x0, cp->regs + REG_RX_BLANK);
1320#endif
1321
1322
1323
1324
1325
1326
1327
1328 val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
1329 writel(val, cp->regs + REG_RX_AE_THRESH);
1330 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1331 val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
1332 writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
1333 }
1334
1335
1336
1337
1338 writel(0x0, cp->regs + REG_RX_RED);
1339
1340
1341 val = 0;
1342 if (cp->page_size == 0x1000)
1343 val = 0x1;
1344 else if (cp->page_size == 0x2000)
1345 val = 0x2;
1346 else if (cp->page_size == 0x4000)
1347 val = 0x3;
1348
1349
1350 size = cp->dev->mtu + 64;
1351 if (size > cp->page_size)
1352 size = cp->page_size;
1353
1354 if (size <= 0x400)
1355 i = 0x0;
1356 else if (size <= 0x800)
1357 i = 0x1;
1358 else if (size <= 0x1000)
1359 i = 0x2;
1360 else
1361 i = 0x3;
1362
1363 cp->mtu_stride = 1 << (i + 10);
1364 val = CAS_BASE(RX_PAGE_SIZE, val);
1365 val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
1366 val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
1367 val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
1368 writel(val, cp->regs + REG_RX_PAGE_SIZE);
1369
1370
1371 if (CAS_HP_FIRMWARE == cas_prog_null)
1372 return;
1373
1374 val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
1375 val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
1376 val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
1377 writel(val, cp->regs + REG_HP_CFG);
1378}
1379
1380static inline void cas_rxc_init(struct cas_rx_comp *rxc)
1381{
1382 memset(rxc, 0, sizeof(*rxc));
1383 rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
1384}
1385
1386
1387
1388
1389
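/* Return the spare page for 'index' if nothing else still references
 * it; otherwise park it on the in-use list and hand back a freshly
 * dequeued page.
 */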
1390static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
1391{
1392 cas_page_t *page = cp->rx_pages[1][index];
1393 cas_page_t *new;
1394
1395 if (page_count(page->buffer) == 1)
1396 return page;
1397
1398 new = cas_page_dequeue(cp);
1399 if (new) {
1400 spin_lock(&cp->rx_inuse_lock);
1401 list_add(&page->list, &cp->rx_inuse_list);
1402 spin_unlock(&cp->rx_inuse_lock);
1403 }
1404 return new;
1405}
1406
1407
1408static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
1409 const int index)
1410{
1411 cas_page_t **page0 = cp->rx_pages[0];
1412 cas_page_t **page1 = cp->rx_pages[1];
1413
1414
1415 if (page_count(page0[index]->buffer) > 1) {
1416 cas_page_t *new = cas_page_spare(cp, index);
1417 if (new) {
1418 page1[index] = page0[index];
1419 page0[index] = new;
1420 }
1421 }
1422 RX_USED_SET(page0[index], 0);
1423 return page0[index];
1424}
1425
1426static void cas_clean_rxds(struct cas *cp)
1427{
1428
1429 struct cas_rx_desc *rxd = cp->init_rxds[0];
1430 int i, size;
1431
1432
1433 for (i = 0; i < N_RX_FLOWS; i++) {
1434 struct sk_buff *skb;
1435 while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
1436 cas_skb_release(skb);
1437 }
1438 }
1439
1440
1441 size = RX_DESC_RINGN_SIZE(0);
1442 for (i = 0; i < size; i++) {
1443 cas_page_t *page = cas_page_swap(cp, 0, i);
1444 rxd[i].buffer = cpu_to_le64(page->dma_addr);
1445 rxd[i].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
1446 CAS_BASE(RX_INDEX_RING, 0));
1447 }
1448
1449 cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4;
1450 cp->rx_last[0] = 0;
1451 cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
1452}
1453
1454static void cas_clean_rxcs(struct cas *cp)
1455{
1456 int i, j;
1457
1458
1459 memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
1460 memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
1461 for (i = 0; i < N_RX_COMP_RINGS; i++) {
1462 struct cas_rx_comp *rxc = cp->init_rxcs[i];
1463 for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
1464 cas_rxc_init(rxc + j);
1465 }
1466 }
1467}
1468
1469#if 0
1470
1471
1472
1473
1474
1475
1476static int cas_rxmac_reset(struct cas *cp)
1477{
1478 struct net_device *dev = cp->dev;
1479 int limit;
1480 u32 val;
1481
1482
1483 writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1484 for (limit = 0; limit < STOP_TRIES; limit++) {
1485 if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
1486 break;
1487 udelay(10);
1488 }
1489 if (limit == STOP_TRIES) {
1490 printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
1491 "chip.\n", dev->name);
1492 return 1;
1493 }
1494
1495
1496 writel(0, cp->regs + REG_RX_CFG);
1497 for (limit = 0; limit < STOP_TRIES; limit++) {
1498 if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
1499 break;
1500 udelay(10);
1501 }
1502 if (limit == STOP_TRIES) {
1503 printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
1504 "chip.\n", dev->name);
1505 return 1;
1506 }
1507
1508 mdelay(5);
1509
1510
1511 writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
1512 for (limit = 0; limit < STOP_TRIES; limit++) {
1513 if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
1514 break;
1515 udelay(10);
1516 }
1517 if (limit == STOP_TRIES) {
1518 printk(KERN_ERR "%s: RX reset command will not execute, "
1519 "resetting whole chip.\n", dev->name);
1520 return 1;
1521 }
1522
1523
1524 cas_clean_rxds(cp);
1525 cas_clean_rxcs(cp);
1526
1527
1528 cas_init_rx_dma(cp);
1529
1530
1531 val = readl(cp->regs + REG_RX_CFG);
1532 writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
1533 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
1534 val = readl(cp->regs + REG_MAC_RX_CFG);
1535 writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1536 return 0;
1537}
1538#endif
1539
1540static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
1541 u32 status)
1542{
1543 u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);
1544
1545 if (!stat)
1546 return 0;
1547
1548 if (netif_msg_intr(cp))
1549 printk(KERN_DEBUG "%s: rxmac interrupt, stat: 0x%x\n",
1550 cp->dev->name, stat);
1551
1552
1553 spin_lock(&cp->stat_lock[0]);
1554 if (stat & MAC_RX_ALIGN_ERR)
1555 cp->net_stats[0].rx_frame_errors += 0x10000;
1556
1557 if (stat & MAC_RX_CRC_ERR)
1558 cp->net_stats[0].rx_crc_errors += 0x10000;
1559
1560 if (stat & MAC_RX_LEN_ERR)
1561 cp->net_stats[0].rx_length_errors += 0x10000;
1562
1563 if (stat & MAC_RX_OVERFLOW) {
1564 cp->net_stats[0].rx_over_errors++;
1565 cp->net_stats[0].rx_fifo_errors++;
1566 }
1567
1568
1569
1570
1571 spin_unlock(&cp->stat_lock[0]);
1572 return 0;
1573}
1574
1575static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
1576 u32 status)
1577{
1578 u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);
1579
1580 if (!stat)
1581 return 0;
1582
1583 if (netif_msg_intr(cp))
1584 printk(KERN_DEBUG "%s: mac interrupt, stat: 0x%x\n",
1585 cp->dev->name, stat);
1586
1587
1588
1589
1590
1591 if (stat & MAC_CTRL_PAUSE_STATE)
1592 cp->pause_entered++;
1593
1594 if (stat & MAC_CTRL_PAUSE_RECEIVED)
1595 cp->pause_last_time_recvd = (stat >> 16);
1596
1597 return 0;
1598}
1599
1600
1601
1602static inline int cas_mdio_link_not_up(struct cas *cp)
1603{
1604 u16 val;
1605
1606 switch (cp->lstate) {
1607 case link_force_ret:
1608 if (netif_msg_link(cp))
1609 printk(KERN_INFO "%s: Autoneg failed again, keeping"
1610 " forced mode\n", cp->dev->name);
1611 cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
1612 cp->timer_ticks = 5;
1613 cp->lstate = link_force_ok;
1614 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1615 break;
1616
1617 case link_aneg:
1618 val = cas_phy_read(cp, MII_BMCR);
1619
1620
1621
1622
1623 val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
1624 val |= BMCR_FULLDPLX;
1625 val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
1626 CAS_BMCR_SPEED1000 : BMCR_SPEED100;
1627 cas_phy_write(cp, MII_BMCR, val);
1628 cp->timer_ticks = 5;
1629 cp->lstate = link_force_try;
1630 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1631 break;
1632
1633 case link_force_try:
1634
1635 val = cas_phy_read(cp, MII_BMCR);
1636 cp->timer_ticks = 5;
1637 if (val & CAS_BMCR_SPEED1000) {
1638 val &= ~CAS_BMCR_SPEED1000;
1639 val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
1640 cas_phy_write(cp, MII_BMCR, val);
1641 break;
1642 }
1643
1644 if (val & BMCR_SPEED100) {
1645 if (val & BMCR_FULLDPLX)
1646 val &= ~BMCR_FULLDPLX;
1647 else {
1648 val &= ~BMCR_SPEED100;
1649 }
1650 cas_phy_write(cp, MII_BMCR, val);
1651 break;
1652 }
1653 default:
1654 break;
1655 }
1656 return 0;
1657}
1658
1659
1660
1661static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
1662{
1663 int restart;
1664
1665 if (bmsr & BMSR_LSTATUS) {
1666
1667
1668
1669
1670
1671 if ((cp->lstate == link_force_try) &&
1672 (cp->link_cntl & BMCR_ANENABLE)) {
1673 cp->lstate = link_force_ret;
1674 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1675 cas_mif_poll(cp, 0);
1676 cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
1677 cp->timer_ticks = 5;
1678 if (cp->opened && netif_msg_link(cp))
1679 printk(KERN_INFO "%s: Got link after fallback, retrying"
1680 " autoneg once...\n", cp->dev->name);
1681 cas_phy_write(cp, MII_BMCR,
1682 cp->link_fcntl | BMCR_ANENABLE |
1683 BMCR_ANRESTART);
1684 cas_mif_poll(cp, 1);
1685
1686 } else if (cp->lstate != link_up) {
1687 cp->lstate = link_up;
1688 cp->link_transition = LINK_TRANSITION_LINK_UP;
1689
1690 if (cp->opened) {
1691 cas_set_link_modes(cp);
1692 netif_carrier_on(cp->dev);
1693 }
1694 }
1695 return 0;
1696 }
1697
1698
1699
1700
1701 restart = 0;
1702 if (cp->lstate == link_up) {
1703 cp->lstate = link_down;
1704 cp->link_transition = LINK_TRANSITION_LINK_DOWN;
1705
1706 netif_carrier_off(cp->dev);
1707 if (cp->opened && netif_msg_link(cp))
1708 printk(KERN_INFO "%s: Link down\n",
1709 cp->dev->name);
1710 restart = 1;
1711
1712 } else if (++cp->timer_ticks > 10)
1713 cas_mdio_link_not_up(cp);
1714
1715 return restart;
1716}
1717
1718static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
1719 u32 status)
1720{
1721 u32 stat = readl(cp->regs + REG_MIF_STATUS);
1722 u16 bmsr;
1723
1724
1725 if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
1726 return 0;
1727
1728 bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
1729 return cas_mii_link_check(cp, bmsr);
1730}
1731
1732static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
1733 u32 status)
1734{
1735 u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);
1736
1737 if (!stat)
1738 return 0;
1739
1740 printk(KERN_ERR "%s: PCI error [%04x:%04x] ", dev->name, stat,
1741 readl(cp->regs + REG_BIM_DIAG));
1742
1743
1744 if ((stat & PCI_ERR_BADACK) &&
1745 ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
1746 printk("<No ACK64# during ABS64 cycle> ");
1747
1748 if (stat & PCI_ERR_DTRTO)
1749 printk("<Delayed transaction timeout> ");
1750 if (stat & PCI_ERR_OTHER)
1751 printk("<other> ");
1752 if (stat & PCI_ERR_BIM_DMA_WRITE)
1753 printk("<BIM DMA 0 write req> ");
1754 if (stat & PCI_ERR_BIM_DMA_READ)
1755 printk("<BIM DMA 0 read req> ");
1756 printk("\n");
1757
1758 if (stat & PCI_ERR_OTHER) {
1759 u16 cfg;
1760
1761
1762
1763
1764 pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
1765 printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
1766 dev->name, cfg);
1767 if (cfg & PCI_STATUS_PARITY)
1768 printk(KERN_ERR "%s: PCI parity error detected.\n",
1769 dev->name);
1770 if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
1771 printk(KERN_ERR "%s: PCI target abort.\n",
1772 dev->name);
1773 if (cfg & PCI_STATUS_REC_TARGET_ABORT)
1774 printk(KERN_ERR "%s: PCI master acks target abort.\n",
1775 dev->name);
1776 if (cfg & PCI_STATUS_REC_MASTER_ABORT)
1777 printk(KERN_ERR "%s: PCI master abort.\n", dev->name);
1778 if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
1779 printk(KERN_ERR "%s: PCI system error SERR#.\n",
1780 dev->name);
1781 if (cfg & PCI_STATUS_DETECTED_PARITY)
1782 printk(KERN_ERR "%s: PCI parity error.\n",
1783 dev->name);
1784
1785
1786 cfg &= (PCI_STATUS_PARITY |
1787 PCI_STATUS_SIG_TARGET_ABORT |
1788 PCI_STATUS_REC_TARGET_ABORT |
1789 PCI_STATUS_REC_MASTER_ABORT |
1790 PCI_STATUS_SIG_SYSTEM_ERROR |
1791 PCI_STATUS_DETECTED_PARITY);
1792 pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
1793 }
1794
1795
1796 return 1;
1797}
1798
1799
1800
1801
1802
1803
1804static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
1805 u32 status)
1806{
1807 if (status & INTR_RX_TAG_ERROR) {
1808
1809 if (netif_msg_rx_err(cp))
1810 printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
1811 cp->dev->name);
1812 spin_lock(&cp->stat_lock[0]);
1813 cp->net_stats[0].rx_errors++;
1814 spin_unlock(&cp->stat_lock[0]);
1815 goto do_reset;
1816 }
1817
1818 if (status & INTR_RX_LEN_MISMATCH) {
1819
1820 if (netif_msg_rx_err(cp))
1821 printk(KERN_DEBUG "%s: length mismatch for rx frame\n",
1822 cp->dev->name);
1823 spin_lock(&cp->stat_lock[0]);
1824 cp->net_stats[0].rx_errors++;
1825 spin_unlock(&cp->stat_lock[0]);
1826 goto do_reset;
1827 }
1828
1829 if (status & INTR_PCS_STATUS) {
1830 if (cas_pcs_interrupt(dev, cp, status))
1831 goto do_reset;
1832 }
1833
1834 if (status & INTR_TX_MAC_STATUS) {
1835 if (cas_txmac_interrupt(dev, cp, status))
1836 goto do_reset;
1837 }
1838
1839 if (status & INTR_RX_MAC_STATUS) {
1840 if (cas_rxmac_interrupt(dev, cp, status))
1841 goto do_reset;
1842 }
1843
1844 if (status & INTR_MAC_CTRL_STATUS) {
1845 if (cas_mac_interrupt(dev, cp, status))
1846 goto do_reset;
1847 }
1848
1849 if (status & INTR_MIF_STATUS) {
1850 if (cas_mif_interrupt(dev, cp, status))
1851 goto do_reset;
1852 }
1853
1854 if (status & INTR_PCI_ERROR_STATUS) {
1855 if (cas_pci_interrupt(dev, cp, status))
1856 goto do_reset;
1857 }
1858 return 0;
1859
1860do_reset:
1861#if 1
1862 atomic_inc(&cp->reset_task_pending);
1863 atomic_inc(&cp->reset_task_pending_all);
1864 printk(KERN_ERR "%s:reset called in cas_abnormal_irq [0x%x]\n",
1865 dev->name, status);
1866 schedule_work(&cp->reset_task);
1867#else
1868 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
1869 printk(KERN_ERR "reset called in cas_abnormal_irq\n");
1870 schedule_work(&cp->reset_task);
1871#endif
1872 return 1;
1873}
1874
1875
1876
1877
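/* Target-abort workaround: on chips flagged with CAS_FLAG_TARGET_ABORT,
 * a TX buffer that ends within TX_TARGET_ABORT_LEN bytes of a page
 * boundary must have its tail carried in a separate tiny buffer.
 * cas_calc_tabort() returns how many bytes to split off (0 if no
 * workaround is needed).
 */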
1878#define CAS_TABORT(x) (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
1879#define CAS_ROUND_PAGE(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)
1880static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
1881 const int len)
1882{
1883 unsigned long off = addr + len;
1884
1885 if (CAS_TABORT(cp) == 1)
1886 return 0;
1887 if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
1888 return 0;
1889 return TX_TARGET_ABORT_LEN;
1890}
1891
1892static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
1893{
1894 struct cas_tx_desc *txds;
1895 struct sk_buff **skbs;
1896 struct net_device *dev = cp->dev;
1897 int entry, count;
1898
1899 spin_lock(&cp->tx_lock[ring]);
1900 txds = cp->init_txds[ring];
1901 skbs = cp->tx_skbs[ring];
1902 entry = cp->tx_old[ring];
1903
1904 count = TX_BUFF_COUNT(ring, entry, limit);
1905 while (entry != limit) {
1906 struct sk_buff *skb = skbs[entry];
1907 dma_addr_t daddr;
1908 u32 dlen;
1909 int frag;
1910
1911 if (!skb) {
1912
1913 entry = TX_DESC_NEXT(ring, entry);
1914 continue;
1915 }
1916
1917
		count -= skb_shinfo(skb)->nr_frags +
			cp->tx_tiny_use[ring][entry].nbufs + 1;
1920 if (count < 0)
1921 break;
1922
1923 if (netif_msg_tx_done(cp))
1924 printk(KERN_DEBUG "%s: tx[%d] done, slot %d\n",
1925 cp->dev->name, ring, entry);
1926
1927 skbs[entry] = NULL;
1928 cp->tx_tiny_use[ring][entry].nbufs = 0;
1929
1930 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1931 struct cas_tx_desc *txd = txds + entry;
1932
1933 daddr = le64_to_cpu(txd->buffer);
1934 dlen = CAS_VAL(TX_DESC_BUFLEN,
1935 le64_to_cpu(txd->control));
1936 pci_unmap_page(cp->pdev, daddr, dlen,
1937 PCI_DMA_TODEVICE);
1938 entry = TX_DESC_NEXT(ring, entry);
1939
1940
1941 if (cp->tx_tiny_use[ring][entry].used) {
1942 cp->tx_tiny_use[ring][entry].used = 0;
1943 entry = TX_DESC_NEXT(ring, entry);
1944 }
1945 }
1946
1947 spin_lock(&cp->stat_lock[ring]);
1948 cp->net_stats[ring].tx_packets++;
1949 cp->net_stats[ring].tx_bytes += skb->len;
1950 spin_unlock(&cp->stat_lock[ring]);
1951 dev_kfree_skb_irq(skb);
1952 }
1953 cp->tx_old[ring] = entry;
1954
1955
1956
1957
1958
1959 if (netif_queue_stopped(dev) &&
1960 (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
1961 netif_wake_queue(dev);
1962 spin_unlock(&cp->tx_lock[ring]);
1963}
1964
1965static void cas_tx(struct net_device *dev, struct cas *cp,
1966 u32 status)
1967{
1968 int limit, ring;
1969#ifdef USE_TX_COMPWB
1970 u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
1971#endif
1972 if (netif_msg_intr(cp))
1973 printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %llx\n",
1974 cp->dev->name, status, (unsigned long long)compwb);
1975
1976 for (ring = 0; ring < N_TX_RINGS; ring++) {
1977#ifdef USE_TX_COMPWB
1978
1979 limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
1980 CAS_VAL(TX_COMPWB_LSB, compwb);
1981 compwb = TX_COMPWB_NEXT(compwb);
1982#else
1983 limit = readl(cp->regs + REG_TX_COMPN(ring));
1984#endif
1985 if (cp->tx_old[ring] != limit)
1986 cas_tx_ringN(cp, ring, limit);
1987 }
1988}
1989
1990
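/* Build an skb for a received packet: the header (and small packets)
 * are copied into a freshly allocated skb, larger payloads are attached
 * as page fragments, and the hardware TCP checksum is adjusted when
 * trailing CRC bytes are present.
 */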
1991static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
1992 int entry, const u64 *words,
1993 struct sk_buff **skbref)
1994{
1995 int dlen, hlen, len, i, alloclen;
1996 int off, swivel = RX_SWIVEL_OFF_VAL;
1997 struct cas_page *page;
1998 struct sk_buff *skb;
1999 void *addr, *crcaddr;
2000 __sum16 csum;
2001 char *p;
2002
2003 hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
2004 dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
2005 len = hlen + dlen;
2006
2007 if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
2008 alloclen = len;
2009 else
2010 alloclen = max(hlen, RX_COPY_MIN);
2011
2012 skb = dev_alloc_skb(alloclen + swivel + cp->crc_size);
2013 if (skb == NULL)
2014 return -1;
2015
2016 *skbref = skb;
2017 skb_reserve(skb, swivel);
2018
2019 p = skb->data;
2020 addr = crcaddr = NULL;
2021 if (hlen) {
2022 i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
2023 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2024 off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
2025 swivel;
2026
2027 i = hlen;
2028 if (!dlen)
2029 i += cp->crc_size;
2030 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
2031 PCI_DMA_FROMDEVICE);
2032 addr = cas_page_map(page->buffer);
2033 memcpy(p, addr + off, i);
2034 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
2035 PCI_DMA_FROMDEVICE);
2036 cas_page_unmap(addr);
2037 RX_USED_ADD(page, 0x100);
2038 p += hlen;
2039 swivel = 0;
2040 }
2041
2042
2043 if (alloclen < (hlen + dlen)) {
2044 skb_frag_t *frag = skb_shinfo(skb)->frags;
2045
2046
2047 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2048 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2049 off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2050
2051 hlen = min(cp->page_size - off, dlen);
2052 if (hlen < 0) {
2053 if (netif_msg_rx_err(cp)) {
2054 printk(KERN_DEBUG "%s: rx page overflow: "
2055 "%d\n", cp->dev->name, hlen);
2056 }
2057 dev_kfree_skb_irq(skb);
2058 return -1;
2059 }
2060 i = hlen;
2061 if (i == dlen)
2062 i += cp->crc_size;
2063 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
2064 PCI_DMA_FROMDEVICE);
2065
2066
2067 swivel = 0;
2068 if (p == (char *) skb->data) {
2069 addr = cas_page_map(page->buffer);
2070 memcpy(p, addr + off, RX_COPY_MIN);
2071 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
2072 PCI_DMA_FROMDEVICE);
2073 cas_page_unmap(addr);
2074 off += RX_COPY_MIN;
2075 swivel = RX_COPY_MIN;
2076 RX_USED_ADD(page, cp->mtu_stride);
2077 } else {
2078 RX_USED_ADD(page, hlen);
2079 }
2080 skb_put(skb, alloclen);
2081
2082 skb_shinfo(skb)->nr_frags++;
2083 skb->data_len += hlen - swivel;
2084 skb->truesize += hlen - swivel;
2085 skb->len += hlen - swivel;
2086
2087 get_page(page->buffer);
2088 frag->page = page->buffer;
2089 frag->page_offset = off;
2090 frag->size = hlen - swivel;
2091
2092
2093 if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2094 hlen = dlen;
2095 off = 0;
2096
2097 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2098 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2099 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
2100 hlen + cp->crc_size,
2101 PCI_DMA_FROMDEVICE);
2102 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
2103 hlen + cp->crc_size,
2104 PCI_DMA_FROMDEVICE);
2105
2106 skb_shinfo(skb)->nr_frags++;
2107 skb->data_len += hlen;
2108 skb->len += hlen;
2109 frag++;
2110
2111 get_page(page->buffer);
2112 frag->page = page->buffer;
2113 frag->page_offset = 0;
2114 frag->size = hlen;
2115 RX_USED_ADD(page, hlen + cp->crc_size);
2116 }
2117
2118 if (cp->crc_size) {
2119 addr = cas_page_map(page->buffer);
2120 crcaddr = addr + off + hlen;
2121 }
2122
2123 } else {
2124
2125 if (!dlen)
2126 goto end_copy_pkt;
2127
2128 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2129 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2130 off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2131 hlen = min(cp->page_size - off, dlen);
2132 if (hlen < 0) {
2133 if (netif_msg_rx_err(cp)) {
2134 printk(KERN_DEBUG "%s: rx page overflow: "
2135 "%d\n", cp->dev->name, hlen);
2136 }
2137 dev_kfree_skb_irq(skb);
2138 return -1;
2139 }
2140 i = hlen;
2141 if (i == dlen)
2142 i += cp->crc_size;
2143 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
2144 PCI_DMA_FROMDEVICE);
2145 addr = cas_page_map(page->buffer);
2146 memcpy(p, addr + off, i);
2147 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
2148 PCI_DMA_FROMDEVICE);
2149 cas_page_unmap(addr);
2150 if (p == (char *) skb->data)
2151 RX_USED_ADD(page, cp->mtu_stride);
2152 else
2153 RX_USED_ADD(page, i);
2154
2155
2156 if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2157 p += hlen;
2158 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2159 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2160 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
2161 dlen + cp->crc_size,
2162 PCI_DMA_FROMDEVICE);
2163 addr = cas_page_map(page->buffer);
2164 memcpy(p, addr, dlen + cp->crc_size);
2165 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
2166 dlen + cp->crc_size,
2167 PCI_DMA_FROMDEVICE);
2168 cas_page_unmap(addr);
2169 RX_USED_ADD(page, dlen + cp->crc_size);
2170 }
2171end_copy_pkt:
2172 if (cp->crc_size) {
2173 addr = NULL;
2174 crcaddr = skb->data + alloclen;
2175 }
2176 skb_put(skb, alloclen);
2177 }
2178
2179 csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
2180 if (cp->crc_size) {
2181
2182 csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
2183 csum_unfold(csum)));
2184 if (addr)
2185 cas_page_unmap(addr);
2186 }
2187 skb->protocol = eth_type_trans(skb, cp->dev);
2188 if (skb->protocol == htons(ETH_P_IP)) {
2189 skb->csum = csum_unfold(~csum);
2190 skb->ip_summed = CHECKSUM_COMPLETE;
2191 } else
2192 skb->ip_summed = CHECKSUM_NONE;
2193 return len;
2194}
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
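/* Queue a packet on its flow's reassembly list; once the hardware flags
 * the flow as released, the whole batch is handed up the stack in order.
 */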
2211static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
2212 struct sk_buff *skb)
2213{
2214 int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
2215 struct sk_buff_head *flow = &cp->rx_flows[flowid];
2216
2217
2218
2219
2220
2221 __skb_queue_tail(flow, skb);
2222 if (words[0] & RX_COMP1_RELEASE_FLOW) {
2223 while ((skb = __skb_dequeue(flow))) {
2224 cas_skb_release(skb);
2225 }
2226 }
2227}
2228
2229
2230
2231
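/* Return a single RX descriptor to the hardware, kicking the ring only
 * when the new index is a multiple of four.
 */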
2232static void cas_post_page(struct cas *cp, const int ring, const int index)
2233{
2234 cas_page_t *new;
2235 int entry;
2236
2237 entry = cp->rx_old[ring];
2238
2239 new = cas_page_swap(cp, ring, index);
2240 cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
2241 cp->init_rxds[ring][entry].index =
2242 cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
2243 CAS_BASE(RX_INDEX_RING, ring));
2244
2245 entry = RX_DESC_ENTRY(ring, entry + 1);
2246 cp->rx_old[ring] = entry;
2247
2248 if (entry % 4)
2249 return;
2250
2251 if (ring == 0)
2252 writel(entry, cp->regs + REG_RX_KICK);
2253 else if ((N_RX_DESC_RINGS > 1) &&
2254 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2255 writel(entry, cp->regs + REG_PLUS_RX_KICK1);
2256}
2257
2258
2259
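/* Refill a stretch of RX descriptors, swapping out pages that are still
 * referenced by the stack; if no spare pages are available the refill is
 * deferred to the link timer.
 */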
2260static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
2261{
2262 unsigned int entry, last, count, released;
2263 int cluster;
2264 cas_page_t **page = cp->rx_pages[ring];
2265
2266 entry = cp->rx_old[ring];
2267
2268 if (netif_msg_intr(cp))
2269 printk(KERN_DEBUG "%s: rxd[%d] interrupt, done: %d\n",
2270 cp->dev->name, ring, entry);
2271
2272 cluster = -1;
2273 count = entry & 0x3;
2274 last = RX_DESC_ENTRY(ring, num ? entry + num - 4: entry - 4);
2275 released = 0;
2276 while (entry != last) {
2277
2278 if (page_count(page[entry]->buffer) > 1) {
2279 cas_page_t *new = cas_page_dequeue(cp);
2280 if (!new) {
2281
2282
2283
2284 cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
2285 if (!timer_pending(&cp->link_timer))
2286 mod_timer(&cp->link_timer, jiffies +
2287 CAS_LINK_FAST_TIMEOUT);
2288 cp->rx_old[ring] = entry;
2289 cp->rx_last[ring] = num ? num - released : 0;
2290 return -ENOMEM;
2291 }
2292 spin_lock(&cp->rx_inuse_lock);
2293 list_add(&page[entry]->list, &cp->rx_inuse_list);
2294 spin_unlock(&cp->rx_inuse_lock);
2295 cp->init_rxds[ring][entry].buffer =
2296 cpu_to_le64(new->dma_addr);
2297 page[entry] = new;
2298
2299 }
2300
2301 if (++count == 4) {
2302 cluster = entry;
2303 count = 0;
2304 }
2305 released++;
2306 entry = RX_DESC_ENTRY(ring, entry + 1);
2307 }
2308 cp->rx_old[ring] = entry;
2309
2310 if (cluster < 0)
2311 return 0;
2312
2313 if (ring == 0)
2314 writel(cluster, cp->regs + REG_RX_KICK);
2315 else if ((N_RX_DESC_RINGS > 1) &&
2316 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2317 writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
2318 return 0;
2319}
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
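/* Process one RX completion ring: validate each completion entry, build
 * the skb, pass it up (directly or via flow batching), release the
 * underlying descriptor pages, and honor the NAPI budget when enabled.
 */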
2334static int cas_rx_ringN(struct cas *cp, int ring, int budget)
2335{
2336 struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
2337 int entry, drops;
2338 int npackets = 0;
2339
2340 if (netif_msg_intr(cp))
2341 printk(KERN_DEBUG "%s: rx[%d] interrupt, done: %d/%d\n",
2342 cp->dev->name, ring,
2343 readl(cp->regs + REG_RX_COMP_HEAD),
2344 cp->rx_new[ring]);
2345
2346 entry = cp->rx_new[ring];
2347 drops = 0;
2348 while (1) {
2349 struct cas_rx_comp *rxc = rxcs + entry;
2350 struct sk_buff *uninitialized_var(skb);
2351 int type, len;
2352 u64 words[4];
2353 int i, dring;
2354
2355 words[0] = le64_to_cpu(rxc->word1);
2356 words[1] = le64_to_cpu(rxc->word2);
2357 words[2] = le64_to_cpu(rxc->word3);
2358 words[3] = le64_to_cpu(rxc->word4);
2359
2360
2361 type = CAS_VAL(RX_COMP1_TYPE, words[0]);
2362 if (type == 0)
2363 break;
2364
2365
2366 if (words[3] & RX_COMP4_ZERO) {
2367 break;
2368 }
2369
2370
2371 if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
2372 spin_lock(&cp->stat_lock[ring]);
2373 cp->net_stats[ring].rx_errors++;
2374 if (words[3] & RX_COMP4_LEN_MISMATCH)
2375 cp->net_stats[ring].rx_length_errors++;
2376 if (words[3] & RX_COMP4_BAD)
2377 cp->net_stats[ring].rx_crc_errors++;
2378 spin_unlock(&cp->stat_lock[ring]);
2379
2380
2381 drop_it:
2382 spin_lock(&cp->stat_lock[ring]);
2383 ++cp->net_stats[ring].rx_dropped;
2384 spin_unlock(&cp->stat_lock[ring]);
2385 goto next;
2386 }
2387
2388 len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
2389 if (len < 0) {
2390 ++drops;
2391 goto drop_it;
2392 }
2393
2394
2395
2396
2397 if (RX_DONT_BATCH || (type == 0x2)) {
2398
2399 cas_skb_release(skb);
2400 } else {
2401 cas_rx_flow_pkt(cp, words, skb);
2402 }
2403
2404 spin_lock(&cp->stat_lock[ring]);
2405 cp->net_stats[ring].rx_packets++;
2406 cp->net_stats[ring].rx_bytes += len;
2407 spin_unlock(&cp->stat_lock[ring]);
2408
2409 next:
2410 npackets++;
2411
2412
2413 if (words[0] & RX_COMP1_RELEASE_HDR) {
2414 i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
2415 dring = CAS_VAL(RX_INDEX_RING, i);
2416 i = CAS_VAL(RX_INDEX_NUM, i);
2417 cas_post_page(cp, dring, i);
2418 }
2419
2420 if (words[0] & RX_COMP1_RELEASE_DATA) {
2421 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2422 dring = CAS_VAL(RX_INDEX_RING, i);
2423 i = CAS_VAL(RX_INDEX_NUM, i);
2424 cas_post_page(cp, dring, i);
2425 }
2426
2427 if (words[0] & RX_COMP1_RELEASE_NEXT) {
2428 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2429 dring = CAS_VAL(RX_INDEX_RING, i);
2430 i = CAS_VAL(RX_INDEX_NUM, i);
2431 cas_post_page(cp, dring, i);
2432 }
2433
2434
2435 entry = RX_COMP_ENTRY(ring, entry + 1 +
2436 CAS_VAL(RX_COMP1_SKIP, words[0]));
2437#ifdef USE_NAPI
2438 if (budget && (npackets >= budget))
2439 break;
2440#endif
2441 }
2442 cp->rx_new[ring] = entry;
2443
2444 if (drops)
2445 printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
2446 cp->dev->name);
2447 return npackets;
2448}
2449
2450
2451
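/* Reinitialize the completion entries consumed since the last pass and
 * advance the completion tail register so the hardware can reuse them.
 */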
2452static void cas_post_rxcs_ringN(struct net_device *dev,
2453 struct cas *cp, int ring)
2454{
2455 struct cas_rx_comp *rxc = cp->init_rxcs[ring];
2456 int last, entry;
2457
2458 last = cp->rx_cur[ring];
2459 entry = cp->rx_new[ring];
2460 if (netif_msg_intr(cp))
2461 printk(KERN_DEBUG "%s: rxc[%d] interrupt, done: %d/%d\n",
2462 dev->name, ring, readl(cp->regs + REG_RX_COMP_HEAD),
2463 entry);
2464
2465
2466 while (last != entry) {
2467 cas_rxc_init(rxc + last);
2468 last = RX_COMP_ENTRY(ring, last + 1);
2469 }
2470 cp->rx_cur[ring] = last;
2471
2472 if (ring == 0)
2473 writel(last, cp->regs + REG_RX_COMP_TAIL);
2474 else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
2475 writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
2476}
2477
2478
2479
2480
2481
2482
2483#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
2484static inline void cas_handle_irqN(struct net_device *dev,
2485 struct cas *cp, const u32 status,
2486 const int ring)
2487{
2488 if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
2489 cas_post_rxcs_ringN(dev, cp, ring);
2490}
2491
2492static irqreturn_t cas_interruptN(int irq, void *dev_id)
2493{
2494 struct net_device *dev = dev_id;
2495 struct cas *cp = netdev_priv(dev);
2496 unsigned long flags;
	int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));

	/* check for shared interrupt */
	if (status == 0)
		return IRQ_NONE;

2505 spin_lock_irqsave(&cp->lock, flags);
2506 if (status & INTR_RX_DONE_ALT) {
2507#ifdef USE_NAPI
2508 cas_mask_intr(cp);
2509 napi_schedule(&cp->napi);
2510#else
2511 cas_rx_ringN(cp, ring, 0);
2512#endif
2513 status &= ~INTR_RX_DONE_ALT;
2514 }
2515
2516 if (status)
2517 cas_handle_irqN(dev, cp, status, ring);
2518 spin_unlock_irqrestore(&cp->lock, flags);
2519 return IRQ_HANDLED;
2520}
2521#endif
2522
2523#ifdef USE_PCI_INTB
2524
2525static inline void cas_handle_irq1(struct cas *cp, const u32 status)
2526{
2527 if (status & INTR_RX_BUF_UNAVAIL_1) {
2528
2529
2530 cas_post_rxds_ringN(cp, 1, 0);
2531 spin_lock(&cp->stat_lock[1]);
2532 cp->net_stats[1].rx_dropped++;
2533 spin_unlock(&cp->stat_lock[1]);
2534 }
2535
2536 if (status & INTR_RX_BUF_AE_1)
2537 cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
2538 RX_AE_FREEN_VAL(1));
2539
2540 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2541 cas_post_rxcs_ringN(cp, 1);
2542}
2543
2544
2545static irqreturn_t cas_interrupt1(int irq, void *dev_id)
2546{
2547 struct net_device *dev = dev_id;
2548 struct cas *cp = netdev_priv(dev);
2549 unsigned long flags;
2550 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2551
2552
2553 if (status == 0)
2554 return IRQ_NONE;
2555
2556 spin_lock_irqsave(&cp->lock, flags);
2557 if (status & INTR_RX_DONE_ALT) {
2558#ifdef USE_NAPI
2559 cas_mask_intr(cp);
2560 napi_schedule(&cp->napi);
2561#else
2562 cas_rx_ringN(cp, 1, 0);
2563#endif
2564 status &= ~INTR_RX_DONE_ALT;
2565 }
2566 if (status)
2567 cas_handle_irq1(cp, status);
2568 spin_unlock_irqrestore(&cp->lock, flags);
2569 return IRQ_HANDLED;
2570}
2571#endif
2572
2573static inline void cas_handle_irq(struct net_device *dev,
2574 struct cas *cp, const u32 status)
2575{
2576
2577 if (status & INTR_ERROR_MASK)
2578 cas_abnormal_irq(dev, cp, status);
2579
2580 if (status & INTR_RX_BUF_UNAVAIL) {
2581
2582
2583
2584 cas_post_rxds_ringN(cp, 0, 0);
2585 spin_lock(&cp->stat_lock[0]);
2586 cp->net_stats[0].rx_dropped++;
2587 spin_unlock(&cp->stat_lock[0]);
2588 } else if (status & INTR_RX_BUF_AE) {
2589 cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
2590 RX_AE_FREEN_VAL(0));
2591 }
2592
2593 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2594 cas_post_rxcs_ringN(dev, cp, 0);
2595}
2596
2597static irqreturn_t cas_interrupt(int irq, void *dev_id)
2598{
2599 struct net_device *dev = dev_id;
2600 struct cas *cp = netdev_priv(dev);
2601 unsigned long flags;
2602 u32 status = readl(cp->regs + REG_INTR_STATUS);
2603
2604 if (status == 0)
2605 return IRQ_NONE;
2606
2607 spin_lock_irqsave(&cp->lock, flags);
2608 if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
2609 cas_tx(dev, cp, status);
2610 status &= ~(INTR_TX_ALL | INTR_TX_INTME);
2611 }
2612
2613 if (status & INTR_RX_DONE) {
2614#ifdef USE_NAPI
2615 cas_mask_intr(cp);
2616 napi_schedule(&cp->napi);
2617#else
2618 cas_rx_ringN(cp, 0, 0);
2619#endif
2620 status &= ~INTR_RX_DONE;
2621 }
2622
2623 if (status)
2624 cas_handle_irq(dev, cp, status);
2625 spin_unlock_irqrestore(&cp->lock, flags);
2626 return IRQ_HANDLED;
2627}
2628
2629
2630#ifdef USE_NAPI
2631static int cas_poll(struct napi_struct *napi, int budget)
2632{
2633 struct cas *cp = container_of(napi, struct cas, napi);
2634 struct net_device *dev = cp->dev;
2635 int i, enable_intr, credits;
2636 u32 status = readl(cp->regs + REG_INTR_STATUS);
2637 unsigned long flags;
2638
2639 spin_lock_irqsave(&cp->lock, flags);
2640 cas_tx(dev, cp, status);
2641 spin_unlock_irqrestore(&cp->lock, flags);
2642
2643
2644
2645
2646
2647
2648
2649
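	/* Sweep the rx completion rings repeatedly, giving each ring an equal
	 * share of the budget.  If the budget is exhausted, skip re-enabling
	 * interrupts so that we get polled again.
	 */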
2650 enable_intr = 1;
2651 credits = 0;
2652 for (i = 0; i < N_RX_COMP_RINGS; i++) {
2653 int j;
2654 for (j = 0; j < N_RX_COMP_RINGS; j++) {
2655 credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
2656 if (credits >= budget) {
2657 enable_intr = 0;
2658 goto rx_comp;
2659 }
2660 }
2661 }
2662
2663rx_comp:
2664
2665 spin_lock_irqsave(&cp->lock, flags);
2666 if (status)
2667 cas_handle_irq(dev, cp, status);
2668
2669#ifdef USE_PCI_INTB
2670 if (N_RX_COMP_RINGS > 1) {
2671 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2672 if (status)
			cas_handle_irq1(cp, status);
2674 }
2675#endif
2676
2677#ifdef USE_PCI_INTC
2678 if (N_RX_COMP_RINGS > 2) {
2679 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
2680 if (status)
2681 cas_handle_irqN(dev, cp, status, 2);
2682 }
2683#endif
2684
2685#ifdef USE_PCI_INTD
2686 if (N_RX_COMP_RINGS > 3) {
2687 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
2688 if (status)
2689 cas_handle_irqN(dev, cp, status, 3);
2690 }
2691#endif
2692 spin_unlock_irqrestore(&cp->lock, flags);
2693 if (enable_intr) {
2694 napi_complete(napi);
2695 cas_unmask_intr(cp);
2696 }
2697 return credits;
2698}
2699#endif
2700
2701#ifdef CONFIG_NET_POLL_CONTROLLER
2702static void cas_netpoll(struct net_device *dev)
2703{
2704 struct cas *cp = netdev_priv(dev);
2705
2706 cas_disable_irq(cp, 0);
2707 cas_interrupt(cp->pdev->irq, dev);
2708 cas_enable_irq(cp, 0);
2709
2710#ifdef USE_PCI_INTB
2711 if (N_RX_COMP_RINGS > 1) {
2712
2713 }
2714#endif
2715#ifdef USE_PCI_INTC
2716 if (N_RX_COMP_RINGS > 2) {
2717
2718 }
2719#endif
2720#ifdef USE_PCI_INTD
2721 if (N_RX_COMP_RINGS > 3) {
2722
2723 }
2724#endif
2725}
2726#endif
2727
2728static void cas_tx_timeout(struct net_device *dev)
2729{
2730 struct cas *cp = netdev_priv(dev);
2731
2732 printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
2733 if (!cp->hw_running) {
		printk(KERN_ERR "%s: hrm.. hw not running!\n", dev->name);
2735 return;
2736 }
2737
2738 printk(KERN_ERR "%s: MIF_STATE[%08x]\n",
2739 dev->name, readl(cp->regs + REG_MIF_STATE_MACHINE));
2740
2741 printk(KERN_ERR "%s: MAC_STATE[%08x]\n",
2742 dev->name, readl(cp->regs + REG_MAC_STATE_MACHINE));
2743
2744 printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x] "
2745 "FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
2746 dev->name,
2747 readl(cp->regs + REG_TX_CFG),
2748 readl(cp->regs + REG_MAC_TX_STATUS),
2749 readl(cp->regs + REG_MAC_TX_CFG),
2750 readl(cp->regs + REG_TX_FIFO_PKT_CNT),
2751 readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
2752 readl(cp->regs + REG_TX_FIFO_READ_PTR),
2753 readl(cp->regs + REG_TX_SM_1),
2754 readl(cp->regs + REG_TX_SM_2));
2755
2756 printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
2757 dev->name,
2758 readl(cp->regs + REG_RX_CFG),
2759 readl(cp->regs + REG_MAC_RX_STATUS),
2760 readl(cp->regs + REG_MAC_RX_CFG));
2761
2762 printk(KERN_ERR "%s: HP_STATE[%08x:%08x:%08x:%08x]\n",
2763 dev->name,
2764 readl(cp->regs + REG_HP_STATE_MACHINE),
2765 readl(cp->regs + REG_HP_STATUS0),
2766 readl(cp->regs + REG_HP_STATUS1),
2767 readl(cp->regs + REG_HP_STATUS2));
2768
2769#if 1
2770 atomic_inc(&cp->reset_task_pending);
2771 atomic_inc(&cp->reset_task_pending_all);
2772 schedule_work(&cp->reset_task);
2773#else
2774 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
2775 schedule_work(&cp->reset_task);
2776#endif
2777}
2778
2779static inline int cas_intme(int ring, int entry)
2780{
2781
2782 if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)))
2783 return 1;
2784 return 0;
2785}
2786
2787
2788static void cas_write_txd(struct cas *cp, int ring, int entry,
2789 dma_addr_t mapping, int len, u64 ctrl, int last)
2790{
2791 struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
2792
2793 ctrl |= CAS_BASE(TX_DESC_BUFLEN, len);
2794 if (cas_intme(ring, entry))
2795 ctrl |= TX_DESC_INTME;
2796 if (last)
2797 ctrl |= TX_DESC_EOF;
2798 txd->control = cpu_to_le64(ctrl);
2799 txd->buffer = cpu_to_le64(mapping);
2800}
2801
2802static inline void *tx_tiny_buf(struct cas *cp, const int ring,
2803 const int entry)
2804{
2805 return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
2806}
2807
2808static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
2809 const int entry, const int tentry)
2810{
2811 cp->tx_tiny_use[ring][tentry].nbufs++;
2812 cp->tx_tiny_use[ring][entry].used = 1;
2813 return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
2814}
2815
2816static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2817 struct sk_buff *skb)
2818{
2819 struct net_device *dev = cp->dev;
2820 int entry, nr_frags, frag, tabort, tentry;
2821 dma_addr_t mapping;
2822 unsigned long flags;
2823 u64 ctrl;
2824 u32 len;
2825
2826 spin_lock_irqsave(&cp->tx_lock[ring], flags);
2827
2828
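	/* This is a hard error and should never happen: the queue is stopped
	 * before the ring gets this full.
	 */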
2829 if (TX_BUFFS_AVAIL(cp, ring) <=
2830 CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
2831 netif_stop_queue(dev);
2832 spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2833 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
2834 "queue awake!\n", dev->name);
2835 return 1;
2836 }
2837
2838 ctrl = 0;
2839 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2840 const u64 csum_start_off = skb_transport_offset(skb);
2841 const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
2842
2843 ctrl = TX_DESC_CSUM_EN |
2844 CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
2845 CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
2846 }
2847
2848 entry = cp->tx_new[ring];
2849 cp->tx_skbs[ring][entry] = skb;
2850
2851 nr_frags = skb_shinfo(skb)->nr_frags;
2852 len = skb_headlen(skb);
2853 mapping = pci_map_page(cp->pdev, virt_to_page(skb->data),
2854 offset_in_page(skb->data), len,
2855 PCI_DMA_TODEVICE);
2856
2857 tentry = entry;
2858 tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
2859 if (unlikely(tabort)) {
2860
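		/* cas_calc_tabort() decided the tail of this buffer must not
		 * be DMAed in place, so split it: map the first part as-is
		 * and copy the remaining tabort bytes into a pre-allocated
		 * "tiny" bounce buffer sent as its own descriptor.
		 */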
2861 cas_write_txd(cp, ring, entry, mapping, len - tabort,
2862 ctrl | TX_DESC_SOF, 0);
2863 entry = TX_DESC_NEXT(ring, entry);
2864
2865 skb_copy_from_linear_data_offset(skb, len - tabort,
2866 tx_tiny_buf(cp, ring, entry), tabort);
2867 mapping = tx_tiny_map(cp, ring, entry, tentry);
2868 cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
2869 (nr_frags == 0));
2870 } else {
2871 cas_write_txd(cp, ring, entry, mapping, len, ctrl |
2872 TX_DESC_SOF, (nr_frags == 0));
2873 }
2874 entry = TX_DESC_NEXT(ring, entry);
2875
2876 for (frag = 0; frag < nr_frags; frag++) {
2877 skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
2878
2879 len = fragp->size;
2880 mapping = pci_map_page(cp->pdev, fragp->page,
2881 fragp->page_offset, len,
2882 PCI_DMA_TODEVICE);
2883
2884 tabort = cas_calc_tabort(cp, fragp->page_offset, len);
2885 if (unlikely(tabort)) {
2886 void *addr;
2887
2888
2889 cas_write_txd(cp, ring, entry, mapping, len - tabort,
2890 ctrl, 0);
2891 entry = TX_DESC_NEXT(ring, entry);
2892
2893 addr = cas_page_map(fragp->page);
2894 memcpy(tx_tiny_buf(cp, ring, entry),
2895 addr + fragp->page_offset + len - tabort,
2896 tabort);
2897 cas_page_unmap(addr);
2898 mapping = tx_tiny_map(cp, ring, entry, tentry);
2899 len = tabort;
2900 }
2901
2902 cas_write_txd(cp, ring, entry, mapping, len, ctrl,
2903 (frag + 1 == nr_frags));
2904 entry = TX_DESC_NEXT(ring, entry);
2905 }
2906
2907 cp->tx_new[ring] = entry;
2908 if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
2909 netif_stop_queue(dev);
2910
2911 if (netif_msg_tx_queued(cp))
2912 printk(KERN_DEBUG "%s: tx[%d] queued, slot %d, skblen %d, "
2913 "avail %d\n",
2914 dev->name, ring, entry, skb->len,
2915 TX_BUFFS_AVAIL(cp, ring));
2916 writel(entry, cp->regs + REG_TX_KICKN(ring));
2917 spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2918 return 0;
2919}
2920
2921static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
2922{
2923 struct cas *cp = netdev_priv(dev);
2924
2925
2926
2927
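	/* The tx ring is chosen round-robin purely to spread load, so the
	 * static counter does not need to be SMP safe.
	 */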
2928 static int ring;
2929
2930 if (skb_padto(skb, cp->min_frame_size))
2931 return NETDEV_TX_OK;
2932
2933
2934
2935
2936 if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
2937 return NETDEV_TX_BUSY;
2938 dev->trans_start = jiffies;
2939 return NETDEV_TX_OK;
2940}
2941
2942static void cas_init_tx_dma(struct cas *cp)
2943{
2944 u64 desc_dma = cp->block_dvma;
2945 unsigned long off;
2946 u32 val;
2947 int i;
2948
2949
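	/* program the address of the tx completion writeback block */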
2950#ifdef USE_TX_COMPWB
2951 off = offsetof(struct cas_init_block, tx_compwb);
2952 writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
2953 writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
2954#endif
2955
2956
2957
2958
2959 val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
2960 TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
2961 TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
2962 TX_CFG_INTR_COMPWB_DIS;
2963
2964
2965 for (i = 0; i < MAX_TX_RINGS; i++) {
2966 off = (unsigned long) cp->init_txds[i] -
2967 (unsigned long) cp->init_block;
2968
2969 val |= CAS_TX_RINGN_BASE(i);
2970 writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
2971 writel((desc_dma + off) & 0xffffffff, cp->regs +
2972 REG_TX_DBN_LOW(i));
2973
2974
2975
2976 }
2977 writel(val, cp->regs + REG_TX_CFG);
2978
2979
2980
2981
2982#ifdef USE_QOS
2983 writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2984 writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
2985 writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
2986 writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
2987#else
2988 writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2989 writel(0x800, cp->regs + REG_TX_MAXBURST_1);
2990 writel(0x800, cp->regs + REG_TX_MAXBURST_2);
2991 writel(0x800, cp->regs + REG_TX_MAXBURST_3);
2992#endif
2993}
2994
2995
2996static inline void cas_init_dma(struct cas *cp)
2997{
2998 cas_init_tx_dma(cp);
2999 cas_init_rx_dma(cp);
3000}
3001
3002
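/* Compute the MAC_RX_CFG filtering bits for the current interface flags:
 * promiscuous mode, all-multicast via a fully set hash table, or a mix of
 * exact-match slots and hash filtering for the multicast list.  The caller
 * is responsible for writing the returned bits to REG_MAC_RX_CFG.
 */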
3003static u32 cas_setup_multicast(struct cas *cp)
3004{
3005 u32 rxcfg = 0;
3006 int i;
3007
3008 if (cp->dev->flags & IFF_PROMISC) {
3009 rxcfg |= MAC_RX_CFG_PROMISC_EN;
3010
3011 } else if (cp->dev->flags & IFF_ALLMULTI) {
3012 for (i=0; i < 16; i++)
3013 writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
3014 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
3015
3016 } else {
3017 u16 hash_table[16];
3018 u32 crc;
3019 struct dev_mc_list *dmi = cp->dev->mc_list;
3020 int i;
3021
3022
3023
3024
3025 for (i = 1; i <= CAS_MC_EXACT_MATCH_SIZE; i++) {
3026 if (!dmi) {
3027 writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 0));
3028 writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 1));
3029 writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 2));
3030 continue;
3031 }
3032 writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5],
3033 cp->regs + REG_MAC_ADDRN(i*3 + 0));
3034 writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3],
3035 cp->regs + REG_MAC_ADDRN(i*3 + 1));
3036 writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1],
3037 cp->regs + REG_MAC_ADDRN(i*3 + 2));
3038 dmi = dmi->next;
3039 }
3040
3041
3042
3043
3044 memset(hash_table, 0, sizeof(hash_table));
3045 while (dmi) {
3046 crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr);
3047 crc >>= 24;
3048 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
3049 dmi = dmi->next;
3050 }
3051 for (i=0; i < 16; i++)
3052 writel(hash_table[i], cp->regs +
3053 REG_MAC_HASH_TABLEN(i));
3054 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
3055 }
3056
3057 return rxcfg;
3058}
3059
3060
3061static void cas_clear_mac_err(struct cas *cp)
3062{
3063 writel(0, cp->regs + REG_MAC_COLL_NORMAL);
3064 writel(0, cp->regs + REG_MAC_COLL_FIRST);
3065 writel(0, cp->regs + REG_MAC_COLL_EXCESS);
3066 writel(0, cp->regs + REG_MAC_COLL_LATE);
3067 writel(0, cp->regs + REG_MAC_TIMER_DEFER);
3068 writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK);
3069 writel(0, cp->regs + REG_MAC_RECV_FRAME);
3070 writel(0, cp->regs + REG_MAC_LEN_ERR);
3071 writel(0, cp->regs + REG_MAC_ALIGN_ERR);
3072 writel(0, cp->regs + REG_MAC_FCS_ERR);
3073 writel(0, cp->regs + REG_MAC_RX_CODE_ERR);
3074}
3075
3076
3077static void cas_mac_reset(struct cas *cp)
3078{
3079 int i;
3080
3081
3082 writel(0x1, cp->regs + REG_MAC_TX_RESET);
3083 writel(0x1, cp->regs + REG_MAC_RX_RESET);
3084
3085
3086 i = STOP_TRIES;
3087 while (i-- > 0) {
3088 if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
3089 break;
3090 udelay(10);
3091 }
3092
3093
3094 i = STOP_TRIES;
3095 while (i-- > 0) {
3096 if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
3097 break;
3098 udelay(10);
3099 }
3100
3101 if (readl(cp->regs + REG_MAC_TX_RESET) |
3102 readl(cp->regs + REG_MAC_RX_RESET))
3103 printk(KERN_ERR "%s: mac tx[%d]/rx[%d] reset failed [%08x]\n",
3104 cp->dev->name, readl(cp->regs + REG_MAC_TX_RESET),
3105 readl(cp->regs + REG_MAC_RX_RESET),
3106 readl(cp->regs + REG_MAC_STATE_MACHINE));
3107}
3108
3109
3110
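/* Reset the MAC and program its static parameters: inter-packet gaps, slot
 * time, min/max frame sizes, the station address, the address filters and
 * the 802.3x pause control address, then clear the error counters and set
 * up the MAC interrupt masks.
 */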
3111static void cas_init_mac(struct cas *cp)
3112{
3113 unsigned char *e = &cp->dev->dev_addr[0];
3114 int i;
3115#ifdef CONFIG_CASSINI_MULTICAST_REG_WRITE
3116 u32 rxcfg;
3117#endif
3118 cas_mac_reset(cp);
3119
3120
3121 writel(CAWR_RR_DIS, cp->regs + REG_CAWR);
3122
3123
3124#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
3125
3126
3127
3128 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
3129 writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
3130#endif
3131
3132 writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);
3133
3134 writel(0x00, cp->regs + REG_MAC_IPG0);
3135 writel(0x08, cp->regs + REG_MAC_IPG1);
3136 writel(0x04, cp->regs + REG_MAC_IPG2);
3137
3138
3139 writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3140
3141
3142 writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);
3143
3144
3145
3146
3147
3148 writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
3149 CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
3150 (CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
3151 cp->regs + REG_MAC_FRAMESIZE_MAX);
3152
3153
3154
3155
3156
3157 if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
3158 writel(0x41, cp->regs + REG_MAC_PA_SIZE);
3159 else
3160 writel(0x07, cp->regs + REG_MAC_PA_SIZE);
3161 writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
3162 writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
3163 writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);
3164
3165 writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);
3166
3167 writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
3168 writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
3169 writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
3170 writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
3171 writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);
3172
3173
3174 for (i = 0; i < 45; i++)
3175 writel(0x0, cp->regs + REG_MAC_ADDRN(i));
3176
3177 writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
3178 writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
3179 writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));
3180
3181 writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
3182 writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
3183 writel(0x0180, cp->regs + REG_MAC_ADDRN(44));
3184
3185#ifndef CONFIG_CASSINI_MULTICAST_REG_WRITE
3186 cp->mac_rx_cfg = cas_setup_multicast(cp);
3187#else
3188
3189
3190
3191
3192
3193
3194
3195
3196
3197
3198
3199 cp->mac_rx_cfg = rxcfg = cas_setup_multicast(cp);
3200 writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
3201#endif
3202 spin_lock(&cp->stat_lock[N_TX_RINGS]);
3203 cas_clear_mac_err(cp);
3204 spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3205
3206
3207
3208
3209
3210 writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
3211 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
3212
3213
3214
3215
3216 writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
3217}
3218
3219
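/* Pick the rx FIFO occupancy thresholds used to send xoff/xon pause frames,
 * based on the FIFO size and the current MTU.
 */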
3220static void cas_init_pause_thresholds(struct cas *cp)
3221{
3222
3223
3224
3225 if (cp->rx_fifo_size <= (2 * 1024)) {
3226 cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
3227 } else {
3228 int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
3229 if (max_frame * 3 > cp->rx_fifo_size) {
3230 cp->rx_pause_off = 7104;
3231 cp->rx_pause_on = 960;
3232 } else {
3233 int off = (cp->rx_fifo_size - (max_frame * 2));
3234 int on = off - max_frame;
3235 cp->rx_pause_off = off;
3236 cp->rx_pause_on = on;
3237 }
3238 }
3239}
3240
3241static int cas_vpd_match(const void __iomem *p, const char *str)
3242{
3243 int len = strlen(str) + 1;
3244 int i;
3245
3246 for (i = 0; i < len; i++) {
3247 if (readb(p + i) != str[i])
3248 return 0;
3249 }
3250 return 1;
3251}
3252
3253
3254
3255
3256
3257
3258
3259
3260
3261
3262
3263
3264
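/* Look up the MAC address and PHY type in the expansion ROM VPD.  @offset
 * selects which "local-mac-address" entry to use on multi-port cards.  If
 * no address is found, fall back to a random one with Sun's 08:00:20
 * prefix.  Returns the detected PHY type.
 */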
3265static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
3266 const int offset)
3267{
3268 void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
3269 void __iomem *base, *kstart;
3270 int i, len;
3271 int found = 0;
3272#define VPD_FOUND_MAC 0x01
3273#define VPD_FOUND_PHY 0x02
3274
3275 int phy_type = CAS_PHY_MII_MDIO0;
3276 int mac_off = 0;
3277
3278
3279 writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
3280 cp->regs + REG_BIM_LOCAL_DEV_EN);
3281
3282
3283 if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
3284 goto use_random_mac_addr;
3285
3286
3287 base = NULL;
3288 for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
3289
3290 if ((readb(p + i + 0) == 0x50) &&
3291 (readb(p + i + 1) == 0x43) &&
3292 (readb(p + i + 2) == 0x49) &&
3293 (readb(p + i + 3) == 0x52)) {
3294 base = p + (readb(p + i + 8) |
3295 (readb(p + i + 9) << 8));
3296 break;
3297 }
3298 }
3299
3300 if (!base || (readb(base) != 0x82))
3301 goto use_random_mac_addr;
3302
3303 i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
3304 while (i < EXPANSION_ROM_SIZE) {
3305 if (readb(base + i) != 0x90)
3306 goto use_random_mac_addr;
3307
3308
3309 len = readb(base + i + 1) | (readb(base + i + 2) << 8);
3310
3311
3312 kstart = base + i + 3;
3313 p = kstart;
3314 while ((p - kstart) < len) {
3315 int klen = readb(p + 2);
3316 int j;
3317 char type;
3318
3319 p += 3;
3320
3357
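			/* Each descriptor starts with a 3-byte header whose
			 * last byte is the data length.  We only care about
			 * instance ('I') entries: a byte array ('B') named
			 * "local-mac-address", or a string ('S') describing
			 * the phy type or the entropy device.
			 */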
3358 if (readb(p) != 'I')
3359 goto next;
3360
3361
3362 type = readb(p + 3);
3363 if (type == 'B') {
3364 if ((klen == 29) && readb(p + 4) == 6 &&
3365 cas_vpd_match(p + 5,
3366 "local-mac-address")) {
3367 if (mac_off++ > offset)
3368 goto next;
3369
3370
3371 for (j = 0; j < 6; j++)
3372 dev_addr[j] =
3373 readb(p + 23 + j);
3374 goto found_mac;
3375 }
3376 }
3377
3378 if (type != 'S')
3379 goto next;
3380
3381#ifdef USE_ENTROPY_DEV
3382 if ((klen == 24) &&
3383 cas_vpd_match(p + 5, "entropy-dev") &&
3384 cas_vpd_match(p + 17, "vms110")) {
3385 cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
3386 goto next;
3387 }
3388#endif
3389
3390 if (found & VPD_FOUND_PHY)
3391 goto next;
3392
3393 if ((klen == 18) && readb(p + 4) == 4 &&
3394 cas_vpd_match(p + 5, "phy-type")) {
3395 if (cas_vpd_match(p + 14, "pcs")) {
3396 phy_type = CAS_PHY_SERDES;
3397 goto found_phy;
3398 }
3399 }
3400
3401 if ((klen == 23) && readb(p + 4) == 4 &&
3402 cas_vpd_match(p + 5, "phy-interface")) {
3403 if (cas_vpd_match(p + 19, "pcs")) {
3404 phy_type = CAS_PHY_SERDES;
3405 goto found_phy;
3406 }
3407 }
3408found_mac:
3409 found |= VPD_FOUND_MAC;
3410 goto next;
3411
3412found_phy:
3413 found |= VPD_FOUND_PHY;
3414
3415next:
3416 p += klen;
3417 }
3418 i += len + 3;
3419 }
3420
3421use_random_mac_addr:
3422 if (found & VPD_FOUND_MAC)
3423 goto done;
3424
3425
3426 printk(PFX "MAC address not found in ROM VPD\n");
3427 dev_addr[0] = 0x08;
3428 dev_addr[1] = 0x00;
3429 dev_addr[2] = 0x20;
3430 get_random_bytes(dev_addr + 3, 3);
3431
3432done:
3433 writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3434 return phy_type;
3435}
3436
3437
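/* Derive chip-revision dependent flags: the REG_PLUS register layout, the
 * target-abort workaround, lack of hardware checksumming on early Cassini
 * revisions, and the Saturn (National Semiconductor) variant.
 */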
3438static void cas_check_pci_invariants(struct cas *cp)
3439{
3440 struct pci_dev *pdev = cp->pdev;
3441
3442 cp->cas_flags = 0;
3443 if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
3444 (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
3445 if (pdev->revision >= CAS_ID_REVPLUS)
3446 cp->cas_flags |= CAS_FLAG_REG_PLUS;
3447 if (pdev->revision < CAS_ID_REVPLUS02u)
3448 cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
3449
3450
3451
3452
3453 if (pdev->revision < CAS_ID_REV2)
3454 cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
3455 } else {
3456
3457 cp->cas_flags |= CAS_FLAG_REG_PLUS;
3458
3459
3460
3461
3462 if ((pdev->vendor == PCI_VENDOR_ID_NS) &&
3463 (pdev->device == PCI_DEVICE_ID_NS_SATURN))
3464 cp->cas_flags |= CAS_FLAG_SATURN;
3465 }
3466}
3467
3468
3469static int cas_check_invariants(struct cas *cp)
3470{
3471 struct pci_dev *pdev = cp->pdev;
3472 u32 cfg;
3473 int i;
3474
3475
3476 cp->page_order = 0;
3477#ifdef USE_PAGE_ORDER
3478 if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
3479
3480 struct page *page = alloc_pages(GFP_ATOMIC,
3481 CAS_JUMBO_PAGE_SHIFT -
3482 PAGE_SHIFT);
3483 if (page) {
3484 __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
3485 cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
3486 } else {
3487 printk(PFX "MTU limited to %d bytes\n", CAS_MAX_MTU);
3488 }
3489 }
3490#endif
3491 cp->page_size = (PAGE_SIZE << cp->page_order);
3492
3493
3494 cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
3495 cp->rx_fifo_size = RX_FIFO_SIZE;
3496
3497
3498
3499
3500 cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
3501 PCI_SLOT(pdev->devfn));
3502 if (cp->phy_type & CAS_PHY_SERDES) {
3503 cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3504 return 0;
3505 }
3506
3507
3508 cfg = readl(cp->regs + REG_MIF_CFG);
3509 if (cfg & MIF_CFG_MDIO_1) {
3510 cp->phy_type = CAS_PHY_MII_MDIO1;
3511 } else if (cfg & MIF_CFG_MDIO_0) {
3512 cp->phy_type = CAS_PHY_MII_MDIO0;
3513 }
3514
3515 cas_mif_poll(cp, 0);
3516 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3517
3518 for (i = 0; i < 32; i++) {
3519 u32 phy_id;
3520 int j;
3521
3522 for (j = 0; j < 3; j++) {
3523 cp->phy_addr = i;
3524 phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
3525 phy_id |= cas_phy_read(cp, MII_PHYSID2);
3526 if (phy_id && (phy_id != 0xFFFFFFFF)) {
3527 cp->phy_id = phy_id;
3528 goto done;
3529 }
3530 }
3531 }
3532 printk(KERN_ERR PFX "MII phy did not respond [%08x]\n",
3533 readl(cp->regs + REG_MIF_STATE_MACHINE));
3534 return -1;
3535
3536done:
3537
3538 cfg = cas_phy_read(cp, MII_BMSR);
3539 if ((cfg & CAS_BMSR_1000_EXTEND) &&
3540 cas_phy_read(cp, CAS_MII_1000_EXTEND))
3541 cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3542 return 0;
3543}
3544
3545
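/* Enable tx/rx DMA and the MAC, wait for the MAC enables to take effect,
 * then unmask interrupts and prime the rx kick and completion tail
 * registers.
 */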
3546static inline void cas_start_dma(struct cas *cp)
3547{
3548 int i;
3549 u32 val;
3550 int txfailed = 0;
3551
3552
3553 val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
3554 writel(val, cp->regs + REG_TX_CFG);
3555 val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
3556 writel(val, cp->regs + REG_RX_CFG);
3557
3558
3559 val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
3560 writel(val, cp->regs + REG_MAC_TX_CFG);
3561 val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
3562 writel(val, cp->regs + REG_MAC_RX_CFG);
3563
3564 i = STOP_TRIES;
3565 while (i-- > 0) {
3566 val = readl(cp->regs + REG_MAC_TX_CFG);
3567 if ((val & MAC_TX_CFG_EN))
3568 break;
3569 udelay(10);
3570 }
3571 if (i < 0) txfailed = 1;
3572 i = STOP_TRIES;
3573 while (i-- > 0) {
3574 val = readl(cp->regs + REG_MAC_RX_CFG);
3575 if ((val & MAC_RX_CFG_EN)) {
3576 if (txfailed) {
3577 printk(KERN_ERR
3578 "%s: enabling mac failed [tx:%08x:%08x].\n",
3579 cp->dev->name,
3580 readl(cp->regs + REG_MIF_STATE_MACHINE),
3581 readl(cp->regs + REG_MAC_STATE_MACHINE));
3582 }
3583 goto enable_rx_done;
3584 }
3585 udelay(10);
3586 }
3587 printk(KERN_ERR "%s: enabling mac failed [%s:%08x:%08x].\n",
3588 cp->dev->name,
3589 (txfailed? "tx,rx":"rx"),
3590 readl(cp->regs + REG_MIF_STATE_MACHINE),
3591 readl(cp->regs + REG_MAC_STATE_MACHINE));
3592
3593enable_rx_done:
3594 cas_unmask_intr(cp);
3595 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
3596 writel(0, cp->regs + REG_RX_COMP_TAIL);
3597
3598 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
3599 if (N_RX_DESC_RINGS > 1)
3600 writel(RX_DESC_RINGN_SIZE(1) - 4,
3601 cp->regs + REG_PLUS_RX_KICK1);
3602
3603 for (i = 1; i < N_RX_COMP_RINGS; i++)
3604 writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
3605 }
3606}
3607
3608
3609static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
3610 int *pause)
3611{
3612 u32 val = readl(cp->regs + REG_PCS_MII_LPA);
3613 *fd = (val & PCS_MII_LPA_FD) ? 1 : 0;
3614 *pause = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00;
3615 if (val & PCS_MII_LPA_ASYM_PAUSE)
3616 *pause |= 0x10;
3617 *spd = 1000;
3618}
3619
3620
3621static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
3622 int *pause)
3623{
3624 u32 val;
3625
3626 *fd = 0;
3627 *spd = 10;
3628 *pause = 0;
3629
3630
3631 val = cas_phy_read(cp, MII_LPA);
3632 if (val & CAS_LPA_PAUSE)
3633 *pause = 0x01;
3634
3635 if (val & CAS_LPA_ASYM_PAUSE)
3636 *pause |= 0x10;
3637
3638 if (val & LPA_DUPLEX)
3639 *fd = 1;
3640 if (val & LPA_100)
3641 *spd = 100;
3642
3643 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
3644 val = cas_phy_read(cp, CAS_MII_1000_STATUS);
3645 if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF))
3646 *spd = 1000;
3647 if (val & CAS_LPA_1000FULL)
3648 *fd = 1;
3649 }
3650}
3651
3652
3653
3654
3655
3656
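/* Program the XIF, MAC tx/rx and flow-control registers to match the
 * negotiated (or forced) speed and duplex.  Half-duplex gigabit needs
 * carrier extension and keeps the FCS in the data so the driver can strip
 * it itself.
 */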
3657static void cas_set_link_modes(struct cas *cp)
3658{
3659 u32 val;
3660 int full_duplex, speed, pause;
3661
3662 full_duplex = 0;
3663 speed = 10;
3664 pause = 0;
3665
3666 if (CAS_PHY_MII(cp->phy_type)) {
3667 cas_mif_poll(cp, 0);
3668 val = cas_phy_read(cp, MII_BMCR);
3669 if (val & BMCR_ANENABLE) {
3670 cas_read_mii_link_mode(cp, &full_duplex, &speed,
3671 &pause);
3672 } else {
3673 if (val & BMCR_FULLDPLX)
3674 full_duplex = 1;
3675
3676 if (val & BMCR_SPEED100)
3677 speed = 100;
3678 else if (val & CAS_BMCR_SPEED1000)
3679 speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
3680 1000 : 100;
3681 }
3682 cas_mif_poll(cp, 1);
3683
3684 } else {
3685 val = readl(cp->regs + REG_PCS_MII_CTRL);
3686 cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
3687 if ((val & PCS_MII_AUTONEG_EN) == 0) {
3688 if (val & PCS_MII_CTRL_DUPLEX)
3689 full_duplex = 1;
3690 }
3691 }
3692
3693 if (netif_msg_link(cp))
3694 printk(KERN_INFO "%s: Link up at %d Mbps, %s-duplex.\n",
3695 cp->dev->name, speed, (full_duplex ? "full" : "half"));
3696
3697 val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
3698 if (CAS_PHY_MII(cp->phy_type)) {
3699 val |= MAC_XIF_MII_BUFFER_OUTPUT_EN;
3700 if (!full_duplex)
3701 val |= MAC_XIF_DISABLE_ECHO;
3702 }
3703 if (full_duplex)
3704 val |= MAC_XIF_FDPLX_LED;
3705 if (speed == 1000)
3706 val |= MAC_XIF_GMII_MODE;
3707 writel(val, cp->regs + REG_MAC_XIF_CFG);
3708
3709
3710 val = MAC_TX_CFG_IPG_EN;
3711 if (full_duplex) {
3712 val |= MAC_TX_CFG_IGNORE_CARRIER;
3713 val |= MAC_TX_CFG_IGNORE_COLL;
3714 } else {
3715#ifndef USE_CSMA_CD_PROTO
3716 val |= MAC_TX_CFG_NEVER_GIVE_UP_EN;
3717 val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM;
3718#endif
3719 }
3720
3721
3722
3723
3724
3725
3726
3727 if ((speed == 1000) && !full_duplex) {
3728 writel(val | MAC_TX_CFG_CARRIER_EXTEND,
3729 cp->regs + REG_MAC_TX_CFG);
3730
3731 val = readl(cp->regs + REG_MAC_RX_CFG);
3732 val &= ~MAC_RX_CFG_STRIP_FCS;
3733 writel(val | MAC_RX_CFG_CARRIER_EXTEND,
3734 cp->regs + REG_MAC_RX_CFG);
3735
3736 writel(0x200, cp->regs + REG_MAC_SLOT_TIME);
3737
3738 cp->crc_size = 4;
3739
3740 cp->min_frame_size = CAS_1000MB_MIN_FRAME;
3741
3742 } else {
3743 writel(val, cp->regs + REG_MAC_TX_CFG);
3744
3745
3746
3747
3748 val = readl(cp->regs + REG_MAC_RX_CFG);
3749 if (full_duplex) {
3750 val |= MAC_RX_CFG_STRIP_FCS;
3751 cp->crc_size = 0;
3752 cp->min_frame_size = CAS_MIN_MTU;
3753 } else {
3754 val &= ~MAC_RX_CFG_STRIP_FCS;
3755 cp->crc_size = 4;
3756 cp->min_frame_size = CAS_MIN_FRAME;
3757 }
3758 writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
3759 cp->regs + REG_MAC_RX_CFG);
3760 writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3761 }
3762
3763 if (netif_msg_link(cp)) {
3764 if (pause & 0x01) {
3765 printk(KERN_INFO "%s: Pause is enabled "
3766 "(rxfifo: %d off: %d on: %d)\n",
3767 cp->dev->name,
3768 cp->rx_fifo_size,
3769 cp->rx_pause_off,
3770 cp->rx_pause_on);
3771 } else if (pause & 0x10) {
3772 printk(KERN_INFO "%s: TX pause enabled\n",
3773 cp->dev->name);
3774 } else {
3775 printk(KERN_INFO "%s: Pause is disabled\n",
3776 cp->dev->name);
3777 }
3778 }
3779
3780 val = readl(cp->regs + REG_MAC_CTRL_CFG);
3781 val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
3782 if (pause) {
3783 val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
3784 if (pause & 0x01) {
3785 val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
3786 }
3787 }
3788 writel(val, cp->regs + REG_MAC_CTRL_CFG);
3789 cas_start_dma(cp);
3790}
3791
3792
3793static void cas_init_hw(struct cas *cp, int restart_link)
3794{
3795 if (restart_link)
3796 cas_phy_init(cp);
3797
3798 cas_init_pause_thresholds(cp);
3799 cas_init_mac(cp);
3800 cas_init_dma(cp);
3801
3802 if (restart_link) {
3803
3804 cp->timer_ticks = 0;
3805 cas_begin_auto_negotiation(cp, NULL);
3806 } else if (cp->lstate == link_up) {
3807 cas_set_link_modes(cp);
3808 netif_carrier_on(cp->dev);
3809 }
3810}
3811
3812
3813
3814
3815
3816static void cas_hard_reset(struct cas *cp)
3817{
3818 writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3819 udelay(20);
3820 pci_restore_state(cp->pdev);
3821}
3822
3823
3824static void cas_global_reset(struct cas *cp, int blkflag)
3825{
3826 int limit;
3827
3828
3829 if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
3830
3831
3832
3833
3834
3835
3836 writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
3837 cp->regs + REG_SW_RESET);
3838 } else {
3839 writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
3840 }
3841
3842
3843 mdelay(3);
3844
3845 limit = STOP_TRIES;
3846 while (limit-- > 0) {
3847 u32 val = readl(cp->regs + REG_SW_RESET);
3848 if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
3849 goto done;
3850 udelay(10);
3851 }
3852 printk(KERN_ERR "%s: sw reset failed.\n", cp->dev->name);
3853
3854done:
3855
3856 writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
3857 BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
3858
3859
3860
3861
3862
3863 writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
3864 PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
3865 PCI_ERR_BIM_DMA_READ), cp->regs +
3866 REG_PCI_ERR_STATUS_MASK);
3867
3868
3869
3870
3871 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3872}
3873
3874static void cas_reset(struct cas *cp, int blkflag)
3875{
3876 u32 val;
3877
3878 cas_mask_intr(cp);
3879 cas_global_reset(cp, blkflag);
3880 cas_mac_reset(cp);
3881 cas_entropy_reset(cp);
3882
3883
3884 val = readl(cp->regs + REG_TX_CFG);
3885 val &= ~TX_CFG_DMA_EN;
3886 writel(val, cp->regs + REG_TX_CFG);
3887
3888 val = readl(cp->regs + REG_RX_CFG);
3889 val &= ~RX_CFG_DMA_EN;
3890 writel(val, cp->regs + REG_RX_CFG);
3891
3892
3893 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
3894 (CAS_HP_ALT_FIRMWARE == cas_prog_null)) {
3895 cas_load_firmware(cp, CAS_HP_FIRMWARE);
3896 } else {
3897 cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
3898 }
3899
3900
3901 spin_lock(&cp->stat_lock[N_TX_RINGS]);
3902 cas_clear_mac_err(cp);
3903 spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3904}
3905
3906
3907static void cas_shutdown(struct cas *cp)
3908{
3909 unsigned long flags;
3910
3911
3912 cp->hw_running = 0;
3913
3914 del_timer_sync(&cp->link_timer);
3915
3916
3917#if 0
3918 while (atomic_read(&cp->reset_task_pending_mtu) ||
3919 atomic_read(&cp->reset_task_pending_spare) ||
3920 atomic_read(&cp->reset_task_pending_all))
3921 schedule();
3922
3923#else
3924 while (atomic_read(&cp->reset_task_pending))
3925 schedule();
3926#endif
3927
3928 cas_lock_all_save(cp, flags);
3929 cas_reset(cp, 0);
3930 if (cp->cas_flags & CAS_FLAG_SATURN)
3931 cas_phy_powerdown(cp);
3932 cas_unlock_all_restore(cp, flags);
3933}
3934
3935static int cas_change_mtu(struct net_device *dev, int new_mtu)
3936{
3937 struct cas *cp = netdev_priv(dev);
3938
3939 if (new_mtu < CAS_MIN_MTU || new_mtu > CAS_MAX_MTU)
3940 return -EINVAL;
3941
3942 dev->mtu = new_mtu;
3943 if (!netif_running(dev) || !netif_device_present(dev))
3944 return 0;
3945
3946
3947#if 1
3948 atomic_inc(&cp->reset_task_pending);
3949 if ((cp->phy_type & CAS_PHY_SERDES)) {
3950 atomic_inc(&cp->reset_task_pending_all);
3951 } else {
3952 atomic_inc(&cp->reset_task_pending_mtu);
3953 }
3954 schedule_work(&cp->reset_task);
3955#else
3956 atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
3957 CAS_RESET_ALL : CAS_RESET_MTU);
3958 printk(KERN_ERR "reset called in cas_change_mtu\n");
3959 schedule_work(&cp->reset_task);
3960#endif
3961
3962 flush_scheduled_work();
3963 return 0;
3964}
3965
3966static void cas_clean_txd(struct cas *cp, int ring)
3967{
3968 struct cas_tx_desc *txd = cp->init_txds[ring];
3969 struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
3970 u64 daddr, dlen;
3971 int i, size;
3972
3973 size = TX_DESC_RINGN_SIZE(ring);
3974 for (i = 0; i < size; i++) {
3975 int frag;
3976
3977 if (skbs[i] == NULL)
3978 continue;
3979
3980 skb = skbs[i];
3981 skbs[i] = NULL;
3982
3983 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
3984 int ent = i & (size - 1);
3985
3986
3987
3988
3989 daddr = le64_to_cpu(txd[ent].buffer);
3990 dlen = CAS_VAL(TX_DESC_BUFLEN,
3991 le64_to_cpu(txd[ent].control));
3992 pci_unmap_page(cp->pdev, daddr, dlen,
3993 PCI_DMA_TODEVICE);
3994
3995 if (frag != skb_shinfo(skb)->nr_frags) {
3996 i++;
3997
3998
3999
4000
4001 ent = i & (size - 1);
4002 if (cp->tx_tiny_use[ring][ent].used)
4003 i++;
4004 }
4005 }
4006 dev_kfree_skb_any(skb);
4007 }
4008
4009
4010 memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
4011}
4012
4013
4014static inline void cas_free_rx_desc(struct cas *cp, int ring)
4015{
4016 cas_page_t **page = cp->rx_pages[ring];
4017 int i, size;
4018
4019 size = RX_DESC_RINGN_SIZE(ring);
4020 for (i = 0; i < size; i++) {
4021 if (page[i]) {
4022 cas_page_free(cp, page[i]);
4023 page[i] = NULL;
4024 }
4025 }
4026}
4027
4028static void cas_free_rxds(struct cas *cp)
4029{
4030 int i;
4031
4032 for (i = 0; i < N_RX_DESC_RINGS; i++)
4033 cas_free_rx_desc(cp, i);
4034}
4035
4036
4037static void cas_clean_rings(struct cas *cp)
4038{
4039 int i;
4040
4041
4042 memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
4043 memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
4044 for (i = 0; i < N_TX_RINGS; i++)
4045 cas_clean_txd(cp, i);
4046
4047
4048 memset(cp->init_block, 0, sizeof(struct cas_init_block));
4049 cas_clean_rxds(cp);
4050 cas_clean_rxcs(cp);
4051}
4052
4053
4054static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
4055{
4056 cas_page_t **page = cp->rx_pages[ring];
4057 int size, i = 0;
4058
4059 size = RX_DESC_RINGN_SIZE(ring);
4060 for (i = 0; i < size; i++) {
4061 if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
4062 return -1;
4063 }
4064 return 0;
4065}
4066
4067static int cas_alloc_rxds(struct cas *cp)
4068{
4069 int i;
4070
4071 for (i = 0; i < N_RX_DESC_RINGS; i++) {
4072 if (cas_alloc_rx_desc(cp, i) < 0) {
4073 cas_free_rxds(cp);
4074 return -1;
4075 }
4076 }
4077 return 0;
4078}
4079
4080static void cas_reset_task(struct work_struct *work)
4081{
4082 struct cas *cp = container_of(work, struct cas, reset_task);
4083#if 0
4084 int pending = atomic_read(&cp->reset_task_pending);
4085#else
4086 int pending_all = atomic_read(&cp->reset_task_pending_all);
4087 int pending_spare = atomic_read(&cp->reset_task_pending_spare);
4088 int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);
4089
4090 if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) {
4091
4092
4093
4094 atomic_dec(&cp->reset_task_pending);
4095 return;
4096 }
4097#endif
4098
4099
4100
4101
4102 if (cp->hw_running) {
4103 unsigned long flags;
4104
4105
4106 netif_device_detach(cp->dev);
4107 cas_lock_all_save(cp, flags);
4108
4109 if (cp->opened) {
4110
4111
4112
4113
4114 cas_spare_recover(cp, GFP_ATOMIC);
4115 }
4116#if 1
4117
4118 if (!pending_all && !pending_mtu)
4119 goto done;
4120#else
4121 if (pending == CAS_RESET_SPARE)
4122 goto done;
4123#endif
4124
4125
4126
4127
4128
4129
4130
4131#if 1
4132 cas_reset(cp, !(pending_all > 0));
4133 if (cp->opened)
4134 cas_clean_rings(cp);
4135 cas_init_hw(cp, (pending_all > 0));
4136#else
4137 cas_reset(cp, !(pending == CAS_RESET_ALL));
4138 if (cp->opened)
4139 cas_clean_rings(cp);
4140 cas_init_hw(cp, pending == CAS_RESET_ALL);
4141#endif
4142
4143done:
4144 cas_unlock_all_restore(cp, flags);
4145 netif_device_attach(cp->dev);
4146 }
4147#if 1
4148 atomic_sub(pending_all, &cp->reset_task_pending_all);
4149 atomic_sub(pending_spare, &cp->reset_task_pending_spare);
4150 atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
4151 atomic_dec(&cp->reset_task_pending);
4152#else
4153 atomic_set(&cp->reset_task_pending, 0);
4154#endif
4155}
4156
4157static void cas_link_timer(unsigned long data)
4158{
4159 struct cas *cp = (struct cas *) data;
4160 int mask, pending = 0, reset = 0;
4161 unsigned long flags;
4162
4163 if (link_transition_timeout != 0 &&
4164 cp->link_transition_jiffies_valid &&
4165 ((jiffies - cp->link_transition_jiffies) >
4166 (link_transition_timeout))) {
4167
4168
4169
4170
4171 cp->link_transition_jiffies_valid = 0;
4172 }
4173
4174 if (!cp->hw_running)
4175 return;
4176
4177 spin_lock_irqsave(&cp->lock, flags);
4178 cas_lock_tx(cp);
4179 cas_entropy_gather(cp);
4180
4181
4182
4183
4184#if 1
4185 if (atomic_read(&cp->reset_task_pending_all) ||
4186 atomic_read(&cp->reset_task_pending_spare) ||
4187 atomic_read(&cp->reset_task_pending_mtu))
4188 goto done;
4189#else
4190 if (atomic_read(&cp->reset_task_pending))
4191 goto done;
4192#endif
4193
4194
4195 if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) {
4196 int i, rmask;
4197
4198 for (i = 0; i < MAX_RX_DESC_RINGS; i++) {
4199 rmask = CAS_FLAG_RXD_POST(i);
4200 if ((mask & rmask) == 0)
4201 continue;
4202
4203
4204 if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
4205 pending = 1;
4206 continue;
4207 }
4208 cp->cas_flags &= ~rmask;
4209 }
4210 }
4211
4212 if (CAS_PHY_MII(cp->phy_type)) {
4213 u16 bmsr;
4214 cas_mif_poll(cp, 0);
4215 bmsr = cas_phy_read(cp, MII_BMSR);
4216
4217
4218
4219
4220
4221 bmsr = cas_phy_read(cp, MII_BMSR);
4222 cas_mif_poll(cp, 1);
4223 readl(cp->regs + REG_MIF_STATUS);
4224 reset = cas_mii_link_check(cp, bmsr);
4225 } else {
4226 reset = cas_pcs_link_check(cp);
4227 }
4228
4229 if (reset)
4230 goto done;
4231
4232
4233 if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
4234 u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
4235 u32 wptr, rptr;
4236 int tlm = CAS_VAL(MAC_SM_TLM, val);
4237
4238 if (((tlm == 0x5) || (tlm == 0x3)) &&
4239 (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
4240 if (netif_msg_tx_err(cp))
4241 printk(KERN_DEBUG "%s: tx err: "
4242 "MAC_STATE[%08x]\n",
4243 cp->dev->name, val);
4244 reset = 1;
4245 goto done;
4246 }
4247
4248 val = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
4249 wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
4250 rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
4251 if ((val == 0) && (wptr != rptr)) {
4252 if (netif_msg_tx_err(cp))
4253 printk(KERN_DEBUG "%s: tx err: "
4254 "TX_FIFO[%08x:%08x:%08x]\n",
4255 cp->dev->name, val, wptr, rptr);
4256 reset = 1;
4257 }
4258
4259 if (reset)
4260 cas_hard_reset(cp);
4261 }
4262
4263done:
4264 if (reset) {
4265#if 1
4266 atomic_inc(&cp->reset_task_pending);
4267 atomic_inc(&cp->reset_task_pending_all);
4268 schedule_work(&cp->reset_task);
4269#else
4270 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
4271 printk(KERN_ERR "reset called in cas_link_timer\n");
4272 schedule_work(&cp->reset_task);
4273#endif
4274 }
4275
4276 if (!pending)
4277 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
4278 cas_unlock_tx(cp);
4279 spin_unlock_irqrestore(&cp->lock, flags);
4280}
4281
4282
4283
4284
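/* The "tiny" buffers are small DMA-consistent blocks, one block per tx
 * ring, used to bounce buffer tails that cas_calc_tabort() says cannot be
 * DMAed in place.
 */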
4285static void cas_tx_tiny_free(struct cas *cp)
4286{
4287 struct pci_dev *pdev = cp->pdev;
4288 int i;
4289
4290 for (i = 0; i < N_TX_RINGS; i++) {
4291 if (!cp->tx_tiny_bufs[i])
4292 continue;
4293
4294 pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
4295 cp->tx_tiny_bufs[i],
4296 cp->tx_tiny_dvma[i]);
4297 cp->tx_tiny_bufs[i] = NULL;
4298 }
4299}
4300
4301static int cas_tx_tiny_alloc(struct cas *cp)
4302{
4303 struct pci_dev *pdev = cp->pdev;
4304 int i;
4305
4306 for (i = 0; i < N_TX_RINGS; i++) {
4307 cp->tx_tiny_bufs[i] =
4308 pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
4309 &cp->tx_tiny_dvma[i]);
4310 if (!cp->tx_tiny_bufs[i]) {
4311 cas_tx_tiny_free(cp);
4312 return -1;
4313 }
4314 }
4315 return 0;
4316}
4317
4318
4319static int cas_open(struct net_device *dev)
4320{
4321 struct cas *cp = netdev_priv(dev);
4322 int hw_was_up, err;
4323 unsigned long flags;
4324
4325 mutex_lock(&cp->pm_mutex);
4326
4327 hw_was_up = cp->hw_running;
4328
4329
4330
4331
4332 if (!cp->hw_running) {
4333
4334 cas_lock_all_save(cp, flags);
4335
4336
4337
4338
4339
4340 cas_reset(cp, 0);
4341 cp->hw_running = 1;
4342 cas_unlock_all_restore(cp, flags);
4343 }
4344
4345 err = -ENOMEM;
4346 if (cas_tx_tiny_alloc(cp) < 0)
4347 goto err_unlock;
4348
4349
4350 if (cas_alloc_rxds(cp) < 0)
4351 goto err_tx_tiny;
4352
4353
4354 cas_spare_init(cp);
4355 cas_spare_recover(cp, GFP_KERNEL);
4356
4357
4358
4359
4360
4361
4362 if (request_irq(cp->pdev->irq, cas_interrupt,
4363 IRQF_SHARED, dev->name, (void *) dev)) {
4364 printk(KERN_ERR "%s: failed to request irq !\n",
4365 cp->dev->name);
4366 err = -EAGAIN;
4367 goto err_spare;
4368 }
4369
4370#ifdef USE_NAPI
4371 napi_enable(&cp->napi);
4372#endif
4373
4374 cas_lock_all_save(cp, flags);
4375 cas_clean_rings(cp);
4376 cas_init_hw(cp, !hw_was_up);
4377 cp->opened = 1;
4378 cas_unlock_all_restore(cp, flags);
4379
4380 netif_start_queue(dev);
4381 mutex_unlock(&cp->pm_mutex);
4382 return 0;
4383
4384err_spare:
4385 cas_spare_free(cp);
4386 cas_free_rxds(cp);
4387err_tx_tiny:
4388 cas_tx_tiny_free(cp);
4389err_unlock:
4390 mutex_unlock(&cp->pm_mutex);
4391 return err;
4392}
4393
4394static int cas_close(struct net_device *dev)
4395{
4396 unsigned long flags;
4397 struct cas *cp = netdev_priv(dev);
4398
4399#ifdef USE_NAPI
4400 napi_disable(&cp->napi);
4401#endif
4402
4403 mutex_lock(&cp->pm_mutex);
4404
4405 netif_stop_queue(dev);
4406
4407
4408 cas_lock_all_save(cp, flags);
4409 cp->opened = 0;
4410 cas_reset(cp, 0);
4411 cas_phy_init(cp);
4412 cas_begin_auto_negotiation(cp, NULL);
4413 cas_clean_rings(cp);
4414 cas_unlock_all_restore(cp, flags);
4415
4416 free_irq(cp->pdev->irq, (void *) dev);
4417 cas_spare_free(cp);
4418 cas_free_rxds(cp);
4419 cas_tx_tiny_free(cp);
4420 mutex_unlock(&cp->pm_mutex);
4421 return 0;
4422}
4423
4424static struct {
4425 const char name[ETH_GSTRING_LEN];
4426} ethtool_cassini_statnames[] = {
4427 {"collisions"},
4428 {"rx_bytes"},
4429 {"rx_crc_errors"},
4430 {"rx_dropped"},
4431 {"rx_errors"},
4432 {"rx_fifo_errors"},
4433 {"rx_frame_errors"},
4434 {"rx_length_errors"},
4435 {"rx_over_errors"},
4436 {"rx_packets"},
4437 {"tx_aborted_errors"},
4438 {"tx_bytes"},
4439 {"tx_dropped"},
4440 {"tx_errors"},
4441 {"tx_fifo_errors"},
4442 {"tx_packets"}
4443};
4444#define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames)
4445
4446static struct {
4447 const int offsets;
4448} ethtool_register_table[] = {
4449 {-MII_BMSR},
4450 {-MII_BMCR},
4451 {REG_CAWR},
4452 {REG_INF_BURST},
4453 {REG_BIM_CFG},
4454 {REG_RX_CFG},
4455 {REG_HP_CFG},
4456 {REG_MAC_TX_CFG},
4457 {REG_MAC_RX_CFG},
4458 {REG_MAC_CTRL_CFG},
4459 {REG_MAC_XIF_CFG},
4460 {REG_MIF_CFG},
4461 {REG_PCS_CFG},
4462 {REG_SATURN_PCFG},
4463 {REG_PCS_MII_STATUS},
4464 {REG_PCS_STATE_MACHINE},
4465 {REG_MAC_COLL_EXCESS},
4466 {REG_MAC_COLL_LATE}
4467};
4468#define CAS_REG_LEN ARRAY_SIZE(ethtool_register_table)
4469#define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN)
4470
4471static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
4472{
4473 u8 *p;
4474 int i;
4475 unsigned long flags;
4476
4477 spin_lock_irqsave(&cp->lock, flags);
4478 for (i = 0, p = ptr; i < len ; i ++, p += sizeof(u32)) {
4479 u16 hval;
4480 u32 val;
4481 if (ethtool_register_table[i].offsets < 0) {
4482 hval = cas_phy_read(cp,
4483 -ethtool_register_table[i].offsets);
4484 val = hval;
4485 } else {
4486 val= readl(cp->regs+ethtool_register_table[i].offsets);
4487 }
4488 memcpy(p, (u8 *)&val, sizeof(u32));
4489 }
4490 spin_unlock_irqrestore(&cp->lock, flags);
4491}
4492
4493static struct net_device_stats *cas_get_stats(struct net_device *dev)
4494{
4495 struct cas *cp = netdev_priv(dev);
4496 struct net_device_stats *stats = cp->net_stats;
4497 unsigned long flags;
4498 int i;
4499 unsigned long tmp;
4500
4501
4502 if (!cp->hw_running)
4503 return stats + N_TX_RINGS;
4504
4505
4506
4507
4508
4509
4510
4511
4512
4513 spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
4514 stats[N_TX_RINGS].rx_crc_errors +=
4515 readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
4516 stats[N_TX_RINGS].rx_frame_errors +=
4517 readl(cp->regs + REG_MAC_ALIGN_ERR) &0xffff;
4518 stats[N_TX_RINGS].rx_length_errors +=
4519 readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
4520#if 1
4521 tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
4522 (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff);
4523 stats[N_TX_RINGS].tx_aborted_errors += tmp;
4524 stats[N_TX_RINGS].collisions +=
4525 tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
4526#else
4527 stats[N_TX_RINGS].tx_aborted_errors +=
4528 readl(cp->regs + REG_MAC_COLL_EXCESS);
4529 stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
4530 readl(cp->regs + REG_MAC_COLL_LATE);
4531#endif
4532 cas_clear_mac_err(cp);
4533
4534
4535 spin_lock(&cp->stat_lock[0]);
4536 stats[N_TX_RINGS].collisions += stats[0].collisions;
4537 stats[N_TX_RINGS].rx_over_errors += stats[0].rx_over_errors;
4538 stats[N_TX_RINGS].rx_frame_errors += stats[0].rx_frame_errors;
4539 stats[N_TX_RINGS].rx_fifo_errors += stats[0].rx_fifo_errors;
4540 stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors;
4541 stats[N_TX_RINGS].tx_fifo_errors += stats[0].tx_fifo_errors;
4542 spin_unlock(&cp->stat_lock[0]);
4543
4544 for (i = 0; i < N_TX_RINGS; i++) {
4545 spin_lock(&cp->stat_lock[i]);
4546 stats[N_TX_RINGS].rx_length_errors +=
4547 stats[i].rx_length_errors;
4548 stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
4549 stats[N_TX_RINGS].rx_packets += stats[i].rx_packets;
4550 stats[N_TX_RINGS].tx_packets += stats[i].tx_packets;
4551 stats[N_TX_RINGS].rx_bytes += stats[i].rx_bytes;
4552 stats[N_TX_RINGS].tx_bytes += stats[i].tx_bytes;
4553 stats[N_TX_RINGS].rx_errors += stats[i].rx_errors;
4554 stats[N_TX_RINGS].tx_errors += stats[i].tx_errors;
4555 stats[N_TX_RINGS].rx_dropped += stats[i].rx_dropped;
4556 stats[N_TX_RINGS].tx_dropped += stats[i].tx_dropped;
4557 memset(stats + i, 0, sizeof(struct net_device_stats));
4558 spin_unlock(&cp->stat_lock[i]);
4559 }
4560 spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags);
4561 return stats + N_TX_RINGS;
4562}
4563
4564
4565static void cas_set_multicast(struct net_device *dev)
4566{
4567 struct cas *cp = netdev_priv(dev);
4568 u32 rxcfg, rxcfg_new;
4569 unsigned long flags;
4570 int limit = STOP_TRIES;
4571
4572 if (!cp->hw_running)
4573 return;
4574
4575 spin_lock_irqsave(&cp->lock, flags);
4576 rxcfg = readl(cp->regs + REG_MAC_RX_CFG);
4577
4578
4579 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4580 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
4581 if (!limit--)
4582 break;
4583 udelay(10);
4584 }
4585
4586
4587 limit = STOP_TRIES;
4588 rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN);
4589 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4590 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) {
4591 if (!limit--)
4592 break;
4593 udelay(10);
4594 }
4595
4596
4597 cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp);
4598 rxcfg |= rxcfg_new;
4599 writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
4600 spin_unlock_irqrestore(&cp->lock, flags);
4601}
4602
4603static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4604{
4605 struct cas *cp = netdev_priv(dev);
4606 strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN);
4607 strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN);
4608 info->fw_version[0] = '\0';
4609 strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN);
4610 info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
4611 cp->casreg_len : CAS_MAX_REGS;
4612 info->n_stats = CAS_NUM_STAT_KEYS;
4613}
4614
4615static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4616{
4617 struct cas *cp = netdev_priv(dev);
4618 u16 bmcr;
4619 int full_duplex, speed, pause;
4620 unsigned long flags;
4621 enum link_state linkstate = link_up;
4622
4623 cmd->advertising = 0;
4624 cmd->supported = SUPPORTED_Autoneg;
4625 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
4626 cmd->supported |= SUPPORTED_1000baseT_Full;
4627 cmd->advertising |= ADVERTISED_1000baseT_Full;
4628 }
4629
4630
4631 spin_lock_irqsave(&cp->lock, flags);
4632 bmcr = 0;
4633 linkstate = cp->lstate;
4634 if (CAS_PHY_MII(cp->phy_type)) {
4635 cmd->port = PORT_MII;
4636 cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ?
4637 XCVR_INTERNAL : XCVR_EXTERNAL;
4638 cmd->phy_address = cp->phy_addr;
4639 cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
4640 ADVERTISED_10baseT_Half |
4641 ADVERTISED_10baseT_Full |
4642 ADVERTISED_100baseT_Half |
4643 ADVERTISED_100baseT_Full;
4644
4645 cmd->supported |=
4646 (SUPPORTED_10baseT_Half |
4647 SUPPORTED_10baseT_Full |
4648 SUPPORTED_100baseT_Half |
4649 SUPPORTED_100baseT_Full |
4650 SUPPORTED_TP | SUPPORTED_MII);
4651
4652 if (cp->hw_running) {
4653 cas_mif_poll(cp, 0);
4654 bmcr = cas_phy_read(cp, MII_BMCR);
4655 cas_read_mii_link_mode(cp, &full_duplex,
4656 &speed, &pause);
4657 cas_mif_poll(cp, 1);
4658 }
4659
4660 } else {
4661 cmd->port = PORT_FIBRE;
4662 cmd->transceiver = XCVR_INTERNAL;
4663 cmd->phy_address = 0;
4664 cmd->supported |= SUPPORTED_FIBRE;
4665 cmd->advertising |= ADVERTISED_FIBRE;
4666
4667 if (cp->hw_running) {
4668
4669 bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
4670 cas_read_pcs_link_mode(cp, &full_duplex,
4671 &speed, &pause);
4672 }
4673 }
4674 spin_unlock_irqrestore(&cp->lock, flags);
4675
4676 if (bmcr & BMCR_ANENABLE) {
4677 cmd->advertising |= ADVERTISED_Autoneg;
4678 cmd->autoneg = AUTONEG_ENABLE;
4679 cmd->speed = ((speed == 10) ?
4680 SPEED_10 :
4681 ((speed == 1000) ?
4682 SPEED_1000 : SPEED_100));
4683 cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
4684 } else {
4685 cmd->autoneg = AUTONEG_DISABLE;
4686 cmd->speed =
4687 (bmcr & CAS_BMCR_SPEED1000) ?
4688 SPEED_1000 :
4689 ((bmcr & BMCR_SPEED100) ? SPEED_100:
4690 SPEED_10);
4691 cmd->duplex =
4692 (bmcr & BMCR_FULLDPLX) ?
4693 DUPLEX_FULL : DUPLEX_HALF;
4694 }
4695 if (linkstate != link_up) {
		/* When the link is down and autonegotiation is enabled,
		 * report the speed and duplex as unknown: a speed of 0 and
		 * a duplex of 0xff make ethtool print "Unknown".  If the
		 * link is forced, report the configured speed and duplex
		 * instead.
		 */
4706 if (cp->link_cntl & BMCR_ANENABLE) {
4707 cmd->speed = 0;
4708 cmd->duplex = 0xff;
4709 } else {
4710 cmd->speed = SPEED_10;
4711 if (cp->link_cntl & BMCR_SPEED100) {
4712 cmd->speed = SPEED_100;
4713 } else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
4714 cmd->speed = SPEED_1000;
4715 }
4716 cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)?
4717 DUPLEX_FULL : DUPLEX_HALF;
4718 }
4719 }
4720 return 0;
4721}
4722
4723static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4724{
4725 struct cas *cp = netdev_priv(dev);
4726 unsigned long flags;
4727
4728
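	/* verify the settings we care about */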
4729 if (cmd->autoneg != AUTONEG_ENABLE &&
4730 cmd->autoneg != AUTONEG_DISABLE)
4731 return -EINVAL;
4732
4733 if (cmd->autoneg == AUTONEG_DISABLE &&
4734 ((cmd->speed != SPEED_1000 &&
4735 cmd->speed != SPEED_100 &&
4736 cmd->speed != SPEED_10) ||
4737 (cmd->duplex != DUPLEX_HALF &&
4738 cmd->duplex != DUPLEX_FULL)))
4739 return -EINVAL;
4740
4741
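	/* apply the settings and restart the link process */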
4742 spin_lock_irqsave(&cp->lock, flags);
4743 cas_begin_auto_negotiation(cp, cmd);
4744 spin_unlock_irqrestore(&cp->lock, flags);
4745 return 0;
4746}
4747
4748static int cas_nway_reset(struct net_device *dev)
4749{
4750 struct cas *cp = netdev_priv(dev);
4751 unsigned long flags;
4752
4753 if ((cp->link_cntl & BMCR_ANENABLE) == 0)
4754 return -EINVAL;
4755
4756
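	/* restart the link process */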
4757 spin_lock_irqsave(&cp->lock, flags);
4758 cas_begin_auto_negotiation(cp, NULL);
4759 spin_unlock_irqrestore(&cp->lock, flags);
4760
4761 return 0;
4762}
4763
4764static u32 cas_get_link(struct net_device *dev)
4765{
4766 struct cas *cp = netdev_priv(dev);
4767 return cp->lstate == link_up;
4768}
4769
4770static u32 cas_get_msglevel(struct net_device *dev)
4771{
4772 struct cas *cp = netdev_priv(dev);
4773 return cp->msg_enable;
4774}
4775
4776static void cas_set_msglevel(struct net_device *dev, u32 value)
4777{
4778 struct cas *cp = netdev_priv(dev);
4779 cp->msg_enable = value;
4780}
4781
4782static int cas_get_regs_len(struct net_device *dev)
4783{
4784 struct cas *cp = netdev_priv(dev);
4785 return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len: CAS_MAX_REGS;
4786}
4787
4788static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
4789 void *p)
4790{
4791 struct cas *cp = netdev_priv(dev);
4792 regs->version = 0;
4793
4794 cas_read_regs(cp, p, regs->len / sizeof(u32));
4795}
4796
4797static int cas_get_sset_count(struct net_device *dev, int sset)
4798{
4799 switch (sset) {
4800 case ETH_SS_STATS:
4801 return CAS_NUM_STAT_KEYS;
4802 default:
4803 return -EOPNOTSUPP;
4804 }
4805}
4806
4807static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4808{
	memcpy(data, &ethtool_cassini_statnames,
4810 CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
4811}
4812
4813static void cas_get_ethtool_stats(struct net_device *dev,
4814 struct ethtool_stats *estats, u64 *data)
4815{
4816 struct cas *cp = netdev_priv(dev);
4817 struct net_device_stats *stats = cas_get_stats(cp->dev);
4818 int i = 0;
4819 data[i++] = stats->collisions;
4820 data[i++] = stats->rx_bytes;
4821 data[i++] = stats->rx_crc_errors;
4822 data[i++] = stats->rx_dropped;
4823 data[i++] = stats->rx_errors;
4824 data[i++] = stats->rx_fifo_errors;
4825 data[i++] = stats->rx_frame_errors;
4826 data[i++] = stats->rx_length_errors;
4827 data[i++] = stats->rx_over_errors;
4828 data[i++] = stats->rx_packets;
4829 data[i++] = stats->tx_aborted_errors;
4830 data[i++] = stats->tx_bytes;
4831 data[i++] = stats->tx_dropped;
4832 data[i++] = stats->tx_errors;
4833 data[i++] = stats->tx_fifo_errors;
4834 data[i++] = stats->tx_packets;
4835 BUG_ON(i != CAS_NUM_STAT_KEYS);
4836}
4837
4838static const struct ethtool_ops cas_ethtool_ops = {
4839 .get_drvinfo = cas_get_drvinfo,
4840 .get_settings = cas_get_settings,
4841 .set_settings = cas_set_settings,
4842 .nway_reset = cas_nway_reset,
4843 .get_link = cas_get_link,
4844 .get_msglevel = cas_get_msglevel,
4845 .set_msglevel = cas_set_msglevel,
4846 .get_regs_len = cas_get_regs_len,
4847 .get_regs = cas_get_regs,
4848 .get_sset_count = cas_get_sset_count,
4849 .get_strings = cas_get_strings,
4850 .get_ethtool_stats = cas_get_ethtool_stats,
4851};
4852
4853static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4854{
4855 struct cas *cp = netdev_priv(dev);
4856 struct mii_ioctl_data *data = if_mii(ifr);
4857 unsigned long flags;
4858 int rc = -EOPNOTSUPP;
4859
4860
4861
4862
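	/* Hold the PM mutex while doing ioctl's or we may collide
	 * with open/close and power management and oops.
	 */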
4863 mutex_lock(&cp->pm_mutex);
4864 switch (cmd) {
4865 case SIOCGMIIPHY:
4866 data->phy_id = cp->phy_addr;
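		/* fall through to read the PHY register */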
4867
4868
4869 case SIOCGMIIREG:
4870 spin_lock_irqsave(&cp->lock, flags);
4871 cas_mif_poll(cp, 0);
4872 data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
4873 cas_mif_poll(cp, 1);
4874 spin_unlock_irqrestore(&cp->lock, flags);
4875 rc = 0;
4876 break;
4877
4878 case SIOCSMIIREG:
4879 spin_lock_irqsave(&cp->lock, flags);
4880 cas_mif_poll(cp, 0);
4881 rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
4882 cas_mif_poll(cp, 1);
4883 spin_unlock_irqrestore(&cp->lock, flags);
4884 break;
4885 default:
4886 break;
	}
4888
4889 mutex_unlock(&cp->pm_mutex);
4890 return rc;
4891}
4892
4893
4894
4895
4896
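/* On some systems Cassini sits behind a particular Intel PCI bridge
 * (vendor 0x8086, device 0x537c).  When that bridge is the parent,
 * program what look like its arbitration/timer registers along with
 * its cache line size and latency timer so DMA to and from Cassini
 * behaves well; on any other bridge this function is a no-op.
 */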
4897static void __devinit cas_program_bridge(struct pci_dev *cas_pdev)
4898{
4899 struct pci_dev *pdev = cas_pdev->bus->self;
4900 u32 val;
4901
4902 if (!pdev)
4903 return;
4904
4905 if (pdev->vendor != 0x8086 || pdev->device != 0x537c)
4906 return;
4907
	/* Clear bit 18 of the config dword at offset 0x40 on the bridge.
	 * Presumably this disables a bus-parking/arbitration feature that
	 * interacts badly with Cassini.
	 */
4913 pci_read_config_dword(pdev, 0x40, &val);
4914 val &= ~0x00040000;
4915 pci_write_config_dword(pdev, 0x40, val);
4916
	/* The 16-bit word at config offset 0x50 looks like the bridge's
	 * multi-transaction timer: it extends GRANT# after a transaction
	 * completes so the requester can start its next transfer without
	 * re-arbitrating.  Program it with generous values, since Cassini
	 * is presumably the only interesting device behind this bridge.
	 */
4939 pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff);
4940
	/* The word at config offset 0x52 appears to hold several small
	 * scheduler/timer fields (four 3-bit fields and one 4-bit field);
	 * write every field with its maximum value.
	 */
4961 pci_write_config_word(pdev, 0x52,
4962 (0x7 << 13) |
4963 (0x7 << 10) |
4964 (0x7 << 7) |
4965 (0x7 << 4) |
4966 (0xf << 0));
4967
4968
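	/* force an 8-dword (32-byte) cache line size on the bridge */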
4969 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
4970
4971
4972
4973
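	/* max out the bridge's latency timer */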
4974 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff);
4975}
4976
4977static const struct net_device_ops cas_netdev_ops = {
4978 .ndo_open = cas_open,
4979 .ndo_stop = cas_close,
4980 .ndo_start_xmit = cas_start_xmit,
4981 .ndo_get_stats = cas_get_stats,
4982 .ndo_set_multicast_list = cas_set_multicast,
4983 .ndo_do_ioctl = cas_ioctl,
4984 .ndo_tx_timeout = cas_tx_timeout,
4985 .ndo_change_mtu = cas_change_mtu,
4986 .ndo_set_mac_address = eth_mac_addr,
4987 .ndo_validate_addr = eth_validate_addr,
4988#ifdef CONFIG_NET_POLL_CONTROLLER
4989 .ndo_poll_controller = cas_netpoll,
4990#endif
4991};
4992
4993static int __devinit cas_init_one(struct pci_dev *pdev,
4994 const struct pci_device_id *ent)
4995{
4996 static int cas_version_printed = 0;
4997 unsigned long casreg_len;
4998 struct net_device *dev;
4999 struct cas *cp;
5000 int i, err, pci_using_dac;
5001 u16 pci_cmd;
5002 u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
5003
5004 if (cas_version_printed++ == 0)
5005 printk(KERN_INFO "%s", version);
5006
5007 err = pci_enable_device(pdev);
5008 if (err) {
5009 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
5010 return err;
5011 }
5012
5013 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5014 dev_err(&pdev->dev, "Cannot find proper PCI device "
5015 "base address, aborting.\n");
5016 err = -ENODEV;
5017 goto err_out_disable_pdev;
5018 }
5019
5020 dev = alloc_etherdev(sizeof(*cp));
5021 if (!dev) {
5022 dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
5023 err = -ENOMEM;
5024 goto err_out_disable_pdev;
5025 }
5026 SET_NETDEV_DEV(dev, &pdev->dev);
5027
5028 err = pci_request_regions(pdev, dev->name);
5029 if (err) {
5030 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
5031 goto err_out_free_netdev;
5032 }
5033 pci_set_master(pdev);
5034
5035
5036
5037
5038
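	/* Disable SERR# reporting but make sure parity error response
	 * stays enabled.
	 */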
5039 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
5040 pci_cmd &= ~PCI_COMMAND_SERR;
5041 pci_cmd |= PCI_COMMAND_PARITY;
5042 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
5043 if (pci_try_set_mwi(pdev))
5044 printk(KERN_WARNING PFX "Could not enable MWI for %s\n",
5045 pci_name(pdev));
5046
5047 cas_program_bridge(pdev);
5048
5049
5050
5051
5052
5053
5054
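	/* If the PCI cache line size is smaller than what the chip
	 * prefers, bump it up to CAS_PREF_CACHELINE_SIZE (capped at
	 * SMP_CACHE_BYTES).  The original value is restored on error
	 * and on removal.
	 */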
5055#if 1
5056 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
5057 &orig_cacheline_size);
5058 if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
5059 cas_cacheline_size =
5060 (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
5061 CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
5062 if (pci_write_config_byte(pdev,
5063 PCI_CACHE_LINE_SIZE,
5064 cas_cacheline_size)) {
5065 dev_err(&pdev->dev, "Could not set PCI cache "
5066 "line size\n");
			/* pci_request_regions() succeeded above, so release
			 * the regions too; err_out_free_res falls through to
			 * the cache line restore below.
			 */
			goto err_out_free_res;
5068 }
5069 }
5070#endif
5071
5072
5073
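	/* configure DMA attributes */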
5074 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
5075 pci_using_dac = 1;
5076 err = pci_set_consistent_dma_mask(pdev,
5077 DMA_BIT_MASK(64));
5078 if (err < 0) {
5079 dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
5080 "for consistent allocations\n");
5081 goto err_out_free_res;
5082 }
5083
5084 } else {
5085 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5086 if (err) {
5087 dev_err(&pdev->dev, "No usable DMA configuration, "
5088 "aborting.\n");
5089 goto err_out_free_res;
5090 }
5091 pci_using_dac = 0;
5092 }
5093
5094 casreg_len = pci_resource_len(pdev, 0);
5095
5096 cp = netdev_priv(dev);
5097 cp->pdev = pdev;
5098#if 1
5099
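	/* a value of zero means the cache line size was never overridden,
	 * so there is nothing to restore on removal
	 */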
5100 cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size: 0;
5101#endif
5102 cp->dev = dev;
5103 cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
5104 cassini_debug;
5105
5106 cp->link_transition = LINK_TRANSITION_UNKNOWN;
5107 cp->link_transition_jiffies_valid = 0;
5108
5109 spin_lock_init(&cp->lock);
5110 spin_lock_init(&cp->rx_inuse_lock);
5111 spin_lock_init(&cp->rx_spare_lock);
5112 for (i = 0; i < N_TX_RINGS; i++) {
5113 spin_lock_init(&cp->stat_lock[i]);
5114 spin_lock_init(&cp->tx_lock[i]);
5115 }
5116 spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
5117 mutex_init(&cp->pm_mutex);
5118
5119 init_timer(&cp->link_timer);
5120 cp->link_timer.function = cas_link_timer;
5121 cp->link_timer.data = (unsigned long) cp;
5122
5123#if 1
5124
5125
5126
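	/* start with no reset work pending */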
5127 atomic_set(&cp->reset_task_pending, 0);
5128 atomic_set(&cp->reset_task_pending_all, 0);
5129 atomic_set(&cp->reset_task_pending_spare, 0);
5130 atomic_set(&cp->reset_task_pending_mtu, 0);
5131#endif
5132 INIT_WORK(&cp->reset_task, cas_reset_task);
5133
5134
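	/* default to autonegotiation unless a valid link_mode module
	 * parameter was given
	 */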
5135 if (link_mode >= 0 && link_mode <= 6)
5136 cp->link_cntl = link_modes[link_mode];
5137 else
5138 cp->link_cntl = BMCR_ANENABLE;
5139 cp->lstate = link_down;
5140 cp->link_transition = LINK_TRANSITION_LINK_DOWN;
5141 netif_carrier_off(cp->dev);
5142 cp->timer_ticks = 0;
5143
5144
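	/* map the Cassini register space */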
5145 cp->regs = pci_iomap(pdev, 0, casreg_len);
5146 if (!cp->regs) {
5147 dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
5148 goto err_out_free_res;
5149 }
5150 cp->casreg_len = casreg_len;
5151
5152 pci_save_state(pdev);
5153 cas_check_pci_invariants(cp);
5154 cas_hard_reset(cp);
5155 cas_reset(cp, 0);
5156 if (cas_check_invariants(cp))
5157 goto err_out_iounmap;
5158 if (cp->cas_flags & CAS_FLAG_SATURN)
5159 if (cas_saturn_firmware_init(cp))
5160 goto err_out_iounmap;
5161
5162 cp->init_block = (struct cas_init_block *)
5163 pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
5164 &cp->block_dvma);
5165 if (!cp->init_block) {
5166 dev_err(&pdev->dev, "Cannot allocate init block, aborting.\n");
5167 goto err_out_iounmap;
5168 }
5169
5170 for (i = 0; i < N_TX_RINGS; i++)
5171 cp->init_txds[i] = cp->init_block->txds[i];
5172
5173 for (i = 0; i < N_RX_DESC_RINGS; i++)
5174 cp->init_rxds[i] = cp->init_block->rxds[i];
5175
5176 for (i = 0; i < N_RX_COMP_RINGS; i++)
5177 cp->init_rxcs[i] = cp->init_block->rxcs[i];
5178
5179 for (i = 0; i < N_RX_FLOWS; i++)
5180 skb_queue_head_init(&cp->rx_flows[i]);
5181
5182 dev->netdev_ops = &cas_netdev_ops;
5183 dev->ethtool_ops = &cas_ethtool_ops;
5184 dev->watchdog_timeo = CAS_TX_TIMEOUT;
5185
5186#ifdef USE_NAPI
5187 netif_napi_add(dev, &cp->napi, cas_poll, 64);
5188#endif
5189 dev->irq = pdev->irq;
5190 dev->dma = 0;
5191
5192
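	/* advertise hardware checksum and scatter/gather only when the
	 * chip supports checksum offload
	 */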
5193 if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
5194 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
5195
5196 if (pci_using_dac)
5197 dev->features |= NETIF_F_HIGHDMA;
5198
5199 if (register_netdev(dev)) {
5200 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
5201 goto err_out_free_consistent;
5202 }
5203
5204 i = readl(cp->regs + REG_BIM_CFG);
5205 printk(KERN_INFO "%s: Sun Cassini%s (%sbit/%sMHz PCI/%s) "
5206 "Ethernet[%d] %pM\n", dev->name,
5207 (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
5208 (i & BIM_CFG_32BIT) ? "32" : "64",
5209 (i & BIM_CFG_66MHZ) ? "66" : "33",
5210 (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
5211 dev->dev_addr);
5212
5213 pci_set_drvdata(pdev, dev);
5214 cp->hw_running = 1;
5215 cas_entropy_reset(cp);
5216 cas_phy_init(cp);
5217 cas_begin_auto_negotiation(cp, NULL);
5218 return 0;
5219
5220err_out_free_consistent:
5221 pci_free_consistent(pdev, sizeof(struct cas_init_block),
5222 cp->init_block, cp->block_dvma);
5223
5224err_out_iounmap:
5225 mutex_lock(&cp->pm_mutex);
5226 if (cp->hw_running)
5227 cas_shutdown(cp);
5228 mutex_unlock(&cp->pm_mutex);
5229
5230 pci_iounmap(pdev, cp->regs);
5231
5232
5233err_out_free_res:
5234 pci_release_regions(pdev);
5235
5236err_write_cacheline:
5237
5238
5239
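	/* try to restore the cache line size in case the error occurred
	 * after we changed it
	 */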
5240 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);
5241
5242err_out_free_netdev:
5243 free_netdev(dev);
5244
5245err_out_disable_pdev:
5246 pci_disable_device(pdev);
5247 pci_set_drvdata(pdev, NULL);
5248 return -ENODEV;
5249}
5250
5251static void __devexit cas_remove_one(struct pci_dev *pdev)
5252{
5253 struct net_device *dev = pci_get_drvdata(pdev);
5254 struct cas *cp;
5255 if (!dev)
5256 return;
5257
5258 cp = netdev_priv(dev);
5259 unregister_netdev(dev);
5260
5261 if (cp->fw_data)
5262 vfree(cp->fw_data);
5263
5264 mutex_lock(&cp->pm_mutex);
5265 flush_scheduled_work();
5266 if (cp->hw_running)
5267 cas_shutdown(cp);
5268 mutex_unlock(&cp->pm_mutex);
5269
5270#if 1
5271 if (cp->orig_cacheline_size) {
5272
5273
5274
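		/* restore the cache line size we clobbered in
		 * cas_init_one()
		 */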
5275 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
5276 cp->orig_cacheline_size);
5277 }
5278#endif
5279 pci_free_consistent(pdev, sizeof(struct cas_init_block),
5280 cp->init_block, cp->block_dvma);
5281 pci_iounmap(pdev, cp->regs);
5282 free_netdev(dev);
5283 pci_release_regions(pdev);
5284 pci_disable_device(pdev);
5285 pci_set_drvdata(pdev, NULL);
5286}
5287
5288#ifdef CONFIG_PM
5289static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
5290{
5291 struct net_device *dev = pci_get_drvdata(pdev);
5292 struct cas *cp = netdev_priv(dev);
5293 unsigned long flags;
5294
5295 mutex_lock(&cp->pm_mutex);
5296
5297
5298 if (cp->opened) {
5299 netif_device_detach(dev);
5300
5301 cas_lock_all_save(cp, flags);
5302
5303
5304
5305
5306
5307
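		/* Passing 0 here appears safe: resume re-initializes the
		 * hardware, which restarts autonegotiation.
		 */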
5308 cas_reset(cp, 0);
5309 cas_clean_rings(cp);
5310 cas_unlock_all_restore(cp, flags);
5311 }
5312
5313 if (cp->hw_running)
5314 cas_shutdown(cp);
5315 mutex_unlock(&cp->pm_mutex);
5316
5317 return 0;
5318}
5319
5320static int cas_resume(struct pci_dev *pdev)
5321{
5322 struct net_device *dev = pci_get_drvdata(pdev);
5323 struct cas *cp = netdev_priv(dev);
5324
5325 printk(KERN_INFO "%s: resuming\n", dev->name);
5326
5327 mutex_lock(&cp->pm_mutex);
5328 cas_hard_reset(cp);
5329 if (cp->opened) {
5330 unsigned long flags;
5331 cas_lock_all_save(cp, flags);
5332 cas_reset(cp, 0);
5333 cp->hw_running = 1;
5334 cas_clean_rings(