/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
 *
 * Author: Adrian Sun (asun@darksunrising.com).
 * Distributed under the terms of the GNU General Public License (GPL).
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/mii.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/firmware.h>

#include <net/checksum.h>

#include <linux/atomic.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#define cas_page_map(x)      kmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
#define cas_page_unmap(x)    kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
#define CAS_NCPUS            num_online_cpus()

#define cas_skb_release(x)   netif_rx(x)

#define USE_HP_WORKAROUND
#define HP_WORKAROUND_DEFAULT
#define CAS_HP_ALT_FIRMWARE  cas_prog_null

#include "cassini.h"

#define USE_TX_COMPWB
#define USE_CSMA_CD_PROTO
#define USE_RX_BLANK
#undef USE_ENTROPY_DEV

#undef USE_PCI_INTB
#undef USE_PCI_INTC
#undef USE_PCI_INTD
#undef USE_QOS

#undef USE_VPD_DEBUG

#define USE_PAGE_ORDER
#define RX_DONT_BATCH   0
#define RX_COPY_ALWAYS  0
#define RX_COPY_MIN     64
#undef  RX_COUNT_BUFFERS

#define DRV_MODULE_NAME     "cassini"
#define DRV_MODULE_VERSION  "1.6"
#define DRV_MODULE_RELDATE  "21 May 2008"

#define CAS_DEF_MSG_ENABLE \
        (NETIF_MSG_DRV | \
         NETIF_MSG_PROBE | \
         NETIF_MSG_LINK | \
         NETIF_MSG_TIMER | \
         NETIF_MSG_IFDOWN | \
         NETIF_MSG_IFUP | \
         NETIF_MSG_RX_ERR | \
         NETIF_MSG_TX_ERR)

#define CAS_TX_TIMEOUT        (HZ)
#define CAS_LINK_TIMEOUT      (22*HZ/10)
#define CAS_LINK_FAST_TIMEOUT (1)

#define STOP_TRIES_PHY 1000
#define STOP_TRIES     5000

#define CAS_MIN_FRAME         97
#define CAS_1000MB_MIN_FRAME  255
#define CAS_MIN_MTU           60
#define CAS_MAX_MTU           min(((cp->page_size << 1) - 0x50), 9000)

#if 1
#else
#define CAS_RESET_MTU    1
#define CAS_RESET_ALL    2
#define CAS_RESET_SPARE  3
#endif

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

static int cassini_debug = -1;
static int link_mode;

MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("sun/cassini.bin");
module_param(cassini_debug, int, 0);
MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
module_param(link_mode, int, 0);
MODULE_PARM_DESC(link_mode, "default link mode");

#define DEFAULT_LINKDOWN_TIMEOUT 5

static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
module_param(linkdown_timeout, int, 0);
MODULE_PARM_DESC(linkdown_timeout,
"min reset interval in sec. for PCS linkdown issue; disabled if not positive");

static int link_transition_timeout;

static u16 link_modes[] __devinitdata = {
        BMCR_ANENABLE,
        0,
        BMCR_SPEED100,
        BMCR_FULLDPLX,
        BMCR_SPEED100|BMCR_FULLDPLX,
        CAS_BMCR_SPEED1000|BMCR_FULLDPLX
};

static DEFINE_PCI_DEVICE_TABLE(cas_pci_tbl) = {
        { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, cas_pci_tbl);
244
245static void cas_set_link_modes(struct cas *cp);
246
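/* Locking helpers: the global cp->lock is taken first, then every
 * per-ring tx_lock in ascending ring order; the unlock helpers release
 * them in the reverse order so the lock ordering stays consistent.
 */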
247static inline void cas_lock_tx(struct cas *cp)
248{
249 int i;
250
251 for (i = 0; i < N_TX_RINGS; i++)
252 spin_lock(&cp->tx_lock[i]);
253}
254
255static inline void cas_lock_all(struct cas *cp)
256{
257 spin_lock_irq(&cp->lock);
258 cas_lock_tx(cp);
259}
260
261
262
263
264
265
266
267
268
269#define cas_lock_all_save(cp, flags) \
270do { \
271 struct cas *xxxcp = (cp); \
272 spin_lock_irqsave(&xxxcp->lock, flags); \
273 cas_lock_tx(xxxcp); \
274} while (0)
275
276static inline void cas_unlock_tx(struct cas *cp)
277{
278 int i;
279
280 for (i = N_TX_RINGS; i > 0; i--)
281 spin_unlock(&cp->tx_lock[i - 1]);
282}
283
284static inline void cas_unlock_all(struct cas *cp)
285{
286 cas_unlock_tx(cp);
287 spin_unlock_irq(&cp->lock);
288}
289
290#define cas_unlock_all_restore(cp, flags) \
291do { \
292 struct cas *xxxcp = (cp); \
293 cas_unlock_tx(xxxcp); \
294 spin_unlock_irqrestore(&xxxcp->lock, flags); \
295} while (0)
296
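/* Per-ring interrupt masking. Ring 0 is controlled through the global
 * interrupt mask register; on REG_PLUS (Cassini+) chips the other
 * completion rings have their own REG_PLUS_INTRN_MASK registers, which
 * are only selectively unmasked when USE_PCI_INTB/C/D is compiled in.
 */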
297static void cas_disable_irq(struct cas *cp, const int ring)
298{
299
300 if (ring == 0) {
301 writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
302 return;
303 }
304
305
306 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
307 switch (ring) {
308#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
309#ifdef USE_PCI_INTB
310 case 1:
311#endif
312#ifdef USE_PCI_INTC
313 case 2:
314#endif
315#ifdef USE_PCI_INTD
316 case 3:
317#endif
318 writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
319 cp->regs + REG_PLUS_INTRN_MASK(ring));
320 break;
321#endif
322 default:
323 writel(INTRN_MASK_CLEAR_ALL, cp->regs +
324 REG_PLUS_INTRN_MASK(ring));
325 break;
326 }
327 }
328}
329
330static inline void cas_mask_intr(struct cas *cp)
331{
332 int i;
333
334 for (i = 0; i < N_RX_COMP_RINGS; i++)
335 cas_disable_irq(cp, i);
336}
337
338static void cas_enable_irq(struct cas *cp, const int ring)
339{
340 if (ring == 0) {
341 writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
342 return;
343 }
344
345 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
346 switch (ring) {
347#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
348#ifdef USE_PCI_INTB
349 case 1:
350#endif
351#ifdef USE_PCI_INTC
352 case 2:
353#endif
354#ifdef USE_PCI_INTD
355 case 3:
356#endif
357 writel(INTRN_MASK_RX_EN, cp->regs +
358 REG_PLUS_INTRN_MASK(ring));
359 break;
360#endif
361 default:
362 break;
363 }
364 }
365}
366
367static inline void cas_unmask_intr(struct cas *cp)
368{
369 int i;
370
371 for (i = 0; i < N_RX_COMP_RINGS; i++)
372 cas_enable_irq(cp, i);
373}
374
375static inline void cas_entropy_gather(struct cas *cp)
376{
377#ifdef USE_ENTROPY_DEV
378 if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
379 return;
380
381 batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
382 readl(cp->regs + REG_ENTROPY_IV),
383 sizeof(uint64_t)*8);
384#endif
385}
386
387static inline void cas_entropy_reset(struct cas *cp)
388{
389#ifdef USE_ENTROPY_DEV
390 if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
391 return;
392
393 writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
394 cp->regs + REG_BIM_LOCAL_DEV_EN);
395 writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
396 writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);
397
398
399 if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
400 cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
401#endif
402}
403
404
405
406
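/* MIF (MDIO) accessors: build a frame with the PHY and register
 * address, write it to REG_MIF_FRAME, then poll for the turn-around
 * bit. cas_phy_read() returns 0xFFFF and cas_phy_write() returns -1 if
 * the frame does not complete within STOP_TRIES_PHY polls.
 */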
407static u16 cas_phy_read(struct cas *cp, int reg)
408{
409 u32 cmd;
410 int limit = STOP_TRIES_PHY;
411
412 cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
413 cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
414 cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
415 cmd |= MIF_FRAME_TURN_AROUND_MSB;
416 writel(cmd, cp->regs + REG_MIF_FRAME);
417
418
419 while (limit-- > 0) {
420 udelay(10);
421 cmd = readl(cp->regs + REG_MIF_FRAME);
422 if (cmd & MIF_FRAME_TURN_AROUND_LSB)
423 return cmd & MIF_FRAME_DATA_MASK;
424 }
425 return 0xFFFF;
426}
427
428static int cas_phy_write(struct cas *cp, int reg, u16 val)
429{
430 int limit = STOP_TRIES_PHY;
431 u32 cmd;
432
433 cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
434 cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
435 cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
436 cmd |= MIF_FRAME_TURN_AROUND_MSB;
437 cmd |= val & MIF_FRAME_DATA_MASK;
438 writel(cmd, cp->regs + REG_MIF_FRAME);
439
440
441 while (limit-- > 0) {
442 udelay(10);
443 cmd = readl(cp->regs + REG_MIF_FRAME);
444 if (cmd & MIF_FRAME_TURN_AROUND_LSB)
445 return 0;
446 }
447 return -1;
448}
449
450static void cas_phy_powerup(struct cas *cp)
451{
452 u16 ctl = cas_phy_read(cp, MII_BMCR);
453
454 if ((ctl & BMCR_PDOWN) == 0)
455 return;
456 ctl &= ~BMCR_PDOWN;
457 cas_phy_write(cp, MII_BMCR, ctl);
458}
459
460static void cas_phy_powerdown(struct cas *cp)
461{
462 u16 ctl = cas_phy_read(cp, MII_BMCR);
463
464 if (ctl & BMCR_PDOWN)
465 return;
466 ctl |= BMCR_PDOWN;
467 cas_phy_write(cp, MII_BMCR, ctl);
468}
469
470
471static int cas_page_free(struct cas *cp, cas_page_t *page)
472{
473 pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
474 PCI_DMA_FROMDEVICE);
475 __free_pages(page->buffer, cp->page_order);
476 kfree(page);
477 return 0;
478}
479
480#ifdef RX_COUNT_BUFFERS
481#define RX_USED_ADD(x, y) ((x)->used += (y))
482#define RX_USED_SET(x, y) ((x)->used = (y))
483#else
484#define RX_USED_ADD(x, y)
485#define RX_USED_SET(x, y)
486#endif
487
488
489
490
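/* Allocate a receive buffer page: a cas_page_t wraps a page of order
 * cp->page_order that is DMA-mapped for the device. The page_count of
 * the underlying page is used later to tell whether the stack still
 * holds references to it.
 */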
491static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
492{
493 cas_page_t *page;
494
495 page = kmalloc(sizeof(cas_page_t), flags);
496 if (!page)
497 return NULL;
498
499 INIT_LIST_HEAD(&page->list);
500 RX_USED_SET(page, 0);
501 page->buffer = alloc_pages(flags, cp->page_order);
502 if (!page->buffer)
503 goto page_err;
504 page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
505 cp->page_size, PCI_DMA_FROMDEVICE);
506 return page;
507
508page_err:
509 kfree(page);
510 return NULL;
511}
512
513
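/* Spare buffer management: rx_spare_list holds pre-allocated
 * replacement pages, while rx_inuse_list holds pages that have been
 * handed to the stack and are waiting for their references to drop so
 * they can be recycled or freed.
 */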
514static void cas_spare_init(struct cas *cp)
515{
516 spin_lock(&cp->rx_inuse_lock);
517 INIT_LIST_HEAD(&cp->rx_inuse_list);
518 spin_unlock(&cp->rx_inuse_lock);
519
520 spin_lock(&cp->rx_spare_lock);
521 INIT_LIST_HEAD(&cp->rx_spare_list);
522 cp->rx_spares_needed = RX_SPARE_COUNT;
523 spin_unlock(&cp->rx_spare_lock);
524}
525
526
527static void cas_spare_free(struct cas *cp)
528{
529 struct list_head list, *elem, *tmp;
530
531
532 INIT_LIST_HEAD(&list);
533 spin_lock(&cp->rx_spare_lock);
534 list_splice_init(&cp->rx_spare_list, &list);
535 spin_unlock(&cp->rx_spare_lock);
536 list_for_each_safe(elem, tmp, &list) {
537 cas_page_free(cp, list_entry(elem, cas_page_t, list));
538 }
539
540 INIT_LIST_HEAD(&list);
541#if 1
542
543
544
545
546 spin_lock(&cp->rx_inuse_lock);
547 list_splice_init(&cp->rx_inuse_list, &list);
548 spin_unlock(&cp->rx_inuse_lock);
549#else
550 spin_lock(&cp->rx_spare_lock);
551 list_splice_init(&cp->rx_inuse_list, &list);
552 spin_unlock(&cp->rx_spare_lock);
553#endif
554 list_for_each_safe(elem, tmp, &list) {
555 cas_page_free(cp, list_entry(elem, cas_page_t, list));
556 }
557}
558
559
560static void cas_spare_recover(struct cas *cp, const gfp_t flags)
561{
562 struct list_head list, *elem, *tmp;
563 int needed, i;
564
565
566
567
568
569
570 INIT_LIST_HEAD(&list);
571 spin_lock(&cp->rx_inuse_lock);
572 list_splice_init(&cp->rx_inuse_list, &list);
573 spin_unlock(&cp->rx_inuse_lock);
574
575 list_for_each_safe(elem, tmp, &list) {
576 cas_page_t *page = list_entry(elem, cas_page_t, list);
577
578
579
580
581
582
583
584
585
586
587
588
589
590 if (page_count(page->buffer) > 1)
591 continue;
592
593 list_del(elem);
594 spin_lock(&cp->rx_spare_lock);
595 if (cp->rx_spares_needed > 0) {
596 list_add(elem, &cp->rx_spare_list);
597 cp->rx_spares_needed--;
598 spin_unlock(&cp->rx_spare_lock);
599 } else {
600 spin_unlock(&cp->rx_spare_lock);
601 cas_page_free(cp, page);
602 }
603 }
604
605
606 if (!list_empty(&list)) {
607 spin_lock(&cp->rx_inuse_lock);
608 list_splice(&list, &cp->rx_inuse_list);
609 spin_unlock(&cp->rx_inuse_lock);
610 }
611
612 spin_lock(&cp->rx_spare_lock);
613 needed = cp->rx_spares_needed;
614 spin_unlock(&cp->rx_spare_lock);
615 if (!needed)
616 return;
617
618
619 INIT_LIST_HEAD(&list);
620 i = 0;
621 while (i < needed) {
622 cas_page_t *spare = cas_page_alloc(cp, flags);
623 if (!spare)
624 break;
625 list_add(&spare->list, &list);
626 i++;
627 }
628
629 spin_lock(&cp->rx_spare_lock);
630 list_splice(&list, &cp->rx_spare_list);
631 cp->rx_spares_needed -= i;
632 spin_unlock(&cp->rx_spare_lock);
633}
634
635
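/* Pull a replacement page off the spare list, attempting an atomic
 * refill if the list is empty. After every RX_SPARE_RECOVER_VAL
 * dequeues the reset task is scheduled so the spare pool can be
 * replenished outside interrupt context.
 */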
636static cas_page_t *cas_page_dequeue(struct cas *cp)
637{
638 struct list_head *entry;
639 int recover;
640
641 spin_lock(&cp->rx_spare_lock);
642 if (list_empty(&cp->rx_spare_list)) {
643
644 spin_unlock(&cp->rx_spare_lock);
645 cas_spare_recover(cp, GFP_ATOMIC);
646 spin_lock(&cp->rx_spare_lock);
647 if (list_empty(&cp->rx_spare_list)) {
648 netif_err(cp, rx_err, cp->dev,
649 "no spare buffers available\n");
650 spin_unlock(&cp->rx_spare_lock);
651 return NULL;
652 }
653 }
654
655 entry = cp->rx_spare_list.next;
656 list_del(entry);
657 recover = ++cp->rx_spares_needed;
658 spin_unlock(&cp->rx_spare_lock);
659
660
661 if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
662#if 1
663 atomic_inc(&cp->reset_task_pending);
664 atomic_inc(&cp->reset_task_pending_spare);
665 schedule_work(&cp->reset_task);
666#else
667 atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
668 schedule_work(&cp->reset_task);
669#endif
670 }
671 return list_entry(entry, cas_page_t, list);
672}
673
674
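/* Enable or disable hardware MIF polling of the PHY's BMSR. With
 * polling enabled, link-status and autoneg-complete changes are
 * reported through the MIF interrupt instead of requiring explicit
 * MDIO reads.
 */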
675static void cas_mif_poll(struct cas *cp, const int enable)
676{
677 u32 cfg;
678
679 cfg = readl(cp->regs + REG_MIF_CFG);
680 cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);
681
682 if (cp->phy_type & CAS_PHY_MII_MDIO1)
683 cfg |= MIF_CFG_PHY_SELECT;
684
685
686 if (enable) {
687 cfg |= MIF_CFG_POLL_EN;
688 cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
689 cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
690 }
691 writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
692 cp->regs + REG_MIF_MASK);
693 writel(cfg, cp->regs + REG_MIF_CFG);
694}
695
696
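/* Apply link settings from ethtool (or restart the current ones when
 * ep is NULL) for both the PCS/SERDES and MII PHY paths, then arm the
 * link timer. A configuration change while the link was up schedules a
 * full chip reset instead of reprogramming the PHY directly.
 */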
697static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
698{
699 u16 ctl;
700#if 1
701 int lcntl;
702 int changed = 0;
703 int oldstate = cp->lstate;
704 int link_was_not_down = !(oldstate == link_down);
705#endif
706
707 if (!ep)
708 goto start_aneg;
709 lcntl = cp->link_cntl;
710 if (ep->autoneg == AUTONEG_ENABLE)
711 cp->link_cntl = BMCR_ANENABLE;
712 else {
713 u32 speed = ethtool_cmd_speed(ep);
714 cp->link_cntl = 0;
715 if (speed == SPEED_100)
716 cp->link_cntl |= BMCR_SPEED100;
717 else if (speed == SPEED_1000)
718 cp->link_cntl |= CAS_BMCR_SPEED1000;
719 if (ep->duplex == DUPLEX_FULL)
720 cp->link_cntl |= BMCR_FULLDPLX;
721 }
722#if 1
723 changed = (lcntl != cp->link_cntl);
724#endif
725start_aneg:
726 if (cp->lstate == link_up) {
727 netdev_info(cp->dev, "PCS link down\n");
728 } else {
729 if (changed) {
730 netdev_info(cp->dev, "link configuration changed\n");
731 }
732 }
733 cp->lstate = link_down;
734 cp->link_transition = LINK_TRANSITION_LINK_DOWN;
735 if (!cp->hw_running)
736 return;
737#if 1
738
739
740
741
742
743 if (oldstate == link_up)
744 netif_carrier_off(cp->dev);
745 if (changed && link_was_not_down) {
746
747
748
749
750
751 atomic_inc(&cp->reset_task_pending);
752 atomic_inc(&cp->reset_task_pending_all);
753 schedule_work(&cp->reset_task);
754 cp->timer_ticks = 0;
755 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
756 return;
757 }
758#endif
759 if (cp->phy_type & CAS_PHY_SERDES) {
760 u32 val = readl(cp->regs + REG_PCS_MII_CTRL);
761
762 if (cp->link_cntl & BMCR_ANENABLE) {
763 val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
764 cp->lstate = link_aneg;
765 } else {
766 if (cp->link_cntl & BMCR_FULLDPLX)
767 val |= PCS_MII_CTRL_DUPLEX;
768 val &= ~PCS_MII_AUTONEG_EN;
769 cp->lstate = link_force_ok;
770 }
771 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
772 writel(val, cp->regs + REG_PCS_MII_CTRL);
773
774 } else {
775 cas_mif_poll(cp, 0);
776 ctl = cas_phy_read(cp, MII_BMCR);
777 ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
778 CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
779 ctl |= cp->link_cntl;
780 if (ctl & BMCR_ANENABLE) {
781 ctl |= BMCR_ANRESTART;
782 cp->lstate = link_aneg;
783 } else {
784 cp->lstate = link_force_ok;
785 }
786 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
787 cas_phy_write(cp, MII_BMCR, ctl);
788 cas_mif_poll(cp, 1);
789 }
790
791 cp->timer_ticks = 0;
792 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
793}
794
795
796static int cas_reset_mii_phy(struct cas *cp)
797{
798 int limit = STOP_TRIES_PHY;
799 u16 val;
800
801 cas_phy_write(cp, MII_BMCR, BMCR_RESET);
802 udelay(100);
803 while (--limit) {
804 val = cas_phy_read(cp, MII_BMCR);
805 if ((val & BMCR_RESET) == 0)
806 break;
807 udelay(10);
808 }
809 return limit <= 0;
810}
811
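/* The Saturn board's DP83065 PHY needs firmware downloaded over MDIO.
 * In the "sun/cassini.bin" image, the first two bytes hold the
 * little-endian load address and the remainder is the data that
 * cas_saturn_firmware_load() writes to the PHY one byte at a time.
 */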
static int cas_saturn_firmware_init(struct cas *cp)
{
        const struct firmware *fw;
        const char fw_name[] = "sun/cassini.bin";
        int err;

        if (PHY_NS_DP83065 != cp->phy_id)
                return 0;

        err = request_firmware(&fw, fw_name, &cp->pdev->dev);
        if (err) {
                pr_err("Failed to load firmware \"%s\"\n",
                       fw_name);
                return err;
        }
        if (fw->size < 2) {
                pr_err("bogus length %zu in \"%s\"\n",
                       fw->size, fw_name);
                err = -EINVAL;
                goto out;
        }
        cp->fw_load_addr = fw->data[1] << 8 | fw->data[0];
        cp->fw_size = fw->size - 2;
        cp->fw_data = vmalloc(cp->fw_size);
        if (!cp->fw_data) {
                err = -ENOMEM;
                pr_err("\"%s\" Failed %d\n", fw_name, err);
                goto out;
        }
        memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
out:
        release_firmware(fw);
        return err;
}
846
847static void cas_saturn_firmware_load(struct cas *cp)
848{
849 int i;
850
851 cas_phy_powerdown(cp);
852
853
854 cas_phy_write(cp, DP83065_MII_MEM, 0x0);
855
856
857 cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
858 cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
859 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
860 cas_phy_write(cp, DP83065_MII_REGD, 0x82);
861 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
862 cas_phy_write(cp, DP83065_MII_REGD, 0x0);
863 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
864 cas_phy_write(cp, DP83065_MII_REGD, 0x39);
865
866
867 cas_phy_write(cp, DP83065_MII_MEM, 0x1);
868 cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
869 for (i = 0; i < cp->fw_size; i++)
870 cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);
871
872
873 cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
874 cas_phy_write(cp, DP83065_MII_REGD, 0x1);
875}
876
877
878
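/* Bring up the PHY layer: for MII PHYs this applies per-vendor
 * workarounds (Lucent, Broadcom, Saturn/DP83065) and programs the
 * advertisement registers; for SERDES parts it resets and configures
 * the PCS instead.
 */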
879static void cas_phy_init(struct cas *cp)
880{
881 u16 val;
882
883
884 if (CAS_PHY_MII(cp->phy_type)) {
885 writel(PCS_DATAPATH_MODE_MII,
886 cp->regs + REG_PCS_DATAPATH_MODE);
887
888 cas_mif_poll(cp, 0);
889 cas_reset_mii_phy(cp);
890
891 if (PHY_LUCENT_B0 == cp->phy_id) {
892
893 cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
894 cas_phy_write(cp, MII_BMCR, 0x00f1);
895 cas_phy_write(cp, LUCENT_MII_REG, 0x0);
896
897 } else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
898
899 cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
900 cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
901 cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
902 cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
903 cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
904 cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
905 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
906 cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
907 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
908 cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
909 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);
910
911 } else if (PHY_BROADCOM_5411 == cp->phy_id) {
912 val = cas_phy_read(cp, BROADCOM_MII_REG4);
913 val = cas_phy_read(cp, BROADCOM_MII_REG4);
914 if (val & 0x0080) {
915
916 cas_phy_write(cp, BROADCOM_MII_REG4,
917 val & ~0x0080);
918 }
919
920 } else if (cp->cas_flags & CAS_FLAG_SATURN) {
921 writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
922 SATURN_PCFG_FSI : 0x0,
923 cp->regs + REG_SATURN_PCFG);
924
925
926
927
928
929 if (PHY_NS_DP83065 == cp->phy_id) {
930 cas_saturn_firmware_load(cp);
931 }
932 cas_phy_powerup(cp);
933 }
934
935
936 val = cas_phy_read(cp, MII_BMCR);
937 val &= ~BMCR_ANENABLE;
938 cas_phy_write(cp, MII_BMCR, val);
939 udelay(10);
940
941 cas_phy_write(cp, MII_ADVERTISE,
942 cas_phy_read(cp, MII_ADVERTISE) |
943 (ADVERTISE_10HALF | ADVERTISE_10FULL |
944 ADVERTISE_100HALF | ADVERTISE_100FULL |
945 CAS_ADVERTISE_PAUSE |
946 CAS_ADVERTISE_ASYM_PAUSE));
947
948 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
949
950
951
952 val = cas_phy_read(cp, CAS_MII_1000_CTRL);
953 val &= ~CAS_ADVERTISE_1000HALF;
954 val |= CAS_ADVERTISE_1000FULL;
955 cas_phy_write(cp, CAS_MII_1000_CTRL, val);
956 }
957
958 } else {
959
960 u32 val;
961 int limit;
962
963 writel(PCS_DATAPATH_MODE_SERDES,
964 cp->regs + REG_PCS_DATAPATH_MODE);
965
966
967 if (cp->cas_flags & CAS_FLAG_SATURN)
968 writel(0, cp->regs + REG_SATURN_PCFG);
969
970
971 val = readl(cp->regs + REG_PCS_MII_CTRL);
972 val |= PCS_MII_RESET;
973 writel(val, cp->regs + REG_PCS_MII_CTRL);
974
975 limit = STOP_TRIES;
976 while (--limit > 0) {
977 udelay(10);
978 if ((readl(cp->regs + REG_PCS_MII_CTRL) &
979 PCS_MII_RESET) == 0)
980 break;
981 }
982 if (limit <= 0)
983 netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
984 readl(cp->regs + REG_PCS_STATE_MACHINE));
985
986
987
988
989 writel(0x0, cp->regs + REG_PCS_CFG);
990
991
992 val = readl(cp->regs + REG_PCS_MII_ADVERT);
993 val &= ~PCS_MII_ADVERT_HD;
994 val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
995 PCS_MII_ADVERT_ASYM_PAUSE);
996 writel(val, cp->regs + REG_PCS_MII_ADVERT);
997
998
999 writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);
1000
1001
1002 writel(PCS_SERDES_CTRL_SYNCD_EN,
1003 cp->regs + REG_PCS_SERDES_CTRL);
1004 }
1005}
1006
1007
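/* Check the PCS link state. The MII status register is read twice
 * because the link-status bit appears to latch low, and the PCS state
 * machine is consulted before trusting it. Returns 1 when the caller
 * should reset the chip to recover from a stuck PCS link.
 */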
1008static int cas_pcs_link_check(struct cas *cp)
1009{
1010 u32 stat, state_machine;
1011 int retval = 0;
1012
1013
1014
1015
1016
1017 stat = readl(cp->regs + REG_PCS_MII_STATUS);
1018 if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
1019 stat = readl(cp->regs + REG_PCS_MII_STATUS);
1020
1021
1022
1023
1024 if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
1025 PCS_MII_STATUS_REMOTE_FAULT)) ==
1026 (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
1027 netif_info(cp, link, cp->dev, "PCS RemoteFault\n");
1028
1029
1030
1031
1032 state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
1033 if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
1034 stat &= ~PCS_MII_STATUS_LINK_STATUS;
1035 } else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
1036 stat |= PCS_MII_STATUS_LINK_STATUS;
1037 }
1038
1039 if (stat & PCS_MII_STATUS_LINK_STATUS) {
1040 if (cp->lstate != link_up) {
1041 if (cp->opened) {
1042 cp->lstate = link_up;
1043 cp->link_transition = LINK_TRANSITION_LINK_UP;
1044
1045 cas_set_link_modes(cp);
1046 netif_carrier_on(cp->dev);
1047 }
1048 }
1049 } else if (cp->lstate == link_up) {
1050 cp->lstate = link_down;
1051 if (link_transition_timeout != 0 &&
1052 cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
1053 !cp->link_transition_jiffies_valid) {
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066 retval = 1;
1067 cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
1068 cp->link_transition_jiffies = jiffies;
1069 cp->link_transition_jiffies_valid = 1;
1070 } else {
1071 cp->link_transition = LINK_TRANSITION_ON_FAILURE;
1072 }
1073 netif_carrier_off(cp->dev);
1074 if (cp->opened)
1075 netif_info(cp, link, cp->dev, "PCS link down\n");
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085 if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
1086
1087 stat = readl(cp->regs + REG_PCS_SERDES_STATE);
1088 if (stat == 0x03)
1089 return 1;
1090 }
1091 } else if (cp->lstate == link_down) {
1092 if (link_transition_timeout != 0 &&
1093 cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
1094 !cp->link_transition_jiffies_valid) {
1095
1096
1097
1098
1099
1100 retval = 1;
1101 cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
1102 cp->link_transition_jiffies = jiffies;
1103 cp->link_transition_jiffies_valid = 1;
1104 } else {
1105 cp->link_transition = LINK_TRANSITION_STILL_FAILED;
1106 }
1107 }
1108
1109 return retval;
1110}
1111
1112static int cas_pcs_interrupt(struct net_device *dev,
1113 struct cas *cp, u32 status)
1114{
1115 u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);
1116
1117 if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
1118 return 0;
1119 return cas_pcs_link_check(cp);
1120}
1121
1122static int cas_txmac_interrupt(struct net_device *dev,
1123 struct cas *cp, u32 status)
1124{
1125 u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);
1126
1127 if (!txmac_stat)
1128 return 0;
1129
1130 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1131 "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);
1132
1133
1134
1135
1136 if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
1137 !(txmac_stat & ~MAC_TX_DEFER_TIMER))
1138 return 0;
1139
1140 spin_lock(&cp->stat_lock[0]);
1141 if (txmac_stat & MAC_TX_UNDERRUN) {
1142 netdev_err(dev, "TX MAC xmit underrun\n");
1143 cp->net_stats[0].tx_fifo_errors++;
1144 }
1145
1146 if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
1147 netdev_err(dev, "TX MAC max packet size error\n");
1148 cp->net_stats[0].tx_errors++;
1149 }
1150
1151
1152
1153
1154 if (txmac_stat & MAC_TX_COLL_NORMAL)
1155 cp->net_stats[0].collisions += 0x10000;
1156
1157 if (txmac_stat & MAC_TX_COLL_EXCESS) {
1158 cp->net_stats[0].tx_aborted_errors += 0x10000;
1159 cp->net_stats[0].collisions += 0x10000;
1160 }
1161
1162 if (txmac_stat & MAC_TX_COLL_LATE) {
1163 cp->net_stats[0].tx_aborted_errors += 0x10000;
1164 cp->net_stats[0].collisions += 0x10000;
1165 }
1166 spin_unlock(&cp->stat_lock[0]);
1167
1168
1169
1170
1171 return 0;
1172}
1173
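/* Load the header parser (HP) instruction RAM: each cas_hp_inst_t is
 * written as one RAM entry, split across the HI/MID/LOW data
 * registers, until a terminating instruction with no note string is
 * reached.
 */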
1174static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
1175{
1176 cas_hp_inst_t *inst;
1177 u32 val;
1178 int i;
1179
1180 i = 0;
1181 while ((inst = firmware) && inst->note) {
1182 writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);
1183
1184 val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
1185 val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
1186 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);
1187
1188 val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
1189 val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
1190 val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
1191 val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
1192 val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
1193 val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
1194 val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
1195 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);
1196
1197 val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
1198 val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
1199 val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
1200 val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
1201 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
1202 ++firmware;
1203 ++i;
1204 }
1205}
1206
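/* Program the RX DMA engine: descriptor and completion ring base
 * addresses and kick registers, pause thresholds, interrupt blanking,
 * the page size / MTU stride layout, and finally the header parser
 * configuration (skipped when the null HP program is in use).
 */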
1207static void cas_init_rx_dma(struct cas *cp)
1208{
1209 u64 desc_dma = cp->block_dvma;
1210 u32 val;
1211 int i, size;
1212
1213
1214 val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
1215 val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
1216 val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
1217 if ((N_RX_DESC_RINGS > 1) &&
1218 (cp->cas_flags & CAS_FLAG_REG_PLUS))
1219 val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
1220 writel(val, cp->regs + REG_RX_CFG);
1221
1222 val = (unsigned long) cp->init_rxds[0] -
1223 (unsigned long) cp->init_block;
1224 writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
1225 writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
1226 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
1227
1228 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1229
1230
1231
1232 val = (unsigned long) cp->init_rxds[1] -
1233 (unsigned long) cp->init_block;
1234 writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
1235 writel((desc_dma + val) & 0xffffffff, cp->regs +
1236 REG_PLUS_RX_DB1_LOW);
1237 writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
1238 REG_PLUS_RX_KICK1);
1239 }
1240
1241
1242 val = (unsigned long) cp->init_rxcs[0] -
1243 (unsigned long) cp->init_block;
1244 writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
1245 writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);
1246
1247 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1248
1249 for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
1250 val = (unsigned long) cp->init_rxcs[i] -
1251 (unsigned long) cp->init_block;
1252 writel((desc_dma + val) >> 32, cp->regs +
1253 REG_PLUS_RX_CBN_HI(i));
1254 writel((desc_dma + val) & 0xffffffff, cp->regs +
1255 REG_PLUS_RX_CBN_LOW(i));
1256 }
1257 }
1258
1259
1260
1261
1262
1263 readl(cp->regs + REG_INTR_STATUS_ALIAS);
1264 writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
1265 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1266 for (i = 1; i < N_RX_COMP_RINGS; i++)
1267 readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));
1268
1269
1270 if (N_RX_COMP_RINGS > 1)
1271 writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
1272 cp->regs + REG_PLUS_ALIASN_CLEAR(1));
1273
1274 for (i = 2; i < N_RX_COMP_RINGS; i++)
1275 writel(INTR_RX_DONE_ALT,
1276 cp->regs + REG_PLUS_ALIASN_CLEAR(i));
1277 }
1278
1279
1280 val = CAS_BASE(RX_PAUSE_THRESH_OFF,
1281 cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
1282 val |= CAS_BASE(RX_PAUSE_THRESH_ON,
1283 cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
1284 writel(val, cp->regs + REG_RX_PAUSE_THRESH);
1285
1286
1287 for (i = 0; i < 64; i++) {
1288 writel(i, cp->regs + REG_RX_TABLE_ADDR);
1289 writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
1290 writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
1291 writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
1292 }
1293
1294
1295 writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
1296 writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);
1297
1298
1299#ifdef USE_RX_BLANK
1300 val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
1301 val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
1302 writel(val, cp->regs + REG_RX_BLANK);
1303#else
1304 writel(0x0, cp->regs + REG_RX_BLANK);
1305#endif
1306
1307
1308
1309
1310
1311
1312
1313 val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
1314 writel(val, cp->regs + REG_RX_AE_THRESH);
1315 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1316 val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
1317 writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
1318 }
1319
1320
1321
1322
1323 writel(0x0, cp->regs + REG_RX_RED);
1324
1325
1326 val = 0;
1327 if (cp->page_size == 0x1000)
1328 val = 0x1;
1329 else if (cp->page_size == 0x2000)
1330 val = 0x2;
1331 else if (cp->page_size == 0x4000)
1332 val = 0x3;
1333
1334
1335 size = cp->dev->mtu + 64;
1336 if (size > cp->page_size)
1337 size = cp->page_size;
1338
1339 if (size <= 0x400)
1340 i = 0x0;
1341 else if (size <= 0x800)
1342 i = 0x1;
1343 else if (size <= 0x1000)
1344 i = 0x2;
1345 else
1346 i = 0x3;
1347
1348 cp->mtu_stride = 1 << (i + 10);
1349 val = CAS_BASE(RX_PAGE_SIZE, val);
1350 val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
1351 val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
1352 val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
1353 writel(val, cp->regs + REG_RX_PAGE_SIZE);
1354
1355
1356 if (CAS_HP_FIRMWARE == cas_prog_null)
1357 return;
1358
1359 val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
1360 val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
1361 val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
1362 writel(val, cp->regs + REG_HP_CFG);
1363}
1364
1365static inline void cas_rxc_init(struct cas_rx_comp *rxc)
1366{
1367 memset(rxc, 0, sizeof(*rxc));
1368 rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
1369}
1370
1371
1372
1373
1374
1375static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
1376{
1377 cas_page_t *page = cp->rx_pages[1][index];
1378 cas_page_t *new;
1379
1380 if (page_count(page->buffer) == 1)
1381 return page;
1382
1383 new = cas_page_dequeue(cp);
1384 if (new) {
1385 spin_lock(&cp->rx_inuse_lock);
1386 list_add(&page->list, &cp->rx_inuse_list);
1387 spin_unlock(&cp->rx_inuse_lock);
1388 }
1389 return new;
1390}
1391
1392
1393static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
1394 const int index)
1395{
1396 cas_page_t **page0 = cp->rx_pages[0];
1397 cas_page_t **page1 = cp->rx_pages[1];
1398
1399
1400 if (page_count(page0[index]->buffer) > 1) {
1401 cas_page_t *new = cas_page_spare(cp, index);
1402 if (new) {
1403 page1[index] = page0[index];
1404 page0[index] = new;
1405 }
1406 }
1407 RX_USED_SET(page0[index], 0);
1408 return page0[index];
1409}
1410
1411static void cas_clean_rxds(struct cas *cp)
1412{
1413
1414 struct cas_rx_desc *rxd = cp->init_rxds[0];
1415 int i, size;
1416
1417
1418 for (i = 0; i < N_RX_FLOWS; i++) {
1419 struct sk_buff *skb;
1420 while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
1421 cas_skb_release(skb);
1422 }
1423 }
1424
1425
1426 size = RX_DESC_RINGN_SIZE(0);
1427 for (i = 0; i < size; i++) {
1428 cas_page_t *page = cas_page_swap(cp, 0, i);
1429 rxd[i].buffer = cpu_to_le64(page->dma_addr);
1430 rxd[i].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
1431 CAS_BASE(RX_INDEX_RING, 0));
1432 }
1433
1434 cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4;
1435 cp->rx_last[0] = 0;
1436 cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
1437}
1438
1439static void cas_clean_rxcs(struct cas *cp)
1440{
1441 int i, j;
1442
1443
1444 memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
1445 memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
1446 for (i = 0; i < N_RX_COMP_RINGS; i++) {
1447 struct cas_rx_comp *rxc = cp->init_rxcs[i];
1448 for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
1449 cas_rxc_init(rxc + j);
1450 }
1451 }
1452}
1453
1454#if 0
1455
1456
1457
1458
1459
1460
1461static int cas_rxmac_reset(struct cas *cp)
1462{
1463 struct net_device *dev = cp->dev;
1464 int limit;
1465 u32 val;
1466
1467
1468 writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1469 for (limit = 0; limit < STOP_TRIES; limit++) {
1470 if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
1471 break;
1472 udelay(10);
1473 }
1474 if (limit == STOP_TRIES) {
1475 netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
1476 return 1;
1477 }
1478
1479
1480 writel(0, cp->regs + REG_RX_CFG);
1481 for (limit = 0; limit < STOP_TRIES; limit++) {
1482 if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
1483 break;
1484 udelay(10);
1485 }
1486 if (limit == STOP_TRIES) {
1487 netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
1488 return 1;
1489 }
1490
1491 mdelay(5);
1492
1493
1494 writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
1495 for (limit = 0; limit < STOP_TRIES; limit++) {
1496 if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
1497 break;
1498 udelay(10);
1499 }
1500 if (limit == STOP_TRIES) {
1501 netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
1502 return 1;
1503 }
1504
1505
1506 cas_clean_rxds(cp);
1507 cas_clean_rxcs(cp);
1508
1509
1510 cas_init_rx_dma(cp);
1511
1512
1513 val = readl(cp->regs + REG_RX_CFG);
1514 writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
1515 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
1516 val = readl(cp->regs + REG_MAC_RX_CFG);
1517 writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1518 return 0;
1519}
1520#endif
1521
1522static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
1523 u32 status)
1524{
1525 u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);
1526
1527 if (!stat)
1528 return 0;
1529
1530 netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);
1531
1532
1533 spin_lock(&cp->stat_lock[0]);
1534 if (stat & MAC_RX_ALIGN_ERR)
1535 cp->net_stats[0].rx_frame_errors += 0x10000;
1536
1537 if (stat & MAC_RX_CRC_ERR)
1538 cp->net_stats[0].rx_crc_errors += 0x10000;
1539
1540 if (stat & MAC_RX_LEN_ERR)
1541 cp->net_stats[0].rx_length_errors += 0x10000;
1542
1543 if (stat & MAC_RX_OVERFLOW) {
1544 cp->net_stats[0].rx_over_errors++;
1545 cp->net_stats[0].rx_fifo_errors++;
1546 }
1547
1548
1549
1550
1551 spin_unlock(&cp->stat_lock[0]);
1552 return 0;
1553}
1554
1555static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
1556 u32 status)
1557{
1558 u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);
1559
1560 if (!stat)
1561 return 0;
1562
1563 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1564 "mac interrupt, stat: 0x%x\n", stat);
1565
1566
1567
1568
1569
1570 if (stat & MAC_CTRL_PAUSE_STATE)
1571 cp->pause_entered++;
1572
1573 if (stat & MAC_CTRL_PAUSE_RECEIVED)
1574 cp->pause_last_time_recvd = (stat >> 16);
1575
1576 return 0;
1577}
1578
1579
1580
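/* Fallback state machine used while the MII link stays down: after a
 * failed autoneg the speed is forced and then stepped down on each
 * subsequent timeout (1000 full -> 100 full -> 100 half -> lower)
 * until a link is found.
 */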
1581static inline int cas_mdio_link_not_up(struct cas *cp)
1582{
1583 u16 val;
1584
1585 switch (cp->lstate) {
1586 case link_force_ret:
1587 netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
1588 cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
1589 cp->timer_ticks = 5;
1590 cp->lstate = link_force_ok;
1591 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1592 break;
1593
1594 case link_aneg:
1595 val = cas_phy_read(cp, MII_BMCR);
1596
1597
1598
1599
1600 val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
1601 val |= BMCR_FULLDPLX;
1602 val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
1603 CAS_BMCR_SPEED1000 : BMCR_SPEED100;
1604 cas_phy_write(cp, MII_BMCR, val);
1605 cp->timer_ticks = 5;
1606 cp->lstate = link_force_try;
1607 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1608 break;
1609
1610 case link_force_try:
1611
1612 val = cas_phy_read(cp, MII_BMCR);
1613 cp->timer_ticks = 5;
1614 if (val & CAS_BMCR_SPEED1000) {
1615 val &= ~CAS_BMCR_SPEED1000;
1616 val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
1617 cas_phy_write(cp, MII_BMCR, val);
1618 break;
1619 }
1620
1621 if (val & BMCR_SPEED100) {
1622 if (val & BMCR_FULLDPLX)
1623 val &= ~BMCR_FULLDPLX;
1624 else {
1625 val &= ~BMCR_SPEED100;
1626 }
1627 cas_phy_write(cp, MII_BMCR, val);
1628 break;
1629 }
1630 default:
1631 break;
1632 }
1633 return 0;
1634}
1635
1636
1637
1638static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
1639{
1640 int restart;
1641
1642 if (bmsr & BMSR_LSTATUS) {
1643
1644
1645
1646
1647
1648 if ((cp->lstate == link_force_try) &&
1649 (cp->link_cntl & BMCR_ANENABLE)) {
1650 cp->lstate = link_force_ret;
1651 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1652 cas_mif_poll(cp, 0);
1653 cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
1654 cp->timer_ticks = 5;
1655 if (cp->opened)
1656 netif_info(cp, link, cp->dev,
1657 "Got link after fallback, retrying autoneg once...\n");
1658 cas_phy_write(cp, MII_BMCR,
1659 cp->link_fcntl | BMCR_ANENABLE |
1660 BMCR_ANRESTART);
1661 cas_mif_poll(cp, 1);
1662
1663 } else if (cp->lstate != link_up) {
1664 cp->lstate = link_up;
1665 cp->link_transition = LINK_TRANSITION_LINK_UP;
1666
1667 if (cp->opened) {
1668 cas_set_link_modes(cp);
1669 netif_carrier_on(cp->dev);
1670 }
1671 }
1672 return 0;
1673 }
1674
1675
1676
1677
1678 restart = 0;
1679 if (cp->lstate == link_up) {
1680 cp->lstate = link_down;
1681 cp->link_transition = LINK_TRANSITION_LINK_DOWN;
1682
1683 netif_carrier_off(cp->dev);
1684 if (cp->opened)
1685 netif_info(cp, link, cp->dev, "Link down\n");
1686 restart = 1;
1687
1688 } else if (++cp->timer_ticks > 10)
1689 cas_mdio_link_not_up(cp);
1690
1691 return restart;
1692}
1693
1694static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
1695 u32 status)
1696{
1697 u32 stat = readl(cp->regs + REG_MIF_STATUS);
1698 u16 bmsr;
1699
1700
1701 if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
1702 return 0;
1703
1704 bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
1705 return cas_mii_link_check(cp, bmsr);
1706}
1707
1708static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
1709 u32 status)
1710{
1711 u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);
1712
1713 if (!stat)
1714 return 0;
1715
1716 netdev_err(dev, "PCI error [%04x:%04x]",
1717 stat, readl(cp->regs + REG_BIM_DIAG));
1718
1719
1720 if ((stat & PCI_ERR_BADACK) &&
1721 ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
1722 pr_cont(" <No ACK64# during ABS64 cycle>");
1723
1724 if (stat & PCI_ERR_DTRTO)
1725 pr_cont(" <Delayed transaction timeout>");
1726 if (stat & PCI_ERR_OTHER)
1727 pr_cont(" <other>");
1728 if (stat & PCI_ERR_BIM_DMA_WRITE)
1729 pr_cont(" <BIM DMA 0 write req>");
1730 if (stat & PCI_ERR_BIM_DMA_READ)
1731 pr_cont(" <BIM DMA 0 read req>");
1732 pr_cont("\n");
1733
1734 if (stat & PCI_ERR_OTHER) {
1735 u16 cfg;
1736
1737
1738
1739
1740 pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
1741 netdev_err(dev, "Read PCI cfg space status [%04x]\n", cfg);
1742 if (cfg & PCI_STATUS_PARITY)
1743 netdev_err(dev, "PCI parity error detected\n");
1744 if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
1745 netdev_err(dev, "PCI target abort\n");
1746 if (cfg & PCI_STATUS_REC_TARGET_ABORT)
1747 netdev_err(dev, "PCI master acks target abort\n");
1748 if (cfg & PCI_STATUS_REC_MASTER_ABORT)
1749 netdev_err(dev, "PCI master abort\n");
1750 if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
1751 netdev_err(dev, "PCI system error SERR#\n");
1752 if (cfg & PCI_STATUS_DETECTED_PARITY)
1753 netdev_err(dev, "PCI parity error\n");
1754
1755
1756 cfg &= (PCI_STATUS_PARITY |
1757 PCI_STATUS_SIG_TARGET_ABORT |
1758 PCI_STATUS_REC_TARGET_ABORT |
1759 PCI_STATUS_REC_MASTER_ABORT |
1760 PCI_STATUS_SIG_SYSTEM_ERROR |
1761 PCI_STATUS_DETECTED_PARITY);
1762 pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
1763 }
1764
1765
1766 return 1;
1767}
1768
1769
1770
1771
1772
1773
1774static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
1775 u32 status)
1776{
1777 if (status & INTR_RX_TAG_ERROR) {
1778
1779 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1780 "corrupt rx tag framing\n");
1781 spin_lock(&cp->stat_lock[0]);
1782 cp->net_stats[0].rx_errors++;
1783 spin_unlock(&cp->stat_lock[0]);
1784 goto do_reset;
1785 }
1786
1787 if (status & INTR_RX_LEN_MISMATCH) {
1788
1789 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1790 "length mismatch for rx frame\n");
1791 spin_lock(&cp->stat_lock[0]);
1792 cp->net_stats[0].rx_errors++;
1793 spin_unlock(&cp->stat_lock[0]);
1794 goto do_reset;
1795 }
1796
1797 if (status & INTR_PCS_STATUS) {
1798 if (cas_pcs_interrupt(dev, cp, status))
1799 goto do_reset;
1800 }
1801
1802 if (status & INTR_TX_MAC_STATUS) {
1803 if (cas_txmac_interrupt(dev, cp, status))
1804 goto do_reset;
1805 }
1806
1807 if (status & INTR_RX_MAC_STATUS) {
1808 if (cas_rxmac_interrupt(dev, cp, status))
1809 goto do_reset;
1810 }
1811
1812 if (status & INTR_MAC_CTRL_STATUS) {
1813 if (cas_mac_interrupt(dev, cp, status))
1814 goto do_reset;
1815 }
1816
1817 if (status & INTR_MIF_STATUS) {
1818 if (cas_mif_interrupt(dev, cp, status))
1819 goto do_reset;
1820 }
1821
1822 if (status & INTR_PCI_ERROR_STATUS) {
1823 if (cas_pci_interrupt(dev, cp, status))
1824 goto do_reset;
1825 }
1826 return 0;
1827
1828do_reset:
1829#if 1
1830 atomic_inc(&cp->reset_task_pending);
1831 atomic_inc(&cp->reset_task_pending_all);
1832 netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
1833 schedule_work(&cp->reset_task);
1834#else
1835 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
1836 netdev_err(dev, "reset called in cas_abnormal_irq\n");
1837 schedule_work(&cp->reset_task);
1838#endif
1839 return 1;
1840}
1841
1842
1843
1844
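/* TX target-abort workaround. On chips flagged with
 * CAS_FLAG_TARGET_ABORT, a TX buffer that ends within
 * TX_TARGET_ABORT_LEN bytes of a page boundary is problematic;
 * cas_calc_tabort() returns how many trailing bytes the TX path should
 * carve off into one of its tiny bounce buffers, or 0 when no
 * workaround is needed.
 */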
#define CAS_TABORT(x)      (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
#define CAS_ROUND_PAGE(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)
static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
                                  const int len)
{
        unsigned long off = addr + len;

        if (CAS_TABORT(cp) == 1)
                return 0;
        if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
                return 0;
        return TX_TARGET_ABORT_LEN;
}
1858
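/* Reclaim completed TX descriptors on one ring up to the reported
 * completion index: unmap the DMA buffers, release the skbs, bump the
 * per-ring stats, and wake the queue once enough descriptors are free
 * again.
 */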
1859static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
1860{
1861 struct cas_tx_desc *txds;
1862 struct sk_buff **skbs;
1863 struct net_device *dev = cp->dev;
1864 int entry, count;
1865
1866 spin_lock(&cp->tx_lock[ring]);
1867 txds = cp->init_txds[ring];
1868 skbs = cp->tx_skbs[ring];
1869 entry = cp->tx_old[ring];
1870
1871 count = TX_BUFF_COUNT(ring, entry, limit);
1872 while (entry != limit) {
1873 struct sk_buff *skb = skbs[entry];
1874 dma_addr_t daddr;
1875 u32 dlen;
1876 int frag;
1877
1878 if (!skb) {
1879
1880 entry = TX_DESC_NEXT(ring, entry);
1881 continue;
1882 }
1883
1884
1885 count -= skb_shinfo(skb)->nr_frags +
1886 + cp->tx_tiny_use[ring][entry].nbufs + 1;
1887 if (count < 0)
1888 break;
1889
1890 netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
1891 "tx[%d] done, slot %d\n", ring, entry);
1892
1893 skbs[entry] = NULL;
1894 cp->tx_tiny_use[ring][entry].nbufs = 0;
1895
1896 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1897 struct cas_tx_desc *txd = txds + entry;
1898
1899 daddr = le64_to_cpu(txd->buffer);
1900 dlen = CAS_VAL(TX_DESC_BUFLEN,
1901 le64_to_cpu(txd->control));
1902 pci_unmap_page(cp->pdev, daddr, dlen,
1903 PCI_DMA_TODEVICE);
1904 entry = TX_DESC_NEXT(ring, entry);
1905
1906
1907 if (cp->tx_tiny_use[ring][entry].used) {
1908 cp->tx_tiny_use[ring][entry].used = 0;
1909 entry = TX_DESC_NEXT(ring, entry);
1910 }
1911 }
1912
1913 spin_lock(&cp->stat_lock[ring]);
1914 cp->net_stats[ring].tx_packets++;
1915 cp->net_stats[ring].tx_bytes += skb->len;
1916 spin_unlock(&cp->stat_lock[ring]);
1917 dev_kfree_skb_irq(skb);
1918 }
1919 cp->tx_old[ring] = entry;
1920
1921
1922
1923
1924
1925 if (netif_queue_stopped(dev) &&
1926 (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
1927 netif_wake_queue(dev);
1928 spin_unlock(&cp->tx_lock[ring]);
1929}
1930
1931static void cas_tx(struct net_device *dev, struct cas *cp,
1932 u32 status)
1933{
1934 int limit, ring;
1935#ifdef USE_TX_COMPWB
1936 u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
1937#endif
1938 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1939 "tx interrupt, status: 0x%x, %llx\n",
1940 status, (unsigned long long)compwb);
1941
1942 for (ring = 0; ring < N_TX_RINGS; ring++) {
1943#ifdef USE_TX_COMPWB
1944
1945 limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
1946 CAS_VAL(TX_COMPWB_LSB, compwb);
1947 compwb = TX_COMPWB_NEXT(compwb);
1948#else
1949 limit = readl(cp->regs + REG_TX_COMPN(ring));
1950#endif
1951 if (cp->tx_old[ring] != limit)
1952 cas_tx_ringN(cp, ring, limit);
1953 }
1954}
1955
1956
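/* Build an skb for a received packet described by the completion
 * words. Small packets (and the header portion of split packets) are
 * copied into the skb; larger payloads are attached as page fragments.
 * When the chip appends a CRC, it is folded into the hardware checksum
 * before the skb is handed up.
 */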
1957static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
1958 int entry, const u64 *words,
1959 struct sk_buff **skbref)
1960{
1961 int dlen, hlen, len, i, alloclen;
1962 int off, swivel = RX_SWIVEL_OFF_VAL;
1963 struct cas_page *page;
1964 struct sk_buff *skb;
1965 void *addr, *crcaddr;
1966 __sum16 csum;
1967 char *p;
1968
1969 hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
1970 dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
1971 len = hlen + dlen;
1972
1973 if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
1974 alloclen = len;
1975 else
1976 alloclen = max(hlen, RX_COPY_MIN);
1977
1978 skb = dev_alloc_skb(alloclen + swivel + cp->crc_size);
1979 if (skb == NULL)
1980 return -1;
1981
1982 *skbref = skb;
1983 skb_reserve(skb, swivel);
1984
1985 p = skb->data;
1986 addr = crcaddr = NULL;
1987 if (hlen) {
1988 i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
1989 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
1990 off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
1991 swivel;
1992
1993 i = hlen;
1994 if (!dlen)
1995 i += cp->crc_size;
1996 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
1997 PCI_DMA_FROMDEVICE);
1998 addr = cas_page_map(page->buffer);
1999 memcpy(p, addr + off, i);
2000 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
2001 PCI_DMA_FROMDEVICE);
2002 cas_page_unmap(addr);
2003 RX_USED_ADD(page, 0x100);
2004 p += hlen;
2005 swivel = 0;
2006 }
2007
2008
2009 if (alloclen < (hlen + dlen)) {
2010 skb_frag_t *frag = skb_shinfo(skb)->frags;
2011
2012
2013 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2014 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2015 off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2016
2017 hlen = min(cp->page_size - off, dlen);
2018 if (hlen < 0) {
2019 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
2020 "rx page overflow: %d\n", hlen);
2021 dev_kfree_skb_irq(skb);
2022 return -1;
2023 }
2024 i = hlen;
2025 if (i == dlen)
2026 i += cp->crc_size;
2027 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
2028 PCI_DMA_FROMDEVICE);
2029
2030
2031 swivel = 0;
2032 if (p == (char *) skb->data) {
2033 addr = cas_page_map(page->buffer);
2034 memcpy(p, addr + off, RX_COPY_MIN);
2035 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
2036 PCI_DMA_FROMDEVICE);
2037 cas_page_unmap(addr);
2038 off += RX_COPY_MIN;
2039 swivel = RX_COPY_MIN;
2040 RX_USED_ADD(page, cp->mtu_stride);
2041 } else {
2042 RX_USED_ADD(page, hlen);
2043 }
2044 skb_put(skb, alloclen);
2045
2046 skb_shinfo(skb)->nr_frags++;
2047 skb->data_len += hlen - swivel;
2048 skb->truesize += hlen - swivel;
2049 skb->len += hlen - swivel;
2050
2051 get_page(page->buffer);
2052 frag->page = page->buffer;
2053 frag->page_offset = off;
2054 frag->size = hlen - swivel;
2055
2056
2057 if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2058 hlen = dlen;
2059 off = 0;
2060
2061 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2062 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2063 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
2064 hlen + cp->crc_size,
2065 PCI_DMA_FROMDEVICE);
2066 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
2067 hlen + cp->crc_size,
2068 PCI_DMA_FROMDEVICE);
2069
2070 skb_shinfo(skb)->nr_frags++;
2071 skb->data_len += hlen;
2072 skb->len += hlen;
2073 frag++;
2074
2075 get_page(page->buffer);
2076 frag->page = page->buffer;
2077 frag->page_offset = 0;
2078 frag->size = hlen;
2079 RX_USED_ADD(page, hlen + cp->crc_size);
2080 }
2081
2082 if (cp->crc_size) {
2083 addr = cas_page_map(page->buffer);
2084 crcaddr = addr + off + hlen;
2085 }
2086
2087 } else {
2088
2089 if (!dlen)
2090 goto end_copy_pkt;
2091
2092 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2093 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2094 off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2095 hlen = min(cp->page_size - off, dlen);
2096 if (hlen < 0) {
2097 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
2098 "rx page overflow: %d\n", hlen);
2099 dev_kfree_skb_irq(skb);
2100 return -1;
2101 }
2102 i = hlen;
2103 if (i == dlen)
2104 i += cp->crc_size;
2105 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
2106 PCI_DMA_FROMDEVICE);
2107 addr = cas_page_map(page->buffer);
2108 memcpy(p, addr + off, i);
2109 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
2110 PCI_DMA_FROMDEVICE);
2111 cas_page_unmap(addr);
2112 if (p == (char *) skb->data)
2113 RX_USED_ADD(page, cp->mtu_stride);
2114 else
2115 RX_USED_ADD(page, i);
2116
2117
2118 if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2119 p += hlen;
2120 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2121 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2122 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
2123 dlen + cp->crc_size,
2124 PCI_DMA_FROMDEVICE);
2125 addr = cas_page_map(page->buffer);
2126 memcpy(p, addr, dlen + cp->crc_size);
2127 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
2128 dlen + cp->crc_size,
2129 PCI_DMA_FROMDEVICE);
2130 cas_page_unmap(addr);
2131 RX_USED_ADD(page, dlen + cp->crc_size);
2132 }
2133end_copy_pkt:
2134 if (cp->crc_size) {
2135 addr = NULL;
2136 crcaddr = skb->data + alloclen;
2137 }
2138 skb_put(skb, alloclen);
2139 }
2140
2141 csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
2142 if (cp->crc_size) {
2143
2144 csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
2145 csum_unfold(csum)));
2146 if (addr)
2147 cas_page_unmap(addr);
2148 }
2149 skb->protocol = eth_type_trans(skb, cp->dev);
2150 if (skb->protocol == htons(ETH_P_IP)) {
2151 skb->csum = csum_unfold(~csum);
2152 skb->ip_summed = CHECKSUM_COMPLETE;
2153 } else
2154 skb_checksum_none_assert(skb);
2155 return len;
2156}
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
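/* Packets that belong to a hardware-classified flow are queued on the
 * corresponding rx_flows list and only pushed up the stack, in order,
 * when a completion carrying RX_COMP1_RELEASE_FLOW arrives.
 */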
static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
                                   struct sk_buff *skb)
{
        int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
        struct sk_buff_head *flow = &cp->rx_flows[flowid];

        __skb_queue_tail(flow, skb);
        if (words[0] & RX_COMP1_RELEASE_FLOW) {
                while ((skb = __skb_dequeue(flow))) {
                        cas_skb_release(skb);
                }
        }
}
2190
2191
2192
2193
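/* Return a (possibly swapped) page to the RX descriptor ring at the
 * given index and advance rx_old, kicking the hardware once every four
 * entries.
 */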
2194static void cas_post_page(struct cas *cp, const int ring, const int index)
2195{
2196 cas_page_t *new;
2197 int entry;
2198
2199 entry = cp->rx_old[ring];
2200
2201 new = cas_page_swap(cp, ring, index);
2202 cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
2203 cp->init_rxds[ring][entry].index =
2204 cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
2205 CAS_BASE(RX_INDEX_RING, ring));
2206
2207 entry = RX_DESC_ENTRY(ring, entry + 1);
2208 cp->rx_old[ring] = entry;
2209
2210 if (entry % 4)
2211 return;
2212
2213 if (ring == 0)
2214 writel(entry, cp->regs + REG_RX_KICK);
2215 else if ((N_RX_DESC_RINGS > 1) &&
2216 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2217 writel(entry, cp->regs + REG_PLUS_RX_KICK1);
2218}
2219
2220
2221
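/* Refill the RX descriptor ring: descriptors whose pages are still
 * referenced by the stack get fresh pages from the spare pool. If the
 * pool runs dry, the ring is flagged with CAS_FLAG_RXD_POST and the
 * link timer retries the refill later.
 */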
2222static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
2223{
2224 unsigned int entry, last, count, released;
2225 int cluster;
2226 cas_page_t **page = cp->rx_pages[ring];
2227
2228 entry = cp->rx_old[ring];
2229
2230 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
2231 "rxd[%d] interrupt, done: %d\n", ring, entry);
2232
2233 cluster = -1;
2234 count = entry & 0x3;
2235 last = RX_DESC_ENTRY(ring, num ? entry + num - 4: entry - 4);
2236 released = 0;
2237 while (entry != last) {
2238
2239 if (page_count(page[entry]->buffer) > 1) {
2240 cas_page_t *new = cas_page_dequeue(cp);
2241 if (!new) {
2242
2243
2244
2245 cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
2246 if (!timer_pending(&cp->link_timer))
2247 mod_timer(&cp->link_timer, jiffies +
2248 CAS_LINK_FAST_TIMEOUT);
2249 cp->rx_old[ring] = entry;
2250 cp->rx_last[ring] = num ? num - released : 0;
2251 return -ENOMEM;
2252 }
2253 spin_lock(&cp->rx_inuse_lock);
2254 list_add(&page[entry]->list, &cp->rx_inuse_list);
2255 spin_unlock(&cp->rx_inuse_lock);
2256 cp->init_rxds[ring][entry].buffer =
2257 cpu_to_le64(new->dma_addr);
2258 page[entry] = new;
2259
2260 }
2261
2262 if (++count == 4) {
2263 cluster = entry;
2264 count = 0;
2265 }
2266 released++;
2267 entry = RX_DESC_ENTRY(ring, entry + 1);
2268 }
2269 cp->rx_old[ring] = entry;
2270
2271 if (cluster < 0)
2272 return 0;
2273
2274 if (ring == 0)
2275 writel(cluster, cp->regs + REG_RX_KICK);
2276 else if ((N_RX_DESC_RINGS > 1) &&
2277 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2278 writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
2279 return 0;
2280}
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
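/* Process one RX completion ring, honoring the budget when NAPI is in
 * use: validate each completion, build and deliver the skb, then
 * release any header/data/next pages named in the completion back to
 * their descriptor rings.
 */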
2295static int cas_rx_ringN(struct cas *cp, int ring, int budget)
2296{
2297 struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
2298 int entry, drops;
2299 int npackets = 0;
2300
2301 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
2302 "rx[%d] interrupt, done: %d/%d\n",
2303 ring,
2304 readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);
2305
2306 entry = cp->rx_new[ring];
2307 drops = 0;
2308 while (1) {
2309 struct cas_rx_comp *rxc = rxcs + entry;
2310 struct sk_buff *uninitialized_var(skb);
2311 int type, len;
2312 u64 words[4];
2313 int i, dring;
2314
2315 words[0] = le64_to_cpu(rxc->word1);
2316 words[1] = le64_to_cpu(rxc->word2);
2317 words[2] = le64_to_cpu(rxc->word3);
2318 words[3] = le64_to_cpu(rxc->word4);
2319
2320
2321 type = CAS_VAL(RX_COMP1_TYPE, words[0]);
2322 if (type == 0)
2323 break;
2324
2325
2326 if (words[3] & RX_COMP4_ZERO) {
2327 break;
2328 }
2329
2330
2331 if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
2332 spin_lock(&cp->stat_lock[ring]);
2333 cp->net_stats[ring].rx_errors++;
2334 if (words[3] & RX_COMP4_LEN_MISMATCH)
2335 cp->net_stats[ring].rx_length_errors++;
2336 if (words[3] & RX_COMP4_BAD)
2337 cp->net_stats[ring].rx_crc_errors++;
2338 spin_unlock(&cp->stat_lock[ring]);
2339
2340
2341 drop_it:
2342 spin_lock(&cp->stat_lock[ring]);
2343 ++cp->net_stats[ring].rx_dropped;
2344 spin_unlock(&cp->stat_lock[ring]);
2345 goto next;
2346 }
2347
2348 len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
2349 if (len < 0) {
2350 ++drops;
2351 goto drop_it;
2352 }
2353
2354
2355
2356
2357 if (RX_DONT_BATCH || (type == 0x2)) {
2358
2359 cas_skb_release(skb);
2360 } else {
2361 cas_rx_flow_pkt(cp, words, skb);
2362 }
2363
2364 spin_lock(&cp->stat_lock[ring]);
2365 cp->net_stats[ring].rx_packets++;
2366 cp->net_stats[ring].rx_bytes += len;
2367 spin_unlock(&cp->stat_lock[ring]);
2368
2369 next:
2370 npackets++;
2371
2372
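		/* Hand back any header/data pages the completion marks as
		 * released to their descriptor rings.
		 */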
2373 if (words[0] & RX_COMP1_RELEASE_HDR) {
2374 i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
2375 dring = CAS_VAL(RX_INDEX_RING, i);
2376 i = CAS_VAL(RX_INDEX_NUM, i);
2377 cas_post_page(cp, dring, i);
2378 }
2379
2380 if (words[0] & RX_COMP1_RELEASE_DATA) {
2381 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2382 dring = CAS_VAL(RX_INDEX_RING, i);
2383 i = CAS_VAL(RX_INDEX_NUM, i);
2384 cas_post_page(cp, dring, i);
2385 }
2386
2387 if (words[0] & RX_COMP1_RELEASE_NEXT) {
2388 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2389 dring = CAS_VAL(RX_INDEX_RING, i);
2390 i = CAS_VAL(RX_INDEX_NUM, i);
2391 cas_post_page(cp, dring, i);
2392 }
2393
2394
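		/* Advance past any extra completion entries this packet
		 * consumed.
		 */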
2395 entry = RX_COMP_ENTRY(ring, entry + 1 +
2396 CAS_VAL(RX_COMP1_SKIP, words[0]));
2397#ifdef USE_NAPI
2398 if (budget && (npackets >= budget))
2399 break;
2400#endif
2401 }
2402 cp->rx_new[ring] = entry;
2403
2404 if (drops)
2405 netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
2406 return npackets;
2407}
2408
2409
2410
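/* Reinitialize the completion descriptors the driver has consumed and
 * advance the hardware's completion tail pointer to match.
 */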
2411static void cas_post_rxcs_ringN(struct net_device *dev,
2412 struct cas *cp, int ring)
2413{
2414 struct cas_rx_comp *rxc = cp->init_rxcs[ring];
2415 int last, entry;
2416
2417 last = cp->rx_cur[ring];
2418 entry = cp->rx_new[ring];
2419 netif_printk(cp, intr, KERN_DEBUG, dev,
2420 "rxc[%d] interrupt, done: %d/%d\n",
2421 ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);
2422
2423
2424 while (last != entry) {
2425 cas_rxc_init(rxc + last);
2426 last = RX_COMP_ENTRY(ring, last + 1);
2427 }
2428 cp->rx_cur[ring] = last;
2429
2430 if (ring == 0)
2431 writel(last, cp->regs + REG_RX_COMP_TAIL);
2432 else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
2433 writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
2434}
2435
2436
2437
2438
2439
2440
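/* When the optional PCI INTC/INTD interrupts are enabled, completion
 * rings 2 and 3 each get their own handler; those rings only ever see
 * RX work.
 */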
2441#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
2442static inline void cas_handle_irqN(struct net_device *dev,
2443 struct cas *cp, const u32 status,
2444 const int ring)
2445{
2446 if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
2447 cas_post_rxcs_ringN(dev, cp, ring);
2448}
2449
2450static irqreturn_t cas_interruptN(int irq, void *dev_id)
2451{
2452 struct net_device *dev = dev_id;
2453 struct cas *cp = netdev_priv(dev);
2454 unsigned long flags;
2455 int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
2456 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
2457
2458
2459 if (status == 0)
2460 return IRQ_NONE;
2461
2462 spin_lock_irqsave(&cp->lock, flags);
2463 if (status & INTR_RX_DONE_ALT) {
2464#ifdef USE_NAPI
2465 cas_mask_intr(cp);
2466 napi_schedule(&cp->napi);
2467#else
2468 cas_rx_ringN(cp, ring, 0);
2469#endif
2470 status &= ~INTR_RX_DONE_ALT;
2471 }
2472
2473 if (status)
2474 cas_handle_irqN(dev, cp, status, ring);
2475 spin_unlock_irqrestore(&cp->lock, flags);
2476 return IRQ_HANDLED;
2477}
2478#endif
2479
2480#ifdef USE_PCI_INTB
2481
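/* Service ring 1 conditions other than normal RX completions: buffer
 * exhaustion, buffer almost-empty, and completion-ring pressure.
 */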
2482static inline void cas_handle_irq1(struct cas *cp, const u32 status)
2483{
2484 if (status & INTR_RX_BUF_UNAVAIL_1) {
		/* A frame arrived while no free RX buffers were available;
		 * this can also happen across a link transition.
		 */
2487 cas_post_rxds_ringN(cp, 1, 0);
2488 spin_lock(&cp->stat_lock[1]);
2489 cp->net_stats[1].rx_dropped++;
2490 spin_unlock(&cp->stat_lock[1]);
2491 }
2492
2493 if (status & INTR_RX_BUF_AE_1)
2494 cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
2495 RX_AE_FREEN_VAL(1));
2496
2497 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
		cas_post_rxcs_ringN(cp->dev, cp, 1);
2499}
2500
2501
2502static irqreturn_t cas_interrupt1(int irq, void *dev_id)
2503{
2504 struct net_device *dev = dev_id;
2505 struct cas *cp = netdev_priv(dev);
2506 unsigned long flags;
2507 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2508
2509
2510 if (status == 0)
2511 return IRQ_NONE;
2512
2513 spin_lock_irqsave(&cp->lock, flags);
2514 if (status & INTR_RX_DONE_ALT) {
2515#ifdef USE_NAPI
2516 cas_mask_intr(cp);
2517 napi_schedule(&cp->napi);
2518#else
2519 cas_rx_ringN(cp, 1, 0);
2520#endif
2521 status &= ~INTR_RX_DONE_ALT;
2522 }
2523 if (status)
2524 cas_handle_irq1(cp, status);
2525 spin_unlock_irqrestore(&cp->lock, flags);
2526 return IRQ_HANDLED;
2527}
2528#endif
2529
2530static inline void cas_handle_irq(struct net_device *dev,
2531 struct cas *cp, const u32 status)
2532{
2533
2534 if (status & INTR_ERROR_MASK)
2535 cas_abnormal_irq(dev, cp, status);
2536
2537 if (status & INTR_RX_BUF_UNAVAIL) {
		/* A frame arrived while no free RX buffers were available;
		 * this can also happen across a link transition.
		 */
2541 cas_post_rxds_ringN(cp, 0, 0);
2542 spin_lock(&cp->stat_lock[0]);
2543 cp->net_stats[0].rx_dropped++;
2544 spin_unlock(&cp->stat_lock[0]);
2545 } else if (status & INTR_RX_BUF_AE) {
2546 cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
2547 RX_AE_FREEN_VAL(0));
2548 }
2549
2550 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2551 cas_post_rxcs_ringN(dev, cp, 0);
2552}
2553
2554static irqreturn_t cas_interrupt(int irq, void *dev_id)
2555{
2556 struct net_device *dev = dev_id;
2557 struct cas *cp = netdev_priv(dev);
2558 unsigned long flags;
2559 u32 status = readl(cp->regs + REG_INTR_STATUS);
2560
2561 if (status == 0)
2562 return IRQ_NONE;
2563
2564 spin_lock_irqsave(&cp->lock, flags);
2565 if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
2566 cas_tx(dev, cp, status);
2567 status &= ~(INTR_TX_ALL | INTR_TX_INTME);
2568 }
2569
2570 if (status & INTR_RX_DONE) {
2571#ifdef USE_NAPI
2572 cas_mask_intr(cp);
2573 napi_schedule(&cp->napi);
2574#else
2575 cas_rx_ringN(cp, 0, 0);
2576#endif
2577 status &= ~INTR_RX_DONE;
2578 }
2579
2580 if (status)
2581 cas_handle_irq(dev, cp, status);
2582 spin_unlock_irqrestore(&cp->lock, flags);
2583 return IRQ_HANDLED;
2584}
2585
2586
2587#ifdef USE_NAPI
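/* NAPI poll handler: reap TX completions, service the RX completion
 * rings within the budget, then handle any remaining status bits and
 * re-enable interrupts once all work is done.
 */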
2588static int cas_poll(struct napi_struct *napi, int budget)
2589{
2590 struct cas *cp = container_of(napi, struct cas, napi);
2591 struct net_device *dev = cp->dev;
2592 int i, enable_intr, credits;
2593 u32 status = readl(cp->regs + REG_INTR_STATUS);
2594 unsigned long flags;
2595
2596 spin_lock_irqsave(&cp->lock, flags);
2597 cas_tx(dev, cp, status);
2598 spin_unlock_irqrestore(&cp->lock, flags);
2599

	/* Service the RX completion rings round-robin, giving each pass an
	 * equal share of the budget; if the budget runs out, leave
	 * interrupts masked so the poll will run again.
	 */
2607 enable_intr = 1;
2608 credits = 0;
2609 for (i = 0; i < N_RX_COMP_RINGS; i++) {
2610 int j;
2611 for (j = 0; j < N_RX_COMP_RINGS; j++) {
2612 credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
2613 if (credits >= budget) {
2614 enable_intr = 0;
2615 goto rx_comp;
2616 }
2617 }
2618 }
2619
2620rx_comp:
2621
2622 spin_lock_irqsave(&cp->lock, flags);
2623 if (status)
2624 cas_handle_irq(dev, cp, status);
2625
2626#ifdef USE_PCI_INTB
2627 if (N_RX_COMP_RINGS > 1) {
2628 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2629 if (status)
			cas_handle_irq1(cp, status);
2631 }
2632#endif
2633
2634#ifdef USE_PCI_INTC
2635 if (N_RX_COMP_RINGS > 2) {
2636 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
2637 if (status)
2638 cas_handle_irqN(dev, cp, status, 2);
2639 }
2640#endif
2641
2642#ifdef USE_PCI_INTD
2643 if (N_RX_COMP_RINGS > 3) {
2644 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
2645 if (status)
2646 cas_handle_irqN(dev, cp, status, 3);
2647 }
2648#endif
2649 spin_unlock_irqrestore(&cp->lock, flags);
2650 if (enable_intr) {
2651 napi_complete(napi);
2652 cas_unmask_intr(cp);
2653 }
2654 return credits;
2655}
2656#endif
2657
2658#ifdef CONFIG_NET_POLL_CONTROLLER
2659static void cas_netpoll(struct net_device *dev)
2660{
2661 struct cas *cp = netdev_priv(dev);
2662
2663 cas_disable_irq(cp, 0);
2664 cas_interrupt(cp->pdev->irq, dev);
2665 cas_enable_irq(cp, 0);
2666
2667#ifdef USE_PCI_INTB
2668 if (N_RX_COMP_RINGS > 1) {
2669
2670 }
2671#endif
2672#ifdef USE_PCI_INTC
2673 if (N_RX_COMP_RINGS > 2) {
2674
2675 }
2676#endif
2677#ifdef USE_PCI_INTD
2678 if (N_RX_COMP_RINGS > 3) {
2679
2680 }
2681#endif
2682}
2683#endif
2684
2685static void cas_tx_timeout(struct net_device *dev)
2686{
2687 struct cas *cp = netdev_priv(dev);
2688
2689 netdev_err(dev, "transmit timed out, resetting\n");
2690 if (!cp->hw_running) {
2691 netdev_err(dev, "hrm.. hw not running!\n");
2692 return;
2693 }
2694
2695 netdev_err(dev, "MIF_STATE[%08x]\n",
2696 readl(cp->regs + REG_MIF_STATE_MACHINE));
2697
2698 netdev_err(dev, "MAC_STATE[%08x]\n",
2699 readl(cp->regs + REG_MAC_STATE_MACHINE));
2700
2701 netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
2702 readl(cp->regs + REG_TX_CFG),
2703 readl(cp->regs + REG_MAC_TX_STATUS),
2704 readl(cp->regs + REG_MAC_TX_CFG),
2705 readl(cp->regs + REG_TX_FIFO_PKT_CNT),
2706 readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
2707 readl(cp->regs + REG_TX_FIFO_READ_PTR),
2708 readl(cp->regs + REG_TX_SM_1),
2709 readl(cp->regs + REG_TX_SM_2));
2710
2711 netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
2712 readl(cp->regs + REG_RX_CFG),
2713 readl(cp->regs + REG_MAC_RX_STATUS),
2714 readl(cp->regs + REG_MAC_RX_CFG));
2715
2716 netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n",
2717 readl(cp->regs + REG_HP_STATE_MACHINE),
2718 readl(cp->regs + REG_HP_STATUS0),
2719 readl(cp->regs + REG_HP_STATUS1),
2720 readl(cp->regs + REG_HP_STATUS2));
2721
2722#if 1
2723 atomic_inc(&cp->reset_task_pending);
2724 atomic_inc(&cp->reset_task_pending_all);
2725 schedule_work(&cp->reset_task);
2726#else
2727 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
2728 schedule_work(&cp->reset_task);
2729#endif
2730}
2731
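/* Ask for a TX-complete interrupt only twice per trip around the ring
 * to keep the interrupt rate down.
 */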
2732static inline int cas_intme(int ring, int entry)
2733{
2734
2735 if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)))
2736 return 1;
2737 return 0;
2738}
2739
2740
2741static void cas_write_txd(struct cas *cp, int ring, int entry,
2742 dma_addr_t mapping, int len, u64 ctrl, int last)
2743{
2744 struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
2745
2746 ctrl |= CAS_BASE(TX_DESC_BUFLEN, len);
2747 if (cas_intme(ring, entry))
2748 ctrl |= TX_DESC_INTME;
2749 if (last)
2750 ctrl |= TX_DESC_EOF;
2751 txd->control = cpu_to_le64(ctrl);
2752 txd->buffer = cpu_to_le64(mapping);
2753}
2754
2755static inline void *tx_tiny_buf(struct cas *cp, const int ring,
2756 const int entry)
2757{
2758 return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
2759}
2760
2761static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
2762 const int entry, const int tentry)
2763{
2764 cp->tx_tiny_use[ring][tentry].nbufs++;
2765 cp->tx_tiny_use[ring][entry].used = 1;
2766 return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
2767}
2768
2769static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2770 struct sk_buff *skb)
2771{
2772 struct net_device *dev = cp->dev;
2773 int entry, nr_frags, frag, tabort, tentry;
2774 dma_addr_t mapping;
2775 unsigned long flags;
2776 u64 ctrl;
2777 u32 len;
2778
2779 spin_lock_irqsave(&cp->tx_lock[ring], flags);
2780
2781
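	/* This should never happen: the queue is stopped well before the
	 * ring can fill.  Treat it as a hard error.
	 */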
2782 if (TX_BUFFS_AVAIL(cp, ring) <=
2783 CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
2784 netif_stop_queue(dev);
2785 spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2786 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
2787 return 1;
2788 }
2789
2790 ctrl = 0;
2791 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2792 const u64 csum_start_off = skb_checksum_start_offset(skb);
2793 const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
2794
2795 ctrl = TX_DESC_CSUM_EN |
2796 CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
2797 CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
2798 }
2799
2800 entry = cp->tx_new[ring];
2801 cp->tx_skbs[ring][entry] = skb;
2802
2803 nr_frags = skb_shinfo(skb)->nr_frags;
2804 len = skb_headlen(skb);
2805 mapping = pci_map_page(cp->pdev, virt_to_page(skb->data),
2806 offset_in_page(skb->data), len,
2807 PCI_DMA_TODEVICE);
2808
2809 tentry = entry;
2810 tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
2811 if (unlikely(tabort)) {
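		/* The buffer ends too close to a page boundary for the DMA
		 * engine (the target-abort erratum on some Cassini
		 * revisions): split it and send the tail from a pre-allocated
		 * tiny bounce buffer.  len is always greater than tabort.
		 */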
2812
2813 cas_write_txd(cp, ring, entry, mapping, len - tabort,
2814 ctrl | TX_DESC_SOF, 0);
2815 entry = TX_DESC_NEXT(ring, entry);
2816
2817 skb_copy_from_linear_data_offset(skb, len - tabort,
2818 tx_tiny_buf(cp, ring, entry), tabort);
2819 mapping = tx_tiny_map(cp, ring, entry, tentry);
2820 cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
2821 (nr_frags == 0));
2822 } else {
2823 cas_write_txd(cp, ring, entry, mapping, len, ctrl |
2824 TX_DESC_SOF, (nr_frags == 0));
2825 }
2826 entry = TX_DESC_NEXT(ring, entry);
2827
2828 for (frag = 0; frag < nr_frags; frag++) {
2829 skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
2830
2831 len = fragp->size;
2832 mapping = pci_map_page(cp->pdev, fragp->page,
2833 fragp->page_offset, len,
2834 PCI_DMA_TODEVICE);
2835
2836 tabort = cas_calc_tabort(cp, fragp->page_offset, len);
2837 if (unlikely(tabort)) {
2838 void *addr;
2839
2840
2841 cas_write_txd(cp, ring, entry, mapping, len - tabort,
2842 ctrl, 0);
2843 entry = TX_DESC_NEXT(ring, entry);
2844
2845 addr = cas_page_map(fragp->page);
2846 memcpy(tx_tiny_buf(cp, ring, entry),
2847 addr + fragp->page_offset + len - tabort,
2848 tabort);
2849 cas_page_unmap(addr);
2850 mapping = tx_tiny_map(cp, ring, entry, tentry);
2851 len = tabort;
2852 }
2853
2854 cas_write_txd(cp, ring, entry, mapping, len, ctrl,
2855 (frag + 1 == nr_frags));
2856 entry = TX_DESC_NEXT(ring, entry);
2857 }
2858
2859 cp->tx_new[ring] = entry;
2860 if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
2861 netif_stop_queue(dev);
2862
2863 netif_printk(cp, tx_queued, KERN_DEBUG, dev,
2864 "tx[%d] queued, slot %d, skblen %d, avail %d\n",
2865 ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
2866 writel(entry, cp->regs + REG_TX_KICKN(ring));
2867 spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2868 return 0;
2869}
2870
2871static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
2872{
2873 struct cas *cp = netdev_priv(dev);
2874
	/* This counter is only a load-balancing hint across the TX rings,
	 * so it does not need to be SMP safe.
	 */
2878 static int ring;
2879
2880 if (skb_padto(skb, cp->min_frame_size))
2881 return NETDEV_TX_OK;
2882
	/* XXX: higher-level QoS hooks could steer packets to specific
	 * rings; for now just round-robin across them.
	 */
2886 if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
2887 return NETDEV_TX_BUSY;
2888 return NETDEV_TX_OK;
2889}
2890
2891static void cas_init_tx_dma(struct cas *cp)
2892{
2893 u64 desc_dma = cp->block_dvma;
2894 unsigned long off;
2895 u32 val;
2896 int i;
2897
2898
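	/* Tell the hardware where to DMA the TX completion write-back. */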
2899#ifdef USE_TX_COMPWB
2900 off = offsetof(struct cas_init_block, tx_compwb);
2901 writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
2902 writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
2903#endif
2904
2905
2906
2907
2908 val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
2909 TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
2910 TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
2911 TX_CFG_INTR_COMPWB_DIS;
2912
2913
2914 for (i = 0; i < MAX_TX_RINGS; i++) {
2915 off = (unsigned long) cp->init_txds[i] -
2916 (unsigned long) cp->init_block;
2917
2918 val |= CAS_TX_RINGN_BASE(i);
2919 writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
2920 writel((desc_dma + off) & 0xffffffff, cp->regs +
2921 REG_TX_DBN_LOW(i));
2922
2923
2924
2925 }
2926 writel(val, cp->regs + REG_TX_CFG);
2927
2928
2929
2930
2931#ifdef USE_QOS
2932 writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2933 writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
2934 writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
2935 writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
2936#else
2937 writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2938 writel(0x800, cp->regs + REG_TX_MAXBURST_1);
2939 writel(0x800, cp->regs + REG_TX_MAXBURST_2);
2940 writel(0x800, cp->regs + REG_TX_MAXBURST_3);
2941#endif
2942}
2943
2944
2945static inline void cas_init_dma(struct cas *cp)
2946{
2947 cas_init_tx_dma(cp);
2948 cas_init_rx_dma(cp);
2949}
2950
2951static void cas_process_mc_list(struct cas *cp)
2952{
2953 u16 hash_table[16];
2954 u32 crc;
2955 struct netdev_hw_addr *ha;
2956 int i = 1;
2957
2958 memset(hash_table, 0, sizeof(hash_table));
2959 netdev_for_each_mc_addr(ha, cp->dev) {
2960 if (i <= CAS_MC_EXACT_MATCH_SIZE) {
			/* Program one of the exact-match MAC address slots
			 * for the first few multicast addresses.
			 */
2964 writel((ha->addr[4] << 8) | ha->addr[5],
2965 cp->regs + REG_MAC_ADDRN(i*3 + 0));
2966 writel((ha->addr[2] << 8) | ha->addr[3],
2967 cp->regs + REG_MAC_ADDRN(i*3 + 1));
2968 writel((ha->addr[0] << 8) | ha->addr[1],
2969 cp->regs + REG_MAC_ADDRN(i*3 + 2));
2970 i++;
2971 }
2972 else {
			/* Out of exact-match slots: use the 256-bit hash
			 * filter for the rest.
			 */
2976 crc = ether_crc_le(ETH_ALEN, ha->addr);
2977 crc >>= 24;
2978 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
2979 }
2980 }
2981 for (i = 0; i < 16; i++)
2982 writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i));
2983}
2984
2985
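/* Program the multicast filters and return the RX MAC config bits
 * (promiscuous or hash-filter enable) that go with them.
 */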
2986static u32 cas_setup_multicast(struct cas *cp)
2987{
2988 u32 rxcfg = 0;
2989 int i;
2990
2991 if (cp->dev->flags & IFF_PROMISC) {
2992 rxcfg |= MAC_RX_CFG_PROMISC_EN;
2993
2994 } else if (cp->dev->flags & IFF_ALLMULTI) {
2995 for (i=0; i < 16; i++)
2996 writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
2997 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
2998
2999 } else {
3000 cas_process_mc_list(cp);
3001 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
3002 }
3003
3004 return rxcfg;
3005}
3006
3007
3008static void cas_clear_mac_err(struct cas *cp)
3009{
3010 writel(0, cp->regs + REG_MAC_COLL_NORMAL);
3011 writel(0, cp->regs + REG_MAC_COLL_FIRST);
3012 writel(0, cp->regs + REG_MAC_COLL_EXCESS);
3013 writel(0, cp->regs + REG_MAC_COLL_LATE);
3014 writel(0, cp->regs + REG_MAC_TIMER_DEFER);
3015 writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK);
3016 writel(0, cp->regs + REG_MAC_RECV_FRAME);
3017 writel(0, cp->regs + REG_MAC_LEN_ERR);
3018 writel(0, cp->regs + REG_MAC_ALIGN_ERR);
3019 writel(0, cp->regs + REG_MAC_FCS_ERR);
3020 writel(0, cp->regs + REG_MAC_RX_CODE_ERR);
3021}
3022
3023
3024static void cas_mac_reset(struct cas *cp)
3025{
3026 int i;
3027
3028
3029 writel(0x1, cp->regs + REG_MAC_TX_RESET);
3030 writel(0x1, cp->regs + REG_MAC_RX_RESET);
3031
3032
3033 i = STOP_TRIES;
3034 while (i-- > 0) {
3035 if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
3036 break;
3037 udelay(10);
3038 }
3039
3040
3041 i = STOP_TRIES;
3042 while (i-- > 0) {
3043 if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
3044 break;
3045 udelay(10);
3046 }
3047
3048 if (readl(cp->regs + REG_MAC_TX_RESET) |
3049 readl(cp->regs + REG_MAC_RX_RESET))
3050 netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n",
3051 readl(cp->regs + REG_MAC_TX_RESET),
3052 readl(cp->regs + REG_MAC_RX_RESET),
3053 readl(cp->regs + REG_MAC_STATE_MACHINE));
3054}
3055
3056
3057
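/* Reset the MAC and program its static configuration: inter-packet
 * gaps, frame size limits, station and filter addresses, and the
 * interrupt masks.
 */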
3058static void cas_init_mac(struct cas *cp)
3059{
3060 unsigned char *e = &cp->dev->dev_addr[0];
3061 int i;
3062 cas_mac_reset(cp);
3063
3064
3065 writel(CAWR_RR_DIS, cp->regs + REG_CAWR);
3066
3067
3068#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
3069
3070
3071
3072 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
3073 writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
3074#endif
3075
3076 writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);
3077
3078 writel(0x00, cp->regs + REG_MAC_IPG0);
3079 writel(0x08, cp->regs + REG_MAC_IPG1);
3080 writel(0x04, cp->regs + REG_MAC_IPG2);
3081
3082
3083 writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3084
3085
3086 writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);
3087
3088
3089
3090
3091
3092 writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
3093 CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
3094 (CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
3095 cp->regs + REG_MAC_FRAMESIZE_MAX);
3096
3097
3098
3099
3100
3101 if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
3102 writel(0x41, cp->regs + REG_MAC_PA_SIZE);
3103 else
3104 writel(0x07, cp->regs + REG_MAC_PA_SIZE);
3105 writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
3106 writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
3107 writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);
3108
3109 writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);
3110
3111 writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
3112 writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
3113 writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
3114 writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
3115 writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);
3116
3117
3118 for (i = 0; i < 45; i++)
3119 writel(0x0, cp->regs + REG_MAC_ADDRN(i));
3120
3121 writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
3122 writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
3123 writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));
3124
3125 writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
3126 writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
3127 writel(0x0180, cp->regs + REG_MAC_ADDRN(44));
3128
3129 cp->mac_rx_cfg = cas_setup_multicast(cp);
3130
3131 spin_lock(&cp->stat_lock[N_TX_RINGS]);
3132 cas_clear_mac_err(cp);
3133 spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3134
3135
3136
3137
3138
3139 writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
3140 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
3141
3142
3143
3144
3145 writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
3146}
3147
3148
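/* Pick the RX FIFO occupancy thresholds at which pause frames are sent
 * and released, based on the FIFO size and the current MTU.
 */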
3149static void cas_init_pause_thresholds(struct cas *cp)
3150{
3151
3152
3153
3154 if (cp->rx_fifo_size <= (2 * 1024)) {
3155 cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
3156 } else {
3157 int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
3158 if (max_frame * 3 > cp->rx_fifo_size) {
3159 cp->rx_pause_off = 7104;
3160 cp->rx_pause_on = 960;
3161 } else {
3162 int off = (cp->rx_fifo_size - (max_frame * 2));
3163 int on = off - max_frame;
3164 cp->rx_pause_off = off;
3165 cp->rx_pause_on = on;
3166 }
3167 }
3168}
3169
3170static int cas_vpd_match(const void __iomem *p, const char *str)
3171{
3172 int len = strlen(str) + 1;
3173 int i;
3174
3175 for (i = 0; i < len; i++) {
3176 if (readb(p + i) != str[i])
3177 return 0;
3178 }
3179 return 1;
3180}
3181
/* Search the expansion ROM VPD for the factory MAC address and the PHY
 * type.  If nothing usable is found, fall back to the firmware
 * "local-mac-address" property on SPARC or, failing that, to a randomly
 * generated address with a Sun OUI.  Returns the PHY type to use.
 */
3194static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
3195 const int offset)
3196{
3197 void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
3198 void __iomem *base, *kstart;
3199 int i, len;
3200 int found = 0;
3201#define VPD_FOUND_MAC 0x01
3202#define VPD_FOUND_PHY 0x02
3203
3204 int phy_type = CAS_PHY_MII_MDIO0;
3205 int mac_off = 0;
3206
3207#if defined(CONFIG_SPARC)
3208 const unsigned char *addr;
3209#endif
3210
3211
3212 writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
3213 cp->regs + REG_BIM_LOCAL_DEV_EN);
3214
3215
3216 if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
3217 goto use_random_mac_addr;
3218
3219
3220 base = NULL;
3221 for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
3222
3223 if ((readb(p + i + 0) == 0x50) &&
3224 (readb(p + i + 1) == 0x43) &&
3225 (readb(p + i + 2) == 0x49) &&
3226 (readb(p + i + 3) == 0x52)) {
3227 base = p + (readb(p + i + 8) |
3228 (readb(p + i + 9) << 8));
3229 break;
3230 }
3231 }
3232
3233 if (!base || (readb(base) != 0x82))
3234 goto use_random_mac_addr;
3235
3236 i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
3237 while (i < EXPANSION_ROM_SIZE) {
3238 if (readb(base + i) != 0x90)
3239 goto use_random_mac_addr;
3240
3241
3242 len = readb(base + i + 1) | (readb(base + i + 2) << 8);
3243
3244
3245 kstart = base + i + 3;
3246 p = kstart;
3247 while ((p - kstart) < len) {
3248 int klen = readb(p + 2);
3249 int j;
3250 char type;
3251
3252 p += 3;

			/* The VPD descriptors this parser cares about (all
			 * are instance descriptors, tag 'I'):
			 *  - "local-mac-address": byte field 'B', klen 29,
			 *    6 data bytes holding the factory MAC address
			 *  - "entropy-dev" == "vms110": string field 'S',
			 *    klen 24 (only with USE_ENTROPY_DEV)
			 *  - "phy-type" == "pcs": string field 'S', klen 18
			 *  - "phy-interface" == "pcs": string field 'S',
			 *    klen 23
			 * Everything else is skipped.
			 */
3291 if (readb(p) != 'I')
3292 goto next;
3293
3294
3295 type = readb(p + 3);
3296 if (type == 'B') {
3297 if ((klen == 29) && readb(p + 4) == 6 &&
3298 cas_vpd_match(p + 5,
3299 "local-mac-address")) {
3300 if (mac_off++ > offset)
3301 goto next;
3302
3303
3304 for (j = 0; j < 6; j++)
3305 dev_addr[j] =
3306 readb(p + 23 + j);
3307 goto found_mac;
3308 }
3309 }
3310
3311 if (type != 'S')
3312 goto next;
3313
3314#ifdef USE_ENTROPY_DEV
3315 if ((klen == 24) &&
3316 cas_vpd_match(p + 5, "entropy-dev") &&
3317 cas_vpd_match(p + 17, "vms110")) {
3318 cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
3319 goto next;
3320 }
3321#endif
3322
3323 if (found & VPD_FOUND_PHY)
3324 goto next;
3325
3326 if ((klen == 18) && readb(p + 4) == 4 &&
3327 cas_vpd_match(p + 5, "phy-type")) {
3328 if (cas_vpd_match(p + 14, "pcs")) {
3329 phy_type = CAS_PHY_SERDES;
3330 goto found_phy;
3331 }
3332 }
3333
3334 if ((klen == 23) && readb(p + 4) == 4 &&
3335 cas_vpd_match(p + 5, "phy-interface")) {
3336 if (cas_vpd_match(p + 19, "pcs")) {
3337 phy_type = CAS_PHY_SERDES;
3338 goto found_phy;
3339 }
3340 }
3341found_mac:
3342 found |= VPD_FOUND_MAC;
3343 goto next;
3344
3345found_phy:
3346 found |= VPD_FOUND_PHY;
3347
3348next:
3349 p += klen;
3350 }
3351 i += len + 3;
3352 }
3353
3354use_random_mac_addr:
3355 if (found & VPD_FOUND_MAC)
3356 goto done;
3357
3358#if defined(CONFIG_SPARC)
3359 addr = of_get_property(cp->of_node, "local-mac-address", NULL);
3360 if (addr != NULL) {
3361 memcpy(dev_addr, addr, 6);
3362 goto done;
3363 }
3364#endif
3365
3366
3367 pr_info("MAC address not found in ROM VPD\n");
3368 dev_addr[0] = 0x08;
3369 dev_addr[1] = 0x00;
3370 dev_addr[2] = 0x20;
3371 get_random_bytes(dev_addr + 3, 3);
3372
3373done:
3374 writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3375 return phy_type;
3376}
3377
3378
3379static void cas_check_pci_invariants(struct cas *cp)
3380{
3381 struct pci_dev *pdev = cp->pdev;
3382
3383 cp->cas_flags = 0;
3384 if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
3385 (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
3386 if (pdev->revision >= CAS_ID_REVPLUS)
3387 cp->cas_flags |= CAS_FLAG_REG_PLUS;
3388 if (pdev->revision < CAS_ID_REVPLUS02u)
3389 cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
3390
3391
3392
3393
3394 if (pdev->revision < CAS_ID_REV2)
3395 cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
3396 } else {
3397
3398 cp->cas_flags |= CAS_FLAG_REG_PLUS;
3399
3400
3401
3402
3403 if ((pdev->vendor == PCI_VENDOR_ID_NS) &&
3404 (pdev->device == PCI_DEVICE_ID_NS_SATURN))
3405 cp->cas_flags |= CAS_FLAG_SATURN;
3406 }
3407}
3408
3409
3410static int cas_check_invariants(struct cas *cp)
3411{
3412 struct pci_dev *pdev = cp->pdev;
3413 u32 cfg;
3414 int i;
3415
3416
3417 cp->page_order = 0;
3418#ifdef USE_PAGE_ORDER
3419 if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
3420
3421 struct page *page = alloc_pages(GFP_ATOMIC,
3422 CAS_JUMBO_PAGE_SHIFT -
3423 PAGE_SHIFT);
3424 if (page) {
3425 __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
3426 cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
3427 } else {
			pr_info("MTU limited to %d bytes\n", CAS_MAX_MTU);
3429 }
3430 }
3431#endif
3432 cp->page_size = (PAGE_SIZE << cp->page_order);
3433
3434
3435 cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
3436 cp->rx_fifo_size = RX_FIFO_SIZE;
3437
3438
3439
3440
3441 cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
3442 PCI_SLOT(pdev->devfn));
3443 if (cp->phy_type & CAS_PHY_SERDES) {
3444 cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3445 return 0;
3446 }
3447
3448
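	/* MII PHY: work out which MDIO bus it is on, then probe each
	 * address until one returns a valid PHY ID.
	 */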
3449 cfg = readl(cp->regs + REG_MIF_CFG);
3450 if (cfg & MIF_CFG_MDIO_1) {
3451 cp->phy_type = CAS_PHY_MII_MDIO1;
3452 } else if (cfg & MIF_CFG_MDIO_0) {
3453 cp->phy_type = CAS_PHY_MII_MDIO0;
3454 }
3455
3456 cas_mif_poll(cp, 0);
3457 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3458
3459 for (i = 0; i < 32; i++) {
3460 u32 phy_id;
3461 int j;
3462
3463 for (j = 0; j < 3; j++) {
3464 cp->phy_addr = i;
3465 phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
3466 phy_id |= cas_phy_read(cp, MII_PHYSID2);
3467 if (phy_id && (phy_id != 0xFFFFFFFF)) {
3468 cp->phy_id = phy_id;
3469 goto done;
3470 }
3471 }
3472 }
3473 pr_err("MII phy did not respond [%08x]\n",
3474 readl(cp->regs + REG_MIF_STATE_MACHINE));
3475 return -1;
3476
3477done:
3478
3479 cfg = cas_phy_read(cp, MII_BMSR);
3480 if ((cfg & CAS_BMSR_1000_EXTEND) &&
3481 cas_phy_read(cp, CAS_MII_1000_EXTEND))
3482 cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3483 return 0;
3484}
3485
3486
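/* Enable the TX/RX DMA engines and the MAC, then prime the RX kick and
 * completion tail registers so the hardware can start receiving.
 */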
3487static inline void cas_start_dma(struct cas *cp)
3488{
3489 int i;
3490 u32 val;
3491 int txfailed = 0;
3492
3493
3494 val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
3495 writel(val, cp->regs + REG_TX_CFG);
3496 val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
3497 writel(val, cp->regs + REG_RX_CFG);
3498
3499
3500 val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
3501 writel(val, cp->regs + REG_MAC_TX_CFG);
3502 val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
3503 writel(val, cp->regs + REG_MAC_RX_CFG);
3504
3505 i = STOP_TRIES;
3506 while (i-- > 0) {
3507 val = readl(cp->regs + REG_MAC_TX_CFG);
3508 if ((val & MAC_TX_CFG_EN))
3509 break;
3510 udelay(10);
3511 }
3512 if (i < 0) txfailed = 1;
3513 i = STOP_TRIES;
3514 while (i-- > 0) {
3515 val = readl(cp->regs + REG_MAC_RX_CFG);
3516 if ((val & MAC_RX_CFG_EN)) {
3517 if (txfailed) {
3518 netdev_err(cp->dev,
3519 "enabling mac failed [tx:%08x:%08x]\n",
3520 readl(cp->regs + REG_MIF_STATE_MACHINE),
3521 readl(cp->regs + REG_MAC_STATE_MACHINE));
3522 }
3523 goto enable_rx_done;
3524 }
3525 udelay(10);
3526 }
3527 netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
3528 (txfailed ? "tx,rx" : "rx"),
3529 readl(cp->regs + REG_MIF_STATE_MACHINE),
3530 readl(cp->regs + REG_MAC_STATE_MACHINE));
3531
3532enable_rx_done:
3533 cas_unmask_intr(cp);
3534 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
3535 writel(0, cp->regs + REG_RX_COMP_TAIL);
3536
3537 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
3538 if (N_RX_DESC_RINGS > 1)
3539 writel(RX_DESC_RINGN_SIZE(1) - 4,
3540 cp->regs + REG_PLUS_RX_KICK1);
3541
3542 for (i = 1; i < N_RX_COMP_RINGS; i++)
3543 writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
3544 }
3545}
3546
3547
3548static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
3549 int *pause)
3550{
3551 u32 val = readl(cp->regs + REG_PCS_MII_LPA);
3552 *fd = (val & PCS_MII_LPA_FD) ? 1 : 0;
3553 *pause = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00;
3554 if (val & PCS_MII_LPA_ASYM_PAUSE)
3555 *pause |= 0x10;
3556 *spd = 1000;
3557}
3558
3559
3560static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
3561 int *pause)
3562{
3563 u32 val;
3564
3565 *fd = 0;
3566 *spd = 10;
3567 *pause = 0;
3568
3569
3570 val = cas_phy_read(cp, MII_LPA);
3571 if (val & CAS_LPA_PAUSE)
3572 *pause = 0x01;
3573
3574 if (val & CAS_LPA_ASYM_PAUSE)
3575 *pause |= 0x10;
3576
3577 if (val & LPA_DUPLEX)
3578 *fd = 1;
3579 if (val & LPA_100)
3580 *spd = 100;
3581
3582 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
3583 val = cas_phy_read(cp, CAS_MII_1000_STATUS);
3584 if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF))
3585 *spd = 1000;
3586 if (val & CAS_LPA_1000FULL)
3587 *fd = 1;
3588 }
3589}
3590
/* A link-up condition has occurred: read back the negotiated (or
 * forced) link parameters, program the MAC/XIF to match, and then
 * start DMA.
 */
3596static void cas_set_link_modes(struct cas *cp)
3597{
3598 u32 val;
3599 int full_duplex, speed, pause;
3600
3601 full_duplex = 0;
3602 speed = 10;
3603 pause = 0;
3604
3605 if (CAS_PHY_MII(cp->phy_type)) {
3606 cas_mif_poll(cp, 0);
3607 val = cas_phy_read(cp, MII_BMCR);
3608 if (val & BMCR_ANENABLE) {
3609 cas_read_mii_link_mode(cp, &full_duplex, &speed,
3610 &pause);
3611 } else {
3612 if (val & BMCR_FULLDPLX)
3613 full_duplex = 1;
3614
3615 if (val & BMCR_SPEED100)
3616 speed = 100;
3617 else if (val & CAS_BMCR_SPEED1000)
3618 speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
3619 1000 : 100;
3620 }
3621 cas_mif_poll(cp, 1);
3622
3623 } else {
3624 val = readl(cp->regs + REG_PCS_MII_CTRL);
3625 cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
3626 if ((val & PCS_MII_AUTONEG_EN) == 0) {
3627 if (val & PCS_MII_CTRL_DUPLEX)
3628 full_duplex = 1;
3629 }
3630 }
3631
3632 netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
3633 speed, full_duplex ? "full" : "half");
3634
3635 val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
3636 if (CAS_PHY_MII(cp->phy_type)) {
3637 val |= MAC_XIF_MII_BUFFER_OUTPUT_EN;
3638 if (!full_duplex)
3639 val |= MAC_XIF_DISABLE_ECHO;
3640 }
3641 if (full_duplex)
3642 val |= MAC_XIF_FDPLX_LED;
3643 if (speed == 1000)
3644 val |= MAC_XIF_GMII_MODE;
3645 writel(val, cp->regs + REG_MAC_XIF_CFG);
3646
3647
3648 val = MAC_TX_CFG_IPG_EN;
3649 if (full_duplex) {
3650 val |= MAC_TX_CFG_IGNORE_CARRIER;
3651 val |= MAC_TX_CFG_IGNORE_COLL;
3652 } else {
3653#ifndef USE_CSMA_CD_PROTO
3654 val |= MAC_TX_CFG_NEVER_GIVE_UP_EN;
3655 val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM;
3656#endif
3657 }
3658
3659
	/* Half-duplex gigabit needs carrier extension and a 512-byte slot
	 * time; the FCS is then left on received frames, so account for it
	 * in crc_size and enforce the larger minimum frame size.
	 */
3665 if ((speed == 1000) && !full_duplex) {
3666 writel(val | MAC_TX_CFG_CARRIER_EXTEND,
3667 cp->regs + REG_MAC_TX_CFG);
3668
3669 val = readl(cp->regs + REG_MAC_RX_CFG);
3670 val &= ~MAC_RX_CFG_STRIP_FCS;
3671 writel(val | MAC_RX_CFG_CARRIER_EXTEND,
3672 cp->regs + REG_MAC_RX_CFG);
3673
3674 writel(0x200, cp->regs + REG_MAC_SLOT_TIME);
3675
3676 cp->crc_size = 4;
3677
3678 cp->min_frame_size = CAS_1000MB_MIN_FRAME;
3679
3680 } else {
3681 writel(val, cp->regs + REG_MAC_TX_CFG);
3682
3683
3684
3685
3686 val = readl(cp->regs + REG_MAC_RX_CFG);
3687 if (full_duplex) {
3688 val |= MAC_RX_CFG_STRIP_FCS;
3689 cp->crc_size = 0;
3690 cp->min_frame_size = CAS_MIN_MTU;
3691 } else {
3692 val &= ~MAC_RX_CFG_STRIP_FCS;
3693 cp->crc_size = 4;
3694 cp->min_frame_size = CAS_MIN_FRAME;
3695 }
3696 writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
3697 cp->regs + REG_MAC_RX_CFG);
3698 writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3699 }
3700
3701 if (netif_msg_link(cp)) {
3702 if (pause & 0x01) {
3703 netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
3704 cp->rx_fifo_size,
3705 cp->rx_pause_off,
3706 cp->rx_pause_on);
3707 } else if (pause & 0x10) {
3708 netdev_info(cp->dev, "TX pause enabled\n");
3709 } else {
3710 netdev_info(cp->dev, "Pause is disabled\n");
3711 }
3712 }
3713
3714 val = readl(cp->regs + REG_MAC_CTRL_CFG);
3715 val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
3716 if (pause) {
3717 val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
3718 if (pause & 0x01) {
3719 val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
3720 }
3721 }
3722 writel(val, cp->regs + REG_MAC_CTRL_CFG);
3723 cas_start_dma(cp);
3724}
3725
3726
3727static void cas_init_hw(struct cas *cp, int restart_link)
3728{
3729 if (restart_link)
3730 cas_phy_init(cp);
3731
3732 cas_init_pause_thresholds(cp);
3733 cas_init_mac(cp);
3734 cas_init_dma(cp);
3735
3736 if (restart_link) {
3737
3738 cp->timer_ticks = 0;
3739 cas_begin_auto_negotiation(cp, NULL);
3740 } else if (cp->lstate == link_up) {
3741 cas_set_link_modes(cp);
3742 netif_carrier_on(cp->dev);
3743 }
3744}
3745
3746
3747
3748
3749
3750static void cas_hard_reset(struct cas *cp)
3751{
3752 writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3753 udelay(20);
3754 pci_restore_state(cp->pdev);
3755}
3756
3757
3758static void cas_global_reset(struct cas *cp, int blkflag)
3759{
3760 int limit;
3761
3762
3763 if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
		/* On PCS links, keep the PCS/SERDES block out of the reset
		 * (SW_RESET_BLOCK_PCS_SLINK) so the serdes link is not
		 * disturbed needlessly.
		 */
3770 writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
3771 cp->regs + REG_SW_RESET);
3772 } else {
3773 writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
3774 }
3775
3776
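	/* Give the reset a few milliseconds before polling for completion. */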
3777 mdelay(3);
3778
3779 limit = STOP_TRIES;
3780 while (limit-- > 0) {
3781 u32 val = readl(cp->regs + REG_SW_RESET);
3782 if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
3783 goto done;
3784 udelay(10);
3785 }
3786 netdev_err(cp->dev, "sw reset failed\n");
3787
3788done:
3789
3790 writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
3791 BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
3792
	/* Mask every PCI error interrupt except the ones the driver actually
	 * handles; DMA counter overflows in particular are ignored because
	 * they happen constantly.
	 */
3797 writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
3798 PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
3799 PCI_ERR_BIM_DMA_READ), cp->regs +
3800 REG_PCI_ERR_STATUS_MASK);
3801
3802
3803
3804
3805 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3806}
3807
3808static void cas_reset(struct cas *cp, int blkflag)
3809{
3810 u32 val;
3811
3812 cas_mask_intr(cp);
3813 cas_global_reset(cp, blkflag);
3814 cas_mac_reset(cp);
3815 cas_entropy_reset(cp);
3816
3817
3818 val = readl(cp->regs + REG_TX_CFG);
3819 val &= ~TX_CFG_DMA_EN;
3820 writel(val, cp->regs + REG_TX_CFG);
3821
3822 val = readl(cp->regs + REG_RX_CFG);
3823 val &= ~RX_CFG_DMA_EN;
3824 writel(val, cp->regs + REG_RX_CFG);
3825
3826
3827 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
3828 (CAS_HP_ALT_FIRMWARE == cas_prog_null)) {
3829 cas_load_firmware(cp, CAS_HP_FIRMWARE);
3830 } else {
3831 cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
3832 }
3833
3834
3835 spin_lock(&cp->stat_lock[N_TX_RINGS]);
3836 cas_clear_mac_err(cp);
3837 spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3838}
3839
3840
3841static void cas_shutdown(struct cas *cp)
3842{
3843 unsigned long flags;

	/* Mark the hardware as not running so timers do not respawn, then
	 * wait for any pending reset task to finish before tearing down.
	 */
3846 cp->hw_running = 0;
3847
3848 del_timer_sync(&cp->link_timer);
3849
3850
3851#if 0
3852 while (atomic_read(&cp->reset_task_pending_mtu) ||
3853 atomic_read(&cp->reset_task_pending_spare) ||
3854 atomic_read(&cp->reset_task_pending_all))
3855 schedule();
3856
3857#else
3858 while (atomic_read(&cp->reset_task_pending))
3859 schedule();
3860#endif
3861
3862 cas_lock_all_save(cp, flags);
3863 cas_reset(cp, 0);
3864 if (cp->cas_flags & CAS_FLAG_SATURN)
3865 cas_phy_powerdown(cp);
3866 cas_unlock_all_restore(cp, flags);
3867}
3868
3869static int cas_change_mtu(struct net_device *dev, int new_mtu)
3870{
3871 struct cas *cp = netdev_priv(dev);
3872
3873 if (new_mtu < CAS_MIN_MTU || new_mtu > CAS_MAX_MTU)
3874 return -EINVAL;
3875
3876 dev->mtu = new_mtu;
3877 if (!netif_running(dev) || !netif_device_present(dev))
3878 return 0;
3879
3880
3881#if 1
3882 atomic_inc(&cp->reset_task_pending);
3883 if ((cp->phy_type & CAS_PHY_SERDES)) {
3884 atomic_inc(&cp->reset_task_pending_all);
3885 } else {
3886 atomic_inc(&cp->reset_task_pending_mtu);
3887 }
3888 schedule_work(&cp->reset_task);
3889#else
3890 atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
3891 CAS_RESET_ALL : CAS_RESET_MTU);
3892 pr_err("reset called in cas_change_mtu\n");
3893 schedule_work(&cp->reset_task);
3894#endif
3895
3896 flush_work_sync(&cp->reset_task);
3897 return 0;
3898}
3899
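/* Unmap and free every skb still queued on TX ring 'ring', skipping the
 * extra descriptors used by the tiny-buffer workaround.
 */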
3900static void cas_clean_txd(struct cas *cp, int ring)
3901{
3902 struct cas_tx_desc *txd = cp->init_txds[ring];
3903 struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
3904 u64 daddr, dlen;
3905 int i, size;
3906
3907 size = TX_DESC_RINGN_SIZE(ring);
3908 for (i = 0; i < size; i++) {
3909 int frag;
3910
3911 if (skbs[i] == NULL)
3912 continue;
3913
3914 skb = skbs[i];
3915 skbs[i] = NULL;
3916
3917 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
3918 int ent = i & (size - 1);
3919
3920
3921
3922
3923 daddr = le64_to_cpu(txd[ent].buffer);
3924 dlen = CAS_VAL(TX_DESC_BUFLEN,
3925 le64_to_cpu(txd[ent].control));
3926 pci_unmap_page(cp->pdev, daddr, dlen,
3927 PCI_DMA_TODEVICE);
3928
3929 if (frag != skb_shinfo(skb)->nr_frags) {
3930 i++;
3931
3932
3933
3934
3935 ent = i & (size - 1);
3936 if (cp->tx_tiny_use[ring][ent].used)
3937 i++;
3938 }
3939 }
3940 dev_kfree_skb_any(skb);
3941 }
3942
3943
3944 memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
3945}
3946
3947
3948static inline void cas_free_rx_desc(struct cas *cp, int ring)
3949{
3950 cas_page_t **page = cp->rx_pages[ring];
3951 int i, size;
3952
3953 size = RX_DESC_RINGN_SIZE(ring);
3954 for (i = 0; i < size; i++) {
3955 if (page[i]) {
3956 cas_page_free(cp, page[i]);
3957 page[i] = NULL;
3958 }
3959 }
3960}
3961
3962static void cas_free_rxds(struct cas *cp)
3963{
3964 int i;
3965
3966 for (i = 0; i < N_RX_DESC_RINGS; i++)
3967 cas_free_rx_desc(cp, i);
3968}
3969
3970
3971static void cas_clean_rings(struct cas *cp)
3972{
3973 int i;
3974
3975
3976 memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
3977 memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
3978 for (i = 0; i < N_TX_RINGS; i++)
3979 cas_clean_txd(cp, i);
3980
3981
3982 memset(cp->init_block, 0, sizeof(struct cas_init_block));
3983 cas_clean_rxds(cp);
3984 cas_clean_rxcs(cp);
3985}
3986
3987
3988static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
3989{
3990 cas_page_t **page = cp->rx_pages[ring];
3991 int size, i = 0;
3992
3993 size = RX_DESC_RINGN_SIZE(ring);
3994 for (i = 0; i < size; i++) {
3995 if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
3996 return -1;
3997 }
3998 return 0;
3999}
4000
4001static int cas_alloc_rxds(struct cas *cp)
4002{
4003 int i;
4004
4005 for (i = 0; i < N_RX_DESC_RINGS; i++) {
4006 if (cas_alloc_rx_desc(cp, i) < 0) {
4007 cas_free_rxds(cp);
4008 return -1;
4009 }
4010 }
4011 return 0;
4012}
4013
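/* Deferred reset handler: recover spare RX pages and, when a full or
 * MTU-change reset is pending, reinitialize the chip under the device
 * locks.
 */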
4014static void cas_reset_task(struct work_struct *work)
4015{
4016 struct cas *cp = container_of(work, struct cas, reset_task);
4017#if 0
4018 int pending = atomic_read(&cp->reset_task_pending);
4019#else
4020 int pending_all = atomic_read(&cp->reset_task_pending_all);
4021 int pending_spare = atomic_read(&cp->reset_task_pending_spare);
4022 int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);
4023
4024 if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) {
4025
4026
4027
4028 atomic_dec(&cp->reset_task_pending);
4029 return;
4030 }
4031#endif
4032
4033
4034
4035
4036 if (cp->hw_running) {
4037 unsigned long flags;
4038
4039
4040 netif_device_detach(cp->dev);
4041 cas_lock_all_save(cp, flags);
4042
4043 if (cp->opened) {
4044
4045
4046
4047
4048 cas_spare_recover(cp, GFP_ATOMIC);
4049 }
4050#if 1
4051
4052 if (!pending_all && !pending_mtu)
4053 goto done;
4054#else
4055 if (pending == CAS_RESET_SPARE)
4056 goto done;
4057#endif
4058
4059
4060
4061
4062
4063
4064
4065#if 1
4066 cas_reset(cp, !(pending_all > 0));
4067 if (cp->opened)
4068 cas_clean_rings(cp);
4069 cas_init_hw(cp, (pending_all > 0));
4070#else
4071 cas_reset(cp, !(pending == CAS_RESET_ALL));
4072 if (cp->opened)
4073 cas_clean_rings(cp);
4074 cas_init_hw(cp, pending == CAS_RESET_ALL);
4075#endif
4076
4077done:
4078 cas_unlock_all_restore(cp, flags);
4079 netif_device_attach(cp->dev);
4080 }
4081#if 1
4082 atomic_sub(pending_all, &cp->reset_task_pending_all);
4083 atomic_sub(pending_spare, &cp->reset_task_pending_spare);
4084 atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
4085 atomic_dec(&cp->reset_task_pending);
4086#else
4087 atomic_set(&cp->reset_task_pending, 0);
4088#endif
4089}
4090
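/* Periodic link timer: retry any deferred RX buffer refills, check the
 * PHY/PCS link state, and watch for a wedged TX MAC, scheduling a chip
 * reset if something looks stuck.
 */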
4091static void cas_link_timer(unsigned long data)
4092{
4093 struct cas *cp = (struct cas *) data;
4094 int mask, pending = 0, reset = 0;
4095 unsigned long flags;
4096
4097 if (link_transition_timeout != 0 &&
4098 cp->link_transition_jiffies_valid &&
4099 ((jiffies - cp->link_transition_jiffies) >
4100 (link_transition_timeout))) {
4101
4102
4103
4104
4105 cp->link_transition_jiffies_valid = 0;
4106 }
4107
4108 if (!cp->hw_running)
4109 return;
4110
4111 spin_lock_irqsave(&cp->lock, flags);
4112 cas_lock_tx(cp);
4113 cas_entropy_gather(cp);
4114
4115
4116
4117
4118#if 1
4119 if (atomic_read(&cp->reset_task_pending_all) ||
4120 atomic_read(&cp->reset_task_pending_spare) ||
4121 atomic_read(&cp->reset_task_pending_mtu))
4122 goto done;
4123#else
4124 if (atomic_read(&cp->reset_task_pending))
4125 goto done;
4126#endif
4127
4128
4129 if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) {
4130 int i, rmask;
4131
4132 for (i = 0; i < MAX_RX_DESC_RINGS; i++) {
4133 rmask = CAS_FLAG_RXD_POST(i);
4134 if ((mask & rmask) == 0)
4135 continue;
4136
4137
4138 if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
4139 pending = 1;
4140 continue;
4141 }
4142 cp->cas_flags &= ~rmask;
4143 }
4144 }
4145
4146 if (CAS_PHY_MII(cp->phy_type)) {
4147 u16 bmsr;
4148 cas_mif_poll(cp, 0);
4149 bmsr = cas_phy_read(cp, MII_BMSR);

		/* MII_BMSR latches link failures, so read it a second time to
		 * get the current link state.
		 */
4155 bmsr = cas_phy_read(cp, MII_BMSR);
4156 cas_mif_poll(cp, 1);
4157 readl(cp->regs + REG_MIF_STATUS);
4158 reset = cas_mii_link_check(cp, bmsr);
4159 } else {
4160 reset = cas_pcs_link_check(cp);
4161 }
4162
4163 if (reset)
4164 goto done;
4165
4166
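	/* Look for a wedged TX state machine, or a TX FIFO that still holds
	 * data while the packet counter reads zero.
	 */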
4167 if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
4168 u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
4169 u32 wptr, rptr;
4170 int tlm = CAS_VAL(MAC_SM_TLM, val);
4171
4172 if (((tlm == 0x5) || (tlm == 0x3)) &&
4173 (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
4174 netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
4175 "tx err: MAC_STATE[%08x]\n", val);
4176 reset = 1;
4177 goto done;
4178 }
4179
4180 val = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
4181 wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
4182 rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
4183 if ((val == 0) && (wptr != rptr)) {
4184 netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
4185 "tx err: TX_FIFO[%08x:%08x:%08x]\n",
4186 val, wptr, rptr);
4187 reset = 1;
4188 }
4189
4190 if (reset)
4191 cas_hard_reset(cp);
4192 }
4193
4194done:
4195 if (reset) {
4196#if 1
4197 atomic_inc(&cp->reset_task_pending);
4198 atomic_inc(&cp->reset_task_pending_all);
4199 schedule_work(&cp->reset_task);
4200#else
4201 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
4202 pr_err("reset called in cas_link_timer\n");
4203 schedule_work(&cp->reset_task);
4204#endif
4205 }
4206
4207 if (!pending)
4208 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
4209 cas_unlock_tx(cp);
4210 spin_unlock_irqrestore(&cp->lock, flags);
4211}
4212
/* The "tiny" bounce buffers back the TX page-boundary workaround; one
 * block of them is allocated per TX ring.
 */
4216static void cas_tx_tiny_free(struct cas *cp)
4217{
4218 struct pci_dev *pdev = cp->pdev;
4219 int i;
4220
4221 for (i = 0; i < N_TX_RINGS; i++) {
4222 if (!cp->tx_tiny_bufs[i])
4223 continue;
4224
4225 pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
4226 cp->tx_tiny_bufs[i],
4227 cp->tx_tiny_dvma[i]);
4228 cp->tx_tiny_bufs[i] = NULL;
4229 }
4230}
4231
4232static int cas_tx_tiny_alloc(struct cas *cp)
4233{
4234 struct pci_dev *pdev = cp->pdev;
4235 int i;
4236
4237 for (i = 0; i < N_TX_RINGS; i++) {
4238 cp->tx_tiny_bufs[i] =
4239 pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
4240 &cp->tx_tiny_dvma[i]);
4241 if (!cp->tx_tiny_bufs[i]) {
4242 cas_tx_tiny_free(cp);
4243 return -1;
4244 }
4245 }
4246 return 0;
4247}
4248
4249
4250static int cas_open(struct net_device *dev)
4251{
4252 struct cas *cp = netdev_priv(dev);
4253 int hw_was_up, err;
4254 unsigned long flags;
4255
4256 mutex_lock(&cp->pm_mutex);
4257
4258 hw_was_up = cp->hw_running;
4259
4260
4261
4262
4263 if (!cp->hw_running) {
4264
4265 cas_lock_all_save(cp, flags);
4266
4267
4268
4269
4270
4271 cas_reset(cp, 0);
4272 cp->hw_running = 1;
4273 cas_unlock_all_restore(cp, flags);
4274 }
4275
4276 err = -ENOMEM;
4277 if (cas_tx_tiny_alloc(cp) < 0)
4278 goto err_unlock;
4279
4280
4281 if (cas_alloc_rxds(cp) < 0)
4282 goto err_tx_tiny;
4283
4284
4285 cas_spare_init(cp);
4286 cas_spare_recover(cp, GFP_KERNEL);
4287
4288
4289
4290
4291
4292
4293 if (request_irq(cp->pdev->irq, cas_interrupt,
4294 IRQF_SHARED, dev->name, (void *) dev)) {
4295 netdev_err(cp->dev, "failed to request irq !\n");
4296 err = -EAGAIN;
4297 goto err_spare;
4298 }
4299
4300#ifdef USE_NAPI
4301 napi_enable(&cp->napi);
4302#endif
4303
4304 cas_lock_all_save(cp, flags);
4305 cas_clean_rings(cp);
4306 cas_init_hw(cp, !hw_was_up);
4307 cp->opened = 1;
4308 cas_unlock_all_restore(cp, flags);
4309
4310 netif_start_queue(dev);
4311 mutex_unlock(&cp->pm_mutex);
4312 return 0;
4313
4314err_spare:
4315 cas_spare_free(cp);
4316 cas_free_rxds(cp);
4317err_tx_tiny:
4318 cas_tx_tiny_free(cp);
4319err_unlock:
4320 mutex_unlock(&cp->pm_mutex);
4321 return err;
4322}
4323
4324static int cas_close(struct net_device *dev)
4325{
4326 unsigned long flags;
4327 struct cas *cp = netdev_priv(dev);
4328
4329#ifdef USE_NAPI
4330 napi_disable(&cp->napi);
4331#endif
4332
4333 mutex_lock(&cp->pm_mutex);
4334
4335 netif_stop_queue(dev);
4336
4337
4338 cas_lock_all_save(cp, flags);
4339 cp->opened = 0;
4340 cas_reset(cp, 0);
4341 cas_phy_init(cp);
4342 cas_begin_auto_negotiation(cp, NULL);
4343 cas_clean_rings(cp);
4344 cas_unlock_all_restore(cp, flags);
4345
4346 free_irq(cp->pdev->irq, (void *) dev);
4347 cas_spare_free(cp);
4348 cas_free_rxds(cp);
4349 cas_tx_tiny_free(cp);
4350 mutex_unlock(&cp->pm_mutex);
4351 return 0;
4352}
4353
4354static struct {
4355 const char name[ETH_GSTRING_LEN];
4356} ethtool_cassini_statnames[] = {
4357 {"collisions"},
4358 {"rx_bytes"},
4359 {"rx_crc_errors"},
4360 {"rx_dropped"},
4361 {"rx_errors"},
4362 {"rx_fifo_errors"},
4363 {"rx_frame_errors"},
4364 {"rx_length_errors"},
4365 {"rx_over_errors"},
4366 {"rx_packets"},
4367 {"tx_aborted_errors"},
4368 {"tx_bytes"},
4369 {"tx_dropped"},
4370 {"tx_errors"},
4371 {"tx_fifo_errors"},
4372 {"tx_packets"}
4373};
4374#define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames)
4375
4376static struct {
4377 const int offsets;
4378} ethtool_register_table[] = {
4379 {-MII_BMSR},
4380 {-MII_BMCR},
4381 {REG_CAWR},
4382 {REG_INF_BURST},
4383 {REG_BIM_CFG},
4384 {REG_RX_CFG},
4385 {REG_HP_CFG},
4386 {REG_MAC_TX_CFG},
4387 {REG_MAC_RX_CFG},
4388 {REG_MAC_CTRL_CFG},
4389 {REG_MAC_XIF_CFG},
4390 {REG_MIF_CFG},
4391 {REG_PCS_CFG},
4392 {REG_SATURN_PCFG},
4393 {REG_PCS_MII_STATUS},
4394 {REG_PCS_STATE_MACHINE},
4395 {REG_MAC_COLL_EXCESS},
4396 {REG_MAC_COLL_LATE}
4397};
4398#define CAS_REG_LEN ARRAY_SIZE(ethtool_register_table)
4399#define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN)
4400
4401static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
4402{
4403 u8 *p;
4404 int i;
4405 unsigned long flags;
4406
4407 spin_lock_irqsave(&cp->lock, flags);
	for (i = 0, p = ptr; i < len; i++, p += sizeof(u32)) {
		u16 hval;
		u32 val;
		if (ethtool_register_table[i].offsets < 0) {
			hval = cas_phy_read(cp,
				    -ethtool_register_table[i].offsets);
			val = hval;
		} else {
			val = readl(cp->regs +
				    ethtool_register_table[i].offsets);
		}
		memcpy(p, (u8 *)&val, sizeof(u32));
	}
4420 spin_unlock_irqrestore(&cp->lock, flags);
4421}
4422
4423static struct net_device_stats *cas_get_stats(struct net_device *dev)
4424{
4425 struct cas *cp = netdev_priv(dev);
4426 struct net_device_stats *stats = cp->net_stats;
4427 unsigned long flags;
4428 int i;
4429 unsigned long tmp;
4430
4431
4432 if (!cp->hw_running)
4433 return stats + N_TX_RINGS;
4434
	/* Fold the 16-bit hardware MAC error counters into the aggregate
	 * entry and clear them, then roll every per-ring software counter
	 * into the same aggregate slot (stats[N_TX_RINGS]).
	 */
4443 spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
4444 stats[N_TX_RINGS].rx_crc_errors +=
4445 readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
4446 stats[N_TX_RINGS].rx_frame_errors +=
4447 readl(cp->regs + REG_MAC_ALIGN_ERR) &0xffff;
4448 stats[N_TX_RINGS].rx_length_errors +=
4449 readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
4450#if 1
4451 tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
4452 (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff);
4453 stats[N_TX_RINGS].tx_aborted_errors += tmp;
4454 stats[N_TX_RINGS].collisions +=
4455 tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
4456#else
4457 stats[N_TX_RINGS].tx_aborted_errors +=
4458 readl(cp->regs + REG_MAC_COLL_EXCESS);
4459 stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
4460 readl(cp->regs + REG_MAC_COLL_LATE);
4461#endif
4462 cas_clear_mac_err(cp);
4463
4464
4465 spin_lock(&cp->stat_lock[0]);
4466 stats[N_TX_RINGS].collisions += stats[0].collisions;
4467 stats[N_TX_RINGS].rx_over_errors += stats[0].rx_over_errors;
4468 stats[N_TX_RINGS].rx_frame_errors += stats[0].rx_frame_errors;
4469 stats[N_TX_RINGS].rx_fifo_errors += stats[0].rx_fifo_errors;
4470 stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors;
4471 stats[N_TX_RINGS].tx_fifo_errors += stats[0].tx_fifo_errors;
4472 spin_unlock(&cp->stat_lock[0]);
4473
4474 for (i = 0; i < N_TX_RINGS; i++) {
4475 spin_lock(&cp->stat_lock[i]);
4476 stats[N_TX_RINGS].rx_length_errors +=
4477 stats[i].rx_length_errors;
4478 stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
4479 stats[N_TX_RINGS].rx_packets += stats[i].rx_packets;
4480 stats[N_TX_RINGS].tx_packets += stats[i].tx_packets;
4481 stats[N_TX_RINGS].rx_bytes += stats[i].rx_bytes;
4482 stats[N_TX_RINGS].tx_bytes += stats[i].tx_bytes;
4483 stats[N_TX_RINGS].rx_errors += stats[i].rx_errors;
4484 stats[N_TX_RINGS].tx_errors += stats[i].tx_errors;
4485 stats[N_TX_RINGS].rx_dropped += stats[i].rx_dropped;
4486 stats[N_TX_RINGS].tx_dropped += stats[i].tx_dropped;
4487 memset(stats + i, 0, sizeof(struct net_device_stats));
4488 spin_unlock(&cp->stat_lock[i]);
4489 }
4490 spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags);
4491 return stats + N_TX_RINGS;
4492}
4493
4494
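/* Reprogram the RX MAC filters: RX is briefly disabled so the hash
 * table and exact-match registers can be rewritten safely.
 */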
4495static void cas_set_multicast(struct net_device *dev)
4496{
4497 struct cas *cp = netdev_priv(dev);
4498 u32 rxcfg, rxcfg_new;
4499 unsigned long flags;
4500 int limit = STOP_TRIES;
4501
4502 if (!cp->hw_running)
4503 return;
4504
4505 spin_lock_irqsave(&cp->lock, flags);
4506 rxcfg = readl(cp->regs + REG_MAC_RX_CFG);
4507
4508
4509 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4510 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
4511 if (!limit--)
4512 break;
4513 udelay(10);
4514 }
4515
4516
4517 limit = STOP_TRIES;
4518 rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN);
4519 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4520 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) {
4521 if (!limit--)
4522 break;
4523 udelay(10);
4524 }
4525
4526
4527 cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp);
4528 rxcfg |= rxcfg_new;
4529 writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
4530 spin_unlock_irqrestore(&cp->lock, flags);
4531}
4532
4533static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4534{
4535 struct cas *cp = netdev_priv(dev);
4536 strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN);
4537 strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN);
4538 info->fw_version[0] = '\0';
4539 strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN);
4540 info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
4541 cp->casreg_len : CAS_MAX_REGS;
4542 info->n_stats = CAS_NUM_STAT_KEYS;
4543}
4544
4545static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4546{
4547 struct cas *cp = netdev_priv(dev);
4548 u16 bmcr;
4549 int full_duplex, speed, pause;
4550 unsigned long flags;
4551 enum link_state linkstate = link_up;
4552
4553 cmd->advertising = 0;
4554 cmd->supported = SUPPORTED_Autoneg;
4555 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
4556 cmd->supported |= SUPPORTED_1000baseT_Full;
4557 cmd->advertising |= ADVERTISED_1000baseT_Full;
4558 }
4559
4560
4561 spin_lock_irqsave(&cp->lock, flags);
4562 bmcr = 0;
4563 linkstate = cp->lstate;
4564 if (CAS_PHY_MII(cp->phy_type)) {
4565 cmd->port = PORT_MII;
4566 cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ?
4567 XCVR_INTERNAL : XCVR_EXTERNAL;
4568 cmd->phy_address = cp->phy_addr;
4569 cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
4570 ADVERTISED_10baseT_Half |
4571 ADVERTISED_10baseT_Full |
4572 ADVERTISED_100baseT_Half |
4573 ADVERTISED_100baseT_Full;
4574
4575 cmd->supported |=
4576 (SUPPORTED_10baseT_Half |
4577 SUPPORTED_10baseT_Full |
4578 SUPPORTED_100baseT_Half |
4579 SUPPORTED_100baseT_Full |
4580 SUPPORTED_TP | SUPPORTED_MII);
4581
4582 if (cp->hw_running) {
4583 cas_mif_poll(cp, 0);
4584 bmcr = cas_phy_read(cp, MII_BMCR);
4585 cas_read_mii_link_mode(cp, &full_duplex,
4586 &speed, &pause);
4587 cas_mif_poll(cp, 1);
4588 }
4589
4590 } else {
4591 cmd->port = PORT_FIBRE;
4592 cmd->transceiver = XCVR_INTERNAL;
4593 cmd->phy_address = 0;
4594 cmd->supported |= SUPPORTED_FIBRE;
4595 cmd->advertising |= ADVERTISED_FIBRE;
4596
4597 if (cp->hw_running) {
4598
4599 bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
4600 cas_read_pcs_link_mode(cp, &full_duplex,
4601 &speed, &pause);
4602 }
4603 }
4604 spin_unlock_irqrestore(&cp->lock, flags);
4605
4606 if (bmcr & BMCR_ANENABLE) {
4607 cmd->advertising |= ADVERTISED_Autoneg;
4608 cmd->autoneg = AUTONEG_ENABLE;
4609 ethtool_cmd_speed_set(cmd, ((speed == 10) ?
4610 SPEED_10 :
4611 ((speed == 1000) ?
4612 SPEED_1000 : SPEED_100)));
4613 cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
4614 } else {
4615 cmd->autoneg = AUTONEG_DISABLE;
4616 ethtool_cmd_speed_set(cmd, ((bmcr & CAS_BMCR_SPEED1000) ?
4617 SPEED_1000 :
4618 ((bmcr & BMCR_SPEED100) ?
4619 SPEED_100 : SPEED_10)));
4620 cmd->duplex =
4621 (bmcr & BMCR_FULLDPLX) ?
4622 DUPLEX_FULL : DUPLEX_HALF;
4623 }
4624 if (linkstate != link_up) {
		/* With the link down and autonegotiation enabled, report the
		 * speed and duplex as unknown (ethtool prints "Unknown" for
		 * out-of-range values such as the 0xff duplex below).  In
		 * forced mode, report the speed and duplex that were
		 * configured.
		 */
4635 if (cp->link_cntl & BMCR_ANENABLE) {
4636 ethtool_cmd_speed_set(cmd, 0);
4637 cmd->duplex = 0xff;
4638 } else {
4639 ethtool_cmd_speed_set(cmd, SPEED_10);
4640 if (cp->link_cntl & BMCR_SPEED100) {
4641 ethtool_cmd_speed_set(cmd, SPEED_100);
4642 } else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
4643 ethtool_cmd_speed_set(cmd, SPEED_1000);
4644 }
4645 cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)?
4646 DUPLEX_FULL : DUPLEX_HALF;
4647 }
4648 }
4649 return 0;
4650}
4651
4652static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4653{
4654 struct cas *cp = netdev_priv(dev);
4655 unsigned long flags;
4656 u32 speed = ethtool_cmd_speed(cmd);
4657
4658
4659 if (cmd->autoneg != AUTONEG_ENABLE &&
4660 cmd->autoneg != AUTONEG_DISABLE)
4661 return -EINVAL;
4662
4663 if (cmd->autoneg == AUTONEG_DISABLE &&
4664 ((speed != SPEED_1000 &&
4665 speed != SPEED_100 &&
4666 speed != SPEED_10) ||
4667 (cmd->duplex != DUPLEX_HALF &&
4668 cmd->duplex != DUPLEX_FULL)))
4669 return -EINVAL;
4670
4671
4672 spin_lock_irqsave(&cp->lock, flags);
4673 cas_begin_auto_negotiation(cp, cmd);
4674 spin_unlock_irqrestore(&cp->lock, flags);
4675 return 0;
4676}
4677
4678static int cas_nway_reset(struct net_device *dev)
4679{
4680 struct cas *cp = netdev_priv(dev);
4681 unsigned long flags;
4682
4683 if ((cp->link_cntl & BMCR_ANENABLE) == 0)
4684 return -EINVAL;
4685
4686
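	/* Restart the autonegotiation process. */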
4687 spin_lock_irqsave(&cp->lock, flags);
4688 cas_begin_auto_negotiation(cp, NULL);
4689 spin_unlock_irqrestore(&cp->lock, flags);
4690
4691 return 0;
4692}
4693
4694static u32 cas_get_link(struct net_device *dev)
4695{
4696 struct cas *cp = netdev_priv(dev);
4697 return cp->lstate == link_up;
4698}
4699
4700static u32 cas_get_msglevel(struct net_device *dev)
4701{
4702 struct cas *cp = netdev_priv(dev);
4703 return cp->msg_enable;
4704}
4705
4706static void cas_set_msglevel(struct net_device *dev, u32 value)
4707{
4708 struct cas *cp = netdev_priv(dev);
4709 cp->msg_enable = value;
4710}
4711
4712static int cas_get_regs_len(struct net_device *dev)
4713{
4714 struct cas *cp = netdev_priv(dev);
4715 return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len: CAS_MAX_REGS;
4716}
4717
4718static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
4719 void *p)
4720{
4721 struct cas *cp = netdev_priv(dev);
4722 regs->version = 0;
4723
4724 cas_read_regs(cp, p, regs->len / sizeof(u32));
4725}
4726
4727static int cas_get_sset_count(struct net_device *dev, int sset)
4728{
4729 switch (sset) {
4730 case ETH_SS_STATS:
4731 return CAS_NUM_STAT_KEYS;
4732 default:
4733 return -EOPNOTSUPP;
4734 }
4735}
4736
4737static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4738{
4739	memcpy(data, &ethtool_cassini_statnames,
4740	       CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
4741}
4742
4743static void cas_get_ethtool_stats(struct net_device *dev,
4744 struct ethtool_stats *estats, u64 *data)
4745{
4746 struct cas *cp = netdev_priv(dev);
4747 struct net_device_stats *stats = cas_get_stats(cp->dev);
4748 int i = 0;
4749 data[i++] = stats->collisions;
4750 data[i++] = stats->rx_bytes;
4751 data[i++] = stats->rx_crc_errors;
4752 data[i++] = stats->rx_dropped;
4753 data[i++] = stats->rx_errors;
4754 data[i++] = stats->rx_fifo_errors;
4755 data[i++] = stats->rx_frame_errors;
4756 data[i++] = stats->rx_length_errors;
4757 data[i++] = stats->rx_over_errors;
4758 data[i++] = stats->rx_packets;
4759 data[i++] = stats->tx_aborted_errors;
4760 data[i++] = stats->tx_bytes;
4761 data[i++] = stats->tx_dropped;
4762 data[i++] = stats->tx_errors;
4763 data[i++] = stats->tx_fifo_errors;
4764 data[i++] = stats->tx_packets;
4765 BUG_ON(i != CAS_NUM_STAT_KEYS);
4766}
4767
4768static const struct ethtool_ops cas_ethtool_ops = {
4769 .get_drvinfo = cas_get_drvinfo,
4770 .get_settings = cas_get_settings,
4771 .set_settings = cas_set_settings,
4772 .nway_reset = cas_nway_reset,
4773 .get_link = cas_get_link,
4774 .get_msglevel = cas_get_msglevel,
4775 .set_msglevel = cas_set_msglevel,
4776 .get_regs_len = cas_get_regs_len,
4777 .get_regs = cas_get_regs,
4778 .get_sset_count = cas_get_sset_count,
4779 .get_strings = cas_get_strings,
4780 .get_ethtool_stats = cas_get_ethtool_stats,
4781};
4782
4783static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4784{
4785 struct cas *cp = netdev_priv(dev);
4786 struct mii_ioctl_data *data = if_mii(ifr);
4787 unsigned long flags;
4788 int rc = -EOPNOTSUPP;
4789
	/* Hold the PM mutex while handling the ioctl so we do not
	 * race with open/close and power management.
	 */
4793 mutex_lock(&cp->pm_mutex);
4794 switch (cmd) {
4795 case SIOCGMIIPHY:
4796 data->phy_id = cp->phy_addr;
4797
4798
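		/* fall through */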
4799 case SIOCGMIIREG:
4800 spin_lock_irqsave(&cp->lock, flags);
4801 cas_mif_poll(cp, 0);
4802 data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
4803 cas_mif_poll(cp, 1);
4804 spin_unlock_irqrestore(&cp->lock, flags);
4805 rc = 0;
4806 break;
4807
4808 case SIOCSMIIREG:
4809 spin_lock_irqsave(&cp->lock, flags);
4810 cas_mif_poll(cp, 0);
4811 rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
4812 cas_mif_poll(cp, 1);
4813 spin_unlock_irqrestore(&cp->lock, flags);
4814 break;
4815 default:
4816 break;
4817 }
4818
4819 mutex_unlock(&cp->pm_mutex);
4820 return rc;
4821}
4822
/* When the Cassini sits behind a specific Intel PCI bridge
 * (vendor 0x8086, device 0x537c), tweak the bridge's configuration
 * registers to favor the card.
 */
4827static void __devinit cas_program_bridge(struct pci_dev *cas_pdev)
4828{
4829 struct pci_dev *pdev = cas_pdev->bus->self;
4830 u32 val;
4831
4832 if (!pdev)
4833 return;
4834
4835 if (pdev->vendor != 0x8086 || pdev->device != 0x537c)
4836 return;
4837
	/* Read-modify-write the bridge config dword at offset 0x40,
	 * clearing bit 18 (0x00040000).
	 */
4843 pci_read_config_dword(pdev, 0x40, &val);
4844 val &= ~0x00040000;
4845 pci_write_config_dword(pdev, 0x40, val);
4846
	/* 16-bit bridge config register at offset 0x50: set the
	 * field in bits 15:10 to 5 and max out the low ten bits
	 * (0x3ff).
	 */
4869 pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff);
4870
	/* 16-bit bridge config register at offset 0x52: the four
	 * 3-bit fields (bits 15:13, 12:10, 9:7 and 6:4) are all set
	 * to 7 and the low nibble is set to 0xf, i.e. everything is
	 * maxed out.
	 */
4891 pci_write_config_word(pdev, 0x52,
4892 (0x7 << 13) |
4893 (0x7 << 10) |
4894 (0x7 << 7) |
4895 (0x7 << 4) |
4896 (0xf << 0));
4897
4898
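	/* Set the bridge's cache line size to 0x08 (in 32-bit words,
	 * i.e. 32 bytes).
	 */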
4899 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
4900
	/* Max out the bridge's latency timer so the card can hold
	 * the bus as long as it needs.
	 */
4904 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff);
4905}
4906
4907static const struct net_device_ops cas_netdev_ops = {
4908 .ndo_open = cas_open,
4909 .ndo_stop = cas_close,
4910 .ndo_start_xmit = cas_start_xmit,
4911 .ndo_get_stats = cas_get_stats,
4912 .ndo_set_multicast_list = cas_set_multicast,
4913 .ndo_do_ioctl = cas_ioctl,
4914 .ndo_tx_timeout = cas_tx_timeout,
4915 .ndo_change_mtu = cas_change_mtu,
4916 .ndo_set_mac_address = eth_mac_addr,
4917 .ndo_validate_addr = eth_validate_addr,
4918#ifdef CONFIG_NET_POLL_CONTROLLER
4919 .ndo_poll_controller = cas_netpoll,
4920#endif
4921};
4922
4923static int __devinit cas_init_one(struct pci_dev *pdev,
4924 const struct pci_device_id *ent)
4925{
4926 static int cas_version_printed = 0;
4927 unsigned long casreg_len;
4928 struct net_device *dev;
4929 struct cas *cp;
4930 int i, err, pci_using_dac;
4931 u16 pci_cmd;
4932 u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
4933
4934 if (cas_version_printed++ == 0)
4935 pr_info("%s", version);
4936
4937 err = pci_enable_device(pdev);
4938 if (err) {
4939 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
4940 return err;
4941 }
4942
4943 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
4944 dev_err(&pdev->dev, "Cannot find proper PCI device "
4945 "base address, aborting\n");
4946 err = -ENODEV;
4947 goto err_out_disable_pdev;
4948 }
4949
4950 dev = alloc_etherdev(sizeof(*cp));
4951 if (!dev) {
4952 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
4953 err = -ENOMEM;
4954 goto err_out_disable_pdev;
4955 }
4956 SET_NETDEV_DEV(dev, &pdev->dev);
4957
4958 err = pci_request_regions(pdev, dev->name);
4959 if (err) {
4960 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
4961 goto err_out_free_netdev;
4962 }
4963 pci_set_master(pdev);
4964
	/* Always enable parity error responses and mask SERR
	 * reporting, then try to turn on memory-write-invalidate
	 * (failure to do so is only worth a warning).
	 */
4969 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
4970 pci_cmd &= ~PCI_COMMAND_SERR;
4971 pci_cmd |= PCI_COMMAND_PARITY;
4972 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
4973 if (pci_try_set_mwi(pdev))
4974 pr_warning("Could not enable MWI for %s\n", pci_name(pdev));
4975
4976 cas_program_bridge(pdev);
4977
	/* If the configured PCI cache line size is smaller than the
	 * driver's preferred value, raise it (capped at
	 * SMP_CACHE_BYTES) and remember the original value so
	 * cas_remove_one() can restore it.
	 */
4984#if 1
4985 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
4986 &orig_cacheline_size);
4987 if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
4988 cas_cacheline_size =
4989 (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
4990 CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
4991 if (pci_write_config_byte(pdev,
4992 PCI_CACHE_LINE_SIZE,
4993 cas_cacheline_size)) {
4994 dev_err(&pdev->dev, "Could not set PCI cache "
4995 "line size\n");
4996 goto err_write_cacheline;
4997 }
4998 }
4999#endif
5000
	/* Configure DMA attributes: prefer 64-bit DMA and fall back
	 * to 32-bit.
	 */
5003 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
5004 pci_using_dac = 1;
5005 err = pci_set_consistent_dma_mask(pdev,
5006 DMA_BIT_MASK(64));
5007 if (err < 0) {
5008 dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
5009 "for consistent allocations\n");
5010 goto err_out_free_res;
5011 }
5012
5013 } else {
5014 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5015 if (err) {
5016 dev_err(&pdev->dev, "No usable DMA configuration, "
5017 "aborting\n");
5018 goto err_out_free_res;
5019 }
5020 pci_using_dac = 0;
5021 }
5022
5023 casreg_len = pci_resource_len(pdev, 0);
5024
5025 cp = netdev_priv(dev);
5026 cp->pdev = pdev;
5027#if 1
5028
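	/* A value of zero means the cache line size was never changed. */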
5029 cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size: 0;
5030#endif
5031 cp->dev = dev;
5032 cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
5033 cassini_debug;
5034
5035#if defined(CONFIG_SPARC)
5036 cp->of_node = pci_device_to_OF_node(pdev);
5037#endif
5038
5039 cp->link_transition = LINK_TRANSITION_UNKNOWN;
5040 cp->link_transition_jiffies_valid = 0;
5041
5042 spin_lock_init(&cp->lock);
5043 spin_lock_init(&cp->rx_inuse_lock);
5044 spin_lock_init(&cp->rx_spare_lock);
5045 for (i = 0; i < N_TX_RINGS; i++) {
5046 spin_lock_init(&cp->stat_lock[i]);
5047 spin_lock_init(&cp->tx_lock[i]);
5048 }
5049 spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
5050 mutex_init(&cp->pm_mutex);
5051
5052 init_timer(&cp->link_timer);
5053 cp->link_timer.function = cas_link_timer;
5054 cp->link_timer.data = (unsigned long) cp;
5055
5056#if 1
	/* Explicitly zero the reset-task pending counters. */
5060 atomic_set(&cp->reset_task_pending, 0);
5061 atomic_set(&cp->reset_task_pending_all, 0);
5062 atomic_set(&cp->reset_task_pending_spare, 0);
5063 atomic_set(&cp->reset_task_pending_mtu, 0);
5064#endif
5065 INIT_WORK(&cp->reset_task, cas_reset_task);
5066
5067
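	/* Default link behaviour comes from the link_mode module
	 * parameter; out-of-range values fall back to autonegotiation.
	 */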
5068 if (link_mode >= 0 && link_mode < 6)
5069 cp->link_cntl = link_modes[link_mode];
5070 else
5071 cp->link_cntl = BMCR_ANENABLE;
5072 cp->lstate = link_down;
5073 cp->link_transition = LINK_TRANSITION_LINK_DOWN;
5074 netif_carrier_off(cp->dev);
5075 cp->timer_ticks = 0;
5076
5077
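	/* Map the chip's register space. */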
5078 cp->regs = pci_iomap(pdev, 0, casreg_len);
5079 if (!cp->regs) {
5080 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
5081 goto err_out_free_res;
5082 }
5083 cp->casreg_len = casreg_len;
5084
5085 pci_save_state(pdev);
5086 cas_check_pci_invariants(cp);
5087 cas_hard_reset(cp);
5088 cas_reset(cp, 0);
5089 if (cas_check_invariants(cp))
5090 goto err_out_iounmap;
5091 if (cp->cas_flags & CAS_FLAG_SATURN)
5092 if (cas_saturn_firmware_init(cp))
5093 goto err_out_iounmap;
5094
5095 cp->init_block = (struct cas_init_block *)
5096 pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
5097 &cp->block_dvma);
5098 if (!cp->init_block) {
5099 dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
5100 goto err_out_iounmap;
5101 }
5102
5103 for (i = 0; i < N_TX_RINGS; i++)
5104 cp->init_txds[i] = cp->init_block->txds[i];
5105
5106 for (i = 0; i < N_RX_DESC_RINGS; i++)
5107 cp->init_rxds[i] = cp->init_block->rxds[i];
5108
5109 for (i = 0; i < N_RX_COMP_RINGS; i++)
5110 cp->init_rxcs[i] = cp->init_block->rxcs[i];
5111
5112 for (i = 0; i < N_RX_FLOWS; i++)
5113 skb_queue_head_init(&cp->rx_flows[i]);
5114
5115 dev->netdev_ops = &cas_netdev_ops;
5116 dev->ethtool_ops = &cas_ethtool_ops;
5117 dev->watchdog_timeo = CAS_TX_TIMEOUT;
5118
5119#ifdef USE_NAPI
5120 netif_napi_add(dev, &cp->napi, cas_poll, 64);
5121#endif
5122 dev->irq = pdev->irq;
5123 dev->dma = 0;
5124
5125
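	/* Advertise hardware checksumming and scatter/gather unless
	 * the chip cannot do checksums.
	 */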
5126 if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
5127 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
5128
5129 if (pci_using_dac)
5130 dev->features |= NETIF_F_HIGHDMA;
5131
5132 if (register_netdev(dev)) {
5133 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
5134 goto err_out_free_consistent;
5135 }
5136
5137 i = readl(cp->regs + REG_BIM_CFG);
5138 netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n",
5139 (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
5140 (i & BIM_CFG_32BIT) ? "32" : "64",
5141 (i & BIM_CFG_66MHZ) ? "66" : "33",
5142 (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
5143 dev->dev_addr);
5144
5145 pci_set_drvdata(pdev, dev);
5146 cp->hw_running = 1;
5147 cas_entropy_reset(cp);
5148 cas_phy_init(cp);
5149 cas_begin_auto_negotiation(cp, NULL);
5150 return 0;
5151
5152err_out_free_consistent:
5153 pci_free_consistent(pdev, sizeof(struct cas_init_block),
5154 cp->init_block, cp->block_dvma);
5155
5156err_out_iounmap:
5157 mutex_lock(&cp->pm_mutex);
5158 if (cp->hw_running)
5159 cas_shutdown(cp);
5160 mutex_unlock(&cp->pm_mutex);
5161
5162 pci_iounmap(pdev, cp->regs);
5163
5164
5165err_out_free_res:
5166 pci_release_regions(pdev);
5167
5168err_write_cacheline:
	/* Try to restore the original cache line size in case it was
	 * changed before the failure.
	 */
5172 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);
5173
5174err_out_free_netdev:
5175 free_netdev(dev);
5176
5177err_out_disable_pdev:
5178 pci_disable_device(pdev);
5179 pci_set_drvdata(pdev, NULL);
5180 return -ENODEV;
5181}
5182
5183static void __devexit cas_remove_one(struct pci_dev *pdev)
5184{
5185 struct net_device *dev = pci_get_drvdata(pdev);
5186 struct cas *cp;
5187 if (!dev)
5188 return;
5189
5190 cp = netdev_priv(dev);
5191 unregister_netdev(dev);
5192
5193 if (cp->fw_data)
5194 vfree(cp->fw_data);
5195
5196 mutex_lock(&cp->pm_mutex);
5197 cancel_work_sync(&cp->reset_task);
5198 if (cp->hw_running)
5199 cas_shutdown(cp);
5200 mutex_unlock(&cp->pm_mutex);
5201
5202#if 1
5203 if (cp->orig_cacheline_size) {
		/* Restore the cache line size we overrode at probe
		 * time.
		 */
5207 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
5208 cp->orig_cacheline_size);
5209 }
5210#endif
5211 pci_free_consistent(pdev, sizeof(struct cas_init_block),
5212 cp->init_block, cp->block_dvma);
5213 pci_iounmap(pdev, cp->regs);
5214 free_netdev(dev);
5215 pci_release_regions(pdev);
5216 pci_disable_device(pdev);
5217 pci_set_drvdata(pdev, NULL);
5218}
5219
5220#ifdef CONFIG_PM
5221static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
5222{
5223 struct net_device *dev = pci_get_drvdata(pdev);
5224 struct cas *cp = netdev_priv(dev);
5225 unsigned long flags;
5226
5227 mutex_lock(&cp->pm_mutex);
5228
5229
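	/* If the interface is open, detach it and quiesce the chip. */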
5230 if (cp->opened) {
5231 netif_device_detach(dev);
5232
5233 cas_lock_all_save(cp, flags);
5234
		/* Stop the hardware here; cas_resume() resets and
		 * re-initializes it and reattaches the device.
		 */
5240 cas_reset(cp, 0);
5241 cas_clean_rings(cp);
5242 cas_unlock_all_restore(cp, flags);
5243 }
5244
5245 if (cp->hw_running)
5246 cas_shutdown(cp);
5247 mutex_unlock(&cp->pm_mutex);
5248
5249 return 0;
5250}
5251
5252static int cas_resume(struct pci_dev *pdev)
5253{
5254 struct net_device *dev = pci_get_drvdata(pdev);
5255 struct cas *cp = netdev_priv(dev);
5256
5257 netdev_info(dev, "resuming\n");
5258
5259 mutex_lock(&cp->pm_mutex);
5260 cas_hard_reset(cp);
5261 if (cp->opened) {
5262 unsigned long flags;
5263 cas_lock_all_save(cp, flags);
5264 cas_reset(cp, 0);
5265 cp->hw_running = 1;
5266 cas_clean_rings(cp);
5267 cas_init_hw(cp, 1);
5268 cas_unlock_all_restore(cp, flags);
5269
5270 netif_device_attach(dev);
5271 }
5272 mutex_unlock(&cp->pm_mutex);
5273 return 0;
5274}
5275#endif
5276
5277static struct pci_driver cas_driver = {
5278 .name = DRV_MODULE_NAME,
5279 .id_table = cas_pci_tbl,
5280 .probe = cas_init_one,
5281 .remove = __devexit_p(cas_remove_one),
5282#ifdef CONFIG_PM
5283 .suspend = cas_suspend,
5284 .resume = cas_resume
5285#endif
5286};
5287
5288static int __init cas_init(void)
5289{
5290 if (linkdown_timeout > 0)
5291 link_transition_timeout = linkdown_timeout * HZ;
5292 else
5293 link_transition_timeout = 0;
5294
5295 return pci_register_driver(&cas_driver);
5296}
5297
5298static void __exit cas_cleanup(void)
5299{
5300 pci_unregister_driver(&cas_driver);
5301}
5302
5303module_init(cas_init);
5304module_exit(cas_cleanup);
5305