1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
56
57#include <linux/module.h>
58#include <linux/kernel.h>
59#include <linux/types.h>
60#include <linux/compiler.h>
61#include <linux/slab.h>
62#include <linux/delay.h>
63#include <linux/init.h>
64#include <linux/interrupt.h>
65#include <linux/vmalloc.h>
66#include <linux/ioport.h>
67#include <linux/pci.h>
68#include <linux/mm.h>
69#include <linux/highmem.h>
70#include <linux/list.h>
71#include <linux/dma-mapping.h>
72
73#include <linux/netdevice.h>
74#include <linux/etherdevice.h>
75#include <linux/skbuff.h>
76#include <linux/ethtool.h>
77#include <linux/crc32.h>
78#include <linux/random.h>
79#include <linux/mii.h>
80#include <linux/ip.h>
81#include <linux/tcp.h>
82#include <linux/mutex.h>
83#include <linux/firmware.h>
84
85#include <net/checksum.h>
86
87#include <linux/atomic.h>
88#include <asm/io.h>
89#include <asm/byteorder.h>
90#include <linux/uaccess.h>
91
92#define cas_page_map(x) kmap_atomic((x))
93#define cas_page_unmap(x) kunmap_atomic((x))
94#define CAS_NCPUS num_online_cpus()
95
96#define cas_skb_release(x) netif_rx(x)
97
98
99#define USE_HP_WORKAROUND
100#define HP_WORKAROUND_DEFAULT
101#define CAS_HP_ALT_FIRMWARE cas_prog_null
102
103#include "cassini.h"
104
105#define USE_TX_COMPWB
106#define USE_CSMA_CD_PROTO
107#define USE_RX_BLANK
108#undef USE_ENTROPY_DEV
109
110
111
112
113#undef USE_PCI_INTB
114#undef USE_PCI_INTC
115#undef USE_PCI_INTD
116#undef USE_QOS
117
118#undef USE_VPD_DEBUG
119
120
121#define USE_PAGE_ORDER
122#define RX_DONT_BATCH 0
123#define RX_COPY_ALWAYS 0
124#define RX_COPY_MIN 64
125#undef RX_COUNT_BUFFERS
126
127#define DRV_MODULE_NAME "cassini"
128#define DRV_MODULE_VERSION "1.6"
129#define DRV_MODULE_RELDATE "21 May 2008"
130
131#define CAS_DEF_MSG_ENABLE \
132 (NETIF_MSG_DRV | \
133 NETIF_MSG_PROBE | \
134 NETIF_MSG_LINK | \
135 NETIF_MSG_TIMER | \
136 NETIF_MSG_IFDOWN | \
137 NETIF_MSG_IFUP | \
138 NETIF_MSG_RX_ERR | \
139 NETIF_MSG_TX_ERR)
140
141
142
143
144#define CAS_TX_TIMEOUT (HZ)
145#define CAS_LINK_TIMEOUT (22*HZ/10)
146#define CAS_LINK_FAST_TIMEOUT (1)
147
148
149
150
151#define STOP_TRIES_PHY 1000
152#define STOP_TRIES 5000
153
154
155
156
157
158#define CAS_MIN_FRAME 97
159#define CAS_1000MB_MIN_FRAME 255
160#define CAS_MIN_MTU 60
161#define CAS_MAX_MTU min(((cp->page_size << 1) - 0x50), 9000)
162
163#if 1
164
165
166
167
168#else
169#define CAS_RESET_MTU 1
170#define CAS_RESET_ALL 2
171#define CAS_RESET_SPARE 3
172#endif
173
174static char version[] =
175 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
176
177static int cassini_debug = -1;
178static int link_mode;
179
180MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
181MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
182MODULE_LICENSE("GPL");
183MODULE_FIRMWARE("sun/cassini.bin");
184module_param(cassini_debug, int, 0);
185MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
186module_param(link_mode, int, 0);
187MODULE_PARM_DESC(link_mode, "default link mode");
188
189
190
191
192
193#define DEFAULT_LINKDOWN_TIMEOUT 5
194
195
196
197static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
198module_param(linkdown_timeout, int, 0);
199MODULE_PARM_DESC(linkdown_timeout,
200"min reset interval in sec. for PCS linkdown issue; disabled if not positive");
201
202
203
204
205
206
207static int link_transition_timeout;
208
209
210
/* BMCR bit patterns selected by the "link_mode" module parameter.
 * Index 0 enables autonegotiation; the remaining entries force a
 * speed/duplex combination via the BMCR_* bits.
 */
static u16 link_modes[] = {
	BMCR_ANENABLE,			/* 0: autonegotiate */
	0,				/* 1: no speed/duplex bits: 10bt half */
	BMCR_SPEED100,			/* 2: 100bt half duplex */
	BMCR_FULLDPLX,			/* 3: 10bt full duplex */
	BMCR_SPEED100|BMCR_FULLDPLX,	/* 4: 100bt full duplex */
	CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5: 1000bt full duplex */
};
219
/* PCI devices this driver binds to: the Sun Cassini and the National
 * Semiconductor Saturn variant.  Terminated by the all-zero entry.
 */
static const struct pci_device_id cas_pci_tbl[] = {
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ 0, }
};
227
228MODULE_DEVICE_TABLE(pci, cas_pci_tbl);
229
230static void cas_set_link_modes(struct cas *cp);
231
232static inline void cas_lock_tx(struct cas *cp)
233{
234 int i;
235
236 for (i = 0; i < N_TX_RINGS; i++)
237 spin_lock_nested(&cp->tx_lock[i], i);
238}
239
/* Take the main device lock (disabling IRQs), then all TX ring locks. */
static inline void cas_lock_all(struct cas *cp)
{
	spin_lock_irq(&cp->lock);
	cas_lock_tx(cp);
}
245
246
247
248
249
250
251
252
253
/* Like cas_lock_all() but saves the caller's IRQ flags.  This is a
 * macro rather than a function because spin_lock_irqsave() must store
 * into the caller's "flags" lvalue.
 */
#define cas_lock_all_save(cp, flags) \
do { \
	struct cas *xxxcp = (cp); \
	spin_lock_irqsave(&xxxcp->lock, flags); \
	cas_lock_tx(xxxcp); \
} while (0)
260
261static inline void cas_unlock_tx(struct cas *cp)
262{
263 int i;
264
265 for (i = N_TX_RINGS; i > 0; i--)
266 spin_unlock(&cp->tx_lock[i - 1]);
267}
268
/* Drop all TX ring locks, then the main device lock (re-enables IRQs). */
static inline void cas_unlock_all(struct cas *cp)
{
	cas_unlock_tx(cp);
	spin_unlock_irq(&cp->lock);
}
274
/* Counterpart to cas_lock_all_save(): drops the TX ring locks and
 * restores the IRQ flags saved by the caller.
 */
#define cas_unlock_all_restore(cp, flags) \
do { \
	struct cas *xxxcp = (cp); \
	cas_unlock_tx(xxxcp); \
	spin_unlock_irqrestore(&xxxcp->lock, flags); \
} while (0)
281
/* Mask interrupts for one interrupt source.  Ring 0 owns the primary
 * interrupt mask register; the other rings only have per-ring mask
 * registers on REG_PLUS (Cassini+) hardware.
 */
static void cas_disable_irq(struct cas *cp, const int ring)
{
	/* ring 0: mask everything on the main interrupt register */
	if (ring == 0) {
		writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
		return;
	}

	/* per-ring mask registers exist only on cassini+ */
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
		case 1:
#endif
#ifdef USE_PCI_INTC
		case 2:
#endif
#ifdef USE_PCI_INTD
		case 3:
#endif
			/* rings with a dedicated PCI INTx keep RX enabled */
			writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
			       cp->regs + REG_PLUS_INTRN_MASK(ring));
			break;
#endif
		default:
			writel(INTRN_MASK_CLEAR_ALL, cp->regs +
			       REG_PLUS_INTRN_MASK(ring));
			break;
		}
	}
}
314
315static inline void cas_mask_intr(struct cas *cp)
316{
317 int i;
318
319 for (i = 0; i < N_RX_COMP_RINGS; i++)
320 cas_disable_irq(cp, i);
321}
322
/* Unmask interrupts for one interrupt source.  Ring 0 unmasks all but
 * TX-done on the primary register; other rings get RX enabled only when
 * they own a dedicated PCI INTx line on cassini+ hardware.
 */
static void cas_enable_irq(struct cas *cp, const int ring)
{
	if (ring == 0) {	/* unmask everything except TX_DONE */
		writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
		return;
	}

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
		case 1:
#endif
#ifdef USE_PCI_INTC
		case 2:
#endif
#ifdef USE_PCI_INTD
		case 3:
#endif
			writel(INTRN_MASK_RX_EN, cp->regs +
			       REG_PLUS_INTRN_MASK(ring));
			break;
#endif
		default:
			/* no dedicated INTx: leave the ring masked */
			break;
		}
	}
}
351
352static inline void cas_unmask_intr(struct cas *cp)
353{
354 int i;
355
356 for (i = 0; i < N_RX_COMP_RINGS; i++)
357 cas_enable_irq(cp, i);
358}
359
/* Feed hardware-generated randomness into the entropy pool.  Compiled
 * out unless USE_ENTROPY_DEV is defined (it is #undef'd above), so this
 * is a no-op in the current configuration.
 */
static inline void cas_entropy_gather(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
		return;

	batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
			    readl(cp->regs + REG_ENTROPY_IV),
			    sizeof(uint64_t)*8);
#endif
}
371
/* Reset the on-chip entropy device and probe that it responds; drops
 * the CAS_FLAG_ENTROPY_DEV capability if it appears dead.  Compiled
 * out unless USE_ENTROPY_DEV is defined, so a no-op here.
 */
static inline void cas_entropy_reset(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
		return;

	writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
	       cp->regs + REG_BIM_LOCAL_DEV_EN);
	writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
	writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);

	/* reading back zero means no entropy device is present */
	if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
		cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
#endif
}
388
389
390
391
/* Read a 16-bit PHY register over the MIF.  Busy-polls (up to
 * STOP_TRIES_PHY iterations, 10us apart) for frame completion.
 * Returns the register value, or 0xFFFF on timeout.
 */
static u16 cas_phy_read(struct cas *cp, int reg)
{
	u32 cmd;
	int limit = STOP_TRIES_PHY;

	cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
	cmd |= MIF_FRAME_TURN_AROUND_MSB;
	writel(cmd, cp->regs + REG_MIF_FRAME);

	/* poll for completion: hardware sets the turn-around LSB when done */
	while (limit-- > 0) {
		udelay(10);
		cmd = readl(cp->regs + REG_MIF_FRAME);
		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
			return cmd & MIF_FRAME_DATA_MASK;
	}
	return 0xFFFF; /* timeout */
}
412
/* Write a 16-bit value to a PHY register over the MIF.  Busy-polls for
 * frame completion like cas_phy_read().  Returns 0 on success, -1 on
 * timeout.
 */
static int cas_phy_write(struct cas *cp, int reg, u16 val)
{
	int limit = STOP_TRIES_PHY;
	u32 cmd;

	cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
	cmd |= MIF_FRAME_TURN_AROUND_MSB;
	cmd |= val & MIF_FRAME_DATA_MASK;
	writel(cmd, cp->regs + REG_MIF_FRAME);

	/* poll for completion: hardware sets the turn-around LSB when done */
	while (limit-- > 0) {
		udelay(10);
		cmd = readl(cp->regs + REG_MIF_FRAME);
		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
			return 0;
	}
	return -1;
}
434
435static void cas_phy_powerup(struct cas *cp)
436{
437 u16 ctl = cas_phy_read(cp, MII_BMCR);
438
439 if ((ctl & BMCR_PDOWN) == 0)
440 return;
441 ctl &= ~BMCR_PDOWN;
442 cas_phy_write(cp, MII_BMCR, ctl);
443}
444
445static void cas_phy_powerdown(struct cas *cp)
446{
447 u16 ctl = cas_phy_read(cp, MII_BMCR);
448
449 if (ctl & BMCR_PDOWN)
450 return;
451 ctl |= BMCR_PDOWN;
452 cas_phy_write(cp, MII_BMCR, ctl);
453}
454
455
/* Unmap and release one RX buffer page plus its cas_page_t
 * bookkeeping struct.  Always returns 0.
 */
static int cas_page_free(struct cas *cp, cas_page_t *page)
{
	pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
		       PCI_DMA_FROMDEVICE);
	__free_pages(page->buffer, cp->page_order);
	kfree(page);
	return 0;
}
464
465#ifdef RX_COUNT_BUFFERS
466#define RX_USED_ADD(x, y) ((x)->used += (y))
467#define RX_USED_SET(x, y) ((x)->used = (y))
468#else
469#define RX_USED_ADD(x, y)
470#define RX_USED_SET(x, y)
471#endif
472
473
474
475
476static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
477{
478 cas_page_t *page;
479
480 page = kmalloc(sizeof(cas_page_t), flags);
481 if (!page)
482 return NULL;
483
484 INIT_LIST_HEAD(&page->list);
485 RX_USED_SET(page, 0);
486 page->buffer = alloc_pages(flags, cp->page_order);
487 if (!page->buffer)
488 goto page_err;
489 page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
490 cp->page_size, PCI_DMA_FROMDEVICE);
491 return page;
492
493page_err:
494 kfree(page);
495 return NULL;
496}
497
498
/* Reset the in-use and spare RX page lists to empty and arm the spare
 * refill counter so cas_spare_recover() will allocate a full set.
 */
static void cas_spare_init(struct cas *cp)
{
	spin_lock(&cp->rx_inuse_lock);
	INIT_LIST_HEAD(&cp->rx_inuse_list);
	spin_unlock(&cp->rx_inuse_lock);

	spin_lock(&cp->rx_spare_lock);
	INIT_LIST_HEAD(&cp->rx_spare_list);
	cp->rx_spares_needed = RX_SPARE_COUNT;
	spin_unlock(&cp->rx_spare_lock);
}
510
511
/* Free all spare RX pages and all in-use RX pages.  Each list is first
 * spliced onto a private local list so its lock is held only briefly
 * and cas_page_free() runs unlocked.
 */
static void cas_spare_free(struct cas *cp)
{
	struct list_head list, *elem, *tmp;

	/* free spare buffers */
	INIT_LIST_HEAD(&list);
	spin_lock(&cp->rx_spare_lock);
	list_splice_init(&cp->rx_spare_list, &list);
	spin_unlock(&cp->rx_spare_lock);
	list_for_each_safe(elem, tmp, &list) {
		cas_page_free(cp, list_entry(elem, cas_page_t, list));
	}

	INIT_LIST_HEAD(&list);
#if 1
	/* in-use pages are protected by their own lock here; the #else
	 * variant reused the spare-list lock instead
	 */
	spin_lock(&cp->rx_inuse_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_inuse_lock);
#else
	spin_lock(&cp->rx_spare_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_spare_lock);
#endif
	list_for_each_safe(elem, tmp, &list) {
		cas_page_free(cp, list_entry(elem, cas_page_t, list));
	}
}
543
544
/* Replenish the spare RX page list: first reclaim in-use pages whose
 * external references have been dropped, then allocate fresh pages for
 * whatever is still needed.  @flags controls allocation context
 * (GFP_ATOMIC when called from the RX path).
 */
static void cas_spare_recover(struct cas *cp, const gfp_t flags)
{
	struct list_head list, *elem, *tmp;
	int needed, i;

	/* take a private copy of the in-use list so its lock is not
	 * held while the pages are inspected
	 */
	INIT_LIST_HEAD(&list);
	spin_lock(&cp->rx_inuse_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_inuse_lock);

	list_for_each_safe(elem, tmp, &list) {
		cas_page_t *page = list_entry(elem, cas_page_t, list);

		/* a page_count > 1 means someone else still holds a
		 * reference to the page; leave it on the local list so
		 * it is returned to rx_inuse below
		 */
		if (page_count(page->buffer) > 1)
			continue;

		list_del(elem);
		spin_lock(&cp->rx_spare_lock);
		if (cp->rx_spares_needed > 0) {
			list_add(elem, &cp->rx_spare_list);
			cp->rx_spares_needed--;
			spin_unlock(&cp->rx_spare_lock);
		} else {
			/* spare list already full: just free the page */
			spin_unlock(&cp->rx_spare_lock);
			cas_page_free(cp, page);
		}
	}

	/* put any still-referenced buffers back on the in-use list */
	if (!list_empty(&list)) {
		spin_lock(&cp->rx_inuse_lock);
		list_splice(&list, &cp->rx_inuse_list);
		spin_unlock(&cp->rx_inuse_lock);
	}

	spin_lock(&cp->rx_spare_lock);
	needed = cp->rx_spares_needed;
	spin_unlock(&cp->rx_spare_lock);
	if (!needed)
		return;

	/* still short of spares: allocate new pages outside any lock */
	INIT_LIST_HEAD(&list);
	i = 0;
	while (i < needed) {
		cas_page_t *spare = cas_page_alloc(cp, flags);
		if (!spare)
			break;
		list_add(&spare->list, &list);
		i++;
	}

	spin_lock(&cp->rx_spare_lock);
	list_splice(&list, &cp->rx_spare_list);
	cp->rx_spares_needed -= i;
	spin_unlock(&cp->rx_spare_lock);
}
619
620
/* Pop one page off the spare list, attempting an inline recovery if
 * the list is empty.  Periodically (every RX_SPARE_RECOVER_VAL pops)
 * schedules the reset task to do a deeper recovery.  Returns NULL if
 * no buffer could be obtained.
 */
static cas_page_t *cas_page_dequeue(struct cas *cp)
{
	struct list_head *entry;
	int recover;

	spin_lock(&cp->rx_spare_lock);
	if (list_empty(&cp->rx_spare_list)) {
		/* try an immediate atomic recovery */
		spin_unlock(&cp->rx_spare_lock);
		cas_spare_recover(cp, GFP_ATOMIC);
		spin_lock(&cp->rx_spare_lock);
		if (list_empty(&cp->rx_spare_list)) {
			netif_err(cp, rx_err, cp->dev,
				  "no spare buffers available\n");
			spin_unlock(&cp->rx_spare_lock);
			return NULL;
		}
	}

	entry = cp->rx_spare_list.next;
	list_del(entry);
	recover = ++cp->rx_spares_needed;
	spin_unlock(&cp->rx_spare_lock);

	/* trigger the deferred recovery every RX_SPARE_RECOVER_VAL pops */
	if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
#if 1
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_spare);
		schedule_work(&cp->reset_task);
#else
		atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
		schedule_work(&cp->reset_task);
#endif
	}
	return list_entry(entry, cas_page_t, list);
}
658
659
/* Enable or disable hardware MIF polling of the PHY's BMSR register.
 * When enabled, only link-status and autoneg-complete transitions are
 * unmasked; when disabled, all MIF interrupt sources are masked.
 */
static void cas_mif_poll(struct cas *cp, const int enable)
{
	u32 cfg;

	cfg = readl(cp->regs + REG_MIF_CFG);
	/* preserve only the MDIO bus presence bits */
	cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);

	if (cp->phy_type & CAS_PHY_MII_MDIO1)
		cfg |= MIF_CFG_PHY_SELECT;

	/* poll the BMSR of the selected PHY address */
	if (enable) {
		cfg |= MIF_CFG_POLL_EN;
		cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
		cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
	}
	writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
	       cp->regs + REG_MIF_MASK);
	writel(cfg, cp->regs + REG_MIF_CFG);
}
680
681
/* (Re)start link negotiation, optionally applying new ethtool settings
 * from @ep.  If the configuration changed while the link was up, a
 * full chip reset is scheduled instead of reprogramming in place.
 */
static void cas_begin_auto_negotiation(struct cas *cp,
				       const struct ethtool_link_ksettings *ep)
{
	u16 ctl;
#if 1
	int lcntl;
	int changed = 0;
	int oldstate = cp->lstate;
	int link_was_not_down = !(oldstate == link_down);
#endif
	/* no new settings supplied: just restart with the current config */
	if (!ep)
		goto start_aneg;
	lcntl = cp->link_cntl;
	if (ep->base.autoneg == AUTONEG_ENABLE) {
		cp->link_cntl = BMCR_ANENABLE;
	} else {
		/* forced mode: translate ethtool speed/duplex to BMCR bits */
		u32 speed = ep->base.speed;
		cp->link_cntl = 0;
		if (speed == SPEED_100)
			cp->link_cntl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			cp->link_cntl |= CAS_BMCR_SPEED1000;
		if (ep->base.duplex == DUPLEX_FULL)
			cp->link_cntl |= BMCR_FULLDPLX;
	}
#if 1
	changed = (lcntl != cp->link_cntl);
#endif
start_aneg:
	if (cp->lstate == link_up) {
		netdev_info(cp->dev, "PCS link down\n");
	} else {
		if (changed) {
			netdev_info(cp->dev, "link configuration changed\n");
		}
	}
	cp->lstate = link_down;
	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
	if (!cp->hw_running)
		return;
#if 1
	/* mirror the link-down handling done elsewhere: drop the
	 * carrier if we were previously up
	 */
	if (oldstate == link_up)
		netif_carrier_off(cp->dev);
	if (changed && link_was_not_down) {
		/* the configuration changed while the link was active:
		 * schedule a full reset and restart the link timer
		 * rather than reprogramming the PHY/PCS here
		 */
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_all);
		schedule_work(&cp->reset_task);
		cp->timer_ticks = 0;
		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
		return;
	}
#endif
	if (cp->phy_type & CAS_PHY_SERDES) {
		/* SERDES: program the PCS MII control register directly */
		u32 val = readl(cp->regs + REG_PCS_MII_CTRL);

		if (cp->link_cntl & BMCR_ANENABLE) {
			val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
			cp->lstate = link_aneg;
		} else {
			if (cp->link_cntl & BMCR_FULLDPLX)
				val |= PCS_MII_CTRL_DUPLEX;
			val &= ~PCS_MII_AUTONEG_EN;
			cp->lstate = link_force_ok;
		}
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		writel(val, cp->regs + REG_PCS_MII_CTRL);

	} else {
		/* MII PHY: pause hardware polling while BMCR is rewritten */
		cas_mif_poll(cp, 0);
		ctl = cas_phy_read(cp, MII_BMCR);
		ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
			 CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
		ctl |= cp->link_cntl;
		if (ctl & BMCR_ANENABLE) {
			ctl |= BMCR_ANRESTART;
			cp->lstate = link_aneg;
		} else {
			cp->lstate = link_force_ok;
		}
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		cas_phy_write(cp, MII_BMCR, ctl);
		cas_mif_poll(cp, 1);
	}

	cp->timer_ticks = 0;
	mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
}
780
781
/* Soft-reset the MII PHY and wait (up to STOP_TRIES_PHY polls) for the
 * BMCR reset bit to self-clear.  Returns nonzero if the reset timed
 * out, 0 on success.
 */
static int cas_reset_mii_phy(struct cas *cp)
{
	int limit = STOP_TRIES_PHY;
	u16 val;

	cas_phy_write(cp, MII_BMCR, BMCR_RESET);
	udelay(100);
	while (--limit) {
		val = cas_phy_read(cp, MII_BMCR);
		if ((val & BMCR_RESET) == 0)
			break;
		udelay(10);
	}
	return limit <= 0;
}
797
/* Fetch the Saturn PHY firmware blob via the firmware loader.  The
 * first two bytes of the image hold the little-endian load address;
 * the remainder is the program, copied to cp->fw_data for later
 * download.  On any failure cp->fw_data remains NULL and the download
 * step is silently skipped.
 */
static void cas_saturn_firmware_init(struct cas *cp)
{
	const struct firmware *fw;
	const char fw_name[] = "sun/cassini.bin";
	int err;

	/* only the NS DP83065 PHY needs this firmware */
	if (PHY_NS_DP83065 != cp->phy_id)
		return;

	err = request_firmware(&fw, fw_name, &cp->pdev->dev);
	if (err) {
		pr_err("Failed to load firmware \"%s\"\n",
		       fw_name);
		return;
	}
	if (fw->size < 2) {
		pr_err("bogus length %zu in \"%s\"\n",
		       fw->size, fw_name);
		goto out;
	}
	/* little-endian 16-bit load address in the first two bytes */
	cp->fw_load_addr= fw->data[1] << 8 | fw->data[0];
	cp->fw_size = fw->size - 2;
	cp->fw_data = vmalloc(cp->fw_size);
	if (!cp->fw_data)
		goto out;
	memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
out:
	release_firmware(fw);
}
827
/* Download the firmware fetched by cas_saturn_firmware_init() into the
 * DP83065 PHY.  The register addresses/values below are a vendor
 * programming sequence carried over from the original driver --
 * NOTE(review): do not reorder or "clean up" these writes.
 */
static void cas_saturn_firmware_load(struct cas *cp)
{
	int i;

	/* nothing to do if the blob was never fetched */
	if (!cp->fw_data)
		return;

	cas_phy_powerdown(cp);

	/* expanded memory access mode off */
	cas_phy_write(cp, DP83065_MII_MEM, 0x0);

	/* pre-download register setup (vendor-specified values) */
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
	cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
	cas_phy_write(cp, DP83065_MII_REGD, 0x82);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
	cas_phy_write(cp, DP83065_MII_REGD, 0x0);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
	cas_phy_write(cp, DP83065_MII_REGD, 0x39);

	/* enable memory access, then stream the image byte-by-byte
	 * starting at the load address extracted from the blob
	 */
	cas_phy_write(cp, DP83065_MII_MEM, 0x1);
	cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
	for (i = 0; i < cp->fw_size; i++)
		cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);

	/* kick off the downloaded program */
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
	cas_phy_write(cp, DP83065_MII_REGD, 0x1);
}
860
861
862
/* Bring the link hardware (MII/GMII PHY or SERDES/PCS) into a known
 * state after reset: apply per-PHY workarounds, load Saturn firmware
 * when needed, and program the advertisement registers.  Does not
 * itself start autonegotiation.
 */
static void cas_phy_init(struct cas *cp)
{
	u16 val;

	/* MII/GMII PHY path */
	if (CAS_PHY_MII(cp->phy_type)) {
		writel(PCS_DATAPATH_MODE_MII,
		       cp->regs + REG_PCS_DATAPATH_MODE);

		cas_mif_poll(cp, 0);
		cas_reset_mii_phy(cp);

		if (PHY_LUCENT_B0 == cp->phy_id) {
			/* Lucent B0 workaround (vendor magic sequence) */
			cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
			cas_phy_write(cp, MII_BMCR, 0x00f1);
			cas_phy_write(cp, LUCENT_MII_REG, 0x0);

		} else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
			/* Broadcom B0 workaround (vendor magic sequence) */
			cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);

		} else if (PHY_BROADCOM_5411 == cp->phy_id) {
			/* two reads; presumably the first clears a latched
			 * bit -- NOTE(review): carried over as-is
			 */
			val = cas_phy_read(cp, BROADCOM_MII_REG4);
			val = cas_phy_read(cp, BROADCOM_MII_REG4);
			if (val & 0x0080) {
				/* link workaround */
				cas_phy_write(cp, BROADCOM_MII_REG4,
					      val & ~0x0080);
			}

		} else if (cp->cas_flags & CAS_FLAG_SATURN) {
			writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
			       SATURN_PCFG_FSI : 0x0,
			       cp->regs + REG_SATURN_PCFG);

			/* the DP83065 needs its firmware downloaded
			 * before it is powered up
			 */
			if (PHY_NS_DP83065 == cp->phy_id) {
				cas_saturn_firmware_load(cp);
			}
			cas_phy_powerup(cp);
		}

		/* turn autoneg off temporarily while advertisement
		 * registers are programmed
		 */
		val = cas_phy_read(cp, MII_BMCR);
		val &= ~BMCR_ANENABLE;
		cas_phy_write(cp, MII_BMCR, val);
		udelay(10);

		cas_phy_write(cp, MII_ADVERTISE,
			      cas_phy_read(cp, MII_ADVERTISE) |
			      (ADVERTISE_10HALF | ADVERTISE_10FULL |
			       ADVERTISE_100HALF | ADVERTISE_100FULL |
			       CAS_ADVERTISE_PAUSE |
			       CAS_ADVERTISE_ASYM_PAUSE));

		if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
			/* advertise 1000 full only; half-duplex gigabit
			 * is explicitly not offered
			 */
			val = cas_phy_read(cp, CAS_MII_1000_CTRL);
			val &= ~CAS_ADVERTISE_1000HALF;
			val |= CAS_ADVERTISE_1000FULL;
			cas_phy_write(cp, CAS_MII_1000_CTRL, val);
		}

	} else {
		/* SERDES/PCS path */
		u32 val;
		int limit;

		writel(PCS_DATAPATH_MODE_SERDES,
		       cp->regs + REG_PCS_DATAPATH_MODE);

		/* Saturn: disable the FSI config used for MII */
		if (cp->cas_flags & CAS_FLAG_SATURN)
			writel(0, cp->regs + REG_SATURN_PCFG);

		/* reset the PCS and wait for the bit to self-clear */
		val = readl(cp->regs + REG_PCS_MII_CTRL);
		val |= PCS_MII_RESET;
		writel(val, cp->regs + REG_PCS_MII_CTRL);

		limit = STOP_TRIES;
		while (--limit > 0) {
			udelay(10);
			if ((readl(cp->regs + REG_PCS_MII_CTRL) &
			     PCS_MII_RESET) == 0)
				break;
		}
		if (limit <= 0)
			netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
				    readl(cp->regs + REG_PCS_STATE_MACHINE));

		/* disable the PCS while the advertisement register is
		 * programmed
		 */
		writel(0x0, cp->regs + REG_PCS_CFG);

		/* advertise full duplex + both pause types, no half duplex */
		val = readl(cp->regs + REG_PCS_MII_ADVERT);
		val &= ~PCS_MII_ADVERT_HD;
		val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
			PCS_MII_ADVERT_ASYM_PAUSE);
		writel(val, cp->regs + REG_PCS_MII_ADVERT);

		/* re-enable the PCS */
		writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);

		/* enable SERDES sync detection */
		writel(PCS_SERDES_CTRL_SYNCD_EN,
		       cp->regs + REG_PCS_SERDES_CTRL);
	}
}
990
991
/* Evaluate PCS link state and update cp->lstate and the netif carrier.
 * Returns nonzero when the caller should schedule a chip reset (the
 * PCS link-down workaround controlled by link_transition_timeout).
 */
static int cas_pcs_link_check(struct cas *cp)
{
	u32 stat, state_machine;
	int retval = 0;

	/* the link-status bit latches low, so a second read is needed
	 * to observe a down->up transition
	 */
	stat = readl(cp->regs + REG_PCS_MII_STATUS);
	if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
		stat = readl(cp->regs + REG_PCS_MII_STATUS);

	/* a remote fault is only meaningful once autoneg completed */
	if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
		     PCS_MII_STATUS_REMOTE_FAULT)) ==
	    (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
		netif_info(cp, link, cp->dev, "PCS RemoteFault\n");

	/* cross-check the status register against the PCS state machine:
	 * the state machine overrides the latched status bit
	 */
	state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
	if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
		stat &= ~PCS_MII_STATUS_LINK_STATUS;
	} else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
		stat |= PCS_MII_STATUS_LINK_STATUS;
	}

	if (stat & PCS_MII_STATUS_LINK_STATUS) {
		if (cp->lstate != link_up) {
			if (cp->opened) {
				/* transition to link-up */
				cp->lstate = link_up;
				cp->link_transition = LINK_TRANSITION_LINK_UP;

				cas_set_link_modes(cp);
				netif_carrier_on(cp->dev);
			}
		}
	} else if (cp->lstate == link_up) {
		/* up -> down transition */
		cp->lstate = link_down;
		if (link_transition_timeout != 0 &&
		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
		    !cp->link_transition_jiffies_valid) {
			/* request a reset as a workaround for the PCS
			 * link-failure problem, but only if one was not
			 * already requested recently
			 * (link_transition_jiffies_valid set means the
			 * hold-off window is still open)
			 */
			retval = 1;
			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
			cp->link_transition_jiffies = jiffies;
			cp->link_transition_jiffies_valid = 1;
		} else {
			cp->link_transition = LINK_TRANSITION_ON_FAILURE;
		}
		netif_carrier_off(cp->dev);
		if (cp->opened)
			netif_info(cp, link, cp->dev, "PCS link down\n");

		/* plain Cassini (not REG_PLUS) only: check the SERDES
		 * state on link down; 0x03 forces a reset request --
		 * NOTE(review): magic value carried over from the
		 * original driver
		 */
		if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {

			stat = readl(cp->regs + REG_PCS_SERDES_STATE);
			if (stat == 0x03)
				return 1;
		}
	} else if (cp->lstate == link_down) {
		/* still down: possibly escalate to a reset request */
		if (link_transition_timeout != 0 &&
		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
		    !cp->link_transition_jiffies_valid) {
			retval = 1;
			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
			cp->link_transition_jiffies = jiffies;
			cp->link_transition_jiffies_valid = 1;
		} else {
			cp->link_transition = LINK_TRANSITION_STILL_FAILED;
		}
	}

	return retval;
}
1095
1096static int cas_pcs_interrupt(struct net_device *dev,
1097 struct cas *cp, u32 status)
1098{
1099 u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);
1100
1101 if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
1102 return 0;
1103 return cas_pcs_link_check(cp);
1104}
1105
/* Service TX MAC status interrupts: log serious conditions and fold
 * the 16-bit hardware collision/abort counters (which wrap at 0x10000)
 * into net_stats[0].  Always returns 0.
 */
static int cas_txmac_interrupt(struct net_device *dev,
			       struct cas *cp, u32 status)
{
	u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);

	if (!txmac_stat)
		return 0;

	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
		     "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);

	/* a defer-timer expiration alone is benign; ignore it when no
	 * other status bit is set
	 */
	if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
	    !(txmac_stat & ~MAC_TX_DEFER_TIMER))
		return 0;

	spin_lock(&cp->stat_lock[0]);
	if (txmac_stat & MAC_TX_UNDERRUN) {
		netdev_err(dev, "TX MAC xmit underrun\n");
		cp->net_stats[0].tx_fifo_errors++;
	}

	if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
		netdev_err(dev, "TX MAC max packet size error\n");
		cp->net_stats[0].tx_errors++;
	}

	/* each of the following bits signals a 16-bit hardware counter
	 * having rolled over, hence the += 0x10000 adjustments
	 */
	if (txmac_stat & MAC_TX_COLL_NORMAL)
		cp->net_stats[0].collisions += 0x10000;

	if (txmac_stat & MAC_TX_COLL_EXCESS) {
		cp->net_stats[0].tx_aborted_errors += 0x10000;
		cp->net_stats[0].collisions += 0x10000;
	}

	if (txmac_stat & MAC_TX_COLL_LATE) {
		cp->net_stats[0].tx_aborted_errors += 0x10000;
		cp->net_stats[0].collisions += 0x10000;
	}
	spin_unlock(&cp->stat_lock[0]);

	/* any remaining status bits are intentionally ignored */
	return 0;
}
1157
/* Program the header-parser (HP) instruction RAM with @firmware, an
 * instruction array terminated by an entry whose ->note is NULL.
 * Each instruction is written as three 32-bit words (HI/MID/LOW).
 */
static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
{
	cas_hp_inst_t *inst;
	u32 val;
	int i;

	i = 0;
	while ((inst = firmware) && inst->note) {
		writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);

		val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
		val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);

		/* the outarg field spans the MID (upper bits) and LOW
		 * (lower bits) words
		 */
		val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
		val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
		val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
		val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
		val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
		val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
		val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);

		val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
		++firmware;
		++i;
	}
}
1190
/* Program all RX DMA state: descriptor/completion ring bases, pause
 * thresholds, interrupt mitigation, page sizing and (optionally) the
 * header parser.  Expected to run with the chip quiesced.
 */
static void cas_init_rx_dma(struct cas *cp)
{
	u64 desc_dma = cp->block_dvma;
	u32 val;
	int i, size;

	/* RX config: swivel offset plus ring-size encodings */
	val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
	val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
	val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
	if ((N_RX_DESC_RINGS > 1) &&
	    (cp->cas_flags & CAS_FLAG_REG_PLUS))
		val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
	writel(val, cp->regs + REG_RX_CFG);

	/* descriptor ring 0 base address (offset within init_block)
	 * and initial kick
	 */
	val = (unsigned long) cp->init_rxds[0] -
		(unsigned long) cp->init_block;
	writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
	writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		/* second descriptor ring, cassini+ only */
		val = (unsigned long) cp->init_rxds[1] -
			(unsigned long) cp->init_block;
		writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
		writel((desc_dma + val) & 0xffffffff, cp->regs +
		       REG_PLUS_RX_DB1_LOW);
		writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
		       REG_PLUS_RX_KICK1);
	}

	/* completion ring 0 base address */
	val = (unsigned long) cp->init_rxcs[0] -
		(unsigned long) cp->init_block;
	writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		/* additional completion rings, cassini+ only */
		for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
			val = (unsigned long) cp->init_rxcs[i] -
				(unsigned long) cp->init_block;
			writel((desc_dma + val) >> 32, cp->regs +
			       REG_PLUS_RX_CBN_HI(i));
			writel((desc_dma + val) & 0xffffffff, cp->regs +
			       REG_PLUS_RX_CBN_LOW(i));
		}
	}

	/* read/clear the status alias registers so no stale RX events
	 * fire once interrupts are enabled
	 */
	readl(cp->regs + REG_INTR_STATUS_ALIAS);
	writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		for (i = 1; i < N_RX_COMP_RINGS; i++)
			readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));

		/* ring 1 clears a different bit set than rings 2+ */
		if (N_RX_COMP_RINGS > 1)
			writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
			       cp->regs + REG_PLUS_ALIASN_CLEAR(1));

		for (i = 2; i < N_RX_COMP_RINGS; i++)
			writel(INTR_RX_DONE_ALT,
			       cp->regs + REG_PLUS_ALIASN_CLEAR(i));
	}

	/* pause on/off watermarks, in RX_PAUSE_THRESH_QUANTUM units */
	val = CAS_BASE(RX_PAUSE_THRESH_OFF,
		       cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
	val |= CAS_BASE(RX_PAUSE_THRESH_ON,
			cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
	writel(val, cp->regs + REG_RX_PAUSE_THRESH);

	/* zero the 64-entry RX table */
	for (i = 0; i < 64; i++) {
		writel(i, cp->regs + REG_RX_TABLE_ADDR);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
	}

	/* reset the FIFO address registers for normal operation */
	writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
	writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);

	/* interrupt mitigation (time/packet blanking) */
#ifdef USE_RX_BLANK
	val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
	val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
	writel(val, cp->regs + REG_RX_BLANK);
#else
	writel(0x0, cp->regs + REG_RX_BLANK);
#endif

	/* "almost empty" thresholds for free-descriptor / completion
	 * low-water interrupts
	 */
	val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
	writel(val, cp->regs + REG_RX_AE_THRESH);
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
		writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
	}

	/* random early detect: disabled */
	writel(0x0, cp->regs + REG_RX_RED);

	/* encode the receive page size (0 covers the remaining case) */
	val = 0;
	if (cp->page_size == 0x1000)
		val = 0x1;
	else if (cp->page_size == 0x2000)
		val = 0x2;
	else if (cp->page_size == 0x4000)
		val = 0x3;

	/* mtu + 64-byte offset, constrained to the page size */
	size = cp->dev->mtu + 64;
	if (size > cp->page_size)
		size = cp->page_size;

	/* pick the smallest power-of-two stride (1K..8K) that fits */
	if (size <= 0x400)
		i = 0x0;
	else if (size <= 0x800)
		i = 0x1;
	else if (size <= 0x1000)
		i = 0x2;
	else
		i = 0x3;

	cp->mtu_stride = 1 << (i + 10);
	val = CAS_BASE(RX_PAGE_SIZE, val);
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
	writel(val, cp->regs + REG_RX_PAGE_SIZE);

	/* configure the header parser unless the null program is in use */
	if (CAS_HP_FIRMWARE == cas_prog_null)
		return;

	val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
	val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
	val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
	writel(val, cp->regs + REG_HP_CFG);
}
1348
/* Reinitialize a completion-ring entry: zero it and stamp word4 with
 * the RX_COMP4_ZERO pattern.
 */
static inline void cas_rxc_init(struct cas_rx_comp *rxc)
{
	memset(rxc, 0, sizeof(*rxc));
	rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
}
1354
1355
1356
1357
1358
/* Return the spare page for slot @index if nothing else references it
 * (page_count == 1).  Otherwise dequeue a fresh spare and park the
 * still-referenced page on the in-use list for later recovery.  May
 * return NULL when no spare could be obtained.
 */
static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
{
	cas_page_t *page = cp->rx_pages[1][index];
	cas_page_t *new;

	if (page_count(page->buffer) == 1)
		return page;

	new = cas_page_dequeue(cp);
	if (new) {
		spin_lock(&cp->rx_inuse_lock);
		list_add(&page->list, &cp->rx_inuse_list);
		spin_unlock(&cp->rx_inuse_lock);
	}
	return new;
}
1375
1376
1377static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
1378 const int index)
1379{
1380 cas_page_t **page0 = cp->rx_pages[0];
1381 cas_page_t **page1 = cp->rx_pages[1];
1382
1383
1384 if (page_count(page0[index]->buffer) > 1) {
1385 cas_page_t *new = cas_page_spare(cp, index);
1386 if (new) {
1387 page1[index] = page0[index];
1388 page0[index] = new;
1389 }
1390 }
1391 RX_USED_SET(page0[index], 0);
1392 return page0[index];
1393}
1394
1395static void cas_clean_rxds(struct cas *cp)
1396{
1397
1398 struct cas_rx_desc *rxd = cp->init_rxds[0];
1399 int i, size;
1400
1401
1402 for (i = 0; i < N_RX_FLOWS; i++) {
1403 struct sk_buff *skb;
1404 while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
1405 cas_skb_release(skb);
1406 }
1407 }
1408
1409
1410 size = RX_DESC_RINGN_SIZE(0);
1411 for (i = 0; i < size; i++) {
1412 cas_page_t *page = cas_page_swap(cp, 0, i);
1413 rxd[i].buffer = cpu_to_le64(page->dma_addr);
1414 rxd[i].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
1415 CAS_BASE(RX_INDEX_RING, 0));
1416 }
1417
1418 cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4;
1419 cp->rx_last[0] = 0;
1420 cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
1421}
1422
1423static void cas_clean_rxcs(struct cas *cp)
1424{
1425 int i, j;
1426
1427
1428 memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
1429 memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
1430 for (i = 0; i < N_RX_COMP_RINGS; i++) {
1431 struct cas_rx_comp *rxc = cp->init_rxcs[i];
1432 for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
1433 cas_rxc_init(rxc + j);
1434 }
1435 }
1436}
1437
1438#if 0
1439
1440
1441
1442
1443
1444
/* Reset just the RX MAC + RX DMA path (currently compiled out via #if 0).
 * Disables the RX MAC and RX DMA engines, issues an RX software reset,
 * rebuilds the rx descriptor/completion rings, and re-enables reception.
 * Returns 1 if any disable/reset step times out (caller should then
 * reset the whole chip), 0 on success.
 */
static int cas_rxmac_reset(struct cas *cp)
{
	struct net_device *dev = cp->dev;
	int limit;
	u32 val;

	/* disable the RX MAC and wait for the enable bit to clear */
	writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
		return 1;
	}

	/* disable RX DMA and wait for it to quiesce */
	writel(0, cp->regs + REG_RX_CFG);
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
		return 1;
	}

	/* let in-flight DMA settle before the reset pulse */
	mdelay(5);

	/* issue the RX-only software reset; bit self-clears when done */
	writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
		return 1;
	}

	/* rebuild the rx descriptor and completion rings from scratch */
	cas_clean_rxds(cp);
	cas_clean_rxcs(cp);

	/* reprogram the rx dma engine */
	cas_init_rx_dma(cp);

	/* turn DMA and the RX MAC back on */
	val = readl(cp->regs + REG_RX_CFG);
	writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
	val = readl(cp->regs + REG_MAC_RX_CFG);
	writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	return 0;
}
1504#endif
1505
1506static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
1507 u32 status)
1508{
1509 u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);
1510
1511 if (!stat)
1512 return 0;
1513
1514 netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);
1515
1516
1517 spin_lock(&cp->stat_lock[0]);
1518 if (stat & MAC_RX_ALIGN_ERR)
1519 cp->net_stats[0].rx_frame_errors += 0x10000;
1520
1521 if (stat & MAC_RX_CRC_ERR)
1522 cp->net_stats[0].rx_crc_errors += 0x10000;
1523
1524 if (stat & MAC_RX_LEN_ERR)
1525 cp->net_stats[0].rx_length_errors += 0x10000;
1526
1527 if (stat & MAC_RX_OVERFLOW) {
1528 cp->net_stats[0].rx_over_errors++;
1529 cp->net_stats[0].rx_fifo_errors++;
1530 }
1531
1532
1533
1534
1535 spin_unlock(&cp->stat_lock[0]);
1536 return 0;
1537}
1538
1539static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
1540 u32 status)
1541{
1542 u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);
1543
1544 if (!stat)
1545 return 0;
1546
1547 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1548 "mac interrupt, stat: 0x%x\n", stat);
1549
1550
1551
1552
1553
1554 if (stat & MAC_CTRL_PAUSE_STATE)
1555 cp->pause_entered++;
1556
1557 if (stat & MAC_CTRL_PAUSE_RECEIVED)
1558 cp->pause_last_time_recvd = (stat >> 16);
1559
1560 return 0;
1561}
1562
1563
1564
/* Link-down recovery state machine for MII/MDIO PHYs.  Called from the
 * link timer path when autonegotiation has not produced a link.  Steps
 * the PHY through progressively less capable forced modes:
 * aneg -> forced 1000/100 full -> 100 half -> (give up).
 * Always returns 0.
 */
static inline int cas_mdio_link_not_up(struct cas *cp)
{
	u16 val;

	switch (cp->lstate) {
	case link_force_ret:
		/* we previously forced a mode, retried aneg, and aneg
		 * failed again: go back to the remembered forced mode */
		netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
		cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
		cp->timer_ticks = 5;
		cp->lstate = link_force_ok;
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		break;

	case link_aneg:
		val = cas_phy_read(cp, MII_BMCR);

		/* aneg never completed: turn it off and try forcing the
		 * fastest mode the chip supports (1000FD if capable,
		 * else 100FD) */
		val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
		val |= BMCR_FULLDPLX;
		val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
			CAS_BMCR_SPEED1000 : BMCR_SPEED100;
		cas_phy_write(cp, MII_BMCR, val);
		cp->timer_ticks = 5;
		cp->lstate = link_force_try;
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		break;

	case link_force_try:
		/* downgrade: 1000FD -> 100FD -> 100HD, one step per call */
		val = cas_phy_read(cp, MII_BMCR);
		cp->timer_ticks = 5;
		if (val & CAS_BMCR_SPEED1000) {
			val &= ~CAS_BMCR_SPEED1000;
			val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
			cas_phy_write(cp, MII_BMCR, val);
			break;
		}

		if (val & BMCR_SPEED100) {
			if (val & BMCR_FULLDPLX)
				val &= ~BMCR_FULLDPLX;
			else {
				/* already at half duplex: drop to 10 Mbit */
				val &= ~BMCR_SPEED100;
			}
			cas_phy_write(cp, MII_BMCR, val);
			break;
		}
		/* fall through — nothing left to downgrade */
	default:
		break;
	}
	return 0;
}
1619
1620
1621
/* Evaluate a freshly polled BMSR value and update the link state
 * machine.  Returns nonzero when the caller should restart link
 * bring-up (link just went down), 0 otherwise.
 */
static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
{
	int restart;

	if (bmsr & BMSR_LSTATUS) {
		/* link is up.  If we got here via a forced fallback mode
		 * while the user actually wants autonegotiation, give
		 * aneg one more chance now that the link partner is
		 * responding; remember the forced BMCR so we can return
		 * to it if aneg fails again (link_force_ret). */
		if ((cp->lstate == link_force_try) &&
		    (cp->link_cntl & BMCR_ANENABLE)) {
			cp->lstate = link_force_ret;
			cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
			cas_mif_poll(cp, 0);
			cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
			cp->timer_ticks = 5;
			if (cp->opened)
				netif_info(cp, link, cp->dev,
					   "Got link after fallback, retrying autoneg once...\n");
			cas_phy_write(cp, MII_BMCR,
				      cp->link_fcntl | BMCR_ANENABLE |
				      BMCR_ANRESTART);
			cas_mif_poll(cp, 1);

		} else if (cp->lstate != link_up) {
			cp->lstate = link_up;
			cp->link_transition = LINK_TRANSITION_LINK_UP;

			if (cp->opened) {
				cas_set_link_modes(cp);
				netif_carrier_on(cp->dev);
			}
		}
		return 0;
	}

	/* link is down: if it was previously up, report carrier loss and
	 * ask the caller to restart; otherwise, after ~10 timer ticks
	 * without link, step the mdio fallback state machine. */
	restart = 0;
	if (cp->lstate == link_up) {
		cp->lstate = link_down;
		cp->link_transition = LINK_TRANSITION_LINK_DOWN;

		netif_carrier_off(cp->dev);
		if (cp->opened)
			netif_info(cp, link, cp->dev, "Link down\n");
		restart = 1;

	} else if (++cp->timer_ticks > 10)
		cas_mdio_link_not_up(cp);

	return restart;
}
1677
1678static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
1679 u32 status)
1680{
1681 u32 stat = readl(cp->regs + REG_MIF_STATUS);
1682 u16 bmsr;
1683
1684
1685 if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
1686 return 0;
1687
1688 bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
1689 return cas_mii_link_check(cp, bmsr);
1690}
1691
/* Decode and log a PCI error interrupt.  For PCI_ERR_OTHER the standard
 * PCI status register is read-and-cleared and its individual error bits
 * are reported.  Always returns 1: any PCI error is treated as fatal
 * and the caller resets the chip.
 */
static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
			     u32 status)
{
	u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);

	if (!stat)
		return 0;

	netdev_err(dev, "PCI error [%04x:%04x]",
		   stat, readl(cp->regs + REG_BIM_DIAG));

	/* ACK64 errors only matter on the original (non REG_PLUS) chip */
	if ((stat & PCI_ERR_BADACK) &&
	    ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
		pr_cont(" <No ACK64# during ABS64 cycle>");

	if (stat & PCI_ERR_DTRTO)
		pr_cont(" <Delayed transaction timeout>");
	if (stat & PCI_ERR_OTHER)
		pr_cont(" <other>");
	if (stat & PCI_ERR_BIM_DMA_WRITE)
		pr_cont(" <BIM DMA 0 write req>");
	if (stat & PCI_ERR_BIM_DMA_READ)
		pr_cont(" <BIM DMA 0 read req>");
	pr_cont("\n");

	if (stat & PCI_ERR_OTHER) {
		int pci_errs;

		/* "other" means some config-space error: fetch and clear
		 * the PCI status bits, then report each one */
		pci_errs = pci_status_get_and_clear_errors(cp->pdev);

		netdev_err(dev, "PCI status errors[%04x]\n", pci_errs);
		if (pci_errs & PCI_STATUS_PARITY)
			netdev_err(dev, "PCI parity error detected\n");
		if (pci_errs & PCI_STATUS_SIG_TARGET_ABORT)
			netdev_err(dev, "PCI target abort\n");
		if (pci_errs & PCI_STATUS_REC_TARGET_ABORT)
			netdev_err(dev, "PCI master acks target abort\n");
		if (pci_errs & PCI_STATUS_REC_MASTER_ABORT)
			netdev_err(dev, "PCI master abort\n");
		if (pci_errs & PCI_STATUS_SIG_SYSTEM_ERROR)
			netdev_err(dev, "PCI system error SERR#\n");
		if (pci_errs & PCI_STATUS_DETECTED_PARITY)
			netdev_err(dev, "PCI parity error\n");
	}

	/* a PCI error is always fatal: force a chip reset */
	return 1;
}
1744
1745
1746
1747
1748
1749
/* Dispatch the "abnormal" interrupt causes (everything that is not a
 * normal rx/tx completion).  Each sub-handler returns nonzero when the
 * condition is fatal; in that case a full chip reset is scheduled via
 * the reset workqueue and 1 is returned.  Returns 0 when everything was
 * handled without needing a reset.
 */
static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
			    u32 status)
{
	if (status & INTR_RX_TAG_ERROR) {
		/* corrupt RX tag framing: fatal, count and reset */
		netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
			     "corrupt rx tag framing\n");
		spin_lock(&cp->stat_lock[0]);
		cp->net_stats[0].rx_errors++;
		spin_unlock(&cp->stat_lock[0]);
		goto do_reset;
	}

	if (status & INTR_RX_LEN_MISMATCH) {
		/* hardware-detected length mismatch: fatal as well */
		netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
			     "length mismatch for rx frame\n");
		spin_lock(&cp->stat_lock[0]);
		cp->net_stats[0].rx_errors++;
		spin_unlock(&cp->stat_lock[0]);
		goto do_reset;
	}

	if (status & INTR_PCS_STATUS) {
		if (cas_pcs_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_TX_MAC_STATUS) {
		if (cas_txmac_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_RX_MAC_STATUS) {
		if (cas_rxmac_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_MAC_CTRL_STATUS) {
		if (cas_mac_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_MIF_STATUS) {
		if (cas_mif_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_PCI_ERROR_STATUS) {
		if (cas_pci_interrupt(dev, cp, status))
			goto do_reset;
	}
	return 0;

do_reset:
#if 1
	/* counter-based pending scheme (both counters bumped = full reset) */
	atomic_inc(&cp->reset_task_pending);
	atomic_inc(&cp->reset_task_pending_all);
	netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
	schedule_work(&cp->reset_task);
#else
	atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
	netdev_err(dev, "reset called in cas_abnormal_irq\n");
	schedule_work(&cp->reset_task);
#endif
	return 1;
}
1817
1818
1819
1820
1821#define CAS_TABORT(x) (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
1822#define CAS_ROUND_PAGE(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)
1823static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
1824 const int len)
1825{
1826 unsigned long off = addr + len;
1827
1828 if (CAS_TABORT(cp) == 1)
1829 return 0;
1830 if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
1831 return 0;
1832 return TX_TARGET_ABORT_LEN;
1833}
1834
/* Reclaim completed tx descriptors on @ring up to hardware index
 * @limit: unmap each fragment's DMA mapping, account tx stats, free the
 * skbs, and wake the queue if enough descriptors became available.
 * Note: @entry is advanced *inside* the fragment loop because a packet
 * occupies one descriptor per fragment plus optional "tiny" workaround
 * buffers interleaved between them.
 */
static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
{
	struct cas_tx_desc *txds;
	struct sk_buff **skbs;
	struct net_device *dev = cp->dev;
	int entry, count;

	spin_lock(&cp->tx_lock[ring]);
	txds = cp->init_txds[ring];
	skbs = cp->tx_skbs[ring];
	entry = cp->tx_old[ring];

	count = TX_BUFF_COUNT(ring, entry, limit);
	while (entry != limit) {
		struct sk_buff *skb = skbs[entry];
		dma_addr_t daddr;
		u32 dlen;
		int frag;

		if (!skb) {
			/* this entry was a tiny buffer, not a packet head */
			entry = TX_DESC_NEXT(ring, entry);
			continue;
		}

		/* make sure the whole packet (all frags + tiny bufs) has
		 * completed before reclaiming any of it */
		count -= skb_shinfo(skb)->nr_frags +
			+ cp->tx_tiny_use[ring][entry].nbufs + 1;
		if (count < 0)
			break;

		netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
			     "tx[%d] done, slot %d\n", ring, entry);

		skbs[entry] = NULL;
		cp->tx_tiny_use[ring][entry].nbufs = 0;

		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
			struct cas_tx_desc *txd = txds + entry;

			daddr = le64_to_cpu(txd->buffer);
			dlen = CAS_VAL(TX_DESC_BUFLEN,
				       le64_to_cpu(txd->control));
			pci_unmap_page(cp->pdev, daddr, dlen,
				       PCI_DMA_TODEVICE);
			entry = TX_DESC_NEXT(ring, entry);

			/* skip over any tiny-buffer descriptor that was
			 * inserted after this fragment */
			if (cp->tx_tiny_use[ring][entry].used) {
				cp->tx_tiny_use[ring][entry].used = 0;
				entry = TX_DESC_NEXT(ring, entry);
			}
		}

		spin_lock(&cp->stat_lock[ring]);
		cp->net_stats[ring].tx_packets++;
		cp->net_stats[ring].tx_bytes += skb->len;
		spin_unlock(&cp->stat_lock[ring]);
		dev_consume_skb_irq(skb);
	}
	cp->tx_old[ring] = entry;

	/* wake the queue only when a full worst-case packet
	 * (MAX_SKB_FRAGS + 1 descriptors, doubled on tabort chips) fits */
	if (netif_queue_stopped(dev) &&
	    (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
		netif_wake_queue(dev);
	spin_unlock(&cp->tx_lock[ring]);
}
1906
1907static void cas_tx(struct net_device *dev, struct cas *cp,
1908 u32 status)
1909{
1910 int limit, ring;
1911#ifdef USE_TX_COMPWB
1912 u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
1913#endif
1914 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1915 "tx interrupt, status: 0x%x, %llx\n",
1916 status, (unsigned long long)compwb);
1917
1918 for (ring = 0; ring < N_TX_RINGS; ring++) {
1919#ifdef USE_TX_COMPWB
1920
1921 limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
1922 CAS_VAL(TX_COMPWB_LSB, compwb);
1923 compwb = TX_COMPWB_NEXT(compwb);
1924#else
1925 limit = readl(cp->regs + REG_TX_COMPN(ring));
1926#endif
1927 if (cp->tx_old[ring] != limit)
1928 cas_tx_ringN(cp, ring, limit);
1929 }
1930}
1931
1932
/* Build an skb from one rx completion entry.  The chip may have split
 * the frame into a header portion and a data portion living on separate
 * rx pages, and large frames are attached as page fragments rather than
 * copied.  On success the new skb is stored through @skbref and the
 * frame length (header + data, excluding trailing CRC) is returned;
 * returns -1 on allocation failure or a malformed completion.
 */
static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
			      int entry, const u64 *words,
			      struct sk_buff **skbref)
{
	int dlen, hlen, len, i, alloclen;
	int off, swivel = RX_SWIVEL_OFF_VAL;
	struct cas_page *page;
	struct sk_buff *skb;
	void *addr, *crcaddr;
	__sum16 csum;
	char *p;

	hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
	dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
	len = hlen + dlen;

	/* small packets are copied entirely; otherwise only the first
	 * RX_COPY_MIN bytes (or just the header) go in the linear area */
	if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
		alloclen = len;
	else
		alloclen = max(hlen, RX_COPY_MIN);

	skb = netdev_alloc_skb(cp->dev, alloclen + swivel + cp->crc_size);
	if (skb == NULL)
		return -1;

	*skbref = skb;
	skb_reserve(skb, swivel);

	p = skb->data;
	addr = crcaddr = NULL;
	if (hlen) {
		/* copy the header portion out of its rx page */
		i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
		off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
			swivel;

		i = hlen;
		if (!dlen)		/* header-only frame: CRC follows it */
			i += cp->crc_size;
		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
					    PCI_DMA_FROMDEVICE);
		addr = cas_page_map(page->buffer);
		memcpy(p, addr + off, i);
		pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
					       PCI_DMA_FROMDEVICE);
		cas_page_unmap(addr);
		RX_USED_ADD(page, 0x100);
		p += hlen;
		swivel = 0;
	}

	/* fragment path: the whole frame does not fit in the linear area */
	if (alloclen < (hlen + dlen)) {
		skb_frag_t *frag = skb_shinfo(skb)->frags;

		/* locate the data page */
		i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
		off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;

		/* hlen is reused here as "bytes on this page" */
		hlen = min(cp->page_size - off, dlen);
		if (hlen < 0) {
			netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
				     "rx page overflow: %d\n", hlen);
			dev_kfree_skb_irq(skb);
			return -1;
		}
		i = hlen;
		if (i == dlen)		/* all data here: CRC follows */
			i += cp->crc_size;
		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
					    PCI_DMA_FROMDEVICE);

		/* with no header copied above, prime the linear area with
		 * the first RX_COPY_MIN bytes so protocol headers are
		 * directly accessible; the rest is attached as a frag */
		swivel = 0;
		if (p == (char *) skb->data) {	/* not split */
			addr = cas_page_map(page->buffer);
			memcpy(p, addr + off, RX_COPY_MIN);
			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
						       PCI_DMA_FROMDEVICE);
			cas_page_unmap(addr);
			off += RX_COPY_MIN;
			swivel = RX_COPY_MIN;
			RX_USED_ADD(page, cp->mtu_stride);
		} else {
			RX_USED_ADD(page, hlen);
		}
		skb_put(skb, alloclen);

		skb_shinfo(skb)->nr_frags++;
		skb->data_len += hlen - swivel;
		skb->truesize += hlen - swivel;
		skb->len += hlen - swivel;

		__skb_frag_set_page(frag, page->buffer);
		__skb_frag_ref(frag);
		skb_frag_off_set(frag, off);
		skb_frag_size_set(frag, hlen - swivel);

		/* any more data? attach the continuation page as a frag */
		if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
			hlen = dlen;
			off = 0;

			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
			pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
						    hlen + cp->crc_size,
						    PCI_DMA_FROMDEVICE);
			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
						       hlen + cp->crc_size,
						       PCI_DMA_FROMDEVICE);

			skb_shinfo(skb)->nr_frags++;
			skb->data_len += hlen;
			skb->len += hlen;
			frag++;

			__skb_frag_set_page(frag, page->buffer);
			__skb_frag_ref(frag);
			skb_frag_off_set(frag, 0);
			skb_frag_size_set(frag, hlen);
			RX_USED_ADD(page, hlen + cp->crc_size);
		}

		/* CRC lives on the last page, right after the data */
		if (cp->crc_size) {
			addr = cas_page_map(page->buffer);
			crcaddr = addr + off + hlen;
		}

	} else {
		/* copy path: the whole frame fits in the linear area */
		if (!dlen)
			goto end_copy_pkt;

		i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
		off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
		hlen = min(cp->page_size - off, dlen);
		if (hlen < 0) {
			netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
				     "rx page overflow: %d\n", hlen);
			dev_kfree_skb_irq(skb);
			return -1;
		}
		i = hlen;
		if (i == dlen)		/* all data here: CRC follows */
			i += cp->crc_size;
		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
					    PCI_DMA_FROMDEVICE);
		addr = cas_page_map(page->buffer);
		memcpy(p, addr + off, i);
		pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
					       PCI_DMA_FROMDEVICE);
		cas_page_unmap(addr);
		if (p == (char *) skb->data)	/* not split */
			RX_USED_ADD(page, cp->mtu_stride);
		else
			RX_USED_ADD(page, i);

		/* copy the remainder from the continuation page */
		if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
			p += hlen;
			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
			pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
						    dlen + cp->crc_size,
						    PCI_DMA_FROMDEVICE);
			addr = cas_page_map(page->buffer);
			memcpy(p, addr, dlen + cp->crc_size);
			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
						       dlen + cp->crc_size,
						       PCI_DMA_FROMDEVICE);
			cas_page_unmap(addr);
			RX_USED_ADD(page, dlen + cp->crc_size);
		}
end_copy_pkt:
		if (cp->crc_size) {
			addr = NULL;
			crcaddr = skb->data + alloclen;
		}
		skb_put(skb, alloclen);
	}

	/* the chip checksummed the frame without the trailing CRC; if the
	 * CRC was kept, fold it back out of the checksum */
	csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
	if (cp->crc_size) {
		csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
					      csum_unfold(csum)));
		if (addr)
			cas_page_unmap(addr);
	}
	skb->protocol = eth_type_trans(skb, cp->dev);
	if (skb->protocol == htons(ETH_P_IP)) {
		skb->csum = csum_unfold(~csum);
		skb->ip_summed = CHECKSUM_COMPLETE;
	} else
		skb_checksum_none_assert(skb);
	return len;
}
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
2150 struct sk_buff *skb)
2151{
2152 int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
2153 struct sk_buff_head *flow = &cp->rx_flows[flowid];
2154
2155
2156
2157
2158
2159 __skb_queue_tail(flow, skb);
2160 if (words[0] & RX_COMP1_RELEASE_FLOW) {
2161 while ((skb = __skb_dequeue(flow))) {
2162 cas_skb_release(skb);
2163 }
2164 }
2165}
2166
2167
2168
2169
2170static void cas_post_page(struct cas *cp, const int ring, const int index)
2171{
2172 cas_page_t *new;
2173 int entry;
2174
2175 entry = cp->rx_old[ring];
2176
2177 new = cas_page_swap(cp, ring, index);
2178 cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
2179 cp->init_rxds[ring][entry].index =
2180 cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
2181 CAS_BASE(RX_INDEX_RING, ring));
2182
2183 entry = RX_DESC_ENTRY(ring, entry + 1);
2184 cp->rx_old[ring] = entry;
2185
2186 if (entry % 4)
2187 return;
2188
2189 if (ring == 0)
2190 writel(entry, cp->regs + REG_RX_KICK);
2191 else if ((N_RX_DESC_RINGS > 1) &&
2192 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2193 writel(entry, cp->regs + REG_PLUS_RX_KICK1);
2194}
2195
2196
2197
/* Refill rx descriptor ring @ring with up to @num fresh pages (@num ==
 * 0 means refill the whole ring).  Pages still referenced by the stack
 * are replaced from the free pool and parked on the rx_inuse list.
 * When the pool runs dry, progress so far is recorded and the link
 * timer is armed to retry; returns -ENOMEM in that case, 0 otherwise.
 */
static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
{
	unsigned int entry, last, count, released;
	int cluster;
	cas_page_t **page = cp->rx_pages[ring];

	entry = cp->rx_old[ring];

	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
		     "rxd[%d] interrupt, done: %d\n", ring, entry);

	cluster = -1;
	/* count tracks progress toward the next 4-entry kick boundary */
	count = entry & 0x3;
	last = RX_DESC_ENTRY(ring, num ? entry + num - 4: entry - 4);
	released = 0;
	while (entry != last) {
		/* replace a page only if someone else still holds it */
		if (page_count(page[entry]->buffer) > 1) {
			cas_page_t *new = cas_page_dequeue(cp);
			if (!new) {
				/* out of spare pages: remember where we
				 * stopped and let the link timer finish
				 * the refill later */
				cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
				if (!timer_pending(&cp->link_timer))
					mod_timer(&cp->link_timer, jiffies +
						  CAS_LINK_FAST_TIMEOUT);
				cp->rx_old[ring] = entry;
				cp->rx_last[ring] = num ? num - released : 0;
				return -ENOMEM;
			}
			spin_lock(&cp->rx_inuse_lock);
			list_add(&page[entry]->list, &cp->rx_inuse_list);
			spin_unlock(&cp->rx_inuse_lock);
			cp->init_rxds[ring][entry].buffer =
				cpu_to_le64(new->dma_addr);
			page[entry] = new;

		}

		/* remember the last 4-aligned entry for the kick below */
		if (++count == 4) {
			cluster = entry;
			count = 0;
		}
		released++;
		entry = RX_DESC_ENTRY(ring, entry + 1);
	}
	cp->rx_old[ring] = entry;

	if (cluster < 0)
		return 0;

	if (ring == 0)
		writel(cluster, cp->regs + REG_RX_KICK);
	else if ((N_RX_DESC_RINGS > 1) &&
		 (cp->cas_flags & CAS_FLAG_REG_PLUS))
		writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
	return 0;
}
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
/* Process completed rx entries on completion ring @ring, up to @budget
 * packets (@budget == 0 means no limit unless NAPI is compiled in).
 * For each valid completion: build an skb, hand it to the stack (or to
 * the flow-batching queue), and return any released rx pages to the
 * hardware.  Returns the number of completions consumed.
 */
static int cas_rx_ringN(struct cas *cp, int ring, int budget)
{
	struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
	int entry, drops;
	int npackets = 0;

	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
		     "rx[%d] interrupt, done: %d/%d\n",
		     ring,
		     readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);

	entry = cp->rx_new[ring];
	drops = 0;
	while (1) {
		struct cas_rx_comp *rxc = rxcs + entry;
		struct sk_buff *uninitialized_var(skb);
		int type, len;
		u64 words[4];
		int i, dring;

		words[0] = le64_to_cpu(rxc->word1);
		words[1] = le64_to_cpu(rxc->word2);
		words[2] = le64_to_cpu(rxc->word3);
		words[3] = le64_to_cpu(rxc->word4);

		/* type 0 means the hardware hasn't written this entry */
		type = CAS_VAL(RX_COMP1_TYPE, words[0]);
		if (type == 0)
			break;

		/* ZERO bit still set: entry only partially written */
		if (words[3] & RX_COMP4_ZERO) {
			break;
		}

		/* hardware-flagged bad frame: count and drop */
		if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
			spin_lock(&cp->stat_lock[ring]);
			cp->net_stats[ring].rx_errors++;
			if (words[3] & RX_COMP4_LEN_MISMATCH)
				cp->net_stats[ring].rx_length_errors++;
			if (words[3] & RX_COMP4_BAD)
				cp->net_stats[ring].rx_crc_errors++;
			spin_unlock(&cp->stat_lock[ring]);

			/* also reached on skb allocation failure below */
		drop_it:
			spin_lock(&cp->stat_lock[ring]);
			++cp->net_stats[ring].rx_dropped;
			spin_unlock(&cp->stat_lock[ring]);
			goto next;
		}

		len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
		if (len < 0) {
			++drops;
			goto drop_it;
		}

		/* non-flow packets (type 0x2) go straight up the stack;
		 * everything else may be batched per flow for ordering */
		if (RX_DONT_BATCH || (type == 0x2)) {
			/* non-TCP or error-free non-batched packet */
			cas_skb_release(skb);
		} else {
			cas_rx_flow_pkt(cp, words, skb);
		}

		spin_lock(&cp->stat_lock[ring]);
		cp->net_stats[ring].rx_packets++;
		cp->net_stats[ring].rx_bytes += len;
		spin_unlock(&cp->stat_lock[ring]);

	next:
		npackets++;

		/* give any released header/data/next pages back to hw */
		if (words[0] & RX_COMP1_RELEASE_HDR) {
			i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
			dring = CAS_VAL(RX_INDEX_RING, i);
			i = CAS_VAL(RX_INDEX_NUM, i);
			cas_post_page(cp, dring, i);
		}

		if (words[0] & RX_COMP1_RELEASE_DATA) {
			i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
			dring = CAS_VAL(RX_INDEX_RING, i);
			i = CAS_VAL(RX_INDEX_NUM, i);
			cas_post_page(cp, dring, i);
		}

		if (words[0] & RX_COMP1_RELEASE_NEXT) {
			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
			dring = CAS_VAL(RX_INDEX_RING, i);
			i = CAS_VAL(RX_INDEX_NUM, i);
			cas_post_page(cp, dring, i);
		}

		/* SKIP encodes extra completion entries used by this frame */
		entry = RX_COMP_ENTRY(ring, entry + 1 +
				      CAS_VAL(RX_COMP1_SKIP, words[0]));
#ifdef USE_NAPI
		if (budget && (npackets >= budget))
			break;
#endif
	}
	cp->rx_new[ring] = entry;

	if (drops)
		netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
	return npackets;
}
2384
2385
2386
2387static void cas_post_rxcs_ringN(struct net_device *dev,
2388 struct cas *cp, int ring)
2389{
2390 struct cas_rx_comp *rxc = cp->init_rxcs[ring];
2391 int last, entry;
2392
2393 last = cp->rx_cur[ring];
2394 entry = cp->rx_new[ring];
2395 netif_printk(cp, intr, KERN_DEBUG, dev,
2396 "rxc[%d] interrupt, done: %d/%d\n",
2397 ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);
2398
2399
2400 while (last != entry) {
2401 cas_rxc_init(rxc + last);
2402 last = RX_COMP_ENTRY(ring, last + 1);
2403 }
2404 cp->rx_cur[ring] = last;
2405
2406 if (ring == 0)
2407 writel(last, cp->regs + REG_RX_COMP_TAIL);
2408 else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
2409 writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
2410}
2411
2412
2413
2414
2415
2416
2417#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
2418static inline void cas_handle_irqN(struct net_device *dev,
2419 struct cas *cp, const u32 status,
2420 const int ring)
2421{
2422 if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
2423 cas_post_rxcs_ringN(dev, cp, ring);
2424}
2425
2426static irqreturn_t cas_interruptN(int irq, void *dev_id)
2427{
2428 struct net_device *dev = dev_id;
2429 struct cas *cp = netdev_priv(dev);
2430 unsigned long flags;
2431 int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
2432 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
2433
2434
2435 if (status == 0)
2436 return IRQ_NONE;
2437
2438 spin_lock_irqsave(&cp->lock, flags);
2439 if (status & INTR_RX_DONE_ALT) {
2440#ifdef USE_NAPI
2441 cas_mask_intr(cp);
2442 napi_schedule(&cp->napi);
2443#else
2444 cas_rx_ringN(cp, ring, 0);
2445#endif
2446 status &= ~INTR_RX_DONE_ALT;
2447 }
2448
2449 if (status)
2450 cas_handle_irqN(dev, cp, status, ring);
2451 spin_unlock_irqrestore(&cp->lock, flags);
2452 return IRQ_HANDLED;
2453}
2454#endif
2455
2456#ifdef USE_PCI_INTB
2457
2458static inline void cas_handle_irq1(struct cas *cp, const u32 status)
2459{
2460 if (status & INTR_RX_BUF_UNAVAIL_1) {
2461
2462
2463 cas_post_rxds_ringN(cp, 1, 0);
2464 spin_lock(&cp->stat_lock[1]);
2465 cp->net_stats[1].rx_dropped++;
2466 spin_unlock(&cp->stat_lock[1]);
2467 }
2468
2469 if (status & INTR_RX_BUF_AE_1)
2470 cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
2471 RX_AE_FREEN_VAL(1));
2472
2473 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2474 cas_post_rxcs_ringN(cp, 1);
2475}
2476
2477
2478static irqreturn_t cas_interrupt1(int irq, void *dev_id)
2479{
2480 struct net_device *dev = dev_id;
2481 struct cas *cp = netdev_priv(dev);
2482 unsigned long flags;
2483 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2484
2485
2486 if (status == 0)
2487 return IRQ_NONE;
2488
2489 spin_lock_irqsave(&cp->lock, flags);
2490 if (status & INTR_RX_DONE_ALT) {
2491#ifdef USE_NAPI
2492 cas_mask_intr(cp);
2493 napi_schedule(&cp->napi);
2494#else
2495 cas_rx_ringN(cp, 1, 0);
2496#endif
2497 status &= ~INTR_RX_DONE_ALT;
2498 }
2499 if (status)
2500 cas_handle_irq1(cp, status);
2501 spin_unlock_irqrestore(&cp->lock, flags);
2502 return IRQ_HANDLED;
2503}
2504#endif
2505
2506static inline void cas_handle_irq(struct net_device *dev,
2507 struct cas *cp, const u32 status)
2508{
2509
2510 if (status & INTR_ERROR_MASK)
2511 cas_abnormal_irq(dev, cp, status);
2512
2513 if (status & INTR_RX_BUF_UNAVAIL) {
2514
2515
2516
2517 cas_post_rxds_ringN(cp, 0, 0);
2518 spin_lock(&cp->stat_lock[0]);
2519 cp->net_stats[0].rx_dropped++;
2520 spin_unlock(&cp->stat_lock[0]);
2521 } else if (status & INTR_RX_BUF_AE) {
2522 cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
2523 RX_AE_FREEN_VAL(0));
2524 }
2525
2526 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2527 cas_post_rxcs_ringN(dev, cp, 0);
2528}
2529
2530static irqreturn_t cas_interrupt(int irq, void *dev_id)
2531{
2532 struct net_device *dev = dev_id;
2533 struct cas *cp = netdev_priv(dev);
2534 unsigned long flags;
2535 u32 status = readl(cp->regs + REG_INTR_STATUS);
2536
2537 if (status == 0)
2538 return IRQ_NONE;
2539
2540 spin_lock_irqsave(&cp->lock, flags);
2541 if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
2542 cas_tx(dev, cp, status);
2543 status &= ~(INTR_TX_ALL | INTR_TX_INTME);
2544 }
2545
2546 if (status & INTR_RX_DONE) {
2547#ifdef USE_NAPI
2548 cas_mask_intr(cp);
2549 napi_schedule(&cp->napi);
2550#else
2551 cas_rx_ringN(cp, 0, 0);
2552#endif
2553 status &= ~INTR_RX_DONE;
2554 }
2555
2556 if (status)
2557 cas_handle_irq(dev, cp, status);
2558 spin_unlock_irqrestore(&cp->lock, flags);
2559 return IRQ_HANDLED;
2560}
2561
2562
2563#ifdef USE_NAPI
2564static int cas_poll(struct napi_struct *napi, int budget)
2565{
2566 struct cas *cp = container_of(napi, struct cas, napi);
2567 struct net_device *dev = cp->dev;
2568 int i, enable_intr, credits;
2569 u32 status = readl(cp->regs + REG_INTR_STATUS);
2570 unsigned long flags;
2571
2572 spin_lock_irqsave(&cp->lock, flags);
2573 cas_tx(dev, cp, status);
2574 spin_unlock_irqrestore(&cp->lock, flags);
2575
2576
2577
2578
2579
2580
2581
2582
2583 enable_intr = 1;
2584 credits = 0;
2585 for (i = 0; i < N_RX_COMP_RINGS; i++) {
2586 int j;
2587 for (j = 0; j < N_RX_COMP_RINGS; j++) {
2588 credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
2589 if (credits >= budget) {
2590 enable_intr = 0;
2591 goto rx_comp;
2592 }
2593 }
2594 }
2595
2596rx_comp:
2597
2598 spin_lock_irqsave(&cp->lock, flags);
2599 if (status)
2600 cas_handle_irq(dev, cp, status);
2601
2602#ifdef USE_PCI_INTB
2603 if (N_RX_COMP_RINGS > 1) {
2604 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2605 if (status)
2606 cas_handle_irq1(dev, cp, status);
2607 }
2608#endif
2609
2610#ifdef USE_PCI_INTC
2611 if (N_RX_COMP_RINGS > 2) {
2612 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
2613 if (status)
2614 cas_handle_irqN(dev, cp, status, 2);
2615 }
2616#endif
2617
2618#ifdef USE_PCI_INTD
2619 if (N_RX_COMP_RINGS > 3) {
2620 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
2621 if (status)
2622 cas_handle_irqN(dev, cp, status, 3);
2623 }
2624#endif
2625 spin_unlock_irqrestore(&cp->lock, flags);
2626 if (enable_intr) {
2627 napi_complete(napi);
2628 cas_unmask_intr(cp);
2629 }
2630 return credits;
2631}
2632#endif
2633
2634#ifdef CONFIG_NET_POLL_CONTROLLER
/* netconsole/netpoll hook: run the primary interrupt handler with its
 * interrupt line disabled.  The secondary interrupt lines (INTB/C/D)
 * are currently not serviced here — the #ifdef stubs below are empty.
 */
static void cas_netpoll(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);

	cas_disable_irq(cp, 0);
	cas_interrupt(cp->pdev->irq, dev);
	cas_enable_irq(cp, 0);

#ifdef USE_PCI_INTB
	if (N_RX_COMP_RINGS > 1) {
		/* TODO: INTB line not handled in netpoll */
	}
#endif
#ifdef USE_PCI_INTC
	if (N_RX_COMP_RINGS > 2) {
		/* TODO: INTC line not handled in netpoll */
	}
#endif
#ifdef USE_PCI_INTD
	if (N_RX_COMP_RINGS > 3) {
		/* TODO: INTD line not handled in netpoll */
	}
#endif
}
2659#endif
2660
/* ndo_tx_timeout handler: dump the DMA/MAC/FIFO state-machine registers
 * for postmortem debugging, then schedule a full chip reset from process
 * context via reset_task.
 */
static void cas_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct cas *cp = netdev_priv(dev);

	netdev_err(dev, "transmit timed out, resetting\n");
	if (!cp->hw_running) {
		netdev_err(dev, "hrm.. hw not running!\n");
		return;
	}

	netdev_err(dev, "MIF_STATE[%08x]\n",
		   readl(cp->regs + REG_MIF_STATE_MACHINE));

	netdev_err(dev, "MAC_STATE[%08x]\n",
		   readl(cp->regs + REG_MAC_STATE_MACHINE));

	netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
		   readl(cp->regs + REG_TX_CFG),
		   readl(cp->regs + REG_MAC_TX_STATUS),
		   readl(cp->regs + REG_MAC_TX_CFG),
		   readl(cp->regs + REG_TX_FIFO_PKT_CNT),
		   readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
		   readl(cp->regs + REG_TX_FIFO_READ_PTR),
		   readl(cp->regs + REG_TX_SM_1),
		   readl(cp->regs + REG_TX_SM_2));

	netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
		   readl(cp->regs + REG_RX_CFG),
		   readl(cp->regs + REG_MAC_RX_STATUS),
		   readl(cp->regs + REG_MAC_RX_CFG));

	netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n",
		   readl(cp->regs + REG_HP_STATE_MACHINE),
		   readl(cp->regs + REG_HP_STATUS0),
		   readl(cp->regs + REG_HP_STATUS1),
		   readl(cp->regs + REG_HP_STATUS2));

#if 1
	/* counter-based variant: request a reset of everything */
	atomic_inc(&cp->reset_task_pending);
	atomic_inc(&cp->reset_task_pending_all);
	schedule_work(&cp->reset_task);
#else
	atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
	schedule_work(&cp->reset_task);
#endif
}
2707
/* tx interrupt mitigation: request a completion interrupt only on the
 * entries that sit at a half-ring boundary
 */
static inline int cas_intme(int ring, int entry)
{
	int boundary_mask = (TX_DESC_RINGN_SIZE(ring) >> 1) - 1;

	return (entry & boundary_mask) == 0;
}
2715
2716
2717static void cas_write_txd(struct cas *cp, int ring, int entry,
2718 dma_addr_t mapping, int len, u64 ctrl, int last)
2719{
2720 struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
2721
2722 ctrl |= CAS_BASE(TX_DESC_BUFLEN, len);
2723 if (cas_intme(ring, entry))
2724 ctrl |= TX_DESC_INTME;
2725 if (last)
2726 ctrl |= TX_DESC_EOF;
2727 txd->control = cpu_to_le64(ctrl);
2728 txd->buffer = cpu_to_le64(mapping);
2729}
2730
2731static inline void *tx_tiny_buf(struct cas *cp, const int ring,
2732 const int entry)
2733{
2734 return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
2735}
2736
2737static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
2738 const int entry, const int tentry)
2739{
2740 cp->tx_tiny_use[ring][tentry].nbufs++;
2741 cp->tx_tiny_use[ring][entry].used = 1;
2742 return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
2743}
2744
/* queue one skb on tx ring @ring: map the linear head and each page
 * fragment for DMA, splitting any buffer that trips the tx-abort
 * workaround (cas_calc_tabort) across an extra "tiny buffer" descriptor.
 * Returns 0 on success, 1 if the ring was unexpectedly full.
 */
static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
				    struct sk_buff *skb)
{
	struct net_device *dev = cp->dev;
	int entry, nr_frags, frag, tabort, tentry;
	dma_addr_t mapping;
	unsigned long flags;
	u64 ctrl;
	u32 len;

	spin_lock_irqsave(&cp->tx_lock[ring], flags);

	/* worst case: each buffer may consume CAS_TABORT() descriptors
	 * because of the workaround split below
	 */
	if (TX_BUFFS_AVAIL(cp, ring) <=
	    CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		return 1;
	}

	/* checksum offload: tell the chip where to start summing and
	 * where to stuff the result
	 */
	ctrl = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const u64 csum_start_off = skb_checksum_start_offset(skb);
		const u64 csum_stuff_off = csum_start_off + skb->csum_offset;

		ctrl = TX_DESC_CSUM_EN |
			CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
			CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
	}

	entry = cp->tx_new[ring];
	cp->tx_skbs[ring][entry] = skb;

	nr_frags = skb_shinfo(skb)->nr_frags;
	len = skb_headlen(skb);
	mapping = pci_map_page(cp->pdev, virt_to_page(skb->data),
			       offset_in_page(skb->data), len,
			       PCI_DMA_TODEVICE);

	tentry = entry;
	tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
	if (unlikely(tabort)) {
		/* bounce the offending tail of the linear data through a
		 * tiny buffer on a second descriptor
		 */
		cas_write_txd(cp, ring, entry, mapping, len - tabort,
			      ctrl | TX_DESC_SOF, 0);
		entry = TX_DESC_NEXT(ring, entry);

		skb_copy_from_linear_data_offset(skb, len - tabort,
			      tx_tiny_buf(cp, ring, entry), tabort);
		mapping = tx_tiny_map(cp, ring, entry, tentry);
		cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
			      (nr_frags == 0));
	} else {
		cas_write_txd(cp, ring, entry, mapping, len, ctrl |
			      TX_DESC_SOF, (nr_frags == 0));
	}
	entry = TX_DESC_NEXT(ring, entry);

	for (frag = 0; frag < nr_frags; frag++) {
		const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];

		len = skb_frag_size(fragp);
		mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
					   DMA_TO_DEVICE);

		tabort = cas_calc_tabort(cp, skb_frag_off(fragp), len);
		if (unlikely(tabort)) {
			void *addr;

			/* same workaround for page fragments: copy the
			 * tail into a tiny buffer
			 */
			cas_write_txd(cp, ring, entry, mapping, len - tabort,
				      ctrl, 0);
			entry = TX_DESC_NEXT(ring, entry);

			addr = cas_page_map(skb_frag_page(fragp));
			memcpy(tx_tiny_buf(cp, ring, entry),
			       addr + skb_frag_off(fragp) + len - tabort,
			       tabort);
			cas_page_unmap(addr);
			mapping = tx_tiny_map(cp, ring, entry, tentry);
			len = tabort;
		}

		cas_write_txd(cp, ring, entry, mapping, len, ctrl,
			      (frag + 1 == nr_frags));
		entry = TX_DESC_NEXT(ring, entry);
	}

	cp->tx_new[ring] = entry;
	/* stop the queue while fewer than one worst-case packet's worth
	 * of descriptors remain
	 */
	if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	netif_printk(cp, tx_queued, KERN_DEBUG, dev,
		     "tx[%d] queued, slot %d, skblen %d, avail %d\n",
		     ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
	/* kick the chip to start DMA on the newly queued entries */
	writel(entry, cp->regs + REG_TX_KICKN(ring));
	spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
	return 0;
}
2845
/* ndo_start_xmit: pad short frames up to the hardware minimum and hand
 * the skb to one of the tx rings, chosen round-robin.
 */
static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);

	/* round-robin ring selector.  NOTE(review): this static counter
	 * is shared across all cassini devices and updated without
	 * synchronization -- presumably tolerated because any value
	 * still maps to a valid ring after masking; confirm.
	 */
	static int ring;

	if (skb_padto(skb, cp->min_frame_size))
		return NETDEV_TX_OK;

	/* cas_xmit_tx_ringN() returns nonzero only when the chosen ring
	 * is full; tell the stack to retry later
	 */
	if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
		return NETDEV_TX_BUSY;
	return NETDEV_TX_OK;
}
2865
/* program the tx DMA engine: completion writeback address, per-ring
 * descriptor base addresses, and per-ring max burst sizes
 */
static void cas_init_tx_dma(struct cas *cp)
{
	u64 desc_dma = cp->block_dvma;
	unsigned long off;
	u32 val;
	int i;

	/* completion writeback: the chip DMAs completion indices into
	 * host memory (tx_compwb in the init block)
	 */
#ifdef USE_TX_COMPWB
	off = offsetof(struct cas_init_block, tx_compwb);
	writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
	writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
#endif

	/* enable completion writebacks on all queues, disable DMA read
	 * pipelining, run in paced mode, and suppress the writeback
	 * interrupt
	 */
	val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
		TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
		TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
		TX_CFG_INTR_COMPWB_DIS;

	/* per-ring: fold the ring-size bits into val and program the
	 * 64-bit descriptor base address
	 */
	for (i = 0; i < MAX_TX_RINGS; i++) {
		off = (unsigned long) cp->init_txds[i] -
			(unsigned long) cp->init_block;

		val |= CAS_TX_RINGN_BASE(i);
		writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
		writel((desc_dma + off) & 0xffffffff, cp->regs +
		       REG_TX_DBN_LOW(i));
		/* the kick register is deliberately left untouched here */
	}
	writel(val, cp->regs + REG_TX_CFG);

	/* max burst sizes; differentiated per ring only when QoS is in
	 * use
	 */
#ifdef USE_QOS
	writel(0x800, cp->regs + REG_TX_MAXBURST_0);
	writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
	writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
	writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
#else
	writel(0x800, cp->regs + REG_TX_MAXBURST_0);
	writel(0x800, cp->regs + REG_TX_MAXBURST_1);
	writel(0x800, cp->regs + REG_TX_MAXBURST_2);
	writel(0x800, cp->regs + REG_TX_MAXBURST_3);
#endif
}
2918
2919
/* program both DMA engines; tx first, then rx */
static inline void cas_init_dma(struct cas *cp)
{
	cas_init_tx_dma(cp);
	cas_init_rx_dma(cp);
}
2925
2926static void cas_process_mc_list(struct cas *cp)
2927{
2928 u16 hash_table[16];
2929 u32 crc;
2930 struct netdev_hw_addr *ha;
2931 int i = 1;
2932
2933 memset(hash_table, 0, sizeof(hash_table));
2934 netdev_for_each_mc_addr(ha, cp->dev) {
2935 if (i <= CAS_MC_EXACT_MATCH_SIZE) {
2936
2937
2938
2939 writel((ha->addr[4] << 8) | ha->addr[5],
2940 cp->regs + REG_MAC_ADDRN(i*3 + 0));
2941 writel((ha->addr[2] << 8) | ha->addr[3],
2942 cp->regs + REG_MAC_ADDRN(i*3 + 1));
2943 writel((ha->addr[0] << 8) | ha->addr[1],
2944 cp->regs + REG_MAC_ADDRN(i*3 + 2));
2945 i++;
2946 }
2947 else {
2948
2949
2950
2951 crc = ether_crc_le(ETH_ALEN, ha->addr);
2952 crc >>= 24;
2953 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
2954 }
2955 }
2956 for (i = 0; i < 16; i++)
2957 writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i));
2958}
2959
2960
2961static u32 cas_setup_multicast(struct cas *cp)
2962{
2963 u32 rxcfg = 0;
2964 int i;
2965
2966 if (cp->dev->flags & IFF_PROMISC) {
2967 rxcfg |= MAC_RX_CFG_PROMISC_EN;
2968
2969 } else if (cp->dev->flags & IFF_ALLMULTI) {
2970 for (i=0; i < 16; i++)
2971 writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
2972 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
2973
2974 } else {
2975 cas_process_mc_list(cp);
2976 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
2977 }
2978
2979 return rxcfg;
2980}
2981
2982
2983static void cas_clear_mac_err(struct cas *cp)
2984{
2985 writel(0, cp->regs + REG_MAC_COLL_NORMAL);
2986 writel(0, cp->regs + REG_MAC_COLL_FIRST);
2987 writel(0, cp->regs + REG_MAC_COLL_EXCESS);
2988 writel(0, cp->regs + REG_MAC_COLL_LATE);
2989 writel(0, cp->regs + REG_MAC_TIMER_DEFER);
2990 writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK);
2991 writel(0, cp->regs + REG_MAC_RECV_FRAME);
2992 writel(0, cp->regs + REG_MAC_LEN_ERR);
2993 writel(0, cp->regs + REG_MAC_ALIGN_ERR);
2994 writel(0, cp->regs + REG_MAC_FCS_ERR);
2995 writel(0, cp->regs + REG_MAC_RX_CODE_ERR);
2996}
2997
2998
2999static void cas_mac_reset(struct cas *cp)
3000{
3001 int i;
3002
3003
3004 writel(0x1, cp->regs + REG_MAC_TX_RESET);
3005 writel(0x1, cp->regs + REG_MAC_RX_RESET);
3006
3007
3008 i = STOP_TRIES;
3009 while (i-- > 0) {
3010 if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
3011 break;
3012 udelay(10);
3013 }
3014
3015
3016 i = STOP_TRIES;
3017 while (i-- > 0) {
3018 if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
3019 break;
3020 udelay(10);
3021 }
3022
3023 if (readl(cp->regs + REG_MAC_TX_RESET) |
3024 readl(cp->regs + REG_MAC_RX_RESET))
3025 netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n",
3026 readl(cp->regs + REG_MAC_TX_RESET),
3027 readl(cp->regs + REG_MAC_RX_RESET),
3028 readl(cp->regs + REG_MAC_STATE_MACHINE));
3029}
3030
3031
3032
/* one-time MAC setup: reset the MAC, then program timing, frame-size
 * limits, the station address, address filters, multicast state and the
 * MAC interrupt masks
 */
static void cas_init_mac(struct cas *cp)
{
	unsigned char *e = &cp->dev->dev_addr[0];
	int i;
	cas_mac_reset(cp);

	/* core arbitration weight register */
	writel(CAWR_RR_DIS, cp->regs + REG_CAWR);

#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
	/* infinite bursts are only safe on chips without the
	 * target-abort erratum
	 */
	if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
		writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
#endif

	writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);

	/* inter-packet gap timing */
	writel(0x00, cp->regs + REG_MAC_IPG0);
	writel(0x08, cp->regs + REG_MAC_IPG1);
	writel(0x04, cp->regs + REG_MAC_IPG2);

	/* slot time (cas_set_link_modes() rewrites this for gigabit) */
	writel(0x40, cp->regs + REG_MAC_SLOT_TIME);

	/* minimum frame: min ethernet frame plus FCS */
	writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);

	/* maximum frame: max MTU + header + FCS + optional VLAN tag,
	 * plus the max burst field
	 */
	writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
	       CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
			(CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
	       cp->regs + REG_MAC_FRAMESIZE_MAX);

	/* crc_size != 0 is used elsewhere as a half-duplex marker; on
	 * Saturn in that mode a larger preamble (0x41) is programmed --
	 * NOTE(review): presumably a Saturn half-duplex workaround;
	 * confirm against the erratum list
	 */
	if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
		writel(0x41, cp->regs + REG_MAC_PA_SIZE);
	else
		writel(0x07, cp->regs + REG_MAC_PA_SIZE);
	writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
	writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
	writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);

	/* random seed from the low bits of the MAC address */
	writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);

	writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
	writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
	writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
	writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
	writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);

	/* clear all the alternate address registers */
	for (i = 0; i < 45; i++)
		writel(0x0, cp->regs + REG_MAC_ADDRN(i));

	/* station address: three 16-bit words, low word first */
	writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
	writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
	writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));

	/* 01:80:c2:00:00:01 -- the 802.3x flow-control multicast address */
	writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
	writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
	writel(0x0180, cp->regs + REG_MAC_ADDRN(44));

	cp->mac_rx_cfg = cas_setup_multicast(cp);

	/* stat_lock: the error counters are also touched from irq paths */
	spin_lock(&cp->stat_lock[N_TX_RINGS]);
	cas_clear_mac_err(cp);
	spin_unlock(&cp->stat_lock[N_TX_RINGS]);

	/* MAC interrupt masks: only keep frame xmit/recv events masked
	 * out -- the DMA engine already reports normal tx/rx, and the
	 * interesting counter-expiration events stay enabled
	 */
	writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);

	/* mask out all MAC control (pause) interrupts */
	writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
}
3121
3122
3123static void cas_init_pause_thresholds(struct cas *cp)
3124{
3125
3126
3127
3128 if (cp->rx_fifo_size <= (2 * 1024)) {
3129 cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
3130 } else {
3131 int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
3132 if (max_frame * 3 > cp->rx_fifo_size) {
3133 cp->rx_pause_off = 7104;
3134 cp->rx_pause_on = 960;
3135 } else {
3136 int off = (cp->rx_fifo_size - (max_frame * 2));
3137 int on = off - max_frame;
3138 cp->rx_pause_off = off;
3139 cp->rx_pause_on = on;
3140 }
3141 }
3142}
3143
3144static int cas_vpd_match(const void __iomem *p, const char *str)
3145{
3146 int len = strlen(str) + 1;
3147 int i;
3148
3149 for (i = 0; i < len; i++) {
3150 if (readb(p + i) != str[i])
3151 return 0;
3152 }
3153 return 1;
3154}
3155
3156
3157
3158
3159
3160
3161
3162
3163
3164
3165
3166
3167
/* scan the expansion ROM for PCI Vital Product Data and extract the MAC
 * address (the @offset'th "local-mac-address" record) and the phy type.
 * Falls back to the OF property on sparc, then to a random address in
 * Sun's 08:00:20 OUI.  Returns the detected CAS_PHY_* type.
 */
static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
			    const int offset)
{
	void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
	void __iomem *base, *kstart;
	int i, len;
	int found = 0;
#define VPD_FOUND_MAC 0x01
#define VPD_FOUND_PHY 0x02

	int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
	int mac_off = 0;

#if defined(CONFIG_SPARC)
	const unsigned char *addr;
#endif

	/* give us access to the PROM */
	writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
	       cp->regs + REG_BIM_LOCAL_DEV_EN);

	/* check for an expansion rom (0x55aa signature) */
	if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
		goto use_random_mac_addr;

	/* search for the "PCIR" data structure; its bytes 8-9 hold the
	 * little-endian offset of the VPD area
	 */
	base = NULL;
	for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
		/* 'P' 'C' 'I' 'R' */
		if ((readb(p + i + 0) == 0x50) &&
		    (readb(p + i + 1) == 0x43) &&
		    (readb(p + i + 2) == 0x49) &&
		    (readb(p + i + 3) == 0x52)) {
			base = p + (readb(p + i + 8) |
				    (readb(p + i + 9) << 8));
			break;
		}
	}

	/* 0x82 = identifier-string tag must start the VPD area */
	if (!base || (readb(base) != 0x82))
		goto use_random_mac_addr;

	/* skip past the identifier string to the first tagged field */
	i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
	while (i < EXPANSION_ROM_SIZE) {
		if (readb(base + i) != 0x90) /* no vpd-r tag found */
			goto use_random_mac_addr;

		/* found a vpd-r field; read its 16-bit length */
		len = readb(base + i + 1) | (readb(base + i + 2) << 8);

		/* walk the keywords inside the field */
		kstart = base + i + 3;
		p = kstart;
		while ((p - kstart) < len) {
			int klen = readb(p + 2);
			int j;
			char type;

			p += 3;

			/* each record checked below is matched against a
			 * fixed layout (specific klen, name length and
			 * data offsets) as emitted by Sun's firmware:
			 * "I" records with type 'B' carry the binary
			 * local-mac-address, type 'S' records carry the
			 * phy-type / phy-interface (and entropy-dev)
			 * descriptions
			 */
			if (readb(p) != 'I')
				goto next;

			/* check the record's type field */
			type = readb(p + 3);
			if (type == 'B') {
				if ((klen == 29) && readb(p + 4) == 6 &&
				    cas_vpd_match(p + 5,
						  "local-mac-address")) {
					/* take only the @offset'th one */
					if (mac_off++ > offset)
						goto next;

					/* set mac address */
					for (j = 0; j < 6; j++)
						dev_addr[j] =
							readb(p + 23 + j);
					goto found_mac;
				}
			}

			if (type != 'S')
				goto next;

#ifdef USE_ENTROPY_DEV
			if ((klen == 24) &&
			    cas_vpd_match(p + 5, "entropy-dev") &&
			    cas_vpd_match(p + 17, "vms110")) {
				cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
				goto next;
			}
#endif

			if (found & VPD_FOUND_PHY)
				goto next;

			if ((klen == 18) && readb(p + 4) == 4 &&
			    cas_vpd_match(p + 5, "phy-type")) {
				if (cas_vpd_match(p + 14, "pcs")) {
					phy_type = CAS_PHY_SERDES;
					goto found_phy;
				}
			}

			if ((klen == 23) && readb(p + 4) == 4 &&
			    cas_vpd_match(p + 5, "phy-interface")) {
				if (cas_vpd_match(p + 19, "pcs")) {
					phy_type = CAS_PHY_SERDES;
					goto found_phy;
				}
			}
found_mac:
			found |= VPD_FOUND_MAC;
			goto next;

found_phy:
			found |= VPD_FOUND_PHY;

next:
			p += klen;
		}
		i += len + 3;
	}

use_random_mac_addr:
	if (found & VPD_FOUND_MAC)
		goto done;

#if defined(CONFIG_SPARC)
	addr = of_get_property(cp->of_node, "local-mac-address", NULL);
	if (addr != NULL) {
		memcpy(dev_addr, addr, ETH_ALEN);
		goto done;
	}
#endif

	/* Sun 08:00:20 OUI plus three random bytes */
	pr_info("MAC address not found in ROM VPD\n");
	dev_addr[0] = 0x08;
	dev_addr[1] = 0x00;
	dev_addr[2] = 0x20;
	get_random_bytes(dev_addr + 3, 3);

done:
	writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
	return phy_type;
}
3351
3352
3353static void cas_check_pci_invariants(struct cas *cp)
3354{
3355 struct pci_dev *pdev = cp->pdev;
3356
3357 cp->cas_flags = 0;
3358 if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
3359 (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
3360 if (pdev->revision >= CAS_ID_REVPLUS)
3361 cp->cas_flags |= CAS_FLAG_REG_PLUS;
3362 if (pdev->revision < CAS_ID_REVPLUS02u)
3363 cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
3364
3365
3366
3367
3368 if (pdev->revision < CAS_ID_REV2)
3369 cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
3370 } else {
3371
3372 cp->cas_flags |= CAS_FLAG_REG_PLUS;
3373
3374
3375
3376
3377 if ((pdev->vendor == PCI_VENDOR_ID_NS) &&
3378 (pdev->device == PCI_DEVICE_ID_NS_SATURN))
3379 cp->cas_flags |= CAS_FLAG_SATURN;
3380 }
3381}
3382
3383
/* probe the board-dependent parameters: rx buffer page order, FIFO
 * sizes, MAC address and phy type from VPD, and the MII phy address.
 * Returns 0 on success, -1 if no MII phy responds.
 */
static int cas_check_invariants(struct cas *cp)
{
	struct pci_dev *pdev = cp->pdev;
	u32 cfg;
	int i;

	/* get page size for rx buffers */
	cp->page_order = 0;
#ifdef USE_PAGE_ORDER
	if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
		/* probe whether higher-order pages are available by
		 * actually allocating (and immediately freeing) one
		 */
		struct page *page = alloc_pages(GFP_ATOMIC,
						CAS_JUMBO_PAGE_SHIFT -
						PAGE_SHIFT);
		if (page) {
			__free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
			cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
		} else {
			printk("MTU limited to %d bytes\n", CAS_MAX_MTU);
		}
	}
#endif
	cp->page_size = (PAGE_SIZE << cp->page_order);

	/* fetch the FIFO configurations */
	cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
	cp->rx_fifo_size = RX_FIFO_SIZE;

	/* phy type from the VPD; a SERDES phy is always gigabit capable
	 * and needs no MII probing
	 */
	cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
					PCI_SLOT(pdev->devfn));
	if (cp->phy_type & CAS_PHY_SERDES) {
		cp->cas_flags |= CAS_FLAG_1000MB_CAP;
		return 0;
	}

	/* MII: MDIO1 takes precedence over MDIO0 if both are wired up */
	cfg = readl(cp->regs + REG_MIF_CFG);
	if (cfg & MIF_CFG_MDIO_1) {
		cp->phy_type = CAS_PHY_MII_MDIO1;
	} else if (cfg & MIF_CFG_MDIO_0) {
		cp->phy_type = CAS_PHY_MII_MDIO0;
	}

	cas_mif_poll(cp, 0);
	writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);

	/* scan all 32 MII addresses, retrying each up to 3 times, until
	 * a phy answers with a plausible (nonzero, non-0xffffffff) ID
	 */
	for (i = 0; i < 32; i++) {
		u32 phy_id;
		int j;

		for (j = 0; j < 3; j++) {
			cp->phy_addr = i;
			phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
			phy_id |= cas_phy_read(cp, MII_PHYSID2);
			if (phy_id && (phy_id != 0xFFFFFFFF)) {
				cp->phy_id = phy_id;
				goto done;
			}
		}
	}
	pr_err("MII phy did not respond [%08x]\n",
	       readl(cp->regs + REG_MIF_STATE_MACHINE));
	return -1;

done:
	/* check for gigabit capability via the extended status bits */
	cfg = cas_phy_read(cp, MII_BMSR);
	if ((cfg & CAS_BMSR_1000_EXTEND) &&
	    cas_phy_read(cp, CAS_MII_1000_EXTEND))
		cp->cas_flags |= CAS_FLAG_1000MB_CAP;
	return 0;
}
3459
3460
/* enable the tx/rx DMA engines and the MAC, wait for the MAC enable bits
 * to stick, then unmask interrupts and kick the rx rings
 */
static inline void cas_start_dma(struct cas *cp)
{
	int i;
	u32 val;
	int txfailed = 0;

	/* enable dma */
	val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
	writel(val, cp->regs + REG_TX_CFG);
	val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
	writel(val, cp->regs + REG_RX_CFG);

	/* enable the mac */
	val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
	writel(val, cp->regs + REG_MAC_TX_CFG);
	val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
	writel(val, cp->regs + REG_MAC_RX_CFG);

	/* poll until the tx enable reads back set; when the loop runs
	 * to exhaustion i ends at -1, so (i < 0) means timeout
	 */
	i = STOP_TRIES;
	while (i-- > 0) {
		val = readl(cp->regs + REG_MAC_TX_CFG);
		if ((val & MAC_TX_CFG_EN))
			break;
		udelay(10);
	}
	if (i < 0) txfailed = 1;
	/* same poll for the rx enable; report a combined error on the
	 * way out if tx already failed
	 */
	i = STOP_TRIES;
	while (i-- > 0) {
		val = readl(cp->regs + REG_MAC_RX_CFG);
		if ((val & MAC_RX_CFG_EN)) {
			if (txfailed) {
				netdev_err(cp->dev,
					   "enabling mac failed [tx:%08x:%08x]\n",
					   readl(cp->regs + REG_MIF_STATE_MACHINE),
					   readl(cp->regs + REG_MAC_STATE_MACHINE));
			}
			goto enable_rx_done;
		}
		udelay(10);
	}
	netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
		   (txfailed ? "tx,rx" : "rx"),
		   readl(cp->regs + REG_MIF_STATE_MACHINE),
		   readl(cp->regs + REG_MAC_STATE_MACHINE));

enable_rx_done:
	cas_unmask_intr(cp); /* enable interrupts */
	writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
	writel(0, cp->regs + REG_RX_COMP_TAIL);

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		if (N_RX_DESC_RINGS > 1)
			writel(RX_DESC_RINGN_SIZE(1) - 4,
			       cp->regs + REG_PLUS_RX_KICK1);

		for (i = 1; i < N_RX_COMP_RINGS; i++)
			writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
	}
}
3520
3521
3522static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
3523 int *pause)
3524{
3525 u32 val = readl(cp->regs + REG_PCS_MII_LPA);
3526 *fd = (val & PCS_MII_LPA_FD) ? 1 : 0;
3527 *pause = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00;
3528 if (val & PCS_MII_LPA_ASYM_PAUSE)
3529 *pause |= 0x10;
3530 *spd = 1000;
3531}
3532
3533
3534static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
3535 int *pause)
3536{
3537 u32 val;
3538
3539 *fd = 0;
3540 *spd = 10;
3541 *pause = 0;
3542
3543
3544 val = cas_phy_read(cp, MII_LPA);
3545 if (val & CAS_LPA_PAUSE)
3546 *pause = 0x01;
3547
3548 if (val & CAS_LPA_ASYM_PAUSE)
3549 *pause |= 0x10;
3550
3551 if (val & LPA_DUPLEX)
3552 *fd = 1;
3553 if (val & LPA_100)
3554 *spd = 100;
3555
3556 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
3557 val = cas_phy_read(cp, CAS_MII_1000_STATUS);
3558 if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF))
3559 *spd = 1000;
3560 if (val & CAS_LPA_1000FULL)
3561 *fd = 1;
3562 }
3563}
3564
3565
3566
3567
3568
3569
/* called once the link comes up: read the negotiated (or forced) speed,
 * duplex and pause settings, program the MAC/XIF accordingly, then start
 * the DMA engines
 */
static void cas_set_link_modes(struct cas *cp)
{
	u32 val;
	int full_duplex, speed, pause;

	full_duplex = 0;
	speed = 10;
	pause = 0;

	if (CAS_PHY_MII(cp->phy_type)) {
		cas_mif_poll(cp, 0);
		val = cas_phy_read(cp, MII_BMCR);
		if (val & BMCR_ANENABLE) {
			/* autoneg: take the resolved link-partner mode */
			cas_read_mii_link_mode(cp, &full_duplex, &speed,
					       &pause);
		} else {
			/* forced mode: decode BMCR directly */
			if (val & BMCR_FULLDPLX)
				full_duplex = 1;

			if (val & BMCR_SPEED100)
				speed = 100;
			else if (val & CAS_BMCR_SPEED1000)
				speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
					1000 : 100;
		}
		cas_mif_poll(cp, 1);

	} else {
		/* SERDES/PCS path */
		val = readl(cp->regs + REG_PCS_MII_CTRL);
		cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
		if ((val & PCS_MII_AUTONEG_EN) == 0) {
			if (val & PCS_MII_CTRL_DUPLEX)
				full_duplex = 1;
		}
	}

	netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
		   speed, full_duplex ? "full" : "half");

	/* XIF config: drive the MII output and LEDs to match the mode */
	val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
	if (CAS_PHY_MII(cp->phy_type)) {
		val |= MAC_XIF_MII_BUFFER_OUTPUT_EN;
		if (!full_duplex)
			val |= MAC_XIF_DISABLE_ECHO;
	}
	if (full_duplex)
		val |= MAC_XIF_FDPLX_LED;
	if (speed == 1000)
		val |= MAC_XIF_GMII_MODE;
	writel(val, cp->regs + REG_MAC_XIF_CFG);

	/* carrier / collision handling depends on duplex */
	val = MAC_TX_CFG_IPG_EN;
	if (full_duplex) {
		val |= MAC_TX_CFG_IGNORE_CARRIER;
		val |= MAC_TX_CFG_IGNORE_COLL;
	} else {
#ifndef USE_CSMA_CD_PROTO
		val |= MAC_TX_CFG_NEVER_GIVE_UP_EN;
		val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM;
#endif
	}

	/* 1000BASE half duplex needs carrier extension, a larger slot
	 * time and a larger minimum frame; crc_size != 0 doubles as the
	 * "FCS kept" / half-duplex marker used elsewhere
	 */
	if ((speed == 1000) && !full_duplex) {
		writel(val | MAC_TX_CFG_CARRIER_EXTEND,
		       cp->regs + REG_MAC_TX_CFG);

		val = readl(cp->regs + REG_MAC_RX_CFG);
		val &= ~MAC_RX_CFG_STRIP_FCS; /* keep FCS in this mode */
		writel(val | MAC_RX_CFG_CARRIER_EXTEND,
		       cp->regs + REG_MAC_RX_CFG);

		writel(0x200, cp->regs + REG_MAC_SLOT_TIME);

		cp->crc_size = 4;

		cp->min_frame_size = CAS_1000MB_MIN_FRAME;

	} else {
		writel(val, cp->regs + REG_MAC_TX_CFG);

		/* strip FCS only at full duplex; at half duplex it is
		 * kept and crc_size accounts for it
		 */
		val = readl(cp->regs + REG_MAC_RX_CFG);
		if (full_duplex) {
			val |= MAC_RX_CFG_STRIP_FCS;
			cp->crc_size = 0;
			cp->min_frame_size = CAS_MIN_MTU;
		} else {
			val &= ~MAC_RX_CFG_STRIP_FCS;
			cp->crc_size = 4;
			cp->min_frame_size = CAS_MIN_FRAME;
		}
		writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
		       cp->regs + REG_MAC_RX_CFG);
		writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
	}

	if (netif_msg_link(cp)) {
		if (pause & 0x01) {
			netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
				    cp->rx_fifo_size,
				    cp->rx_pause_off,
				    cp->rx_pause_on);
		} else if (pause & 0x10) {
			netdev_info(cp->dev, "TX pause enabled\n");
		} else {
			netdev_info(cp->dev, "Pause is disabled\n");
		}
	}

	/* pause encoding: 0x01 = symmetric, 0x10 = asymmetric (tx only) */
	val = readl(cp->regs + REG_MAC_CTRL_CFG);
	val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
	if (pause) { /* symmetric or asymmetric pause */
		val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
		if (pause & 0x01) { /* symmetric pause */
			val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
		}
	}
	writel(val, cp->regs + REG_MAC_CTRL_CFG);
	cas_start_dma(cp);
}
3699
3700
/* full hardware (re)initialization.  @restart_link selects whether to
 * re-run phy init and autonegotiation or keep the existing link state.
 */
static void cas_init_hw(struct cas *cp, int restart_link)
{
	if (restart_link)
		cas_phy_init(cp);

	cas_init_pause_thresholds(cp);
	cas_init_mac(cp);
	cas_init_dma(cp);

	if (restart_link) {
		/* restart autonegotiation from scratch */
		cp->timer_ticks = 0;
		cas_begin_auto_negotiation(cp, NULL);
	} else if (cp->lstate == link_up) {
		/* link unchanged: just reprogram modes and go */
		cas_set_link_modes(cp);
		netif_carrier_on(cp->dev);
	}
}
3719
3720
3721
3722
3723
/* pulse the BIM soft-reset line, then restore the PCI config space that
 * the reset clobbers
 */
static void cas_hard_reset(struct cas *cp)
{
	writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
	udelay(20);
	pci_restore_state(cp->pdev);
}
3730
3731
/* software-reset the tx/rx DMA blocks, wait for completion, then restore
 * the BIM interrupt enables and PCI error masks the reset clears
 */
static void cas_global_reset(struct cas *cp, int blkflag)
{
	int limit;

	/* issue global reset */
	if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
		/* NOTE(review): SW_RESET_BLOCK_PCS_SLINK presumably
		 * holds the PCS/SERDES link block out of the reset when
		 * @blkflag is set on a PCS phy -- confirm against the
		 * chip documentation
		 */
		writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
		       cp->regs + REG_SW_RESET);
	} else {
		writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
	}

	/* let the reset settle before polling the register */
	mdelay(3);

	limit = STOP_TRIES;
	while (limit-- > 0) {
		u32 val = readl(cp->regs + REG_SW_RESET);
		/* both reset bits self-clear on completion */
		if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
			goto done;
		udelay(10);
	}
	netdev_err(cp->dev, "sw reset failed\n");

done:
	/* enable BIM parity / abort interrupts */
	writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
	       BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);

	/* unmask every PCI error except the listed ones, which are
	 * either handled elsewhere or too noisy to report
	 */
	writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
			       PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
			       PCI_ERR_BIM_DMA_READ), cp->regs +
	       REG_PCI_ERR_STATUS_MASK);

	/* default the datapath to MII; the PCS path is re-selected later
	 * if needed
	 */
	writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
}
3781
/* quiesce the chip: mask interrupts, global + MAC reset, stop both DMA
 * engines, and reload the header-parser firmware
 */
static void cas_reset(struct cas *cp, int blkflag)
{
	u32 val;

	cas_mask_intr(cp);
	cas_global_reset(cp, blkflag);
	cas_mac_reset(cp);
	cas_entropy_reset(cp);

	/* disable both dma engines */
	val = readl(cp->regs + REG_TX_CFG);
	val &= ~TX_CFG_DMA_EN;
	writel(val, cp->regs + REG_TX_CFG);

	val = readl(cp->regs + REG_RX_CFG);
	val &= ~RX_CFG_DMA_EN;
	writel(val, cp->regs + REG_RX_CFG);

	/* header parser firmware: the stock firmware on target-abort
	 * chips, otherwise the configured alternate (which defaults to
	 * cas_prog_null, making the first branch the common case)
	 */
	if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
	    (CAS_HP_ALT_FIRMWARE == cas_prog_null)) {
		cas_load_firmware(cp, CAS_HP_FIRMWARE);
	} else {
		cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
	}

	/* clear out the MAC error counters under stat_lock */
	spin_lock(&cp->stat_lock[N_TX_RINGS]);
	cas_clear_mac_err(cp);
	spin_unlock(&cp->stat_lock[N_TX_RINGS]);
}
3813
3814
/* shut the chip down for device removal / driver unload: stop the link
 * timer, wait for any pending reset work to drain, then reset and (on
 * Saturn) power down the phy under all locks
 */
static void cas_shutdown(struct cas *cp)
{
	unsigned long flags;

	/* mark not-running first so timers don't respawn */
	cp->hw_running = 0;

	del_timer_sync(&cp->link_timer);

	/* busy-wait until the reset task's pending counter drains;
	 * NOTE(review): relies on cas_reset_task decrementing the
	 * counter when it finishes -- confirm there is no path that
	 * leaves it elevated
	 */
#if 0
	while (atomic_read(&cp->reset_task_pending_mtu) ||
	       atomic_read(&cp->reset_task_pending_spare) ||
	       atomic_read(&cp->reset_task_pending_all))
		schedule();

#else
	while (atomic_read(&cp->reset_task_pending))
		schedule();
#endif

	/* actually stop the chip */
	cas_lock_all_save(cp, flags);
	cas_reset(cp, 0);
	if (cp->cas_flags & CAS_FLAG_SATURN)
		cas_phy_powerdown(cp);
	cas_unlock_all_restore(cp, flags);
}
3842
/* ndo_change_mtu: record the new MTU and, if the interface is running,
 * schedule a reset so the rx buffers get resized (a full reset on SERDES
 * phys, an MTU-only reset otherwise), then wait for it to complete
 */
static int cas_change_mtu(struct net_device *dev, int new_mtu)
{
	struct cas *cp = netdev_priv(dev);

	dev->mtu = new_mtu;
	if (!netif_running(dev) || !netif_device_present(dev))
		return 0;

	/* let the reset task apply the change */
#if 1
	atomic_inc(&cp->reset_task_pending);
	if ((cp->phy_type & CAS_PHY_SERDES)) {
		atomic_inc(&cp->reset_task_pending_all);
	} else {
		atomic_inc(&cp->reset_task_pending_mtu);
	}
	schedule_work(&cp->reset_task);
#else
	atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
		   CAS_RESET_ALL : CAS_RESET_MTU);
	pr_err("reset called in cas_change_mtu\n");
	schedule_work(&cp->reset_task);
#endif

	/* block until the reset work has run */
	flush_work(&cp->reset_task);
	return 0;
}
3870
/* free every skb still queued on tx ring @ring, unmapping each of its
 * DMA buffers (including the extra "tiny buffer" descriptors inserted by
 * the tx-abort workaround) and clearing the tiny-buffer bookkeeping
 */
static void cas_clean_txd(struct cas *cp, int ring)
{
	struct cas_tx_desc *txd = cp->init_txds[ring];
	struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
	u64 daddr, dlen;
	int i, size;

	size = TX_DESC_RINGN_SIZE(ring);
	for (i = 0; i < size; i++) {
		int frag;

		if (skbs[i] == NULL)
			continue;

		skb = skbs[i];
		skbs[i] = NULL;

		/* one descriptor for the head plus one per fragment */
		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
			int ent = i & (size - 1);

			/* recover the dma address and length from the
			 * descriptor itself
			 */
			daddr = le64_to_cpu(txd[ent].buffer);
			dlen = CAS_VAL(TX_DESC_BUFLEN,
				       le64_to_cpu(txd[ent].control));
			pci_unmap_page(cp->pdev, daddr, dlen,
				       PCI_DMA_TODEVICE);

			if (frag != skb_shinfo(skb)->nr_frags) {
				i++;

				/* the next descriptor may be a tiny
				 * buffer inserted by the workaround;
				 * skip past it
				 */
				ent = i & (size - 1);
				if (cp->tx_tiny_use[ring][ent].used)
					i++;
			}
		}
		dev_kfree_skb_any(skb);
	}

	/* zero out the tiny-buffer usage table */
	memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
}
3917
3918
3919static inline void cas_free_rx_desc(struct cas *cp, int ring)
3920{
3921 cas_page_t **page = cp->rx_pages[ring];
3922 int i, size;
3923
3924 size = RX_DESC_RINGN_SIZE(ring);
3925 for (i = 0; i < size; i++) {
3926 if (page[i]) {
3927 cas_page_free(cp, page[i]);
3928 page[i] = NULL;
3929 }
3930 }
3931}
3932
3933static void cas_free_rxds(struct cas *cp)
3934{
3935 int i;
3936
3937 for (i = 0; i < N_RX_DESC_RINGS; i++)
3938 cas_free_rx_desc(cp, i);
3939}
3940
3941
/* return all rings to their pristine state: free queued tx skbs, wipe
 * the shared init block, and reclaim rx pages and completions
 */
static void cas_clean_rings(struct cas *cp)
{
	int i;

	/* clean all the tx rings */
	memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
	memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
	for (i = 0; i < N_TX_RINGS; i++)
		cas_clean_txd(cp, i);

	/* zero out the init block before rebuilding rx state */
	memset(cp->init_block, 0, sizeof(struct cas_init_block));
	cas_clean_rxds(cp);
	cas_clean_rxcs(cp);
}
3957
3958
3959static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
3960{
3961 cas_page_t **page = cp->rx_pages[ring];
3962 int size, i = 0;
3963
3964 size = RX_DESC_RINGN_SIZE(ring);
3965 for (i = 0; i < size; i++) {
3966 if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
3967 return -1;
3968 }
3969 return 0;
3970}
3971
3972static int cas_alloc_rxds(struct cas *cp)
3973{
3974 int i;
3975
3976 for (i = 0; i < N_RX_DESC_RINGS; i++) {
3977 if (cas_alloc_rx_desc(cp, i) < 0) {
3978 cas_free_rxds(cp);
3979 return -1;
3980 }
3981 }
3982 return 0;
3983}
3984
/* Deferred reset worker (cp->reset_task).  Three pending counters
 * encode why the reset was requested: a full reset (pending_all),
 * spare RX page recovery only (pending_spare), or an MTU change
 * (pending_mtu).  Exactly the requests observed on entry are consumed
 * at the end, so requests arriving mid-run stay pending.
 */
static void cas_reset_task(struct work_struct *work)
{
	struct cas *cp = container_of(work, struct cas, reset_task);
#if 0
	int pending = atomic_read(&cp->reset_task_pending);
#else
	int pending_all = atomic_read(&cp->reset_task_pending_all);
	int pending_spare = atomic_read(&cp->reset_task_pending_spare);
	int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);

	if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) {
		/* Nothing left to do — an earlier run already serviced
		 * all requests; just account for this invocation.
		 */
		atomic_dec(&cp->reset_task_pending);
		return;
	}
#endif
	/* A reset only makes sense while the hardware is up. */
	if (cp->hw_running) {
		unsigned long flags;

		/* Keep the network stack away while reprogramming. */
		netif_device_detach(cp->dev);
		cas_lock_all_save(cp, flags);

		if (cp->opened) {
			/* Replenish spare RX pages; GFP_ATOMIC because
			 * spinlocks are held here.
			 */
			cas_spare_recover(cp, GFP_ATOMIC);
		}
#if 1
		/* Spare recovery alone does not require a chip reset. */
		if (!pending_all && !pending_mtu)
			goto done;
#else
		if (pending == CAS_RESET_SPARE)
			goto done;
#endif

		/* Soft (or, for a full request, lighter) reset, ring
		 * cleanup when open, then re-init the hardware.
		 */
#if 1
		cas_reset(cp, !(pending_all > 0));
		if (cp->opened)
			cas_clean_rings(cp);
		cas_init_hw(cp, (pending_all > 0));
#else
		cas_reset(cp, !(pending == CAS_RESET_ALL));
		if (cp->opened)
			cas_clean_rings(cp);
		cas_init_hw(cp, pending == CAS_RESET_ALL);
#endif

done:
		cas_unlock_all_restore(cp, flags);
		netif_device_attach(cp->dev);
	}
#if 1
	/* Consume only the requests we saw on entry. */
	atomic_sub(pending_all, &cp->reset_task_pending_all);
	atomic_sub(pending_spare, &cp->reset_task_pending_spare);
	atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
	atomic_dec(&cp->reset_task_pending);
#else
	atomic_set(&cp->reset_task_pending, 0);
#endif
}
4061
/* Periodic link watchdog (rearmed every CAS_LINK_TIMEOUT): retries
 * deferred RX page posts, polls PHY/PCS link state, and sniffs for a
 * wedged TX MAC.  Schedules the reset worker when a full reset is
 * required.
 */
static void cas_link_timer(struct timer_list *t)
{
	struct cas *cp = from_timer(cp, t, link_timer);
	int mask, pending = 0, reset = 0;
	unsigned long flags;

	if (link_transition_timeout != 0 &&
	    cp->link_transition_jiffies_valid &&
	    ((jiffies - cp->link_transition_jiffies) >
	      (link_transition_timeout))) {
		/* The last recorded transition is now too old to count
		 * as part of a link-flap episode; forget it.
		 */
		cp->link_transition_jiffies_valid = 0;
	}

	if (!cp->hw_running)
		return;

	spin_lock_irqsave(&cp->lock, flags);
	cas_lock_tx(cp);
	cas_entropy_gather(cp);

	/* If the reset worker is already queued, let it do the work. */
#if 1
	if (atomic_read(&cp->reset_task_pending_all) ||
	    atomic_read(&cp->reset_task_pending_spare) ||
	    atomic_read(&cp->reset_task_pending_mtu))
		goto done;
#else
	if (atomic_read(&cp->reset_task_pending))
		goto done;
#endif

	/* Retry RX page posts that previously failed (flagged per ring
	 * in cas_flags).
	 */
	if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) {
		int i, rmask;

		for (i = 0; i < MAX_RX_DESC_RINGS; i++) {
			rmask = CAS_FLAG_RXD_POST(i);
			if ((mask & rmask) == 0)
				continue;

			/* Still failing: keep the flag, retry later. */
			if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
				pending = 1;
				continue;
			}
			cp->cas_flags &= ~rmask;
		}
	}

	if (CAS_PHY_MII(cp->phy_type)) {
		u16 bmsr;
		cas_mif_poll(cp, 0);
		bmsr = cas_phy_read(cp, MII_BMSR);
		/* BMSR latches link-down events (IEEE 802.3); read it
		 * twice so the second value reflects the current state.
		 */
		bmsr = cas_phy_read(cp, MII_BMSR);
		cas_mif_poll(cp, 1);
		readl(cp->regs + REG_MIF_STATUS); /* flush MIF status */
		reset = cas_mii_link_check(cp, bmsr);
	} else {
		reset = cas_pcs_link_check(cp);
	}

	if (reset)
		goto done;

	/* Look for a wedged TX MAC while no frame is being emitted. */
	if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
		u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
		u32 wptr, rptr;
		int tlm = CAS_VAL(MAC_SM_TLM, val);

		if (((tlm == 0x5) || (tlm == 0x3)) &&
		    (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
			netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
				     "tx err: MAC_STATE[%08x]\n", val);
			reset = 1;
			goto done;
		}

		val = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
		wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
		rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
		/* No packets counted but FIFO pointers disagree: stuck. */
		if ((val == 0) && (wptr != rptr)) {
			netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
				     "tx err: TX_FIFO[%08x:%08x:%08x]\n",
				     val, wptr, rptr);
			reset = 1;
		}

		if (reset)
			cas_hard_reset(cp);
	}

done:
	if (reset) {
#if 1
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_all);
		schedule_work(&cp->reset_task);
#else
		atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
		pr_err("reset called in cas_link_timer\n");
		schedule_work(&cp->reset_task);
#endif
	}

	/* Skip rearming while RX posting is backed up.  NOTE(review):
	 * presumably the RX refill path restarts this timer once pages
	 * become available again — confirm before relying on it.
	 */
	if (!pending)
		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
	cas_unlock_tx(cp);
	spin_unlock_irqrestore(&cp->lock, flags);
}
4183
4184
4185
4186
4187static void cas_tx_tiny_free(struct cas *cp)
4188{
4189 struct pci_dev *pdev = cp->pdev;
4190 int i;
4191
4192 for (i = 0; i < N_TX_RINGS; i++) {
4193 if (!cp->tx_tiny_bufs[i])
4194 continue;
4195
4196 pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
4197 cp->tx_tiny_bufs[i],
4198 cp->tx_tiny_dvma[i]);
4199 cp->tx_tiny_bufs[i] = NULL;
4200 }
4201}
4202
4203static int cas_tx_tiny_alloc(struct cas *cp)
4204{
4205 struct pci_dev *pdev = cp->pdev;
4206 int i;
4207
4208 for (i = 0; i < N_TX_RINGS; i++) {
4209 cp->tx_tiny_bufs[i] =
4210 pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
4211 &cp->tx_tiny_dvma[i]);
4212 if (!cp->tx_tiny_bufs[i]) {
4213 cas_tx_tiny_free(cp);
4214 return -1;
4215 }
4216 }
4217 return 0;
4218}
4219
4220
/* ndo_open: bring the interface up.  Serialized against suspend/resume
 * and ioctls by pm_mutex.  Resources are acquired in order (tiny TX
 * buffers, RX pages, spares, IRQ) and unwound in reverse on failure.
 */
static int cas_open(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);
	int hw_was_up, err;
	unsigned long flags;

	mutex_lock(&cp->pm_mutex);

	hw_was_up = cp->hw_running;

	/* The chip may have been left powered down (e.g. by suspend);
	 * soft-reset it into a known state first.
	 */
	if (!cp->hw_running) {
		cas_lock_all_save(cp, flags);
		cas_reset(cp, 0);
		cp->hw_running = 1;
		cas_unlock_all_restore(cp, flags);
	}

	err = -ENOMEM;
	if (cas_tx_tiny_alloc(cp) < 0)
		goto err_unlock;

	/* RX descriptor ring buffer pages. */
	if (cas_alloc_rxds(cp) < 0)
		goto err_tx_tiny;

	/* Spare RX page pool. */
	cas_spare_init(cp);
	cas_spare_recover(cp, GFP_KERNEL);

	/* Shared interrupt line; cas_interrupt services all rings. */
	if (request_irq(cp->pdev->irq, cas_interrupt,
			IRQF_SHARED, dev->name, (void *) dev)) {
		netdev_err(cp->dev, "failed to request irq !\n");
		err = -EAGAIN;
		goto err_spare;
	}

#ifdef USE_NAPI
	napi_enable(&cp->napi);
#endif

	/* Initialize the hardware with the freshly allocated rings;
	 * full init only if the chip was not already running.
	 */
	cas_lock_all_save(cp, flags);
	cas_clean_rings(cp);
	cas_init_hw(cp, !hw_was_up);
	cp->opened = 1;
	cas_unlock_all_restore(cp, flags);

	netif_start_queue(dev);
	mutex_unlock(&cp->pm_mutex);
	return 0;

err_spare:
	cas_spare_free(cp);
	cas_free_rxds(cp);
err_tx_tiny:
	cas_tx_tiny_free(cp);
err_unlock:
	mutex_unlock(&cp->pm_mutex);
	return err;
}
4294
/* ndo_stop: tear the interface down, mirroring cas_open() in reverse.
 * pm_mutex keeps this from racing suspend/resume and ioctls.
 */
static int cas_close(struct net_device *dev)
{
	unsigned long flags;
	struct cas *cp = netdev_priv(dev);

#ifdef USE_NAPI
	napi_disable(&cp->napi);
#endif

	mutex_lock(&cp->pm_mutex);

	netif_stop_queue(dev);

	/* Stop the chip and clear all rings under every lock. */
	cas_lock_all_save(cp, flags);
	cp->opened = 0;
	cas_reset(cp, 0);
	cas_phy_init(cp);
	cas_begin_auto_negotiation(cp, NULL);
	cas_clean_rings(cp);
	cas_unlock_all_restore(cp, flags);

	/* Release IRQ and the buffers acquired in cas_open(). */
	free_irq(cp->pdev->irq, (void *) dev);
	cas_spare_free(cp);
	cas_free_rxds(cp);
	cas_tx_tiny_free(cp);
	mutex_unlock(&cp->pm_mutex);
	return 0;
}
4324
/* ethtool statistics names, in the exact order in which
 * cas_get_ethtool_stats() fills its data array.
 */
static struct {
	const char name[ETH_GSTRING_LEN];
} ethtool_cassini_statnames[] = {
	{"collisions"},
	{"rx_bytes"},
	{"rx_crc_errors"},
	{"rx_dropped"},
	{"rx_errors"},
	{"rx_fifo_errors"},
	{"rx_frame_errors"},
	{"rx_length_errors"},
	{"rx_over_errors"},
	{"rx_packets"},
	{"tx_aborted_errors"},
	{"tx_bytes"},
	{"tx_dropped"},
	{"tx_errors"},
	{"tx_fifo_errors"},
	{"tx_packets"}
};
#define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames)
4346
/* Registers reported by ethtool -d.  Negative entries are MII PHY
 * register numbers stored negated; non-negative entries are offsets
 * into the chip's register space.  Consumed by cas_read_regs().
 */
static struct {
	const int offsets;
} ethtool_register_table[] = {
	{-MII_BMSR},
	{-MII_BMCR},
	{REG_CAWR},
	{REG_INF_BURST},
	{REG_BIM_CFG},
	{REG_RX_CFG},
	{REG_HP_CFG},
	{REG_MAC_TX_CFG},
	{REG_MAC_RX_CFG},
	{REG_MAC_CTRL_CFG},
	{REG_MAC_XIF_CFG},
	{REG_MIF_CFG},
	{REG_PCS_CFG},
	{REG_SATURN_PCFG},
	{REG_PCS_MII_STATUS},
	{REG_PCS_STATE_MACHINE},
	{REG_MAC_COLL_EXCESS},
	{REG_MAC_COLL_LATE}
};
#define CAS_REG_LEN ARRAY_SIZE(ethtool_register_table)
#define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN)
4371
4372static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
4373{
4374 u8 *p;
4375 int i;
4376 unsigned long flags;
4377
4378 spin_lock_irqsave(&cp->lock, flags);
4379 for (i = 0, p = ptr; i < len ; i ++, p += sizeof(u32)) {
4380 u16 hval;
4381 u32 val;
4382 if (ethtool_register_table[i].offsets < 0) {
4383 hval = cas_phy_read(cp,
4384 -ethtool_register_table[i].offsets);
4385 val = hval;
4386 } else {
4387 val= readl(cp->regs+ethtool_register_table[i].offsets);
4388 }
4389 memcpy(p, (u8 *)&val, sizeof(u32));
4390 }
4391 spin_unlock_irqrestore(&cp->lock, flags);
4392}
4393
/* ndo_get_stats: fold the hardware MAC error counters and all per-ring
 * soft counters into the aggregate slot stats[N_TX_RINGS] and return
 * it.  Per-ring counters are zeroed after being folded in; the
 * aggregate slot therefore carries the running totals.
 */
static struct net_device_stats *cas_get_stats(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);
	struct net_device_stats *stats = cp->net_stats;
	unsigned long flags;
	int i;
	unsigned long tmp;

	/* Chip down: return the last known totals untouched. */
	if (!cp->hw_running)
		return stats + N_TX_RINGS;

	/* Fold in the MAC error counters (low 16 bits each), then clear
	 * them in hardware via cas_clear_mac_err().
	 */
	spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
	stats[N_TX_RINGS].rx_crc_errors +=
		readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
	stats[N_TX_RINGS].rx_frame_errors +=
		readl(cp->regs + REG_MAC_ALIGN_ERR) & 0xffff;
	stats[N_TX_RINGS].rx_length_errors +=
		readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
#if 1
	/* Excess + late collisions count both as aborts and as part of
	 * the total collision count.
	 */
	tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
		(readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff);
	stats[N_TX_RINGS].tx_aborted_errors += tmp;
	stats[N_TX_RINGS].collisions +=
		tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
#else
	stats[N_TX_RINGS].tx_aborted_errors +=
		readl(cp->regs + REG_MAC_COLL_EXCESS);
	stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
		readl(cp->regs + REG_MAC_COLL_LATE);
#endif
	cas_clear_mac_err(cp);

	/* Ring 0 additionally accumulates several error classes. */
	spin_lock(&cp->stat_lock[0]);
	stats[N_TX_RINGS].collisions += stats[0].collisions;
	stats[N_TX_RINGS].rx_over_errors += stats[0].rx_over_errors;
	stats[N_TX_RINGS].rx_frame_errors += stats[0].rx_frame_errors;
	stats[N_TX_RINGS].rx_fifo_errors += stats[0].rx_fifo_errors;
	stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors;
	stats[N_TX_RINGS].tx_fifo_errors += stats[0].tx_fifo_errors;
	spin_unlock(&cp->stat_lock[0]);

	/* Fold and reset every ring's packet/byte/error counters. */
	for (i = 0; i < N_TX_RINGS; i++) {
		spin_lock(&cp->stat_lock[i]);
		stats[N_TX_RINGS].rx_length_errors +=
			stats[i].rx_length_errors;
		stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
		stats[N_TX_RINGS].rx_packets += stats[i].rx_packets;
		stats[N_TX_RINGS].tx_packets += stats[i].tx_packets;
		stats[N_TX_RINGS].rx_bytes += stats[i].rx_bytes;
		stats[N_TX_RINGS].tx_bytes += stats[i].tx_bytes;
		stats[N_TX_RINGS].rx_errors += stats[i].rx_errors;
		stats[N_TX_RINGS].tx_errors += stats[i].tx_errors;
		stats[N_TX_RINGS].rx_dropped += stats[i].rx_dropped;
		stats[N_TX_RINGS].tx_dropped += stats[i].tx_dropped;
		memset(stats + i, 0, sizeof(struct net_device_stats));
		spin_unlock(&cp->stat_lock[i]);
	}
	spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags);
	return stats + N_TX_RINGS;
}
4464
4465
/* ndo_set_rx_mode: disable the RX MAC, wait for it to quiesce, then
 * program the new promiscuous/hash-filter configuration and re-enable.
 */
static void cas_set_multicast(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);
	u32 rxcfg, rxcfg_new;
	unsigned long flags;
	int limit = STOP_TRIES;

	if (!cp->hw_running)
		return;

	spin_lock_irqsave(&cp->lock, flags);
	rxcfg = readl(cp->regs + REG_MAC_RX_CFG);

	/* Disable the RX MAC and poll until it reports disabled
	 * (bounded by STOP_TRIES * 10us).
	 */
	writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
		if (!limit--)
			break;
		udelay(10);
	}

	/* Likewise clear promiscuous/hash-filter mode and wait for the
	 * hash filter to drop out.
	 */
	limit = STOP_TRIES;
	rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN);
	writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) {
		if (!limit--)
			break;
		udelay(10);
	}

	/* Program the new filter bits and re-enable reception. */
	cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp);
	rxcfg |= rxcfg_new;
	writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
	spin_unlock_irqrestore(&cp->lock, flags);
}
4503
4504static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4505{
4506 struct cas *cp = netdev_priv(dev);
4507 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
4508 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
4509 strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
4510}
4511
/* ethtool get_link_ksettings: report port type, supported/advertised
 * modes, and the negotiated (or forced) speed/duplex.  Hardware is
 * only queried while cp->hw_running; otherwise the forced settings in
 * cp->link_cntl are reported.
 */
static int cas_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct cas *cp = netdev_priv(dev);
	u16 bmcr;
	int full_duplex, speed, pause;
	unsigned long flags;
	enum link_state linkstate = link_up;
	u32 supported, advertising;

	advertising = 0;
	supported = SUPPORTED_Autoneg;
	if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
		supported |= SUPPORTED_1000baseT_Full;
		advertising |= ADVERTISED_1000baseT_Full;
	}

	/* Sample link state and PHY/PCS registers under the lock. */
	spin_lock_irqsave(&cp->lock, flags);
	bmcr = 0;
	linkstate = cp->lstate;
	if (CAS_PHY_MII(cp->phy_type)) {
		cmd->base.port = PORT_MII;
		cmd->base.phy_address = cp->phy_addr;
		advertising |= ADVERTISED_TP | ADVERTISED_MII |
			ADVERTISED_10baseT_Half |
			ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half |
			ADVERTISED_100baseT_Full;

		supported |=
			(SUPPORTED_10baseT_Half |
			 SUPPORTED_10baseT_Full |
			 SUPPORTED_100baseT_Half |
			 SUPPORTED_100baseT_Full |
			 SUPPORTED_TP | SUPPORTED_MII);

		if (cp->hw_running) {
			/* MIF autopolling is paused around the reads. */
			cas_mif_poll(cp, 0);
			bmcr = cas_phy_read(cp, MII_BMCR);
			cas_read_mii_link_mode(cp, &full_duplex,
					       &speed, &pause);
			cas_mif_poll(cp, 1);
		}

	} else {
		cmd->base.port = PORT_FIBRE;
		cmd->base.phy_address = 0;
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;

		if (cp->hw_running) {
			/* The PCS exposes an MII-like control register. */
			bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
			cas_read_pcs_link_mode(cp, &full_duplex,
					       &speed, &pause);
		}
	}
	spin_unlock_irqrestore(&cp->lock, flags);

	if (bmcr & BMCR_ANENABLE) {
		/* Autoneg on: report the negotiated speed/duplex. */
		advertising |= ADVERTISED_Autoneg;
		cmd->base.autoneg = AUTONEG_ENABLE;
		cmd->base.speed = ((speed == 10) ?
				   SPEED_10 :
				   ((speed == 1000) ?
				    SPEED_1000 : SPEED_100));
		cmd->base.duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
	} else {
		/* Forced (or hw down, bmcr == 0): decode BMCR bits. */
		cmd->base.autoneg = AUTONEG_DISABLE;
		cmd->base.speed = ((bmcr & CAS_BMCR_SPEED1000) ?
				   SPEED_1000 :
				   ((bmcr & BMCR_SPEED100) ?
				    SPEED_100 : SPEED_10));
		cmd->base.duplex = (bmcr & BMCR_FULLDPLX) ?
			DUPLEX_FULL : DUPLEX_HALF;
	}
	if (linkstate != link_up) {
		/* Link is down.  With autoneg enabled there is nothing
		 * meaningful to report (0 / 0xff sentinels); otherwise
		 * report the values the user forced via link_cntl.
		 */
		if (cp->link_cntl & BMCR_ANENABLE) {
			cmd->base.speed = 0;
			cmd->base.duplex = 0xff;
		} else {
			cmd->base.speed = SPEED_10;
			if (cp->link_cntl & BMCR_SPEED100) {
				cmd->base.speed = SPEED_100;
			} else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
				cmd->base.speed = SPEED_1000;
			}
			cmd->base.duplex = (cp->link_cntl & BMCR_FULLDPLX) ?
				DUPLEX_FULL : DUPLEX_HALF;
		}
	}

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
4622
4623static int cas_set_link_ksettings(struct net_device *dev,
4624 const struct ethtool_link_ksettings *cmd)
4625{
4626 struct cas *cp = netdev_priv(dev);
4627 unsigned long flags;
4628 u32 speed = cmd->base.speed;
4629
4630
4631 if (cmd->base.autoneg != AUTONEG_ENABLE &&
4632 cmd->base.autoneg != AUTONEG_DISABLE)
4633 return -EINVAL;
4634
4635 if (cmd->base.autoneg == AUTONEG_DISABLE &&
4636 ((speed != SPEED_1000 &&
4637 speed != SPEED_100 &&
4638 speed != SPEED_10) ||
4639 (cmd->base.duplex != DUPLEX_HALF &&
4640 cmd->base.duplex != DUPLEX_FULL)))
4641 return -EINVAL;
4642
4643
4644 spin_lock_irqsave(&cp->lock, flags);
4645 cas_begin_auto_negotiation(cp, cmd);
4646 spin_unlock_irqrestore(&cp->lock, flags);
4647 return 0;
4648}
4649
4650static int cas_nway_reset(struct net_device *dev)
4651{
4652 struct cas *cp = netdev_priv(dev);
4653 unsigned long flags;
4654
4655 if ((cp->link_cntl & BMCR_ANENABLE) == 0)
4656 return -EINVAL;
4657
4658
4659 spin_lock_irqsave(&cp->lock, flags);
4660 cas_begin_auto_negotiation(cp, NULL);
4661 spin_unlock_irqrestore(&cp->lock, flags);
4662
4663 return 0;
4664}
4665
4666static u32 cas_get_link(struct net_device *dev)
4667{
4668 struct cas *cp = netdev_priv(dev);
4669 return cp->lstate == link_up;
4670}
4671
4672static u32 cas_get_msglevel(struct net_device *dev)
4673{
4674 struct cas *cp = netdev_priv(dev);
4675 return cp->msg_enable;
4676}
4677
4678static void cas_set_msglevel(struct net_device *dev, u32 value)
4679{
4680 struct cas *cp = netdev_priv(dev);
4681 cp->msg_enable = value;
4682}
4683
4684static int cas_get_regs_len(struct net_device *dev)
4685{
4686 struct cas *cp = netdev_priv(dev);
4687 return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len: CAS_MAX_REGS;
4688}
4689
4690static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
4691 void *p)
4692{
4693 struct cas *cp = netdev_priv(dev);
4694 regs->version = 0;
4695
4696 cas_read_regs(cp, p, regs->len / sizeof(u32));
4697}
4698
4699static int cas_get_sset_count(struct net_device *dev, int sset)
4700{
4701 switch (sset) {
4702 case ETH_SS_STATS:
4703 return CAS_NUM_STAT_KEYS;
4704 default:
4705 return -EOPNOTSUPP;
4706 }
4707}
4708
4709static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4710{
4711 memcpy(data, ðtool_cassini_statnames,
4712 CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
4713}
4714
4715static void cas_get_ethtool_stats(struct net_device *dev,
4716 struct ethtool_stats *estats, u64 *data)
4717{
4718 struct cas *cp = netdev_priv(dev);
4719 struct net_device_stats *stats = cas_get_stats(cp->dev);
4720 int i = 0;
4721 data[i++] = stats->collisions;
4722 data[i++] = stats->rx_bytes;
4723 data[i++] = stats->rx_crc_errors;
4724 data[i++] = stats->rx_dropped;
4725 data[i++] = stats->rx_errors;
4726 data[i++] = stats->rx_fifo_errors;
4727 data[i++] = stats->rx_frame_errors;
4728 data[i++] = stats->rx_length_errors;
4729 data[i++] = stats->rx_over_errors;
4730 data[i++] = stats->rx_packets;
4731 data[i++] = stats->tx_aborted_errors;
4732 data[i++] = stats->tx_bytes;
4733 data[i++] = stats->tx_dropped;
4734 data[i++] = stats->tx_errors;
4735 data[i++] = stats->tx_fifo_errors;
4736 data[i++] = stats->tx_packets;
4737 BUG_ON(i != CAS_NUM_STAT_KEYS);
4738}
4739
/* ethtool callbacks; locking is handled inside each handler. */
static const struct ethtool_ops cas_ethtool_ops = {
	.get_drvinfo		= cas_get_drvinfo,
	.nway_reset		= cas_nway_reset,
	.get_link		= cas_get_link,
	.get_msglevel		= cas_get_msglevel,
	.set_msglevel		= cas_set_msglevel,
	.get_regs_len		= cas_get_regs_len,
	.get_regs		= cas_get_regs,
	.get_sset_count		= cas_get_sset_count,
	.get_strings		= cas_get_strings,
	.get_ethtool_stats	= cas_get_ethtool_stats,
	.get_link_ksettings	= cas_get_link_ksettings,
	.set_link_ksettings	= cas_set_link_ksettings,
};
4754
/* ndo_do_ioctl: MII PHY register access (SIOCGMIIPHY / SIOCGMIIREG /
 * SIOCSMIIREG).  pm_mutex keeps this from colliding with open/close
 * and power management; MIF autopolling is paused around PHY access.
 */
static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct cas *cp = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(ifr);
	unsigned long flags;
	int rc = -EOPNOTSUPP;

	mutex_lock(&cp->pm_mutex);
	switch (cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = cp->phy_addr;
		/* fall through */

	case SIOCGMIIREG:		/* Read MII PHY register. */
		spin_lock_irqsave(&cp->lock, flags);
		cas_mif_poll(cp, 0);
		data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
		cas_mif_poll(cp, 1);
		spin_unlock_irqrestore(&cp->lock, flags);
		rc = 0;
		break;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		spin_lock_irqsave(&cp->lock, flags);
		cas_mif_poll(cp, 0);
		rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
		cas_mif_poll(cp, 1);
		spin_unlock_irqrestore(&cp->lock, flags);
		break;
	default:
		break;
	}

	mutex_unlock(&cp->pm_mutex);
	return rc;
}
4794
4795
4796
4797
4798
/* One-time configuration of the PCI bridge directly above the Cassini
 * when it is device 8086:537c (NOTE(review): presumably the Intel
 * 31154 PCI-X bridge — confirm against its datasheet).  Registers
 * 0x40/0x50/0x52 are bridge-specific configuration space.
 */
static void cas_program_bridge(struct pci_dev *cas_pdev)
{
	struct pci_dev *pdev = cas_pdev->bus->self;
	u32 val;

	if (!pdev)
		return;

	if (pdev->vendor != 0x8086 || pdev->device != 0x537c)
		return;

	/* Clear bit 18 of the bridge's register 0x40.  NOTE(review):
	 * exact semantics are bridge-specific; verify against the
	 * bridge documentation before changing.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	val &= ~0x00040000;
	pci_write_config_dword(pdev, 0x40, val);

	/* Word at 0x52: field value 5 in bits [12:10], low 10 bits all
	 * set — NOTE(review): assumed to be prefetch/timer tuning.
	 */
	pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff);

	/* Word at 0x52: four 3-bit fields of 0x7 plus a 4-bit field of
	 * 0xf — NOTE(review): assumed to be secondary-bus arbitration
	 * weights; confirm with the bridge datasheet.
	 */
	pci_write_config_word(pdev, 0x52,
			      (0x7 << 13) |
			      (0x7 << 10) |
			      (0x7 << 7) |
			      (0x7 << 4) |
			      (0xf << 0));

	/* Cache line size: 8 dwords (PCI_CACHE_LINE_SIZE is in dwords). */
	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);

	/* Maximum latency timer. */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff);
}
4878
/* net_device entry points for the Cassini driver. */
static const struct net_device_ops cas_netdev_ops = {
	.ndo_open		= cas_open,
	.ndo_stop		= cas_close,
	.ndo_start_xmit		= cas_start_xmit,
	.ndo_get_stats		= cas_get_stats,
	.ndo_set_rx_mode	= cas_set_multicast,
	.ndo_do_ioctl		= cas_ioctl,
	.ndo_tx_timeout		= cas_tx_timeout,
	.ndo_change_mtu		= cas_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cas_netpoll,
#endif
};
4894
4895static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4896{
4897 static int cas_version_printed = 0;
4898 unsigned long casreg_len;
4899 struct net_device *dev;
4900 struct cas *cp;
4901 int i, err, pci_using_dac;
4902 u16 pci_cmd;
4903 u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
4904
4905 if (cas_version_printed++ == 0)
4906 pr_info("%s", version);
4907
4908 err = pci_enable_device(pdev);
4909 if (err) {
4910 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
4911 return err;
4912 }
4913
4914 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
4915 dev_err(&pdev->dev, "Cannot find proper PCI device "
4916 "base address, aborting\n");
4917 err = -ENODEV;
4918 goto err_out_disable_pdev;
4919 }
4920
4921 dev = alloc_etherdev(sizeof(*cp));
4922 if (!dev) {
4923 err = -ENOMEM;
4924 goto err_out_disable_pdev;
4925 }
4926 SET_NETDEV_DEV(dev, &pdev->dev);
4927
4928 err = pci_request_regions(pdev, dev->name);
4929 if (err) {
4930 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
4931 goto err_out_free_netdev;
4932 }
4933 pci_set_master(pdev);
4934
4935
4936
4937
4938
4939 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
4940 pci_cmd &= ~PCI_COMMAND_SERR;
4941 pci_cmd |= PCI_COMMAND_PARITY;
4942 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
4943 if (pci_try_set_mwi(pdev))
4944 pr_warn("Could not enable MWI for %s\n", pci_name(pdev));
4945
4946 cas_program_bridge(pdev);
4947
4948
4949
4950
4951
4952
4953
4954#if 1
4955 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
4956 &orig_cacheline_size);
4957 if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
4958 cas_cacheline_size =
4959 (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
4960 CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
4961 if (pci_write_config_byte(pdev,
4962 PCI_CACHE_LINE_SIZE,
4963 cas_cacheline_size)) {
4964 dev_err(&pdev->dev, "Could not set PCI cache "
4965 "line size\n");
4966 goto err_out_free_res;
4967 }
4968 }
4969#endif
4970
4971
4972
4973 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4974 pci_using_dac = 1;
4975 err = pci_set_consistent_dma_mask(pdev,
4976 DMA_BIT_MASK(64));
4977 if (err < 0) {
4978 dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
4979 "for consistent allocations\n");
4980 goto err_out_free_res;
4981 }
4982
4983 } else {
4984 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4985 if (err) {
4986 dev_err(&pdev->dev, "No usable DMA configuration, "
4987 "aborting\n");
4988 goto err_out_free_res;
4989 }
4990 pci_using_dac = 0;
4991 }
4992
4993 casreg_len = pci_resource_len(pdev, 0);
4994
4995 cp = netdev_priv(dev);
4996 cp->pdev = pdev;
4997#if 1
4998
4999 cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size: 0;
5000#endif
5001 cp->dev = dev;
5002 cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
5003 cassini_debug;
5004
5005#if defined(CONFIG_SPARC)
5006 cp->of_node = pci_device_to_OF_node(pdev);
5007#endif
5008
5009 cp->link_transition = LINK_TRANSITION_UNKNOWN;
5010 cp->link_transition_jiffies_valid = 0;
5011
5012 spin_lock_init(&cp->lock);
5013 spin_lock_init(&cp->rx_inuse_lock);
5014 spin_lock_init(&cp->rx_spare_lock);
5015 for (i = 0; i < N_TX_RINGS; i++) {
5016 spin_lock_init(&cp->stat_lock[i]);
5017 spin_lock_init(&cp->tx_lock[i]);
5018 }
5019 spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
5020 mutex_init(&cp->pm_mutex);
5021
5022 timer_setup(&cp->link_timer, cas_link_timer, 0);
5023
5024#if 1
5025
5026
5027
5028 atomic_set(&cp->reset_task_pending, 0);
5029 atomic_set(&cp->reset_task_pending_all, 0);
5030 atomic_set(&cp->reset_task_pending_spare, 0);
5031 atomic_set(&cp->reset_task_pending_mtu, 0);
5032#endif
5033 INIT_WORK(&cp->reset_task, cas_reset_task);
5034
5035
5036 if (link_mode >= 0 && link_mode < 6)
5037 cp->link_cntl = link_modes[link_mode];
5038 else
5039 cp->link_cntl = BMCR_ANENABLE;
5040 cp->lstate = link_down;
5041 cp->link_transition = LINK_TRANSITION_LINK_DOWN;
5042 netif_carrier_off(cp->dev);
5043 cp->timer_ticks = 0;
5044
5045
5046 cp->regs = pci_iomap(pdev, 0, casreg_len);
5047 if (!cp->regs) {
5048 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
5049 goto err_out_free_res;
5050 }
5051 cp->casreg_len = casreg_len;
5052
5053 pci_save_state(pdev);
5054 cas_check_pci_invariants(cp);
5055 cas_hard_reset(cp);
5056 cas_reset(cp, 0);
5057 if (cas_check_invariants(cp))
5058 goto err_out_iounmap;
5059 if (cp->cas_flags & CAS_FLAG_SATURN)
5060 cas_saturn_firmware_init(cp);
5061
5062 cp->init_block = (struct cas_init_block *)
5063 pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
5064 &cp->block_dvma);
5065 if (!cp->init_block) {
5066 dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
5067 goto err_out_iounmap;
5068 }
5069
5070 for (i = 0; i < N_TX_RINGS; i++)
5071 cp->init_txds[i] = cp->init_block->txds[i];
5072
5073 for (i = 0; i < N_RX_DESC_RINGS; i++)
5074 cp->init_rxds[i] = cp->init_block->rxds[i];
5075
5076 for (i = 0; i < N_RX_COMP_RINGS; i++)
5077 cp->init_rxcs[i] = cp->init_block->rxcs[i];
5078
5079 for (i = 0; i < N_RX_FLOWS; i++)
5080 skb_queue_head_init(&cp->rx_flows[i]);
5081
5082 dev->netdev_ops = &cas_netdev_ops;
5083 dev->ethtool_ops = &cas_ethtool_ops;
5084 dev->watchdog_timeo = CAS_TX_TIMEOUT;
5085
5086#ifdef USE_NAPI
5087 netif_napi_add(dev, &cp->napi, cas_poll, 64);
5088#endif
5089 dev->irq = pdev->irq;
5090 dev->dma = 0;
5091
5092
5093 if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
5094 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
5095
5096 if (pci_using_dac)
5097 dev->features |= NETIF_F_HIGHDMA;
5098
5099
5100 dev->min_mtu = CAS_MIN_MTU;
5101 dev->max_mtu = CAS_MAX_MTU;
5102
5103 if (register_netdev(dev)) {
5104 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
5105 goto err_out_free_consistent;
5106 }
5107
5108 i = readl(cp->regs + REG_BIM_CFG);
5109 netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n",
5110 (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
5111 (i & BIM_CFG_32BIT) ? "32" : "64",
5112 (i & BIM_CFG_66MHZ) ? "66" : "33",
5113 (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
5114 dev->dev_addr);
5115
5116 pci_set_drvdata(pdev, dev);
5117 cp->hw_running = 1;
5118 cas_entropy_reset(cp);
5119 cas_phy_init(cp);
5120 cas_begin_auto_negotiation(cp, NULL);
5121 return 0;
5122
5123err_out_free_consistent:
5124 pci_free_consistent(pdev, sizeof(struct cas_init_block),
5125 cp->init_block, cp->block_dvma);
5126
5127err_out_iounmap:
5128 mutex_lock(&cp->pm_mutex);
5129 if (cp->hw_running)
5130 cas_shutdown(cp);
5131 mutex_unlock(&cp->pm_mutex);
5132
5133 pci_iounmap(pdev, cp->regs);
5134
5135
5136err_out_free_res:
5137 pci_release_regions(pdev);
5138
5139
5140
5141
5142 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);
5143
5144err_out_free_netdev:
5145 free_netdev(dev);
5146
5147err_out_disable_pdev:
5148 pci_disable_device(pdev);
5149 return -ENODEV;
5150}
5151
/* PCI remove: undo cas_init_one().  The reset worker is cancelled
 * before the chip is shut down and resources are released.
 */
static void cas_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct cas *cp;
	if (!dev)
		return;

	cp = netdev_priv(dev);
	unregister_netdev(dev);

	/* Saturn firmware image, if loaded (vfree(NULL) is a no-op). */
	vfree(cp->fw_data);

	mutex_lock(&cp->pm_mutex);
	cancel_work_sync(&cp->reset_task);
	if (cp->hw_running)
		cas_shutdown(cp);
	mutex_unlock(&cp->pm_mutex);

#if 1
	if (cp->orig_cacheline_size) {
		/* Restore the cache line size we overrode in
		 * cas_init_one().
		 */
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				      cp->orig_cacheline_size);
	}
#endif
	pci_free_consistent(pdev, sizeof(struct cas_init_block),
			    cp->init_block, cp->block_dvma);
	pci_iounmap(pdev, cp->regs);
	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
5186
5187#ifdef CONFIG_PM
/* Legacy PCI suspend hook: quiesce the chip.  If the interface is
 * open, detach it and clear the rings first; cas_resume() reverses
 * all of this.  pm_mutex serializes against open/close/ioctl.
 */
static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct cas *cp = netdev_priv(dev);
	unsigned long flags;

	mutex_lock(&cp->pm_mutex);

	if (cp->opened) {
		netif_device_detach(dev);

		cas_lock_all_save(cp, flags);

		/* Soft reset and ring cleanup; the hardware is fully
		 * reprogrammed by cas_init_hw() on resume.
		 */
		cas_reset(cp, 0);
		cas_clean_rings(cp);
		cas_unlock_all_restore(cp, flags);
	}

	if (cp->hw_running)
		cas_shutdown(cp);
	mutex_unlock(&cp->pm_mutex);

	return 0;
}
5218
/* Legacy PCI resume hook: hard-reset the chip and, if the interface
 * was open at suspend time, rebuild the rings, restart the hardware
 * and reattach the netdev.
 */
static int cas_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct cas *cp = netdev_priv(dev);

	netdev_info(dev, "resuming\n");

	mutex_lock(&cp->pm_mutex);
	cas_hard_reset(cp);
	if (cp->opened) {
		unsigned long flags;
		cas_lock_all_save(cp, flags);
		cas_reset(cp, 0);
		cp->hw_running = 1;
		cas_clean_rings(cp);
		cas_init_hw(cp, 1);
		cas_unlock_all_restore(cp, flags);

		netif_device_attach(dev);
	}
	mutex_unlock(&cp->pm_mutex);
	return 0;
}
5242#endif
5243
/* PCI driver glue; legacy suspend/resume only when CONFIG_PM is set. */
static struct pci_driver cas_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= cas_pci_tbl,
	.probe		= cas_init_one,
	.remove		= cas_remove_one,
#ifdef CONFIG_PM
	.suspend	= cas_suspend,
	.resume		= cas_resume
#endif
};
5254
5255static int __init cas_init(void)
5256{
5257 if (linkdown_timeout > 0)
5258 link_transition_timeout = linkdown_timeout * HZ;
5259 else
5260 link_transition_timeout = 0;
5261
5262 return pci_register_driver(&cas_driver);
5263}
5264
/* Module exit: unregister the PCI driver; the PCI core then invokes
 * cas_remove_one() for every bound device.
 */
static void __exit cas_cleanup(void)
{
	pci_unregister_driver(&cas_driver);
}
5269
5270module_init(cas_init);
5271module_exit(cas_cleanup);
5272