// SPDX-License-Identifier: GPL-2.0+
/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
 *
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/mii.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/firmware.h>

#include <net/checksum.h>

#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#define cas_page_map(x)		kmap_atomic((x))
#define cas_page_unmap(x)	kunmap_atomic((x))
#define CAS_NCPUS		num_online_cpus()

#define cas_skb_release(x)	netif_rx(x)

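/* chip header-parser (HP) firmware selection: the HP workaround
 * program is used by default, with cas_prog_null as the alternate
 * (empty) program.
 */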
#define USE_HP_WORKAROUND
#define HP_WORKAROUND_DEFAULT
#define CAS_HP_ALT_FIRMWARE	cas_prog_null

#include "cassini.h"

#define USE_TX_COMPWB
#define USE_CSMA_CD_PROTO
#define USE_RX_BLANK
#undef USE_ENTROPY_DEV

#undef USE_PCI_INTB
#undef USE_PCI_INTC
#undef USE_PCI_INTD
#undef USE_QOS

#undef USE_VPD_DEBUG

#define USE_PAGE_ORDER
#define RX_DONT_BATCH	0
#define RX_COPY_ALWAYS	0
#define RX_COPY_MIN	64
#undef RX_COUNT_BUFFERS

#define DRV_MODULE_NAME		"cassini"
#define DRV_MODULE_VERSION	"1.6"
#define DRV_MODULE_RELDATE	"21 May 2008"

#define CAS_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define CAS_TX_TIMEOUT			(HZ)
#define CAS_LINK_TIMEOUT		(22*HZ/10)
#define CAS_LINK_FAST_TIMEOUT		(1)

#define STOP_TRIES_PHY			1000
#define STOP_TRIES			5000

#define CAS_MIN_FRAME			97
#define CAS_1000MB_MIN_FRAME		255
#define CAS_MIN_MTU			60
#define CAS_MAX_MTU			min(((cp->page_size << 1) - 0x50), 9000)

#if 1
#else
#define CAS_RESET_MTU			1
#define CAS_RESET_ALL			2
#define CAS_RESET_SPARE			3
#endif

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

static int cassini_debug = -1;
static int link_mode;

MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("sun/cassini.bin");
module_param(cassini_debug, int, 0);
MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
module_param(link_mode, int, 0);
MODULE_PARM_DESC(link_mode, "default link mode");

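/* minimum reset interval, in seconds, used to work around a PCS
 * linkdown issue; a non-positive value disables the reset path.
 */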
#define DEFAULT_LINKDOWN_TIMEOUT 5

static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
module_param(linkdown_timeout, int, 0);
MODULE_PARM_DESC(linkdown_timeout,
"min reset interval in sec. for PCS linkdown issue; disabled if not positive");

static int link_transition_timeout;

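/* link_mode module parameter indexes this table:
 *   0: autoneg, 1: 10bt half duplex, 2: 100bt half duplex,
 *   3: 10bt full duplex, 4: 100bt full duplex, 5: 1000bt full duplex
 */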
static u16 link_modes[] = {
	BMCR_ANENABLE,
	0,
	BMCR_SPEED100,
	BMCR_FULLDPLX,
	BMCR_SPEED100|BMCR_FULLDPLX,
	CAS_BMCR_SPEED1000|BMCR_FULLDPLX
};

static const struct pci_device_id cas_pci_tbl[] = {
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, cas_pci_tbl);

static void cas_set_link_modes(struct cas *cp);

static inline void cas_lock_tx(struct cas *cp)
{
	int i;

	for (i = 0; i < N_TX_RINGS; i++)
		spin_lock_nested(&cp->tx_lock[i], i);
}

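/* cas_lock_all_save() takes the main device lock and then all TX ring
 * locks in ring order, saving IRQ state; it must be paired with
 * cas_unlock_all_restore(), which releases everything in reverse.
 */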
#define cas_lock_all_save(cp, flags) \
do { \
	struct cas *xxxcp = (cp); \
	spin_lock_irqsave(&xxxcp->lock, flags); \
	cas_lock_tx(xxxcp); \
} while (0)

static inline void cas_unlock_tx(struct cas *cp)
{
	int i;

	for (i = N_TX_RINGS; i > 0; i--)
		spin_unlock(&cp->tx_lock[i - 1]);
}

#define cas_unlock_all_restore(cp, flags) \
do { \
	struct cas *xxxcp = (cp); \
	cas_unlock_tx(xxxcp); \
	spin_unlock_irqrestore(&xxxcp->lock, flags); \
} while (0)

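/* Ring 0 is controlled through the global interrupt mask register.
 * On REG_PLUS (Cassini+) chips, each additional RX completion ring has
 * its own mask register; rings serviced by a dedicated PCI INTB/C/D
 * line keep their RX interrupt enabled even when "disabled".
 */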
static void cas_disable_irq(struct cas *cp, const int ring)
{
	if (ring == 0) {
		writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
		return;
	}

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		switch (ring) {
#if defined(USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
		case 1:
#endif
#ifdef USE_PCI_INTC
		case 2:
#endif
#ifdef USE_PCI_INTD
		case 3:
#endif
			writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
			       cp->regs + REG_PLUS_INTRN_MASK(ring));
			break;
#endif
		default:
			writel(INTRN_MASK_CLEAR_ALL, cp->regs +
			       REG_PLUS_INTRN_MASK(ring));
			break;
		}
	}
}

static inline void cas_mask_intr(struct cas *cp)
{
	int i;

	for (i = 0; i < N_RX_COMP_RINGS; i++)
		cas_disable_irq(cp, i);
}

static void cas_enable_irq(struct cas *cp, const int ring)
{
	if (ring == 0) {
		writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
		return;
	}

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		switch (ring) {
#if defined(USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
		case 1:
#endif
#ifdef USE_PCI_INTC
		case 2:
#endif
#ifdef USE_PCI_INTD
		case 3:
#endif
			writel(INTRN_MASK_RX_EN, cp->regs +
			       REG_PLUS_INTRN_MASK(ring));
			break;
#endif
		default:
			break;
		}
	}
}

static inline void cas_unmask_intr(struct cas *cp)
{
	int i;

	for (i = 0; i < N_RX_COMP_RINGS; i++)
		cas_enable_irq(cp, i);
}

static inline void cas_entropy_gather(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
		return;

	batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
			    readl(cp->regs + REG_ENTROPY_IV),
			    sizeof(uint64_t)*8);
#endif
}

static inline void cas_entropy_reset(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
		return;

	writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
	       cp->regs + REG_BIM_LOCAL_DEV_EN);
	writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
	writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);

	if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
		cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
#endif
}

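/* PHY access goes through the MIF frame register: write a read/write
 * frame, then poll for the turnaround LSB that the PHY sets when the
 * cycle completes.  Reads time out to 0xFFFF, writes to -1.
 */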
static u16 cas_phy_read(struct cas *cp, int reg)
{
	u32 cmd;
	int limit = STOP_TRIES_PHY;

	cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
	cmd |= MIF_FRAME_TURN_AROUND_MSB;
	writel(cmd, cp->regs + REG_MIF_FRAME);

	while (limit-- > 0) {
		udelay(10);
		cmd = readl(cp->regs + REG_MIF_FRAME);
		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
			return cmd & MIF_FRAME_DATA_MASK;
	}
	return 0xFFFF;
}

static int cas_phy_write(struct cas *cp, int reg, u16 val)
{
	int limit = STOP_TRIES_PHY;
	u32 cmd;

	cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
	cmd |= MIF_FRAME_TURN_AROUND_MSB;
	cmd |= val & MIF_FRAME_DATA_MASK;
	writel(cmd, cp->regs + REG_MIF_FRAME);

	while (limit-- > 0) {
		udelay(10);
		cmd = readl(cp->regs + REG_MIF_FRAME);
		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
			return 0;
	}
	return -1;
}

static void cas_phy_powerup(struct cas *cp)
{
	u16 ctl = cas_phy_read(cp, MII_BMCR);

	if ((ctl & BMCR_PDOWN) == 0)
		return;
	ctl &= ~BMCR_PDOWN;
	cas_phy_write(cp, MII_BMCR, ctl);
}

static void cas_phy_powerdown(struct cas *cp)
{
	u16 ctl = cas_phy_read(cp, MII_BMCR);

	if (ctl & BMCR_PDOWN)
		return;
	ctl |= BMCR_PDOWN;
	cas_phy_write(cp, MII_BMCR, ctl);
}

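/* Unmap and free one DMA-mapped RX buffer page. */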
static int cas_page_free(struct cas *cp, cas_page_t *page)
{
	dma_unmap_page(&cp->pdev->dev, page->dma_addr, cp->page_size,
		       DMA_FROM_DEVICE);
	__free_pages(page->buffer, cp->page_order);
	kfree(page);
	return 0;
}

#ifdef RX_COUNT_BUFFERS
#define RX_USED_ADD(x, y)	((x)->used += (y))
#define RX_USED_SET(x, y)	((x)->used  = (y))
#else
#define RX_USED_ADD(x, y)	do { } while(0)
#define RX_USED_SET(x, y)	do { } while(0)
#endif

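/* Allocate an RX buffer page: the page is allocated at cp->page_order
 * and DMA-mapped once up front, then handed to the hardware.
 */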
static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
{
	cas_page_t *page;

	page = kmalloc(sizeof(cas_page_t), flags);
	if (!page)
		return NULL;

	INIT_LIST_HEAD(&page->list);
	RX_USED_SET(page, 0);
	page->buffer = alloc_pages(flags, cp->page_order);
	if (!page->buffer)
		goto page_err;
	page->dma_addr = dma_map_page(&cp->pdev->dev, page->buffer, 0,
				      cp->page_size, DMA_FROM_DEVICE);
	return page;

page_err:
	kfree(page);
	return NULL;
}

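/* initialize spare pool of rx buffers, but allocate during the open */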
static void cas_spare_init(struct cas *cp)
{
	spin_lock(&cp->rx_inuse_lock);
	INIT_LIST_HEAD(&cp->rx_inuse_list);
	spin_unlock(&cp->rx_inuse_lock);

	spin_lock(&cp->rx_spare_lock);
	INIT_LIST_HEAD(&cp->rx_spare_list);
	cp->rx_spares_needed = RX_SPARE_COUNT;
	spin_unlock(&cp->rx_spare_lock);
}

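/* used on close. free all the spare buffers. */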
static void cas_spare_free(struct cas *cp)
{
	struct list_head list, *elem, *tmp;

	INIT_LIST_HEAD(&list);
	spin_lock(&cp->rx_spare_lock);
	list_splice_init(&cp->rx_spare_list, &list);
	spin_unlock(&cp->rx_spare_lock);
	list_for_each_safe(elem, tmp, &list) {
		cas_page_free(cp, list_entry(elem, cas_page_t, list));
	}

	INIT_LIST_HEAD(&list);
#if 1
	spin_lock(&cp->rx_inuse_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_inuse_lock);
#else
	spin_lock(&cp->rx_spare_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_spare_lock);
#endif
	list_for_each_safe(elem, tmp, &list) {
		cas_page_free(cp, list_entry(elem, cas_page_t, list));
	}
}

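/* replenish spares if needed */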
static void cas_spare_recover(struct cas *cp, const gfp_t flags)
{
	struct list_head list, *elem, *tmp;
	int needed, i;

	INIT_LIST_HEAD(&list);
	spin_lock(&cp->rx_inuse_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_inuse_lock);

	list_for_each_safe(elem, tmp, &list) {
		cas_page_t *page = list_entry(elem, cas_page_t, list);

		if (page_count(page->buffer) > 1)
			continue;

		list_del(elem);
		spin_lock(&cp->rx_spare_lock);
		if (cp->rx_spares_needed > 0) {
			list_add(elem, &cp->rx_spare_list);
			cp->rx_spares_needed--;
			spin_unlock(&cp->rx_spare_lock);
		} else {
			spin_unlock(&cp->rx_spare_lock);
			cas_page_free(cp, page);
		}
	}

	if (!list_empty(&list)) {
		spin_lock(&cp->rx_inuse_lock);
		list_splice(&list, &cp->rx_inuse_list);
		spin_unlock(&cp->rx_inuse_lock);
	}

	spin_lock(&cp->rx_spare_lock);
	needed = cp->rx_spares_needed;
	spin_unlock(&cp->rx_spare_lock);
	if (!needed)
		return;

	INIT_LIST_HEAD(&list);
	i = 0;
	while (i < needed) {
		cas_page_t *spare = cas_page_alloc(cp, flags);
		if (!spare)
			break;
		list_add(&spare->list, &list);
		i++;
	}

	spin_lock(&cp->rx_spare_lock);
	list_splice(&list, &cp->rx_spare_list);
	cp->rx_spares_needed -= i;
	spin_unlock(&cp->rx_spare_lock);
}

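/* pull a page from the spare list, triggering recovery (and, every
 * RX_SPARE_RECOVER_VAL dequeues, a background reset task) when the
 * pool runs low.
 */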
static cas_page_t *cas_page_dequeue(struct cas *cp)
{
	struct list_head *entry;
	int recover;

	spin_lock(&cp->rx_spare_lock);
	if (list_empty(&cp->rx_spare_list)) {
		spin_unlock(&cp->rx_spare_lock);
		cas_spare_recover(cp, GFP_ATOMIC);
		spin_lock(&cp->rx_spare_lock);
		if (list_empty(&cp->rx_spare_list)) {
			netif_err(cp, rx_err, cp->dev,
				  "no spare buffers available\n");
			spin_unlock(&cp->rx_spare_lock);
			return NULL;
		}
	}

	entry = cp->rx_spare_list.next;
	list_del(entry);
	recover = ++cp->rx_spares_needed;
	spin_unlock(&cp->rx_spare_lock);

	if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
#if 1
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_spare);
		schedule_work(&cp->reset_task);
#else
		atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
		schedule_work(&cp->reset_task);
#endif
	}
	return list_entry(entry, cas_page_t, list);
}

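/* Enable or disable MIF autopolling of the PHY BMSR register; while
 * polling is on, link changes are reported via the MIF interrupt.
 */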
static void cas_mif_poll(struct cas *cp, const int enable)
{
	u32 cfg;

	cfg = readl(cp->regs + REG_MIF_CFG);
	cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);

	if (cp->phy_type & CAS_PHY_MII_MDIO1)
		cfg |= MIF_CFG_PHY_SELECT;

	if (enable) {
		cfg |= MIF_CFG_POLL_EN;
		cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
		cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
	}
	writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
	       cp->regs + REG_MIF_MASK);
	writel(cfg, cp->regs + REG_MIF_CFG);
}

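/* (Re)start link autonegotiation or forced-mode configuration;
 * invoked with cp->lock held.
 */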
static void cas_begin_auto_negotiation(struct cas *cp,
				       const struct ethtool_link_ksettings *ep)
{
	u16 ctl;
#if 1
	int lcntl;
	int changed = 0;
	int oldstate = cp->lstate;
	int link_was_not_down = !(oldstate == link_down);
#endif

	if (!ep)
		goto start_aneg;
	lcntl = cp->link_cntl;
	if (ep->base.autoneg == AUTONEG_ENABLE) {
		cp->link_cntl = BMCR_ANENABLE;
	} else {
		u32 speed = ep->base.speed;
		cp->link_cntl = 0;
		if (speed == SPEED_100)
			cp->link_cntl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			cp->link_cntl |= CAS_BMCR_SPEED1000;
		if (ep->base.duplex == DUPLEX_FULL)
			cp->link_cntl |= BMCR_FULLDPLX;
	}
#if 1
	changed = (lcntl != cp->link_cntl);
#endif
start_aneg:
	if (cp->lstate == link_up) {
		netdev_info(cp->dev, "PCS link down\n");
	} else {
		if (changed) {
			netdev_info(cp->dev, "link configuration changed\n");
		}
	}
	cp->lstate = link_down;
	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
	if (!cp->hw_running)
		return;
#if 1
	if (oldstate == link_up)
		netif_carrier_off(cp->dev);
	if (changed && link_was_not_down) {
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_all);
		schedule_work(&cp->reset_task);
		cp->timer_ticks = 0;
		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
		return;
	}
#endif
	if (cp->phy_type & CAS_PHY_SERDES) {
		u32 val = readl(cp->regs + REG_PCS_MII_CTRL);

		if (cp->link_cntl & BMCR_ANENABLE) {
			val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
			cp->lstate = link_aneg;
		} else {
			if (cp->link_cntl & BMCR_FULLDPLX)
				val |= PCS_MII_CTRL_DUPLEX;
			val &= ~PCS_MII_AUTONEG_EN;
			cp->lstate = link_force_ok;
		}
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		writel(val, cp->regs + REG_PCS_MII_CTRL);

	} else {
		cas_mif_poll(cp, 0);
		ctl = cas_phy_read(cp, MII_BMCR);
		ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
			 CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
		ctl |= cp->link_cntl;
		if (ctl & BMCR_ANENABLE) {
			ctl |= BMCR_ANRESTART;
			cp->lstate = link_aneg;
		} else {
			cp->lstate = link_force_ok;
		}
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		cas_phy_write(cp, MII_BMCR, ctl);
		cas_mif_poll(cp, 1);
	}

	cp->timer_ticks = 0;
	mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
}

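/* Reset the MII PHY and spin until the BMCR reset bit clears;
 * returns nonzero on timeout.
 */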
static int cas_reset_mii_phy(struct cas *cp)
{
	int limit = STOP_TRIES_PHY;
	u16 val;

	cas_phy_write(cp, MII_BMCR, BMCR_RESET);
	udelay(100);
	while (--limit) {
		val = cas_phy_read(cp, MII_BMCR);
		if ((val & BMCR_RESET) == 0)
			break;
		udelay(10);
	}
	return limit <= 0;
}

static void cas_saturn_firmware_init(struct cas *cp)
{
	const struct firmware *fw;
	const char fw_name[] = "sun/cassini.bin";
	int err;

	if (PHY_NS_DP83065 != cp->phy_id)
		return;

	err = request_firmware(&fw, fw_name, &cp->pdev->dev);
	if (err) {
		pr_err("Failed to load firmware \"%s\"\n",
		       fw_name);
		return;
	}
	if (fw->size < 2) {
		pr_err("bogus length %zu in \"%s\"\n",
		       fw->size, fw_name);
		goto out;
	}
	cp->fw_load_addr = fw->data[1] << 8 | fw->data[0];
	cp->fw_size = fw->size - 2;
	cp->fw_data = vmalloc(cp->fw_size);
	if (!cp->fw_data)
		goto out;
	memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
out:
	release_firmware(fw);
}

static void cas_saturn_firmware_load(struct cas *cp)
{
	int i;

	if (!cp->fw_data)
		return;

	cas_phy_powerdown(cp);

	cas_phy_write(cp, DP83065_MII_MEM, 0x0);

	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
	cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
	cas_phy_write(cp, DP83065_MII_REGD, 0x82);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
	cas_phy_write(cp, DP83065_MII_REGD, 0x0);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
	cas_phy_write(cp, DP83065_MII_REGD, 0x39);

	cas_phy_write(cp, DP83065_MII_MEM, 0x1);
	cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
	for (i = 0; i < cp->fw_size; i++)
		cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);

	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
	cas_phy_write(cp, DP83065_MII_REGD, 0x1);
}

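/* phy initialization */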
static void cas_phy_init(struct cas *cp)
{
	u16 val;

	if (CAS_PHY_MII(cp->phy_type)) {
		writel(PCS_DATAPATH_MODE_MII,
		       cp->regs + REG_PCS_DATAPATH_MODE);

		cas_mif_poll(cp, 0);
		cas_reset_mii_phy(cp);

		if (PHY_LUCENT_B0 == cp->phy_id) {
			cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
			cas_phy_write(cp, MII_BMCR, 0x00f1);
			cas_phy_write(cp, LUCENT_MII_REG, 0x0);

		} else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
			cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);

		} else if (PHY_BROADCOM_5411 == cp->phy_id) {
			val = cas_phy_read(cp, BROADCOM_MII_REG4);
			val = cas_phy_read(cp, BROADCOM_MII_REG4);
			if (val & 0x0080) {
				cas_phy_write(cp, BROADCOM_MII_REG4,
					      val & ~0x0080);
			}

		} else if (cp->cas_flags & CAS_FLAG_SATURN) {
			writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
			       SATURN_PCFG_FSI : 0x0,
			       cp->regs + REG_SATURN_PCFG);

			if (PHY_NS_DP83065 == cp->phy_id) {
				cas_saturn_firmware_load(cp);
			}
			cas_phy_powerup(cp);
		}

		val = cas_phy_read(cp, MII_BMCR);
		val &= ~BMCR_ANENABLE;
		cas_phy_write(cp, MII_BMCR, val);
		udelay(10);

		cas_phy_write(cp, MII_ADVERTISE,
			      cas_phy_read(cp, MII_ADVERTISE) |
			      (ADVERTISE_10HALF | ADVERTISE_10FULL |
			       ADVERTISE_100HALF | ADVERTISE_100FULL |
			       CAS_ADVERTISE_PAUSE |
			       CAS_ADVERTISE_ASYM_PAUSE));

		if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
			val = cas_phy_read(cp, CAS_MII_1000_CTRL);
			val &= ~CAS_ADVERTISE_1000HALF;
			val |= CAS_ADVERTISE_1000FULL;
			cas_phy_write(cp, CAS_MII_1000_CTRL, val);
		}

	} else {
		u32 val;
		int limit;

		writel(PCS_DATAPATH_MODE_SERDES,
		       cp->regs + REG_PCS_DATAPATH_MODE);

		if (cp->cas_flags & CAS_FLAG_SATURN)
			writel(0, cp->regs + REG_SATURN_PCFG);

		val = readl(cp->regs + REG_PCS_MII_CTRL);
		val |= PCS_MII_RESET;
		writel(val, cp->regs + REG_PCS_MII_CTRL);

		limit = STOP_TRIES;
		while (--limit > 0) {
			udelay(10);
			if ((readl(cp->regs + REG_PCS_MII_CTRL) &
			     PCS_MII_RESET) == 0)
				break;
		}
		if (limit <= 0)
			netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
				    readl(cp->regs + REG_PCS_STATE_MACHINE));

		writel(0x0, cp->regs + REG_PCS_CFG);

		val = readl(cp->regs + REG_PCS_MII_ADVERT);
		val &= ~PCS_MII_ADVERT_HD;
		val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
			PCS_MII_ADVERT_ASYM_PAUSE);
		writel(val, cp->regs + REG_PCS_MII_ADVERT);

		writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);

		writel(PCS_SERDES_CTRL_SYNCD_EN,
		       cp->regs + REG_PCS_SERDES_CTRL);
	}
}

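/* Check the PCS link status; returns nonzero when the caller should
 * schedule a chip reset, which is used to work around a PCS wedge
 * after link down/up transitions (see the linkdown_timeout parameter).
 */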
static int cas_pcs_link_check(struct cas *cp)
{
	u32 stat, state_machine;
	int retval = 0;

	stat = readl(cp->regs + REG_PCS_MII_STATUS);
	if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
		stat = readl(cp->regs + REG_PCS_MII_STATUS);

	if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
		     PCS_MII_STATUS_REMOTE_FAULT)) ==
	    (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
		netif_info(cp, link, cp->dev, "PCS RemoteFault\n");

	state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
	if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
		stat &= ~PCS_MII_STATUS_LINK_STATUS;
	} else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
		stat |= PCS_MII_STATUS_LINK_STATUS;
	}

	if (stat & PCS_MII_STATUS_LINK_STATUS) {
		if (cp->lstate != link_up) {
			if (cp->opened) {
				cp->lstate = link_up;
				cp->link_transition = LINK_TRANSITION_LINK_UP;

				cas_set_link_modes(cp);
				netif_carrier_on(cp->dev);
			}
		}
	} else if (cp->lstate == link_up) {
		cp->lstate = link_down;
		if (link_transition_timeout != 0 &&
		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
		    !cp->link_transition_jiffies_valid) {
			retval = 1;
			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
			cp->link_transition_jiffies = jiffies;
			cp->link_transition_jiffies_valid = 1;
		} else {
			cp->link_transition = LINK_TRANSITION_ON_FAILURE;
		}
		netif_carrier_off(cp->dev);
		if (cp->opened)
			netif_info(cp, link, cp->dev, "PCS link down\n");

		if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
			stat = readl(cp->regs + REG_PCS_SERDES_STATE);
			if (stat == 0x03)
				return 1;
		}
	} else if (cp->lstate == link_down) {
		if (link_transition_timeout != 0 &&
		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
		    !cp->link_transition_jiffies_valid) {
			retval = 1;
			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
			cp->link_transition_jiffies = jiffies;
			cp->link_transition_jiffies_valid = 1;
		} else {
			cp->link_transition = LINK_TRANSITION_STILL_FAILED;
		}
	}

	return retval;
}

static int cas_pcs_interrupt(struct net_device *dev,
			     struct cas *cp, u32 status)
{
	u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);

	if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
		return 0;
	return cas_pcs_link_check(cp);
}

static int cas_txmac_interrupt(struct net_device *dev,
			       struct cas *cp, u32 status)
{
	u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);

	if (!txmac_stat)
		return 0;

	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
		     "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);

	if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
	    !(txmac_stat & ~MAC_TX_DEFER_TIMER))
		return 0;

	spin_lock(&cp->stat_lock[0]);
	if (txmac_stat & MAC_TX_UNDERRUN) {
		netdev_err(dev, "TX MAC xmit underrun\n");
		cp->net_stats[0].tx_fifo_errors++;
	}

	if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
		netdev_err(dev, "TX MAC max packet size error\n");
		cp->net_stats[0].tx_errors++;
	}

	if (txmac_stat & MAC_TX_COLL_NORMAL)
		cp->net_stats[0].collisions += 0x10000;

	if (txmac_stat & MAC_TX_COLL_EXCESS) {
		cp->net_stats[0].tx_aborted_errors += 0x10000;
		cp->net_stats[0].collisions += 0x10000;
	}

	if (txmac_stat & MAC_TX_COLL_LATE) {
		cp->net_stats[0].tx_aborted_errors += 0x10000;
		cp->net_stats[0].collisions += 0x10000;
	}
	spin_unlock(&cp->stat_lock[0]);

	return 0;
}

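/* Download a header-parser (HP) program: each instruction is written
 * into the HP instruction RAM as three words (HI/MID/LOW) at
 * consecutive addresses.
 */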
static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
{
	cas_hp_inst_t *inst;
	u32 val;
	int i;

	i = 0;
	while ((inst = firmware) && inst->note) {
		writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);

		val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
		val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);

		val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
		val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
		val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
		val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
		val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
		val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
		val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);

		val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
		++firmware;
		++i;
	}
}

static void cas_init_rx_dma(struct cas *cp)
{
	u64 desc_dma = cp->block_dvma;
	u32 val;
	int i, size;

	val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
	val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
	val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
	if ((N_RX_DESC_RINGS > 1) &&
	    (cp->cas_flags & CAS_FLAG_REG_PLUS))
		val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
	writel(val, cp->regs + REG_RX_CFG);

	val = (unsigned long) cp->init_rxds[0] -
		(unsigned long) cp->init_block;
	writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
	writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		val = (unsigned long) cp->init_rxds[1] -
			(unsigned long) cp->init_block;
		writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
		writel((desc_dma + val) & 0xffffffff, cp->regs +
		       REG_PLUS_RX_DB1_LOW);
		writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
		       REG_PLUS_RX_KICK1);
	}

	val = (unsigned long) cp->init_rxcs[0] -
		(unsigned long) cp->init_block;
	writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
			val = (unsigned long) cp->init_rxcs[i] -
				(unsigned long) cp->init_block;
			writel((desc_dma + val) >> 32, cp->regs +
			       REG_PLUS_RX_CBN_HI(i));
			writel((desc_dma + val) & 0xffffffff, cp->regs +
			       REG_PLUS_RX_CBN_LOW(i));
		}
	}

	readl(cp->regs + REG_INTR_STATUS_ALIAS);
	writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		for (i = 1; i < N_RX_COMP_RINGS; i++)
			readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));

		if (N_RX_COMP_RINGS > 1)
			writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
			       cp->regs + REG_PLUS_ALIASN_CLEAR(1));

		for (i = 2; i < N_RX_COMP_RINGS; i++)
			writel(INTR_RX_DONE_ALT,
			       cp->regs + REG_PLUS_ALIASN_CLEAR(i));
	}

	val = CAS_BASE(RX_PAUSE_THRESH_OFF,
		       cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
	val |= CAS_BASE(RX_PAUSE_THRESH_ON,
			cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
	writel(val, cp->regs + REG_RX_PAUSE_THRESH);

	for (i = 0; i < 64; i++) {
		writel(i, cp->regs + REG_RX_TABLE_ADDR);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
	}

	writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
	writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);

#ifdef USE_RX_BLANK
	val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
	val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
	writel(val, cp->regs + REG_RX_BLANK);
#else
	writel(0x0, cp->regs + REG_RX_BLANK);
#endif

	val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
	writel(val, cp->regs + REG_RX_AE_THRESH);
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
		writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
	}

	writel(0x0, cp->regs + REG_RX_RED);

	val = 0;
	if (cp->page_size == 0x1000)
		val = 0x1;
	else if (cp->page_size == 0x2000)
		val = 0x2;
	else if (cp->page_size == 0x4000)
		val = 0x3;

	size = cp->dev->mtu + 64;
	if (size > cp->page_size)
		size = cp->page_size;

	if (size <= 0x400)
		i = 0x0;
	else if (size <= 0x800)
		i = 0x1;
	else if (size <= 0x1000)
		i = 0x2;
	else
		i = 0x3;

	cp->mtu_stride = 1 << (i + 10);
	val = CAS_BASE(RX_PAGE_SIZE, val);
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
	writel(val, cp->regs + REG_RX_PAGE_SIZE);

	if (CAS_HP_FIRMWARE == cas_prog_null)
		return;

	val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
	val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
	val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
	writel(val, cp->regs + REG_HP_CFG);
}

static inline void cas_rxc_init(struct cas_rx_comp *rxc)
{
	memset(rxc, 0, sizeof(*rxc));
	rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
}

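/* RX buffer pages are reference counted: if the stack (or a flow in
 * progress) still holds a reference to page0's buffer, promote the
 * spare page and park the busy page on the in-use list instead of
 * reusing it.
 */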
static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
{
	cas_page_t *page = cp->rx_pages[1][index];
	cas_page_t *new;

	if (page_count(page->buffer) == 1)
		return page;

	new = cas_page_dequeue(cp);
	if (new) {
		spin_lock(&cp->rx_inuse_lock);
		list_add(&page->list, &cp->rx_inuse_list);
		spin_unlock(&cp->rx_inuse_lock);
	}
	return new;
}

static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
				 const int index)
{
	cas_page_t **page0 = cp->rx_pages[0];
	cas_page_t **page1 = cp->rx_pages[1];

	if (page_count(page0[index]->buffer) > 1) {
		cas_page_t *new = cas_page_spare(cp, index);
		if (new) {
			page1[index] = page0[index];
			page0[index] = new;
		}
	}
	RX_USED_SET(page0[index], 0);
	return page0[index];
}

static void cas_clean_rxds(struct cas *cp)
{
	struct cas_rx_desc *rxd = cp->init_rxds[0];
	int i, size;

	for (i = 0; i < N_RX_FLOWS; i++) {
		struct sk_buff *skb;
		while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
			cas_skb_release(skb);
		}
	}

	size = RX_DESC_RINGN_SIZE(0);
	for (i = 0; i < size; i++) {
		cas_page_t *page = cas_page_swap(cp, 0, i);
		rxd[i].buffer = cpu_to_le64(page->dma_addr);
		rxd[i].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
					   CAS_BASE(RX_INDEX_RING, 0));
	}

	cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4;
	cp->rx_last[0] = 0;
	cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
}

static void cas_clean_rxcs(struct cas *cp)
{
	int i, j;

	memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
	memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
	for (i = 0; i < N_RX_COMP_RINGS; i++) {
		struct cas_rx_comp *rxc = cp->init_rxcs[i];
		for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
			cas_rxc_init(rxc + j);
		}
	}
}

#if 0
static int cas_rxmac_reset(struct cas *cp)
{
	struct net_device *dev = cp->dev;
	int limit;
	u32 val;

	writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
		return 1;
	}

	writel(0, cp->regs + REG_RX_CFG);
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
		return 1;
	}

	mdelay(5);

	writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
		return 1;
	}

	cas_clean_rxds(cp);
	cas_clean_rxcs(cp);

	cas_init_rx_dma(cp);

	val = readl(cp->regs + REG_RX_CFG);
	writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
	val = readl(cp->regs + REG_MAC_RX_CFG);
	writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	return 0;
}
#endif

static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
			       u32 status)
{
	u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);

	if (!stat)
		return 0;

	netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);

	spin_lock(&cp->stat_lock[0]);
	if (stat & MAC_RX_ALIGN_ERR)
		cp->net_stats[0].rx_frame_errors += 0x10000;

	if (stat & MAC_RX_CRC_ERR)
		cp->net_stats[0].rx_crc_errors += 0x10000;

	if (stat & MAC_RX_LEN_ERR)
		cp->net_stats[0].rx_length_errors += 0x10000;

	if (stat & MAC_RX_OVERFLOW) {
		cp->net_stats[0].rx_over_errors++;
		cp->net_stats[0].rx_fifo_errors++;
	}

	spin_unlock(&cp->stat_lock[0]);
	return 0;
}

static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
			     u32 status)
{
	u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);

	if (!stat)
		return 0;

	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
		     "mac interrupt, stat: 0x%x\n", stat);

	if (stat & MAC_CTRL_PAUSE_STATE)
		cp->pause_entered++;

	if (stat & MAC_CTRL_PAUSE_RECEIVED)
		cp->pause_last_time_recvd = (stat >> 16);

	return 0;
}

static inline int cas_mdio_link_not_up(struct cas *cp)
{
	u16 val;

	switch (cp->lstate) {
	case link_force_ret:
		netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
		cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
		cp->timer_ticks = 5;
		cp->lstate = link_force_ok;
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		break;

	case link_aneg:
		val = cas_phy_read(cp, MII_BMCR);

		val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
		val |= BMCR_FULLDPLX;
		val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
			CAS_BMCR_SPEED1000 : BMCR_SPEED100;
		cas_phy_write(cp, MII_BMCR, val);
		cp->timer_ticks = 5;
		cp->lstate = link_force_try;
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		break;

	case link_force_try:
		val = cas_phy_read(cp, MII_BMCR);
		cp->timer_ticks = 5;
		if (val & CAS_BMCR_SPEED1000) {
			val &= ~CAS_BMCR_SPEED1000;
			val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
			cas_phy_write(cp, MII_BMCR, val);
			break;
		}

		if (val & BMCR_SPEED100) {
			if (val & BMCR_FULLDPLX)
				val &= ~BMCR_FULLDPLX;
			else {
				val &= ~BMCR_SPEED100;
			}
			cas_phy_write(cp, MII_BMCR, val);
			break;
		}
		break;
	default:
		break;
	}
	return 0;
}

static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
{
	int restart;

	if (bmsr & BMSR_LSTATUS) {
		if ((cp->lstate == link_force_try) &&
		    (cp->link_cntl & BMCR_ANENABLE)) {
			cp->lstate = link_force_ret;
			cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
			cas_mif_poll(cp, 0);
			cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
			cp->timer_ticks = 5;
			if (cp->opened)
				netif_info(cp, link, cp->dev,
					   "Got link after fallback, retrying autoneg once...\n");
			cas_phy_write(cp, MII_BMCR,
				      cp->link_fcntl | BMCR_ANENABLE |
				      BMCR_ANRESTART);
			cas_mif_poll(cp, 1);

		} else if (cp->lstate != link_up) {
			cp->lstate = link_up;
			cp->link_transition = LINK_TRANSITION_LINK_UP;

			if (cp->opened) {
				cas_set_link_modes(cp);
				netif_carrier_on(cp->dev);
			}
		}
		return 0;
	}

	restart = 0;
	if (cp->lstate == link_up) {
		cp->lstate = link_down;
		cp->link_transition = LINK_TRANSITION_LINK_DOWN;

		netif_carrier_off(cp->dev);
		if (cp->opened)
			netif_info(cp, link, cp->dev, "Link down\n");
		restart = 1;

	} else if (++cp->timer_ticks > 10)
		cas_mdio_link_not_up(cp);

	return restart;
}

static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
			     u32 status)
{
	u32 stat = readl(cp->regs + REG_MIF_STATUS);
	u16 bmsr;

	if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
		return 0;

	bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
	return cas_mii_link_check(cp, bmsr);
}

static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
			     u32 status)
{
	u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);

	if (!stat)
		return 0;

	netdev_err(dev, "PCI error [%04x:%04x]",
		   stat, readl(cp->regs + REG_BIM_DIAG));

	if ((stat & PCI_ERR_BADACK) &&
	    ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
		pr_cont(" <No ACK64# during ABS64 cycle>");

	if (stat & PCI_ERR_DTRTO)
		pr_cont(" <Delayed transaction timeout>");
	if (stat & PCI_ERR_OTHER)
		pr_cont(" <other>");
	if (stat & PCI_ERR_BIM_DMA_WRITE)
		pr_cont(" <BIM DMA 0 write req>");
	if (stat & PCI_ERR_BIM_DMA_READ)
		pr_cont(" <BIM DMA 0 read req>");
	pr_cont("\n");

	if (stat & PCI_ERR_OTHER) {
		int pci_errs;

		pci_errs = pci_status_get_and_clear_errors(cp->pdev);

		netdev_err(dev, "PCI status errors[%04x]\n", pci_errs);
		if (pci_errs & PCI_STATUS_PARITY)
			netdev_err(dev, "PCI parity error detected\n");
		if (pci_errs & PCI_STATUS_SIG_TARGET_ABORT)
			netdev_err(dev, "PCI target abort\n");
		if (pci_errs & PCI_STATUS_REC_TARGET_ABORT)
			netdev_err(dev, "PCI master acks target abort\n");
		if (pci_errs & PCI_STATUS_REC_MASTER_ABORT)
			netdev_err(dev, "PCI master abort\n");
		if (pci_errs & PCI_STATUS_SIG_SYSTEM_ERROR)
			netdev_err(dev, "PCI system error SERR#\n");
		if (pci_errs & PCI_STATUS_DETECTED_PARITY)
			netdev_err(dev, "PCI parity error\n");
	}

	return 1;
}

static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
			    u32 status)
{
	if (status & INTR_RX_TAG_ERROR) {
		netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
			     "corrupt rx tag framing\n");
		spin_lock(&cp->stat_lock[0]);
		cp->net_stats[0].rx_errors++;
		spin_unlock(&cp->stat_lock[0]);
		goto do_reset;
	}

	if (status & INTR_RX_LEN_MISMATCH) {
		netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
			     "length mismatch for rx frame\n");
		spin_lock(&cp->stat_lock[0]);
		cp->net_stats[0].rx_errors++;
		spin_unlock(&cp->stat_lock[0]);
		goto do_reset;
	}

	if (status & INTR_PCS_STATUS) {
		if (cas_pcs_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_TX_MAC_STATUS) {
		if (cas_txmac_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_RX_MAC_STATUS) {
		if (cas_rxmac_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_MAC_CTRL_STATUS) {
		if (cas_mac_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_MIF_STATUS) {
		if (cas_mif_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_PCI_ERROR_STATUS) {
		if (cas_pci_interrupt(dev, cp, status))
			goto do_reset;
	}
	return 0;

do_reset:
#if 1
	atomic_inc(&cp->reset_task_pending);
	atomic_inc(&cp->reset_task_pending_all);
	netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
	schedule_work(&cp->reset_task);
#else
	atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
	netdev_err(dev, "reset called in cas_abnormal_irq\n");
	schedule_work(&cp->reset_task);
#endif
	return 1;
}

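/* TX side: on chips flagged CAS_FLAG_TARGET_ABORT, a buffer whose end
 * falls within TX_TARGET_ABORT_LEN bytes of a page boundary needs its
 * tail bounced through a "tiny" buffer; cas_calc_tabort() returns the
 * number of bytes that must be bounced for such a transfer.
 */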
#define CAS_TABORT(x)		(((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
#define CAS_ROUND_PAGE(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)
static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
				  const int len)
{
	unsigned long off = addr + len;

	if (CAS_TABORT(cp) == 1)
		return 0;
	if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
		return 0;
	return TX_TARGET_ABORT_LEN;
}

static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
{
	struct cas_tx_desc *txds;
	struct sk_buff **skbs;
	struct net_device *dev = cp->dev;
	int entry, count;

	spin_lock(&cp->tx_lock[ring]);
	txds = cp->init_txds[ring];
	skbs = cp->tx_skbs[ring];
	entry = cp->tx_old[ring];

	count = TX_BUFF_COUNT(ring, entry, limit);
	while (entry != limit) {
		struct sk_buff *skb = skbs[entry];
		dma_addr_t daddr;
		u32 dlen;
		int frag;

		if (!skb) {
			entry = TX_DESC_NEXT(ring, entry);
			continue;
		}

		count -= skb_shinfo(skb)->nr_frags +
			cp->tx_tiny_use[ring][entry].nbufs + 1;
		if (count < 0)
			break;

		netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
			     "tx[%d] done, slot %d\n", ring, entry);

		skbs[entry] = NULL;
		cp->tx_tiny_use[ring][entry].nbufs = 0;

		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
			struct cas_tx_desc *txd = txds + entry;

			daddr = le64_to_cpu(txd->buffer);
			dlen = CAS_VAL(TX_DESC_BUFLEN,
				       le64_to_cpu(txd->control));
			dma_unmap_page(&cp->pdev->dev, daddr, dlen,
				       DMA_TO_DEVICE);
			entry = TX_DESC_NEXT(ring, entry);

			if (cp->tx_tiny_use[ring][entry].used) {
				cp->tx_tiny_use[ring][entry].used = 0;
				entry = TX_DESC_NEXT(ring, entry);
			}
		}

		spin_lock(&cp->stat_lock[ring]);
		cp->net_stats[ring].tx_packets++;
		cp->net_stats[ring].tx_bytes += skb->len;
		spin_unlock(&cp->stat_lock[ring]);
		dev_consume_skb_irq(skb);
	}
	cp->tx_old[ring] = entry;

	if (netif_queue_stopped(dev) &&
	    (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
		netif_wake_queue(dev);
	spin_unlock(&cp->tx_lock[ring]);
}

static void cas_tx(struct net_device *dev, struct cas *cp,
		   u32 status)
{
	int limit, ring;
#ifdef USE_TX_COMPWB
	u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
#endif
	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
		     "tx interrupt, status: 0x%x, %llx\n",
		     status, (unsigned long long)compwb);

	for (ring = 0; ring < N_TX_RINGS; ring++) {
#ifdef USE_TX_COMPWB
		limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
			CAS_VAL(TX_COMPWB_LSB, compwb);
		compwb = TX_COMPWB_NEXT(compwb);
#else
		limit = readl(cp->regs + REG_TX_COMPN(ring));
#endif
		if (cp->tx_old[ring] != limit)
			cas_tx_ringN(cp, ring, limit);
	}
}

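/* Build an skb from one RX completion: small packets (and any split
 * header) are copied into the skb's linear area, while larger payloads
 * are attached as page fragments to avoid copies.
 */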
static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
			      int entry, const u64 *words,
			      struct sk_buff **skbref)
{
	int dlen, hlen, len, i, alloclen;
	int off, swivel = RX_SWIVEL_OFF_VAL;
	struct cas_page *page;
	struct sk_buff *skb;
	void *addr, *crcaddr;
	__sum16 csum;
	char *p;

	hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
	dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
	len = hlen + dlen;

	if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
		alloclen = len;
	else
		alloclen = max(hlen, RX_COPY_MIN);

	skb = netdev_alloc_skb(cp->dev, alloclen + swivel + cp->crc_size);
	if (skb == NULL)
		return -1;

	*skbref = skb;
	skb_reserve(skb, swivel);

	p = skb->data;
	addr = crcaddr = NULL;
	if (hlen) {
		i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
		off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
			swivel;

		i = hlen;
		if (!dlen)
			i += cp->crc_size;
		dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
					i, DMA_FROM_DEVICE);
		addr = cas_page_map(page->buffer);
		memcpy(p, addr + off, i);
		dma_sync_single_for_device(&cp->pdev->dev,
					   page->dma_addr + off, i,
					   DMA_FROM_DEVICE);
		cas_page_unmap(addr);
		RX_USED_ADD(page, 0x100);
		p += hlen;
		swivel = 0;
	}

	if (alloclen < (hlen + dlen)) {
		skb_frag_t *frag = skb_shinfo(skb)->frags;

		i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
		off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;

		hlen = min(cp->page_size - off, dlen);
		if (hlen < 0) {
			netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
				     "rx page overflow: %d\n", hlen);
			dev_kfree_skb_irq(skb);
			return -1;
		}
		i = hlen;
		if (i == dlen)
			i += cp->crc_size;
		dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
					i, DMA_FROM_DEVICE);

		swivel = 0;
		if (p == (char *) skb->data) {
			addr = cas_page_map(page->buffer);
			memcpy(p, addr + off, RX_COPY_MIN);
			dma_sync_single_for_device(&cp->pdev->dev,
						   page->dma_addr + off, i,
						   DMA_FROM_DEVICE);
			cas_page_unmap(addr);
			off += RX_COPY_MIN;
			swivel = RX_COPY_MIN;
			RX_USED_ADD(page, cp->mtu_stride);
		} else {
			RX_USED_ADD(page, hlen);
		}
		skb_put(skb, alloclen);

		skb_shinfo(skb)->nr_frags++;
		skb->data_len += hlen - swivel;
		skb->truesize += hlen - swivel;
		skb->len      += hlen - swivel;

		__skb_frag_set_page(frag, page->buffer);
		__skb_frag_ref(frag);
		skb_frag_off_set(frag, off);
		skb_frag_size_set(frag, hlen - swivel);

		if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
			hlen = dlen;
			off = 0;

			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
			dma_sync_single_for_cpu(&cp->pdev->dev,
						page->dma_addr,
						hlen + cp->crc_size,
						DMA_FROM_DEVICE);
			dma_sync_single_for_device(&cp->pdev->dev,
						   page->dma_addr,
						   hlen + cp->crc_size,
						   DMA_FROM_DEVICE);

			skb_shinfo(skb)->nr_frags++;
			skb->data_len += hlen;
			skb->len      += hlen;
			frag++;

			__skb_frag_set_page(frag, page->buffer);
			__skb_frag_ref(frag);
			skb_frag_off_set(frag, 0);
			skb_frag_size_set(frag, hlen);
			RX_USED_ADD(page, hlen + cp->crc_size);
		}

		if (cp->crc_size) {
			addr = cas_page_map(page->buffer);
			crcaddr = addr + off + hlen;
		}

	} else {
		if (!dlen)
			goto end_copy_pkt;

		i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
		off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
		hlen = min(cp->page_size - off, dlen);
		if (hlen < 0) {
			netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
				     "rx page overflow: %d\n", hlen);
			dev_kfree_skb_irq(skb);
			return -1;
		}
		i = hlen;
		if (i == dlen)
			i += cp->crc_size;
		dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
					i, DMA_FROM_DEVICE);
		addr = cas_page_map(page->buffer);
		memcpy(p, addr + off, i);
		dma_sync_single_for_device(&cp->pdev->dev,
					   page->dma_addr + off, i,
					   DMA_FROM_DEVICE);
		cas_page_unmap(addr);
		if (p == (char *) skb->data)
			RX_USED_ADD(page, cp->mtu_stride);
		else
			RX_USED_ADD(page, i);

		if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
			p += hlen;
			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
			dma_sync_single_for_cpu(&cp->pdev->dev,
						page->dma_addr,
						dlen + cp->crc_size,
						DMA_FROM_DEVICE);
			addr = cas_page_map(page->buffer);
			memcpy(p, addr, dlen + cp->crc_size);
			dma_sync_single_for_device(&cp->pdev->dev,
						   page->dma_addr,
						   dlen + cp->crc_size,
						   DMA_FROM_DEVICE);
			cas_page_unmap(addr);
			RX_USED_ADD(page, dlen + cp->crc_size);
		}
end_copy_pkt:
		if (cp->crc_size) {
			addr = NULL;
			crcaddr = skb->data + alloclen;
		}
		skb_put(skb, alloclen);
	}

	csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
	if (cp->crc_size) {
		csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
					      csum_unfold(csum)));
		if (addr)
			cas_page_unmap(addr);
	}
	skb->protocol = eth_type_trans(skb, cp->dev);
	if (skb->protocol == htons(ETH_P_IP)) {
		skb->csum = csum_unfold(~csum);
		skb->ip_summed = CHECKSUM_COMPLETE;
	} else
		skb_checksum_none_assert(skb);
	return len;
}

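/* The hardware tags each completion with a flow id so that packets
 * belonging to one TCP flow can be queued locally and released to the
 * stack in order when RX_COMP1_RELEASE_FLOW is signalled, preventing
 * intra-flow reordering.
 */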
static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
				   struct sk_buff *skb)
{
	int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
	struct sk_buff_head *flow = &cp->rx_flows[flowid];

	__skb_queue_tail(flow, skb);
	if (words[0] & RX_COMP1_RELEASE_FLOW) {
		while ((skb = __skb_dequeue(flow))) {
			cas_skb_release(skb);
		}
	}
}

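/* put rx descriptor back on ring. if a buffer is in use by a higher
 * layer, this will need to put in a replacement.
 */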
static void cas_post_page(struct cas *cp, const int ring, const int index)
{
	cas_page_t *new;
	int entry;

	entry = cp->rx_old[ring];

	new = cas_page_swap(cp, ring, index);
	cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
	cp->init_rxds[ring][entry].index =
		cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
			    CAS_BASE(RX_INDEX_RING, ring));

	entry = RX_DESC_ENTRY(ring, entry + 1);
	cp->rx_old[ring] = entry;

	if (entry % 4)
		return;

	if (ring == 0)
		writel(entry, cp->regs + REG_RX_KICK);
	else if ((N_RX_DESC_RINGS > 1) &&
		 (cp->cas_flags & CAS_FLAG_REG_PLUS))
		writel(entry, cp->regs + REG_PLUS_RX_KICK1);
}

static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
{
	unsigned int entry, last, count, released;
	int cluster;
	cas_page_t **page = cp->rx_pages[ring];

	entry = cp->rx_old[ring];

	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
		     "rxd[%d] interrupt, done: %d\n", ring, entry);

	cluster = -1;
	count = entry & 0x3;
	last = RX_DESC_ENTRY(ring, num ? entry + num - 4 : entry - 4);
	released = 0;
	while (entry != last) {
		if (page_count(page[entry]->buffer) > 1) {
			cas_page_t *new = cas_page_dequeue(cp);
			if (!new) {
				cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
				if (!timer_pending(&cp->link_timer))
					mod_timer(&cp->link_timer, jiffies +
						  CAS_LINK_FAST_TIMEOUT);
				cp->rx_old[ring] = entry;
				cp->rx_last[ring] = num ? num - released : 0;
				return -ENOMEM;
			}
			spin_lock(&cp->rx_inuse_lock);
			list_add(&page[entry]->list, &cp->rx_inuse_list);
			spin_unlock(&cp->rx_inuse_lock);
			cp->init_rxds[ring][entry].buffer =
				cpu_to_le64(new->dma_addr);
			page[entry] = new;
		}

		if (++count == 4) {
			cluster = entry;
			count = 0;
		}
		released++;
		entry = RX_DESC_ENTRY(ring, entry + 1);
	}
	cp->rx_old[ring] = entry;

	if (cluster < 0)
		return 0;

	if (ring == 0)
		writel(cluster, cp->regs + REG_RX_KICK);
	else if ((N_RX_DESC_RINGS > 1) &&
		 (cp->cas_flags & CAS_FLAG_REG_PLUS))
		writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
	return 0;
}

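/* process a completion ring. packets are set up in three basic ways:
 * small packets: header and data copied into a single buffer.
 * large packets: header and data in a single buffer.
 * split packets: header in a separate buffer from data;
 *                data may span multiple pages.
 * NOTE: RX page posting is also done in this routine.
 */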
static int cas_rx_ringN(struct cas *cp, int ring, int budget)
{
	struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
	int entry, drops;
	int npackets = 0;

	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
		     "rx[%d] interrupt, done: %d/%d\n",
		     ring,
		     readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);

	entry = cp->rx_new[ring];
	drops = 0;
	while (1) {
		struct cas_rx_comp *rxc = rxcs + entry;
		struct sk_buff *skb;
		int type, len;
		u64 words[4];
		int i, dring;

		words[0] = le64_to_cpu(rxc->word1);
		words[1] = le64_to_cpu(rxc->word2);
		words[2] = le64_to_cpu(rxc->word3);
		words[3] = le64_to_cpu(rxc->word4);

		type = CAS_VAL(RX_COMP1_TYPE, words[0]);
		if (type == 0)
			break;

		if (words[3] & RX_COMP4_ZERO) {
			break;
		}

		if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
			spin_lock(&cp->stat_lock[ring]);
			cp->net_stats[ring].rx_errors++;
			if (words[3] & RX_COMP4_LEN_MISMATCH)
				cp->net_stats[ring].rx_length_errors++;
			if (words[3] & RX_COMP4_BAD)
				cp->net_stats[ring].rx_crc_errors++;
			spin_unlock(&cp->stat_lock[ring]);

		drop_it:
			spin_lock(&cp->stat_lock[ring]);
			++cp->net_stats[ring].rx_dropped;
			spin_unlock(&cp->stat_lock[ring]);
			goto next;
		}

		len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
		if (len < 0) {
			++drops;
			goto drop_it;
		}

		if (RX_DONT_BATCH || (type == 0x2)) {
			cas_skb_release(skb);
		} else {
			cas_rx_flow_pkt(cp, words, skb);
		}

		spin_lock(&cp->stat_lock[ring]);
		cp->net_stats[ring].rx_packets++;
		cp->net_stats[ring].rx_bytes += len;
		spin_unlock(&cp->stat_lock[ring]);

	next:
		npackets++;

		if (words[0] & RX_COMP1_RELEASE_HDR) {
			i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
			dring = CAS_VAL(RX_INDEX_RING, i);
			i = CAS_VAL(RX_INDEX_NUM, i);
			cas_post_page(cp, dring, i);
		}

		if (words[0] & RX_COMP1_RELEASE_DATA) {
			i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
			dring = CAS_VAL(RX_INDEX_RING, i);
			i = CAS_VAL(RX_INDEX_NUM, i);
			cas_post_page(cp, dring, i);
		}

		if (words[0] & RX_COMP1_RELEASE_NEXT) {
			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
			dring = CAS_VAL(RX_INDEX_RING, i);
			i = CAS_VAL(RX_INDEX_NUM, i);
			cas_post_page(cp, dring, i);
		}

		entry = RX_COMP_ENTRY(ring, entry + 1 +
				      CAS_VAL(RX_COMP1_SKIP, words[0]));
#ifdef USE_NAPI
		if (budget && (npackets >= budget))
			break;
#endif
	}
	cp->rx_new[ring] = entry;

	if (drops)
		netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
	return npackets;
}

static void cas_post_rxcs_ringN(struct net_device *dev,
				struct cas *cp, int ring)
{
	struct cas_rx_comp *rxc = cp->init_rxcs[ring];
	int last, entry;

	last = cp->rx_cur[ring];
	entry = cp->rx_new[ring];
	netif_printk(cp, intr, KERN_DEBUG, dev,
		     "rxc[%d] interrupt, done: %d/%d\n",
		     ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);

	while (last != entry) {
		cas_rxc_init(rxc + last);
		last = RX_COMP_ENTRY(ring, last + 1);
	}
	cp->rx_cur[ring] = last;

	if (ring == 0)
		writel(last, cp->regs + REG_RX_COMP_TAIL);
	else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
		writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
}

#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
static inline void cas_handle_irqN(struct net_device *dev,
				   struct cas *cp, const u32 status,
				   const int ring)
{
	if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
		cas_post_rxcs_ringN(dev, cp, ring);
}

static irqreturn_t cas_interruptN(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct cas *cp = netdev_priv(dev);
	unsigned long flags;
	int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));

	if (status == 0)
		return IRQ_NONE;

	spin_lock_irqsave(&cp->lock, flags);
	if (status & INTR_RX_DONE_ALT) {
#ifdef USE_NAPI
		cas_mask_intr(cp);
		napi_schedule(&cp->napi);
#else
		cas_rx_ringN(cp, ring, 0);
#endif
		status &= ~INTR_RX_DONE_ALT;
	}

	if (status)
		cas_handle_irqN(dev, cp, status, ring);
	spin_unlock_irqrestore(&cp->lock, flags);
	return IRQ_HANDLED;
}
#endif

#ifdef USE_PCI_INTB
static inline void cas_handle_irq1(struct cas *cp, const u32 status)
{
	if (status & INTR_RX_BUF_UNAVAIL_1) {
		cas_post_rxds_ringN(cp, 1, 0);
		spin_lock(&cp->stat_lock[1]);
		cp->net_stats[1].rx_dropped++;
		spin_unlock(&cp->stat_lock[1]);
	}

	if (status & INTR_RX_BUF_AE_1)
		cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
				    RX_AE_FREEN_VAL(1));

	if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
		cas_post_rxcs_ringN(cp, 1);
}

static irqreturn_t cas_interrupt1(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct cas *cp = netdev_priv(dev);
	unsigned long flags;
	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));

	if (status == 0)
		return IRQ_NONE;

	spin_lock_irqsave(&cp->lock, flags);
	if (status & INTR_RX_DONE_ALT) {
#ifdef USE_NAPI
		cas_mask_intr(cp);
		napi_schedule(&cp->napi);
#else
		cas_rx_ringN(cp, 1, 0);
#endif
		status &= ~INTR_RX_DONE_ALT;
	}
	if (status)
		cas_handle_irq1(cp, status);
	spin_unlock_irqrestore(&cp->lock, flags);
2498 return IRQ_HANDLED;
2499}
2500#endif
2501
2502static inline void cas_handle_irq(struct net_device *dev,
2503 struct cas *cp, const u32 status)
2504{
2505
2506 if (status & INTR_ERROR_MASK)
2507 cas_abnormal_irq(dev, cp, status);
2508
2509 if (status & INTR_RX_BUF_UNAVAIL) {
2510
2511
2512
2513 cas_post_rxds_ringN(cp, 0, 0);
2514 spin_lock(&cp->stat_lock[0]);
2515 cp->net_stats[0].rx_dropped++;
2516 spin_unlock(&cp->stat_lock[0]);
2517 } else if (status & INTR_RX_BUF_AE) {
2518 cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
2519 RX_AE_FREEN_VAL(0));
2520 }
2521
2522 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2523 cas_post_rxcs_ringN(dev, cp, 0);
2524}
2525
2526static irqreturn_t cas_interrupt(int irq, void *dev_id)
2527{
2528 struct net_device *dev = dev_id;
2529 struct cas *cp = netdev_priv(dev);
2530 unsigned long flags;
2531 u32 status = readl(cp->regs + REG_INTR_STATUS);
2532
2533 if (status == 0)
2534 return IRQ_NONE;
2535
2536 spin_lock_irqsave(&cp->lock, flags);
2537 if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
2538 cas_tx(dev, cp, status);
2539 status &= ~(INTR_TX_ALL | INTR_TX_INTME);
2540 }
2541
2542 if (status & INTR_RX_DONE) {
2543#ifdef USE_NAPI
2544 cas_mask_intr(cp);
2545 napi_schedule(&cp->napi);
2546#else
2547 cas_rx_ringN(cp, 0, 0);
2548#endif
2549 status &= ~INTR_RX_DONE;
2550 }
2551
2552 if (status)
2553 cas_handle_irq(dev, cp, status);
2554 spin_unlock_irqrestore(&cp->lock, flags);
2555 return IRQ_HANDLED;
2556}
2557
2558
2559#ifdef USE_NAPI
2560static int cas_poll(struct napi_struct *napi, int budget)
2561{
2562 struct cas *cp = container_of(napi, struct cas, napi);
2563 struct net_device *dev = cp->dev;
2564 int i, enable_intr, credits;
2565 u32 status = readl(cp->regs + REG_INTR_STATUS);
2566 unsigned long flags;
2567
2568 spin_lock_irqsave(&cp->lock, flags);
2569 cas_tx(dev, cp, status);
2570 spin_unlock_irqrestore(&cp->lock, flags);
2571
2572
2573
2574
2575
2576
2577
2578
2579 enable_intr = 1;
2580 credits = 0;
2581 for (i = 0; i < N_RX_COMP_RINGS; i++) {
2582 int j;
2583 for (j = 0; j < N_RX_COMP_RINGS; j++) {
2584 credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
2585 if (credits >= budget) {
2586 enable_intr = 0;
2587 goto rx_comp;
2588 }
2589 }
2590 }
2591
2592rx_comp:
2593
2594 spin_lock_irqsave(&cp->lock, flags);
2595 if (status)
2596 cas_handle_irq(dev, cp, status);
2597
2598#ifdef USE_PCI_INTB
2599 if (N_RX_COMP_RINGS > 1) {
2600 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2601 if (status)
			cas_handle_irq1(cp, status);
2603 }
2604#endif
2605
2606#ifdef USE_PCI_INTC
2607 if (N_RX_COMP_RINGS > 2) {
2608 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
2609 if (status)
2610 cas_handle_irqN(dev, cp, status, 2);
2611 }
2612#endif
2613
2614#ifdef USE_PCI_INTD
2615 if (N_RX_COMP_RINGS > 3) {
2616 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
2617 if (status)
2618 cas_handle_irqN(dev, cp, status, 3);
2619 }
2620#endif
2621 spin_unlock_irqrestore(&cp->lock, flags);
2622 if (enable_intr) {
2623 napi_complete(napi);
2624 cas_unmask_intr(cp);
2625 }
2626 return credits;
2627}
2628#endif
2629
2630#ifdef CONFIG_NET_POLL_CONTROLLER
2631static void cas_netpoll(struct net_device *dev)
2632{
2633 struct cas *cp = netdev_priv(dev);
2634
2635 cas_disable_irq(cp, 0);
2636 cas_interrupt(cp->pdev->irq, dev);
2637 cas_enable_irq(cp, 0);
2638
#ifdef USE_PCI_INTB
	if (N_RX_COMP_RINGS > 1) {
		/* placeholder: the INTB completion ring is not polled here */
	}
#endif
#ifdef USE_PCI_INTC
	if (N_RX_COMP_RINGS > 2) {
		/* placeholder: the INTC completion ring is not polled here */
	}
#endif
#ifdef USE_PCI_INTD
	if (N_RX_COMP_RINGS > 3) {
		/* placeholder: the INTD completion ring is not polled here */
	}
#endif
2654}
2655#endif
2656
2657static void cas_tx_timeout(struct net_device *dev, unsigned int txqueue)
2658{
2659 struct cas *cp = netdev_priv(dev);
2660
2661 netdev_err(dev, "transmit timed out, resetting\n");
2662 if (!cp->hw_running) {
2663 netdev_err(dev, "hrm.. hw not running!\n");
2664 return;
2665 }
2666
2667 netdev_err(dev, "MIF_STATE[%08x]\n",
2668 readl(cp->regs + REG_MIF_STATE_MACHINE));
2669
2670 netdev_err(dev, "MAC_STATE[%08x]\n",
2671 readl(cp->regs + REG_MAC_STATE_MACHINE));
2672
2673 netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
2674 readl(cp->regs + REG_TX_CFG),
2675 readl(cp->regs + REG_MAC_TX_STATUS),
2676 readl(cp->regs + REG_MAC_TX_CFG),
2677 readl(cp->regs + REG_TX_FIFO_PKT_CNT),
2678 readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
2679 readl(cp->regs + REG_TX_FIFO_READ_PTR),
2680 readl(cp->regs + REG_TX_SM_1),
2681 readl(cp->regs + REG_TX_SM_2));
2682
2683 netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
2684 readl(cp->regs + REG_RX_CFG),
2685 readl(cp->regs + REG_MAC_RX_STATUS),
2686 readl(cp->regs + REG_MAC_RX_CFG));
2687
2688 netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n",
2689 readl(cp->regs + REG_HP_STATE_MACHINE),
2690 readl(cp->regs + REG_HP_STATUS0),
2691 readl(cp->regs + REG_HP_STATUS1),
2692 readl(cp->regs + REG_HP_STATUS2));
2693
2694#if 1
2695 atomic_inc(&cp->reset_task_pending);
2696 atomic_inc(&cp->reset_task_pending_all);
2697 schedule_work(&cp->reset_task);
2698#else
2699 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
2700 schedule_work(&cp->reset_task);
2701#endif
2702}
2703
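/* Request a TX completion interrupt only when the descriptor index is
 * a multiple of half the ring size, i.e. roughly twice per trip around
 * the ring, to keep the completion interrupt rate down.
 */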
2704static inline int cas_intme(int ring, int entry)
2705{
2706
2707 if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)))
2708 return 1;
2709 return 0;
2710}
2711
2712
2713static void cas_write_txd(struct cas *cp, int ring, int entry,
2714 dma_addr_t mapping, int len, u64 ctrl, int last)
2715{
2716 struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
2717
2718 ctrl |= CAS_BASE(TX_DESC_BUFLEN, len);
2719 if (cas_intme(ring, entry))
2720 ctrl |= TX_DESC_INTME;
2721 if (last)
2722 ctrl |= TX_DESC_EOF;
2723 txd->control = cpu_to_le64(ctrl);
2724 txd->buffer = cpu_to_le64(mapping);
2725}
2726
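/* Each TX ring owns a coherent block of small "tiny" buffers, one
 * TX_TINY_BUF_LEN slot per descriptor. Segments the chip can't DMA
 * directly (see cas_calc_tabort()) have their tails bounced through
 * these. tx_tiny_map() also counts the bounce against the packet's
 * first entry so cas_clean_txd() knows to skip the extra descriptor
 * when unmapping.
 */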
2727static inline void *tx_tiny_buf(struct cas *cp, const int ring,
2728 const int entry)
2729{
2730 return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
2731}
2732
2733static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
2734 const int entry, const int tentry)
2735{
2736 cp->tx_tiny_use[ring][tentry].nbufs++;
2737 cp->tx_tiny_use[ring][entry].used = 1;
2738 return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
2739}
2740
2741static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2742 struct sk_buff *skb)
2743{
2744 struct net_device *dev = cp->dev;
2745 int entry, nr_frags, frag, tabort, tentry;
2746 dma_addr_t mapping;
2747 unsigned long flags;
2748 u64 ctrl;
2749 u32 len;
2750
2751 spin_lock_irqsave(&cp->tx_lock[ring], flags);
2752
2753
2754 if (TX_BUFFS_AVAIL(cp, ring) <=
2755 CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
2756 netif_stop_queue(dev);
2757 spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2758 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
2759 return 1;
2760 }
2761
2762 ctrl = 0;
2763 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2764 const u64 csum_start_off = skb_checksum_start_offset(skb);
2765 const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
2766
2767 ctrl = TX_DESC_CSUM_EN |
2768 CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
2769 CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
2770 }
2771
2772 entry = cp->tx_new[ring];
2773 cp->tx_skbs[ring][entry] = skb;
2774
2775 nr_frags = skb_shinfo(skb)->nr_frags;
2776 len = skb_headlen(skb);
2777 mapping = dma_map_page(&cp->pdev->dev, virt_to_page(skb->data),
2778 offset_in_page(skb->data), len, DMA_TO_DEVICE);
2779
2780 tentry = entry;
2781 tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
2782 if (unlikely(tabort)) {
2783
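		/* cas_calc_tabort() flagged this buffer, so DMA the safe
		 * prefix in place and bounce the last tabort bytes through
		 * a tiny buffer in a second descriptor.
		 */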
2784 cas_write_txd(cp, ring, entry, mapping, len - tabort,
2785 ctrl | TX_DESC_SOF, 0);
2786 entry = TX_DESC_NEXT(ring, entry);
2787
2788 skb_copy_from_linear_data_offset(skb, len - tabort,
2789 tx_tiny_buf(cp, ring, entry), tabort);
2790 mapping = tx_tiny_map(cp, ring, entry, tentry);
2791 cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
2792 (nr_frags == 0));
2793 } else {
2794 cas_write_txd(cp, ring, entry, mapping, len, ctrl |
2795 TX_DESC_SOF, (nr_frags == 0));
2796 }
2797 entry = TX_DESC_NEXT(ring, entry);
2798
2799 for (frag = 0; frag < nr_frags; frag++) {
2800 const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
2801
2802 len = skb_frag_size(fragp);
2803 mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
2804 DMA_TO_DEVICE);
2805
2806 tabort = cas_calc_tabort(cp, skb_frag_off(fragp), len);
2807 if (unlikely(tabort)) {
2808 void *addr;
2809
2810
2811 cas_write_txd(cp, ring, entry, mapping, len - tabort,
2812 ctrl, 0);
2813 entry = TX_DESC_NEXT(ring, entry);
2814
2815 addr = cas_page_map(skb_frag_page(fragp));
2816 memcpy(tx_tiny_buf(cp, ring, entry),
2817 addr + skb_frag_off(fragp) + len - tabort,
2818 tabort);
2819 cas_page_unmap(addr);
2820 mapping = tx_tiny_map(cp, ring, entry, tentry);
2821 len = tabort;
2822 }
2823
2824 cas_write_txd(cp, ring, entry, mapping, len, ctrl,
2825 (frag + 1 == nr_frags));
2826 entry = TX_DESC_NEXT(ring, entry);
2827 }
2828
2829 cp->tx_new[ring] = entry;
2830 if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
2831 netif_stop_queue(dev);
2832
2833 netif_printk(cp, tx_queued, KERN_DEBUG, dev,
2834 "tx[%d] queued, slot %d, skblen %d, avail %d\n",
2835 ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
2836 writel(entry, cp->regs + REG_TX_KICKN(ring));
2837 spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2838 return 0;
2839}
2840
2841static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
2842{
2843 struct cas *cp = netdev_priv(dev);
2844
2845
2846
2847
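	/* Round-robin hint for spreading packets across TX rings. A bare
	 * static is tolerable here: a race merely skews the load
	 * balancing, it can't corrupt anything.
	 */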
2848 static int ring;
2849
2850 if (skb_padto(skb, cp->min_frame_size))
2851 return NETDEV_TX_OK;
2852
2853
2854
2855
2856 if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
2857 return NETDEV_TX_BUSY;
2858 return NETDEV_TX_OK;
2859}
2860
2861static void cas_init_tx_dma(struct cas *cp)
2862{
2863 u64 desc_dma = cp->block_dvma;
2864 unsigned long off;
2865 u32 val;
2866 int i;
2867
2868
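	/* Point the chip at the TX completion writeback slot in the init
	 * block; the 64-bit DMA address is split across the HI/LOW
	 * registers.
	 */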
2869#ifdef USE_TX_COMPWB
2870 off = offsetof(struct cas_init_block, tx_compwb);
2871 writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
2872 writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
2873#endif
2874
2875
2876
2877
2878 val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
2879 TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
2880 TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
2881 TX_CFG_INTR_COMPWB_DIS;
2882
2883
2884 for (i = 0; i < MAX_TX_RINGS; i++) {
2885 off = (unsigned long) cp->init_txds[i] -
2886 (unsigned long) cp->init_block;
2887
2888 val |= CAS_TX_RINGN_BASE(i);
2889 writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
2890 writel((desc_dma + off) & 0xffffffff, cp->regs +
2891 REG_TX_DBN_LOW(i));
2892
2893
2894
2895 }
2896 writel(val, cp->regs + REG_TX_CFG);
2897
2898
2899
2900
2901#ifdef USE_QOS
2902 writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2903 writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
2904 writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
2905 writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
2906#else
2907 writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2908 writel(0x800, cp->regs + REG_TX_MAXBURST_1);
2909 writel(0x800, cp->regs + REG_TX_MAXBURST_2);
2910 writel(0x800, cp->regs + REG_TX_MAXBURST_3);
2911#endif
2912}
2913
2914
2915static inline void cas_init_dma(struct cas *cp)
2916{
2917 cas_init_tx_dma(cp);
2918 cas_init_rx_dma(cp);
2919}
2920
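/* Program multicast filtering: the first CAS_MC_EXACT_MATCH_SIZE
 * addresses get exact-match MAC address registers (three 16-bit writes
 * each); the rest are folded into the 256-bit hash table using the top
 * byte of the little-endian CRC of each address.
 */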
2921static void cas_process_mc_list(struct cas *cp)
2922{
2923 u16 hash_table[16];
2924 u32 crc;
2925 struct netdev_hw_addr *ha;
2926 int i = 1;
2927
2928 memset(hash_table, 0, sizeof(hash_table));
2929 netdev_for_each_mc_addr(ha, cp->dev) {
2930 if (i <= CAS_MC_EXACT_MATCH_SIZE) {
2931
2932
2933
2934 writel((ha->addr[4] << 8) | ha->addr[5],
2935 cp->regs + REG_MAC_ADDRN(i*3 + 0));
2936 writel((ha->addr[2] << 8) | ha->addr[3],
2937 cp->regs + REG_MAC_ADDRN(i*3 + 1));
2938 writel((ha->addr[0] << 8) | ha->addr[1],
2939 cp->regs + REG_MAC_ADDRN(i*3 + 2));
2940 i++;
		} else {
2943
2944
2945
2946 crc = ether_crc_le(ETH_ALEN, ha->addr);
2947 crc >>= 24;
2948 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
2949 }
2950 }
2951 for (i = 0; i < 16; i++)
2952 writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i));
2953}
2954
2955
2956static u32 cas_setup_multicast(struct cas *cp)
2957{
2958 u32 rxcfg = 0;
2959 int i;
2960
2961 if (cp->dev->flags & IFF_PROMISC) {
2962 rxcfg |= MAC_RX_CFG_PROMISC_EN;
2963
2964 } else if (cp->dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < 16; i++)
2966 writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
2967 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
2968
2969 } else {
2970 cas_process_mc_list(cp);
2971 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
2972 }
2973
2974 return rxcfg;
2975}
2976
2977
2978static void cas_clear_mac_err(struct cas *cp)
2979{
2980 writel(0, cp->regs + REG_MAC_COLL_NORMAL);
2981 writel(0, cp->regs + REG_MAC_COLL_FIRST);
2982 writel(0, cp->regs + REG_MAC_COLL_EXCESS);
2983 writel(0, cp->regs + REG_MAC_COLL_LATE);
2984 writel(0, cp->regs + REG_MAC_TIMER_DEFER);
2985 writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK);
2986 writel(0, cp->regs + REG_MAC_RECV_FRAME);
2987 writel(0, cp->regs + REG_MAC_LEN_ERR);
2988 writel(0, cp->regs + REG_MAC_ALIGN_ERR);
2989 writel(0, cp->regs + REG_MAC_FCS_ERR);
2990 writel(0, cp->regs + REG_MAC_RX_CODE_ERR);
2991}
2992
2993
2994static void cas_mac_reset(struct cas *cp)
2995{
2996 int i;
2997
2998
2999 writel(0x1, cp->regs + REG_MAC_TX_RESET);
3000 writel(0x1, cp->regs + REG_MAC_RX_RESET);
3001
3002
3003 i = STOP_TRIES;
3004 while (i-- > 0) {
3005 if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
3006 break;
3007 udelay(10);
3008 }
3009
3010
3011 i = STOP_TRIES;
3012 while (i-- > 0) {
3013 if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
3014 break;
3015 udelay(10);
3016 }
3017
3018 if (readl(cp->regs + REG_MAC_TX_RESET) |
3019 readl(cp->regs + REG_MAC_RX_RESET))
3020 netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n",
3021 readl(cp->regs + REG_MAC_TX_RESET),
3022 readl(cp->regs + REG_MAC_RX_RESET),
3023 readl(cp->regs + REG_MAC_STATE_MACHINE));
3024}
3025
3026
3027
3028static void cas_init_mac(struct cas *cp)
3029{
3030 unsigned char *e = &cp->dev->dev_addr[0];
3031 int i;
3032 cas_mac_reset(cp);
3033
3034
3035 writel(CAWR_RR_DIS, cp->regs + REG_CAWR);
3036
3037#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
3038
3039
3040
3041 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
3042 writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
3043#endif
3044
3045 writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);
3046
3047 writel(0x00, cp->regs + REG_MAC_IPG0);
3048 writel(0x08, cp->regs + REG_MAC_IPG1);
3049 writel(0x04, cp->regs + REG_MAC_IPG2);
3050
3051
3052 writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3053
3054
3055 writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);
3056
3057
3058
3059
3060
3061 writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
3062 CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
3063 (CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
3064 cp->regs + REG_MAC_FRAMESIZE_MAX);
3065
3066
3067
3068
3069
3070 if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
3071 writel(0x41, cp->regs + REG_MAC_PA_SIZE);
3072 else
3073 writel(0x07, cp->regs + REG_MAC_PA_SIZE);
3074 writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
3075 writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
3076 writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);
3077
3078 writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);
3079
3080 writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
3081 writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
3082 writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
3083 writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
3084 writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);
3085
3086
3087 for (i = 0; i < 45; i++)
3088 writel(0x0, cp->regs + REG_MAC_ADDRN(i));
3089
3090 writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
3091 writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
3092 writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));
3093
3094 writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
3095 writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
3096 writel(0x0180, cp->regs + REG_MAC_ADDRN(44));
3097
3098 cp->mac_rx_cfg = cas_setup_multicast(cp);
3099
3100 spin_lock(&cp->stat_lock[N_TX_RINGS]);
3101 cas_clear_mac_err(cp);
3102 spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3103
3104
3105
3106
3107
3108 writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
3109 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
3110
3111
3112
3113
3114 writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
3115}
3116
3117
3118static void cas_init_pause_thresholds(struct cas *cp)
3119{
3120
3121
3122
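	/* With a tiny RX FIFO, the pause on/off thresholds both sit at
	 * the FIFO size. Otherwise set "off" two max frames below the
	 * top of the FIFO and "on" one frame below that, falling back to
	 * fixed 7104/960 values when even three frames don't fit.
	 */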
3123 if (cp->rx_fifo_size <= (2 * 1024)) {
3124 cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
3125 } else {
3126 int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
3127 if (max_frame * 3 > cp->rx_fifo_size) {
3128 cp->rx_pause_off = 7104;
3129 cp->rx_pause_on = 960;
3130 } else {
3131 int off = (cp->rx_fifo_size - (max_frame * 2));
3132 int on = off - max_frame;
3133 cp->rx_pause_off = off;
3134 cp->rx_pause_on = on;
3135 }
3136 }
3137}
3138
3139static int cas_vpd_match(const void __iomem *p, const char *str)
3140{
3141 int len = strlen(str) + 1;
3142 int i;
3143
3144 for (i = 0; i < len; i++) {
3145 if (readb(p + i) != str[i])
3146 return 0;
3147 }
3148 return 1;
3149}
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
3161
3162
3163static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
3164 const int offset)
3165{
3166 void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
3167 void __iomem *base, *kstart;
3168 int i, len;
3169 int found = 0;
3170#define VPD_FOUND_MAC 0x01
3171#define VPD_FOUND_PHY 0x02
3172
3173 int phy_type = CAS_PHY_MII_MDIO0;
3174 int mac_off = 0;
3175
3176#if defined(CONFIG_SPARC)
3177 const unsigned char *addr;
3178#endif
3179
3180
3181 writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
3182 cp->regs + REG_BIM_LOCAL_DEV_EN);
3183
3184
3185 if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
3186 goto use_random_mac_addr;
3187
3188
3189 base = NULL;
3190 for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
3191
3192 if ((readb(p + i + 0) == 0x50) &&
3193 (readb(p + i + 1) == 0x43) &&
3194 (readb(p + i + 2) == 0x49) &&
3195 (readb(p + i + 3) == 0x52)) {
3196 base = p + (readb(p + i + 8) |
3197 (readb(p + i + 9) << 8));
3198 break;
3199 }
3200 }
3201
3202 if (!base || (readb(base) != 0x82))
3203 goto use_random_mac_addr;
3204
3205 i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
3206 while (i < EXPANSION_ROM_SIZE) {
3207 if (readb(base + i) != 0x90)
3208 goto use_random_mac_addr;
3209
3210
3211 len = readb(base + i + 1) | (readb(base + i + 2) << 8);
3212
3213
3214 kstart = base + i + 3;
3215 p = kstart;
3216 while ((p - kstart) < len) {
3217 int klen = readb(p + 2);
3218 int j;
3219 char type;
3220
3221 p += 3;
3259
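			/* Each field begins with a 3-byte header whose
			 * third byte is the payload length (klen).
			 * Instance fields then start with 'I' and carry a
			 * data-type byte at offset 3: 'B' (binary) for
			 * local-mac-address, 'S' (string) for
			 * phy-type/phy-interface and the optional entropy
			 * device.
			 */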
3260 if (readb(p) != 'I')
3261 goto next;
3262
3263
3264 type = readb(p + 3);
3265 if (type == 'B') {
3266 if ((klen == 29) && readb(p + 4) == 6 &&
3267 cas_vpd_match(p + 5,
3268 "local-mac-address")) {
3269 if (mac_off++ > offset)
3270 goto next;
3271
3272
3273 for (j = 0; j < 6; j++)
3274 dev_addr[j] =
3275 readb(p + 23 + j);
3276 goto found_mac;
3277 }
3278 }
3279
3280 if (type != 'S')
3281 goto next;
3282
3283#ifdef USE_ENTROPY_DEV
3284 if ((klen == 24) &&
3285 cas_vpd_match(p + 5, "entropy-dev") &&
3286 cas_vpd_match(p + 17, "vms110")) {
3287 cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
3288 goto next;
3289 }
3290#endif
3291
3292 if (found & VPD_FOUND_PHY)
3293 goto next;
3294
3295 if ((klen == 18) && readb(p + 4) == 4 &&
3296 cas_vpd_match(p + 5, "phy-type")) {
3297 if (cas_vpd_match(p + 14, "pcs")) {
3298 phy_type = CAS_PHY_SERDES;
3299 goto found_phy;
3300 }
3301 }
3302
3303 if ((klen == 23) && readb(p + 4) == 4 &&
3304 cas_vpd_match(p + 5, "phy-interface")) {
3305 if (cas_vpd_match(p + 19, "pcs")) {
3306 phy_type = CAS_PHY_SERDES;
3307 goto found_phy;
3308 }
3309 }
3310found_mac:
3311 found |= VPD_FOUND_MAC;
3312 goto next;
3313
3314found_phy:
3315 found |= VPD_FOUND_PHY;
3316
3317next:
3318 p += klen;
3319 }
3320 i += len + 3;
3321 }
3322
3323use_random_mac_addr:
3324 if (found & VPD_FOUND_MAC)
3325 goto done;
3326
3327#if defined(CONFIG_SPARC)
3328 addr = of_get_property(cp->of_node, "local-mac-address", NULL);
3329 if (addr != NULL) {
3330 memcpy(dev_addr, addr, ETH_ALEN);
3331 goto done;
3332 }
3333#endif
3334
3335
3336 pr_info("MAC address not found in ROM VPD\n");
3337 dev_addr[0] = 0x08;
3338 dev_addr[1] = 0x00;
3339 dev_addr[2] = 0x20;
3340 get_random_bytes(dev_addr + 3, 3);
3341
3342done:
3343 writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3344 return phy_type;
3345}
3346
3347
3348static void cas_check_pci_invariants(struct cas *cp)
3349{
3350 struct pci_dev *pdev = cp->pdev;
3351
3352 cp->cas_flags = 0;
3353 if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
3354 (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
3355 if (pdev->revision >= CAS_ID_REVPLUS)
3356 cp->cas_flags |= CAS_FLAG_REG_PLUS;
3357 if (pdev->revision < CAS_ID_REVPLUS02u)
3358 cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
3359
3360
3361
3362
3363 if (pdev->revision < CAS_ID_REV2)
3364 cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
3365 } else {
3366
3367 cp->cas_flags |= CAS_FLAG_REG_PLUS;
3368
3369
3370
3371
3372 if ((pdev->vendor == PCI_VENDOR_ID_NS) &&
3373 (pdev->device == PCI_DEVICE_ID_NS_SATURN))
3374 cp->cas_flags |= CAS_FLAG_SATURN;
3375 }
3376}
3377
3378
3379static int cas_check_invariants(struct cas *cp)
3380{
3381 struct pci_dev *pdev = cp->pdev;
3382 u32 cfg;
3383 int i;
3384
3385
3386 cp->page_order = 0;
3387#ifdef USE_PAGE_ORDER
3388 if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
3389
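		/* Probe whether the allocator can actually hand out
		 * compound pages big enough for jumbo frames; if not,
		 * stay at order 0 and live with the reduced maximum MTU.
		 */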
3390 struct page *page = alloc_pages(GFP_ATOMIC,
3391 CAS_JUMBO_PAGE_SHIFT -
3392 PAGE_SHIFT);
3393 if (page) {
3394 __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
3395 cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
3396 } else {
			pr_info("MTU limited to %d bytes\n", CAS_MAX_MTU);
3398 }
3399 }
3400#endif
3401 cp->page_size = (PAGE_SIZE << cp->page_order);
3402
3403
3404 cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
3405 cp->rx_fifo_size = RX_FIFO_SIZE;
3406
3407
3408
3409
3410 cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
3411 PCI_SLOT(pdev->devfn));
3412 if (cp->phy_type & CAS_PHY_SERDES) {
3413 cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3414 return 0;
3415 }
3416
3417
3418 cfg = readl(cp->regs + REG_MIF_CFG);
3419 if (cfg & MIF_CFG_MDIO_1) {
3420 cp->phy_type = CAS_PHY_MII_MDIO1;
3421 } else if (cfg & MIF_CFG_MDIO_0) {
3422 cp->phy_type = CAS_PHY_MII_MDIO0;
3423 }
3424
3425 cas_mif_poll(cp, 0);
3426 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3427
3428 for (i = 0; i < 32; i++) {
3429 u32 phy_id;
3430 int j;
3431
3432 for (j = 0; j < 3; j++) {
3433 cp->phy_addr = i;
3434 phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
3435 phy_id |= cas_phy_read(cp, MII_PHYSID2);
3436 if (phy_id && (phy_id != 0xFFFFFFFF)) {
3437 cp->phy_id = phy_id;
3438 goto done;
3439 }
3440 }
3441 }
3442 pr_err("MII phy did not respond [%08x]\n",
3443 readl(cp->regs + REG_MIF_STATE_MACHINE));
3444 return -1;
3445
3446done:
3447
3448 cfg = cas_phy_read(cp, MII_BMSR);
3449 if ((cfg & CAS_BMSR_1000_EXTEND) &&
3450 cas_phy_read(cp, CAS_MII_1000_EXTEND))
3451 cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3452 return 0;
3453}
3454
3455
3456static inline void cas_start_dma(struct cas *cp)
3457{
3458 int i;
3459 u32 val;
3460 int txfailed = 0;
3461
3462
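	/* Enable the TX/RX DMA engines first, then the MAC blocks, and
	 * poll until the MAC enable bits read back as set.
	 */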
3463 val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
3464 writel(val, cp->regs + REG_TX_CFG);
3465 val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
3466 writel(val, cp->regs + REG_RX_CFG);
3467
3468
3469 val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
3470 writel(val, cp->regs + REG_MAC_TX_CFG);
3471 val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
3472 writel(val, cp->regs + REG_MAC_RX_CFG);
3473
3474 i = STOP_TRIES;
3475 while (i-- > 0) {
3476 val = readl(cp->regs + REG_MAC_TX_CFG);
		if (val & MAC_TX_CFG_EN)
3478 break;
3479 udelay(10);
3480 }
	if (i < 0)
		txfailed = 1;
3482 i = STOP_TRIES;
3483 while (i-- > 0) {
3484 val = readl(cp->regs + REG_MAC_RX_CFG);
		if (val & MAC_RX_CFG_EN) {
3486 if (txfailed) {
3487 netdev_err(cp->dev,
3488 "enabling mac failed [tx:%08x:%08x]\n",
3489 readl(cp->regs + REG_MIF_STATE_MACHINE),
3490 readl(cp->regs + REG_MAC_STATE_MACHINE));
3491 }
3492 goto enable_rx_done;
3493 }
3494 udelay(10);
3495 }
3496 netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
3497 (txfailed ? "tx,rx" : "rx"),
3498 readl(cp->regs + REG_MIF_STATE_MACHINE),
3499 readl(cp->regs + REG_MAC_STATE_MACHINE));
3500
3501enable_rx_done:
3502 cas_unmask_intr(cp);
3503 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
3504 writel(0, cp->regs + REG_RX_COMP_TAIL);
3505
3506 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
3507 if (N_RX_DESC_RINGS > 1)
3508 writel(RX_DESC_RINGN_SIZE(1) - 4,
3509 cp->regs + REG_PLUS_RX_KICK1);
3510
3511 for (i = 1; i < N_RX_COMP_RINGS; i++)
3512 writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
3513 }
3514}
3515
3516
3517static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
3518 int *pause)
3519{
3520 u32 val = readl(cp->regs + REG_PCS_MII_LPA);
3521 *fd = (val & PCS_MII_LPA_FD) ? 1 : 0;
3522 *pause = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00;
3523 if (val & PCS_MII_LPA_ASYM_PAUSE)
3524 *pause |= 0x10;
3525 *spd = 1000;
3526}
3527
3528
3529static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
3530 int *pause)
3531{
3532 u32 val;
3533
3534 *fd = 0;
3535 *spd = 10;
3536 *pause = 0;
3537
3538
3539 val = cas_phy_read(cp, MII_LPA);
3540 if (val & CAS_LPA_PAUSE)
3541 *pause = 0x01;
3542
3543 if (val & CAS_LPA_ASYM_PAUSE)
3544 *pause |= 0x10;
3545
3546 if (val & LPA_DUPLEX)
3547 *fd = 1;
3548 if (val & LPA_100)
3549 *spd = 100;
3550
3551 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
3552 val = cas_phy_read(cp, CAS_MII_1000_STATUS);
3553 if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF))
3554 *spd = 1000;
3555 if (val & CAS_LPA_1000FULL)
3556 *fd = 1;
3557 }
3558}
3559
3560
3561
3562
3563
3564
3565static void cas_set_link_modes(struct cas *cp)
3566{
3567 u32 val;
3568 int full_duplex, speed, pause;
3569
3570 full_duplex = 0;
3571 speed = 10;
3572 pause = 0;
3573
3574 if (CAS_PHY_MII(cp->phy_type)) {
3575 cas_mif_poll(cp, 0);
3576 val = cas_phy_read(cp, MII_BMCR);
3577 if (val & BMCR_ANENABLE) {
3578 cas_read_mii_link_mode(cp, &full_duplex, &speed,
3579 &pause);
3580 } else {
3581 if (val & BMCR_FULLDPLX)
3582 full_duplex = 1;
3583
3584 if (val & BMCR_SPEED100)
3585 speed = 100;
3586 else if (val & CAS_BMCR_SPEED1000)
3587 speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
3588 1000 : 100;
3589 }
3590 cas_mif_poll(cp, 1);
3591
3592 } else {
3593 val = readl(cp->regs + REG_PCS_MII_CTRL);
3594 cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
3595 if ((val & PCS_MII_AUTONEG_EN) == 0) {
3596 if (val & PCS_MII_CTRL_DUPLEX)
3597 full_duplex = 1;
3598 }
3599 }
3600
3601 netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
3602 speed, full_duplex ? "full" : "half");
3603
3604 val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
3605 if (CAS_PHY_MII(cp->phy_type)) {
3606 val |= MAC_XIF_MII_BUFFER_OUTPUT_EN;
3607 if (!full_duplex)
3608 val |= MAC_XIF_DISABLE_ECHO;
3609 }
3610 if (full_duplex)
3611 val |= MAC_XIF_FDPLX_LED;
3612 if (speed == 1000)
3613 val |= MAC_XIF_GMII_MODE;
3614 writel(val, cp->regs + REG_MAC_XIF_CFG);
3615
3616
3617 val = MAC_TX_CFG_IPG_EN;
3618 if (full_duplex) {
3619 val |= MAC_TX_CFG_IGNORE_CARRIER;
3620 val |= MAC_TX_CFG_IGNORE_COLL;
3621 } else {
3622#ifndef USE_CSMA_CD_PROTO
3623 val |= MAC_TX_CFG_NEVER_GIVE_UP_EN;
3624 val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM;
3625#endif
3626 }
3627
3628
3629
3630
3631
3632
3633
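	/* Half-duplex gigabit needs carrier extension on both MAC paths,
	 * a 0x200 slot time and a larger minimum frame size. FCS
	 * stripping is disabled on this path, so crc_size accounts for
	 * the trailing 4 bytes instead.
	 */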
3634 if ((speed == 1000) && !full_duplex) {
3635 writel(val | MAC_TX_CFG_CARRIER_EXTEND,
3636 cp->regs + REG_MAC_TX_CFG);
3637
3638 val = readl(cp->regs + REG_MAC_RX_CFG);
3639 val &= ~MAC_RX_CFG_STRIP_FCS;
3640 writel(val | MAC_RX_CFG_CARRIER_EXTEND,
3641 cp->regs + REG_MAC_RX_CFG);
3642
3643 writel(0x200, cp->regs + REG_MAC_SLOT_TIME);
3644
3645 cp->crc_size = 4;
3646
3647 cp->min_frame_size = CAS_1000MB_MIN_FRAME;
3648
3649 } else {
3650 writel(val, cp->regs + REG_MAC_TX_CFG);
3651
3652
3653
3654
3655 val = readl(cp->regs + REG_MAC_RX_CFG);
3656 if (full_duplex) {
3657 val |= MAC_RX_CFG_STRIP_FCS;
3658 cp->crc_size = 0;
3659 cp->min_frame_size = CAS_MIN_MTU;
3660 } else {
3661 val &= ~MAC_RX_CFG_STRIP_FCS;
3662 cp->crc_size = 4;
3663 cp->min_frame_size = CAS_MIN_FRAME;
3664 }
3665 writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
3666 cp->regs + REG_MAC_RX_CFG);
3667 writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3668 }
3669
3670 if (netif_msg_link(cp)) {
3671 if (pause & 0x01) {
3672 netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
3673 cp->rx_fifo_size,
3674 cp->rx_pause_off,
3675 cp->rx_pause_on);
3676 } else if (pause & 0x10) {
3677 netdev_info(cp->dev, "TX pause enabled\n");
3678 } else {
3679 netdev_info(cp->dev, "Pause is disabled\n");
3680 }
3681 }
3682
3683 val = readl(cp->regs + REG_MAC_CTRL_CFG);
3684 val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
3685 if (pause) {
3686 val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
3687 if (pause & 0x01) {
3688 val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
3689 }
3690 }
3691 writel(val, cp->regs + REG_MAC_CTRL_CFG);
3692 cas_start_dma(cp);
3693}
3694
3695
3696static void cas_init_hw(struct cas *cp, int restart_link)
3697{
3698 if (restart_link)
3699 cas_phy_init(cp);
3700
3701 cas_init_pause_thresholds(cp);
3702 cas_init_mac(cp);
3703 cas_init_dma(cp);
3704
3705 if (restart_link) {
3706
3707 cp->timer_ticks = 0;
3708 cas_begin_auto_negotiation(cp, NULL);
3709 } else if (cp->lstate == link_up) {
3710 cas_set_link_modes(cp);
3711 netif_carrier_on(cp->dev);
3712 }
3713}
3714
3715
3716
3717
3718
3719static void cas_hard_reset(struct cas *cp)
3720{
3721 writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3722 udelay(20);
3723 pci_restore_state(cp->pdev);
3724}
3725
3726
3727static void cas_global_reset(struct cas *cp, int blkflag)
3728{
3729 int limit;
3730
3731
3732 if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
3733
3734
3735
3736
3737
3738
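		/* Setting SW_RESET_BLOCK_PCS_SLINK shields the
		 * PCS/serialink block from the global reset, presumably so
		 * an established serdes link survives when the caller asks
		 * for it via blkflag.
		 */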
3739 writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
3740 cp->regs + REG_SW_RESET);
3741 } else {
3742 writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
3743 }
3744
3745
3746 mdelay(3);
3747
3748 limit = STOP_TRIES;
3749 while (limit-- > 0) {
3750 u32 val = readl(cp->regs + REG_SW_RESET);
3751 if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
3752 goto done;
3753 udelay(10);
3754 }
3755 netdev_err(cp->dev, "sw reset failed\n");
3756
3757done:
3758
3759 writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
3760 BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
3761
3762
3763
3764
3765
3766 writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
3767 PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
3768 PCI_ERR_BIM_DMA_READ), cp->regs +
3769 REG_PCI_ERR_STATUS_MASK);
3770
3771
3772
3773
3774 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3775}
3776
3777static void cas_reset(struct cas *cp, int blkflag)
3778{
3779 u32 val;
3780
3781 cas_mask_intr(cp);
3782 cas_global_reset(cp, blkflag);
3783 cas_mac_reset(cp);
3784 cas_entropy_reset(cp);
3785
3786
3787 val = readl(cp->regs + REG_TX_CFG);
3788 val &= ~TX_CFG_DMA_EN;
3789 writel(val, cp->regs + REG_TX_CFG);
3790
3791 val = readl(cp->regs + REG_RX_CFG);
3792 val &= ~RX_CFG_DMA_EN;
3793 writel(val, cp->regs + REG_RX_CFG);
3794
3795
3796 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
3797 (CAS_HP_ALT_FIRMWARE == cas_prog_null)) {
3798 cas_load_firmware(cp, CAS_HP_FIRMWARE);
3799 } else {
3800 cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
3801 }
3802
3803
3804 spin_lock(&cp->stat_lock[N_TX_RINGS]);
3805 cas_clear_mac_err(cp);
3806 spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3807}
3808
3809
3810static void cas_shutdown(struct cas *cp)
3811{
3812 unsigned long flags;
3813
3814
3815 cp->hw_running = 0;
3816
3817 del_timer_sync(&cp->link_timer);
3818
3819
3820#if 0
3821 while (atomic_read(&cp->reset_task_pending_mtu) ||
3822 atomic_read(&cp->reset_task_pending_spare) ||
3823 atomic_read(&cp->reset_task_pending_all))
3824 schedule();
3825
3826#else
3827 while (atomic_read(&cp->reset_task_pending))
3828 schedule();
3829#endif
3830
3831 cas_lock_all_save(cp, flags);
3832 cas_reset(cp, 0);
3833 if (cp->cas_flags & CAS_FLAG_SATURN)
3834 cas_phy_powerdown(cp);
3835 cas_unlock_all_restore(cp, flags);
3836}
3837
3838static int cas_change_mtu(struct net_device *dev, int new_mtu)
3839{
3840 struct cas *cp = netdev_priv(dev);
3841
3842 dev->mtu = new_mtu;
3843 if (!netif_running(dev) || !netif_device_present(dev))
3844 return 0;
3845
3846
3847#if 1
3848 atomic_inc(&cp->reset_task_pending);
	if (cp->phy_type & CAS_PHY_SERDES) {
3850 atomic_inc(&cp->reset_task_pending_all);
3851 } else {
3852 atomic_inc(&cp->reset_task_pending_mtu);
3853 }
3854 schedule_work(&cp->reset_task);
3855#else
3856 atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
3857 CAS_RESET_ALL : CAS_RESET_MTU);
3858 pr_err("reset called in cas_change_mtu\n");
3859 schedule_work(&cp->reset_task);
3860#endif
3861
3862 flush_work(&cp->reset_task);
3863 return 0;
3864}
3865
3866static void cas_clean_txd(struct cas *cp, int ring)
3867{
3868 struct cas_tx_desc *txd = cp->init_txds[ring];
3869 struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
3870 u64 daddr, dlen;
3871 int i, size;
3872
3873 size = TX_DESC_RINGN_SIZE(ring);
3874 for (i = 0; i < size; i++) {
3875 int frag;
3876
3877 if (skbs[i] == NULL)
3878 continue;
3879
3880 skb = skbs[i];
3881 skbs[i] = NULL;
3882
3883 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
3884 int ent = i & (size - 1);
3885
3886
3887
3888
3889 daddr = le64_to_cpu(txd[ent].buffer);
3890 dlen = CAS_VAL(TX_DESC_BUFLEN,
3891 le64_to_cpu(txd[ent].control));
3892 dma_unmap_page(&cp->pdev->dev, daddr, dlen,
3893 DMA_TO_DEVICE);
3894
3895 if (frag != skb_shinfo(skb)->nr_frags) {
3896 i++;
3897
3898
3899
3900
3901 ent = i & (size - 1);
3902 if (cp->tx_tiny_use[ring][ent].used)
3903 i++;
3904 }
3905 }
3906 dev_kfree_skb_any(skb);
3907 }
3908
3909
3910 memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
3911}
3912
3913
3914static inline void cas_free_rx_desc(struct cas *cp, int ring)
3915{
3916 cas_page_t **page = cp->rx_pages[ring];
3917 int i, size;
3918
3919 size = RX_DESC_RINGN_SIZE(ring);
3920 for (i = 0; i < size; i++) {
3921 if (page[i]) {
3922 cas_page_free(cp, page[i]);
3923 page[i] = NULL;
3924 }
3925 }
3926}
3927
3928static void cas_free_rxds(struct cas *cp)
3929{
3930 int i;
3931
3932 for (i = 0; i < N_RX_DESC_RINGS; i++)
3933 cas_free_rx_desc(cp, i);
3934}
3935
3936
3937static void cas_clean_rings(struct cas *cp)
3938{
3939 int i;
3940
3941
3942 memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
3943 memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
3944 for (i = 0; i < N_TX_RINGS; i++)
3945 cas_clean_txd(cp, i);
3946
3947
3948 memset(cp->init_block, 0, sizeof(struct cas_init_block));
3949 cas_clean_rxds(cp);
3950 cas_clean_rxcs(cp);
3951}
3952
3953
3954static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
3955{
3956 cas_page_t **page = cp->rx_pages[ring];
3957 int size, i = 0;
3958
3959 size = RX_DESC_RINGN_SIZE(ring);
3960 for (i = 0; i < size; i++) {
		page[i] = cas_page_alloc(cp, GFP_KERNEL);
		if (!page[i])
			return -1;
3963 }
3964 return 0;
3965}
3966
3967static int cas_alloc_rxds(struct cas *cp)
3968{
3969 int i;
3970
3971 for (i = 0; i < N_RX_DESC_RINGS; i++) {
3972 if (cas_alloc_rx_desc(cp, i) < 0) {
3973 cas_free_rxds(cp);
3974 return -1;
3975 }
3976 }
3977 return 0;
3978}
3979
3980static void cas_reset_task(struct work_struct *work)
3981{
3982 struct cas *cp = container_of(work, struct cas, reset_task);
3983#if 0
3984 int pending = atomic_read(&cp->reset_task_pending);
3985#else
3986 int pending_all = atomic_read(&cp->reset_task_pending_all);
3987 int pending_spare = atomic_read(&cp->reset_task_pending_spare);
3988 int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);
3989
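	/* The pending counters were snapshotted above; only those
	 * amounts are subtracted at the end, so requests that arrive
	 * while we run are serviced by a later invocation instead of
	 * being lost.
	 */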
3990 if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) {
3991
3992
3993
3994 atomic_dec(&cp->reset_task_pending);
3995 return;
3996 }
3997#endif
3998
3999
4000
4001
4002 if (cp->hw_running) {
4003 unsigned long flags;
4004
4005
4006 netif_device_detach(cp->dev);
4007 cas_lock_all_save(cp, flags);
4008
4009 if (cp->opened) {
4010
4011
4012
4013
4014 cas_spare_recover(cp, GFP_ATOMIC);
4015 }
4016#if 1
4017
4018 if (!pending_all && !pending_mtu)
4019 goto done;
4020#else
4021 if (pending == CAS_RESET_SPARE)
4022 goto done;
4023#endif
4024
4025
4026
4027
4028
4029
4030
4031#if 1
4032 cas_reset(cp, !(pending_all > 0));
4033 if (cp->opened)
4034 cas_clean_rings(cp);
4035 cas_init_hw(cp, (pending_all > 0));
4036#else
4037 cas_reset(cp, !(pending == CAS_RESET_ALL));
4038 if (cp->opened)
4039 cas_clean_rings(cp);
4040 cas_init_hw(cp, pending == CAS_RESET_ALL);
4041#endif
4042
4043done:
4044 cas_unlock_all_restore(cp, flags);
4045 netif_device_attach(cp->dev);
4046 }
4047#if 1
4048 atomic_sub(pending_all, &cp->reset_task_pending_all);
4049 atomic_sub(pending_spare, &cp->reset_task_pending_spare);
4050 atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
4051 atomic_dec(&cp->reset_task_pending);
4052#else
4053 atomic_set(&cp->reset_task_pending, 0);
4054#endif
4055}
4056
4057static void cas_link_timer(struct timer_list *t)
4058{
4059 struct cas *cp = from_timer(cp, t, link_timer);
4060 int mask, pending = 0, reset = 0;
4061 unsigned long flags;
4062
4063 if (link_transition_timeout != 0 &&
4064 cp->link_transition_jiffies_valid &&
4065 ((jiffies - cp->link_transition_jiffies) >
4066 (link_transition_timeout))) {
4067
4068
4069
4070
4071 cp->link_transition_jiffies_valid = 0;
4072 }
4073
4074 if (!cp->hw_running)
4075 return;
4076
4077 spin_lock_irqsave(&cp->lock, flags);
4078 cas_lock_tx(cp);
4079 cas_entropy_gather(cp);
4080
4081
4082
4083
4084#if 1
4085 if (atomic_read(&cp->reset_task_pending_all) ||
4086 atomic_read(&cp->reset_task_pending_spare) ||
4087 atomic_read(&cp->reset_task_pending_mtu))
4088 goto done;
4089#else
4090 if (atomic_read(&cp->reset_task_pending))
4091 goto done;
4092#endif
4093
4094
	mask = cp->cas_flags & CAS_FLAG_RXD_POST_MASK;
	if (mask) {
4096 int i, rmask;
4097
4098 for (i = 0; i < MAX_RX_DESC_RINGS; i++) {
4099 rmask = CAS_FLAG_RXD_POST(i);
4100 if ((mask & rmask) == 0)
4101 continue;
4102
4103
4104 if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
4105 pending = 1;
4106 continue;
4107 }
4108 cp->cas_flags &= ~rmask;
4109 }
4110 }
4111
4112 if (CAS_PHY_MII(cp->phy_type)) {
4113 u16 bmsr;
4114 cas_mif_poll(cp, 0);
4115 bmsr = cas_phy_read(cp, MII_BMSR);
4116
4117
4118
4119
4120
4121 bmsr = cas_phy_read(cp, MII_BMSR);
4122 cas_mif_poll(cp, 1);
4123 readl(cp->regs + REG_MIF_STATUS);
4124 reset = cas_mii_link_check(cp, bmsr);
4125 } else {
4126 reset = cas_pcs_link_check(cp);
4127 }
4128
4129 if (reset)
4130 goto done;
4131
4132
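	/* When nothing has been transmitted, watch for a wedged
	 * transmitter: a MAC state machine stuck in TLM state 0x3 or 0x5
	 * with an idle encap machine, or FIFO read/write pointers that
	 * disagree while the packet counter reads zero. Either calls for
	 * a hard reset.
	 */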
4133 if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
4134 u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
4135 u32 wptr, rptr;
4136 int tlm = CAS_VAL(MAC_SM_TLM, val);
4137
4138 if (((tlm == 0x5) || (tlm == 0x3)) &&
4139 (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
4140 netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
4141 "tx err: MAC_STATE[%08x]\n", val);
4142 reset = 1;
4143 goto done;
4144 }
4145
4146 val = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
4147 wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
4148 rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
4149 if ((val == 0) && (wptr != rptr)) {
4150 netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
4151 "tx err: TX_FIFO[%08x:%08x:%08x]\n",
4152 val, wptr, rptr);
4153 reset = 1;
4154 }
4155
4156 if (reset)
4157 cas_hard_reset(cp);
4158 }
4159
4160done:
4161 if (reset) {
4162#if 1
4163 atomic_inc(&cp->reset_task_pending);
4164 atomic_inc(&cp->reset_task_pending_all);
4165 schedule_work(&cp->reset_task);
4166#else
4167 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
4168 pr_err("reset called in cas_link_timer\n");
4169 schedule_work(&cp->reset_task);
4170#endif
4171 }
4172
4173 if (!pending)
4174 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
4175 cas_unlock_tx(cp);
4176 spin_unlock_irqrestore(&cp->lock, flags);
4177}
4178
4179
4180
4181
4182static void cas_tx_tiny_free(struct cas *cp)
4183{
4184 struct pci_dev *pdev = cp->pdev;
4185 int i;
4186
4187 for (i = 0; i < N_TX_RINGS; i++) {
4188 if (!cp->tx_tiny_bufs[i])
4189 continue;
4190
4191 dma_free_coherent(&pdev->dev, TX_TINY_BUF_BLOCK,
4192 cp->tx_tiny_bufs[i], cp->tx_tiny_dvma[i]);
4193 cp->tx_tiny_bufs[i] = NULL;
4194 }
4195}
4196
4197static int cas_tx_tiny_alloc(struct cas *cp)
4198{
4199 struct pci_dev *pdev = cp->pdev;
4200 int i;
4201
4202 for (i = 0; i < N_TX_RINGS; i++) {
4203 cp->tx_tiny_bufs[i] =
4204 dma_alloc_coherent(&pdev->dev, TX_TINY_BUF_BLOCK,
4205 &cp->tx_tiny_dvma[i], GFP_KERNEL);
4206 if (!cp->tx_tiny_bufs[i]) {
4207 cas_tx_tiny_free(cp);
4208 return -1;
4209 }
4210 }
4211 return 0;
4212}
4213
4214
4215static int cas_open(struct net_device *dev)
4216{
4217 struct cas *cp = netdev_priv(dev);
4218 int hw_was_up, err;
4219 unsigned long flags;
4220
4221 mutex_lock(&cp->pm_mutex);
4222
4223 hw_was_up = cp->hw_running;
4224
4225
4226
4227
4228 if (!cp->hw_running) {
4229
4230 cas_lock_all_save(cp, flags);
4231
4232
4233
4234
4235
4236 cas_reset(cp, 0);
4237 cp->hw_running = 1;
4238 cas_unlock_all_restore(cp, flags);
4239 }
4240
4241 err = -ENOMEM;
4242 if (cas_tx_tiny_alloc(cp) < 0)
4243 goto err_unlock;
4244
4245
4246 if (cas_alloc_rxds(cp) < 0)
4247 goto err_tx_tiny;
4248
4249
4250 cas_spare_init(cp);
4251 cas_spare_recover(cp, GFP_KERNEL);
4252
4253
4254
4255
4256
4257
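	/* Interrupts were masked by the cas_reset() above, so it should
	 * be safe to install the (shared) handler before the rest of the
	 * hardware is brought up.
	 */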
4258 if (request_irq(cp->pdev->irq, cas_interrupt,
4259 IRQF_SHARED, dev->name, (void *) dev)) {
4260 netdev_err(cp->dev, "failed to request irq !\n");
4261 err = -EAGAIN;
4262 goto err_spare;
4263 }
4264
4265#ifdef USE_NAPI
4266 napi_enable(&cp->napi);
4267#endif
4268
4269 cas_lock_all_save(cp, flags);
4270 cas_clean_rings(cp);
4271 cas_init_hw(cp, !hw_was_up);
4272 cp->opened = 1;
4273 cas_unlock_all_restore(cp, flags);
4274
4275 netif_start_queue(dev);
4276 mutex_unlock(&cp->pm_mutex);
4277 return 0;
4278
4279err_spare:
4280 cas_spare_free(cp);
4281 cas_free_rxds(cp);
4282err_tx_tiny:
4283 cas_tx_tiny_free(cp);
4284err_unlock:
4285 mutex_unlock(&cp->pm_mutex);
4286 return err;
4287}
4288
4289static int cas_close(struct net_device *dev)
4290{
4291 unsigned long flags;
4292 struct cas *cp = netdev_priv(dev);
4293
4294#ifdef USE_NAPI
4295 napi_disable(&cp->napi);
4296#endif
4297
4298 mutex_lock(&cp->pm_mutex);
4299
4300 netif_stop_queue(dev);
4301
4302
4303 cas_lock_all_save(cp, flags);
4304 cp->opened = 0;
4305 cas_reset(cp, 0);
4306 cas_phy_init(cp);
4307 cas_begin_auto_negotiation(cp, NULL);
4308 cas_clean_rings(cp);
4309 cas_unlock_all_restore(cp, flags);
4310
4311 free_irq(cp->pdev->irq, (void *) dev);
4312 cas_spare_free(cp);
4313 cas_free_rxds(cp);
4314 cas_tx_tiny_free(cp);
4315 mutex_unlock(&cp->pm_mutex);
4316 return 0;
4317}
4318
4319static struct {
4320 const char name[ETH_GSTRING_LEN];
4321} ethtool_cassini_statnames[] = {
4322 {"collisions"},
4323 {"rx_bytes"},
4324 {"rx_crc_errors"},
4325 {"rx_dropped"},
4326 {"rx_errors"},
4327 {"rx_fifo_errors"},
4328 {"rx_frame_errors"},
4329 {"rx_length_errors"},
4330 {"rx_over_errors"},
4331 {"rx_packets"},
4332 {"tx_aborted_errors"},
4333 {"tx_bytes"},
4334 {"tx_dropped"},
4335 {"tx_errors"},
4336 {"tx_fifo_errors"},
4337 {"tx_packets"}
4338};
4339#define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames)
4340
4341static struct {
4342 const int offsets;
4343} ethtool_register_table[] = {
4344 {-MII_BMSR},
4345 {-MII_BMCR},
4346 {REG_CAWR},
4347 {REG_INF_BURST},
4348 {REG_BIM_CFG},
4349 {REG_RX_CFG},
4350 {REG_HP_CFG},
4351 {REG_MAC_TX_CFG},
4352 {REG_MAC_RX_CFG},
4353 {REG_MAC_CTRL_CFG},
4354 {REG_MAC_XIF_CFG},
4355 {REG_MIF_CFG},
4356 {REG_PCS_CFG},
4357 {REG_SATURN_PCFG},
4358 {REG_PCS_MII_STATUS},
4359 {REG_PCS_STATE_MACHINE},
4360 {REG_MAC_COLL_EXCESS},
4361 {REG_MAC_COLL_LATE}
4362};
4363#define CAS_REG_LEN ARRAY_SIZE(ethtool_register_table)
4364#define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN)
4365
4366static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
4367{
4368 u8 *p;
4369 int i;
4370 unsigned long flags;
4371
4372 spin_lock_irqsave(&cp->lock, flags);
	for (i = 0, p = ptr; i < len; i++, p += sizeof(u32)) {
4374 u16 hval;
4375 u32 val;
4376 if (ethtool_register_table[i].offsets < 0) {
4377 hval = cas_phy_read(cp,
4378 -ethtool_register_table[i].offsets);
4379 val = hval;
4380 } else {
			val = readl(cp->regs + ethtool_register_table[i].offsets);
4382 }
4383 memcpy(p, (u8 *)&val, sizeof(u32));
4384 }
4385 spin_unlock_irqrestore(&cp->lock, flags);
4386}
4387
4388static struct net_device_stats *cas_get_stats(struct net_device *dev)
4389{
4390 struct cas *cp = netdev_priv(dev);
4391 struct net_device_stats *stats = cp->net_stats;
4392 unsigned long flags;
4393 int i;
4394 unsigned long tmp;
4395
4396
4397 if (!cp->hw_running)
4398 return stats + N_TX_RINGS;
4399
4400
4401
4402
4403
4404
4405
4406
4407
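	/* Fold the hardware error counters (16-bit, hence the masks)
	 * and every per-ring software counter into the stats[N_TX_RINGS]
	 * aggregate, zeroing the per-ring sources as we go.
	 */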
4408 spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
4409 stats[N_TX_RINGS].rx_crc_errors +=
4410 readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
4411 stats[N_TX_RINGS].rx_frame_errors +=
4412 readl(cp->regs + REG_MAC_ALIGN_ERR) &0xffff;
4413 stats[N_TX_RINGS].rx_length_errors +=
4414 readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
4415#if 1
4416 tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
4417 (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff);
4418 stats[N_TX_RINGS].tx_aborted_errors += tmp;
4419 stats[N_TX_RINGS].collisions +=
4420 tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
4421#else
4422 stats[N_TX_RINGS].tx_aborted_errors +=
4423 readl(cp->regs + REG_MAC_COLL_EXCESS);
4424 stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
4425 readl(cp->regs + REG_MAC_COLL_LATE);
4426#endif
4427 cas_clear_mac_err(cp);
4428
4429
4430 spin_lock(&cp->stat_lock[0]);
4431 stats[N_TX_RINGS].collisions += stats[0].collisions;
4432 stats[N_TX_RINGS].rx_over_errors += stats[0].rx_over_errors;
4433 stats[N_TX_RINGS].rx_frame_errors += stats[0].rx_frame_errors;
4434 stats[N_TX_RINGS].rx_fifo_errors += stats[0].rx_fifo_errors;
4435 stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors;
4436 stats[N_TX_RINGS].tx_fifo_errors += stats[0].tx_fifo_errors;
4437 spin_unlock(&cp->stat_lock[0]);
4438
4439 for (i = 0; i < N_TX_RINGS; i++) {
4440 spin_lock(&cp->stat_lock[i]);
4441 stats[N_TX_RINGS].rx_length_errors +=
4442 stats[i].rx_length_errors;
4443 stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
4444 stats[N_TX_RINGS].rx_packets += stats[i].rx_packets;
4445 stats[N_TX_RINGS].tx_packets += stats[i].tx_packets;
4446 stats[N_TX_RINGS].rx_bytes += stats[i].rx_bytes;
4447 stats[N_TX_RINGS].tx_bytes += stats[i].tx_bytes;
4448 stats[N_TX_RINGS].rx_errors += stats[i].rx_errors;
4449 stats[N_TX_RINGS].tx_errors += stats[i].tx_errors;
4450 stats[N_TX_RINGS].rx_dropped += stats[i].rx_dropped;
4451 stats[N_TX_RINGS].tx_dropped += stats[i].tx_dropped;
4452 memset(stats + i, 0, sizeof(struct net_device_stats));
4453 spin_unlock(&cp->stat_lock[i]);
4454 }
4455 spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags);
4456 return stats + N_TX_RINGS;
4457}
4458
4459
4460static void cas_set_multicast(struct net_device *dev)
4461{
4462 struct cas *cp = netdev_priv(dev);
4463 u32 rxcfg, rxcfg_new;
4464 unsigned long flags;
4465 int limit = STOP_TRIES;
4466
4467 if (!cp->hw_running)
4468 return;
4469
4470 spin_lock_irqsave(&cp->lock, flags);
4471 rxcfg = readl(cp->regs + REG_MAC_RX_CFG);
4472
4473
4474 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4475 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
4476 if (!limit--)
4477 break;
4478 udelay(10);
4479 }
4480
4481
4482 limit = STOP_TRIES;
4483 rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN);
4484 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4485 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) {
4486 if (!limit--)
4487 break;
4488 udelay(10);
4489 }
4490
4491
4492 cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp);
4493 rxcfg |= rxcfg_new;
4494 writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
4495 spin_unlock_irqrestore(&cp->lock, flags);
4496}
4497
4498static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4499{
4500 struct cas *cp = netdev_priv(dev);
4501 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
4502 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
4503 strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
4504}
4505
4506static int cas_get_link_ksettings(struct net_device *dev,
4507 struct ethtool_link_ksettings *cmd)
4508{
4509 struct cas *cp = netdev_priv(dev);
4510 u16 bmcr;
4511 int full_duplex, speed, pause;
4512 unsigned long flags;
4513 enum link_state linkstate = link_up;
4514 u32 supported, advertising;
4515
4516 advertising = 0;
4517 supported = SUPPORTED_Autoneg;
4518 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
4519 supported |= SUPPORTED_1000baseT_Full;
4520 advertising |= ADVERTISED_1000baseT_Full;
4521 }
4522
4523
4524 spin_lock_irqsave(&cp->lock, flags);
4525 bmcr = 0;
4526 linkstate = cp->lstate;
4527 if (CAS_PHY_MII(cp->phy_type)) {
4528 cmd->base.port = PORT_MII;
4529 cmd->base.phy_address = cp->phy_addr;
4530 advertising |= ADVERTISED_TP | ADVERTISED_MII |
4531 ADVERTISED_10baseT_Half |
4532 ADVERTISED_10baseT_Full |
4533 ADVERTISED_100baseT_Half |
4534 ADVERTISED_100baseT_Full;
4535
4536 supported |=
4537 (SUPPORTED_10baseT_Half |
4538 SUPPORTED_10baseT_Full |
4539 SUPPORTED_100baseT_Half |
4540 SUPPORTED_100baseT_Full |
4541 SUPPORTED_TP | SUPPORTED_MII);
4542
4543 if (cp->hw_running) {
4544 cas_mif_poll(cp, 0);
4545 bmcr = cas_phy_read(cp, MII_BMCR);
4546 cas_read_mii_link_mode(cp, &full_duplex,
4547 &speed, &pause);
4548 cas_mif_poll(cp, 1);
4549 }
4550
4551 } else {
4552 cmd->base.port = PORT_FIBRE;
4553 cmd->base.phy_address = 0;
4554 supported |= SUPPORTED_FIBRE;
4555 advertising |= ADVERTISED_FIBRE;
4556
4557 if (cp->hw_running) {
4558
4559 bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
4560 cas_read_pcs_link_mode(cp, &full_duplex,
4561 &speed, &pause);
4562 }
4563 }
4564 spin_unlock_irqrestore(&cp->lock, flags);
4565
4566 if (bmcr & BMCR_ANENABLE) {
4567 advertising |= ADVERTISED_Autoneg;
4568 cmd->base.autoneg = AUTONEG_ENABLE;
4569 cmd->base.speed = ((speed == 10) ?
4570 SPEED_10 :
4571 ((speed == 1000) ?
4572 SPEED_1000 : SPEED_100));
4573 cmd->base.duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
4574 } else {
4575 cmd->base.autoneg = AUTONEG_DISABLE;
4576 cmd->base.speed = ((bmcr & CAS_BMCR_SPEED1000) ?
4577 SPEED_1000 :
4578 ((bmcr & BMCR_SPEED100) ?
4579 SPEED_100 : SPEED_10));
4580 cmd->base.duplex = (bmcr & BMCR_FULLDPLX) ?
4581 DUPLEX_FULL : DUPLEX_HALF;
4582 }
4583 if (linkstate != link_up) {
4584
4585
4586
4587
4588
4589
4590
4591
4592
4593
4594 if (cp->link_cntl & BMCR_ANENABLE) {
4595 cmd->base.speed = 0;
4596 cmd->base.duplex = 0xff;
4597 } else {
4598 cmd->base.speed = SPEED_10;
4599 if (cp->link_cntl & BMCR_SPEED100) {
4600 cmd->base.speed = SPEED_100;
4601 } else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
4602 cmd->base.speed = SPEED_1000;
4603 }
4604 cmd->base.duplex = (cp->link_cntl & BMCR_FULLDPLX) ?
4605 DUPLEX_FULL : DUPLEX_HALF;
4606 }
4607 }
4608
4609 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
4610 supported);
4611 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
4612 advertising);
4613
4614 return 0;
4615}
4616
4617static int cas_set_link_ksettings(struct net_device *dev,
4618 const struct ethtool_link_ksettings *cmd)
4619{
4620 struct cas *cp = netdev_priv(dev);
4621 unsigned long flags;
4622 u32 speed = cmd->base.speed;
4623
4624
4625 if (cmd->base.autoneg != AUTONEG_ENABLE &&
4626 cmd->base.autoneg != AUTONEG_DISABLE)
4627 return -EINVAL;
4628
4629 if (cmd->base.autoneg == AUTONEG_DISABLE &&
4630 ((speed != SPEED_1000 &&
4631 speed != SPEED_100 &&
4632 speed != SPEED_10) ||
4633 (cmd->base.duplex != DUPLEX_HALF &&
4634 cmd->base.duplex != DUPLEX_FULL)))
4635 return -EINVAL;
4636
4637
4638 spin_lock_irqsave(&cp->lock, flags);
4639 cas_begin_auto_negotiation(cp, cmd);
4640 spin_unlock_irqrestore(&cp->lock, flags);
4641 return 0;
4642}
4643
4644static int cas_nway_reset(struct net_device *dev)
4645{
4646 struct cas *cp = netdev_priv(dev);
4647 unsigned long flags;
4648
4649 if ((cp->link_cntl & BMCR_ANENABLE) == 0)
4650 return -EINVAL;
4651
4652
4653 spin_lock_irqsave(&cp->lock, flags);
4654 cas_begin_auto_negotiation(cp, NULL);
4655 spin_unlock_irqrestore(&cp->lock, flags);
4656
4657 return 0;
4658}

static u32 cas_get_link(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);
	return cp->lstate == link_up;
}

static u32 cas_get_msglevel(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);
	return cp->msg_enable;
}

static void cas_set_msglevel(struct net_device *dev, u32 value)
{
	struct cas *cp = netdev_priv(dev);
	cp->msg_enable = value;
}

static int cas_get_regs_len(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);
	return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len : CAS_MAX_REGS;
}

static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			 void *p)
{
	struct cas *cp = netdev_priv(dev);
	regs->version = 0;

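	/* cas_read_regs handles its own locking via cp->lock. */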
	cas_read_regs(cp, p, regs->len / sizeof(u32));
}

static int cas_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return CAS_NUM_STAT_KEYS;
	default:
		return -EOPNOTSUPP;
	}
}

static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	memcpy(data, &ethtool_cassini_statnames,
	       CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
}

static void cas_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *data)
{
	struct cas *cp = netdev_priv(dev);
	struct net_device_stats *stats = cas_get_stats(cp->dev);
	int i = 0;
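	/* Export in the same order as ethtool_cassini_statnames[],
	 * which cas_get_strings() above hands to userspace.
	 */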
	data[i++] = stats->collisions;
	data[i++] = stats->rx_bytes;
	data[i++] = stats->rx_crc_errors;
	data[i++] = stats->rx_dropped;
	data[i++] = stats->rx_errors;
	data[i++] = stats->rx_fifo_errors;
	data[i++] = stats->rx_frame_errors;
	data[i++] = stats->rx_length_errors;
	data[i++] = stats->rx_over_errors;
	data[i++] = stats->rx_packets;
	data[i++] = stats->tx_aborted_errors;
	data[i++] = stats->tx_bytes;
	data[i++] = stats->tx_dropped;
	data[i++] = stats->tx_errors;
	data[i++] = stats->tx_fifo_errors;
	data[i++] = stats->tx_packets;
	BUG_ON(i != CAS_NUM_STAT_KEYS);
}

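/* ethtool entry points: "ethtool ethX" exercises the link_ksettings
 * callbacks above, "ethtool -S ethX" the string/stats pair.
 */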
static const struct ethtool_ops cas_ethtool_ops = {
	.get_drvinfo		= cas_get_drvinfo,
	.nway_reset		= cas_nway_reset,
	.get_link		= cas_get_link,
	.get_msglevel		= cas_get_msglevel,
	.set_msglevel		= cas_set_msglevel,
	.get_regs_len		= cas_get_regs_len,
	.get_regs		= cas_get_regs,
	.get_sset_count		= cas_get_sset_count,
	.get_strings		= cas_get_strings,
	.get_ethtool_stats	= cas_get_ethtool_stats,
	.get_link_ksettings	= cas_get_link_ksettings,
	.set_link_ksettings	= cas_set_link_ksettings,
};

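/* Standard MII ioctls, as used by tools like mii-tool.  Roughly, from
 * userspace (hypothetical fd and interface name, shown only as a sketch):
 *
 *	struct ifreq ifr = { 0 };
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	strcpy(ifr.ifr_name, "eth0");
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	   -- fills mii->phy_id
 *	mii->reg_num = MII_BMSR;
 *	ioctl(fd, SIOCGMIIREG, &ifr);	   -- PHY status in mii->val_out
 */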
static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct cas *cp = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(ifr);
	unsigned long flags;
	int rc = -EOPNOTSUPP;

	/* Hold the PM mutex while doing ioctl's or we may collide
	 * with open/close and power management and oops.
	 */
	mutex_lock(&cp->pm_mutex);
	switch (cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = cp->phy_addr;
		fallthrough;

	case SIOCGMIIREG:		/* Read MII PHY register. */
		spin_lock_irqsave(&cp->lock, flags);
		cas_mif_poll(cp, 0);
		data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
		cas_mif_poll(cp, 1);
		spin_unlock_irqrestore(&cp->lock, flags);
		rc = 0;
		break;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		spin_lock_irqsave(&cp->lock, flags);
		cas_mif_poll(cp, 0);
		rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
		cas_mif_poll(cp, 1);
		spin_unlock_irqrestore(&cp->lock, flags);
		break;
	default:
		break;
	}

	mutex_unlock(&cp->pm_mutex);
	return rc;
}

/* When the Cassini sits behind an Intel 31154 PCI-X bridge (vendor
 * 0x8086, device 0x537c), it is the only device on the secondary bus,
 * so the bridge's arbitration and prefetch settings can be tuned
 * entirely in the Cassini's favor.
 */
static void cas_program_bridge(struct pci_dev *cas_pdev)
{
	struct pci_dev *pdev = cas_pdev->bus->self;
	u32 val;

	if (!pdev)
		return;

	if (pdev->vendor != 0x8086 || pdev->device != 0x537c)
		return;

	/* Clear bit 10 of the Secondary Arbiter Control/Status
	 * register, which lives at config offset 0x41.  The access is
	 * done as a read/modify/write of the whole dword at 0x40,
	 * where that bit shows up as bit 18 (0x00040000).
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	val &= ~0x00040000;
	pci_write_config_dword(pdev, 0x40, val);

	/* Max out the Multi-Transaction Timer settings (the 16-bit
	 * word at config offset 0x50).  Extending GRANT# lets
	 * back-to-back Cassini transactions run without re-arbitrating
	 * for the bus after every transaction, which is safe since
	 * nothing else shares the segment.
	 */
	pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff);

	/* Enable the bridge's read prefetch policy (the 16-bit word
	 * at config offset 0x52) and max out all of its fields; with
	 * only one device downstream, the more aggressively the
	 * bridge prefetches on the Cassini's behalf, the better.
	 */
	pci_write_config_word(pdev, 0x52,
			      (0x7 << 13) |
			      (0x7 << 10) |
			      (0x7 << 7) |
			      (0x7 << 4) |
			      (0xf << 0));

	/* Force the bridge's cache line size to 0x08 (eight dwords,
	 * i.e. 32 bytes).
	 */
	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);

	/* Force the latency timer to the maximum so the Cassini can
	 * hold the bus for as long as it likes.
	 */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff);
}

static const struct net_device_ops cas_netdev_ops = {
	.ndo_open		= cas_open,
	.ndo_stop		= cas_close,
	.ndo_start_xmit		= cas_start_xmit,
	.ndo_get_stats		= cas_get_stats,
	.ndo_set_rx_mode	= cas_set_multicast,
	.ndo_eth_ioctl		= cas_ioctl,
	.ndo_tx_timeout		= cas_tx_timeout,
	.ndo_change_mtu		= cas_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cas_netpoll,
#endif
};

static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int cas_version_printed;
	unsigned long casreg_len;
	struct net_device *dev;
	struct cas *cp;
	int i, err, pci_using_dac;
	u16 pci_cmd;
	u8 orig_cacheline_size = 0, cas_cacheline_size = 0;

	if (cas_version_printed++ == 0)
		pr_info("%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Cannot find proper PCI device "
			"base address, aborting\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	dev = alloc_etherdev(sizeof(*cp));
	if (!dev) {
		err = -ENOMEM;
		goto err_out_disable_pdev;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = pci_request_regions(pdev, dev->name);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_free_netdev;
	}
	pci_set_master(pdev);

	/* We must always turn on parity response or else parity
	 * doesn't get generated properly.  Disable SERR reporting as
	 * well.  In addition, we want to turn MWI on.
	 */
	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_SERR;
	pci_cmd |= PCI_COMMAND_PARITY;
	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
	if (pci_try_set_mwi(pdev))
		pr_warn("Could not enable MWI for %s\n", pci_name(pdev));

	cas_program_bridge(pdev);

	/*
	 * On some architectures, the default cache line size set by
	 * pci_try_set_mwi reduces performance, so raise it to the
	 * preferred value here if the current setting is smaller.
	 */
#if 1
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
			     &orig_cacheline_size);
	if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
		cas_cacheline_size =
			(CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
			CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
		if (pci_write_config_byte(pdev,
					  PCI_CACHE_LINE_SIZE,
					  cas_cacheline_size)) {
			dev_err(&pdev->dev, "Could not set PCI cache "
				"line size\n");
			goto err_out_free_res;
		}
	}
#endif

	/* Configure DMA attributes. */
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (err < 0) {
			dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
				"for consistent allocations\n");
			goto err_out_free_res;
		}

	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No usable DMA configuration, "
				"aborting\n");
			goto err_out_free_res;
		}
		pci_using_dac = 0;
	}

	casreg_len = pci_resource_len(pdev, 0);

	cp = netdev_priv(dev);
	cp->pdev = pdev;
#if 1
	/* A value of zero here means we never changed the size. */
	cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size : 0;
#endif
	cp->dev = dev;
	cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
		cassini_debug;

#if defined(CONFIG_SPARC)
	cp->of_node = pci_device_to_OF_node(pdev);
#endif

	cp->link_transition = LINK_TRANSITION_UNKNOWN;
	cp->link_transition_jiffies_valid = 0;

	spin_lock_init(&cp->lock);
	spin_lock_init(&cp->rx_inuse_lock);
	spin_lock_init(&cp->rx_spare_lock);
	for (i = 0; i < N_TX_RINGS; i++) {
		spin_lock_init(&cp->stat_lock[i]);
		spin_lock_init(&cp->tx_lock[i]);
	}
	spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
	mutex_init(&cp->pm_mutex);

	timer_setup(&cp->link_timer, cas_link_timer, 0);

#if 1
	/* Initialize the reset-task counters explicitly, just in case
	 * the atomic_t implementation ever requires it.
	 */
	atomic_set(&cp->reset_task_pending, 0);
	atomic_set(&cp->reset_task_pending_all, 0);
	atomic_set(&cp->reset_task_pending_spare, 0);
	atomic_set(&cp->reset_task_pending_mtu, 0);
#endif
	INIT_WORK(&cp->reset_task, cas_reset_task);

	/* Default link parameters */
	if (link_mode >= 0 && link_mode < 6)
		cp->link_cntl = link_modes[link_mode];
	else
		cp->link_cntl = BMCR_ANENABLE;
	cp->lstate = link_down;
	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
	netif_carrier_off(cp->dev);
	cp->timer_ticks = 0;

	/* give us access to cassini registers */
	cp->regs = pci_iomap(pdev, 0, casreg_len);
	if (!cp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		goto err_out_free_res;
	}
	cp->casreg_len = casreg_len;

	pci_save_state(pdev);
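	/* Bring the chip into a known state before validating its
	 * invariants (hence the hard and soft resets below).
	 */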
	cas_check_pci_invariants(cp);
	cas_hard_reset(cp);
	cas_reset(cp, 0);
	if (cas_check_invariants(cp))
		goto err_out_iounmap;
	if (cp->cas_flags & CAS_FLAG_SATURN)
		cas_saturn_firmware_init(cp);

	cp->init_block =
		dma_alloc_coherent(&pdev->dev, sizeof(struct cas_init_block),
				   &cp->block_dvma, GFP_KERNEL);
	if (!cp->init_block) {
		dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
		goto err_out_iounmap;
	}

	for (i = 0; i < N_TX_RINGS; i++)
		cp->init_txds[i] = cp->init_block->txds[i];

	for (i = 0; i < N_RX_DESC_RINGS; i++)
		cp->init_rxds[i] = cp->init_block->rxds[i];

	for (i = 0; i < N_RX_COMP_RINGS; i++)
		cp->init_rxcs[i] = cp->init_block->rxcs[i];

	for (i = 0; i < N_RX_FLOWS; i++)
		skb_queue_head_init(&cp->rx_flows[i]);

	dev->netdev_ops = &cas_netdev_ops;
	dev->ethtool_ops = &cas_ethtool_ops;
	dev->watchdog_timeo = CAS_TX_TIMEOUT;

#ifdef USE_NAPI
	netif_napi_add(dev, &cp->napi, cas_poll, 64);
#endif
	dev->irq = pdev->irq;
	dev->dma = 0;

	/* Cassini features. */
	if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;

	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;

	/* MTU range: 60 - 9000 */
	dev->min_mtu = CAS_MIN_MTU;
	dev->max_mtu = CAS_MAX_MTU;

	if (register_netdev(dev)) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_free_consistent;
	}

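	/* Decode the bus width and speed from BIM_CFG (the bus
	 * interface module configuration register) for the banner.
	 */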
	i = readl(cp->regs + REG_BIM_CFG);
	netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n",
		    (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
		    (i & BIM_CFG_32BIT) ? "32" : "64",
		    (i & BIM_CFG_66MHZ) ? "66" : "33",
		    (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
		    dev->dev_addr);

	pci_set_drvdata(pdev, dev);
	cp->hw_running = 1;
	cas_entropy_reset(cp);
	cas_phy_init(cp);
	cas_begin_auto_negotiation(cp, NULL);
	return 0;

err_out_free_consistent:
	dma_free_coherent(&pdev->dev, sizeof(struct cas_init_block),
			  cp->init_block, cp->block_dvma);

err_out_iounmap:
	mutex_lock(&cp->pm_mutex);
	if (cp->hw_running)
		cas_shutdown(cp);
	mutex_unlock(&cp->pm_mutex);

	pci_iounmap(pdev, cp->regs);

err_out_free_res:
	pci_release_regions(pdev);

	/* Try to restore the original cache line size, in case the
	 * error occurred after we changed it.
	 */
	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);

err_out_free_netdev:
	free_netdev(dev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	return -ENODEV;
}

static void cas_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct cas *cp;

	if (!dev)
		return;

	cp = netdev_priv(dev);
	unregister_netdev(dev);

	vfree(cp->fw_data);

	mutex_lock(&cp->pm_mutex);
	cancel_work_sync(&cp->reset_task);
	if (cp->hw_running)
		cas_shutdown(cp);
	mutex_unlock(&cp->pm_mutex);

#if 1
	if (cp->orig_cacheline_size) {
		/* Restore the cache line size we clobbered at probe
		 * time.
		 */
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				      cp->orig_cacheline_size);
	}
#endif
	dma_free_coherent(&pdev->dev, sizeof(struct cas_init_block),
			  cp->init_block, cp->block_dvma);
	pci_iounmap(pdev, cp->regs);
	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

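/* Power management: quiesce and power down on suspend, re-initialize
 * the hardware on resume.  Wired up through cas_pm_ops below.
 */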
static int __maybe_unused cas_suspend(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct cas *cp = netdev_priv(dev);
	unsigned long flags;

	mutex_lock(&cp->pm_mutex);

	/* If the interface is up, quiesce it before powering down. */
	if (cp->opened) {
		netif_device_detach(dev);

		cas_lock_all_save(cp, flags);

		/* We can pass 0 as the second argument of cas_reset
		 * because on resume, cas_init_hw is called with its
		 * second argument set, which restarts autonegotiation
		 * anyway.
		 */
		cas_reset(cp, 0);
		cas_clean_rings(cp);
		cas_unlock_all_restore(cp, flags);
	}

	if (cp->hw_running)
		cas_shutdown(cp);
	mutex_unlock(&cp->pm_mutex);

	return 0;
}

static int __maybe_unused cas_resume(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct cas *cp = netdev_priv(dev);

	netdev_info(dev, "resuming\n");

	mutex_lock(&cp->pm_mutex);
	cas_hard_reset(cp);
	if (cp->opened) {
		unsigned long flags;

		cas_lock_all_save(cp, flags);
		cas_reset(cp, 0);
		cp->hw_running = 1;
		cas_clean_rings(cp);
		cas_init_hw(cp, 1);
		cas_unlock_all_restore(cp, flags);

		netif_device_attach(dev);
	}
	mutex_unlock(&cp->pm_mutex);
	return 0;
}

static SIMPLE_DEV_PM_OPS(cas_pm_ops, cas_suspend, cas_resume);

static struct pci_driver cas_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= cas_pci_tbl,
	.probe		= cas_init_one,
	.remove		= cas_remove_one,
	.driver.pm	= &cas_pm_ops,
};

static int __init cas_init(void)
{
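	/* Convert the linkdown_timeout module parameter from seconds
	 * to jiffies; a non-positive value disables the PCS link-down
	 * reset workaround.
	 */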
	if (linkdown_timeout > 0)
		link_transition_timeout = linkdown_timeout * HZ;
	else
		link_transition_timeout = 0;

	return pci_register_driver(&cas_driver);
}

static void __exit cas_cleanup(void)
{
	pci_unregister_driver(&cas_driver);
}

module_init(cas_init);
module_exit(cas_cleanup);