/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
 *
 * Written by Adrian Sun (asun@darksunrising.com).
 * Released under the GPL (see MODULE_LICENSE below).
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/mii.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/firmware.h>

#include <net/checksum.h>

#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#define cas_page_map(x)		kmap_atomic((x))
#define cas_page_unmap(x)	kunmap_atomic((x))
#define CAS_NCPUS		num_online_cpus()

#define cas_skb_release(x)	netif_rx(x)


#define USE_HP_WORKAROUND
#define HP_WORKAROUND_DEFAULT
#define CAS_HP_ALT_FIRMWARE	cas_prog_null

#include "cassini.h"

#define USE_TX_COMPWB		/* use TX completion writeback */
#define USE_CSMA_CD_PROTO	/* standard CSMA/CD */
#define USE_RX_BLANK		/* hardware RX interrupt mitigation */
#undef USE_ENTROPY_DEV		/* don't feed the entropy device */

/* The alternate PCI interrupt lines are only usable if the platform
 * actually routes INTB/INTC/INTD for this device.
 */
#undef USE_PCI_INTB
#undef USE_PCI_INTC
#undef USE_PCI_INTD
#undef USE_QOS

#undef USE_VPD_DEBUG

/* RX processing options */
#define USE_PAGE_ORDER		/* allocate larger (multi-order) RX pages */
#define RX_DONT_BATCH	0	/* if 1, don't batch flows; release each skb */
#define RX_COPY_ALWAYS	0	/* if 1, always copy RX data into the skb */
#define RX_COPY_MIN	64	/* minimum number of bytes copied into the skb */
#undef RX_COUNT_BUFFERS		/* define to track RX page usage counts */

#define DRV_MODULE_NAME		"cassini"
#define DRV_MODULE_VERSION	"1.6"
#define DRV_MODULE_RELDATE	"21 May 2008"

#define CAS_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* timeout values: CAS_TX_TIMEOUT is the netdev TX watchdog timeout,
 * CAS_LINK_TIMEOUT and CAS_LINK_FAST_TIMEOUT are link timer intervals,
 * all in jiffies.
 */
#define CAS_TX_TIMEOUT			(HZ)
#define CAS_LINK_TIMEOUT		(22*HZ/10)
#define CAS_LINK_FAST_TIMEOUT		(1)

/* number of 10us polling iterations before giving up on a state change */
#define STOP_TRIES_PHY			1000
#define STOP_TRIES			5000

/* minimum frame sizes and MTU limits; CAS_MAX_MTU works out to
 * 2 * page_size - 0x50
 */
#define CAS_MIN_FRAME			97
#define CAS_1000MB_MIN_FRAME		255
#define CAS_MIN_MTU			60
#define CAS_MAX_MTU			min(((cp->page_size << 1) - 0x50), 9000)

#if 1
/* the CAS_RESET_* codes below are compiled out; the driver instead keeps
 * separate reset_task_pending_* atomic counters (see the #if 1 blocks
 * around schedule_work(&cp->reset_task)).
 */
#else
#define CAS_RESET_MTU			1
#define CAS_RESET_ALL			2
#define CAS_RESET_SPARE			3
#endif

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

static int cassini_debug = -1;	/* -1 == use default (CAS_DEF_MSG_ENABLE) */
static int link_mode;

MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("sun/cassini.bin");
module_param(cassini_debug, int, 0);
MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
module_param(link_mode, int, 0);
MODULE_PARM_DESC(link_mode, "default link mode");

/* Interval (in seconds) between chip resets used to work around the
 * PCS link-down problem; see the linkdown_timeout parameter below.
 */
#define DEFAULT_LINKDOWN_TIMEOUT 5

static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
module_param(linkdown_timeout, int, 0);
MODULE_PARM_DESC(linkdown_timeout,
"min reset interval in sec. for PCS linkdown issue; disabled if not positive");

/* Same interval expressed in jiffies; set at module load from
 * linkdown_timeout.  Zero disables the PCS reset workaround.
 */
static int link_transition_timeout;

/* indexed by the link_mode module parameter */
static u16 link_modes[] = {
	BMCR_ANENABLE,			 /* 0 : autoneg */
	0,				 /* 1 : 10bt half duplex */
	BMCR_SPEED100,			 /* 2 : 100bt half duplex */
	BMCR_FULLDPLX,			 /* 3 : 10bt full duplex */
	BMCR_SPEED100|BMCR_FULLDPLX,	 /* 4 : 100bt full duplex */
	CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
};

static const struct pci_device_id cas_pci_tbl[] = {
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, cas_pci_tbl);

static void cas_set_link_modes(struct cas *cp);

static inline void cas_lock_tx(struct cas *cp)
{
	int i;

	for (i = 0; i < N_TX_RINGS; i++)
		spin_lock_nested(&cp->tx_lock[i], i);
}

static inline void cas_lock_all(struct cas *cp)
{
	spin_lock_irq(&cp->lock);
	cas_lock_tx(cp);
}

/* Save/restore variants of the lock helpers.  These are macros rather
 * than inline functions because 'flags' must be a local variable in the
 * caller for spin_lock_irqsave()/spin_unlock_irqrestore() to work.
 */
#define cas_lock_all_save(cp, flags) \
do { \
	struct cas *xxxcp = (cp); \
	spin_lock_irqsave(&xxxcp->lock, flags); \
	cas_lock_tx(xxxcp); \
} while (0)

static inline void cas_unlock_tx(struct cas *cp)
{
	int i;

	for (i = N_TX_RINGS; i > 0; i--)
		spin_unlock(&cp->tx_lock[i - 1]);
}

static inline void cas_unlock_all(struct cas *cp)
{
	cas_unlock_tx(cp);
	spin_unlock_irq(&cp->lock);
}

#define cas_unlock_all_restore(cp, flags) \
do { \
	struct cas *xxxcp = (cp); \
	cas_unlock_tx(xxxcp); \
	spin_unlock_irqrestore(&xxxcp->lock, flags); \
} while (0)
294
295static void cas_disable_irq(struct cas *cp, const int ring)
296{
297
298 if (ring == 0) {
299 writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
300 return;
301 }
302
303
304 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
305 switch (ring) {
306#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
307#ifdef USE_PCI_INTB
308 case 1:
309#endif
310#ifdef USE_PCI_INTC
311 case 2:
312#endif
313#ifdef USE_PCI_INTD
314 case 3:
315#endif
316 writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
317 cp->regs + REG_PLUS_INTRN_MASK(ring));
318 break;
319#endif
320 default:
321 writel(INTRN_MASK_CLEAR_ALL, cp->regs +
322 REG_PLUS_INTRN_MASK(ring));
323 break;
324 }
325 }
326}
327
328static inline void cas_mask_intr(struct cas *cp)
329{
330 int i;
331
332 for (i = 0; i < N_RX_COMP_RINGS; i++)
333 cas_disable_irq(cp, i);
334}
335
336static void cas_enable_irq(struct cas *cp, const int ring)
337{
338 if (ring == 0) {
339 writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
340 return;
341 }
342
343 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
344 switch (ring) {
345#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
346#ifdef USE_PCI_INTB
347 case 1:
348#endif
349#ifdef USE_PCI_INTC
350 case 2:
351#endif
352#ifdef USE_PCI_INTD
353 case 3:
354#endif
355 writel(INTRN_MASK_RX_EN, cp->regs +
356 REG_PLUS_INTRN_MASK(ring));
357 break;
358#endif
359 default:
360 break;
361 }
362 }
363}
364
365static inline void cas_unmask_intr(struct cas *cp)
366{
367 int i;
368
369 for (i = 0; i < N_RX_COMP_RINGS; i++)
370 cas_enable_irq(cp, i);
371}
372
373static inline void cas_entropy_gather(struct cas *cp)
374{
375#ifdef USE_ENTROPY_DEV
376 if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
377 return;
378
379 batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
380 readl(cp->regs + REG_ENTROPY_IV),
381 sizeof(uint64_t)*8);
382#endif
383}
384
385static inline void cas_entropy_reset(struct cas *cp)
386{
387#ifdef USE_ENTROPY_DEV
388 if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
389 return;
390
391 writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
392 cp->regs + REG_BIM_LOCAL_DEV_EN);
393 writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
394 writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);
395
396
397 if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
398 cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
399#endif
400}
401
402
403
404
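/* Read a PHY register through the MIF frame register, polling for the
 * turn-around bit to flip; returns 0xFFFF on timeout.
 */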
405static u16 cas_phy_read(struct cas *cp, int reg)
406{
407 u32 cmd;
408 int limit = STOP_TRIES_PHY;
409
410 cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
411 cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
412 cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
413 cmd |= MIF_FRAME_TURN_AROUND_MSB;
414 writel(cmd, cp->regs + REG_MIF_FRAME);
415
416
417 while (limit-- > 0) {
418 udelay(10);
419 cmd = readl(cp->regs + REG_MIF_FRAME);
420 if (cmd & MIF_FRAME_TURN_AROUND_LSB)
421 return cmd & MIF_FRAME_DATA_MASK;
422 }
423 return 0xFFFF;
424}
425
426static int cas_phy_write(struct cas *cp, int reg, u16 val)
427{
428 int limit = STOP_TRIES_PHY;
429 u32 cmd;
430
431 cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
432 cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
433 cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
434 cmd |= MIF_FRAME_TURN_AROUND_MSB;
435 cmd |= val & MIF_FRAME_DATA_MASK;
436 writel(cmd, cp->regs + REG_MIF_FRAME);
437
438
439 while (limit-- > 0) {
440 udelay(10);
441 cmd = readl(cp->regs + REG_MIF_FRAME);
442 if (cmd & MIF_FRAME_TURN_AROUND_LSB)
443 return 0;
444 }
445 return -1;
446}
447
448static void cas_phy_powerup(struct cas *cp)
449{
450 u16 ctl = cas_phy_read(cp, MII_BMCR);
451
452 if ((ctl & BMCR_PDOWN) == 0)
453 return;
454 ctl &= ~BMCR_PDOWN;
455 cas_phy_write(cp, MII_BMCR, ctl);
456}
457
458static void cas_phy_powerdown(struct cas *cp)
459{
460 u16 ctl = cas_phy_read(cp, MII_BMCR);
461
462 if (ctl & BMCR_PDOWN)
463 return;
464 ctl |= BMCR_PDOWN;
465 cas_phy_write(cp, MII_BMCR, ctl);
466}
467
468
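/* Unmap a receive buffer page from the device and free the underlying
 * page allocation.
 */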
469static int cas_page_free(struct cas *cp, cas_page_t *page)
470{
471 pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
472 PCI_DMA_FROMDEVICE);
473 __free_pages(page->buffer, cp->page_order);
474 kfree(page);
475 return 0;
476}
477
478#ifdef RX_COUNT_BUFFERS
479#define RX_USED_ADD(x, y) ((x)->used += (y))
480#define RX_USED_SET(x, y) ((x)->used = (y))
481#else
482#define RX_USED_ADD(x, y)
483#define RX_USED_SET(x, y)
484#endif
485
486
487
488
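/* Allocate a receive buffer of cp->page_order pages and DMA-map it for
 * the device; returns NULL if allocation fails.
 */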
489static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
490{
491 cas_page_t *page;
492
493 page = kmalloc(sizeof(cas_page_t), flags);
494 if (!page)
495 return NULL;
496
497 INIT_LIST_HEAD(&page->list);
498 RX_USED_SET(page, 0);
499 page->buffer = alloc_pages(flags, cp->page_order);
500 if (!page->buffer)
501 goto page_err;
502 page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
503 cp->page_size, PCI_DMA_FROMDEVICE);
504 return page;
505
506page_err:
507 kfree(page);
508 return NULL;
509}
510
511
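/* Initialize the spare RX page bookkeeping; the pages themselves are
 * allocated later by cas_spare_recover().
 */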
512static void cas_spare_init(struct cas *cp)
513{
514 spin_lock(&cp->rx_inuse_lock);
515 INIT_LIST_HEAD(&cp->rx_inuse_list);
516 spin_unlock(&cp->rx_inuse_lock);
517
518 spin_lock(&cp->rx_spare_lock);
519 INIT_LIST_HEAD(&cp->rx_spare_list);
520 cp->rx_spares_needed = RX_SPARE_COUNT;
521 spin_unlock(&cp->rx_spare_lock);
522}
523
524
525static void cas_spare_free(struct cas *cp)
526{
527 struct list_head list, *elem, *tmp;
528
529
530 INIT_LIST_HEAD(&list);
531 spin_lock(&cp->rx_spare_lock);
532 list_splice_init(&cp->rx_spare_list, &list);
533 spin_unlock(&cp->rx_spare_lock);
534 list_for_each_safe(elem, tmp, &list) {
535 cas_page_free(cp, list_entry(elem, cas_page_t, list));
536 }
537
538 INIT_LIST_HEAD(&list);
539#if 1
540
541
542
543
544 spin_lock(&cp->rx_inuse_lock);
545 list_splice_init(&cp->rx_inuse_list, &list);
546 spin_unlock(&cp->rx_inuse_lock);
547#else
548 spin_lock(&cp->rx_spare_lock);
549 list_splice_init(&cp->rx_inuse_list, &list);
550 spin_unlock(&cp->rx_spare_lock);
551#endif
552 list_for_each_safe(elem, tmp, &list) {
553 cas_page_free(cp, list_entry(elem, cas_page_t, list));
554 }
555}
556
557
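/* Replenish the spare RX page pool: reclaim in-use pages whose refcount
 * has dropped back to one, then allocate fresh pages for whatever is
 * still needed.
 */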
558static void cas_spare_recover(struct cas *cp, const gfp_t flags)
559{
560 struct list_head list, *elem, *tmp;
561 int needed, i;
562
563
564
565
566
567
568 INIT_LIST_HEAD(&list);
569 spin_lock(&cp->rx_inuse_lock);
570 list_splice_init(&cp->rx_inuse_list, &list);
571 spin_unlock(&cp->rx_inuse_lock);
572
573 list_for_each_safe(elem, tmp, &list) {
574 cas_page_t *page = list_entry(elem, cas_page_t, list);
575
576
577
578
579
580
581
582
583
584
585
586
587
588 if (page_count(page->buffer) > 1)
589 continue;
590
591 list_del(elem);
592 spin_lock(&cp->rx_spare_lock);
593 if (cp->rx_spares_needed > 0) {
594 list_add(elem, &cp->rx_spare_list);
595 cp->rx_spares_needed--;
596 spin_unlock(&cp->rx_spare_lock);
597 } else {
598 spin_unlock(&cp->rx_spare_lock);
599 cas_page_free(cp, page);
600 }
601 }
602
603
604 if (!list_empty(&list)) {
605 spin_lock(&cp->rx_inuse_lock);
606 list_splice(&list, &cp->rx_inuse_list);
607 spin_unlock(&cp->rx_inuse_lock);
608 }
609
610 spin_lock(&cp->rx_spare_lock);
611 needed = cp->rx_spares_needed;
612 spin_unlock(&cp->rx_spare_lock);
613 if (!needed)
614 return;
615
616
617 INIT_LIST_HEAD(&list);
618 i = 0;
619 while (i < needed) {
620 cas_page_t *spare = cas_page_alloc(cp, flags);
621 if (!spare)
622 break;
623 list_add(&spare->list, &list);
624 i++;
625 }
626
627 spin_lock(&cp->rx_spare_lock);
628 list_splice(&list, &cp->rx_spare_list);
629 cp->rx_spares_needed -= i;
630 spin_unlock(&cp->rx_spare_lock);
631}
632
633
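/* Pull a page off the spare list, attempting an inline recovery pass if
 * the list is empty and periodically kicking the reset task so spares
 * get replenished.
 */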
634static cas_page_t *cas_page_dequeue(struct cas *cp)
635{
636 struct list_head *entry;
637 int recover;
638
639 spin_lock(&cp->rx_spare_lock);
640 if (list_empty(&cp->rx_spare_list)) {
641
642 spin_unlock(&cp->rx_spare_lock);
643 cas_spare_recover(cp, GFP_ATOMIC);
644 spin_lock(&cp->rx_spare_lock);
645 if (list_empty(&cp->rx_spare_list)) {
646 netif_err(cp, rx_err, cp->dev,
647 "no spare buffers available\n");
648 spin_unlock(&cp->rx_spare_lock);
649 return NULL;
650 }
651 }
652
653 entry = cp->rx_spare_list.next;
654 list_del(entry);
655 recover = ++cp->rx_spares_needed;
656 spin_unlock(&cp->rx_spare_lock);
657
658
659 if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
660#if 1
661 atomic_inc(&cp->reset_task_pending);
662 atomic_inc(&cp->reset_task_pending_spare);
663 schedule_work(&cp->reset_task);
664#else
665 atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
666 schedule_work(&cp->reset_task);
667#endif
668 }
669 return list_entry(entry, cas_page_t, list);
670}
671
672
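/* Enable or disable MIF polling of the PHY's BMSR register; when
 * enabled, link-status and autoneg-complete changes are reported as
 * MIF interrupts.
 */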
673static void cas_mif_poll(struct cas *cp, const int enable)
674{
675 u32 cfg;
676
677 cfg = readl(cp->regs + REG_MIF_CFG);
678 cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);
679
680 if (cp->phy_type & CAS_PHY_MII_MDIO1)
681 cfg |= MIF_CFG_PHY_SELECT;
682
683
684 if (enable) {
685 cfg |= MIF_CFG_POLL_EN;
686 cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
687 cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
688 }
689 writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
690 cp->regs + REG_MIF_MASK);
691 writel(cfg, cp->regs + REG_MIF_CFG);
692}
693
694
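/* Apply the requested link settings from @ep (or restart with the
 * current ones) and kick off autonegotiation or forced mode on either
 * the PCS/SERDES or the MII PHY.
 */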
695static void cas_begin_auto_negotiation(struct cas *cp,
696 const struct ethtool_link_ksettings *ep)
697{
698 u16 ctl;
699#if 1
700 int lcntl;
701 int changed = 0;
702 int oldstate = cp->lstate;
703 int link_was_not_down = !(oldstate == link_down);
704#endif
705
706 if (!ep)
707 goto start_aneg;
708 lcntl = cp->link_cntl;
709 if (ep->base.autoneg == AUTONEG_ENABLE) {
710 cp->link_cntl = BMCR_ANENABLE;
711 } else {
712 u32 speed = ep->base.speed;
713 cp->link_cntl = 0;
714 if (speed == SPEED_100)
715 cp->link_cntl |= BMCR_SPEED100;
716 else if (speed == SPEED_1000)
717 cp->link_cntl |= CAS_BMCR_SPEED1000;
718 if (ep->base.duplex == DUPLEX_FULL)
719 cp->link_cntl |= BMCR_FULLDPLX;
720 }
721#if 1
722 changed = (lcntl != cp->link_cntl);
723#endif
724start_aneg:
725 if (cp->lstate == link_up) {
726 netdev_info(cp->dev, "PCS link down\n");
727 } else {
728 if (changed) {
729 netdev_info(cp->dev, "link configuration changed\n");
730 }
731 }
732 cp->lstate = link_down;
733 cp->link_transition = LINK_TRANSITION_LINK_DOWN;
734 if (!cp->hw_running)
735 return;
736#if 1
737
738
739
740
741
742 if (oldstate == link_up)
743 netif_carrier_off(cp->dev);
744 if (changed && link_was_not_down) {
745
746
747
748
749
750 atomic_inc(&cp->reset_task_pending);
751 atomic_inc(&cp->reset_task_pending_all);
752 schedule_work(&cp->reset_task);
753 cp->timer_ticks = 0;
754 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
755 return;
756 }
757#endif
758 if (cp->phy_type & CAS_PHY_SERDES) {
759 u32 val = readl(cp->regs + REG_PCS_MII_CTRL);
760
761 if (cp->link_cntl & BMCR_ANENABLE) {
762 val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
763 cp->lstate = link_aneg;
764 } else {
765 if (cp->link_cntl & BMCR_FULLDPLX)
766 val |= PCS_MII_CTRL_DUPLEX;
767 val &= ~PCS_MII_AUTONEG_EN;
768 cp->lstate = link_force_ok;
769 }
770 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
771 writel(val, cp->regs + REG_PCS_MII_CTRL);
772
773 } else {
774 cas_mif_poll(cp, 0);
775 ctl = cas_phy_read(cp, MII_BMCR);
776 ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
777 CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
778 ctl |= cp->link_cntl;
779 if (ctl & BMCR_ANENABLE) {
780 ctl |= BMCR_ANRESTART;
781 cp->lstate = link_aneg;
782 } else {
783 cp->lstate = link_force_ok;
784 }
785 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
786 cas_phy_write(cp, MII_BMCR, ctl);
787 cas_mif_poll(cp, 1);
788 }
789
790 cp->timer_ticks = 0;
791 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
792}
793
794
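/* Soft-reset the MII PHY and wait for the reset bit to self-clear;
 * returns nonzero on timeout.
 */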
795static int cas_reset_mii_phy(struct cas *cp)
796{
797 int limit = STOP_TRIES_PHY;
798 u16 val;
799
800 cas_phy_write(cp, MII_BMCR, BMCR_RESET);
801 udelay(100);
802 while (--limit) {
803 val = cas_phy_read(cp, MII_BMCR);
804 if ((val & BMCR_RESET) == 0)
805 break;
806 udelay(10);
807 }
808 return limit <= 0;
809}
810
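/* For the Saturn DP83065 PHY, fetch the firmware image with
 * request_firmware() and cache a copy for later download.
 */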
static void cas_saturn_firmware_init(struct cas *cp)
{
	const struct firmware *fw;
	const char fw_name[] = "sun/cassini.bin";
	int err;

	if (PHY_NS_DP83065 != cp->phy_id)
		return;

	err = request_firmware(&fw, fw_name, &cp->pdev->dev);
	if (err) {
		pr_err("Failed to load firmware \"%s\"\n",
		       fw_name);
		return;
	}
	if (fw->size < 2) {
		pr_err("bogus length %zu in \"%s\"\n",
		       fw->size, fw_name);
		goto out;
	}
	cp->fw_load_addr = fw->data[1] << 8 | fw->data[0];
	cp->fw_size = fw->size - 2;
	cp->fw_data = vmalloc(cp->fw_size);
	if (!cp->fw_data)
		goto out;
	memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
out:
	release_firmware(fw);
}
840
841static void cas_saturn_firmware_load(struct cas *cp)
842{
843 int i;
844
845 if (!cp->fw_data)
846 return;
847
848 cas_phy_powerdown(cp);
849
850
851 cas_phy_write(cp, DP83065_MII_MEM, 0x0);
852
853
854 cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
855 cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
856 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
857 cas_phy_write(cp, DP83065_MII_REGD, 0x82);
858 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
859 cas_phy_write(cp, DP83065_MII_REGD, 0x0);
860 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
861 cas_phy_write(cp, DP83065_MII_REGD, 0x39);
862
863
864 cas_phy_write(cp, DP83065_MII_MEM, 0x1);
865 cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
866 for (i = 0; i < cp->fw_size; i++)
867 cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);
868
869
870 cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
871 cas_phy_write(cp, DP83065_MII_REGD, 0x1);
872}
873
874
875
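/* One-time PHY/PCS bring-up: apply per-PHY workarounds and set up
 * advertisement registers for MII parts, or reset and configure the
 * PCS block for SERDES parts.
 */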
876static void cas_phy_init(struct cas *cp)
877{
878 u16 val;
879
880
881 if (CAS_PHY_MII(cp->phy_type)) {
882 writel(PCS_DATAPATH_MODE_MII,
883 cp->regs + REG_PCS_DATAPATH_MODE);
884
885 cas_mif_poll(cp, 0);
886 cas_reset_mii_phy(cp);
887
888 if (PHY_LUCENT_B0 == cp->phy_id) {
889
890 cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
891 cas_phy_write(cp, MII_BMCR, 0x00f1);
892 cas_phy_write(cp, LUCENT_MII_REG, 0x0);
893
894 } else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
895
896 cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
897 cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
898 cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
899 cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
900 cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
901 cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
902 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
903 cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
904 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
905 cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
906 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);
907
908 } else if (PHY_BROADCOM_5411 == cp->phy_id) {
909 val = cas_phy_read(cp, BROADCOM_MII_REG4);
910 val = cas_phy_read(cp, BROADCOM_MII_REG4);
911 if (val & 0x0080) {
912
913 cas_phy_write(cp, BROADCOM_MII_REG4,
914 val & ~0x0080);
915 }
916
917 } else if (cp->cas_flags & CAS_FLAG_SATURN) {
918 writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
919 SATURN_PCFG_FSI : 0x0,
920 cp->regs + REG_SATURN_PCFG);
921
922
923
924
925
926 if (PHY_NS_DP83065 == cp->phy_id) {
927 cas_saturn_firmware_load(cp);
928 }
929 cas_phy_powerup(cp);
930 }
931
932
933 val = cas_phy_read(cp, MII_BMCR);
934 val &= ~BMCR_ANENABLE;
935 cas_phy_write(cp, MII_BMCR, val);
936 udelay(10);
937
938 cas_phy_write(cp, MII_ADVERTISE,
939 cas_phy_read(cp, MII_ADVERTISE) |
940 (ADVERTISE_10HALF | ADVERTISE_10FULL |
941 ADVERTISE_100HALF | ADVERTISE_100FULL |
942 CAS_ADVERTISE_PAUSE |
943 CAS_ADVERTISE_ASYM_PAUSE));
944
945 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
946
947
948
949 val = cas_phy_read(cp, CAS_MII_1000_CTRL);
950 val &= ~CAS_ADVERTISE_1000HALF;
951 val |= CAS_ADVERTISE_1000FULL;
952 cas_phy_write(cp, CAS_MII_1000_CTRL, val);
953 }
954
955 } else {
956
957 u32 val;
958 int limit;
959
960 writel(PCS_DATAPATH_MODE_SERDES,
961 cp->regs + REG_PCS_DATAPATH_MODE);
962
963
964 if (cp->cas_flags & CAS_FLAG_SATURN)
965 writel(0, cp->regs + REG_SATURN_PCFG);
966
967
968 val = readl(cp->regs + REG_PCS_MII_CTRL);
969 val |= PCS_MII_RESET;
970 writel(val, cp->regs + REG_PCS_MII_CTRL);
971
972 limit = STOP_TRIES;
973 while (--limit > 0) {
974 udelay(10);
975 if ((readl(cp->regs + REG_PCS_MII_CTRL) &
976 PCS_MII_RESET) == 0)
977 break;
978 }
979 if (limit <= 0)
980 netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
981 readl(cp->regs + REG_PCS_STATE_MACHINE));
982
983
984
985
986 writel(0x0, cp->regs + REG_PCS_CFG);
987
988
989 val = readl(cp->regs + REG_PCS_MII_ADVERT);
990 val &= ~PCS_MII_ADVERT_HD;
991 val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
992 PCS_MII_ADVERT_ASYM_PAUSE);
993 writel(val, cp->regs + REG_PCS_MII_ADVERT);
994
995
996 writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);
997
998
999 writel(PCS_SERDES_CTRL_SYNCD_EN,
1000 cp->regs + REG_PCS_SERDES_CTRL);
1001 }
1002}
1003
1004
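/* Sample the PCS link state and update cp->lstate and the carrier
 * accordingly.  Returns nonzero when the caller should schedule a chip
 * reset as part of the PCS link-down workaround.
 */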
1005static int cas_pcs_link_check(struct cas *cp)
1006{
1007 u32 stat, state_machine;
1008 int retval = 0;
1009
1010
1011
1012
1013
1014 stat = readl(cp->regs + REG_PCS_MII_STATUS);
1015 if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
1016 stat = readl(cp->regs + REG_PCS_MII_STATUS);
1017
1018
1019
1020
1021 if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
1022 PCS_MII_STATUS_REMOTE_FAULT)) ==
1023 (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
1024 netif_info(cp, link, cp->dev, "PCS RemoteFault\n");
1025
1026
1027
1028
1029 state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
1030 if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
1031 stat &= ~PCS_MII_STATUS_LINK_STATUS;
1032 } else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
1033 stat |= PCS_MII_STATUS_LINK_STATUS;
1034 }
1035
1036 if (stat & PCS_MII_STATUS_LINK_STATUS) {
1037 if (cp->lstate != link_up) {
1038 if (cp->opened) {
1039 cp->lstate = link_up;
1040 cp->link_transition = LINK_TRANSITION_LINK_UP;
1041
1042 cas_set_link_modes(cp);
1043 netif_carrier_on(cp->dev);
1044 }
1045 }
1046 } else if (cp->lstate == link_up) {
1047 cp->lstate = link_down;
1048 if (link_transition_timeout != 0 &&
1049 cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
1050 !cp->link_transition_jiffies_valid) {
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063 retval = 1;
1064 cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
1065 cp->link_transition_jiffies = jiffies;
1066 cp->link_transition_jiffies_valid = 1;
1067 } else {
1068 cp->link_transition = LINK_TRANSITION_ON_FAILURE;
1069 }
1070 netif_carrier_off(cp->dev);
1071 if (cp->opened)
1072 netif_info(cp, link, cp->dev, "PCS link down\n");
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082 if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
1083
1084 stat = readl(cp->regs + REG_PCS_SERDES_STATE);
1085 if (stat == 0x03)
1086 return 1;
1087 }
1088 } else if (cp->lstate == link_down) {
1089 if (link_transition_timeout != 0 &&
1090 cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
1091 !cp->link_transition_jiffies_valid) {
1092
1093
1094
1095
1096
1097 retval = 1;
1098 cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
1099 cp->link_transition_jiffies = jiffies;
1100 cp->link_transition_jiffies_valid = 1;
1101 } else {
1102 cp->link_transition = LINK_TRANSITION_STILL_FAILED;
1103 }
1104 }
1105
1106 return retval;
1107}
1108
1109static int cas_pcs_interrupt(struct net_device *dev,
1110 struct cas *cp, u32 status)
1111{
1112 u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);
1113
1114 if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
1115 return 0;
1116 return cas_pcs_link_check(cp);
1117}
1118
1119static int cas_txmac_interrupt(struct net_device *dev,
1120 struct cas *cp, u32 status)
1121{
1122 u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);
1123
1124 if (!txmac_stat)
1125 return 0;
1126
1127 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1128 "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);
1129
1130
1131
1132
1133 if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
1134 !(txmac_stat & ~MAC_TX_DEFER_TIMER))
1135 return 0;
1136
1137 spin_lock(&cp->stat_lock[0]);
1138 if (txmac_stat & MAC_TX_UNDERRUN) {
1139 netdev_err(dev, "TX MAC xmit underrun\n");
1140 cp->net_stats[0].tx_fifo_errors++;
1141 }
1142
1143 if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
1144 netdev_err(dev, "TX MAC max packet size error\n");
1145 cp->net_stats[0].tx_errors++;
1146 }
1147
1148
1149
1150
1151 if (txmac_stat & MAC_TX_COLL_NORMAL)
1152 cp->net_stats[0].collisions += 0x10000;
1153
1154 if (txmac_stat & MAC_TX_COLL_EXCESS) {
1155 cp->net_stats[0].tx_aborted_errors += 0x10000;
1156 cp->net_stats[0].collisions += 0x10000;
1157 }
1158
1159 if (txmac_stat & MAC_TX_COLL_LATE) {
1160 cp->net_stats[0].tx_aborted_errors += 0x10000;
1161 cp->net_stats[0].collisions += 0x10000;
1162 }
1163 spin_unlock(&cp->stat_lock[0]);
1164
1165
1166
1167
1168 return 0;
1169}
1170
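/* Load a header-parser (HP) instruction sequence into the HP
 * instruction RAM, one instruction per RAM slot.
 */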
1171static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
1172{
1173 cas_hp_inst_t *inst;
1174 u32 val;
1175 int i;
1176
1177 i = 0;
1178 while ((inst = firmware) && inst->note) {
1179 writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);
1180
1181 val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
1182 val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
1183 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);
1184
1185 val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
1186 val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
1187 val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
1188 val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
1189 val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
1190 val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
1191 val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
1192 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);
1193
1194 val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
1195 val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
1196 val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
1197 val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
1198 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
1199 ++firmware;
1200 ++i;
1201 }
1202}
1203
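/* Program the RX DMA engine: descriptor and completion ring base
 * addresses, pause thresholds, interrupt blanking, page sizing, and
 * the header parser configuration.
 */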
1204static void cas_init_rx_dma(struct cas *cp)
1205{
1206 u64 desc_dma = cp->block_dvma;
1207 u32 val;
1208 int i, size;
1209
1210
1211 val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
1212 val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
1213 val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
1214 if ((N_RX_DESC_RINGS > 1) &&
1215 (cp->cas_flags & CAS_FLAG_REG_PLUS))
1216 val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
1217 writel(val, cp->regs + REG_RX_CFG);
1218
1219 val = (unsigned long) cp->init_rxds[0] -
1220 (unsigned long) cp->init_block;
1221 writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
1222 writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
1223 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
1224
1225 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1226
1227
1228
1229 val = (unsigned long) cp->init_rxds[1] -
1230 (unsigned long) cp->init_block;
1231 writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
1232 writel((desc_dma + val) & 0xffffffff, cp->regs +
1233 REG_PLUS_RX_DB1_LOW);
1234 writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
1235 REG_PLUS_RX_KICK1);
1236 }
1237
1238
1239 val = (unsigned long) cp->init_rxcs[0] -
1240 (unsigned long) cp->init_block;
1241 writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
1242 writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);
1243
1244 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1245
1246 for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
1247 val = (unsigned long) cp->init_rxcs[i] -
1248 (unsigned long) cp->init_block;
1249 writel((desc_dma + val) >> 32, cp->regs +
1250 REG_PLUS_RX_CBN_HI(i));
1251 writel((desc_dma + val) & 0xffffffff, cp->regs +
1252 REG_PLUS_RX_CBN_LOW(i));
1253 }
1254 }
1255
1256
1257
1258
1259
1260 readl(cp->regs + REG_INTR_STATUS_ALIAS);
1261 writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
1262 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1263 for (i = 1; i < N_RX_COMP_RINGS; i++)
1264 readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));
1265
1266
1267 if (N_RX_COMP_RINGS > 1)
1268 writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
1269 cp->regs + REG_PLUS_ALIASN_CLEAR(1));
1270
1271 for (i = 2; i < N_RX_COMP_RINGS; i++)
1272 writel(INTR_RX_DONE_ALT,
1273 cp->regs + REG_PLUS_ALIASN_CLEAR(i));
1274 }
1275
1276
1277 val = CAS_BASE(RX_PAUSE_THRESH_OFF,
1278 cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
1279 val |= CAS_BASE(RX_PAUSE_THRESH_ON,
1280 cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
1281 writel(val, cp->regs + REG_RX_PAUSE_THRESH);
1282
1283
1284 for (i = 0; i < 64; i++) {
1285 writel(i, cp->regs + REG_RX_TABLE_ADDR);
1286 writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
1287 writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
1288 writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
1289 }
1290
1291
1292 writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
1293 writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);
1294
1295
1296#ifdef USE_RX_BLANK
1297 val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
1298 val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
1299 writel(val, cp->regs + REG_RX_BLANK);
1300#else
1301 writel(0x0, cp->regs + REG_RX_BLANK);
1302#endif
1303
1304
1305
1306
1307
1308
1309
1310 val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
1311 writel(val, cp->regs + REG_RX_AE_THRESH);
1312 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1313 val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
1314 writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
1315 }
1316
1317
1318
1319
1320 writel(0x0, cp->regs + REG_RX_RED);
1321
1322
1323 val = 0;
1324 if (cp->page_size == 0x1000)
1325 val = 0x1;
1326 else if (cp->page_size == 0x2000)
1327 val = 0x2;
1328 else if (cp->page_size == 0x4000)
1329 val = 0x3;
1330
1331
1332 size = cp->dev->mtu + 64;
1333 if (size > cp->page_size)
1334 size = cp->page_size;
1335
1336 if (size <= 0x400)
1337 i = 0x0;
1338 else if (size <= 0x800)
1339 i = 0x1;
1340 else if (size <= 0x1000)
1341 i = 0x2;
1342 else
1343 i = 0x3;
1344
1345 cp->mtu_stride = 1 << (i + 10);
1346 val = CAS_BASE(RX_PAGE_SIZE, val);
1347 val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
1348 val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
1349 val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
1350 writel(val, cp->regs + REG_RX_PAGE_SIZE);
1351
1352
1353 if (CAS_HP_FIRMWARE == cas_prog_null)
1354 return;
1355
1356 val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
1357 val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
1358 val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
1359 writel(val, cp->regs + REG_HP_CFG);
1360}
1361
1362static inline void cas_rxc_init(struct cas_rx_comp *rxc)
1363{
1364 memset(rxc, 0, sizeof(*rxc));
1365 rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
1366}
1367
1368
1369
1370
1371
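/* Return the spare page for @index if the network stack no longer
 * holds a reference to it (page_count == 1); otherwise dequeue a fresh
 * spare and park the busy page on the in-use list.
 */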
1372static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
1373{
1374 cas_page_t *page = cp->rx_pages[1][index];
1375 cas_page_t *new;
1376
1377 if (page_count(page->buffer) == 1)
1378 return page;
1379
1380 new = cas_page_dequeue(cp);
1381 if (new) {
1382 spin_lock(&cp->rx_inuse_lock);
1383 list_add(&page->list, &cp->rx_inuse_list);
1384 spin_unlock(&cp->rx_inuse_lock);
1385 }
1386 return new;
1387}
1388
1389
1390static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
1391 const int index)
1392{
1393 cas_page_t **page0 = cp->rx_pages[0];
1394 cas_page_t **page1 = cp->rx_pages[1];
1395
1396
1397 if (page_count(page0[index]->buffer) > 1) {
1398 cas_page_t *new = cas_page_spare(cp, index);
1399 if (new) {
1400 page1[index] = page0[index];
1401 page0[index] = new;
1402 }
1403 }
1404 RX_USED_SET(page0[index], 0);
1405 return page0[index];
1406}
1407
1408static void cas_clean_rxds(struct cas *cp)
1409{
1410
1411 struct cas_rx_desc *rxd = cp->init_rxds[0];
1412 int i, size;
1413
1414
1415 for (i = 0; i < N_RX_FLOWS; i++) {
1416 struct sk_buff *skb;
1417 while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
1418 cas_skb_release(skb);
1419 }
1420 }
1421
1422
1423 size = RX_DESC_RINGN_SIZE(0);
1424 for (i = 0; i < size; i++) {
1425 cas_page_t *page = cas_page_swap(cp, 0, i);
1426 rxd[i].buffer = cpu_to_le64(page->dma_addr);
1427 rxd[i].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
1428 CAS_BASE(RX_INDEX_RING, 0));
1429 }
1430
1431 cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4;
1432 cp->rx_last[0] = 0;
1433 cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
1434}
1435
1436static void cas_clean_rxcs(struct cas *cp)
1437{
1438 int i, j;
1439
1440
1441 memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
1442 memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
1443 for (i = 0; i < N_RX_COMP_RINGS; i++) {
1444 struct cas_rx_comp *rxc = cp->init_rxcs[i];
1445 for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
1446 cas_rxc_init(rxc + j);
1447 }
1448 }
1449}
1450
1451#if 0
1452
1453
1454
1455
1456
1457
1458static int cas_rxmac_reset(struct cas *cp)
1459{
1460 struct net_device *dev = cp->dev;
1461 int limit;
1462 u32 val;
1463
1464
1465 writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1466 for (limit = 0; limit < STOP_TRIES; limit++) {
1467 if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
1468 break;
1469 udelay(10);
1470 }
1471 if (limit == STOP_TRIES) {
1472 netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
1473 return 1;
1474 }
1475
1476
1477 writel(0, cp->regs + REG_RX_CFG);
1478 for (limit = 0; limit < STOP_TRIES; limit++) {
1479 if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
1480 break;
1481 udelay(10);
1482 }
1483 if (limit == STOP_TRIES) {
1484 netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
1485 return 1;
1486 }
1487
1488 mdelay(5);
1489
1490
1491 writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
1492 for (limit = 0; limit < STOP_TRIES; limit++) {
1493 if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
1494 break;
1495 udelay(10);
1496 }
1497 if (limit == STOP_TRIES) {
1498 netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
1499 return 1;
1500 }
1501
1502
1503 cas_clean_rxds(cp);
1504 cas_clean_rxcs(cp);
1505
1506
1507 cas_init_rx_dma(cp);
1508
1509
1510 val = readl(cp->regs + REG_RX_CFG);
1511 writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
1512 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
1513 val = readl(cp->regs + REG_MAC_RX_CFG);
1514 writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1515 return 0;
1516}
1517#endif
1518
1519static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
1520 u32 status)
1521{
1522 u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);
1523
1524 if (!stat)
1525 return 0;
1526
1527 netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);
1528
1529
1530 spin_lock(&cp->stat_lock[0]);
1531 if (stat & MAC_RX_ALIGN_ERR)
1532 cp->net_stats[0].rx_frame_errors += 0x10000;
1533
1534 if (stat & MAC_RX_CRC_ERR)
1535 cp->net_stats[0].rx_crc_errors += 0x10000;
1536
1537 if (stat & MAC_RX_LEN_ERR)
1538 cp->net_stats[0].rx_length_errors += 0x10000;
1539
1540 if (stat & MAC_RX_OVERFLOW) {
1541 cp->net_stats[0].rx_over_errors++;
1542 cp->net_stats[0].rx_fifo_errors++;
1543 }
1544
1545
1546
1547
1548 spin_unlock(&cp->stat_lock[0]);
1549 return 0;
1550}
1551
1552static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
1553 u32 status)
1554{
1555 u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);
1556
1557 if (!stat)
1558 return 0;
1559
1560 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1561 "mac interrupt, stat: 0x%x\n", stat);
1562
1563
1564
1565
1566
1567 if (stat & MAC_CTRL_PAUSE_STATE)
1568 cp->pause_entered++;
1569
1570 if (stat & MAC_CTRL_PAUSE_RECEIVED)
1571 cp->pause_last_time_recvd = (stat >> 16);
1572
1573 return 0;
1574}
1575
1576
1577
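/* The MII link is down: step the PHY through the fallback sequence,
 * dropping from autoneg to a forced speed and then through
 * 1000 -> 100 full -> 100 half -> 10 Mbit.
 */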
1578static inline int cas_mdio_link_not_up(struct cas *cp)
1579{
1580 u16 val;
1581
1582 switch (cp->lstate) {
1583 case link_force_ret:
1584 netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
1585 cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
1586 cp->timer_ticks = 5;
1587 cp->lstate = link_force_ok;
1588 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1589 break;
1590
1591 case link_aneg:
1592 val = cas_phy_read(cp, MII_BMCR);
1593
1594
1595
1596
1597 val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
1598 val |= BMCR_FULLDPLX;
1599 val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
1600 CAS_BMCR_SPEED1000 : BMCR_SPEED100;
1601 cas_phy_write(cp, MII_BMCR, val);
1602 cp->timer_ticks = 5;
1603 cp->lstate = link_force_try;
1604 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1605 break;
1606
1607 case link_force_try:
1608
1609 val = cas_phy_read(cp, MII_BMCR);
1610 cp->timer_ticks = 5;
1611 if (val & CAS_BMCR_SPEED1000) {
1612 val &= ~CAS_BMCR_SPEED1000;
1613 val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
1614 cas_phy_write(cp, MII_BMCR, val);
1615 break;
1616 }
1617
1618 if (val & BMCR_SPEED100) {
1619 if (val & BMCR_FULLDPLX)
1620 val &= ~BMCR_FULLDPLX;
1621 else {
1622 val &= ~BMCR_SPEED100;
1623 }
1624 cas_phy_write(cp, MII_BMCR, val);
1625 break;
1626 }
1627 default:
1628 break;
1629 }
1630 return 0;
1631}
1632
1633
1634
1635static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
1636{
1637 int restart;
1638
1639 if (bmsr & BMSR_LSTATUS) {
1640
1641
1642
1643
1644
1645 if ((cp->lstate == link_force_try) &&
1646 (cp->link_cntl & BMCR_ANENABLE)) {
1647 cp->lstate = link_force_ret;
1648 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1649 cas_mif_poll(cp, 0);
1650 cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
1651 cp->timer_ticks = 5;
1652 if (cp->opened)
1653 netif_info(cp, link, cp->dev,
1654 "Got link after fallback, retrying autoneg once...\n");
1655 cas_phy_write(cp, MII_BMCR,
1656 cp->link_fcntl | BMCR_ANENABLE |
1657 BMCR_ANRESTART);
1658 cas_mif_poll(cp, 1);
1659
1660 } else if (cp->lstate != link_up) {
1661 cp->lstate = link_up;
1662 cp->link_transition = LINK_TRANSITION_LINK_UP;
1663
1664 if (cp->opened) {
1665 cas_set_link_modes(cp);
1666 netif_carrier_on(cp->dev);
1667 }
1668 }
1669 return 0;
1670 }
1671
1672
1673
1674
1675 restart = 0;
1676 if (cp->lstate == link_up) {
1677 cp->lstate = link_down;
1678 cp->link_transition = LINK_TRANSITION_LINK_DOWN;
1679
1680 netif_carrier_off(cp->dev);
1681 if (cp->opened)
1682 netif_info(cp, link, cp->dev, "Link down\n");
1683 restart = 1;
1684
1685 } else if (++cp->timer_ticks > 10)
1686 cas_mdio_link_not_up(cp);
1687
1688 return restart;
1689}
1690
1691static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
1692 u32 status)
1693{
1694 u32 stat = readl(cp->regs + REG_MIF_STATUS);
1695 u16 bmsr;
1696
1697
1698 if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
1699 return 0;
1700
1701 bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
1702 return cas_mii_link_check(cp, bmsr);
1703}
1704
1705static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
1706 u32 status)
1707{
1708 u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);
1709
1710 if (!stat)
1711 return 0;
1712
1713 netdev_err(dev, "PCI error [%04x:%04x]",
1714 stat, readl(cp->regs + REG_BIM_DIAG));
1715
1716
1717 if ((stat & PCI_ERR_BADACK) &&
1718 ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
1719 pr_cont(" <No ACK64# during ABS64 cycle>");
1720
1721 if (stat & PCI_ERR_DTRTO)
1722 pr_cont(" <Delayed transaction timeout>");
1723 if (stat & PCI_ERR_OTHER)
1724 pr_cont(" <other>");
1725 if (stat & PCI_ERR_BIM_DMA_WRITE)
1726 pr_cont(" <BIM DMA 0 write req>");
1727 if (stat & PCI_ERR_BIM_DMA_READ)
1728 pr_cont(" <BIM DMA 0 read req>");
1729 pr_cont("\n");
1730
1731 if (stat & PCI_ERR_OTHER) {
1732 u16 cfg;
1733
1734
1735
1736
1737 pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
1738 netdev_err(dev, "Read PCI cfg space status [%04x]\n", cfg);
1739 if (cfg & PCI_STATUS_PARITY)
1740 netdev_err(dev, "PCI parity error detected\n");
1741 if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
1742 netdev_err(dev, "PCI target abort\n");
1743 if (cfg & PCI_STATUS_REC_TARGET_ABORT)
1744 netdev_err(dev, "PCI master acks target abort\n");
1745 if (cfg & PCI_STATUS_REC_MASTER_ABORT)
1746 netdev_err(dev, "PCI master abort\n");
1747 if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
1748 netdev_err(dev, "PCI system error SERR#\n");
1749 if (cfg & PCI_STATUS_DETECTED_PARITY)
1750 netdev_err(dev, "PCI parity error\n");
1751
1752
1753 cfg &= (PCI_STATUS_PARITY |
1754 PCI_STATUS_SIG_TARGET_ABORT |
1755 PCI_STATUS_REC_TARGET_ABORT |
1756 PCI_STATUS_REC_MASTER_ABORT |
1757 PCI_STATUS_SIG_SYSTEM_ERROR |
1758 PCI_STATUS_DETECTED_PARITY);
1759 pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
1760 }
1761
1762
1763 return 1;
1764}
1765
1766
1767
1768
1769
1770
1771static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
1772 u32 status)
1773{
1774 if (status & INTR_RX_TAG_ERROR) {
1775
1776 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1777 "corrupt rx tag framing\n");
1778 spin_lock(&cp->stat_lock[0]);
1779 cp->net_stats[0].rx_errors++;
1780 spin_unlock(&cp->stat_lock[0]);
1781 goto do_reset;
1782 }
1783
1784 if (status & INTR_RX_LEN_MISMATCH) {
1785
1786 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1787 "length mismatch for rx frame\n");
1788 spin_lock(&cp->stat_lock[0]);
1789 cp->net_stats[0].rx_errors++;
1790 spin_unlock(&cp->stat_lock[0]);
1791 goto do_reset;
1792 }
1793
1794 if (status & INTR_PCS_STATUS) {
1795 if (cas_pcs_interrupt(dev, cp, status))
1796 goto do_reset;
1797 }
1798
1799 if (status & INTR_TX_MAC_STATUS) {
1800 if (cas_txmac_interrupt(dev, cp, status))
1801 goto do_reset;
1802 }
1803
1804 if (status & INTR_RX_MAC_STATUS) {
1805 if (cas_rxmac_interrupt(dev, cp, status))
1806 goto do_reset;
1807 }
1808
1809 if (status & INTR_MAC_CTRL_STATUS) {
1810 if (cas_mac_interrupt(dev, cp, status))
1811 goto do_reset;
1812 }
1813
1814 if (status & INTR_MIF_STATUS) {
1815 if (cas_mif_interrupt(dev, cp, status))
1816 goto do_reset;
1817 }
1818
1819 if (status & INTR_PCI_ERROR_STATUS) {
1820 if (cas_pci_interrupt(dev, cp, status))
1821 goto do_reset;
1822 }
1823 return 0;
1824
1825do_reset:
1826#if 1
1827 atomic_inc(&cp->reset_task_pending);
1828 atomic_inc(&cp->reset_task_pending_all);
1829 netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
1830 schedule_work(&cp->reset_task);
1831#else
1832 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
1833 netdev_err(dev, "reset called in cas_abnormal_irq\n");
1834 schedule_work(&cp->reset_task);
1835#endif
1836 return 1;
1837}
1838
1839
1840
1841
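/* Cassini target-abort workaround: cas_calc_tabort() returns the number
 * of trailing bytes of a TX buffer that end too close to a page boundary
 * and therefore need to be carried separately (apparently via the
 * per-ring tiny buffers), or 0 if no workaround is needed.
 */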
1842#define CAS_TABORT(x) (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
1843#define CAS_ROUND_PAGE(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)
1844static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
1845 const int len)
1846{
1847 unsigned long off = addr + len;
1848
1849 if (CAS_TABORT(cp) == 1)
1850 return 0;
1851 if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
1852 return 0;
1853 return TX_TARGET_ABORT_LEN;
1854}
1855
1856static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
1857{
1858 struct cas_tx_desc *txds;
1859 struct sk_buff **skbs;
1860 struct net_device *dev = cp->dev;
1861 int entry, count;
1862
1863 spin_lock(&cp->tx_lock[ring]);
1864 txds = cp->init_txds[ring];
1865 skbs = cp->tx_skbs[ring];
1866 entry = cp->tx_old[ring];
1867
1868 count = TX_BUFF_COUNT(ring, entry, limit);
1869 while (entry != limit) {
1870 struct sk_buff *skb = skbs[entry];
1871 dma_addr_t daddr;
1872 u32 dlen;
1873 int frag;
1874
1875 if (!skb) {
1876
1877 entry = TX_DESC_NEXT(ring, entry);
1878 continue;
1879 }
1880
1881
		count -= skb_shinfo(skb)->nr_frags +
			 cp->tx_tiny_use[ring][entry].nbufs + 1;
1884 if (count < 0)
1885 break;
1886
1887 netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
1888 "tx[%d] done, slot %d\n", ring, entry);
1889
1890 skbs[entry] = NULL;
1891 cp->tx_tiny_use[ring][entry].nbufs = 0;
1892
1893 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1894 struct cas_tx_desc *txd = txds + entry;
1895
1896 daddr = le64_to_cpu(txd->buffer);
1897 dlen = CAS_VAL(TX_DESC_BUFLEN,
1898 le64_to_cpu(txd->control));
1899 pci_unmap_page(cp->pdev, daddr, dlen,
1900 PCI_DMA_TODEVICE);
1901 entry = TX_DESC_NEXT(ring, entry);
1902
1903
1904 if (cp->tx_tiny_use[ring][entry].used) {
1905 cp->tx_tiny_use[ring][entry].used = 0;
1906 entry = TX_DESC_NEXT(ring, entry);
1907 }
1908 }
1909
1910 spin_lock(&cp->stat_lock[ring]);
1911 cp->net_stats[ring].tx_packets++;
1912 cp->net_stats[ring].tx_bytes += skb->len;
1913 spin_unlock(&cp->stat_lock[ring]);
1914 dev_kfree_skb_irq(skb);
1915 }
1916 cp->tx_old[ring] = entry;
1917
1918
1919
1920
1921
1922 if (netif_queue_stopped(dev) &&
1923 (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
1924 netif_wake_queue(dev);
1925 spin_unlock(&cp->tx_lock[ring]);
1926}
1927
1928static void cas_tx(struct net_device *dev, struct cas *cp,
1929 u32 status)
1930{
1931 int limit, ring;
1932#ifdef USE_TX_COMPWB
1933 u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
1934#endif
1935 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1936 "tx interrupt, status: 0x%x, %llx\n",
1937 status, (unsigned long long)compwb);
1938
1939 for (ring = 0; ring < N_TX_RINGS; ring++) {
1940#ifdef USE_TX_COMPWB
1941
1942 limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
1943 CAS_VAL(TX_COMPWB_LSB, compwb);
1944 compwb = TX_COMPWB_NEXT(compwb);
1945#else
1946 limit = readl(cp->regs + REG_TX_COMPN(ring));
1947#endif
1948 if (cp->tx_old[ring] != limit)
1949 cas_tx_ringN(cp, ring, limit);
1950 }
1951}
1952
1953
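/* Build an skb for the packet described by the completion @words:
 * small packets (and headers) are copied into the skb, larger payloads
 * are attached as page fragments, and the hardware TCP checksum is
 * folded in at the end.
 */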
1954static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
1955 int entry, const u64 *words,
1956 struct sk_buff **skbref)
1957{
1958 int dlen, hlen, len, i, alloclen;
1959 int off, swivel = RX_SWIVEL_OFF_VAL;
1960 struct cas_page *page;
1961 struct sk_buff *skb;
1962 void *addr, *crcaddr;
1963 __sum16 csum;
1964 char *p;
1965
1966 hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
1967 dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
1968 len = hlen + dlen;
1969
1970 if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
1971 alloclen = len;
1972 else
1973 alloclen = max(hlen, RX_COPY_MIN);
1974
1975 skb = netdev_alloc_skb(cp->dev, alloclen + swivel + cp->crc_size);
1976 if (skb == NULL)
1977 return -1;
1978
1979 *skbref = skb;
1980 skb_reserve(skb, swivel);
1981
1982 p = skb->data;
1983 addr = crcaddr = NULL;
1984 if (hlen) {
1985 i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
1986 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
1987 off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
1988 swivel;
1989
1990 i = hlen;
1991 if (!dlen)
1992 i += cp->crc_size;
1993 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
1994 PCI_DMA_FROMDEVICE);
1995 addr = cas_page_map(page->buffer);
1996 memcpy(p, addr + off, i);
1997 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
1998 PCI_DMA_FROMDEVICE);
1999 cas_page_unmap(addr);
2000 RX_USED_ADD(page, 0x100);
2001 p += hlen;
2002 swivel = 0;
2003 }
2004
2005
2006 if (alloclen < (hlen + dlen)) {
2007 skb_frag_t *frag = skb_shinfo(skb)->frags;
2008
2009
2010 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2011 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2012 off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2013
2014 hlen = min(cp->page_size - off, dlen);
2015 if (hlen < 0) {
2016 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
2017 "rx page overflow: %d\n", hlen);
2018 dev_kfree_skb_irq(skb);
2019 return -1;
2020 }
2021 i = hlen;
2022 if (i == dlen)
2023 i += cp->crc_size;
2024 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
2025 PCI_DMA_FROMDEVICE);
2026
2027
2028 swivel = 0;
2029 if (p == (char *) skb->data) {
2030 addr = cas_page_map(page->buffer);
2031 memcpy(p, addr + off, RX_COPY_MIN);
2032 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
2033 PCI_DMA_FROMDEVICE);
2034 cas_page_unmap(addr);
2035 off += RX_COPY_MIN;
2036 swivel = RX_COPY_MIN;
2037 RX_USED_ADD(page, cp->mtu_stride);
2038 } else {
2039 RX_USED_ADD(page, hlen);
2040 }
2041 skb_put(skb, alloclen);
2042
2043 skb_shinfo(skb)->nr_frags++;
2044 skb->data_len += hlen - swivel;
2045 skb->truesize += hlen - swivel;
2046 skb->len += hlen - swivel;
2047
2048 __skb_frag_set_page(frag, page->buffer);
2049 __skb_frag_ref(frag);
2050 frag->page_offset = off;
2051 skb_frag_size_set(frag, hlen - swivel);
2052
2053
2054 if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2055 hlen = dlen;
2056 off = 0;
2057
2058 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2059 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2060 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
2061 hlen + cp->crc_size,
2062 PCI_DMA_FROMDEVICE);
2063 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
2064 hlen + cp->crc_size,
2065 PCI_DMA_FROMDEVICE);
2066
2067 skb_shinfo(skb)->nr_frags++;
2068 skb->data_len += hlen;
2069 skb->len += hlen;
2070 frag++;
2071
2072 __skb_frag_set_page(frag, page->buffer);
2073 __skb_frag_ref(frag);
2074 frag->page_offset = 0;
2075 skb_frag_size_set(frag, hlen);
2076 RX_USED_ADD(page, hlen + cp->crc_size);
2077 }
2078
2079 if (cp->crc_size) {
2080 addr = cas_page_map(page->buffer);
2081 crcaddr = addr + off + hlen;
2082 }
2083
2084 } else {
2085
2086 if (!dlen)
2087 goto end_copy_pkt;
2088
2089 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2090 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2091 off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2092 hlen = min(cp->page_size - off, dlen);
2093 if (hlen < 0) {
2094 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
2095 "rx page overflow: %d\n", hlen);
2096 dev_kfree_skb_irq(skb);
2097 return -1;
2098 }
2099 i = hlen;
2100 if (i == dlen)
2101 i += cp->crc_size;
2102 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
2103 PCI_DMA_FROMDEVICE);
2104 addr = cas_page_map(page->buffer);
2105 memcpy(p, addr + off, i);
2106 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
2107 PCI_DMA_FROMDEVICE);
2108 cas_page_unmap(addr);
2109 if (p == (char *) skb->data)
2110 RX_USED_ADD(page, cp->mtu_stride);
2111 else
2112 RX_USED_ADD(page, i);
2113
2114
2115 if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2116 p += hlen;
2117 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2118 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2119 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
2120 dlen + cp->crc_size,
2121 PCI_DMA_FROMDEVICE);
2122 addr = cas_page_map(page->buffer);
2123 memcpy(p, addr, dlen + cp->crc_size);
2124 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
2125 dlen + cp->crc_size,
2126 PCI_DMA_FROMDEVICE);
2127 cas_page_unmap(addr);
2128 RX_USED_ADD(page, dlen + cp->crc_size);
2129 }
2130end_copy_pkt:
2131 if (cp->crc_size) {
2132 addr = NULL;
2133 crcaddr = skb->data + alloclen;
2134 }
2135 skb_put(skb, alloclen);
2136 }
2137
2138 csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
2139 if (cp->crc_size) {
2140
2141 csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
2142 csum_unfold(csum)));
2143 if (addr)
2144 cas_page_unmap(addr);
2145 }
2146 skb->protocol = eth_type_trans(skb, cp->dev);
2147 if (skb->protocol == htons(ETH_P_IP)) {
2148 skb->csum = csum_unfold(~csum);
2149 skb->ip_summed = CHECKSUM_COMPLETE;
2150 } else
2151 skb_checksum_none_assert(skb);
2152 return len;
2153}
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
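/* Queue a packet on its RX flow so packets belonging to the same flow
 * reach the stack in order; the whole flow is released once the
 * hardware sets RX_COMP1_RELEASE_FLOW.
 */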
2170static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
2171 struct sk_buff *skb)
2172{
2173 int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
2174 struct sk_buff_head *flow = &cp->rx_flows[flowid];
2175
2176
2177
2178
2179
2180 __skb_queue_tail(flow, skb);
2181 if (words[0] & RX_COMP1_RELEASE_FLOW) {
2182 while ((skb = __skb_dequeue(flow))) {
2183 cas_skb_release(skb);
2184 }
2185 }
2186}
2187
2188
2189
2190
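/* Return an RX descriptor to the ring, swapping in a fresh page if the
 * old one is still referenced, and kick the hardware on every fourth
 * entry.
 */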
2191static void cas_post_page(struct cas *cp, const int ring, const int index)
2192{
2193 cas_page_t *new;
2194 int entry;
2195
2196 entry = cp->rx_old[ring];
2197
2198 new = cas_page_swap(cp, ring, index);
2199 cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
2200 cp->init_rxds[ring][entry].index =
2201 cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
2202 CAS_BASE(RX_INDEX_RING, ring));
2203
2204 entry = RX_DESC_ENTRY(ring, entry + 1);
2205 cp->rx_old[ring] = entry;
2206
2207 if (entry % 4)
2208 return;
2209
2210 if (ring == 0)
2211 writel(entry, cp->regs + REG_RX_KICK);
2212 else if ((N_RX_DESC_RINGS > 1) &&
2213 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2214 writel(entry, cp->regs + REG_PLUS_RX_KICK1);
2215}
2216
2217
2218
2219static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
2220{
2221 unsigned int entry, last, count, released;
2222 int cluster;
2223 cas_page_t **page = cp->rx_pages[ring];
2224
2225 entry = cp->rx_old[ring];
2226
2227 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
2228 "rxd[%d] interrupt, done: %d\n", ring, entry);
2229
2230 cluster = -1;
2231 count = entry & 0x3;
2232 last = RX_DESC_ENTRY(ring, num ? entry + num - 4: entry - 4);
2233 released = 0;
2234 while (entry != last) {
2235
2236 if (page_count(page[entry]->buffer) > 1) {
2237 cas_page_t *new = cas_page_dequeue(cp);
2238 if (!new) {
2239
2240
2241
2242 cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
2243 if (!timer_pending(&cp->link_timer))
2244 mod_timer(&cp->link_timer, jiffies +
2245 CAS_LINK_FAST_TIMEOUT);
2246 cp->rx_old[ring] = entry;
2247 cp->rx_last[ring] = num ? num - released : 0;
2248 return -ENOMEM;
2249 }
2250 spin_lock(&cp->rx_inuse_lock);
2251 list_add(&page[entry]->list, &cp->rx_inuse_list);
2252 spin_unlock(&cp->rx_inuse_lock);
2253 cp->init_rxds[ring][entry].buffer =
2254 cpu_to_le64(new->dma_addr);
2255 page[entry] = new;
2256
2257 }
2258
2259 if (++count == 4) {
2260 cluster = entry;
2261 count = 0;
2262 }
2263 released++;
2264 entry = RX_DESC_ENTRY(ring, entry + 1);
2265 }
2266 cp->rx_old[ring] = entry;
2267
2268 if (cluster < 0)
2269 return 0;
2270
2271 if (ring == 0)
2272 writel(cluster, cp->regs + REG_RX_KICK);
2273 else if ((N_RX_DESC_RINGS > 1) &&
2274 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2275 writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
2276 return 0;
2277}
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
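/* Process one RX completion ring: walk entries until an unfinished
 * (zeroed) completion is reached, build and deliver skbs, and release
 * the descriptor pages they referenced.  Returns the number of packets
 * processed (bounded by @budget under NAPI).
 */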
2292static int cas_rx_ringN(struct cas *cp, int ring, int budget)
2293{
2294 struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
2295 int entry, drops;
2296 int npackets = 0;
2297
2298 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
2299 "rx[%d] interrupt, done: %d/%d\n",
2300 ring,
2301 readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);
2302
2303 entry = cp->rx_new[ring];
2304 drops = 0;
2305 while (1) {
2306 struct cas_rx_comp *rxc = rxcs + entry;
2307 struct sk_buff *uninitialized_var(skb);
2308 int type, len;
2309 u64 words[4];
2310 int i, dring;
2311
2312 words[0] = le64_to_cpu(rxc->word1);
2313 words[1] = le64_to_cpu(rxc->word2);
2314 words[2] = le64_to_cpu(rxc->word3);
2315 words[3] = le64_to_cpu(rxc->word4);
2316
2317
2318 type = CAS_VAL(RX_COMP1_TYPE, words[0]);
2319 if (type == 0)
2320 break;
2321
2322
2323 if (words[3] & RX_COMP4_ZERO) {
2324 break;
2325 }
2326
2327
2328 if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
2329 spin_lock(&cp->stat_lock[ring]);
2330 cp->net_stats[ring].rx_errors++;
2331 if (words[3] & RX_COMP4_LEN_MISMATCH)
2332 cp->net_stats[ring].rx_length_errors++;
2333 if (words[3] & RX_COMP4_BAD)
2334 cp->net_stats[ring].rx_crc_errors++;
2335 spin_unlock(&cp->stat_lock[ring]);
2336
2337
2338 drop_it:
2339 spin_lock(&cp->stat_lock[ring]);
2340 ++cp->net_stats[ring].rx_dropped;
2341 spin_unlock(&cp->stat_lock[ring]);
2342 goto next;
2343 }
2344
2345 len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
2346 if (len < 0) {
2347 ++drops;
2348 goto drop_it;
2349 }
2350
2351
2352
2353
2354 if (RX_DONT_BATCH || (type == 0x2)) {
2355
2356 cas_skb_release(skb);
2357 } else {
2358 cas_rx_flow_pkt(cp, words, skb);
2359 }
2360
2361 spin_lock(&cp->stat_lock[ring]);
2362 cp->net_stats[ring].rx_packets++;
2363 cp->net_stats[ring].rx_bytes += len;
2364 spin_unlock(&cp->stat_lock[ring]);
2365
2366 next:
2367 npackets++;
2368
2369
2370 if (words[0] & RX_COMP1_RELEASE_HDR) {
2371 i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
2372 dring = CAS_VAL(RX_INDEX_RING, i);
2373 i = CAS_VAL(RX_INDEX_NUM, i);
2374 cas_post_page(cp, dring, i);
2375 }
2376
2377 if (words[0] & RX_COMP1_RELEASE_DATA) {
2378 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2379 dring = CAS_VAL(RX_INDEX_RING, i);
2380 i = CAS_VAL(RX_INDEX_NUM, i);
2381 cas_post_page(cp, dring, i);
2382 }
2383
2384 if (words[0] & RX_COMP1_RELEASE_NEXT) {
2385 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2386 dring = CAS_VAL(RX_INDEX_RING, i);
2387 i = CAS_VAL(RX_INDEX_NUM, i);
2388 cas_post_page(cp, dring, i);
2389 }
2390
2391
2392 entry = RX_COMP_ENTRY(ring, entry + 1 +
2393 CAS_VAL(RX_COMP1_SKIP, words[0]));
2394#ifdef USE_NAPI
2395 if (budget && (npackets >= budget))
2396 break;
2397#endif
2398 }
2399 cp->rx_new[ring] = entry;
2400
2401 if (drops)
2402 netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
2403 return npackets;
2404}
2405
2406
2407
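/* Return the completion entries we have consumed to the hardware: re-init
 * them and advance the completion tail pointer.
 */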
2408static void cas_post_rxcs_ringN(struct net_device *dev,
2409 struct cas *cp, int ring)
2410{
2411 struct cas_rx_comp *rxc = cp->init_rxcs[ring];
2412 int last, entry;
2413
2414 last = cp->rx_cur[ring];
2415 entry = cp->rx_new[ring];
2416 netif_printk(cp, intr, KERN_DEBUG, dev,
2417 "rxc[%d] interrupt, done: %d/%d\n",
2418 ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);
2419
2420
2421 while (last != entry) {
2422 cas_rxc_init(rxc + last);
2423 last = RX_COMP_ENTRY(ring, last + 1);
2424 }
2425 cp->rx_cur[ring] = last;
2426
2427 if (ring == 0)
2428 writel(last, cp->regs + REG_RX_COMP_TAIL);
2429 else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
2430 writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
2431}
2432
2433
2434
2435
2436
2437
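/* Handlers for the extra PCI interrupts (INTC/INTD) that Cassini+ can raise,
 * one per additional rx completion ring.  Only built when USE_PCI_INTC or
 * USE_PCI_INTD is defined.
 */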
2438#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
2439static inline void cas_handle_irqN(struct net_device *dev,
2440 struct cas *cp, const u32 status,
2441 const int ring)
2442{
2443 if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
2444 cas_post_rxcs_ringN(dev, cp, ring);
2445}
2446
2447static irqreturn_t cas_interruptN(int irq, void *dev_id)
2448{
2449 struct net_device *dev = dev_id;
2450 struct cas *cp = netdev_priv(dev);
2451 unsigned long flags;
2452 int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
2453 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
2454
2455
2456 if (status == 0)
2457 return IRQ_NONE;
2458
2459 spin_lock_irqsave(&cp->lock, flags);
2460 if (status & INTR_RX_DONE_ALT) {
2461#ifdef USE_NAPI
2462 cas_mask_intr(cp);
2463 napi_schedule(&cp->napi);
2464#else
2465 cas_rx_ringN(cp, ring, 0);
2466#endif
2467 status &= ~INTR_RX_DONE_ALT;
2468 }
2469
2470 if (status)
2471 cas_handle_irqN(dev, cp, status, ring);
2472 spin_unlock_irqrestore(&cp->lock, flags);
2473 return IRQ_HANDLED;
2474}
2475#endif
2476
2477#ifdef USE_PCI_INTB
2478
2479static inline void cas_handle_irq1(struct cas *cp, const u32 status)
2480{
2481 if (status & INTR_RX_BUF_UNAVAIL_1) {
2482
2483
2484 cas_post_rxds_ringN(cp, 1, 0);
2485 spin_lock(&cp->stat_lock[1]);
2486 cp->net_stats[1].rx_dropped++;
2487 spin_unlock(&cp->stat_lock[1]);
2488 }
2489
2490 if (status & INTR_RX_BUF_AE_1)
2491 cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
2492 RX_AE_FREEN_VAL(1));
2493
2494 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2495 cas_post_rxcs_ringN(cp->dev, cp, 1);
2496}
2497
2498
2499static irqreturn_t cas_interrupt1(int irq, void *dev_id)
2500{
2501 struct net_device *dev = dev_id;
2502 struct cas *cp = netdev_priv(dev);
2503 unsigned long flags;
2504 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2505
2506
2507 if (status == 0)
2508 return IRQ_NONE;
2509
2510 spin_lock_irqsave(&cp->lock, flags);
2511 if (status & INTR_RX_DONE_ALT) {
2512#ifdef USE_NAPI
2513 cas_mask_intr(cp);
2514 napi_schedule(&cp->napi);
2515#else
2516 cas_rx_ringN(cp, 1, 0);
2517#endif
2518 status &= ~INTR_RX_DONE_ALT;
2519 }
2520 if (status)
2521 cas_handle_irq1(cp, status);
2522 spin_unlock_irqrestore(&cp->lock, flags);
2523 return IRQ_HANDLED;
2524}
2525#endif
2526
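/* Non-packet interrupt causes on the primary interrupt: abnormal errors,
 * rx buffer exhaustion / almost-empty, and completion ring almost-full.
 */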
2527static inline void cas_handle_irq(struct net_device *dev,
2528 struct cas *cp, const u32 status)
2529{
2530
2531 if (status & INTR_ERROR_MASK)
2532 cas_abnormal_irq(dev, cp, status);
2533
2534 if (status & INTR_RX_BUF_UNAVAIL) {
2535
2536
2537
2538 cas_post_rxds_ringN(cp, 0, 0);
2539 spin_lock(&cp->stat_lock[0]);
2540 cp->net_stats[0].rx_dropped++;
2541 spin_unlock(&cp->stat_lock[0]);
2542 } else if (status & INTR_RX_BUF_AE) {
2543 cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
2544 RX_AE_FREEN_VAL(0));
2545 }
2546
2547 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2548 cas_post_rxcs_ringN(dev, cp, 0);
2549}
2550
2551static irqreturn_t cas_interrupt(int irq, void *dev_id)
2552{
2553 struct net_device *dev = dev_id;
2554 struct cas *cp = netdev_priv(dev);
2555 unsigned long flags;
2556 u32 status = readl(cp->regs + REG_INTR_STATUS);
2557
2558 if (status == 0)
2559 return IRQ_NONE;
2560
2561 spin_lock_irqsave(&cp->lock, flags);
2562 if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
2563 cas_tx(dev, cp, status);
2564 status &= ~(INTR_TX_ALL | INTR_TX_INTME);
2565 }
2566
2567 if (status & INTR_RX_DONE) {
2568#ifdef USE_NAPI
2569 cas_mask_intr(cp);
2570 napi_schedule(&cp->napi);
2571#else
2572 cas_rx_ringN(cp, 0, 0);
2573#endif
2574 status &= ~INTR_RX_DONE;
2575 }
2576
2577 if (status)
2578 cas_handle_irq(dev, cp, status);
2579 spin_unlock_irqrestore(&cp->lock, flags);
2580 return IRQ_HANDLED;
2581}
2582
2583
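/* NAPI poll: reap tx completions, then give each rx completion ring a slice
 * of the budget.  Interrupts are re-enabled and the poll completed only if we
 * finish under budget.
 */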
2584#ifdef USE_NAPI
2585static int cas_poll(struct napi_struct *napi, int budget)
2586{
2587 struct cas *cp = container_of(napi, struct cas, napi);
2588 struct net_device *dev = cp->dev;
2589 int i, enable_intr, credits;
2590 u32 status = readl(cp->regs + REG_INTR_STATUS);
2591 unsigned long flags;
2592
2593 spin_lock_irqsave(&cp->lock, flags);
2594 cas_tx(dev, cp, status);
2595 spin_unlock_irqrestore(&cp->lock, flags);
2596
2597
2598
2599
2600
2601
2602
2603
2604 enable_intr = 1;
2605 credits = 0;
2606 for (i = 0; i < N_RX_COMP_RINGS; i++) {
2607 int j;
2608 for (j = 0; j < N_RX_COMP_RINGS; j++) {
2609 credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
2610 if (credits >= budget) {
2611 enable_intr = 0;
2612 goto rx_comp;
2613 }
2614 }
2615 }
2616
2617rx_comp:
2618
2619 spin_lock_irqsave(&cp->lock, flags);
2620 if (status)
2621 cas_handle_irq(dev, cp, status);
2622
2623#ifdef USE_PCI_INTB
2624 if (N_RX_COMP_RINGS > 1) {
2625 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2626 if (status)
2627 cas_handle_irq1(cp, status);
2628 }
2629#endif
2630
2631#ifdef USE_PCI_INTC
2632 if (N_RX_COMP_RINGS > 2) {
2633 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
2634 if (status)
2635 cas_handle_irqN(dev, cp, status, 2);
2636 }
2637#endif
2638
2639#ifdef USE_PCI_INTD
2640 if (N_RX_COMP_RINGS > 3) {
2641 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
2642 if (status)
2643 cas_handle_irqN(dev, cp, status, 3);
2644 }
2645#endif
2646 spin_unlock_irqrestore(&cp->lock, flags);
2647 if (enable_intr) {
2648 napi_complete(napi);
2649 cas_unmask_intr(cp);
2650 }
2651 return credits;
2652}
2653#endif
2654
2655#ifdef CONFIG_NET_POLL_CONTROLLER
2656static void cas_netpoll(struct net_device *dev)
2657{
2658 struct cas *cp = netdev_priv(dev);
2659
2660 cas_disable_irq(cp, 0);
2661 cas_interrupt(cp->pdev->irq, dev);
2662 cas_enable_irq(cp, 0);
2663
2664#ifdef USE_PCI_INTB
2665 if (N_RX_COMP_RINGS > 1) {
2666
2667 }
2668#endif
2669#ifdef USE_PCI_INTC
2670 if (N_RX_COMP_RINGS > 2) {
2671
2672 }
2673#endif
2674#ifdef USE_PCI_INTD
2675 if (N_RX_COMP_RINGS > 3) {
2676
2677 }
2678#endif
2679}
2680#endif
2681
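/* Tx watchdog: dump the MAC, tx/rx and header-parser state machines and
 * schedule a full chip reset from process context.
 */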
2682static void cas_tx_timeout(struct net_device *dev, unsigned int txqueue)
2683{
2684 struct cas *cp = netdev_priv(dev);
2685
2686 netdev_err(dev, "transmit timed out, resetting\n");
2687 if (!cp->hw_running) {
2688 netdev_err(dev, "hrm.. hw not running!\n");
2689 return;
2690 }
2691
2692 netdev_err(dev, "MIF_STATE[%08x]\n",
2693 readl(cp->regs + REG_MIF_STATE_MACHINE));
2694
2695 netdev_err(dev, "MAC_STATE[%08x]\n",
2696 readl(cp->regs + REG_MAC_STATE_MACHINE));
2697
2698 netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
2699 readl(cp->regs + REG_TX_CFG),
2700 readl(cp->regs + REG_MAC_TX_STATUS),
2701 readl(cp->regs + REG_MAC_TX_CFG),
2702 readl(cp->regs + REG_TX_FIFO_PKT_CNT),
2703 readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
2704 readl(cp->regs + REG_TX_FIFO_READ_PTR),
2705 readl(cp->regs + REG_TX_SM_1),
2706 readl(cp->regs + REG_TX_SM_2));
2707
2708 netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
2709 readl(cp->regs + REG_RX_CFG),
2710 readl(cp->regs + REG_MAC_RX_STATUS),
2711 readl(cp->regs + REG_MAC_RX_CFG));
2712
2713 netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n",
2714 readl(cp->regs + REG_HP_STATE_MACHINE),
2715 readl(cp->regs + REG_HP_STATUS0),
2716 readl(cp->regs + REG_HP_STATUS1),
2717 readl(cp->regs + REG_HP_STATUS2));
2718
2719#if 1
2720 atomic_inc(&cp->reset_task_pending);
2721 atomic_inc(&cp->reset_task_pending_all);
2722 schedule_work(&cp->reset_task);
2723#else
2724 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
2725 schedule_work(&cp->reset_task);
2726#endif
2727}
2728
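/* Ask for a tx-completion interrupt only when the descriptor index crosses a
 * half-ring boundary, to keep the interrupt rate down.
 */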
2729static inline int cas_intme(int ring, int entry)
2730{
2731
2732 if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)))
2733 return 1;
2734 return 0;
2735}
2736
2737
2738static void cas_write_txd(struct cas *cp, int ring, int entry,
2739 dma_addr_t mapping, int len, u64 ctrl, int last)
2740{
2741 struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
2742
2743 ctrl |= CAS_BASE(TX_DESC_BUFLEN, len);
2744 if (cas_intme(ring, entry))
2745 ctrl |= TX_DESC_INTME;
2746 if (last)
2747 ctrl |= TX_DESC_EOF;
2748 txd->control = cpu_to_le64(ctrl);
2749 txd->buffer = cpu_to_le64(mapping);
2750}
2751
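/* "Tiny" tx bounce buffers: a transmit buffer that ends too close to a page
 * boundary can trigger a target abort on this hardware, so cas_calc_tabort()
 * detects the case and the tail of the offending piece is copied into a small
 * per-ring DMA area and sent from there instead.
 */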
2752static inline void *tx_tiny_buf(struct cas *cp, const int ring,
2753 const int entry)
2754{
2755 return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
2756}
2757
2758static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
2759 const int entry, const int tentry)
2760{
2761 cp->tx_tiny_use[ring][tentry].nbufs++;
2762 cp->tx_tiny_use[ring][entry].used = 1;
2763 return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
2764}
2765
2766static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2767 struct sk_buff *skb)
2768{
2769 struct net_device *dev = cp->dev;
2770 int entry, nr_frags, frag, tabort, tentry;
2771 dma_addr_t mapping;
2772 unsigned long flags;
2773 u64 ctrl;
2774 u32 len;
2775
2776 spin_lock_irqsave(&cp->tx_lock[ring], flags);
2777
2778
2779 if (TX_BUFFS_AVAIL(cp, ring) <=
2780 CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
2781 netif_stop_queue(dev);
2782 spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2783 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
2784 return 1;
2785 }
2786
2787 ctrl = 0;
2788 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2789 const u64 csum_start_off = skb_checksum_start_offset(skb);
2790 const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
2791
2792 ctrl = TX_DESC_CSUM_EN |
2793 CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
2794 CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
2795 }
2796
2797 entry = cp->tx_new[ring];
2798 cp->tx_skbs[ring][entry] = skb;
2799
2800 nr_frags = skb_shinfo(skb)->nr_frags;
2801 len = skb_headlen(skb);
2802 mapping = pci_map_page(cp->pdev, virt_to_page(skb->data),
2803 offset_in_page(skb->data), len,
2804 PCI_DMA_TODEVICE);
2805
2806 tentry = entry;
2807 tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
2808 if (unlikely(tabort)) {
2809
2810 cas_write_txd(cp, ring, entry, mapping, len - tabort,
2811 ctrl | TX_DESC_SOF, 0);
2812 entry = TX_DESC_NEXT(ring, entry);
2813
2814 skb_copy_from_linear_data_offset(skb, len - tabort,
2815 tx_tiny_buf(cp, ring, entry), tabort);
2816 mapping = tx_tiny_map(cp, ring, entry, tentry);
2817 cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
2818 (nr_frags == 0));
2819 } else {
2820 cas_write_txd(cp, ring, entry, mapping, len, ctrl |
2821 TX_DESC_SOF, (nr_frags == 0));
2822 }
2823 entry = TX_DESC_NEXT(ring, entry);
2824
2825 for (frag = 0; frag < nr_frags; frag++) {
2826 const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
2827
2828 len = skb_frag_size(fragp);
2829 mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
2830 DMA_TO_DEVICE);
2831
2832 tabort = cas_calc_tabort(cp, fragp->page_offset, len);
2833 if (unlikely(tabort)) {
2834 void *addr;
2835
2836
2837 cas_write_txd(cp, ring, entry, mapping, len - tabort,
2838 ctrl, 0);
2839 entry = TX_DESC_NEXT(ring, entry);
2840
2841 addr = cas_page_map(skb_frag_page(fragp));
2842 memcpy(tx_tiny_buf(cp, ring, entry),
2843 addr + fragp->page_offset + len - tabort,
2844 tabort);
2845 cas_page_unmap(addr);
2846 mapping = tx_tiny_map(cp, ring, entry, tentry);
2847 len = tabort;
2848 }
2849
2850 cas_write_txd(cp, ring, entry, mapping, len, ctrl,
2851 (frag + 1 == nr_frags));
2852 entry = TX_DESC_NEXT(ring, entry);
2853 }
2854
2855 cp->tx_new[ring] = entry;
2856 if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
2857 netif_stop_queue(dev);
2858
2859 netif_printk(cp, tx_queued, KERN_DEBUG, dev,
2860 "tx[%d] queued, slot %d, skblen %d, avail %d\n",
2861 ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
2862 writel(entry, cp->regs + REG_TX_KICKN(ring));
2863 spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2864 return 0;
2865}
2866
2867static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
2868{
2869 struct cas *cp = netdev_priv(dev);
2870
2871
2872
2873
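/* Rings are picked round-robin; this counter is only a load-spreading hint,
 * so it needs no locking.
 */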
2874 static int ring;
2875
2876 if (skb_padto(skb, cp->min_frame_size))
2877 return NETDEV_TX_OK;
2878
2879
2880
2881
2882 if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
2883 return NETDEV_TX_BUSY;
2884 return NETDEV_TX_OK;
2885}
2886
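/* Program the tx DMA engine: point the completion write-back block and each
 * tx descriptor ring at its slot in the init block, then set the per-ring
 * maximum burst sizes.
 */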
2887static void cas_init_tx_dma(struct cas *cp)
2888{
2889 u64 desc_dma = cp->block_dvma;
2890 unsigned long off;
2891 u32 val;
2892 int i;
2893
2894
2895#ifdef USE_TX_COMPWB
2896 off = offsetof(struct cas_init_block, tx_compwb);
2897 writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
2898 writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
2899#endif
2900
2901
2902
2903
2904 val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
2905 TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
2906 TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
2907 TX_CFG_INTR_COMPWB_DIS;
2908
2909
2910 for (i = 0; i < MAX_TX_RINGS; i++) {
2911 off = (unsigned long) cp->init_txds[i] -
2912 (unsigned long) cp->init_block;
2913
2914 val |= CAS_TX_RINGN_BASE(i);
2915 writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
2916 writel((desc_dma + off) & 0xffffffff, cp->regs +
2917 REG_TX_DBN_LOW(i));
2918
2919
2920
2921 }
2922 writel(val, cp->regs + REG_TX_CFG);
2923
2924
2925
2926
2927#ifdef USE_QOS
2928 writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2929 writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
2930 writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
2931 writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
2932#else
2933 writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2934 writel(0x800, cp->regs + REG_TX_MAXBURST_1);
2935 writel(0x800, cp->regs + REG_TX_MAXBURST_2);
2936 writel(0x800, cp->regs + REG_TX_MAXBURST_3);
2937#endif
2938}
2939
2940
2941static inline void cas_init_dma(struct cas *cp)
2942{
2943 cas_init_tx_dma(cp);
2944 cas_init_rx_dma(cp);
2945}
2946
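/* Build the multicast filter: the first CAS_MC_EXACT_MATCH_SIZE addresses use
 * the exact-match address registers, the remainder are folded into the
 * 256-bit hash table via the top byte of their little-endian CRC.
 */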
2947static void cas_process_mc_list(struct cas *cp)
2948{
2949 u16 hash_table[16];
2950 u32 crc;
2951 struct netdev_hw_addr *ha;
2952 int i = 1;
2953
2954 memset(hash_table, 0, sizeof(hash_table));
2955 netdev_for_each_mc_addr(ha, cp->dev) {
2956 if (i <= CAS_MC_EXACT_MATCH_SIZE) {
2957
2958
2959
2960 writel((ha->addr[4] << 8) | ha->addr[5],
2961 cp->regs + REG_MAC_ADDRN(i*3 + 0));
2962 writel((ha->addr[2] << 8) | ha->addr[3],
2963 cp->regs + REG_MAC_ADDRN(i*3 + 1));
2964 writel((ha->addr[0] << 8) | ha->addr[1],
2965 cp->regs + REG_MAC_ADDRN(i*3 + 2));
2966 i++;
2967 }
2968 else {
2969
2970
2971
2972 crc = ether_crc_le(ETH_ALEN, ha->addr);
2973 crc >>= 24;
2974 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
2975 }
2976 }
2977 for (i = 0; i < 16; i++)
2978 writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i));
2979}
2980
2981
2982static u32 cas_setup_multicast(struct cas *cp)
2983{
2984 u32 rxcfg = 0;
2985 int i;
2986
2987 if (cp->dev->flags & IFF_PROMISC) {
2988 rxcfg |= MAC_RX_CFG_PROMISC_EN;
2989
2990 } else if (cp->dev->flags & IFF_ALLMULTI) {
2991 for (i=0; i < 16; i++)
2992 writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
2993 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
2994
2995 } else {
2996 cas_process_mc_list(cp);
2997 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
2998 }
2999
3000 return rxcfg;
3001}
3002
3003
3004static void cas_clear_mac_err(struct cas *cp)
3005{
3006 writel(0, cp->regs + REG_MAC_COLL_NORMAL);
3007 writel(0, cp->regs + REG_MAC_COLL_FIRST);
3008 writel(0, cp->regs + REG_MAC_COLL_EXCESS);
3009 writel(0, cp->regs + REG_MAC_COLL_LATE);
3010 writel(0, cp->regs + REG_MAC_TIMER_DEFER);
3011 writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK);
3012 writel(0, cp->regs + REG_MAC_RECV_FRAME);
3013 writel(0, cp->regs + REG_MAC_LEN_ERR);
3014 writel(0, cp->regs + REG_MAC_ALIGN_ERR);
3015 writel(0, cp->regs + REG_MAC_FCS_ERR);
3016 writel(0, cp->regs + REG_MAC_RX_CODE_ERR);
3017}
3018
3019
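/* Reset the tx and rx MAC sections and wait for the self-clearing reset bits
 * to drop, logging an error if they never do.
 */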
3020static void cas_mac_reset(struct cas *cp)
3021{
3022 int i;
3023
3024
3025 writel(0x1, cp->regs + REG_MAC_TX_RESET);
3026 writel(0x1, cp->regs + REG_MAC_RX_RESET);
3027
3028
3029 i = STOP_TRIES;
3030 while (i-- > 0) {
3031 if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
3032 break;
3033 udelay(10);
3034 }
3035
3036
3037 i = STOP_TRIES;
3038 while (i-- > 0) {
3039 if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
3040 break;
3041 udelay(10);
3042 }
3043
3044 if (readl(cp->regs + REG_MAC_TX_RESET) |
3045 readl(cp->regs + REG_MAC_RX_RESET))
3046 netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n",
3047 readl(cp->regs + REG_MAC_TX_RESET),
3048 readl(cp->regs + REG_MAC_RX_RESET),
3049 readl(cp->regs + REG_MAC_STATE_MACHINE));
3050}
3051
3052
3053
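/* Bring the MAC to a known state: reset it, set inter-packet gaps, frame size
 * limits, station and control-frame addresses, the multicast filter and the
 * MAC interrupt masks.
 */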
3054static void cas_init_mac(struct cas *cp)
3055{
3056 unsigned char *e = &cp->dev->dev_addr[0];
3057 int i;
3058 cas_mac_reset(cp);
3059
3060
3061 writel(CAWR_RR_DIS, cp->regs + REG_CAWR);
3062
3063#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
3064
3065
3066
3067 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
3068 writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
3069#endif
3070
3071 writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);
3072
3073 writel(0x00, cp->regs + REG_MAC_IPG0);
3074 writel(0x08, cp->regs + REG_MAC_IPG1);
3075 writel(0x04, cp->regs + REG_MAC_IPG2);
3076
3077
3078 writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3079
3080
3081 writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);
3082
3083
3084
3085
3086
3087 writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
3088 CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
3089 (CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
3090 cp->regs + REG_MAC_FRAMESIZE_MAX);
3091
3092
3093
3094
3095
3096 if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
3097 writel(0x41, cp->regs + REG_MAC_PA_SIZE);
3098 else
3099 writel(0x07, cp->regs + REG_MAC_PA_SIZE);
3100 writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
3101 writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
3102 writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);
3103
3104 writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);
3105
3106 writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
3107 writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
3108 writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
3109 writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
3110 writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);
3111
3112
3113 for (i = 0; i < 45; i++)
3114 writel(0x0, cp->regs + REG_MAC_ADDRN(i));
3115
3116 writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
3117 writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
3118 writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));
3119
3120 writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
3121 writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
3122 writel(0x0180, cp->regs + REG_MAC_ADDRN(44));
3123
3124 cp->mac_rx_cfg = cas_setup_multicast(cp);
3125
3126 spin_lock(&cp->stat_lock[N_TX_RINGS]);
3127 cas_clear_mac_err(cp);
3128 spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3129
3130
3131
3132
3133
3134 writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
3135 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
3136
3137
3138
3139
3140 writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
3141}
3142
3143
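/* Derive the rx FIFO fill levels used for generating flow-control pause
 * frames from the FIFO size and the current MTU.
 */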
3144static void cas_init_pause_thresholds(struct cas *cp)
3145{
3146
3147
3148
3149 if (cp->rx_fifo_size <= (2 * 1024)) {
3150 cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
3151 } else {
3152 int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
3153 if (max_frame * 3 > cp->rx_fifo_size) {
3154 cp->rx_pause_off = 7104;
3155 cp->rx_pause_on = 960;
3156 } else {
3157 int off = (cp->rx_fifo_size - (max_frame * 2));
3158 int on = off - max_frame;
3159 cp->rx_pause_off = off;
3160 cp->rx_pause_on = on;
3161 }
3162 }
3163}
3164
3165static int cas_vpd_match(const void __iomem *p, const char *str)
3166{
3167 int len = strlen(str) + 1;
3168 int i;
3169
3170 for (i = 0; i < len; i++) {
3171 if (readb(p + i) != str[i])
3172 return 0;
3173 }
3174 return 1;
3175}
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
3187
3188
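/* Scan the expansion ROM's PCI data structure and VPD area for a
 * "local-mac-address" entry (the one selected by @offset) and a
 * "phy-type"/"phy-interface" entry telling us whether the board is wired for
 * MII or for the PCS/SERDES interface.  Falls back to the OF property on
 * sparc, or to a random address with Sun's 08:00:20 prefix.  Returns the
 * detected phy type.
 */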
3189static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
3190 const int offset)
3191{
3192 void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
3193 void __iomem *base, *kstart;
3194 int i, len;
3195 int found = 0;
3196#define VPD_FOUND_MAC 0x01
3197#define VPD_FOUND_PHY 0x02
3198
3199 int phy_type = CAS_PHY_MII_MDIO0;
3200 int mac_off = 0;
3201
3202#if defined(CONFIG_SPARC)
3203 const unsigned char *addr;
3204#endif
3205
3206
3207 writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
3208 cp->regs + REG_BIM_LOCAL_DEV_EN);
3209
3210
3211 if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
3212 goto use_random_mac_addr;
3213
3214
3215 base = NULL;
3216 for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
3217
3218 if ((readb(p + i + 0) == 0x50) &&
3219 (readb(p + i + 1) == 0x43) &&
3220 (readb(p + i + 2) == 0x49) &&
3221 (readb(p + i + 3) == 0x52)) {
3222 base = p + (readb(p + i + 8) |
3223 (readb(p + i + 9) << 8));
3224 break;
3225 }
3226 }
3227
3228 if (!base || (readb(base) != 0x82))
3229 goto use_random_mac_addr;
3230
3231 i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
3232 while (i < EXPANSION_ROM_SIZE) {
3233 if (readb(base + i) != 0x90)
3234 goto use_random_mac_addr;
3235
3236
3237 len = readb(base + i + 1) | (readb(base + i + 2) << 8);
3238
3239
3240 kstart = base + i + 3;
3241 p = kstart;
3242 while ((p - kstart) < len) {
3243 int klen = readb(p + 2);
3244 int j;
3245 char type;
3246
3247 p += 3;
3248
3249
3250
3251
3252
3253
3254
3255
3256
3257
3258
3259
3260
3261
3262
3263
3264
3265
3266
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
3279
3280
3281
3282
3283
3284
3285
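/* Only 'I' (instance) records matter here; their payload type byte is 'B'
 * for byte data such as "local-mac-address" or 'S' for strings such as
 * "phy-type" and "phy-interface".
 */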
3286 if (readb(p) != 'I')
3287 goto next;
3288
3289
3290 type = readb(p + 3);
3291 if (type == 'B') {
3292 if ((klen == 29) && readb(p + 4) == 6 &&
3293 cas_vpd_match(p + 5,
3294 "local-mac-address")) {
3295 if (mac_off++ > offset)
3296 goto next;
3297
3298
3299 for (j = 0; j < 6; j++)
3300 dev_addr[j] =
3301 readb(p + 23 + j);
3302 goto found_mac;
3303 }
3304 }
3305
3306 if (type != 'S')
3307 goto next;
3308
3309#ifdef USE_ENTROPY_DEV
3310 if ((klen == 24) &&
3311 cas_vpd_match(p + 5, "entropy-dev") &&
3312 cas_vpd_match(p + 17, "vms110")) {
3313 cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
3314 goto next;
3315 }
3316#endif
3317
3318 if (found & VPD_FOUND_PHY)
3319 goto next;
3320
3321 if ((klen == 18) && readb(p + 4) == 4 &&
3322 cas_vpd_match(p + 5, "phy-type")) {
3323 if (cas_vpd_match(p + 14, "pcs")) {
3324 phy_type = CAS_PHY_SERDES;
3325 goto found_phy;
3326 }
3327 }
3328
3329 if ((klen == 23) && readb(p + 4) == 4 &&
3330 cas_vpd_match(p + 5, "phy-interface")) {
3331 if (cas_vpd_match(p + 19, "pcs")) {
3332 phy_type = CAS_PHY_SERDES;
3333 goto found_phy;
3334 }
3335 }
3336found_mac:
3337 found |= VPD_FOUND_MAC;
3338 goto next;
3339
3340found_phy:
3341 found |= VPD_FOUND_PHY;
3342
3343next:
3344 p += klen;
3345 }
3346 i += len + 3;
3347 }
3348
3349use_random_mac_addr:
3350 if (found & VPD_FOUND_MAC)
3351 goto done;
3352
3353#if defined(CONFIG_SPARC)
3354 addr = of_get_property(cp->of_node, "local-mac-address", NULL);
3355 if (addr != NULL) {
3356 memcpy(dev_addr, addr, ETH_ALEN);
3357 goto done;
3358 }
3359#endif
3360
3361
3362 pr_info("MAC address not found in ROM VPD\n");
3363 dev_addr[0] = 0x08;
3364 dev_addr[1] = 0x00;
3365 dev_addr[2] = 0x20;
3366 get_random_bytes(dev_addr + 3, 3);
3367
3368done:
3369 writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3370 return phy_type;
3371}
3372
3373
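/* Work out which flavour of the chip we have: original Cassini revisions
 * (with the checksum and target-abort errata keyed off the revision id)
 * versus Cassini+ and the National Semiconductor Saturn, which get
 * CAS_FLAG_REG_PLUS.
 */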
3374static void cas_check_pci_invariants(struct cas *cp)
3375{
3376 struct pci_dev *pdev = cp->pdev;
3377
3378 cp->cas_flags = 0;
3379 if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
3380 (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
3381 if (pdev->revision >= CAS_ID_REVPLUS)
3382 cp->cas_flags |= CAS_FLAG_REG_PLUS;
3383 if (pdev->revision < CAS_ID_REVPLUS02u)
3384 cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
3385
3386
3387
3388
3389 if (pdev->revision < CAS_ID_REV2)
3390 cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
3391 } else {
3392
3393 cp->cas_flags |= CAS_FLAG_REG_PLUS;
3394
3395
3396
3397
3398 if ((pdev->vendor == PCI_VENDOR_ID_NS) &&
3399 (pdev->device == PCI_DEVICE_ID_NS_SATURN))
3400 cp->cas_flags |= CAS_FLAG_SATURN;
3401 }
3402}
3403
3404
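/* Probe-time discovery: choose the rx page order, read the FIFO sizes, pull
 * the MAC address and phy type out of VPD, and scan the MII bus for a phy
 * unless the SERDES interface is in use.
 */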
3405static int cas_check_invariants(struct cas *cp)
3406{
3407 struct pci_dev *pdev = cp->pdev;
3408 u32 cfg;
3409 int i;
3410
3411
3412 cp->page_order = 0;
3413#ifdef USE_PAGE_ORDER
3414 if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
3415
3416 struct page *page = alloc_pages(GFP_ATOMIC,
3417 CAS_JUMBO_PAGE_SHIFT -
3418 PAGE_SHIFT);
3419 if (page) {
3420 __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
3421 cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
3422 } else {
3423 printk("MTU limited to %d bytes\n", CAS_MAX_MTU);
3424 }
3425 }
3426#endif
3427 cp->page_size = (PAGE_SIZE << cp->page_order);
3428
3429
3430 cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
3431 cp->rx_fifo_size = RX_FIFO_SIZE;
3432
3433
3434
3435
3436 cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
3437 PCI_SLOT(pdev->devfn));
3438 if (cp->phy_type & CAS_PHY_SERDES) {
3439 cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3440 return 0;
3441 }
3442
3443
3444 cfg = readl(cp->regs + REG_MIF_CFG);
3445 if (cfg & MIF_CFG_MDIO_1) {
3446 cp->phy_type = CAS_PHY_MII_MDIO1;
3447 } else if (cfg & MIF_CFG_MDIO_0) {
3448 cp->phy_type = CAS_PHY_MII_MDIO0;
3449 }
3450
3451 cas_mif_poll(cp, 0);
3452 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3453
3454 for (i = 0; i < 32; i++) {
3455 u32 phy_id;
3456 int j;
3457
3458 for (j = 0; j < 3; j++) {
3459 cp->phy_addr = i;
3460 phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
3461 phy_id |= cas_phy_read(cp, MII_PHYSID2);
3462 if (phy_id && (phy_id != 0xFFFFFFFF)) {
3463 cp->phy_id = phy_id;
3464 goto done;
3465 }
3466 }
3467 }
3468 pr_err("MII phy did not respond [%08x]\n",
3469 readl(cp->regs + REG_MIF_STATE_MACHINE));
3470 return -1;
3471
3472done:
3473
3474 cfg = cas_phy_read(cp, MII_BMSR);
3475 if ((cfg & CAS_BMSR_1000_EXTEND) &&
3476 cas_phy_read(cp, CAS_MII_1000_EXTEND))
3477 cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3478 return 0;
3479}
3480
3481
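/* Enable the tx/rx DMA engines and the MAC, wait for the MAC enables to take
 * effect, then prime the rx descriptor kick and completion tail registers.
 */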
3482static inline void cas_start_dma(struct cas *cp)
3483{
3484 int i;
3485 u32 val;
3486 int txfailed = 0;
3487
3488
3489 val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
3490 writel(val, cp->regs + REG_TX_CFG);
3491 val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
3492 writel(val, cp->regs + REG_RX_CFG);
3493
3494
3495 val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
3496 writel(val, cp->regs + REG_MAC_TX_CFG);
3497 val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
3498 writel(val, cp->regs + REG_MAC_RX_CFG);
3499
3500 i = STOP_TRIES;
3501 while (i-- > 0) {
3502 val = readl(cp->regs + REG_MAC_TX_CFG);
3503 if ((val & MAC_TX_CFG_EN))
3504 break;
3505 udelay(10);
3506 }
3507 if (i < 0) txfailed = 1;
3508 i = STOP_TRIES;
3509 while (i-- > 0) {
3510 val = readl(cp->regs + REG_MAC_RX_CFG);
3511 if ((val & MAC_RX_CFG_EN)) {
3512 if (txfailed) {
3513 netdev_err(cp->dev,
3514 "enabling mac failed [tx:%08x:%08x]\n",
3515 readl(cp->regs + REG_MIF_STATE_MACHINE),
3516 readl(cp->regs + REG_MAC_STATE_MACHINE));
3517 }
3518 goto enable_rx_done;
3519 }
3520 udelay(10);
3521 }
3522 netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
3523 (txfailed ? "tx,rx" : "rx"),
3524 readl(cp->regs + REG_MIF_STATE_MACHINE),
3525 readl(cp->regs + REG_MAC_STATE_MACHINE));
3526
3527enable_rx_done:
3528 cas_unmask_intr(cp);
3529 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
3530 writel(0, cp->regs + REG_RX_COMP_TAIL);
3531
3532 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
3533 if (N_RX_DESC_RINGS > 1)
3534 writel(RX_DESC_RINGN_SIZE(1) - 4,
3535 cp->regs + REG_PLUS_RX_KICK1);
3536
3537 for (i = 1; i < N_RX_COMP_RINGS; i++)
3538 writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
3539 }
3540}
3541
3542
3543static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
3544 int *pause)
3545{
3546 u32 val = readl(cp->regs + REG_PCS_MII_LPA);
3547 *fd = (val & PCS_MII_LPA_FD) ? 1 : 0;
3548 *pause = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00;
3549 if (val & PCS_MII_LPA_ASYM_PAUSE)
3550 *pause |= 0x10;
3551 *spd = 1000;
3552}
3553
3554
3555static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
3556 int *pause)
3557{
3558 u32 val;
3559
3560 *fd = 0;
3561 *spd = 10;
3562 *pause = 0;
3563
3564
3565 val = cas_phy_read(cp, MII_LPA);
3566 if (val & CAS_LPA_PAUSE)
3567 *pause = 0x01;
3568
3569 if (val & CAS_LPA_ASYM_PAUSE)
3570 *pause |= 0x10;
3571
3572 if (val & LPA_DUPLEX)
3573 *fd = 1;
3574 if (val & LPA_100)
3575 *spd = 100;
3576
3577 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
3578 val = cas_phy_read(cp, CAS_MII_1000_STATUS);
3579 if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF))
3580 *spd = 1000;
3581 if (val & CAS_LPA_1000FULL)
3582 *fd = 1;
3583 }
3584}
3585
3586
3587
3588
3589
3590
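/* The link is up: read back the negotiated (or forced) speed, duplex and
 * pause parameters and program the XIF, tx/rx MAC and flow-control registers
 * to match, then start DMA.
 */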
3591static void cas_set_link_modes(struct cas *cp)
3592{
3593 u32 val;
3594 int full_duplex, speed, pause;
3595
3596 full_duplex = 0;
3597 speed = 10;
3598 pause = 0;
3599
3600 if (CAS_PHY_MII(cp->phy_type)) {
3601 cas_mif_poll(cp, 0);
3602 val = cas_phy_read(cp, MII_BMCR);
3603 if (val & BMCR_ANENABLE) {
3604 cas_read_mii_link_mode(cp, &full_duplex, &speed,
3605 &pause);
3606 } else {
3607 if (val & BMCR_FULLDPLX)
3608 full_duplex = 1;
3609
3610 if (val & BMCR_SPEED100)
3611 speed = 100;
3612 else if (val & CAS_BMCR_SPEED1000)
3613 speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
3614 1000 : 100;
3615 }
3616 cas_mif_poll(cp, 1);
3617
3618 } else {
3619 val = readl(cp->regs + REG_PCS_MII_CTRL);
3620 cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
3621 if ((val & PCS_MII_AUTONEG_EN) == 0) {
3622 if (val & PCS_MII_CTRL_DUPLEX)
3623 full_duplex = 1;
3624 }
3625 }
3626
3627 netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
3628 speed, full_duplex ? "full" : "half");
3629
3630 val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
3631 if (CAS_PHY_MII(cp->phy_type)) {
3632 val |= MAC_XIF_MII_BUFFER_OUTPUT_EN;
3633 if (!full_duplex)
3634 val |= MAC_XIF_DISABLE_ECHO;
3635 }
3636 if (full_duplex)
3637 val |= MAC_XIF_FDPLX_LED;
3638 if (speed == 1000)
3639 val |= MAC_XIF_GMII_MODE;
3640 writel(val, cp->regs + REG_MAC_XIF_CFG);
3641
3642
3643 val = MAC_TX_CFG_IPG_EN;
3644 if (full_duplex) {
3645 val |= MAC_TX_CFG_IGNORE_CARRIER;
3646 val |= MAC_TX_CFG_IGNORE_COLL;
3647 } else {
3648#ifndef USE_CSMA_CD_PROTO
3649 val |= MAC_TX_CFG_NEVER_GIVE_UP_EN;
3650 val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM;
3651#endif
3652 }
3653
3654
3655
3656
3657
3658
3659
3660 if ((speed == 1000) && !full_duplex) {
3661 writel(val | MAC_TX_CFG_CARRIER_EXTEND,
3662 cp->regs + REG_MAC_TX_CFG);
3663
3664 val = readl(cp->regs + REG_MAC_RX_CFG);
3665 val &= ~MAC_RX_CFG_STRIP_FCS;
3666 writel(val | MAC_RX_CFG_CARRIER_EXTEND,
3667 cp->regs + REG_MAC_RX_CFG);
3668
3669 writel(0x200, cp->regs + REG_MAC_SLOT_TIME);
3670
3671 cp->crc_size = 4;
3672
3673 cp->min_frame_size = CAS_1000MB_MIN_FRAME;
3674
3675 } else {
3676 writel(val, cp->regs + REG_MAC_TX_CFG);
3677
3678
3679
3680
3681 val = readl(cp->regs + REG_MAC_RX_CFG);
3682 if (full_duplex) {
3683 val |= MAC_RX_CFG_STRIP_FCS;
3684 cp->crc_size = 0;
3685 cp->min_frame_size = CAS_MIN_MTU;
3686 } else {
3687 val &= ~MAC_RX_CFG_STRIP_FCS;
3688 cp->crc_size = 4;
3689 cp->min_frame_size = CAS_MIN_FRAME;
3690 }
3691 writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
3692 cp->regs + REG_MAC_RX_CFG);
3693 writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3694 }
3695
3696 if (netif_msg_link(cp)) {
3697 if (pause & 0x01) {
3698 netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
3699 cp->rx_fifo_size,
3700 cp->rx_pause_off,
3701 cp->rx_pause_on);
3702 } else if (pause & 0x10) {
3703 netdev_info(cp->dev, "TX pause enabled\n");
3704 } else {
3705 netdev_info(cp->dev, "Pause is disabled\n");
3706 }
3707 }
3708
3709 val = readl(cp->regs + REG_MAC_CTRL_CFG);
3710 val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
3711 if (pause) {
3712 val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
3713 if (pause & 0x01) {
3714 val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
3715 }
3716 }
3717 writel(val, cp->regs + REG_MAC_CTRL_CFG);
3718 cas_start_dma(cp);
3719}
3720
3721
3722static void cas_init_hw(struct cas *cp, int restart_link)
3723{
3724 if (restart_link)
3725 cas_phy_init(cp);
3726
3727 cas_init_pause_thresholds(cp);
3728 cas_init_mac(cp);
3729 cas_init_dma(cp);
3730
3731 if (restart_link) {
3732
3733 cp->timer_ticks = 0;
3734 cas_begin_auto_negotiation(cp, NULL);
3735 } else if (cp->lstate == link_up) {
3736 cas_set_link_modes(cp);
3737 netif_carrier_on(cp->dev);
3738 }
3739}
3740
3741
3742
3743
3744
3745static void cas_hard_reset(struct cas *cp)
3746{
3747 writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3748 udelay(20);
3749 pci_restore_state(cp->pdev);
3750}
3751
3752
3753static void cas_global_reset(struct cas *cp, int blkflag)
3754{
3755 int limit;
3756
3757
3758 if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
3759
3760
3761
3762
3763
3764
3765 writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
3766 cp->regs + REG_SW_RESET);
3767 } else {
3768 writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
3769 }
3770
3771
3772 mdelay(3);
3773
3774 limit = STOP_TRIES;
3775 while (limit-- > 0) {
3776 u32 val = readl(cp->regs + REG_SW_RESET);
3777 if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
3778 goto done;
3779 udelay(10);
3780 }
3781 netdev_err(cp->dev, "sw reset failed\n");
3782
3783done:
3784
3785 writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
3786 BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
3787
3788
3789
3790
3791
3792 writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
3793 PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
3794 PCI_ERR_BIM_DMA_READ), cp->regs +
3795 REG_PCI_ERR_STATUS_MASK);
3796
3797
3798
3799
3800 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3801}
3802
3803static void cas_reset(struct cas *cp, int blkflag)
3804{
3805 u32 val;
3806
3807 cas_mask_intr(cp);
3808 cas_global_reset(cp, blkflag);
3809 cas_mac_reset(cp);
3810 cas_entropy_reset(cp);
3811
3812
3813 val = readl(cp->regs + REG_TX_CFG);
3814 val &= ~TX_CFG_DMA_EN;
3815 writel(val, cp->regs + REG_TX_CFG);
3816
3817 val = readl(cp->regs + REG_RX_CFG);
3818 val &= ~RX_CFG_DMA_EN;
3819 writel(val, cp->regs + REG_RX_CFG);
3820
3821
3822 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
3823 (CAS_HP_ALT_FIRMWARE == cas_prog_null)) {
3824 cas_load_firmware(cp, CAS_HP_FIRMWARE);
3825 } else {
3826 cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
3827 }
3828
3829
3830 spin_lock(&cp->stat_lock[N_TX_RINGS]);
3831 cas_clear_mac_err(cp);
3832 spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3833}
3834
3835
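/* Final shutdown: mark the hardware as not running, stop the link timer, wait
 * out any pending reset work, then reset the chip (and power down a Saturn
 * phy) with all locks held.
 */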
3836static void cas_shutdown(struct cas *cp)
3837{
3838 unsigned long flags;
3839
3840
3841 cp->hw_running = 0;
3842
3843 del_timer_sync(&cp->link_timer);
3844
3845
3846#if 0
3847 while (atomic_read(&cp->reset_task_pending_mtu) ||
3848 atomic_read(&cp->reset_task_pending_spare) ||
3849 atomic_read(&cp->reset_task_pending_all))
3850 schedule();
3851
3852#else
3853 while (atomic_read(&cp->reset_task_pending))
3854 schedule();
3855#endif
3856
3857 cas_lock_all_save(cp, flags);
3858 cas_reset(cp, 0);
3859 if (cp->cas_flags & CAS_FLAG_SATURN)
3860 cas_phy_powerdown(cp);
3861 cas_unlock_all_restore(cp, flags);
3862}
3863
3864static int cas_change_mtu(struct net_device *dev, int new_mtu)
3865{
3866 struct cas *cp = netdev_priv(dev);
3867
3868 dev->mtu = new_mtu;
3869 if (!netif_running(dev) || !netif_device_present(dev))
3870 return 0;
3871
3872
3873#if 1
3874 atomic_inc(&cp->reset_task_pending);
3875 if ((cp->phy_type & CAS_PHY_SERDES)) {
3876 atomic_inc(&cp->reset_task_pending_all);
3877 } else {
3878 atomic_inc(&cp->reset_task_pending_mtu);
3879 }
3880 schedule_work(&cp->reset_task);
3881#else
3882 atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
3883 CAS_RESET_ALL : CAS_RESET_MTU);
3884 pr_err("reset called in cas_change_mtu\n");
3885 schedule_work(&cp->reset_task);
3886#endif
3887
3888 flush_work(&cp->reset_task);
3889 return 0;
3890}
3891
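/* Unmap and free every skb still sitting on tx ring @ring, skipping over the
 * extra descriptor slots consumed by tiny-buffer copies.
 */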
3892static void cas_clean_txd(struct cas *cp, int ring)
3893{
3894 struct cas_tx_desc *txd = cp->init_txds[ring];
3895 struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
3896 u64 daddr, dlen;
3897 int i, size;
3898
3899 size = TX_DESC_RINGN_SIZE(ring);
3900 for (i = 0; i < size; i++) {
3901 int frag;
3902
3903 if (skbs[i] == NULL)
3904 continue;
3905
3906 skb = skbs[i];
3907 skbs[i] = NULL;
3908
3909 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
3910 int ent = i & (size - 1);
3911
3912
3913
3914
3915 daddr = le64_to_cpu(txd[ent].buffer);
3916 dlen = CAS_VAL(TX_DESC_BUFLEN,
3917 le64_to_cpu(txd[ent].control));
3918 pci_unmap_page(cp->pdev, daddr, dlen,
3919 PCI_DMA_TODEVICE);
3920
3921 if (frag != skb_shinfo(skb)->nr_frags) {
3922 i++;
3923
3924
3925
3926
3927 ent = i & (size - 1);
3928 if (cp->tx_tiny_use[ring][ent].used)
3929 i++;
3930 }
3931 }
3932 dev_kfree_skb_any(skb);
3933 }
3934
3935
3936 memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
3937}
3938
3939
3940static inline void cas_free_rx_desc(struct cas *cp, int ring)
3941{
3942 cas_page_t **page = cp->rx_pages[ring];
3943 int i, size;
3944
3945 size = RX_DESC_RINGN_SIZE(ring);
3946 for (i = 0; i < size; i++) {
3947 if (page[i]) {
3948 cas_page_free(cp, page[i]);
3949 page[i] = NULL;
3950 }
3951 }
3952}
3953
3954static void cas_free_rxds(struct cas *cp)
3955{
3956 int i;
3957
3958 for (i = 0; i < N_RX_DESC_RINGS; i++)
3959 cas_free_rx_desc(cp, i);
3960}
3961
3962
3963static void cas_clean_rings(struct cas *cp)
3964{
3965 int i;
3966
3967
3968 memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
3969 memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
3970 for (i = 0; i < N_TX_RINGS; i++)
3971 cas_clean_txd(cp, i);
3972
3973
3974 memset(cp->init_block, 0, sizeof(struct cas_init_block));
3975 cas_clean_rxds(cp);
3976 cas_clean_rxcs(cp);
3977}
3978
3979
3980static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
3981{
3982 cas_page_t **page = cp->rx_pages[ring];
3983 int size, i = 0;
3984
3985 size = RX_DESC_RINGN_SIZE(ring);
3986 for (i = 0; i < size; i++) {
3987 if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
3988 return -1;
3989 }
3990 return 0;
3991}
3992
3993static int cas_alloc_rxds(struct cas *cp)
3994{
3995 int i;
3996
3997 for (i = 0; i < N_RX_DESC_RINGS; i++) {
3998 if (cas_alloc_rx_desc(cp, i) < 0) {
3999 cas_free_rxds(cp);
4000 return -1;
4001 }
4002 }
4003 return 0;
4004}
4005
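/* Deferred reset work.  Spare rx pages are recovered whenever the device is
 * open; a full chip reset and re-init is only done when something more than a
 * spare recovery was requested.  The pending_* counters let multiple requests
 * coalesce into one pass.
 */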
4006static void cas_reset_task(struct work_struct *work)
4007{
4008 struct cas *cp = container_of(work, struct cas, reset_task);
4009#if 0
4010 int pending = atomic_read(&cp->reset_task_pending);
4011#else
4012 int pending_all = atomic_read(&cp->reset_task_pending_all);
4013 int pending_spare = atomic_read(&cp->reset_task_pending_spare);
4014 int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);
4015
4016 if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) {
4017
4018
4019
4020 atomic_dec(&cp->reset_task_pending);
4021 return;
4022 }
4023#endif
4024
4025
4026
4027
4028 if (cp->hw_running) {
4029 unsigned long flags;
4030
4031
4032 netif_device_detach(cp->dev);
4033 cas_lock_all_save(cp, flags);
4034
4035 if (cp->opened) {
4036
4037
4038
4039
4040 cas_spare_recover(cp, GFP_ATOMIC);
4041 }
4042#if 1
4043
4044 if (!pending_all && !pending_mtu)
4045 goto done;
4046#else
4047 if (pending == CAS_RESET_SPARE)
4048 goto done;
4049#endif
4050
4051
4052
4053
4054
4055
4056
4057#if 1
4058 cas_reset(cp, !(pending_all > 0));
4059 if (cp->opened)
4060 cas_clean_rings(cp);
4061 cas_init_hw(cp, (pending_all > 0));
4062#else
4063 cas_reset(cp, !(pending == CAS_RESET_ALL));
4064 if (cp->opened)
4065 cas_clean_rings(cp);
4066 cas_init_hw(cp, pending == CAS_RESET_ALL);
4067#endif
4068
4069done:
4070 cas_unlock_all_restore(cp, flags);
4071 netif_device_attach(cp->dev);
4072 }
4073#if 1
4074 atomic_sub(pending_all, &cp->reset_task_pending_all);
4075 atomic_sub(pending_spare, &cp->reset_task_pending_spare);
4076 atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
4077 atomic_dec(&cp->reset_task_pending);
4078#else
4079 atomic_set(&cp->reset_task_pending, 0);
4080#endif
4081}
4082
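/* Periodic link timer: retry rx descriptor refills that previously failed,
 * poll the phy or PCS for link changes, and watch for the tx MAC lockup that
 * needs a hard reset followed by a scheduled full reset.
 */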
4083static void cas_link_timer(struct timer_list *t)
4084{
4085 struct cas *cp = from_timer(cp, t, link_timer);
4086 int mask, pending = 0, reset = 0;
4087 unsigned long flags;
4088
4089 if (link_transition_timeout != 0 &&
4090 cp->link_transition_jiffies_valid &&
4091 ((jiffies - cp->link_transition_jiffies) >
4092 (link_transition_timeout))) {
4093
4094
4095
4096
4097 cp->link_transition_jiffies_valid = 0;
4098 }
4099
4100 if (!cp->hw_running)
4101 return;
4102
4103 spin_lock_irqsave(&cp->lock, flags);
4104 cas_lock_tx(cp);
4105 cas_entropy_gather(cp);
4106
4107
4108
4109
4110#if 1
4111 if (atomic_read(&cp->reset_task_pending_all) ||
4112 atomic_read(&cp->reset_task_pending_spare) ||
4113 atomic_read(&cp->reset_task_pending_mtu))
4114 goto done;
4115#else
4116 if (atomic_read(&cp->reset_task_pending))
4117 goto done;
4118#endif
4119
4120
4121 if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) {
4122 int i, rmask;
4123
4124 for (i = 0; i < MAX_RX_DESC_RINGS; i++) {
4125 rmask = CAS_FLAG_RXD_POST(i);
4126 if ((mask & rmask) == 0)
4127 continue;
4128
4129
4130 if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
4131 pending = 1;
4132 continue;
4133 }
4134 cp->cas_flags &= ~rmask;
4135 }
4136 }
4137
4138 if (CAS_PHY_MII(cp->phy_type)) {
4139 u16 bmsr;
4140 cas_mif_poll(cp, 0);
4141 bmsr = cas_phy_read(cp, MII_BMSR);
4142
4143
4144
4145
4146
4147 bmsr = cas_phy_read(cp, MII_BMSR);
4148 cas_mif_poll(cp, 1);
4149 readl(cp->regs + REG_MIF_STATUS);
4150 reset = cas_mii_link_check(cp, bmsr);
4151 } else {
4152 reset = cas_pcs_link_check(cp);
4153 }
4154
4155 if (reset)
4156 goto done;
4157
4158
4159 if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
4160 u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
4161 u32 wptr, rptr;
4162 int tlm = CAS_VAL(MAC_SM_TLM, val);
4163
4164 if (((tlm == 0x5) || (tlm == 0x3)) &&
4165 (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
4166 netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
4167 "tx err: MAC_STATE[%08x]\n", val);
4168 reset = 1;
4169 goto done;
4170 }
4171
4172 val = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
4173 wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
4174 rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
4175 if ((val == 0) && (wptr != rptr)) {
4176 netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
4177 "tx err: TX_FIFO[%08x:%08x:%08x]\n",
4178 val, wptr, rptr);
4179 reset = 1;
4180 }
4181
4182 if (reset)
4183 cas_hard_reset(cp);
4184 }
4185
4186done:
4187 if (reset) {
4188#if 1
4189 atomic_inc(&cp->reset_task_pending);
4190 atomic_inc(&cp->reset_task_pending_all);
4191 schedule_work(&cp->reset_task);
4192#else
4193 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
4194 pr_err("reset called in cas_link_timer\n");
4195 schedule_work(&cp->reset_task);
4196#endif
4197 }
4198
4199 if (!pending)
4200 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
4201 cas_unlock_tx(cp);
4202 spin_unlock_irqrestore(&cp->lock, flags);
4203}
4204
4205
4206
4207
4208static void cas_tx_tiny_free(struct cas *cp)
4209{
4210 struct pci_dev *pdev = cp->pdev;
4211 int i;
4212
4213 for (i = 0; i < N_TX_RINGS; i++) {
4214 if (!cp->tx_tiny_bufs[i])
4215 continue;
4216
4217 pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
4218 cp->tx_tiny_bufs[i],
4219 cp->tx_tiny_dvma[i]);
4220 cp->tx_tiny_bufs[i] = NULL;
4221 }
4222}
4223
4224static int cas_tx_tiny_alloc(struct cas *cp)
4225{
4226 struct pci_dev *pdev = cp->pdev;
4227 int i;
4228
4229 for (i = 0; i < N_TX_RINGS; i++) {
4230 cp->tx_tiny_bufs[i] =
4231 pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
4232 &cp->tx_tiny_dvma[i]);
4233 if (!cp->tx_tiny_bufs[i]) {
4234 cas_tx_tiny_free(cp);
4235 return -1;
4236 }
4237 }
4238 return 0;
4239}
4240
4241
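/* Bring the interface up: take the chip out of reset if needed, allocate the
 * tiny tx buffers, rx pages and spare pool, install the (shared) interrupt
 * handler and program the hardware.  pm_mutex serializes this against close
 * and power management.
 */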
4242static int cas_open(struct net_device *dev)
4243{
4244 struct cas *cp = netdev_priv(dev);
4245 int hw_was_up, err;
4246 unsigned long flags;
4247
4248 mutex_lock(&cp->pm_mutex);
4249
4250 hw_was_up = cp->hw_running;
4251
4252
4253
4254
4255 if (!cp->hw_running) {
4256
4257 cas_lock_all_save(cp, flags);
4258
4259
4260
4261
4262
4263 cas_reset(cp, 0);
4264 cp->hw_running = 1;
4265 cas_unlock_all_restore(cp, flags);
4266 }
4267
4268 err = -ENOMEM;
4269 if (cas_tx_tiny_alloc(cp) < 0)
4270 goto err_unlock;
4271
4272
4273 if (cas_alloc_rxds(cp) < 0)
4274 goto err_tx_tiny;
4275
4276
4277 cas_spare_init(cp);
4278 cas_spare_recover(cp, GFP_KERNEL);
4279
4280
4281
4282
4283
4284
4285 if (request_irq(cp->pdev->irq, cas_interrupt,
4286 IRQF_SHARED, dev->name, (void *) dev)) {
4287 netdev_err(cp->dev, "failed to request irq !\n");
4288 err = -EAGAIN;
4289 goto err_spare;
4290 }
4291
4292#ifdef USE_NAPI
4293 napi_enable(&cp->napi);
4294#endif
4295
4296 cas_lock_all_save(cp, flags);
4297 cas_clean_rings(cp);
4298 cas_init_hw(cp, !hw_was_up);
4299 cp->opened = 1;
4300 cas_unlock_all_restore(cp, flags);
4301
4302 netif_start_queue(dev);
4303 mutex_unlock(&cp->pm_mutex);
4304 return 0;
4305
4306err_spare:
4307 cas_spare_free(cp);
4308 cas_free_rxds(cp);
4309err_tx_tiny:
4310 cas_tx_tiny_free(cp);
4311err_unlock:
4312 mutex_unlock(&cp->pm_mutex);
4313 return err;
4314}
4315
4316static int cas_close(struct net_device *dev)
4317{
4318 unsigned long flags;
4319 struct cas *cp = netdev_priv(dev);
4320
4321#ifdef USE_NAPI
4322 napi_disable(&cp->napi);
4323#endif
4324
4325 mutex_lock(&cp->pm_mutex);
4326
4327 netif_stop_queue(dev);
4328
4329
4330 cas_lock_all_save(cp, flags);
4331 cp->opened = 0;
4332 cas_reset(cp, 0);
4333 cas_phy_init(cp);
4334 cas_begin_auto_negotiation(cp, NULL);
4335 cas_clean_rings(cp);
4336 cas_unlock_all_restore(cp, flags);
4337
4338 free_irq(cp->pdev->irq, (void *) dev);
4339 cas_spare_free(cp);
4340 cas_free_rxds(cp);
4341 cas_tx_tiny_free(cp);
4342 mutex_unlock(&cp->pm_mutex);
4343 return 0;
4344}
4345
4346static struct {
4347 const char name[ETH_GSTRING_LEN];
4348} ethtool_cassini_statnames[] = {
4349 {"collisions"},
4350 {"rx_bytes"},
4351 {"rx_crc_errors"},
4352 {"rx_dropped"},
4353 {"rx_errors"},
4354 {"rx_fifo_errors"},
4355 {"rx_frame_errors"},
4356 {"rx_length_errors"},
4357 {"rx_over_errors"},
4358 {"rx_packets"},
4359 {"tx_aborted_errors"},
4360 {"tx_bytes"},
4361 {"tx_dropped"},
4362 {"tx_errors"},
4363 {"tx_fifo_errors"},
4364 {"tx_packets"}
4365};
4366#define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames)
4367
4368static struct {
4369 const int offsets;
4370} ethtool_register_table[] = {
4371 {-MII_BMSR},
4372 {-MII_BMCR},
4373 {REG_CAWR},
4374 {REG_INF_BURST},
4375 {REG_BIM_CFG},
4376 {REG_RX_CFG},
4377 {REG_HP_CFG},
4378 {REG_MAC_TX_CFG},
4379 {REG_MAC_RX_CFG},
4380 {REG_MAC_CTRL_CFG},
4381 {REG_MAC_XIF_CFG},
4382 {REG_MIF_CFG},
4383 {REG_PCS_CFG},
4384 {REG_SATURN_PCFG},
4385 {REG_PCS_MII_STATUS},
4386 {REG_PCS_STATE_MACHINE},
4387 {REG_MAC_COLL_EXCESS},
4388 {REG_MAC_COLL_LATE}
4389};
4390#define CAS_REG_LEN ARRAY_SIZE(ethtool_register_table)
4391#define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN)
4392
4393static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
4394{
4395 u8 *p;
4396 int i;
4397 unsigned long flags;
4398
4399 spin_lock_irqsave(&cp->lock, flags);
4400 for (i = 0, p = ptr; i < len ; i ++, p += sizeof(u32)) {
4401 u16 hval;
4402 u32 val;
4403 if (ethtool_register_table[i].offsets < 0) {
4404 hval = cas_phy_read(cp,
4405 -ethtool_register_table[i].offsets);
4406 val = hval;
4407 } else {
4408 val= readl(cp->regs+ethtool_register_table[i].offsets);
4409 }
4410 memcpy(p, (u8 *)&val, sizeof(u32));
4411 }
4412 spin_unlock_irqrestore(&cp->lock, flags);
4413}
4414
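/* Fold the hardware error counters and the per-ring software counters into
 * the aggregate entry at index N_TX_RINGS, which is what gets reported.
 */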
4415static struct net_device_stats *cas_get_stats(struct net_device *dev)
4416{
4417 struct cas *cp = netdev_priv(dev);
4418 struct net_device_stats *stats = cp->net_stats;
4419 unsigned long flags;
4420 int i;
4421 unsigned long tmp;
4422
4423
4424 if (!cp->hw_running)
4425 return stats + N_TX_RINGS;
4426
4427
4428
4429
4430
4431
4432
4433
4434
4435 spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
4436 stats[N_TX_RINGS].rx_crc_errors +=
4437 readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
4438 stats[N_TX_RINGS].rx_frame_errors +=
4439 readl(cp->regs + REG_MAC_ALIGN_ERR) &0xffff;
4440 stats[N_TX_RINGS].rx_length_errors +=
4441 readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
4442#if 1
4443 tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
4444 (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff);
4445 stats[N_TX_RINGS].tx_aborted_errors += tmp;
4446 stats[N_TX_RINGS].collisions +=
4447 tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
4448#else
4449 stats[N_TX_RINGS].tx_aborted_errors +=
4450 readl(cp->regs + REG_MAC_COLL_EXCESS);
4451 stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
4452 readl(cp->regs + REG_MAC_COLL_LATE);
4453#endif
4454 cas_clear_mac_err(cp);
4455
4456
4457 spin_lock(&cp->stat_lock[0]);
4458 stats[N_TX_RINGS].collisions += stats[0].collisions;
4459 stats[N_TX_RINGS].rx_over_errors += stats[0].rx_over_errors;
4460 stats[N_TX_RINGS].rx_frame_errors += stats[0].rx_frame_errors;
4461 stats[N_TX_RINGS].rx_fifo_errors += stats[0].rx_fifo_errors;
4462 stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors;
4463 stats[N_TX_RINGS].tx_fifo_errors += stats[0].tx_fifo_errors;
4464 spin_unlock(&cp->stat_lock[0]);
4465
4466 for (i = 0; i < N_TX_RINGS; i++) {
4467 spin_lock(&cp->stat_lock[i]);
4468 stats[N_TX_RINGS].rx_length_errors +=
4469 stats[i].rx_length_errors;
4470 stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
4471 stats[N_TX_RINGS].rx_packets += stats[i].rx_packets;
4472 stats[N_TX_RINGS].tx_packets += stats[i].tx_packets;
4473 stats[N_TX_RINGS].rx_bytes += stats[i].rx_bytes;
4474 stats[N_TX_RINGS].tx_bytes += stats[i].tx_bytes;
4475 stats[N_TX_RINGS].rx_errors += stats[i].rx_errors;
4476 stats[N_TX_RINGS].tx_errors += stats[i].tx_errors;
4477 stats[N_TX_RINGS].rx_dropped += stats[i].rx_dropped;
4478 stats[N_TX_RINGS].tx_dropped += stats[i].tx_dropped;
4479 memset(stats + i, 0, sizeof(struct net_device_stats));
4480 spin_unlock(&cp->stat_lock[i]);
4481 }
4482 spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags);
4483 return stats + N_TX_RINGS;
4484}
4485
4486
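/* Rewrite the rx filter: disable the rx MAC, wait for it to quiesce, drop the
 * old promiscuous/hash settings and install the new configuration from
 * cas_setup_multicast().
 */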
4487static void cas_set_multicast(struct net_device *dev)
4488{
4489 struct cas *cp = netdev_priv(dev);
4490 u32 rxcfg, rxcfg_new;
4491 unsigned long flags;
4492 int limit = STOP_TRIES;
4493
4494 if (!cp->hw_running)
4495 return;
4496
4497 spin_lock_irqsave(&cp->lock, flags);
4498 rxcfg = readl(cp->regs + REG_MAC_RX_CFG);
4499
4500
4501 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4502 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
4503 if (!limit--)
4504 break;
4505 udelay(10);
4506 }
4507
4508
4509 limit = STOP_TRIES;
4510 rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN);
4511 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4512 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) {
4513 if (!limit--)
4514 break;
4515 udelay(10);
4516 }
4517
4518
4519 cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp);
4520 rxcfg |= rxcfg_new;
4521 writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
4522 spin_unlock_irqrestore(&cp->lock, flags);
4523}
4524
4525static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4526{
4527 struct cas *cp = netdev_priv(dev);
4528 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
4529 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
4530 strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
4531}
4532
4533static int cas_get_link_ksettings(struct net_device *dev,
4534 struct ethtool_link_ksettings *cmd)
4535{
4536 struct cas *cp = netdev_priv(dev);
4537 u16 bmcr;
4538 int full_duplex, speed, pause;
4539 unsigned long flags;
4540 enum link_state linkstate = link_up;
4541 u32 supported, advertising;
4542
4543 advertising = 0;
4544 supported = SUPPORTED_Autoneg;
4545 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
4546 supported |= SUPPORTED_1000baseT_Full;
4547 advertising |= ADVERTISED_1000baseT_Full;
4548 }
4549
4550
4551 spin_lock_irqsave(&cp->lock, flags);
4552 bmcr = 0;
4553 linkstate = cp->lstate;
4554 if (CAS_PHY_MII(cp->phy_type)) {
4555 cmd->base.port = PORT_MII;
4556 cmd->base.phy_address = cp->phy_addr;
4557 advertising |= ADVERTISED_TP | ADVERTISED_MII |
4558 ADVERTISED_10baseT_Half |
4559 ADVERTISED_10baseT_Full |
4560 ADVERTISED_100baseT_Half |
4561 ADVERTISED_100baseT_Full;
4562
4563 supported |=
4564 (SUPPORTED_10baseT_Half |
4565 SUPPORTED_10baseT_Full |
4566 SUPPORTED_100baseT_Half |
4567 SUPPORTED_100baseT_Full |
4568 SUPPORTED_TP | SUPPORTED_MII);
4569
4570 if (cp->hw_running) {
4571 cas_mif_poll(cp, 0);
4572 bmcr = cas_phy_read(cp, MII_BMCR);
4573 cas_read_mii_link_mode(cp, &full_duplex,
4574 &speed, &pause);
4575 cas_mif_poll(cp, 1);
4576 }
4577
4578 } else {
4579 cmd->base.port = PORT_FIBRE;
4580 cmd->base.phy_address = 0;
4581 supported |= SUPPORTED_FIBRE;
4582 advertising |= ADVERTISED_FIBRE;
4583
4584 if (cp->hw_running) {
4585
4586 bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
4587 cas_read_pcs_link_mode(cp, &full_duplex,
4588 &speed, &pause);
4589 }
4590 }
4591 spin_unlock_irqrestore(&cp->lock, flags);
4592
4593 if (bmcr & BMCR_ANENABLE) {
4594 advertising |= ADVERTISED_Autoneg;
4595 cmd->base.autoneg = AUTONEG_ENABLE;
4596 cmd->base.speed = ((speed == 10) ?
4597 SPEED_10 :
4598 ((speed == 1000) ?
4599 SPEED_1000 : SPEED_100));
4600 cmd->base.duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
4601 } else {
4602 cmd->base.autoneg = AUTONEG_DISABLE;
4603 cmd->base.speed = ((bmcr & CAS_BMCR_SPEED1000) ?
4604 SPEED_1000 :
4605 ((bmcr & BMCR_SPEED100) ?
4606 SPEED_100 : SPEED_10));
4607 cmd->base.duplex = (bmcr & BMCR_FULLDPLX) ?
4608 DUPLEX_FULL : DUPLEX_HALF;
4609 }
4610 if (linkstate != link_up) {
4611
4612
4613
4614
4615
4616
4617
4618
4619
4620
4621 if (cp->link_cntl & BMCR_ANENABLE) {
4622 cmd->base.speed = 0;
4623 cmd->base.duplex = 0xff;
4624 } else {
4625 cmd->base.speed = SPEED_10;
4626 if (cp->link_cntl & BMCR_SPEED100) {
4627 cmd->base.speed = SPEED_100;
4628 } else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
4629 cmd->base.speed = SPEED_1000;
4630 }
4631 cmd->base.duplex = (cp->link_cntl & BMCR_FULLDPLX) ?
4632 DUPLEX_FULL : DUPLEX_HALF;
4633 }
4634 }
4635
4636 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
4637 supported);
4638 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
4639 advertising);
4640
4641 return 0;
4642}
4643
4644static int cas_set_link_ksettings(struct net_device *dev,
4645 const struct ethtool_link_ksettings *cmd)
4646{
4647 struct cas *cp = netdev_priv(dev);
4648 unsigned long flags;
4649 u32 speed = cmd->base.speed;
4650
4651
4652 if (cmd->base.autoneg != AUTONEG_ENABLE &&
4653 cmd->base.autoneg != AUTONEG_DISABLE)
4654 return -EINVAL;
4655
4656 if (cmd->base.autoneg == AUTONEG_DISABLE &&
4657 ((speed != SPEED_1000 &&
4658 speed != SPEED_100 &&
4659 speed != SPEED_10) ||
4660 (cmd->base.duplex != DUPLEX_HALF &&
4661 cmd->base.duplex != DUPLEX_FULL)))
4662 return -EINVAL;
4663
4664
4665 spin_lock_irqsave(&cp->lock, flags);
4666 cas_begin_auto_negotiation(cp, cmd);
4667 spin_unlock_irqrestore(&cp->lock, flags);
4668 return 0;
4669}
4670
4671static int cas_nway_reset(struct net_device *dev)
4672{
4673 struct cas *cp = netdev_priv(dev);
4674 unsigned long flags;
4675
4676 if ((cp->link_cntl & BMCR_ANENABLE) == 0)
4677 return -EINVAL;
4678
4679
	spin_lock_irqsave(&cp->lock, flags);
	cas_begin_auto_negotiation(cp, NULL);
	spin_unlock_irqrestore(&cp->lock, flags);

	return 0;
}

static u32 cas_get_link(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);
	return cp->lstate == link_up;
}

static u32 cas_get_msglevel(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);
	return cp->msg_enable;
}

static void cas_set_msglevel(struct net_device *dev, u32 value)
{
	struct cas *cp = netdev_priv(dev);
	cp->msg_enable = value;
}

static int cas_get_regs_len(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);
	return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len : CAS_MAX_REGS;
}

static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			 void *p)
{
	struct cas *cp = netdev_priv(dev);
	regs->version = 0;

	cas_read_regs(cp, p, regs->len / sizeof(u32));
}

static int cas_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return CAS_NUM_STAT_KEYS;
	default:
		return -EOPNOTSUPP;
	}
}

static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	memcpy(data, &ethtool_cassini_statnames,
	       CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
}

static void cas_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *data)
{
	struct cas *cp = netdev_priv(dev);
	struct net_device_stats *stats = cas_get_stats(cp->dev);
	int i = 0;
	data[i++] = stats->collisions;
	data[i++] = stats->rx_bytes;
	data[i++] = stats->rx_crc_errors;
	data[i++] = stats->rx_dropped;
	data[i++] = stats->rx_errors;
	data[i++] = stats->rx_fifo_errors;
	data[i++] = stats->rx_frame_errors;
	data[i++] = stats->rx_length_errors;
	data[i++] = stats->rx_over_errors;
	data[i++] = stats->rx_packets;
	data[i++] = stats->tx_aborted_errors;
	data[i++] = stats->tx_bytes;
	data[i++] = stats->tx_dropped;
	data[i++] = stats->tx_errors;
	data[i++] = stats->tx_fifo_errors;
	data[i++] = stats->tx_packets;
	BUG_ON(i != CAS_NUM_STAT_KEYS);
}

static const struct ethtool_ops cas_ethtool_ops = {
	.get_drvinfo = cas_get_drvinfo,
	.nway_reset = cas_nway_reset,
	.get_link = cas_get_link,
	.get_msglevel = cas_get_msglevel,
	.set_msglevel = cas_set_msglevel,
	.get_regs_len = cas_get_regs_len,
	.get_regs = cas_get_regs,
	.get_sset_count = cas_get_sset_count,
	.get_strings = cas_get_strings,
	.get_ethtool_stats = cas_get_ethtool_stats,
	.get_link_ksettings = cas_get_link_ksettings,
	.set_link_ksettings = cas_set_link_ksettings,
};

static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct cas *cp = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(ifr);
	unsigned long flags;
	int rc = -EOPNOTSUPP;

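	/* Hold the PM mutex while doing ioctl's or we may collide
	 * with open/close and power management and oops.
	 */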
	mutex_lock(&cp->pm_mutex);
	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = cp->phy_addr;
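		/* fall through */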
	case SIOCGMIIREG:
		spin_lock_irqsave(&cp->lock, flags);
		cas_mif_poll(cp, 0);
		data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
		cas_mif_poll(cp, 1);
		spin_unlock_irqrestore(&cp->lock, flags);
		rc = 0;
		break;

	case SIOCSMIIREG:
		spin_lock_irqsave(&cp->lock, flags);
		cas_mif_poll(cp, 0);
		rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
		cas_mif_poll(cp, 1);
		spin_unlock_irqrestore(&cp->lock, flags);
		break;
	default:
		break;
	}

	mutex_unlock(&cp->pm_mutex);
	return rc;
}

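/* When this chip sits underneath an Intel 31154 PCI-X bridge (vendor
 * 0x8086, device 0x537c), it is the only subordinate device, so the
 * bridge can be tuned aggressively in Cassini's favor.
 */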
static void cas_program_bridge(struct pci_dev *cas_pdev)
{
	struct pci_dev *pdev = cas_pdev->bus->self;
	u32 val;

	if (!pdev)
		return;

	if (pdev->vendor != 0x8086 || pdev->device != 0x537c)
		return;

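	/* Read-modify-write the config dword at offset 0x40, clearing
	 * bit 18.  This is a 31154-specific arbitration/retry control
	 * bit; the exact semantics are bridge-specific.
	 */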
	pci_read_config_dword(pdev, 0x40, &val);
	val &= ~0x00040000;
	pci_write_config_dword(pdev, 0x40, val);

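	/* Write the 16-bit register at offset 0x50: bits 12:10 = 5 and
	 * bits 9:0 = 0x3ff.  On the 31154 this is presumably the
	 * multi-transaction timer, maxed out so the bridge keeps GRANT#
	 * asserted for Cassini, the only device behind it.
	 */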
	pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff);

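	/* Write the 16-bit register at offset 0x52: four 3-bit fields
	 * (bits 15:13, 12:10, 9:7, 6:4) set to their maximum of 0x7 and
	 * the low four bits set to 0xf.  This presumably maxes out and
	 * enables the bridge's read-prefetch policy on every port, which
	 * is safe with a single device downstream.
	 */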
	pci_write_config_word(pdev, 0x52,
			      (0x7 << 13) |
			      (0x7 << 10) |
			      (0x7 << 7) |
			      (0x7 << 4) |
			      (0xf << 0));

	/* Force cacheline size to 0x8 (0x8 dwords == 32 bytes). */
	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);

	/* Force latency timer to maximum setting so Cassini can
	 * realistically hog the bus.
	 */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff);
}

static const struct net_device_ops cas_netdev_ops = {
	.ndo_open = cas_open,
	.ndo_stop = cas_close,
	.ndo_start_xmit = cas_start_xmit,
	.ndo_get_stats = cas_get_stats,
	.ndo_set_rx_mode = cas_set_multicast,
	.ndo_do_ioctl = cas_ioctl,
	.ndo_tx_timeout = cas_tx_timeout,
	.ndo_change_mtu = cas_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cas_netpoll,
#endif
};

static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int cas_version_printed = 0;
	unsigned long casreg_len;
	struct net_device *dev;
	struct cas *cp;
	int i, err, pci_using_dac;
	u16 pci_cmd;
	u8 orig_cacheline_size = 0, cas_cacheline_size = 0;

	if (cas_version_printed++ == 0)
		pr_info("%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Cannot find proper PCI device "
			"base address, aborting\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	dev = alloc_etherdev(sizeof(*cp));
	if (!dev) {
		err = -ENOMEM;
		goto err_out_disable_pdev;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = pci_request_regions(pdev, dev->name);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_free_netdev;
	}
	pci_set_master(pdev);

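	/* We must always turn on parity response or else parity doesn't
	 * get generated properly.  Disable SERR reporting as well, and
	 * try to turn on MWI.
	 */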
	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_SERR;
	pci_cmd |= PCI_COMMAND_PARITY;
	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
	if (pci_try_set_mwi(pdev))
		pr_warn("Could not enable MWI for %s\n", pci_name(pdev));

	cas_program_bridge(pdev);

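	/* If the PCI cache line size is smaller than the preferred value,
	 * bump it up (capped at SMP_CACHE_BYTES); the original value is
	 * remembered so it can be restored on failure or removal.
	 */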
#if 1
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
			     &orig_cacheline_size);
	if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
		cas_cacheline_size =
			(CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
			CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
		if (pci_write_config_byte(pdev,
					  PCI_CACHE_LINE_SIZE,
					  cas_cacheline_size)) {
			dev_err(&pdev->dev, "Could not set PCI cache "
				"line size\n");
			goto err_write_cacheline;
		}
	}
#endif

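	/* Configure DMA attributes. */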
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev,
						  DMA_BIT_MASK(64));
		if (err < 0) {
			dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
				"for consistent allocations\n");
			goto err_out_free_res;
		}

	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No usable DMA configuration, "
				"aborting\n");
			goto err_out_free_res;
		}
		pci_using_dac = 0;
	}

	casreg_len = pci_resource_len(pdev, 0);

	cp = netdev_priv(dev);
	cp->pdev = pdev;
#if 1
	/* Only restore the cache line size on removal if we changed it. */
	cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size : 0;
#endif
	cp->dev = dev;
	cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
		cassini_debug;

#if defined(CONFIG_SPARC)
	cp->of_node = pci_device_to_OF_node(pdev);
#endif

	cp->link_transition = LINK_TRANSITION_UNKNOWN;
	cp->link_transition_jiffies_valid = 0;

	spin_lock_init(&cp->lock);
	spin_lock_init(&cp->rx_inuse_lock);
	spin_lock_init(&cp->rx_spare_lock);
	for (i = 0; i < N_TX_RINGS; i++) {
		spin_lock_init(&cp->stat_lock[i]);
		spin_lock_init(&cp->tx_lock[i]);
	}
	spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
	mutex_init(&cp->pm_mutex);

	timer_setup(&cp->link_timer, cas_link_timer, 0);

#if 1
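	/* Track each flavor of pending reset (all, spare, mtu) with its
	 * own atomic counter so that requests made while the reset task
	 * is already scheduled are not lost.
	 */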
	atomic_set(&cp->reset_task_pending, 0);
	atomic_set(&cp->reset_task_pending_all, 0);
	atomic_set(&cp->reset_task_pending_spare, 0);
	atomic_set(&cp->reset_task_pending_mtu, 0);
#endif
	INIT_WORK(&cp->reset_task, cas_reset_task);

	/* Default link parameters */
	if (link_mode >= 0 && link_mode < 6)
		cp->link_cntl = link_modes[link_mode];
	else
		cp->link_cntl = BMCR_ANENABLE;
	cp->lstate = link_down;
	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
	netif_carrier_off(cp->dev);
	cp->timer_ticks = 0;

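	/* Give us access to the Cassini registers. */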
	cp->regs = pci_iomap(pdev, 0, casreg_len);
	if (!cp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		goto err_out_free_res;
	}
	cp->casreg_len = casreg_len;

	pci_save_state(pdev);
	cas_check_pci_invariants(cp);
	cas_hard_reset(cp);
	cas_reset(cp, 0);
	if (cas_check_invariants(cp))
		goto err_out_iounmap;
	if (cp->cas_flags & CAS_FLAG_SATURN)
		cas_saturn_firmware_init(cp);

	cp->init_block = (struct cas_init_block *)
		pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
				     &cp->block_dvma);
	if (!cp->init_block) {
		dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
		goto err_out_iounmap;
	}

	for (i = 0; i < N_TX_RINGS; i++)
		cp->init_txds[i] = cp->init_block->txds[i];

	for (i = 0; i < N_RX_DESC_RINGS; i++)
		cp->init_rxds[i] = cp->init_block->rxds[i];

	for (i = 0; i < N_RX_COMP_RINGS; i++)
		cp->init_rxcs[i] = cp->init_block->rxcs[i];

	for (i = 0; i < N_RX_FLOWS; i++)
		skb_queue_head_init(&cp->rx_flows[i]);

	dev->netdev_ops = &cas_netdev_ops;
	dev->ethtool_ops = &cas_ethtool_ops;
	dev->watchdog_timeo = CAS_TX_TIMEOUT;

#ifdef USE_NAPI
	netif_napi_add(dev, &cp->napi, cas_poll, 64);
#endif
	dev->irq = pdev->irq;
	dev->dma = 0;

	/* Cassini features. */
	if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;

	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;

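	/* MTU range: 60 - varies or 9000 */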
	dev->min_mtu = CAS_MIN_MTU;
	dev->max_mtu = CAS_MAX_MTU;

	if (register_netdev(dev)) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_free_consistent;
	}

	i = readl(cp->regs + REG_BIM_CFG);
	netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n",
		    (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
		    (i & BIM_CFG_32BIT) ? "32" : "64",
		    (i & BIM_CFG_66MHZ) ? "66" : "33",
		    (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
		    dev->dev_addr);

	pci_set_drvdata(pdev, dev);
	cp->hw_running = 1;
	cas_entropy_reset(cp);
	cas_phy_init(cp);
	cas_begin_auto_negotiation(cp, NULL);
	return 0;

err_out_free_consistent:
	pci_free_consistent(pdev, sizeof(struct cas_init_block),
			    cp->init_block, cp->block_dvma);

err_out_iounmap:
	mutex_lock(&cp->pm_mutex);
	if (cp->hw_running)
		cas_shutdown(cp);
	mutex_unlock(&cp->pm_mutex);

	pci_iounmap(pdev, cp->regs);

err_out_free_res:
	pci_release_regions(pdev);

err_write_cacheline:
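	/* Try to restore the cache line size in case the error occurred
	 * after we changed it.
	 */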
	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);

err_out_free_netdev:
	free_netdev(dev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	return -ENODEV;
}

static void cas_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct cas *cp;
	if (!dev)
		return;

	cp = netdev_priv(dev);
	unregister_netdev(dev);

	vfree(cp->fw_data);

	mutex_lock(&cp->pm_mutex);
	cancel_work_sync(&cp->reset_task);
	if (cp->hw_running)
		cas_shutdown(cp);
	mutex_unlock(&cp->pm_mutex);

#if 1
	if (cp->orig_cacheline_size) {
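		/* Restore the cache line size if we had modified
		 * it.
		 */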
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				      cp->orig_cacheline_size);
	}
#endif
	pci_free_consistent(pdev, sizeof(struct cas_init_block),
			    cp->init_block, cp->block_dvma);
	pci_iounmap(pdev, cp->regs);
	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

#ifdef CONFIG_PM
static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct cas *cp = netdev_priv(dev);
	unsigned long flags;

	mutex_lock(&cp->pm_mutex);

	/* If the driver is opened, we stop the DMA */
	if (cp->opened) {
		netif_device_detach(dev);

		cas_lock_all_save(cp, flags);

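		/* We can set the second arg to cas_reset to 0
		 * because on resume, we'll call cas_init_hw with
		 * its second arg set so that autonegotiation is
		 * restarted.
		 */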
		cas_reset(cp, 0);
		cas_clean_rings(cp);
		cas_unlock_all_restore(cp, flags);
	}

	if (cp->hw_running)
		cas_shutdown(cp);
	mutex_unlock(&cp->pm_mutex);

	return 0;
}

static int cas_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct cas *cp = netdev_priv(dev);

	netdev_info(dev, "resuming\n");

	mutex_lock(&cp->pm_mutex);
	cas_hard_reset(cp);
	if (cp->opened) {
		unsigned long flags;
		cas_lock_all_save(cp, flags);
		cas_reset(cp, 0);
		cp->hw_running = 1;
		cas_clean_rings(cp);
		cas_init_hw(cp, 1);
		cas_unlock_all_restore(cp, flags);

		netif_device_attach(dev);
	}
	mutex_unlock(&cp->pm_mutex);
	return 0;
}
#endif

static struct pci_driver cas_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = cas_pci_tbl,
	.probe = cas_init_one,
	.remove = cas_remove_one,
#ifdef CONFIG_PM
	.suspend = cas_suspend,
	.resume = cas_resume
#endif
};

static int __init cas_init(void)
{
	if (linkdown_timeout > 0)
		link_transition_timeout = linkdown_timeout * HZ;
	else
		link_transition_timeout = 0;

	return pci_register_driver(&cas_driver);
}

static void __exit cas_cleanup(void)
{
	pci_unregister_driver(&cas_driver);
}

module_init(cas_init);
module_exit(cas_cleanup);