1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
70
71#include <linux/module.h>
72#include <linux/kernel.h>
73#include <linux/types.h>
74#include <linux/compiler.h>
75#include <linux/slab.h>
76#include <linux/delay.h>
77#include <linux/init.h>
78#include <linux/vmalloc.h>
79#include <linux/ioport.h>
80#include <linux/pci.h>
81#include <linux/mm.h>
82#include <linux/highmem.h>
83#include <linux/list.h>
84#include <linux/dma-mapping.h>
85
86#include <linux/netdevice.h>
87#include <linux/etherdevice.h>
88#include <linux/skbuff.h>
89#include <linux/ethtool.h>
90#include <linux/crc32.h>
91#include <linux/random.h>
92#include <linux/mii.h>
93#include <linux/ip.h>
94#include <linux/tcp.h>
95#include <linux/mutex.h>
96#include <linux/firmware.h>
97
98#include <net/checksum.h>
99
100#include <asm/atomic.h>
101#include <asm/system.h>
102#include <asm/io.h>
103#include <asm/byteorder.h>
104#include <asm/uaccess.h>
105
106#define cas_page_map(x) kmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
107#define cas_page_unmap(x) kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
108#define CAS_NCPUS num_online_cpus()
109
110#define cas_skb_release(x) netif_rx(x)
111
112
113#define USE_HP_WORKAROUND
114#define HP_WORKAROUND_DEFAULT
115#define CAS_HP_ALT_FIRMWARE cas_prog_null
116
117#include "cassini.h"
118
119#define USE_TX_COMPWB
120#define USE_CSMA_CD_PROTO
121#define USE_RX_BLANK
122#undef USE_ENTROPY_DEV
123
124
125
126
127#undef USE_PCI_INTB
128#undef USE_PCI_INTC
129#undef USE_PCI_INTD
130#undef USE_QOS
131
132#undef USE_VPD_DEBUG
133
134
135#define USE_PAGE_ORDER
136#define RX_DONT_BATCH 0
137#define RX_COPY_ALWAYS 0
138#define RX_COPY_MIN 64
139#undef RX_COUNT_BUFFERS
140
141#define DRV_MODULE_NAME "cassini"
142#define DRV_MODULE_VERSION "1.6"
143#define DRV_MODULE_RELDATE "21 May 2008"
144
145#define CAS_DEF_MSG_ENABLE \
146 (NETIF_MSG_DRV | \
147 NETIF_MSG_PROBE | \
148 NETIF_MSG_LINK | \
149 NETIF_MSG_TIMER | \
150 NETIF_MSG_IFDOWN | \
151 NETIF_MSG_IFUP | \
152 NETIF_MSG_RX_ERR | \
153 NETIF_MSG_TX_ERR)
154
155
156
157
158#define CAS_TX_TIMEOUT (HZ)
159#define CAS_LINK_TIMEOUT (22*HZ/10)
160#define CAS_LINK_FAST_TIMEOUT (1)
161
162
163
164
165#define STOP_TRIES_PHY 1000
166#define STOP_TRIES 5000
167
168
169
170
171
172#define CAS_MIN_FRAME 97
173#define CAS_1000MB_MIN_FRAME 255
174#define CAS_MIN_MTU 60
175#define CAS_MAX_MTU min(((cp->page_size << 1) - 0x50), 9000)
176
177#if 1
178
179
180
181
182#else
183#define CAS_RESET_MTU 1
184#define CAS_RESET_ALL 2
185#define CAS_RESET_SPARE 3
186#endif
187
188static char version[] __devinitdata =
189 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
190
191static int cassini_debug = -1;
192static int link_mode;
193
194MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
195MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
196MODULE_LICENSE("GPL");
197MODULE_FIRMWARE("sun/cassini.bin");
198module_param(cassini_debug, int, 0);
199MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
200module_param(link_mode, int, 0);
201MODULE_PARM_DESC(link_mode, "default link mode");
202
203
204
205
206
207#define DEFAULT_LINKDOWN_TIMEOUT 5
208
209
210
211static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
212module_param(linkdown_timeout, int, 0);
213MODULE_PARM_DESC(linkdown_timeout,
214"min reset interval in sec. for PCS linkdown issue; disabled if not positive");
215
216
217
218
219
220
221static int link_transition_timeout;
222
223
224
/* BMCR bit patterns selectable through the "link_mode" module
 * parameter, indexed 0..5. */
static u16 link_modes[] __devinitdata = {
	BMCR_ANENABLE,			 /* 0: autonegotiation */
	0,				 /* 1: forced 10bt, half duplex */
	BMCR_SPEED100,			 /* 2: forced 100bt, half duplex */
	BMCR_FULLDPLX,			 /* 3: forced 10bt, full duplex */
	BMCR_SPEED100|BMCR_FULLDPLX,	 /* 4: forced 100bt, full duplex */
	CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5: forced 1000bt, full duplex */
};
233
/* PCI IDs this driver binds to: Sun Cassini and NS Saturn parts. */
static DEFINE_PCI_DEVICE_TABLE(cas_pci_tbl) = {
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ 0, }	/* terminator */
};

MODULE_DEVICE_TABLE(pci, cas_pci_tbl);
243
244static void cas_set_link_modes(struct cas *cp);
245
246static inline void cas_lock_tx(struct cas *cp)
247{
248 int i;
249
250 for (i = 0; i < N_TX_RINGS; i++)
251 spin_lock(&cp->tx_lock[i]);
252}
253
/* Take the main device lock with IRQs disabled, then all TX ring
 * locks.  Pair with cas_unlock_all(). */
static inline void cas_lock_all(struct cas *cp)
{
	spin_lock_irq(&cp->lock);
	cas_lock_tx(cp);
}
259
260
261
262
263
264
265
266
267
/*
 * Grab the main device lock with IRQs disabled (saving the flags) and
 * then every TX ring lock.  A macro rather than an inline function
 * because spin_lock_irqsave() needs the flags lvalue passed by name.
 * Pair with cas_unlock_all_restore().
 */
#define cas_lock_all_save(cp, flags) \
do { \
	struct cas *xxxcp = (cp); \
	spin_lock_irqsave(&xxxcp->lock, flags); \
	cas_lock_tx(xxxcp); \
} while (0)
274
275static inline void cas_unlock_tx(struct cas *cp)
276{
277 int i;
278
279 for (i = N_TX_RINGS; i > 0; i--)
280 spin_unlock(&cp->tx_lock[i - 1]);
281}
282
/* Drop the TX ring locks and then the main device lock, re-enabling
 * IRQs.  Inverse of cas_lock_all(). */
static inline void cas_unlock_all(struct cas *cp)
{
	cas_unlock_tx(cp);
	spin_unlock_irq(&cp->lock);
}
288
/*
 * Inverse of cas_lock_all_save(): drop the TX ring locks, then the
 * main device lock, restoring the saved IRQ flags.
 */
#define cas_unlock_all_restore(cp, flags) \
do { \
	struct cas *xxxcp = (cp); \
	cas_unlock_tx(xxxcp); \
	spin_unlock_irqrestore(&xxxcp->lock, flags); \
} while (0)
295
/* Mask interrupts for one interrupt source ("ring").  Ring 0 owns the
 * primary interrupt mask register; the other rings only have their own
 * mask register on REG_PLUS (Cassini+) hardware. */
static void cas_disable_irq(struct cas *cp, const int ring)
{
	/* all-ones in the primary mask register disables everything */
	if (ring == 0) {
		writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
		return;
	}

	/* per-ring mask registers exist only with the extended reg set */
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
		case 1:
#endif
#ifdef USE_PCI_INTC
		case 2:
#endif
#ifdef USE_PCI_INTD
		case 3:
#endif
			/* rings with a dedicated PCI interrupt keep RX
			 * enabled while all other sources are cleared */
			writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
			       cp->regs + REG_PLUS_INTRN_MASK(ring));
			break;
#endif
		default:
			writel(INTRN_MASK_CLEAR_ALL, cp->regs +
			       REG_PLUS_INTRN_MASK(ring));
			break;
		}
	}
}
328
329static inline void cas_mask_intr(struct cas *cp)
330{
331 int i;
332
333 for (i = 0; i < N_RX_COMP_RINGS; i++)
334 cas_disable_irq(cp, i);
335}
336
/* Unmask interrupts for one interrupt source.  For ring 0, writing
 * INTR_TX_DONE to the primary mask leaves only TX_DONE masked (set
 * bits are masked -- cf. cas_disable_irq writing all-ones); everything
 * else is enabled.  Other rings are only touched on REG_PLUS hardware
 * and only when they have a dedicated PCI interrupt configured. */
static void cas_enable_irq(struct cas *cp, const int ring)
{
	if (ring == 0) {
		writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
		return;
	}

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
		case 1:
#endif
#ifdef USE_PCI_INTC
		case 2:
#endif
#ifdef USE_PCI_INTD
		case 3:
#endif
			writel(INTRN_MASK_RX_EN, cp->regs +
			       REG_PLUS_INTRN_MASK(ring));
			break;
#endif
		default:
			/* other rings stay fully masked */
			break;
		}
	}
}
365
366static inline void cas_unmask_intr(struct cas *cp)
367{
368 int i;
369
370 for (i = 0; i < N_RX_COMP_RINGS; i++)
371 cas_enable_irq(cp, i);
372}
373
/* Feed the chip's entropy register into the kernel entropy pool.
 * Compiled to an empty function unless USE_ENTROPY_DEV is defined
 * (it is #undef'd near the top of this file). */
static inline void cas_entropy_gather(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
		return;

	batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
			    readl(cp->regs + REG_ENTROPY_IV),
			    sizeof(uint64_t)*8);
#endif
}
385
/* Re-arm the on-chip entropy device and sanity-check that it responds;
 * clears the ENTROPY_DEV capability flag if the readback fails.
 * Compiled out unless USE_ENTROPY_DEV is defined. */
static inline void cas_entropy_reset(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
		return;

	writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
	       cp->regs + REG_BIM_LOCAL_DEV_EN);
	writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
	writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);

	/* a zero readback means no working entropy device is present */
	if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
		cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
#endif
}
402
403
404
405
406static u16 cas_phy_read(struct cas *cp, int reg)
407{
408 u32 cmd;
409 int limit = STOP_TRIES_PHY;
410
411 cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
412 cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
413 cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
414 cmd |= MIF_FRAME_TURN_AROUND_MSB;
415 writel(cmd, cp->regs + REG_MIF_FRAME);
416
417
418 while (limit-- > 0) {
419 udelay(10);
420 cmd = readl(cp->regs + REG_MIF_FRAME);
421 if (cmd & MIF_FRAME_TURN_AROUND_LSB)
422 return cmd & MIF_FRAME_DATA_MASK;
423 }
424 return 0xFFFF;
425}
426
427static int cas_phy_write(struct cas *cp, int reg, u16 val)
428{
429 int limit = STOP_TRIES_PHY;
430 u32 cmd;
431
432 cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
433 cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
434 cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
435 cmd |= MIF_FRAME_TURN_AROUND_MSB;
436 cmd |= val & MIF_FRAME_DATA_MASK;
437 writel(cmd, cp->regs + REG_MIF_FRAME);
438
439
440 while (limit-- > 0) {
441 udelay(10);
442 cmd = readl(cp->regs + REG_MIF_FRAME);
443 if (cmd & MIF_FRAME_TURN_AROUND_LSB)
444 return 0;
445 }
446 return -1;
447}
448
449static void cas_phy_powerup(struct cas *cp)
450{
451 u16 ctl = cas_phy_read(cp, MII_BMCR);
452
453 if ((ctl & BMCR_PDOWN) == 0)
454 return;
455 ctl &= ~BMCR_PDOWN;
456 cas_phy_write(cp, MII_BMCR, ctl);
457}
458
459static void cas_phy_powerdown(struct cas *cp)
460{
461 u16 ctl = cas_phy_read(cp, MII_BMCR);
462
463 if (ctl & BMCR_PDOWN)
464 return;
465 ctl |= BMCR_PDOWN;
466 cas_phy_write(cp, MII_BMCR, ctl);
467}
468
469
/* Release one RX page wrapper: tear down its DMA mapping, free the
 * backing pages, then free the wrapper itself.  Always returns 0. */
static int cas_page_free(struct cas *cp, cas_page_t *page)
{
	pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
		       PCI_DMA_FROMDEVICE);
	__free_pages(page->buffer, cp->page_order);
	kfree(page);
	return 0;
}
478
/*
 * Optional accounting of how much of an RX page is in use.  These
 * compile away entirely unless RX_COUNT_BUFFERS is defined (it is
 * #undef'd near the top of this file).
 */
#ifdef RX_COUNT_BUFFERS
#define RX_USED_ADD(x, y) ((x)->used += (y))
#define RX_USED_SET(x, y) ((x)->used = (y))
#else
#define RX_USED_ADD(x, y)
#define RX_USED_SET(x, y)
#endif
486
487
488
489
490static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
491{
492 cas_page_t *page;
493
494 page = kmalloc(sizeof(cas_page_t), flags);
495 if (!page)
496 return NULL;
497
498 INIT_LIST_HEAD(&page->list);
499 RX_USED_SET(page, 0);
500 page->buffer = alloc_pages(flags, cp->page_order);
501 if (!page->buffer)
502 goto page_err;
503 page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
504 cp->page_size, PCI_DMA_FROMDEVICE);
505 return page;
506
507page_err:
508 kfree(page);
509 return NULL;
510}
511
512
/* Reset the spare-buffer bookkeeping: empty in-use and spare lists,
 * with RX_SPARE_COUNT pages wanted on the spare list. */
static void cas_spare_init(struct cas *cp)
{
	spin_lock(&cp->rx_inuse_lock);
	INIT_LIST_HEAD(&cp->rx_inuse_list);
	spin_unlock(&cp->rx_inuse_lock);

	spin_lock(&cp->rx_spare_lock);
	INIT_LIST_HEAD(&cp->rx_spare_list);
	cp->rx_spares_needed = RX_SPARE_COUNT;
	spin_unlock(&cp->rx_spare_lock);
}
524
525
/* Free every buffer on both the spare list and the in-use list.  Each
 * list is detached onto a local head under its lock, then freed with
 * no lock held. */
static void cas_spare_free(struct cas *cp)
{
	struct list_head list, *elem, *tmp;

	/* free spare buffers */
	INIT_LIST_HEAD(&list);
	spin_lock(&cp->rx_spare_lock);
	list_splice_init(&cp->rx_spare_list, &list);
	spin_unlock(&cp->rx_spare_lock);
	list_for_each_safe(elem, tmp, &list) {
		cas_page_free(cp, list_entry(elem, cas_page_t, list));
	}

	INIT_LIST_HEAD(&list);
#if 1
	/* the enabled branch protects the in-use list with its own
	 * dedicated lock; the disabled #else variant reused
	 * rx_spare_lock for it */
	spin_lock(&cp->rx_inuse_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_inuse_lock);
#else
	spin_lock(&cp->rx_spare_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_spare_lock);
#endif
	list_for_each_safe(elem, tmp, &list) {
		cas_page_free(cp, list_entry(elem, cas_page_t, list));
	}
}
557
558
/* Replenish the spare buffer pool: first reclaim pages from the in-use
 * list whose refcount has dropped back to 1, then allocate fresh pages
 * for whatever is still needed.  @flags selects the allocation context
 * (e.g. GFP_ATOMIC when called from cas_page_dequeue()). */
static void cas_spare_recover(struct cas *cp, const gfp_t flags)
{
	struct list_head list, *elem, *tmp;
	int needed, i;

	/* detach the whole in-use list onto a local head so it can be
	 * walked without holding rx_inuse_lock */
	INIT_LIST_HEAD(&list);
	spin_lock(&cp->rx_inuse_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_inuse_lock);

	list_for_each_safe(elem, tmp, &list) {
		cas_page_t *page = list_entry(elem, cas_page_t, list);

		/* page_count > 1 means someone else (presumably the
		 * network stack) still holds a reference to the buffer;
		 * leave it on the local list so it gets spliced back
		 * onto the in-use list below */
		if (page_count(page->buffer) > 1)
			continue;

		/* exclusively ours again: either recycle it as a spare
		 * or free it if no more spares are wanted */
		list_del(elem);
		spin_lock(&cp->rx_spare_lock);
		if (cp->rx_spares_needed > 0) {
			list_add(elem, &cp->rx_spare_list);
			cp->rx_spares_needed--;
			spin_unlock(&cp->rx_spare_lock);
		} else {
			spin_unlock(&cp->rx_spare_lock);
			cas_page_free(cp, page);
		}
	}

	/* put any still-referenced buffers back on the in-use list */
	if (!list_empty(&list)) {
		spin_lock(&cp->rx_inuse_lock);
		list_splice(&list, &cp->rx_inuse_list);
		spin_unlock(&cp->rx_inuse_lock);
	}

	spin_lock(&cp->rx_spare_lock);
	needed = cp->rx_spares_needed;
	spin_unlock(&cp->rx_spare_lock);
	if (!needed)
		return;

	/* still short: allocate fresh pages outside of any lock */
	INIT_LIST_HEAD(&list);
	i = 0;
	while (i < needed) {
		cas_page_t *spare = cas_page_alloc(cp, flags);
		if (!spare)
			break;
		list_add(&spare->list, &list);
		i++;
	}

	spin_lock(&cp->rx_spare_lock);
	list_splice(&list, &cp->rx_spare_list);
	cp->rx_spares_needed -= i;
	spin_unlock(&cp->rx_spare_lock);
}
633
634
/* Pull one page off the spare list, attempting an inline recovery pass
 * if the list is empty.  Every RX_SPARE_RECOVER_VAL dequeues the reset
 * task is scheduled to refill the pool.  Returns NULL if no buffer
 * could be obtained. */
static cas_page_t *cas_page_dequeue(struct cas *cp)
{
	struct list_head *entry;
	int recover;

	spin_lock(&cp->rx_spare_lock);
	if (list_empty(&cp->rx_spare_list)) {
		/* try a quick atomic recovery before giving up */
		spin_unlock(&cp->rx_spare_lock);
		cas_spare_recover(cp, GFP_ATOMIC);
		spin_lock(&cp->rx_spare_lock);
		if (list_empty(&cp->rx_spare_list)) {
			netif_err(cp, rx_err, cp->dev,
				  "no spare buffers available\n");
			spin_unlock(&cp->rx_spare_lock);
			return NULL;
		}
	}

	entry = cp->rx_spare_list.next;
	list_del(entry);
	recover = ++cp->rx_spares_needed;
	spin_unlock(&cp->rx_spare_lock);

	/* every RX_SPARE_RECOVER_VAL-th dequeue, kick off a refill via
	 * the reset task (RX_SPARE_RECOVER_VAL is assumed power of 2) */
	if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
#if 1
		/* enabled branch: per-cause atomic pending counters;
		 * the #else variant used a single state value instead */
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_spare);
		schedule_work(&cp->reset_task);
#else
		atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
		schedule_work(&cp->reset_task);
#endif
	}
	return list_entry(entry, cas_page_t, list);
}
672
673
/* Enable or disable MIF hardware polling of the PHY's BMSR register.
 * While enabled, only BMSR link-status and autoneg-complete changes
 * are left unmasked in REG_MIF_MASK. */
static void cas_mif_poll(struct cas *cp, const int enable)
{
	u32 cfg;

	cfg = readl(cp->regs + REG_MIF_CFG);
	/* keep only the MDIO lane sense bits from the current config */
	cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);

	if (cp->phy_type & CAS_PHY_MII_MDIO1)
		cfg |= MIF_CFG_PHY_SELECT;

	/* program which register/PHY to poll */
	if (enable) {
		cfg |= MIF_CFG_POLL_EN;
		cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
		cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
	}
	/* when polling, unmask only LSTATUS/ANEGCOMPLETE; otherwise
	 * mask everything */
	writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
	       cp->regs + REG_MIF_MASK);
	writel(cfg, cp->regs + REG_MIF_CFG);
}
694
695
/* Start (or restart) link negotiation.  With @ep the requested ethtool
 * settings are translated into BMCR-style bits in cp->link_cntl; with
 * ep == NULL the current link_cntl is simply re-applied.  If the
 * configuration changed while the link was not down, a full chip reset
 * is scheduled instead of reprogramming in place.  Caller is expected
 * to hold the appropriate device locks. */
static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
{
	u16 ctl;
#if 1
	int lcntl;
	int changed = 0;
	int oldstate = cp->lstate;
	int link_was_not_down = !(oldstate == link_down);
#endif

	/* no new settings: just restart with the current config */
	if (!ep)
		goto start_aneg;
	lcntl = cp->link_cntl;
	if (ep->autoneg == AUTONEG_ENABLE)
		cp->link_cntl = BMCR_ANENABLE;
	else {
		/* translate forced speed/duplex into BMCR bits */
		cp->link_cntl = 0;
		if (ep->speed == SPEED_100)
			cp->link_cntl |= BMCR_SPEED100;
		else if (ep->speed == SPEED_1000)
			cp->link_cntl |= CAS_BMCR_SPEED1000;
		if (ep->duplex == DUPLEX_FULL)
			cp->link_cntl |= BMCR_FULLDPLX;
	}
#if 1
	changed = (lcntl != cp->link_cntl);
#endif
start_aneg:
	if (cp->lstate == link_up) {
		netdev_info(cp->dev, "PCS link down\n");
	} else {
		if (changed) {
			netdev_info(cp->dev, "link configuration changed\n");
		}
	}
	cp->lstate = link_down;
	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
	if (!cp->hw_running)
		return;
#if 1
	/* mimic a normal link-down if we were up; a changed config
	 * while the link was not down forces a full reset through the
	 * reset task rather than reprogramming the PHY/PCS here */
	if (oldstate == link_up)
		netif_carrier_off(cp->dev);
	if (changed && link_was_not_down) {
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_all);
		schedule_work(&cp->reset_task);
		cp->timer_ticks = 0;
		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
		return;
	}
#endif
	if (cp->phy_type & CAS_PHY_SERDES) {
		/* SERDES: program the PCS MII control register directly */
		u32 val = readl(cp->regs + REG_PCS_MII_CTRL);

		if (cp->link_cntl & BMCR_ANENABLE) {
			val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
			cp->lstate = link_aneg;
		} else {
			if (cp->link_cntl & BMCR_FULLDPLX)
				val |= PCS_MII_CTRL_DUPLEX;
			val &= ~PCS_MII_AUTONEG_EN;
			cp->lstate = link_force_ok;
		}
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		writel(val, cp->regs + REG_PCS_MII_CTRL);

	} else {
		/* MII PHY: pause MIF polling while rewriting BMCR */
		cas_mif_poll(cp, 0);
		ctl = cas_phy_read(cp, MII_BMCR);
		ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
			 CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
		ctl |= cp->link_cntl;
		if (ctl & BMCR_ANENABLE) {
			ctl |= BMCR_ANRESTART;
			cp->lstate = link_aneg;
		} else {
			cp->lstate = link_force_ok;
		}
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		cas_phy_write(cp, MII_BMCR, ctl);
		cas_mif_poll(cp, 1);
	}

	cp->timer_ticks = 0;
	mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
}
792
793
794static int cas_reset_mii_phy(struct cas *cp)
795{
796 int limit = STOP_TRIES_PHY;
797 u16 val;
798
799 cas_phy_write(cp, MII_BMCR, BMCR_RESET);
800 udelay(100);
801 while (--limit) {
802 val = cas_phy_read(cp, MII_BMCR);
803 if ((val & BMCR_RESET) == 0)
804 break;
805 udelay(10);
806 }
807 return limit <= 0;
808}
809
/* Fetch the DP83065 PHY firmware image ("sun/cassini.bin") via the
 * kernel firmware loader and cache it in cp->fw_data for the later
 * download in cas_saturn_firmware_load().  The first two bytes of the
 * image hold the little-endian load address; the rest is payload.
 * Returns 0 on success or when no firmware is required, negative
 * errno otherwise. */
static int cas_saturn_firmware_init(struct cas *cp)
{
	const struct firmware *fw;
	const char fw_name[] = "sun/cassini.bin";
	int err;

	/* only the NS DP83065 PHY takes downloadable firmware */
	if (PHY_NS_DP83065 != cp->phy_id)
		return 0;

	err = request_firmware(&fw, fw_name, &cp->pdev->dev);
	if (err) {
		pr_err("Failed to load firmware \"%s\"\n",
		       fw_name);
		return err;
	}
	/* image must at least contain the 2-byte load address */
	if (fw->size < 2) {
		pr_err("bogus length %zu in \"%s\"\n",
		       fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}
	cp->fw_load_addr= fw->data[1] << 8 | fw->data[0];
	cp->fw_size = fw->size - 2;
	cp->fw_data = vmalloc(cp->fw_size);
	if (!cp->fw_data) {
		err = -ENOMEM;
		pr_err("\"%s\" Failed %d\n", fw_name, err);
		goto out;
	}
	memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
out:
	release_firmware(fw);
	return err;
}
844
/* Download the cached firmware image into the DP83065 PHY over MII and
 * start it.  The register/value pairs below follow the vendor download
 * sequence; their individual meanings are not documented here -- do
 * not reorder without the DP83065 programming reference. */
static void cas_saturn_firmware_load(struct cas *cp)
{
	int i;

	cas_phy_powerdown(cp);

	/* expanded memory access mode off for the setup writes */
	cas_phy_write(cp, DP83065_MII_MEM, 0x0);

	/* vendor-defined setup sequence (REGE selects an address,
	 * REGD carries the data byte) */
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
	cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
	cas_phy_write(cp, DP83065_MII_REGD, 0x82);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
	cas_phy_write(cp, DP83065_MII_REGD, 0x0);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
	cas_phy_write(cp, DP83065_MII_REGD, 0x39);

	/* stream the firmware bytes to fw_load_addr */
	cas_phy_write(cp, DP83065_MII_MEM, 0x1);
	cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
	for (i = 0; i < cp->fw_size; i++)
		cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);

	/* kick the PHY to run the newly loaded firmware */
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
	cas_phy_write(cp, DP83065_MII_REGD, 0x1);
}
874
875
876
/* Initialize the link hardware.  For MII/GMII PHYs this applies
 * per-chip errata workarounds, sets the advertisement registers
 * (10/100 + pause; 1000FD on gigabit-capable parts) and leaves
 * autonegotiation to be started later.  For SERDES parts it resets
 * and configures the on-chip PCS block instead. */
static void cas_phy_init(struct cas *cp)
{
	u16 val;

	/* MII/GMII PHY path */
	if (CAS_PHY_MII(cp->phy_type)) {
		writel(PCS_DATAPATH_MODE_MII,
		       cp->regs + REG_PCS_DATAPATH_MODE);

		cas_mif_poll(cp, 0);
		cas_reset_mii_phy(cp);

		if (PHY_LUCENT_B0 == cp->phy_id) {
			/* Lucent B0 workaround sequence -- magic values
			 * from the vendor errata */
			cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
			cas_phy_write(cp, MII_BMCR, 0x00f1);
			cas_phy_write(cp, LUCENT_MII_REG, 0x0);

		} else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
			/* Broadcom B0 workaround: vendor-defined
			 * register/value sequence, do not reorder */
			cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);

		} else if (PHY_BROADCOM_5411 == cp->phy_id) {
			/* double read appears deliberate; clear bit 7
			 * if set (vendor link workaround) */
			val = cas_phy_read(cp, BROADCOM_MII_REG4);
			val = cas_phy_read(cp, BROADCOM_MII_REG4);
			if (val & 0x0080) {
				cas_phy_write(cp, BROADCOM_MII_REG4,
					      val & ~0x0080);
			}

		} else if (cp->cas_flags & CAS_FLAG_SATURN) {
			writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
			       SATURN_PCFG_FSI : 0x0,
			       cp->regs + REG_SATURN_PCFG);

			/* DP83065 needs its firmware installed before
			 * the PHY is powered up */
			if (PHY_NS_DP83065 == cp->phy_id) {
				cas_saturn_firmware_load(cp);
			}
			cas_phy_powerup(cp);
		}

		/* disable autoneg while we set up advertisement */
		val = cas_phy_read(cp, MII_BMCR);
		val &= ~BMCR_ANENABLE;
		cas_phy_write(cp, MII_BMCR, val);
		udelay(10);

		/* advertise 10/100 in both duplexes plus pause */
		cas_phy_write(cp, MII_ADVERTISE,
			      cas_phy_read(cp, MII_ADVERTISE) |
			      (ADVERTISE_10HALF | ADVERTISE_10FULL |
			       ADVERTISE_100HALF | ADVERTISE_100FULL |
			       CAS_ADVERTISE_PAUSE |
			       CAS_ADVERTISE_ASYM_PAUSE));

		if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
			/* 1000BT: full duplex only is advertised --
			 * half is explicitly cleared */
			val = cas_phy_read(cp, CAS_MII_1000_CTRL);
			val &= ~CAS_ADVERTISE_1000HALF;
			val |= CAS_ADVERTISE_1000FULL;
			cas_phy_write(cp, CAS_MII_1000_CTRL, val);
		}

	} else {
		/* SERDES path: reset and configure the PCS block */
		u32 val;
		int limit;

		writel(PCS_DATAPATH_MODE_SERDES,
		       cp->regs + REG_PCS_DATAPATH_MODE);

		/* Saturn: clear the pin config for serdes operation */
		if (cp->cas_flags & CAS_FLAG_SATURN)
			writel(0, cp->regs + REG_SATURN_PCFG);

		/* self-clearing PCS reset */
		val = readl(cp->regs + REG_PCS_MII_CTRL);
		val |= PCS_MII_RESET;
		writel(val, cp->regs + REG_PCS_MII_CTRL);

		limit = STOP_TRIES;
		while (--limit > 0) {
			udelay(10);
			if ((readl(cp->regs + REG_PCS_MII_CTRL) &
			     PCS_MII_RESET) == 0)
				break;
		}
		if (limit <= 0)
			netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
				    readl(cp->regs + REG_PCS_STATE_MACHINE));

		/* PCS disabled while changing advertisement config */
		writel(0x0, cp->regs + REG_PCS_CFG);

		/* advertise full duplex + both pause types, no half */
		val = readl(cp->regs + REG_PCS_MII_ADVERT);
		val &= ~PCS_MII_ADVERT_HD;
		val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
			PCS_MII_ADVERT_ASYM_PAUSE);
		writel(val, cp->regs + REG_PCS_MII_ADVERT);

		/* re-enable PCS */
		writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);

		/* enable sync detection on the serdes */
		writel(PCS_SERDES_CTRL_SYNCD_EN,
		       cp->regs + REG_PCS_SERDES_CTRL);
	}
}
1004
1005
/* Evaluate the PCS link state and update cp->lstate / the carrier
 * accordingly.  Returns 1 when the caller should schedule a chip
 * reset (linkdown workaround requested, or the SERDES state machine
 * looks wedged on plain Cassini), 0 otherwise. */
static int cas_pcs_link_check(struct cas *cp)
{
	u32 stat, state_machine;
	int retval = 0;

	/* the link status bit latches low: read it twice so that a
	 * transition back to link-up is not missed */
	stat = readl(cp->regs + REG_PCS_MII_STATUS);
	if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
		stat = readl(cp->regs + REG_PCS_MII_STATUS);

	/* remote fault is only reported when autoneg has completed */
	if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
		     PCS_MII_STATUS_REMOTE_FAULT)) ==
	    (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
		netif_info(cp, link, cp->dev, "PCS RemoteFault\n");

	/* cross-check (and override) the status bit against the PCS
	 * state machine register */
	state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
	if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
		stat &= ~PCS_MII_STATUS_LINK_STATUS;
	} else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
		stat |= PCS_MII_STATUS_LINK_STATUS;
	}

	if (stat & PCS_MII_STATUS_LINK_STATUS) {
		/* link came up: only act once the device is opened */
		if (cp->lstate != link_up) {
			if (cp->opened) {
				cp->lstate = link_up;
				cp->link_transition = LINK_TRANSITION_LINK_UP;

				cas_set_link_modes(cp);
				netif_carrier_on(cp->dev);
			}
		}
	} else if (cp->lstate == link_up) {
		/* up -> down transition */
		cp->lstate = link_down;
		if (link_transition_timeout != 0 &&
		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
		    !cp->link_transition_jiffies_valid) {
			/* request a workaround reset and start the
			 * hold-off window (jiffies_valid) during which
			 * further reset requests are suppressed */
			retval = 1;
			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
			cp->link_transition_jiffies = jiffies;
			cp->link_transition_jiffies_valid = 1;
		} else {
			cp->link_transition = LINK_TRANSITION_ON_FAILURE;
		}
		netif_carrier_off(cp->dev);
		if (cp->opened)
			netif_info(cp, link, cp->dev, "PCS link down\n");

		/* plain Cassini only (REG_PLUS chips skip this): a
		 * SERDES state readback of 0x03 also forces a reset --
		 * presumably a wedged-state signature; see chip docs */
		if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {

			stat = readl(cp->regs + REG_PCS_SERDES_STATE);
			if (stat == 0x03)
				return 1;
		}
	} else if (cp->lstate == link_down) {
		/* still down: request a reset unless one is pending or
		 * the hold-off window is active */
		if (link_transition_timeout != 0 &&
		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
		    !cp->link_transition_jiffies_valid) {
			retval = 1;
			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
			cp->link_transition_jiffies = jiffies;
			cp->link_transition_jiffies_valid = 1;
		} else {
			cp->link_transition = LINK_TRANSITION_STILL_FAILED;
		}
	}

	return retval;
}
1109
1110static int cas_pcs_interrupt(struct net_device *dev,
1111 struct cas *cp, u32 status)
1112{
1113 u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);
1114
1115 if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
1116 return 0;
1117 return cas_pcs_link_check(cp);
1118}
1119
/* Service a TX MAC interrupt: log fatal conditions and fold the
 * hardware collision/error counter overflows into net_stats[0].
 * Always returns 0. */
static int cas_txmac_interrupt(struct net_device *dev,
			       struct cas *cp, u32 status)
{
	u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);

	if (!txmac_stat)
		return 0;

	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
		     "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);

	/* a lone defer-timer expiration is routine; nothing to do */
	if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
	    !(txmac_stat & ~MAC_TX_DEFER_TIMER))
		return 0;

	spin_lock(&cp->stat_lock[0]);
	if (txmac_stat & MAC_TX_UNDERRUN) {
		netdev_err(dev, "TX MAC xmit underrun\n");
		cp->net_stats[0].tx_fifo_errors++;
	}

	if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
		netdev_err(dev, "TX MAC max packet size error\n");
		cp->net_stats[0].tx_errors++;
	}

	/* the 0x10000 increments suggest these bits flag 16-bit
	 * hardware counters wrapping -- TODO confirm against the
	 * Cassini register documentation */
	if (txmac_stat & MAC_TX_COLL_NORMAL)
		cp->net_stats[0].collisions += 0x10000;

	if (txmac_stat & MAC_TX_COLL_EXCESS) {
		cp->net_stats[0].tx_aborted_errors += 0x10000;
		cp->net_stats[0].collisions += 0x10000;
	}

	if (txmac_stat & MAC_TX_COLL_LATE) {
		cp->net_stats[0].tx_aborted_errors += 0x10000;
		cp->net_stats[0].collisions += 0x10000;
	}
	spin_unlock(&cp->stat_lock[0]);

	/* remaining status bits are intentionally ignored */
	return 0;
}
1171
1172static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
1173{
1174 cas_hp_inst_t *inst;
1175 u32 val;
1176 int i;
1177
1178 i = 0;
1179 while ((inst = firmware) && inst->note) {
1180 writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);
1181
1182 val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
1183 val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
1184 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);
1185
1186 val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
1187 val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
1188 val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
1189 val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
1190 val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
1191 val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
1192 val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
1193 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);
1194
1195 val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
1196 val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
1197 val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
1198 val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
1199 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
1200 ++firmware;
1201 ++i;
1202 }
1203}
1204
/* Program the RX DMA engine: descriptor/completion ring bases, pause
 * thresholds, interrupt blanking, RX page sizing and the header
 * parser.  Ring base addresses are computed as offsets of the ring
 * arrays within the init block, added to its DMA address. */
static void cas_init_rx_dma(struct cas *cp)
{
	u64 desc_dma = cp->block_dvma;
	u32 val;
	int i, size;

	/* select swivel mode and the desc/comp ring configuration */
	val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
	val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
	val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
	if ((N_RX_DESC_RINGS > 1) &&
	    (cp->cas_flags & CAS_FLAG_REG_PLUS))
		val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
	writel(val, cp->regs + REG_RX_CFG);

	/* descriptor ring 0 base; the initial kick leaves a 4-entry
	 * gap between producer and consumer */
	val = (unsigned long) cp->init_rxds[0] -
		(unsigned long) cp->init_block;
	writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
	writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		/* second descriptor ring (REG_PLUS chips only) */
		val = (unsigned long) cp->init_rxds[1] -
			(unsigned long) cp->init_block;
		writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
		writel((desc_dma + val) & 0xffffffff, cp->regs +
		       REG_PLUS_RX_DB1_LOW);
		writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
		       REG_PLUS_RX_KICK1);
	}

	/* completion ring 0 base */
	val = (unsigned long) cp->init_rxcs[0] -
		(unsigned long) cp->init_block;
	writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		/* remaining completion rings (REG_PLUS chips only) */
		for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
			val = (unsigned long) cp->init_rxcs[i] -
				(unsigned long) cp->init_block;
			writel((desc_dma + val) >> 32, cp->regs +
			       REG_PLUS_RX_CBN_HI(i));
			writel((desc_dma + val) & 0xffffffff, cp->regs +
			       REG_PLUS_RX_CBN_LOW(i));
		}
	}

	/* read the status-alias registers and clear the RX bits --
	 * presumably flushes stale latched status before enabling */
	readl(cp->regs + REG_INTR_STATUS_ALIAS);
	writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		for (i = 1; i < N_RX_COMP_RINGS; i++)
			readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));

		/* ring 1 clears BUF_UNAVAIL_1 as well; rings >= 2 only
		 * clear RX_DONE_ALT */
		if (N_RX_COMP_RINGS > 1)
			writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
			       cp->regs + REG_PLUS_ALIASN_CLEAR(1));

		for (i = 2; i < N_RX_COMP_RINGS; i++)
			writel(INTR_RX_DONE_ALT,
			       cp->regs + REG_PLUS_ALIASN_CLEAR(i));
	}

	/* pause on/off thresholds in RX_PAUSE_THRESH_QUANTUM units */
	val = CAS_BASE(RX_PAUSE_THRESH_OFF,
		       cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
	val |= CAS_BASE(RX_PAUSE_THRESH_ON,
			cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
	writel(val, cp->regs + REG_RX_PAUSE_THRESH);

	/* zero out the 64-entry RX table */
	for (i = 0; i < 64; i++) {
		writel(i, cp->regs + REG_RX_TABLE_ADDR);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
	}

	/* reset the FIFO address registers for normal operation */
	writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
	writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);

	/* interrupt mitigation (blanking): time/packet thresholds, or
	 * disabled entirely when USE_RX_BLANK is not defined */
#ifdef USE_RX_BLANK
	val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
	val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
	writel(val, cp->regs + REG_RX_BLANK);
#else
	writel(0x0, cp->regs + REG_RX_BLANK);
#endif

	/* almost-empty thresholds for the completion ring and, on
	 * REG_PLUS parts, the second free-descriptor ring */
	val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
	writel(val, cp->regs + REG_RX_AE_THRESH);
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
		writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
	}

	/* random early detection disabled */
	writel(0x0, cp->regs + REG_RX_RED);

	/* encode the hardware page size: 0x1000->1, 0x2000->2,
	 * 0x4000->3, anything else -> 0 */
	val = 0;
	if (cp->page_size == 0x1000)
		val = 0x1;
	else if (cp->page_size == 0x2000)
		val = 0x2;
	else if (cp->page_size == 0x4000)
		val = 0x3;

	/* buffer budget: mtu plus 64 bytes of slack, capped at the
	 * page size */
	size = cp->dev->mtu + 64;
	if (size > cp->page_size)
		size = cp->page_size;

	/* mtu stride bucket: 1K/2K/4K/8K */
	if (size <= 0x400)
		i = 0x0;
	else if (size <= 0x800)
		i = 0x1;
	else if (size <= 0x1000)
		i = 0x2;
	else
		i = 0x3;

	cp->mtu_stride = 1 << (i + 10);
	val = CAS_BASE(RX_PAGE_SIZE, val);
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
	writel(val, cp->regs + REG_RX_PAGE_SIZE);

	/* header parser: skipped entirely when running the null
	 * program, otherwise enabled with the TCP threshold */
	if (CAS_HP_FIRMWARE == cas_prog_null)
		return;

	val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
	val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
	val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
	writel(val, cp->regs + REG_HP_CFG);
}
1362
/* Reset one RX completion descriptor to its empty state: all zeros
 * except the RX_COMP4_ZERO marker in word4. */
static inline void cas_rxc_init(struct cas_rx_comp *rxc)
{
	memset(rxc, 0, sizeof(*rxc));
	rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
}
1368
1369
1370
1371
1372
1373static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
1374{
1375 cas_page_t *page = cp->rx_pages[1][index];
1376 cas_page_t *new;
1377
1378 if (page_count(page->buffer) == 1)
1379 return page;
1380
1381 new = cas_page_dequeue(cp);
1382 if (new) {
1383 spin_lock(&cp->rx_inuse_lock);
1384 list_add(&page->list, &cp->rx_inuse_list);
1385 spin_unlock(&cp->rx_inuse_lock);
1386 }
1387 return new;
1388}
1389
1390
1391static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
1392 const int index)
1393{
1394 cas_page_t **page0 = cp->rx_pages[0];
1395 cas_page_t **page1 = cp->rx_pages[1];
1396
1397
1398 if (page_count(page0[index]->buffer) > 1) {
1399 cas_page_t *new = cas_page_spare(cp, index);
1400 if (new) {
1401 page1[index] = page0[index];
1402 page0[index] = new;
1403 }
1404 }
1405 RX_USED_SET(page0[index], 0);
1406 return page0[index];
1407}
1408
/* Reset RX descriptor ring 0 to a pristine state: flush any skbs
 * queued on the flow-reassembly lists, rebuild every descriptor with a
 * fresh page, and reset the software indices.  Called with rx stopped.
 */
static void cas_clean_rxds(struct cas *cp)
{
	struct cas_rx_desc *rxd = cp->init_rxds[0];
	int i, size;

	/* release any queued flow-reassembly skbs up the stack */
	for (i = 0; i < N_RX_FLOWS; i++) {
		struct sk_buff *skb;
		while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
			cas_skb_release(skb);
		}
	}

	/* initialize descriptors: each slot gets a page via page_swap */
	size = RX_DESC_RINGN_SIZE(0);
	for (i = 0; i < size; i++) {
		cas_page_t *page = cas_page_swap(cp, 0, i);
		rxd[i].buffer = cpu_to_le64(page->dma_addr);
		rxd[i].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
					   CAS_BASE(RX_INDEX_RING, 0));
	}

	/* software head sits 4 entries back from the end of the ring */
	cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4;
	cp->rx_last[0] = 0;
	cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
}
1436
1437static void cas_clean_rxcs(struct cas *cp)
1438{
1439 int i, j;
1440
1441
1442 memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
1443 memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
1444 for (i = 0; i < N_RX_COMP_RINGS; i++) {
1445 struct cas_rx_comp *rxc = cp->init_rxcs[i];
1446 for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
1447 cas_rxc_init(rxc + j);
1448 }
1449 }
1450}
1451
1452#if 0
1453
1454
1455
1456
1457
1458
/* (currently compiled out via #if 0)
 * Reset only the RX MAC/DMA path and bring it back up without touching
 * the rest of the chip.  Returns 1 when any stage fails to quiesce,
 * in which case the caller must reset the whole chip; 0 on success.
 */
static int cas_rxmac_reset(struct cas *cp)
{
	struct net_device *dev = cp->dev;
	int limit;
	u32 val;

	/* First, disable the RX MAC and poll until the enable bit drops */
	writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
		return 1;
	}

	/* Second, disable RX DMA */
	writel(0, cp->regs + REG_RX_CFG);
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
		return 1;
	}

	/* give in-flight DMA time to drain before resetting */
	mdelay(5);

	/* Third, issue the RX software reset and wait for it to self-clear */
	writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
		return 1;
	}

	/* reset driver-side rx state */
	cas_clean_rxds(cp);
	cas_clean_rxcs(cp);

	/* reprogram the rx dma engine */
	cas_init_rx_dma(cp);

	/* re-enable RX DMA, unmask frame-received, re-enable the RX MAC */
	val = readl(cp->regs + REG_RX_CFG);
	writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
	val = readl(cp->regs + REG_MAC_RX_CFG);
	writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	return 0;
}
1518#endif
1519
1520static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
1521 u32 status)
1522{
1523 u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);
1524
1525 if (!stat)
1526 return 0;
1527
1528 netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);
1529
1530
1531 spin_lock(&cp->stat_lock[0]);
1532 if (stat & MAC_RX_ALIGN_ERR)
1533 cp->net_stats[0].rx_frame_errors += 0x10000;
1534
1535 if (stat & MAC_RX_CRC_ERR)
1536 cp->net_stats[0].rx_crc_errors += 0x10000;
1537
1538 if (stat & MAC_RX_LEN_ERR)
1539 cp->net_stats[0].rx_length_errors += 0x10000;
1540
1541 if (stat & MAC_RX_OVERFLOW) {
1542 cp->net_stats[0].rx_over_errors++;
1543 cp->net_stats[0].rx_fifo_errors++;
1544 }
1545
1546
1547
1548
1549 spin_unlock(&cp->stat_lock[0]);
1550 return 0;
1551}
1552
/* MAC control interrupt: only pause-frame bookkeeping is done here.
 * Always returns 0 (never forces a chip reset).
 */
static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
			     u32 status)
{
	u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);

	if (!stat)
		return 0;

	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
		     "mac interrupt, stat: 0x%x\n", stat);

	/* count the number of times we entered the pause state */
	if (stat & MAC_CTRL_PAUSE_STATE)
		cp->pause_entered++;

	/* upper 16 bits of the status register hold the last received
	 * pause time value — presumably; verify against chip docs */
	if (stat & MAC_CTRL_PAUSE_RECEIVED)
		cp->pause_last_time_recvd = (stat >> 16);

	return 0;
}
1576
1577
1578
/* Link is down and autonegotiation is not getting us there: step the
 * forced-mode fallback state machine one notch toward slower settings.
 * Called from the link timer path; always returns 0.
 */
static inline int cas_mdio_link_not_up(struct cas *cp)
{
	u16 val;

	switch (cp->lstate) {
	case link_force_ret:
		/* the autoneg retry after a forced-mode link failed;
		 * reinstate the saved forced BMCR setting */
		netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
		cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
		cp->timer_ticks = 5;
		cp->lstate = link_force_ok;
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		break;

	case link_aneg:
		val = cas_phy_read(cp, MII_BMCR);

		/* autoneg gave up: switch to the fastest forced mode —
		 * 1000 full if the chip is gigabit capable, else 100 full */
		val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
		val |= BMCR_FULLDPLX;
		val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
			CAS_BMCR_SPEED1000 : BMCR_SPEED100;
		cas_phy_write(cp, MII_BMCR, val);
		cp->timer_ticks = 5;
		cp->lstate = link_force_try;
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		break;

	case link_force_try:
		/* downgrade path: 1000 -> 100 full -> 100 half -> 10 half */
		val = cas_phy_read(cp, MII_BMCR);
		cp->timer_ticks = 5;
		if (val & CAS_BMCR_SPEED1000) { /* gigabit -> 100 full */
			val &= ~CAS_BMCR_SPEED1000;
			val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
			cas_phy_write(cp, MII_BMCR, val);
			break;
		}

		if (val & BMCR_SPEED100) {
			if (val & BMCR_FULLDPLX) /* 100 full -> 100 half */
				val &= ~BMCR_FULLDPLX;
			else { /* 100 half -> 10 half */
				val &= ~BMCR_SPEED100;
			}
			cas_phy_write(cp, MII_BMCR, val);
			break;
		}
		/* fall through — nothing slower left to try */
	default:
		break;
	}
	return 0;
}
1633
1634
1635
/* Act on the latest BMSR value polled from the PHY.  Returns 1 when
 * the caller should restart its link timer, 0 otherwise.
 */
static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
{
	int restart;

	if (bmsr & BMSR_LSTATUS) {
		/* If we got a link while in a forced mode that was only
		 * entered as an autoneg fallback, save the forced BMCR
		 * and retry autoneg once to see whether the partner will
		 * now negotiate.
		 */
		if ((cp->lstate == link_force_try) &&
		    (cp->link_cntl & BMCR_ANENABLE)) {
			cp->lstate = link_force_ret;
			cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
			cas_mif_poll(cp, 0);
			cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
			cp->timer_ticks = 5;
			if (cp->opened)
				netif_info(cp, link, cp->dev,
					   "Got link after fallback, retrying autoneg once...\n");
			cas_phy_write(cp, MII_BMCR,
				      cp->link_fcntl | BMCR_ANENABLE |
				      BMCR_ANRESTART);
			cas_mif_poll(cp, 1);

		} else if (cp->lstate != link_up) {
			/* ordinary link-up transition */
			cp->lstate = link_up;
			cp->link_transition = LINK_TRANSITION_LINK_UP;

			if (cp->opened) {
				cas_set_link_modes(cp);
				netif_carrier_on(cp->dev);
			}
		}
		return 0;
	}

	/* link is down: if we previously had a link, record the loss and
	 * ask the caller to restart its timer; otherwise keep stepping
	 * the forced-mode fallback state machine every 10 ticks.
	 */
	restart = 0;
	if (cp->lstate == link_up) {
		cp->lstate = link_down;
		cp->link_transition = LINK_TRANSITION_LINK_DOWN;

		netif_carrier_off(cp->dev);
		if (cp->opened)
			netif_info(cp, link, cp->dev, "Link down\n");
		restart = 1;

	} else if (++cp->timer_ticks > 10)
		cas_mdio_link_not_up(cp);

	return restart;
}
1691
1692static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
1693 u32 status)
1694{
1695 u32 stat = readl(cp->regs + REG_MIF_STATUS);
1696 u16 bmsr;
1697
1698
1699 if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
1700 return 0;
1701
1702 bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
1703 return cas_mii_link_check(cp, bmsr);
1704}
1705
/* PCI error interrupt: decode and log the cause, clearing latched PCI
 * config-space error bits when present.  Returns 1 when an error was
 * seen so the caller resets the chip, 0 when no error was latched.
 */
static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
			     u32 status)
{
	u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);

	if (!stat)
		return 0;

	netdev_err(dev, "PCI error [%04x:%04x]",
		   stat, readl(cp->regs + REG_BIM_DIAG));

	/* only reported on non-REG_PLUS (original cassini) parts */
	if ((stat & PCI_ERR_BADACK) &&
	    ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
		pr_cont(" <No ACK64# during ABS64 cycle>");

	if (stat & PCI_ERR_DTRTO)
		pr_cont(" <Delayed transaction timeout>");
	if (stat & PCI_ERR_OTHER)
		pr_cont(" <other>");
	if (stat & PCI_ERR_BIM_DMA_WRITE)
		pr_cont(" <BIM DMA 0 write req>");
	if (stat & PCI_ERR_BIM_DMA_READ)
		pr_cont(" <BIM DMA 0 read req>");
	pr_cont("\n");

	if (stat & PCI_ERR_OTHER) {
		u16 cfg;

		/* the chip only says "other"; interrogate PCI config
		 * space for the real cause */
		pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
		netdev_err(dev, "Read PCI cfg space status [%04x]\n", cfg);
		if (cfg & PCI_STATUS_PARITY)
			netdev_err(dev, "PCI parity error detected\n");
		if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
			netdev_err(dev, "PCI target abort\n");
		if (cfg & PCI_STATUS_REC_TARGET_ABORT)
			netdev_err(dev, "PCI master acks target abort\n");
		if (cfg & PCI_STATUS_REC_MASTER_ABORT)
			netdev_err(dev, "PCI master abort\n");
		if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
			netdev_err(dev, "PCI system error SERR#\n");
		if (cfg & PCI_STATUS_DETECTED_PARITY)
			netdev_err(dev, "PCI parity error\n");

		/* write the error bits back to clear them */
		cfg &= (PCI_STATUS_PARITY |
			PCI_STATUS_SIG_TARGET_ABORT |
			PCI_STATUS_REC_TARGET_ABORT |
			PCI_STATUS_REC_MASTER_ABORT |
			PCI_STATUS_SIG_SYSTEM_ERROR |
			PCI_STATUS_DETECTED_PARITY);
		pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
	}

	/* for all PCI errors, reset the chip */
	return 1;
}
1766
1767
1768
1769
1770
1771
/* Dispatch the abnormal (error) interrupt causes to their handlers.
 * Returns 1 when a chip reset has been scheduled (work item queued),
 * 0 when every cause was handled in place.
 */
static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
			    u32 status)
{
	if (status & INTR_RX_TAG_ERROR) {
		/* corrupt RX tag framing */
		netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
			     "corrupt rx tag framing\n");
		spin_lock(&cp->stat_lock[0]);
		cp->net_stats[0].rx_errors++;
		spin_unlock(&cp->stat_lock[0]);
		goto do_reset;
	}

	if (status & INTR_RX_LEN_MISMATCH) {
		/* length mismatch on an rx frame */
		netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
			     "length mismatch for rx frame\n");
		spin_lock(&cp->stat_lock[0]);
		cp->net_stats[0].rx_errors++;
		spin_unlock(&cp->stat_lock[0]);
		goto do_reset;
	}

	/* each sub-handler returns nonzero when the chip needs a reset */
	if (status & INTR_PCS_STATUS) {
		if (cas_pcs_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_TX_MAC_STATUS) {
		if (cas_txmac_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_RX_MAC_STATUS) {
		if (cas_rxmac_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_MAC_CTRL_STATUS) {
		if (cas_mac_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_MIF_STATUS) {
		if (cas_mif_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_PCI_ERROR_STATUS) {
		if (cas_pci_interrupt(dev, cp, status))
			goto do_reset;
	}
	return 0;

do_reset:
#if 1
	/* the actual reset runs from process context via reset_task */
	atomic_inc(&cp->reset_task_pending);
	atomic_inc(&cp->reset_task_pending_all);
	netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
	schedule_work(&cp->reset_task);
#else
	atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
	netdev_err(dev, "reset called in cas_abnormal_irq\n");
	schedule_work(&cp->reset_task);
#endif
	return 1;
}
1839
1840
1841
1842
1843#define CAS_TABORT(x) (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
1844#define CAS_ROUND_PAGE(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)
1845static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
1846 const int len)
1847{
1848 unsigned long off = addr + len;
1849
1850 if (CAS_TABORT(cp) == 1)
1851 return 0;
1852 if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
1853 return 0;
1854 return TX_TARGET_ABORT_LEN;
1855}
1856
/* Reclaim completed TX descriptors on @ring up to (but not including)
 * @limit: unmap each fragment, free the skb, update stats, and wake
 * the tx queue once enough descriptors are free again.
 */
static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
{
	struct cas_tx_desc *txds;
	struct sk_buff **skbs;
	struct net_device *dev = cp->dev;
	int entry, count;

	spin_lock(&cp->tx_lock[ring]);
	txds = cp->init_txds[ring];
	skbs = cp->tx_skbs[ring];
	entry = cp->tx_old[ring];

	count = TX_BUFF_COUNT(ring, entry, limit);
	while (entry != limit) {
		struct sk_buff *skb = skbs[entry];
		dma_addr_t daddr;
		u32 dlen;
		int frag;

		if (!skb) {
			/* this should never occur */
			entry = TX_DESC_NEXT(ring, entry);
			continue;
		}

		/* however, we might get only a partial skb release:
		 * stop if this skb's descriptors extend past the limit */
		count -= skb_shinfo(skb)->nr_frags +
			+ cp->tx_tiny_use[ring][entry].nbufs + 1;
		if (count < 0)
			break;

		netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
			     "tx[%d] done, slot %d\n", ring, entry);

		skbs[entry] = NULL;
		cp->tx_tiny_use[ring][entry].nbufs = 0;

		/* unmap the main buffer plus every fragment */
		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
			struct cas_tx_desc *txd = txds + entry;

			daddr = le64_to_cpu(txd->buffer);
			dlen = CAS_VAL(TX_DESC_BUFLEN,
				       le64_to_cpu(txd->control));
			pci_unmap_page(cp->pdev, daddr, dlen,
				       PCI_DMA_TODEVICE);
			entry = TX_DESC_NEXT(ring, entry);

			/* a tiny (bounce) buffer may occupy the slot
			 * following a fragment's descriptor */
			if (cp->tx_tiny_use[ring][entry].used) {
				cp->tx_tiny_use[ring][entry].used = 0;
				entry = TX_DESC_NEXT(ring, entry);
			}
		}

		spin_lock(&cp->stat_lock[ring]);
		cp->net_stats[ring].tx_packets++;
		cp->net_stats[ring].tx_bytes += skb->len;
		spin_unlock(&cp->stat_lock[ring]);
		dev_kfree_skb_irq(skb);
	}
	cp->tx_old[ring] = entry;

	/* wake the queue when a worst-case packet (all frags, possibly
	 * doubled by the target-abort workaround) fits again
	 */
	if (netif_queue_stopped(dev) &&
	    (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
		netif_wake_queue(dev);
	spin_unlock(&cp->tx_lock[ring]);
}
1928
/* TX completion dispatch: determine each ring's completion index
 * (from the DMA completion-writeback block when USE_TX_COMPWB is set,
 * otherwise from the per-ring chip register) and reclaim descriptors.
 */
static void cas_tx(struct net_device *dev, struct cas *cp,
		   u32 status)
{
	int limit, ring;
#ifdef USE_TX_COMPWB
	u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
#endif
	/* NOTE(review): compwb is referenced here outside its #ifdef —
	 * this would not compile with USE_TX_COMPWB undefined. */
	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
		     "tx interrupt, status: 0x%x, %llx\n",
		     status, (unsigned long long)compwb);

	/* process all the rings */
	for (ring = 0; ring < N_TX_RINGS; ring++) {
#ifdef USE_TX_COMPWB
		/* next ring's completion index is packed into compwb */
		limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
			CAS_VAL(TX_COMPWB_LSB, compwb);
		compwb = TX_COMPWB_NEXT(compwb);
#else
		limit = readl(cp->regs + REG_TX_COMPN(ring));
#endif
		if (cp->tx_old[ring] != limit)
			cas_tx_ringN(cp, ring, limit);
	}
}
1953
1954
/* Build an skb for one RX completion entry.
 *
 * The chip splits a frame into an optional header region and a data
 * region that may span up to two DMA pages.  Small packets are copied
 * entirely into the skb; larger ones get their first bytes copied and
 * the remainder attached as page fragments.  When crc_size is nonzero
 * the trailing FCS is folded out of the hardware checksum.
 * Returns the frame length on success, -1 on allocation or layout
 * failure (the caller drops the packet).
 */
static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
			      int entry, const u64 *words,
			      struct sk_buff **skbref)
{
	int dlen, hlen, len, i, alloclen;
	int off, swivel = RX_SWIVEL_OFF_VAL;
	struct cas_page *page;
	struct sk_buff *skb;
	void *addr, *crcaddr;
	__sum16 csum;
	char *p;

	hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
	dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
	len = hlen + dlen;

	/* small packets are copied whole; otherwise only the header
	 * (at least RX_COPY_MIN bytes) is copied into the skb */
	if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
		alloclen = len;
	else
		alloclen = max(hlen, RX_COPY_MIN);

	skb = dev_alloc_skb(alloclen + swivel + cp->crc_size);
	if (skb == NULL)
		return -1;

	*skbref = skb;
	skb_reserve(skb, swivel);

	p = skb->data;
	addr = crcaddr = NULL;
	if (hlen) { /* always copy header pages */
		i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
		off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
			swivel;

		i = hlen;
		if (!dlen) /* attach FCS */
			i += cp->crc_size;
		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
				    PCI_DMA_FROMDEVICE);
		addr = cas_page_map(page->buffer);
		memcpy(p, addr + off, i);
		pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
				    PCI_DMA_FROMDEVICE);
		cas_page_unmap(addr);
		RX_USED_ADD(page, 0x100);
		p += hlen;
		swivel = 0;
	}

	if (alloclen < (hlen + dlen)) {
		skb_frag_t *frag = skb_shinfo(skb)->frags;

		/* normal or jumbo packets: attach data as page frags */
		i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
		off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;

		/* bytes of data available in the first page */
		hlen = min(cp->page_size - off, dlen);
		if (hlen < 0) {
			netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
				     "rx page overflow: %d\n", hlen);
			dev_kfree_skb_irq(skb);
			return -1;
		}
		i = hlen;
		if (i == dlen)  /* attach FCS */
			i += cp->crc_size;
		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
				    PCI_DMA_FROMDEVICE);

		/* make sure we always copy a header into the linear area */
		swivel = 0;
		if (p == (char *) skb->data) { /* not split */
			addr = cas_page_map(page->buffer);
			memcpy(p, addr + off, RX_COPY_MIN);
			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
					PCI_DMA_FROMDEVICE);
			cas_page_unmap(addr);
			off += RX_COPY_MIN;
			swivel = RX_COPY_MIN;
			RX_USED_ADD(page, cp->mtu_stride);
		} else {
			RX_USED_ADD(page, hlen);
		}
		skb_put(skb, alloclen);

		skb_shinfo(skb)->nr_frags++;
		skb->data_len += hlen - swivel;
		skb->truesize += hlen - swivel;
		skb->len += hlen - swivel;

		get_page(page->buffer);
		frag->page = page->buffer;
		frag->page_offset = off;
		frag->size = hlen - swivel;

		/* any more data? frame may continue in a second page */
		if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
			hlen = dlen;
			off = 0;

			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
			pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
					    hlen + cp->crc_size,
					    PCI_DMA_FROMDEVICE);
			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
					    hlen + cp->crc_size,
					    PCI_DMA_FROMDEVICE);

			skb_shinfo(skb)->nr_frags++;
			skb->data_len += hlen;
			skb->len += hlen;
			frag++;

			get_page(page->buffer);
			frag->page = page->buffer;
			frag->page_offset = 0;
			frag->size = hlen;
			RX_USED_ADD(page, hlen + cp->crc_size);
		}

		/* the FCS lives in the page right after the data */
		if (cp->crc_size) {
			addr = cas_page_map(page->buffer);
			crcaddr = addr + off + hlen;
		}

	} else {
		/* copying packet: everything fits in the linear skb */
		if (!dlen)
			goto end_copy_pkt;

		i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
		off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
		hlen = min(cp->page_size - off, dlen);
		if (hlen < 0) {
			netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
				     "rx page overflow: %d\n", hlen);
			dev_kfree_skb_irq(skb);
			return -1;
		}
		i = hlen;
		if (i == dlen) /* attach FCS */
			i += cp->crc_size;
		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
				    PCI_DMA_FROMDEVICE);
		addr = cas_page_map(page->buffer);
		memcpy(p, addr + off, i);
		pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
				    PCI_DMA_FROMDEVICE);
		cas_page_unmap(addr);
		if (p == (char *) skb->data) /* not split */
			RX_USED_ADD(page, cp->mtu_stride);
		else
			RX_USED_ADD(page, i);

		/* any more data? copy the second page's share as well */
		if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
			p += hlen;
			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
			pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
					    dlen + cp->crc_size,
					    PCI_DMA_FROMDEVICE);
			addr = cas_page_map(page->buffer);
			memcpy(p, addr, dlen + cp->crc_size);
			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
					    dlen + cp->crc_size,
					    PCI_DMA_FROMDEVICE);
			cas_page_unmap(addr);
			RX_USED_ADD(page, dlen + cp->crc_size);
		}
end_copy_pkt:
		if (cp->crc_size) {
			addr = NULL;
			crcaddr = skb->data + alloclen;
		}
		skb_put(skb, alloclen);
	}

	csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
	if (cp->crc_size) {
		/* hardware checksum includes the FCS; fold it back out */
		csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
					      csum_unfold(csum)));
		if (addr)
			cas_page_unmap(addr);
	}
	skb->protocol = eth_type_trans(skb, cp->dev);
	if (skb->protocol == htons(ETH_P_IP)) {
		skb->csum = csum_unfold(~csum);
		skb->ip_summed = CHECKSUM_COMPLETE;
	} else
		skb_checksum_none_assert(skb);
	return len;
}
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
2172 struct sk_buff *skb)
2173{
2174 int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
2175 struct sk_buff_head *flow = &cp->rx_flows[flowid];
2176
2177
2178
2179
2180
2181 __skb_queue_tail(flow, skb);
2182 if (words[0] & RX_COMP1_RELEASE_FLOW) {
2183 while ((skb = __skb_dequeue(flow))) {
2184 cas_skb_release(skb);
2185 }
2186 }
2187}
2188
2189
2190
2191
/* Refill the next descriptor slot on @ring with the page for @index
 * and advance the software head.  The chip's kick register is only
 * written on 4-entry boundaries.
 */
static void cas_post_page(struct cas *cp, const int ring, const int index)
{
	cas_page_t *new;
	int entry;

	entry = cp->rx_old[ring];

	/* swap in a page that is free of outside references */
	new = cas_page_swap(cp, ring, index);
	cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
	cp->init_rxds[ring][entry].index =
		cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
			    CAS_BASE(RX_INDEX_RING, ring));

	entry = RX_DESC_ENTRY(ring, entry + 1);
	cp->rx_old[ring] = entry;

	/* only kick the hardware every 4th entry */
	if (entry % 4)
		return;

	if (ring == 0)
		writel(entry, cp->regs + REG_RX_KICK);
	else if ((N_RX_DESC_RINGS > 1) &&
		 (cp->cas_flags & CAS_FLAG_REG_PLUS))
		writel(entry, cp->regs + REG_PLUS_RX_KICK1);
}
2217
2218
2219
/* Replenish up to @num entries (0 == sweep the whole ring minus 4) of
 * descriptor ring @ring with fresh pages.  On page exhaustion, arms
 * the link timer for a retry and returns -ENOMEM; returns 0 otherwise.
 */
static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
{
	unsigned int entry, last, count, released;
	int cluster;
	cas_page_t **page = cp->rx_pages[ring];

	entry = cp->rx_old[ring];

	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
		     "rxd[%d] interrupt, done: %d\n", ring, entry);

	cluster = -1;
	count = entry & 0x3;
	last = RX_DESC_ENTRY(ring, num ? entry + num - 4: entry - 4);
	released = 0;
	while (entry != last) {
		/* replace the page only if it still has outside users */
		if (page_count(page[entry]->buffer) > 1) {
			cas_page_t *new = cas_page_dequeue(cp);
			if (!new) {
				/* out of pages: flag the ring and let the
				 * link timer retry this refill later */
				cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
				if (!timer_pending(&cp->link_timer))
					mod_timer(&cp->link_timer, jiffies +
						  CAS_LINK_FAST_TIMEOUT);
				cp->rx_old[ring] = entry;
				cp->rx_last[ring] = num ? num - released : 0;
				return -ENOMEM;
			}
			spin_lock(&cp->rx_inuse_lock);
			list_add(&page[entry]->list, &cp->rx_inuse_list);
			spin_unlock(&cp->rx_inuse_lock);
			cp->init_rxds[ring][entry].buffer =
				cpu_to_le64(new->dma_addr);
			page[entry] = new;

		}

		/* remember the last completed 4-entry boundary for the kick */
		if (++count == 4) {
			cluster = entry;
			count = 0;
		}
		released++;
		entry = RX_DESC_ENTRY(ring, entry + 1);
	}
	cp->rx_old[ring] = entry;

	if (cluster < 0)
		return 0;

	if (ring == 0)
		writel(cluster, cp->regs + REG_RX_KICK);
	else if ((N_RX_DESC_RINGS > 1) &&
		 (cp->cas_flags & CAS_FLAG_REG_PLUS))
		writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
	return 0;
}
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
/* Process received packets on completion ring @ring.  @budget of 0
 * means unbounded (non-NAPI path).  Returns the number of completion
 * entries consumed.
 */
static int cas_rx_ringN(struct cas *cp, int ring, int budget)
{
	struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
	int entry, drops;
	int npackets = 0;

	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
		     "rx[%d] interrupt, done: %d/%d\n",
		     ring,
		     readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);

	entry = cp->rx_new[ring];
	drops = 0;
	while (1) {
		struct cas_rx_comp *rxc = rxcs + entry;
		struct sk_buff *uninitialized_var(skb);
		int type, len;
		u64 words[4];
		int i, dring;

		words[0] = le64_to_cpu(rxc->word1);
		words[1] = le64_to_cpu(rxc->word2);
		words[2] = le64_to_cpu(rxc->word3);
		words[3] = le64_to_cpu(rxc->word4);

		/* type 0 means the hardware hasn't produced this entry */
		type = CAS_VAL(RX_COMP1_TYPE, words[0]);
		if (type == 0)
			break;

		/* ZERO marker still set: entry not yet written by hw
		 * (set by cas_rxc_init when the entry was recycled) */
		if (words[3] & RX_COMP4_ZERO) {
			break;
		}

		/* hardware-flagged errors: count and drop */
		if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
			spin_lock(&cp->stat_lock[ring]);
			cp->net_stats[ring].rx_errors++;
			if (words[3] & RX_COMP4_LEN_MISMATCH)
				cp->net_stats[ring].rx_length_errors++;
			if (words[3] & RX_COMP4_BAD)
				cp->net_stats[ring].rx_crc_errors++;
			spin_unlock(&cp->stat_lock[ring]);

			/* also reached from below on skb alloc failure */
		drop_it:
			spin_lock(&cp->stat_lock[ring]);
			++cp->net_stats[ring].rx_dropped;
			spin_unlock(&cp->stat_lock[ring]);
			goto next;
		}

		len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
		if (len < 0) {
			++drops;
			goto drop_it;
		}

		/* non-batched packets and type-2 completions go straight
		 * up the stack; everything else may be flow-reassembled */
		if (RX_DONT_BATCH || (type == 0x2)) {
			cas_skb_release(skb);
		} else {
			cas_rx_flow_pkt(cp, words, skb);
		}

		spin_lock(&cp->stat_lock[ring]);
		cp->net_stats[ring].rx_packets++;
		cp->net_stats[ring].rx_bytes += len;
		spin_unlock(&cp->stat_lock[ring]);

	next:
		npackets++;

		/* return any pages the completion says are done with */
		if (words[0] & RX_COMP1_RELEASE_HDR) {
			i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
			dring = CAS_VAL(RX_INDEX_RING, i);
			i = CAS_VAL(RX_INDEX_NUM, i);
			cas_post_page(cp, dring, i);
		}

		if (words[0] & RX_COMP1_RELEASE_DATA) {
			i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
			dring = CAS_VAL(RX_INDEX_RING, i);
			i = CAS_VAL(RX_INDEX_NUM, i);
			cas_post_page(cp, dring, i);
		}

		if (words[0] & RX_COMP1_RELEASE_NEXT) {
			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
			dring = CAS_VAL(RX_INDEX_RING, i);
			i = CAS_VAL(RX_INDEX_NUM, i);
			cas_post_page(cp, dring, i);
		}

		/* advance, skipping any entries the hardware marked */
		entry = RX_COMP_ENTRY(ring, entry + 1 +
				      CAS_VAL(RX_COMP1_SKIP, words[0]));
#ifdef USE_NAPI
		if (budget && (npackets >= budget))
			break;
#endif
	}
	cp->rx_new[ring] = entry;

	if (drops)
		netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
	return npackets;
}
2406
2407
2408
2409static void cas_post_rxcs_ringN(struct net_device *dev,
2410 struct cas *cp, int ring)
2411{
2412 struct cas_rx_comp *rxc = cp->init_rxcs[ring];
2413 int last, entry;
2414
2415 last = cp->rx_cur[ring];
2416 entry = cp->rx_new[ring];
2417 netif_printk(cp, intr, KERN_DEBUG, dev,
2418 "rxc[%d] interrupt, done: %d/%d\n",
2419 ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);
2420
2421
2422 while (last != entry) {
2423 cas_rxc_init(rxc + last);
2424 last = RX_COMP_ENTRY(ring, last + 1);
2425 }
2426 cp->rx_cur[ring] = last;
2427
2428 if (ring == 0)
2429 writel(last, cp->regs + REG_RX_COMP_TAIL);
2430 else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
2431 writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
2432}
2433
2434
2435
2436
2437
2438
2439#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
2440static inline void cas_handle_irqN(struct net_device *dev,
2441 struct cas *cp, const u32 status,
2442 const int ring)
2443{
2444 if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
2445 cas_post_rxcs_ringN(dev, cp, ring);
2446}
2447
2448static irqreturn_t cas_interruptN(int irq, void *dev_id)
2449{
2450 struct net_device *dev = dev_id;
2451 struct cas *cp = netdev_priv(dev);
2452 unsigned long flags;
2453 int ring;
2454 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
2455
2456
2457 if (status == 0)
2458 return IRQ_NONE;
2459
2460 ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
2461 spin_lock_irqsave(&cp->lock, flags);
2462 if (status & INTR_RX_DONE_ALT) {
2463#ifdef USE_NAPI
2464 cas_mask_intr(cp);
2465 napi_schedule(&cp->napi);
2466#else
2467 cas_rx_ringN(cp, ring, 0);
2468#endif
2469 status &= ~INTR_RX_DONE_ALT;
2470 }
2471
2472 if (status)
2473 cas_handle_irqN(dev, cp, status, ring);
2474 spin_unlock_irqrestore(&cp->lock, flags);
2475 return IRQ_HANDLED;
2476}
2477#endif
2478
2479#ifdef USE_PCI_INTB
2480
2481static inline void cas_handle_irq1(struct cas *cp, const u32 status)
2482{
2483 if (status & INTR_RX_BUF_UNAVAIL_1) {
2484
2485
2486 cas_post_rxds_ringN(cp, 1, 0);
2487 spin_lock(&cp->stat_lock[1]);
2488 cp->net_stats[1].rx_dropped++;
2489 spin_unlock(&cp->stat_lock[1]);
2490 }
2491
2492 if (status & INTR_RX_BUF_AE_1)
2493 cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
2494 RX_AE_FREEN_VAL(1));
2495
2496 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2497 cas_post_rxcs_ringN(cp, 1);
2498}
2499
2500
/* Interrupt handler for PCI INTB: services RX completion ring 1.
 * Returns IRQ_NONE when the ring-1 status register shows nothing
 * pending (shared interrupt line).
 */
static irqreturn_t cas_interrupt1(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct cas *cp = netdev_priv(dev);
	unsigned long flags;
	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));

	/* check for shared interrupt */
	if (status == 0)
		return IRQ_NONE;

	spin_lock_irqsave(&cp->lock, flags);
	if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
#ifdef USE_NAPI
		cas_mask_intr(cp);
		napi_schedule(&cp->napi);
#else
		cas_rx_ringN(cp, 1, 0);
#endif
		status &= ~INTR_RX_DONE_ALT;
	}
	if (status)
		cas_handle_irq1(cp, status);
	spin_unlock_irqrestore(&cp->lock, flags);
	return IRQ_HANDLED;
}
2527#endif
2528
/* Service the non-RX-done interrupt causes on the primary (INTA)
 * vector: abnormal/error conditions and ring-0 buffer housekeeping.
 */
static inline void cas_handle_irq(struct net_device *dev,
				  struct cas *cp, const u32 status)
{
	/* housekeeping interrupts */
	if (status & INTR_ERROR_MASK)
		cas_abnormal_irq(dev, cp, status);

	if (status & INTR_RX_BUF_UNAVAIL) {
		/* Frame arrived with no free RX buffers: replenish
		 * descriptor ring 0 and account the drop.
		 */
		cas_post_rxds_ringN(cp, 0, 0);
		spin_lock(&cp->stat_lock[0]);
		cp->net_stats[0].rx_dropped++;
		spin_unlock(&cp->stat_lock[0]);
	} else if (status & INTR_RX_BUF_AE) {
		/* ring almost empty: refill up to the AE threshold */
		cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
				    RX_AE_FREEN_VAL(0));
	}

	if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
		cas_post_rxcs_ringN(dev, cp, 0);
}
2552
/* Primary (INTA) interrupt handler: TX completion, RX completion ring
 * 0, and all abnormal causes.  Returns IRQ_NONE when the status
 * register shows nothing pending (shared interrupt line).
 */
static irqreturn_t cas_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct cas *cp = netdev_priv(dev);
	unsigned long flags;
	u32 status = readl(cp->regs + REG_INTR_STATUS);

	if (status == 0)
		return IRQ_NONE;

	spin_lock_irqsave(&cp->lock, flags);
	if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
		cas_tx(dev, cp, status);
		status &= ~(INTR_TX_ALL | INTR_TX_INTME);
	}

	if (status & INTR_RX_DONE) {
#ifdef USE_NAPI
		/* mask and defer rx work to the NAPI poll loop */
		cas_mask_intr(cp);
		napi_schedule(&cp->napi);
#else
		cas_rx_ringN(cp, 0, 0);
#endif
		status &= ~INTR_RX_DONE;
	}

	if (status)
		cas_handle_irq(dev, cp, status);
	spin_unlock_irqrestore(&cp->lock, flags);
	return IRQ_HANDLED;
}
2584
2585
2586#ifdef USE_NAPI
/* NAPI poll callback: reclaim TX completions under cp->lock, then drain
 * the RX completion rings up to @budget packets, and re-enable chip
 * interrupts only when the rings were fully drained within budget.
 */
static int cas_poll(struct napi_struct *napi, int budget)
{
	struct cas *cp = container_of(napi, struct cas, napi);
	struct net_device *dev = cp->dev;
	int i, enable_intr, credits;
	u32 status = readl(cp->regs + REG_INTR_STATUS);
	unsigned long flags;

	/* TX reclaim needs the main device lock; RX below runs unlocked */
	spin_lock_irqsave(&cp->lock, flags);
	cas_tx(dev, cp, status);
	spin_unlock_irqrestore(&cp->lock, flags);

	/* NAPI rx: the budget is split evenly across the completion rings.
	 * NOTE(review): both loop bounds are N_RX_COMP_RINGS, so each ring
	 * may be polled up to N_RX_COMP_RINGS passes before giving up --
	 * the extra passes presumably pick up packets that arrived while
	 * polling; confirm intent before "fixing" the apparent double loop.
	 */
	enable_intr = 1;
	credits = 0;
	for (i = 0; i < N_RX_COMP_RINGS; i++) {
		int j;
		for (j = 0; j < N_RX_COMP_RINGS; j++) {
			credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
			if (credits >= budget) {
				/* budget exhausted: stay in polling mode */
				enable_intr = 0;
				goto rx_comp;
			}
		}
	}

rx_comp:
	/* ack any interrupt causes that accumulated while polling */
	spin_lock_irqsave(&cp->lock, flags);
	if (status)
		cas_handle_irq(dev, cp, status);

#ifdef USE_PCI_INTB
	if (N_RX_COMP_RINGS > 1) {
		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
		if (status)
			cas_handle_irq1(dev, cp, status);
	}
#endif

#ifdef USE_PCI_INTC
	if (N_RX_COMP_RINGS > 2) {
		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
		if (status)
			cas_handle_irqN(dev, cp, status, 2);
	}
#endif

#ifdef USE_PCI_INTD
	if (N_RX_COMP_RINGS > 3) {
		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
		if (status)
			cas_handle_irqN(dev, cp, status, 3);
	}
#endif
	spin_unlock_irqrestore(&cp->lock, flags);
	if (enable_intr) {
		/* everything drained: leave polling mode */
		napi_complete(napi);
		cas_unmask_intr(cp);
	}
	return credits;
}
2655#endif
2656
2657#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' for netconsole and friends: temporarily disable
 * the primary interrupt and invoke the handler by hand.  The branches
 * for the additional PCI INTB-D vectors are empty placeholders.
 */
static void cas_netpoll(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);

	cas_disable_irq(cp, 0);
	cas_interrupt(cp->pdev->irq, dev);
	cas_enable_irq(cp, 0);

#ifdef USE_PCI_INTB
	if (N_RX_COMP_RINGS > 1) {
		/* ring 1 handler not wired up for netpoll */
	}
#endif
#ifdef USE_PCI_INTC
	if (N_RX_COMP_RINGS > 2) {
		/* ring 2 handler not wired up for netpoll */
	}
#endif
#ifdef USE_PCI_INTD
	if (N_RX_COMP_RINGS > 3) {
		/* ring 3 handler not wired up for netpoll */
	}
#endif
}
2682#endif
2683
/* ndo_tx_timeout handler: dump the MIF/MAC/TX/RX/HP state-machine
 * registers for diagnosis, then schedule a full chip reset from
 * process context via the reset_task workqueue.
 */
static void cas_tx_timeout(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);

	netdev_err(dev, "transmit timed out, resetting\n");
	if (!cp->hw_running) {
		netdev_err(dev, "hrm.. hw not running!\n");
		return;
	}

	netdev_err(dev, "MIF_STATE[%08x]\n",
		   readl(cp->regs + REG_MIF_STATE_MACHINE));

	netdev_err(dev, "MAC_STATE[%08x]\n",
		   readl(cp->regs + REG_MAC_STATE_MACHINE));

	netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
		   readl(cp->regs + REG_TX_CFG),
		   readl(cp->regs + REG_MAC_TX_STATUS),
		   readl(cp->regs + REG_MAC_TX_CFG),
		   readl(cp->regs + REG_TX_FIFO_PKT_CNT),
		   readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
		   readl(cp->regs + REG_TX_FIFO_READ_PTR),
		   readl(cp->regs + REG_TX_SM_1),
		   readl(cp->regs + REG_TX_SM_2));

	netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
		   readl(cp->regs + REG_RX_CFG),
		   readl(cp->regs + REG_MAC_RX_STATUS),
		   readl(cp->regs + REG_MAC_RX_CFG));

	netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n",
		   readl(cp->regs + REG_HP_STATE_MACHINE),
		   readl(cp->regs + REG_HP_STATUS0),
		   readl(cp->regs + REG_HP_STATUS1),
		   readl(cp->regs + REG_HP_STATUS2));

#if 1
	/* counter-based scheme: reset_task decodes the pending counters */
	atomic_inc(&cp->reset_task_pending);
	atomic_inc(&cp->reset_task_pending_all);
	schedule_work(&cp->reset_task);
#else
	atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
	schedule_work(&cp->reset_task);
#endif
}
2730
/* Return nonzero when the descriptor at @entry should request a TX
 * completion interrupt: one interrupt per half ring, i.e. whenever the
 * low bits of the index wrap to zero.
 */
static inline int cas_intme(int ring, int entry)
{
	int half_mask = (TX_DESC_RINGN_SIZE(ring) >> 1) - 1;

	return (entry & half_mask) == 0;
}
2738
2739
2740static void cas_write_txd(struct cas *cp, int ring, int entry,
2741 dma_addr_t mapping, int len, u64 ctrl, int last)
2742{
2743 struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
2744
2745 ctrl |= CAS_BASE(TX_DESC_BUFLEN, len);
2746 if (cas_intme(ring, entry))
2747 ctrl |= TX_DESC_INTME;
2748 if (last)
2749 ctrl |= TX_DESC_EOF;
2750 txd->control = cpu_to_le64(ctrl);
2751 txd->buffer = cpu_to_le64(mapping);
2752}
2753
2754static inline void *tx_tiny_buf(struct cas *cp, const int ring,
2755 const int entry)
2756{
2757 return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
2758}
2759
2760static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
2761 const int entry, const int tentry)
2762{
2763 cp->tx_tiny_use[ring][tentry].nbufs++;
2764 cp->tx_tiny_use[ring][entry].used = 1;
2765 return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
2766}
2767
/* Queue @skb on TX descriptor ring @ring; holds tx_lock[ring] for the
 * duration.  Buffers whose tail would trip the tabort (target-abort)
 * erratum are split: the last few bytes are copied into a per-entry
 * tiny bounce buffer (cas_calc_tabort/tx_tiny_*) and sent from there.
 * Returns 0 on success, 1 if the ring was unexpectedly full (the queue
 * is stopped in that case).
 */
static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
				    struct sk_buff *skb)
{
	struct net_device *dev = cp->dev;
	int entry, nr_frags, frag, tabort, tentry;
	dma_addr_t mapping;
	unsigned long flags;
	u64 ctrl;
	u32 len;

	spin_lock_irqsave(&cp->tx_lock[ring], flags);

	/* This is a hard error: the caller should have stopped the queue */
	if (TX_BUFFS_AVAIL(cp, ring) <=
	    CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		return 1;
	}

	/* request hardware checksum insertion if the stack prepared for it */
	ctrl = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const u64 csum_start_off = skb_checksum_start_offset(skb);
		const u64 csum_stuff_off = csum_start_off + skb->csum_offset;

		ctrl = TX_DESC_CSUM_EN |
			CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
			CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
	}

	entry = cp->tx_new[ring];
	cp->tx_skbs[ring][entry] = skb;

	nr_frags = skb_shinfo(skb)->nr_frags;
	len = skb_headlen(skb);
	mapping = pci_map_page(cp->pdev, virt_to_page(skb->data),
			       offset_in_page(skb->data), len,
			       PCI_DMA_TODEVICE);

	tentry = entry;	/* first entry of the frame, for tiny-buf accounting */
	tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
	if (unlikely(tabort)) {
		/* head straddles the problem boundary: descriptor for the
		 * safe leading part, then the last @tabort bytes go out of
		 * a tiny bounce buffer in the next entry
		 */
		cas_write_txd(cp, ring, entry, mapping, len - tabort,
			      ctrl | TX_DESC_SOF, 0);
		entry = TX_DESC_NEXT(ring, entry);

		skb_copy_from_linear_data_offset(skb, len - tabort,
						 tx_tiny_buf(cp, ring, entry), tabort);
		mapping = tx_tiny_map(cp, ring, entry, tentry);
		cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
			      (nr_frags == 0));
	} else {
		cas_write_txd(cp, ring, entry, mapping, len, ctrl |
			      TX_DESC_SOF, (nr_frags == 0));
	}
	entry = TX_DESC_NEXT(ring, entry);

	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];

		len = fragp->size;
		mapping = pci_map_page(cp->pdev, fragp->page,
				       fragp->page_offset, len,
				       PCI_DMA_TODEVICE);

		/* same tail-bounce treatment for each page fragment */
		tabort = cas_calc_tabort(cp, fragp->page_offset, len);
		if (unlikely(tabort)) {
			void *addr;

			/* descriptor for the safe leading part of the frag */
			cas_write_txd(cp, ring, entry, mapping, len - tabort,
				      ctrl, 0);
			entry = TX_DESC_NEXT(ring, entry);

			addr = cas_page_map(fragp->page);
			memcpy(tx_tiny_buf(cp, ring, entry),
			       addr + fragp->page_offset + len - tabort,
			       tabort);
			cas_page_unmap(addr);
			mapping = tx_tiny_map(cp, ring, entry, tentry);
			len = tabort;
		}

		cas_write_txd(cp, ring, entry, mapping, len, ctrl,
			      (frag + 1 == nr_frags));
		entry = TX_DESC_NEXT(ring, entry);
	}

	cp->tx_new[ring] = entry;
	/* stop the queue once a worst-case frame no longer fits */
	if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	netif_printk(cp, tx_queued, KERN_DEBUG, dev,
		     "tx[%d] queued, slot %d, skblen %d, avail %d\n",
		     ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
	writel(entry, cp->regs + REG_TX_KICKN(ring)); /* hand off to hw */
	spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
	return 0;
}
2869
/* ndo_start_xmit entry point: pad runts up to the MAC minimum, then
 * spread frames across the TX rings round-robin.
 */
static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);

	/* round-robin ring selector.  NOTE(review): a function-static
	 * counter is racy on SMP, but it is only a load-balancing hint,
	 * so an occasional lost increment is harmless.
	 */
	static int ring;

	if (skb_padto(skb, cp->min_frame_size))
		return NETDEV_TX_OK;

	/* XXX: no higher-level QoS hook steers packets to specific rings */
	if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
		return NETDEV_TX_BUSY;
	return NETDEV_TX_OK;
}
2889
/* Program the TX DMA engine: completion write-back address, per-ring
 * descriptor base addresses, configuration word and max burst sizes.
 * DMA enable itself happens later in cas_start_dma().
 */
static void cas_init_tx_dma(struct cas *cp)
{
	u64 desc_dma = cp->block_dvma;
	unsigned long off;
	u32 val;
	int i;

	/* tx completion writeback registers */
#ifdef USE_TX_COMPWB
	off = offsetof(struct cas_init_block, tx_compwb);
	writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
	writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
#endif

	/* enable completion writebacks on all queues, paced mode,
	 * disable the read pipe and pre-interrupt compwbs
	 */
	val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
		TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
		TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
		TX_CFG_INTR_COMPWB_DIS;

	/* write out tx ring info and tx desc bases */
	for (i = 0; i < MAX_TX_RINGS; i++) {
		off = (unsigned long) cp->init_txds[i] -
			(unsigned long) cp->init_block;

		val |= CAS_TX_RINGN_BASE(i);
		writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
		writel((desc_dma + off) & 0xffffffff, cp->regs +
		       REG_TX_DBN_LOW(i));
		/* don't touch the kick registers here -- NOTE(review):
		 * presumably zeroing them mid-setup wedges the chip
		 */
	}
	writel(val, cp->regs + REG_TX_CFG);

	/* program max burst sizes. these would differ per ring if QoS
	 * were in use.
	 */
#ifdef USE_QOS
	writel(0x800, cp->regs + REG_TX_MAXBURST_0);
	writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
	writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
	writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
#else
	writel(0x800, cp->regs + REG_TX_MAXBURST_0);
	writel(0x800, cp->regs + REG_TX_MAXBURST_1);
	writel(0x800, cp->regs + REG_TX_MAXBURST_2);
	writel(0x800, cp->regs + REG_TX_MAXBURST_3);
#endif
}
2942
2943
/* Program TX then RX DMA blocks; the actual DMA-enable bits are set
 * later by cas_start_dma().
 */
static inline void cas_init_dma(struct cas *cp)
{
	cas_init_tx_dma(cp);
	cas_init_rx_dma(cp);
}
2949
2950static void cas_process_mc_list(struct cas *cp)
2951{
2952 u16 hash_table[16];
2953 u32 crc;
2954 struct netdev_hw_addr *ha;
2955 int i = 1;
2956
2957 memset(hash_table, 0, sizeof(hash_table));
2958 netdev_for_each_mc_addr(ha, cp->dev) {
2959 if (i <= CAS_MC_EXACT_MATCH_SIZE) {
2960
2961
2962
2963 writel((ha->addr[4] << 8) | ha->addr[5],
2964 cp->regs + REG_MAC_ADDRN(i*3 + 0));
2965 writel((ha->addr[2] << 8) | ha->addr[3],
2966 cp->regs + REG_MAC_ADDRN(i*3 + 1));
2967 writel((ha->addr[0] << 8) | ha->addr[1],
2968 cp->regs + REG_MAC_ADDRN(i*3 + 2));
2969 i++;
2970 }
2971 else {
2972
2973
2974
2975 crc = ether_crc_le(ETH_ALEN, ha->addr);
2976 crc >>= 24;
2977 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
2978 }
2979 }
2980 for (i = 0; i < 16; i++)
2981 writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i));
2982}
2983
2984
2985static u32 cas_setup_multicast(struct cas *cp)
2986{
2987 u32 rxcfg = 0;
2988 int i;
2989
2990 if (cp->dev->flags & IFF_PROMISC) {
2991 rxcfg |= MAC_RX_CFG_PROMISC_EN;
2992
2993 } else if (cp->dev->flags & IFF_ALLMULTI) {
2994 for (i=0; i < 16; i++)
2995 writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
2996 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
2997
2998 } else {
2999 cas_process_mc_list(cp);
3000 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
3001 }
3002
3003 return rxcfg;
3004}
3005
3006
3007static void cas_clear_mac_err(struct cas *cp)
3008{
3009 writel(0, cp->regs + REG_MAC_COLL_NORMAL);
3010 writel(0, cp->regs + REG_MAC_COLL_FIRST);
3011 writel(0, cp->regs + REG_MAC_COLL_EXCESS);
3012 writel(0, cp->regs + REG_MAC_COLL_LATE);
3013 writel(0, cp->regs + REG_MAC_TIMER_DEFER);
3014 writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK);
3015 writel(0, cp->regs + REG_MAC_RECV_FRAME);
3016 writel(0, cp->regs + REG_MAC_LEN_ERR);
3017 writel(0, cp->regs + REG_MAC_ALIGN_ERR);
3018 writel(0, cp->regs + REG_MAC_FCS_ERR);
3019 writel(0, cp->regs + REG_MAC_RX_CODE_ERR);
3020}
3021
3022
3023static void cas_mac_reset(struct cas *cp)
3024{
3025 int i;
3026
3027
3028 writel(0x1, cp->regs + REG_MAC_TX_RESET);
3029 writel(0x1, cp->regs + REG_MAC_RX_RESET);
3030
3031
3032 i = STOP_TRIES;
3033 while (i-- > 0) {
3034 if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
3035 break;
3036 udelay(10);
3037 }
3038
3039
3040 i = STOP_TRIES;
3041 while (i-- > 0) {
3042 if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
3043 break;
3044 udelay(10);
3045 }
3046
3047 if (readl(cp->regs + REG_MAC_TX_RESET) |
3048 readl(cp->regs + REG_MAC_RX_RESET))
3049 netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n",
3050 readl(cp->regs + REG_MAC_TX_RESET),
3051 readl(cp->regs + REG_MAC_RX_RESET),
3052 readl(cp->regs + REG_MAC_STATE_MACHINE));
3053}
3054
3055
3056
/* One-time MAC register initialization: reset, interpacket gaps, frame
 * size limits, station/filter addresses and interrupt masks.
 */
static void cas_init_mac(struct cas *cp)
{
	unsigned char *e = &cp->dev->dev_addr[0];
	int i;
	cas_mac_reset(cp);

	/* setup the core arbitration weight register */
	writel(CAWR_RR_DIS, cp->regs + REG_CAWR);

#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
	/* infinite burst only on chips without the target-abort erratum */
	if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
		writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
#endif

	writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);

	/* interpacket gap timers */
	writel(0x00, cp->regs + REG_MAC_IPG0);
	writel(0x08, cp->regs + REG_MAC_IPG1);
	writel(0x04, cp->regs + REG_MAC_IPG2);

	/* 64-byte slot time; cas_set_link_modes() raises it for 1000/half */
	writel(0x40, cp->regs + REG_MAC_SLOT_TIME);

	/* minimum frame = min payload + FCS */
	writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);

	/* max frame = payload + header + FCS + optional VLAN tag */
	writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
	       CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
			(CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
	       cp->regs + REG_MAC_FRAMESIZE_MAX);

	/* crc_size != 0 is used as a surrogate for half-duplex here:
	 * Saturn in half duplex gets a larger preamble (0x41)
	 */
	if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
		writel(0x41, cp->regs + REG_MAC_PA_SIZE);
	else
		writel(0x07, cp->regs + REG_MAC_PA_SIZE);
	writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
	writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
	writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);

	/* seed the backoff generator from the low MAC address bits */
	writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);

	/* clear the address filters */
	writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
	writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
	writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
	writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
	writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);

	/* clear the perfect filter array */
	for (i = 0; i < 45; i++)
		writel(0x0, cp->regs + REG_MAC_ADDRN(i));

	/* station address, 16 bits per register, low word first */
	writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
	writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
	writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));

	/* 01:80:c2:00:00:01 -- the 802.3x flow-control multicast address */
	writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
	writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
	writel(0x0180, cp->regs + REG_MAC_ADDRN(44));

	cp->mac_rx_cfg = cas_setup_multicast(cp);

	/* error counters live under the per-device stats lock */
	spin_lock(&cp->stat_lock[N_TX_RINGS]);
	cas_clear_mac_err(cp);
	spin_unlock(&cp->stat_lock[N_TX_RINGS]);

	/* mask everything but the frame xmit/recv counter events; normal
	 * rx/tx progress is reported by the DMA engine instead
	 */
	writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);

	/* mask all MAC control (pause) interrupts */
	writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
}
3146
3147
3148static void cas_init_pause_thresholds(struct cas *cp)
3149{
3150
3151
3152
3153 if (cp->rx_fifo_size <= (2 * 1024)) {
3154 cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
3155 } else {
3156 int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
3157 if (max_frame * 3 > cp->rx_fifo_size) {
3158 cp->rx_pause_off = 7104;
3159 cp->rx_pause_on = 960;
3160 } else {
3161 int off = (cp->rx_fifo_size - (max_frame * 2));
3162 int on = off - max_frame;
3163 cp->rx_pause_off = off;
3164 cp->rx_pause_on = on;
3165 }
3166 }
3167}
3168
3169static int cas_vpd_match(const void __iomem *p, const char *str)
3170{
3171 int len = strlen(str) + 1;
3172 int i;
3173
3174 for (i = 0; i < len; i++) {
3175 if (readb(p + i) != str[i])
3176 return 0;
3177 }
3178 return 1;
3179}
3180
3181
3182
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192
/* Walk the PCI expansion-ROM VPD to extract the factory MAC address
 * (into @dev_addr) and the phy type.  @offset selects which
 * "local-mac-address" entry to take, for multi-port boards sharing one
 * ROM.  Falls back to an OpenFirmware property on sparc, and finally to
 * a random Sun-prefixed (08:00:20) address.  Returns the detected phy
 * type (CAS_PHY_MII_MDIO0 by default).
 */
static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
			    const int offset)
{
	void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
	void __iomem *base, *kstart;
	int i, len;
	int found = 0;
#define VPD_FOUND_MAC 0x01
#define VPD_FOUND_PHY 0x02

	int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
	int mac_off = 0;

#if defined(CONFIG_SPARC)
	const unsigned char *addr;
#endif

	/* give us access to the PROM */
	writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
	       cp->regs + REG_BIM_LOCAL_DEV_EN);

	/* check for an expansion rom (0x55 0xaa signature) */
	if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
		goto use_random_mac_addr;

	/* search for the PCI data structure ("PCIR") */
	base = NULL;
	for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
		/* 'P' 'C' 'I' 'R' */
		if ((readb(p + i + 0) == 0x50) &&
		    (readb(p + i + 1) == 0x43) &&
		    (readb(p + i + 2) == 0x49) &&
		    (readb(p + i + 3) == 0x52)) {
			/* VPD pointer is a 16-bit LE offset at +8 */
			base = p + (readb(p + i + 8) |
				    (readb(p + i + 9) << 8));
			break;
		}
	}

	/* 0x82: identifier-string tag must start the VPD */
	if (!base || (readb(base) != 0x82))
		goto use_random_mac_addr;

	/* skip the identifier string: 16-bit LE length + 3 header bytes */
	i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
	while (i < EXPANSION_ROM_SIZE) {
		if (readb(base + i) != 0x90) /* 0x90 = VPD-R tag */
			goto use_random_mac_addr;

		/* length of this vpd segment */
		len = readb(base + i + 1) | (readb(base + i + 2) << 8);

		/* walk the keywords of the segment */
		kstart = base + i + 3;
		p = kstart;
		while ((p - kstart) < len) {
			int klen = readb(p + 2);
			int j;
			char type;

			p += 3;

			/* Each keyword is 2 name bytes + 1 length byte
			 * (klen, read above) followed by klen data bytes.
			 * Only 'I' (instance) entries matter here; their
			 * data carries a type byte: 'B' (binary, e.g.
			 * local-mac-address) or 'S' (string, e.g.
			 * phy-type / phy-interface / entropy-dev).
			 */
			if (readb(p) != 'I')
				goto next;

			/* binary data: look for the factory MAC address */
			type = readb(p + 3);
			if (type == 'B') {
				if ((klen == 29) && readb(p + 4) == 6 &&
				    cas_vpd_match(p + 5,
						  "local-mac-address")) {
					/* take the offset-th MAC entry */
					if (mac_off++ > offset)
						goto next;

					/* set mac address */
					for (j = 0; j < 6; j++)
						dev_addr[j] =
							readb(p + 23 + j);
					goto found_mac;
				}
			}

			if (type != 'S')
				goto next;

#ifdef USE_ENTROPY_DEV
			if ((klen == 24) &&
			    cas_vpd_match(p + 5, "entropy-dev") &&
			    cas_vpd_match(p + 17, "vms110")) {
				cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
				goto next;
			}
#endif

			if (found & VPD_FOUND_PHY)
				goto next;

			/* "phy-type" == "pcs" means a SERDES link */
			if ((klen == 18) && readb(p + 4) == 4 &&
			    cas_vpd_match(p + 5, "phy-type")) {
				if (cas_vpd_match(p + 14, "pcs")) {
					phy_type = CAS_PHY_SERDES;
					goto found_phy;
				}
			}

			/* alternate keyword "phy-interface" */
			if ((klen == 23) && readb(p + 4) == 4 &&
			    cas_vpd_match(p + 5, "phy-interface")) {
				if (cas_vpd_match(p + 19, "pcs")) {
					phy_type = CAS_PHY_SERDES;
					goto found_phy;
				}
			}
found_mac:
			found |= VPD_FOUND_MAC;
			goto next;

found_phy:
			found |= VPD_FOUND_PHY;

next:
			p += klen;
		}
		i += len + 3;
	}

use_random_mac_addr:
	if (found & VPD_FOUND_MAC)
		goto done;

#if defined(CONFIG_SPARC)
	/* on sparc, fall back to the firmware device-tree property */
	addr = of_get_property(cp->of_node, "local-mac-address", NULL);
	if (addr != NULL) {
		memcpy(dev_addr, addr, 6);
		goto done;
	}
#endif

	/* Sun 08:00:20 OUI followed by 3 random bytes */
	pr_info("MAC address not found in ROM VPD\n");
	dev_addr[0] = 0x08;
	dev_addr[1] = 0x00;
	dev_addr[2] = 0x20;
	get_random_bytes(dev_addr + 3, 3);

done:
	/* drop PROM access again */
	writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
	return phy_type;
}
3376
3377
3378static void cas_check_pci_invariants(struct cas *cp)
3379{
3380 struct pci_dev *pdev = cp->pdev;
3381
3382 cp->cas_flags = 0;
3383 if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
3384 (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
3385 if (pdev->revision >= CAS_ID_REVPLUS)
3386 cp->cas_flags |= CAS_FLAG_REG_PLUS;
3387 if (pdev->revision < CAS_ID_REVPLUS02u)
3388 cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
3389
3390
3391
3392
3393 if (pdev->revision < CAS_ID_REV2)
3394 cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
3395 } else {
3396
3397 cp->cas_flags |= CAS_FLAG_REG_PLUS;
3398
3399
3400
3401
3402 if ((pdev->vendor == PCI_VENDOR_ID_NS) &&
3403 (pdev->device == PCI_DEVICE_ID_NS_SATURN))
3404 cp->cas_flags |= CAS_FLAG_SATURN;
3405 }
3406}
3407
3408
3409static int cas_check_invariants(struct cas *cp)
3410{
3411 struct pci_dev *pdev = cp->pdev;
3412 u32 cfg;
3413 int i;
3414
3415
3416 cp->page_order = 0;
3417#ifdef USE_PAGE_ORDER
3418 if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
3419
3420 struct page *page = alloc_pages(GFP_ATOMIC,
3421 CAS_JUMBO_PAGE_SHIFT -
3422 PAGE_SHIFT);
3423 if (page) {
3424 __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
3425 cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
3426 } else {
3427 printk("MTU limited to %d bytes\n", CAS_MAX_MTU);
3428 }
3429 }
3430#endif
3431 cp->page_size = (PAGE_SIZE << cp->page_order);
3432
3433
3434 cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
3435 cp->rx_fifo_size = RX_FIFO_SIZE;
3436
3437
3438
3439
3440 cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
3441 PCI_SLOT(pdev->devfn));
3442 if (cp->phy_type & CAS_PHY_SERDES) {
3443 cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3444 return 0;
3445 }
3446
3447
3448 cfg = readl(cp->regs + REG_MIF_CFG);
3449 if (cfg & MIF_CFG_MDIO_1) {
3450 cp->phy_type = CAS_PHY_MII_MDIO1;
3451 } else if (cfg & MIF_CFG_MDIO_0) {
3452 cp->phy_type = CAS_PHY_MII_MDIO0;
3453 }
3454
3455 cas_mif_poll(cp, 0);
3456 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3457
3458 for (i = 0; i < 32; i++) {
3459 u32 phy_id;
3460 int j;
3461
3462 for (j = 0; j < 3; j++) {
3463 cp->phy_addr = i;
3464 phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
3465 phy_id |= cas_phy_read(cp, MII_PHYSID2);
3466 if (phy_id && (phy_id != 0xFFFFFFFF)) {
3467 cp->phy_id = phy_id;
3468 goto done;
3469 }
3470 }
3471 }
3472 pr_err("MII phy did not respond [%08x]\n",
3473 readl(cp->regs + REG_MIF_STATE_MACHINE));
3474 return -1;
3475
3476done:
3477
3478 cfg = cas_phy_read(cp, MII_BMSR);
3479 if ((cfg & CAS_BMSR_1000_EXTEND) &&
3480 cas_phy_read(cp, CAS_MII_1000_EXTEND))
3481 cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3482 return 0;
3483}
3484
3485
/* Final bring-up step: enable the TX/RX DMA engines and the MAC,
 * verify the MAC enable bits latch, then unmask interrupts and prime
 * the RX kick / completion-tail registers.
 */
static inline void cas_start_dma(struct cas *cp)
{
	int i;
	u32 val;
	int txfailed = 0;

	/* enable dma */
	val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
	writel(val, cp->regs + REG_TX_CFG);
	val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
	writel(val, cp->regs + REG_RX_CFG);

	/* enable the mac */
	val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
	writel(val, cp->regs + REG_MAC_TX_CFG);
	val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
	writel(val, cp->regs + REG_MAC_RX_CFG);

	/* wait for the TX enable to latch */
	i = STOP_TRIES;
	while (i-- > 0) {
		val = readl(cp->regs + REG_MAC_TX_CFG);
		if ((val & MAC_TX_CFG_EN))
			break;
		udelay(10);
	}
	/* loop exhausted: i-- underflows to -1, break leaves i >= 0 */
	if (i < 0) txfailed = 1;
	i = STOP_TRIES;
	while (i-- > 0) {
		val = readl(cp->regs + REG_MAC_RX_CFG);
		if ((val & MAC_RX_CFG_EN)) {
			/* RX ok; report a TX-only failure if one happened */
			if (txfailed) {
				netdev_err(cp->dev,
					   "enabling mac failed [tx:%08x:%08x]\n",
					   readl(cp->regs + REG_MIF_STATE_MACHINE),
					   readl(cp->regs + REG_MAC_STATE_MACHINE));
			}
			goto enable_rx_done;
		}
		udelay(10);
	}
	netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
		   (txfailed ? "tx,rx" : "rx"),
		   readl(cp->regs + REG_MIF_STATE_MACHINE),
		   readl(cp->regs + REG_MAC_STATE_MACHINE));

enable_rx_done:
	cas_unmask_intr(cp); /* enable interrupts */
	writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
	writel(0, cp->regs + REG_RX_COMP_TAIL);

	/* REG_PLUS chips have extra desc/completion rings to prime */
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		if (N_RX_DESC_RINGS > 1)
			writel(RX_DESC_RINGN_SIZE(1) - 4,
			       cp->regs + REG_PLUS_RX_KICK1);

		for (i = 1; i < N_RX_COMP_RINGS; i++)
			writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
	}
}
3545
3546
3547static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
3548 int *pause)
3549{
3550 u32 val = readl(cp->regs + REG_PCS_MII_LPA);
3551 *fd = (val & PCS_MII_LPA_FD) ? 1 : 0;
3552 *pause = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00;
3553 if (val & PCS_MII_LPA_ASYM_PAUSE)
3554 *pause |= 0x10;
3555 *spd = 1000;
3556}
3557
3558
3559static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
3560 int *pause)
3561{
3562 u32 val;
3563
3564 *fd = 0;
3565 *spd = 10;
3566 *pause = 0;
3567
3568
3569 val = cas_phy_read(cp, MII_LPA);
3570 if (val & CAS_LPA_PAUSE)
3571 *pause = 0x01;
3572
3573 if (val & CAS_LPA_ASYM_PAUSE)
3574 *pause |= 0x10;
3575
3576 if (val & LPA_DUPLEX)
3577 *fd = 1;
3578 if (val & LPA_100)
3579 *spd = 100;
3580
3581 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
3582 val = cas_phy_read(cp, CAS_MII_1000_STATUS);
3583 if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF))
3584 *spd = 1000;
3585 if (val & CAS_LPA_1000FULL)
3586 *fd = 1;
3587 }
3588}
3589
3590
3591
3592
3593
3594
/* Re-program the MAC XIF/TX/RX configuration for the current link
 * parameters (duplex, speed, pause) read back from the MII phy or PCS,
 * then start the DMA engines.
 * NOTE(review): callers appear to invoke this under cp->lock — confirm.
 */
static void cas_set_link_modes(struct cas *cp)
{
	u32 val;
	int full_duplex, speed, pause;

	full_duplex = 0;
	speed = 10;
	pause = 0;

	if (CAS_PHY_MII(cp->phy_type)) {
		cas_mif_poll(cp, 0);
		val = cas_phy_read(cp, MII_BMCR);
		if (val & BMCR_ANENABLE) {
			/* autoneg: use the partner's advertised modes */
			cas_read_mii_link_mode(cp, &full_duplex, &speed,
					       &pause);
		} else {
			/* forced: decode the parameters from BMCR itself */
			if (val & BMCR_FULLDPLX)
				full_duplex = 1;

			if (val & BMCR_SPEED100)
				speed = 100;
			else if (val & CAS_BMCR_SPEED1000)
				speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
					1000 : 100;
		}
		cas_mif_poll(cp, 1);

	} else {
		/* PCS/SERDES path */
		val = readl(cp->regs + REG_PCS_MII_CTRL);
		cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
		if ((val & PCS_MII_AUTONEG_EN) == 0) {
			if (val & PCS_MII_CTRL_DUPLEX)
				full_duplex = 1;
		}
	}

	netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
		   speed, full_duplex ? "full" : "half");

	/* XIF config: LED, MII buffer and GMII mode bits */
	val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
	if (CAS_PHY_MII(cp->phy_type)) {
		val |= MAC_XIF_MII_BUFFER_OUTPUT_EN;
		if (!full_duplex)
			val |= MAC_XIF_DISABLE_ECHO;
	}
	if (full_duplex)
		val |= MAC_XIF_FDPLX_LED;
	if (speed == 1000)
		val |= MAC_XIF_GMII_MODE;
	writel(val, cp->regs + REG_MAC_XIF_CFG);

	/* carrier / collision handling for the TX MAC */
	val = MAC_TX_CFG_IPG_EN;
	if (full_duplex) {
		val |= MAC_TX_CFG_IGNORE_CARRIER;
		val |= MAC_TX_CFG_IGNORE_COLL;
	} else {
#ifndef USE_CSMA_CD_PROTO
		val |= MAC_TX_CFG_NEVER_GIVE_UP_EN;
		val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM;
#endif
	}

	/* gigabit half-duplex needs carrier extension and a 512-byte
	 * slot time; everything else runs a 64-byte slot time.  FCS
	 * stripping is disabled at half duplex (checksum workaround:
	 * crc_size records the extra trailing bytes to ignore).
	 */
	if ((speed == 1000) && !full_duplex) {
		writel(val | MAC_TX_CFG_CARRIER_EXTEND,
		       cp->regs + REG_MAC_TX_CFG);

		val = readl(cp->regs + REG_MAC_RX_CFG);
		val &= ~MAC_RX_CFG_STRIP_FCS;
		writel(val | MAC_RX_CFG_CARRIER_EXTEND,
		       cp->regs + REG_MAC_RX_CFG);

		writel(0x200, cp->regs + REG_MAC_SLOT_TIME);

		cp->crc_size = 4;
		/* minimum-size gigabit frame at half duplex */
		cp->min_frame_size = CAS_1000MB_MIN_FRAME;

	} else {
		writel(val, cp->regs + REG_MAC_TX_CFG);

		/* strip FCS only when running full duplex */
		val = readl(cp->regs + REG_MAC_RX_CFG);
		if (full_duplex) {
			val |= MAC_RX_CFG_STRIP_FCS;
			cp->crc_size = 0;
			cp->min_frame_size = CAS_MIN_MTU;
		} else {
			val &= ~MAC_RX_CFG_STRIP_FCS;
			cp->crc_size = 4;
			cp->min_frame_size = CAS_MIN_FRAME;
		}
		writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
		       cp->regs + REG_MAC_RX_CFG);
		writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
	}

	if (netif_msg_link(cp)) {
		if (pause & 0x01) {
			netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
				    cp->rx_fifo_size,
				    cp->rx_pause_off,
				    cp->rx_pause_on);
		} else if (pause & 0x10) {
			netdev_info(cp->dev, "TX pause enabled\n");
		} else {
			netdev_info(cp->dev, "Pause is disabled\n");
		}
	}

	/* program pause generation/reception in the MAC control register */
	val = readl(cp->regs + REG_MAC_CTRL_CFG);
	val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
	if (pause) { /* symmetric or asymmetric pause */
		val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
		if (pause & 0x01) { /* symmetric pause */
			val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
		}
	}
	writel(val, cp->regs + REG_MAC_CTRL_CFG);
	cas_start_dma(cp);
}
3724
3725
/* Bring the chip to an operational state: optionally (re)initialize the
 * phy, program pause thresholds, MAC and DMA.  With @restart_link set
 * the link is re-negotiated from scratch; otherwise an already-up link
 * is reprogrammed in place.
 */
static void cas_init_hw(struct cas *cp, int restart_link)
{
	if (restart_link)
		cas_phy_init(cp);

	cas_init_pause_thresholds(cp);
	cas_init_mac(cp);
	cas_init_dma(cp);

	if (restart_link) {
		/* default autonegotiation parameters */
		cp->timer_ticks = 0;
		cas_begin_auto_negotiation(cp, NULL);
	} else if (cp->lstate == link_up) {
		cas_set_link_modes(cp);
		netif_carrier_on(cp->dev);
	}
}
3744
3745
3746
3747
3748
/* Heavy-handed reset via the BIM local-device soft-reset bit; PCI
 * config space is restored afterwards (presumably the reset clobbers
 * it — confirm against the chip documentation).
 */
static void cas_hard_reset(struct cas *cp)
{
	writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
	udelay(20);	/* give the reset time to take effect */
	pci_restore_state(cp->pdev);
}
3755
3756
/* Software-reset the TX/RX DMA blocks (optionally sparing the
 * PCS/SLINK block), poll for completion, then re-arm the BIM error
 * interrupts and PCI error mask, and force the datapath to MII mode.
 */
static void cas_global_reset(struct cas *cp, int blkflag)
{
	int limit;

	/* issue a global reset */
	if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
		/* for PCS, setting SW_RESET_BLOCK_PCS_SLINK keeps the
		 * result of the last autonegotiation from being cleared
		 */
		writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
		       cp->regs + REG_SW_RESET);
	} else {
		writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
	}

	/* settle before polling the self-clearing reset bits */
	mdelay(3);

	limit = STOP_TRIES;
	while (limit-- > 0) {
		u32 val = readl(cp->regs + REG_SW_RESET);
		if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
			goto done;
		udelay(10);
	}
	netdev_err(cp->dev, "sw reset failed\n");

done:
	/* enable BIM parity / abort interrupts */
	writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
	       BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);

	/* mask out the PCI error causes that are handled elsewhere (or
	 * happen routinely, like DMA counter overflows)
	 */
	writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
			       PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
			       PCI_ERR_BIM_DMA_READ), cp->regs +
	       REG_PCI_ERR_STATUS_MASK);

	/* default to MII datapath; the PCS path is selected later when
	 * the phy type calls for it
	 */
	writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
}
3806
/* Full chip reset: mask interrupts, global + MAC + entropy reset,
 * disable both DMA engines and reload the header-parser firmware.
 * @blkflag is forwarded to cas_global_reset() to optionally preserve
 * PCS autonegotiation state.
 */
static void cas_reset(struct cas *cp, int blkflag)
{
	u32 val;

	cas_mask_intr(cp);
	cas_global_reset(cp, blkflag);
	cas_mac_reset(cp);
	cas_entropy_reset(cp);

	/* disable dma engines */
	val = readl(cp->regs + REG_TX_CFG);
	val &= ~TX_CFG_DMA_EN;
	writel(val, cp->regs + REG_TX_CFG);

	val = readl(cp->regs + REG_RX_CFG);
	val &= ~RX_CFG_DMA_EN;
	writel(val, cp->regs + REG_RX_CFG);

	/* program header parser; the alternate firmware only applies
	 * when the chip is free of the target-abort erratum
	 */
	if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
	    (CAS_HP_ALT_FIRMWARE == cas_prog_null)) {
		cas_load_firmware(cp, CAS_HP_FIRMWARE);
	} else {
		cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
	}

	/* clear out the error counters */
	spin_lock(&cp->stat_lock[N_TX_RINGS]);
	cas_clear_mac_err(cp);
	spin_unlock(&cp->stat_lock[N_TX_RINGS]);
}
3838
3839
/* Stop the hardware for good: kill the link timer, wait for queued
 * reset work to drain, then reset the chip (powering down a Saturn phy)
 * with all locks held.
 */
static void cas_shutdown(struct cas *cp)
{
	unsigned long flags;

	/* mark not-running first so timers/work do not respawn */
	cp->hw_running = 0;

	del_timer_sync(&cp->link_timer);

	/* busy-wait for the reset task to finish */
#if 0
	while (atomic_read(&cp->reset_task_pending_mtu) ||
	       atomic_read(&cp->reset_task_pending_spare) ||
	       atomic_read(&cp->reset_task_pending_all))
		schedule();

#else
	while (atomic_read(&cp->reset_task_pending))
		schedule();
#endif
	/* actually stop the chip */
	cas_lock_all_save(cp, flags);
	cas_reset(cp, 0);
	if (cp->cas_flags & CAS_FLAG_SATURN)
		cas_phy_powerdown(cp);
	cas_unlock_all_restore(cp, flags);
}
3867
/* ndo_change_mtu: validate the new MTU, and when the interface is up
 * schedule a reset so RX buffers get resized (a full reset for SERDES,
 * an MTU-only reset otherwise), then wait for the work to complete.
 */
static int cas_change_mtu(struct net_device *dev, int new_mtu)
{
	struct cas *cp = netdev_priv(dev);

	if (new_mtu < CAS_MIN_MTU || new_mtu > CAS_MAX_MTU)
		return -EINVAL;

	dev->mtu = new_mtu;
	if (!netif_running(dev) || !netif_device_present(dev))
		return 0;

	/* let the reset task perform the actual reprogramming */
#if 1
	atomic_inc(&cp->reset_task_pending);
	if ((cp->phy_type & CAS_PHY_SERDES)) {
		atomic_inc(&cp->reset_task_pending_all);
	} else {
		atomic_inc(&cp->reset_task_pending_mtu);
	}
	schedule_work(&cp->reset_task);
#else
	atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
		   CAS_RESET_ALL : CAS_RESET_MTU);
	pr_err("reset called in cas_change_mtu\n");
	schedule_work(&cp->reset_task);
#endif

	/* don't return until the reset actually ran */
	flush_work_sync(&cp->reset_task);
	return 0;
}
3898
/* Unmap and free every pending skb on TX ring @ring.  Frames may span
 * extra descriptor entries beyond their fragment count when the tiny
 * bounce buffers were used, so the index is advanced with care.
 */
static void cas_clean_txd(struct cas *cp, int ring)
{
	struct cas_tx_desc *txd = cp->init_txds[ring];
	struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
	u64 daddr, dlen;
	int i, size;

	size = TX_DESC_RINGN_SIZE(ring);
	for (i = 0; i < size; i++) {
		int frag;

		if (skbs[i] == NULL)
			continue;

		skb = skbs[i];
		skbs[i] = NULL;

		/* one pass per buffer: head plus each fragment */
		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
			int ent = i & (size - 1);

			/* pull the dma address/length back out of the
			 * descriptor to unmap it
			 */
			daddr = le64_to_cpu(txd[ent].buffer);
			dlen = CAS_VAL(TX_DESC_BUFLEN,
				       le64_to_cpu(txd[ent].control));
			pci_unmap_page(cp->pdev, daddr, dlen,
				       PCI_DMA_TODEVICE);

			if (frag != skb_shinfo(skb)->nr_frags) {
				i++;

				/* skip an extra entry if this buffer
				 * spilled into a tiny bounce buffer
				 */
				ent = i & (size - 1);
				if (cp->tx_tiny_use[ring][ent].used)
					i++;
			}
		}
		dev_kfree_skb_any(skb);
	}

	/* zero out tiny-buffer usage for the whole ring */
	memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
}
3945
3946
3947static inline void cas_free_rx_desc(struct cas *cp, int ring)
3948{
3949 cas_page_t **page = cp->rx_pages[ring];
3950 int i, size;
3951
3952 size = RX_DESC_RINGN_SIZE(ring);
3953 for (i = 0; i < size; i++) {
3954 if (page[i]) {
3955 cas_page_free(cp, page[i]);
3956 page[i] = NULL;
3957 }
3958 }
3959}
3960
3961static void cas_free_rxds(struct cas *cp)
3962{
3963 int i;
3964
3965 for (i = 0; i < N_RX_DESC_RINGS; i++)
3966 cas_free_rx_desc(cp, i);
3967}
3968
3969
/* Return all rings to a pristine state: reset TX indices, free pending
 * TX skbs, zero the shared init block and recycle RX pages/completions.
 */
static void cas_clean_rings(struct cas *cp)
{
	int i;

	/* need to clean all tx rings */
	memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
	memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
	for (i = 0; i < N_TX_RINGS; i++)
		cas_clean_txd(cp, i);

	/* zero out init block */
	memset(cp->init_block, 0, sizeof(struct cas_init_block));
	cas_clean_rxds(cp);
	cas_clean_rxcs(cp);
}
3985
3986
3987static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
3988{
3989 cas_page_t **page = cp->rx_pages[ring];
3990 int size, i = 0;
3991
3992 size = RX_DESC_RINGN_SIZE(ring);
3993 for (i = 0; i < size; i++) {
3994 if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
3995 return -1;
3996 }
3997 return 0;
3998}
3999
4000static int cas_alloc_rxds(struct cas *cp)
4001{
4002 int i;
4003
4004 for (i = 0; i < N_RX_DESC_RINGS; i++) {
4005 if (cas_alloc_rx_desc(cp, i) < 0) {
4006 cas_free_rxds(cp);
4007 return -1;
4008 }
4009 }
4010 return 0;
4011}
4012
/* Work-queue handler that performs chip resets requested from timer or
 * interrupt context (where a reset cannot be executed directly).  The
 * reset_task_pending_* counters encode which kind of reset was asked
 * for: all (full), spare-buffer recovery only, or MTU change.
 */
static void cas_reset_task(struct work_struct *work)
{
	struct cas *cp = container_of(work, struct cas, reset_task);
#if 0
	int pending = atomic_read(&cp->reset_task_pending);
#else
	int pending_all = atomic_read(&cp->reset_task_pending_all);
	int pending_spare = atomic_read(&cp->reset_task_pending_spare);
	int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);

	if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) {
		/* all requests were already serviced by an earlier run
		 * of this work item; balance the global counter and
		 * bail out
		 */
		atomic_dec(&cp->reset_task_pending);
		return;
	}
#endif

	/* only touch the hardware if it is actually up; detach the
	 * netdev while we operate so the stack stops submitting work
	 */
	if (cp->hw_running) {
		unsigned long flags;

		/* detach + take every driver lock before resetting */
		netif_device_detach(cp->dev);
		cas_lock_all_save(cp, flags);

		if (cp->opened) {
			/* spare lists only exist once the device has
			 * been opened, so recover spares only then
			 */
			cas_spare_recover(cp, GFP_ATOMIC);
		}
#if 1
		/* if only spare recovery was requested, skip the reset */
		if (!pending_all && !pending_mtu)
			goto done;
#else
		if (pending == CAS_RESET_SPARE)
			goto done;
#endif

		/* Full reset (pending_all) re-initializes everything;
		 * otherwise (MTU change) a softer reset is done and the
		 * rings are rebuilt.  NOTE(review): exact semantics of
		 * the boolean args to cas_reset()/cas_init_hw() are
		 * defined elsewhere in this file -- confirm there.
		 */
#if 1
		cas_reset(cp, !(pending_all > 0));
		if (cp->opened)
			cas_clean_rings(cp);
		cas_init_hw(cp, (pending_all > 0));
#else
		cas_reset(cp, !(pending == CAS_RESET_ALL));
		if (cp->opened)
			cas_clean_rings(cp);
		cas_init_hw(cp, pending == CAS_RESET_ALL);
#endif

done:
		cas_unlock_all_restore(cp, flags);
		netif_device_attach(cp->dev);
	}
#if 1
	/* consume exactly the requests observed at entry; anything
	 * that arrived meanwhile keeps the work pending
	 */
	atomic_sub(pending_all, &cp->reset_task_pending_all);
	atomic_sub(pending_spare, &cp->reset_task_pending_spare);
	atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
	atomic_dec(&cp->reset_task_pending);
#else
	atomic_set(&cp->reset_task_pending, 0);
#endif
}
4089
/* Periodic link timer: gathers entropy, re-posts RX descriptors that
 * could not be posted earlier, checks link state (MII or PCS), and
 * watches for a wedged TX MAC state machine.  Schedules a full reset
 * via cas_reset_task when something looks wrong.
 */
static void cas_link_timer(unsigned long data)
{
	struct cas *cp = (struct cas *) data;
	int mask, pending = 0, reset = 0;
	unsigned long flags;

	if (link_transition_timeout != 0 &&
	    cp->link_transition_jiffies_valid &&
	    ((jiffies - cp->link_transition_jiffies) >
	      (link_transition_timeout))) {
		/* the last link transition is old enough that a new
		 * one should no longer be treated as bouncing
		 */
		cp->link_transition_jiffies_valid = 0;
	}

	if (!cp->hw_running)
		return;

	spin_lock_irqsave(&cp->lock, flags);
	cas_lock_tx(cp);
	cas_entropy_gather(cp);

	/* a reset is already queued -- let the reset task handle
	 * things and just rearm the timer
	 */
#if 1
	if (atomic_read(&cp->reset_task_pending_all) ||
	    atomic_read(&cp->reset_task_pending_spare) ||
	    atomic_read(&cp->reset_task_pending_mtu))
		goto done;
#else
	if (atomic_read(&cp->reset_task_pending))
		goto done;
#endif

	/* retry posting RX descriptors on rings that previously failed */
	if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) {
		int i, rmask;

		for (i = 0; i < MAX_RX_DESC_RINGS; i++) {
			rmask = CAS_FLAG_RXD_POST(i);
			if ((mask & rmask) == 0)
				continue;

			/* still failing: cas_post_rxds_ringN will have
			 * arranged its own retry, so skip mod_timer below
			 */
			if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
				pending = 1;
				continue;
			}
			cp->cas_flags &= ~rmask;
		}
	}

	if (CAS_PHY_MII(cp->phy_type)) {
		u16 bmsr;
		cas_mif_poll(cp, 0);
		bmsr = cas_phy_read(cp, MII_BMSR);
		/* BMSR is read twice; presumably because some of its
		 * bits are latched and the second read reflects the
		 * current state -- TODO confirm against PHY datasheet
		 */
		bmsr = cas_phy_read(cp, MII_BMSR);
		cas_mif_poll(cp, 1);
		/* flush read of MIF status -- assumed to avoid a
		 * duplicate MIF interrupt; confirm
		 */
		readl(cp->regs + REG_MIF_STATUS);
		reset = cas_mii_link_check(cp, bmsr);
	} else {
		reset = cas_pcs_link_check(cp);
	}

	if (reset)
		goto done;

	/* check for tx state machine confusion when nothing was sent */
	if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
		u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
		u32 wptr, rptr;
		int tlm = CAS_VAL(MAC_SM_TLM, val);

		/* TLM values 0x3/0x5 with an idle encap state machine
		 * indicate a wedged transmitter -- magic values from
		 * the chip's state-machine encoding
		 */
		if (((tlm == 0x5) || (tlm == 0x3)) &&
		    (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
			netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
				     "tx err: MAC_STATE[%08x]\n", val);
			reset = 1;
			goto done;
		}

		/* no packets queued in the FIFO but the pointers
		 * disagree: FIFO is stuck
		 */
		val = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
		wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
		rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
		if ((val == 0) && (wptr != rptr)) {
			netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
				     "tx err: TX_FIFO[%08x:%08x:%08x]\n",
				     val, wptr, rptr);
			reset = 1;
		}

		if (reset)
			cas_hard_reset(cp);
	}

done:
	if (reset) {
#if 1
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_all);
		schedule_work(&cp->reset_task);
#else
		atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
		pr_err("reset called in cas_link_timer\n");
		schedule_work(&cp->reset_task);
#endif
	}

	if (!pending)
		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
	cas_unlock_tx(cp);
	spin_unlock_irqrestore(&cp->lock, flags);
}
4211
4212
4213
4214
4215static void cas_tx_tiny_free(struct cas *cp)
4216{
4217 struct pci_dev *pdev = cp->pdev;
4218 int i;
4219
4220 for (i = 0; i < N_TX_RINGS; i++) {
4221 if (!cp->tx_tiny_bufs[i])
4222 continue;
4223
4224 pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
4225 cp->tx_tiny_bufs[i],
4226 cp->tx_tiny_dvma[i]);
4227 cp->tx_tiny_bufs[i] = NULL;
4228 }
4229}
4230
4231static int cas_tx_tiny_alloc(struct cas *cp)
4232{
4233 struct pci_dev *pdev = cp->pdev;
4234 int i;
4235
4236 for (i = 0; i < N_TX_RINGS; i++) {
4237 cp->tx_tiny_bufs[i] =
4238 pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
4239 &cp->tx_tiny_dvma[i]);
4240 if (!cp->tx_tiny_bufs[i]) {
4241 cas_tx_tiny_free(cp);
4242 return -1;
4243 }
4244 }
4245 return 0;
4246}
4247
4248
/* ndo_open: bring the interface up.  Acquisition order is tiny TX
 * bounce buffers -> RX descriptor pages -> RX spare pool -> IRQ; the
 * error labels at the bottom unwind in reverse.  pm_mutex serializes
 * open against close, suspend/resume and ioctl.
 */
static int cas_open(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);
	int hw_was_up, err;
	unsigned long flags;

	mutex_lock(&cp->pm_mutex);

	hw_was_up = cp->hw_running;

	/* if the chip isn't running yet (first open, or open after a
	 * suspend), reset it into a known state first
	 */
	if (!cp->hw_running) {
		/* soft reset under all locks */
		cas_lock_all_save(cp, flags);
		/* NOTE(review): second arg to cas_reset() assumed to
		 * select soft vs. blast reset -- defined elsewhere in
		 * this file; confirm there
		 */
		cas_reset(cp, 0);
		cp->hw_running = 1;
		cas_unlock_all_restore(cp, flags);
	}

	err = -ENOMEM;
	if (cas_tx_tiny_alloc(cp) < 0)
		goto err_unlock;

	/* alloc rx descriptor pages */
	if (cas_alloc_rxds(cp) < 0)
		goto err_tx_tiny;

	/* allocate rx spare buffers */
	cas_spare_init(cp);
	cas_spare_recover(cp, GFP_KERNEL);

	/* request the (shared) interrupt before enabling the hardware */
	if (request_irq(cp->pdev->irq, cas_interrupt,
			IRQF_SHARED, dev->name, (void *) dev)) {
		netdev_err(cp->dev, "failed to request irq !\n");
		err = -EAGAIN;
		goto err_spare;
	}

#ifdef USE_NAPI
	napi_enable(&cp->napi);
#endif

	/* initialize the hardware with clean rings; do the full init
	 * only if the chip wasn't already running
	 */
	cas_lock_all_save(cp, flags);
	cas_clean_rings(cp);
	cas_init_hw(cp, !hw_was_up);
	cp->opened = 1;
	cas_unlock_all_restore(cp, flags);

	netif_start_queue(dev);
	mutex_unlock(&cp->pm_mutex);
	return 0;

err_spare:
	cas_spare_free(cp);
	cas_free_rxds(cp);
err_tx_tiny:
	cas_tx_tiny_free(cp);
err_unlock:
	mutex_unlock(&cp->pm_mutex);
	return err;
}
4322
/* ndo_close: disable NAPI first (outside pm_mutex), stop the queue,
 * reset the chip into a quiescent link-down state, and release every
 * resource acquired in cas_open() in reverse order.
 */
static int cas_close(struct net_device *dev)
{
	unsigned long flags;
	struct cas *cp = netdev_priv(dev);

#ifdef USE_NAPI
	napi_disable(&cp->napi);
#endif

	mutex_lock(&cp->pm_mutex);

	netif_stop_queue(dev);

	/* quiesce the chip and re-arm autonegotiation so the next
	 * open starts from a clean link state
	 */
	cas_lock_all_save(cp, flags);
	cp->opened = 0;
	cas_reset(cp, 0);
	cas_phy_init(cp);
	cas_begin_auto_negotiation(cp, NULL);
	cas_clean_rings(cp);
	cas_unlock_all_restore(cp, flags);

	free_irq(cp->pdev->irq, (void *) dev);
	cas_spare_free(cp);
	cas_free_rxds(cp);
	cas_tx_tiny_free(cp);
	mutex_unlock(&cp->pm_mutex);
	return 0;
}
4352
/* ethtool statistic names, in the exact order cas_get_ethtool_stats()
 * fills the data array.  Fixed ETH_GSTRING_LEN slots allow the whole
 * table to be copied with one memcpy (see cas_get_strings()).
 */
static struct {
	const char name[ETH_GSTRING_LEN];
} ethtool_cassini_statnames[] = {
	{"collisions"},
	{"rx_bytes"},
	{"rx_crc_errors"},
	{"rx_dropped"},
	{"rx_errors"},
	{"rx_fifo_errors"},
	{"rx_frame_errors"},
	{"rx_length_errors"},
	{"rx_over_errors"},
	{"rx_packets"},
	{"tx_aborted_errors"},
	{"tx_bytes"},
	{"tx_dropped"},
	{"tx_errors"},
	{"tx_fifo_errors"},
	{"tx_packets"}
};
#define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames)
4374
/* Registers dumped by cas_read_regs() for 'ethtool -d'.  Negative
 * entries are MII PHY register numbers (read via cas_phy_read with the
 * sign flipped); non-negative entries are offsets into the chip's
 * memory-mapped register space.
 */
static struct {
	const int offsets;
} ethtool_register_table[] = {
	{-MII_BMSR},
	{-MII_BMCR},
	{REG_CAWR},
	{REG_INF_BURST},
	{REG_BIM_CFG},
	{REG_RX_CFG},
	{REG_HP_CFG},
	{REG_MAC_TX_CFG},
	{REG_MAC_RX_CFG},
	{REG_MAC_CTRL_CFG},
	{REG_MAC_XIF_CFG},
	{REG_MIF_CFG},
	{REG_PCS_CFG},
	{REG_SATURN_PCFG},
	{REG_PCS_MII_STATUS},
	{REG_PCS_STATE_MACHINE},
	{REG_MAC_COLL_EXCESS},
	{REG_MAC_COLL_LATE}
};
#define CAS_REG_LEN ARRAY_SIZE(ethtool_register_table)
#define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN)
4399
4400static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
4401{
4402 u8 *p;
4403 int i;
4404 unsigned long flags;
4405
4406 spin_lock_irqsave(&cp->lock, flags);
4407 for (i = 0, p = ptr; i < len ; i ++, p += sizeof(u32)) {
4408 u16 hval;
4409 u32 val;
4410 if (ethtool_register_table[i].offsets < 0) {
4411 hval = cas_phy_read(cp,
4412 -ethtool_register_table[i].offsets);
4413 val = hval;
4414 } else {
4415 val= readl(cp->regs+ethtool_register_table[i].offsets);
4416 }
4417 memcpy(p, (u8 *)&val, sizeof(u32));
4418 }
4419 spin_unlock_irqrestore(&cp->lock, flags);
4420}
4421
/* ndo_get_stats: fold the hardware MAC error counters and the
 * per-ring software counters into the aggregate slot
 * stats[N_TX_RINGS], zeroing each per-ring counter set as it is
 * consumed.  stat_lock[N_TX_RINGS] is the outermost stat lock; the
 * per-ring stat locks nest inside it.
 */
static struct net_device_stats *cas_get_stats(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);
	struct net_device_stats *stats = cp->net_stats;
	unsigned long flags;
	int i;
	unsigned long tmp;

	/* hardware is stopped: just return the last aggregate */
	if (!cp->hw_running)
		return stats + N_TX_RINGS;

	/* the 16-bit hardware error counters are read-and-cleared
	 * (cas_clear_mac_err below) so they can't wrap between calls
	 */
	spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
	stats[N_TX_RINGS].rx_crc_errors +=
	  readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
	stats[N_TX_RINGS].rx_frame_errors +=
		readl(cp->regs + REG_MAC_ALIGN_ERR) &0xffff;
	stats[N_TX_RINGS].rx_length_errors +=
		readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
#if 1
	/* excessive + late collisions count both as aborts and as
	 * collisions
	 */
	tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
		(readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff);
	stats[N_TX_RINGS].tx_aborted_errors += tmp;
	stats[N_TX_RINGS].collisions +=
	  tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
#else
	stats[N_TX_RINGS].tx_aborted_errors +=
		readl(cp->regs + REG_MAC_COLL_EXCESS);
	stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
		readl(cp->regs + REG_MAC_COLL_LATE);
#endif
	cas_clear_mac_err(cp);

	/* counters that are only tracked on ring 0 */
	spin_lock(&cp->stat_lock[0]);
	stats[N_TX_RINGS].collisions += stats[0].collisions;
	stats[N_TX_RINGS].rx_over_errors += stats[0].rx_over_errors;
	stats[N_TX_RINGS].rx_frame_errors += stats[0].rx_frame_errors;
	stats[N_TX_RINGS].rx_fifo_errors += stats[0].rx_fifo_errors;
	stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors;
	stats[N_TX_RINGS].tx_fifo_errors += stats[0].tx_fifo_errors;
	spin_unlock(&cp->stat_lock[0]);

	for (i = 0; i < N_TX_RINGS; i++) {
		spin_lock(&cp->stat_lock[i]);
		stats[N_TX_RINGS].rx_length_errors +=
			stats[i].rx_length_errors;
		stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
		stats[N_TX_RINGS].rx_packets += stats[i].rx_packets;
		stats[N_TX_RINGS].tx_packets += stats[i].tx_packets;
		stats[N_TX_RINGS].rx_bytes += stats[i].rx_bytes;
		stats[N_TX_RINGS].tx_bytes += stats[i].tx_bytes;
		stats[N_TX_RINGS].rx_errors += stats[i].rx_errors;
		stats[N_TX_RINGS].tx_errors += stats[i].tx_errors;
		stats[N_TX_RINGS].rx_dropped += stats[i].rx_dropped;
		stats[N_TX_RINGS].tx_dropped += stats[i].tx_dropped;
		/* per-ring counters restart from zero once folded in */
		memset(stats + i, 0, sizeof(struct net_device_stats));
		spin_unlock(&cp->stat_lock[i]);
	}
	spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags);
	return stats + N_TX_RINGS;
}
4492
4493
/* ndo_set_multicast_list: reprogram the MAC RX filter.  The RX MAC
 * (and then its hash filter) must be disabled and observed disabled
 * before the filter can be rewritten; each disable is polled for up
 * to STOP_TRIES iterations of 10us.
 */
static void cas_set_multicast(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);
	u32 rxcfg, rxcfg_new;
	unsigned long flags;
	int limit = STOP_TRIES;

	if (!cp->hw_running)
		return;

	spin_lock_irqsave(&cp->lock, flags);
	rxcfg = readl(cp->regs + REG_MAC_RX_CFG);

	/* disable RX MAC and wait for completion */
	writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
		if (!limit--)
			break;
		udelay(10);
	}

	/* disable hash filter and wait for completion */
	limit = STOP_TRIES;
	rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN);
	writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) {
		if (!limit--)
			break;
		udelay(10);
	}

	/* program hash filters and re-enable the RX MAC */
	cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp);
	rxcfg |= rxcfg_new;
	writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
	spin_unlock_irqrestore(&cp->lock, flags);
}
4531
4532static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4533{
4534 struct cas *cp = netdev_priv(dev);
4535 strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN);
4536 strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN);
4537 info->fw_version[0] = '\0';
4538 strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN);
4539 info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
4540 cp->casreg_len : CAS_MAX_REGS;
4541 info->n_stats = CAS_NUM_STAT_KEYS;
4542}
4543
/* ethtool get_settings: report link capabilities and current state.
 * Reads the PHY (MII case) or the PCS MII-control mirror under
 * cp->lock.  When the link is down, the user-configured mode is
 * reported instead of a stale negotiated one; during autonegotiation
 * speed 0 / duplex 0xff ("unknown") is returned.
 */
static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct cas *cp = netdev_priv(dev);
	u16 bmcr;
	int full_duplex, speed, pause;
	unsigned long flags;
	enum link_state linkstate = link_up;

	cmd->advertising = 0;
	cmd->supported = SUPPORTED_Autoneg;
	if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
		cmd->supported |= SUPPORTED_1000baseT_Full;
		cmd->advertising |= ADVERTISED_1000baseT_Full;
	}

	/* sample link state and mode registers under the lock */
	spin_lock_irqsave(&cp->lock, flags);
	bmcr = 0;
	linkstate = cp->lstate;
	if (CAS_PHY_MII(cp->phy_type)) {
		cmd->port = PORT_MII;
		cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ?
			XCVR_INTERNAL : XCVR_EXTERNAL;
		cmd->phy_address = cp->phy_addr;
		cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
			ADVERTISED_10baseT_Half |
			ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half |
			ADVERTISED_100baseT_Full;

		cmd->supported |=
			(SUPPORTED_10baseT_Half |
			 SUPPORTED_10baseT_Full |
			 SUPPORTED_100baseT_Half |
			 SUPPORTED_100baseT_Full |
			 SUPPORTED_TP | SUPPORTED_MII);

		if (cp->hw_running) {
			/* MIF polling must be paused around PHY reads */
			cas_mif_poll(cp, 0);
			bmcr = cas_phy_read(cp, MII_BMCR);
			cas_read_mii_link_mode(cp, &full_duplex,
					       &speed, &pause);
			cas_mif_poll(cp, 1);
		}

	} else {
		cmd->port = PORT_FIBRE;
		cmd->transceiver = XCVR_INTERNAL;
		cmd->phy_address = 0;
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->advertising |= ADVERTISED_FIBRE;

		if (cp->hw_running) {
			/* the PCS MII control register mirrors the BMCR
			 * bit layout, so it is decoded the same way below
			 */
			bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
			cas_read_pcs_link_mode(cp, &full_duplex,
					       &speed, &pause);
		}
	}
	spin_unlock_irqrestore(&cp->lock, flags);

	if (bmcr & BMCR_ANENABLE) {
		cmd->advertising |= ADVERTISED_Autoneg;
		cmd->autoneg = AUTONEG_ENABLE;
		cmd->speed = ((speed == 10) ?
			      SPEED_10 :
			      ((speed == 1000) ?
			       SPEED_1000 : SPEED_100));
		cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
	} else {
		cmd->autoneg = AUTONEG_DISABLE;
		cmd->speed =
			(bmcr & CAS_BMCR_SPEED1000) ?
			SPEED_1000 :
			((bmcr & BMCR_SPEED100) ? SPEED_100:
			 SPEED_10);
		cmd->duplex =
			(bmcr & BMCR_FULLDPLX) ?
			DUPLEX_FULL : DUPLEX_HALF;
	}
	if (linkstate != link_up) {
		/* link is down: report configured mode rather than a
		 * stale negotiated mode; during autoneg the values are
		 * meaningless, so report "unknown"
		 */
		if (cp->link_cntl & BMCR_ANENABLE) {
			cmd->speed = 0;
			cmd->duplex = 0xff;
		} else {
			cmd->speed = SPEED_10;
			if (cp->link_cntl & BMCR_SPEED100) {
				cmd->speed = SPEED_100;
			} else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
				cmd->speed = SPEED_1000;
			}
			cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)?
				DUPLEX_FULL : DUPLEX_HALF;
		}
	}
	return 0;
}
4651
4652static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4653{
4654 struct cas *cp = netdev_priv(dev);
4655 unsigned long flags;
4656
4657
4658 if (cmd->autoneg != AUTONEG_ENABLE &&
4659 cmd->autoneg != AUTONEG_DISABLE)
4660 return -EINVAL;
4661
4662 if (cmd->autoneg == AUTONEG_DISABLE &&
4663 ((cmd->speed != SPEED_1000 &&
4664 cmd->speed != SPEED_100 &&
4665 cmd->speed != SPEED_10) ||
4666 (cmd->duplex != DUPLEX_HALF &&
4667 cmd->duplex != DUPLEX_FULL)))
4668 return -EINVAL;
4669
4670
4671 spin_lock_irqsave(&cp->lock, flags);
4672 cas_begin_auto_negotiation(cp, cmd);
4673 spin_unlock_irqrestore(&cp->lock, flags);
4674 return 0;
4675}
4676
4677static int cas_nway_reset(struct net_device *dev)
4678{
4679 struct cas *cp = netdev_priv(dev);
4680 unsigned long flags;
4681
4682 if ((cp->link_cntl & BMCR_ANENABLE) == 0)
4683 return -EINVAL;
4684
4685
4686 spin_lock_irqsave(&cp->lock, flags);
4687 cas_begin_auto_negotiation(cp, NULL);
4688 spin_unlock_irqrestore(&cp->lock, flags);
4689
4690 return 0;
4691}
4692
4693static u32 cas_get_link(struct net_device *dev)
4694{
4695 struct cas *cp = netdev_priv(dev);
4696 return cp->lstate == link_up;
4697}
4698
4699static u32 cas_get_msglevel(struct net_device *dev)
4700{
4701 struct cas *cp = netdev_priv(dev);
4702 return cp->msg_enable;
4703}
4704
4705static void cas_set_msglevel(struct net_device *dev, u32 value)
4706{
4707 struct cas *cp = netdev_priv(dev);
4708 cp->msg_enable = value;
4709}
4710
4711static int cas_get_regs_len(struct net_device *dev)
4712{
4713 struct cas *cp = netdev_priv(dev);
4714 return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len: CAS_MAX_REGS;
4715}
4716
4717static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
4718 void *p)
4719{
4720 struct cas *cp = netdev_priv(dev);
4721 regs->version = 0;
4722
4723 cas_read_regs(cp, p, regs->len / sizeof(u32));
4724}
4725
4726static int cas_get_sset_count(struct net_device *dev, int sset)
4727{
4728 switch (sset) {
4729 case ETH_SS_STATS:
4730 return CAS_NUM_STAT_KEYS;
4731 default:
4732 return -EOPNOTSUPP;
4733 }
4734}
4735
4736static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4737{
4738 memcpy(data, ðtool_cassini_statnames,
4739 CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
4740}
4741
4742static void cas_get_ethtool_stats(struct net_device *dev,
4743 struct ethtool_stats *estats, u64 *data)
4744{
4745 struct cas *cp = netdev_priv(dev);
4746 struct net_device_stats *stats = cas_get_stats(cp->dev);
4747 int i = 0;
4748 data[i++] = stats->collisions;
4749 data[i++] = stats->rx_bytes;
4750 data[i++] = stats->rx_crc_errors;
4751 data[i++] = stats->rx_dropped;
4752 data[i++] = stats->rx_errors;
4753 data[i++] = stats->rx_fifo_errors;
4754 data[i++] = stats->rx_frame_errors;
4755 data[i++] = stats->rx_length_errors;
4756 data[i++] = stats->rx_over_errors;
4757 data[i++] = stats->rx_packets;
4758 data[i++] = stats->tx_aborted_errors;
4759 data[i++] = stats->tx_bytes;
4760 data[i++] = stats->tx_dropped;
4761 data[i++] = stats->tx_errors;
4762 data[i++] = stats->tx_fifo_errors;
4763 data[i++] = stats->tx_packets;
4764 BUG_ON(i != CAS_NUM_STAT_KEYS);
4765}
4766
/* ethtool entry points exported by this driver. */
static const struct ethtool_ops cas_ethtool_ops = {
	.get_drvinfo = cas_get_drvinfo,
	.get_settings = cas_get_settings,
	.set_settings = cas_set_settings,
	.nway_reset = cas_nway_reset,
	.get_link = cas_get_link,
	.get_msglevel = cas_get_msglevel,
	.set_msglevel = cas_set_msglevel,
	.get_regs_len = cas_get_regs_len,
	.get_regs = cas_get_regs,
	.get_sset_count = cas_get_sset_count,
	.get_strings = cas_get_strings,
	.get_ethtool_stats = cas_get_ethtool_stats,
};
4781
/* ndo_do_ioctl: MII ioctls.  pm_mutex prevents the chip from being
 * suspended or closed while a PHY access is in flight; cp->lock and
 * pausing MIF polling protect the actual MDIO transaction.
 */
static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct cas *cp = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(ifr);
	unsigned long flags;
	int rc = -EOPNOTSUPP;

	/* hold the PM mutex while doing ioctl's or we may collide
	 * with open/close and power management
	 */
	mutex_lock(&cp->pm_mutex);
	switch (cmd) {
	case SIOCGMIIPHY:		/* get address of MII PHY in use */
		data->phy_id = cp->phy_addr;
		/* fall through: also return the register value */

	case SIOCGMIIREG:		/* read MII PHY register */
		spin_lock_irqsave(&cp->lock, flags);
		cas_mif_poll(cp, 0);
		data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
		cas_mif_poll(cp, 1);
		spin_unlock_irqrestore(&cp->lock, flags);
		rc = 0;
		break;

	case SIOCSMIIREG:		/* write MII PHY register */
		spin_lock_irqsave(&cp->lock, flags);
		cas_mif_poll(cp, 0);
		rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
		cas_mif_poll(cp, 1);
		spin_unlock_irqrestore(&cp->lock, flags);
		break;
	default:
		break;
	}

	mutex_unlock(&cp->pm_mutex);
	return rc;
}
4821
4822
4823
4824
4825
/* One-time tuning of the PCI bridge above the Cassini.  Only applies
 * to the Intel bridge identified as 8086:537c; all other parents are
 * left alone.  The config-space writes use bridge-specific registers
 * whose values are magic -- do not change without the bridge
 * datasheet.
 */
static void __devinit cas_program_bridge(struct pci_dev *cas_pdev)
{
	struct pci_dev *pdev = cas_pdev->bus->self;
	u32 val;

	if (!pdev)
		return;

	if (pdev->vendor != 0x8086 || pdev->device != 0x537c)
		return;

	/* Clear bit 18 of config dword 0x40.
	 * NOTE(review): exact meaning unknown without the bridge
	 * datasheet -- presumed ordering/prefetch tweak; confirm.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	val &= ~0x00040000;
	pci_write_config_dword(pdev, 0x40, val);

	/* Config word 0x50: field at bits 12:10 set to 5, low 10 bits
	 * all set.  NOTE(review): bridge-specific tuning register --
	 * confirm against datasheet.
	 */
	pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff);

	/* Config word 0x52: three 3-bit fields of 0x7 plus a 3-bit
	 * field at bits 6:4 and a 4-bit field of 0xf at bits 3:0.
	 * NOTE(review): bridge-specific tuning register -- confirm.
	 */
	pci_write_config_word(pdev, 0x52,
			      (0x7 << 13) |
			      (0x7 << 10) |
			      (0x7 << 7) |
			      (0x7 << 4) |
			      (0xf << 0));

	/* force cacheline size to 0x8 (units of 4-byte words) */
	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);

	/* force the latency timer to its maximum so the device can
	 * hold the bus for longer bursts
	 */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff);
}
4905
/* net_device entry points for the Cassini interface. */
static const struct net_device_ops cas_netdev_ops = {
	.ndo_open		= cas_open,
	.ndo_stop		= cas_close,
	.ndo_start_xmit		= cas_start_xmit,
	.ndo_get_stats 		= cas_get_stats,
	.ndo_set_multicast_list = cas_set_multicast,
	.ndo_do_ioctl		= cas_ioctl,
	.ndo_tx_timeout		= cas_tx_timeout,
	.ndo_change_mtu		= cas_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cas_netpoll,
#endif
};
4921
/* PCI probe: enable and map the device, negotiate DMA masks and PCI
 * cacheline size, allocate the shared init block (descriptor rings),
 * initialize the software state and register the net_device.  The
 * error labels at the bottom unwind in reverse order.
 */
static int __devinit cas_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int cas_version_printed = 0;
	unsigned long casreg_len;
	struct net_device *dev;
	struct cas *cp;
	int i, err, pci_using_dac;
	u16 pci_cmd;
	u8 orig_cacheline_size = 0, cas_cacheline_size = 0;

	if (cas_version_printed++ == 0)
		pr_info("%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Cannot find proper PCI device "
			"base address, aborting\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	dev = alloc_etherdev(sizeof(*cp));
	if (!dev) {
		dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
		err = -ENOMEM;
		goto err_out_disable_pdev;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = pci_request_regions(pdev, dev->name);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_free_netdev;
	}
	pci_set_master(pdev);

	/* report parity errors but not SERR; enable memory-write-
	 * invalidate if the device supports it (best effort)
	 */
	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_SERR;
	pci_cmd |= PCI_COMMAND_PARITY;
	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
	if (pci_try_set_mwi(pdev))
		pr_warning("Could not enable MWI for %s\n", pci_name(pdev));

	cas_program_bridge(pdev);

	/* raise the PCI cacheline size towards the preferred value so
	 * MWI is effective; the original value is saved for restore on
	 * error/removal
	 */
#if 1
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
			     &orig_cacheline_size);
	if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
		cas_cacheline_size =
			(CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
			CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
		if (pci_write_config_byte(pdev,
					  PCI_CACHE_LINE_SIZE,
					  cas_cacheline_size)) {
			dev_err(&pdev->dev, "Could not set PCI cache "
				"line size\n");
			goto err_write_cacheline;
		}
	}
#endif

	/* configure DMA attributes: prefer 64-bit, fall back to 32-bit */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev,
						  DMA_BIT_MASK(64));
		if (err < 0) {
			dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
				"for consistent allocations\n");
			goto err_out_free_res;
		}

	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No usable DMA configuration, "
				"aborting\n");
			goto err_out_free_res;
		}
		pci_using_dac = 0;
	}

	casreg_len = pci_resource_len(pdev, 0);

	cp = netdev_priv(dev);
	cp->pdev = pdev;
#if 1
	/* 0 here records that the cacheline size was never changed */
	cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size: 0;
#endif
	cp->dev = dev;
	cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
		cassini_debug;

#if defined(CONFIG_SPARC)
	cp->of_node = pci_device_to_OF_node(pdev);
#endif

	cp->link_transition = LINK_TRANSITION_UNKNOWN;
	cp->link_transition_jiffies_valid = 0;

	spin_lock_init(&cp->lock);
	spin_lock_init(&cp->rx_inuse_lock);
	spin_lock_init(&cp->rx_spare_lock);
	for (i = 0; i < N_TX_RINGS; i++) {
		spin_lock_init(&cp->stat_lock[i]);
		spin_lock_init(&cp->tx_lock[i]);
	}
	spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
	mutex_init(&cp->pm_mutex);

	init_timer(&cp->link_timer);
	cp->link_timer.function = cas_link_timer;
	cp->link_timer.data = (unsigned long) cp;

#if 1
	/* reset-task bookkeeping: the global pending counter plus one
	 * counter per reset cause (see cas_reset_task)
	 */
	atomic_set(&cp->reset_task_pending, 0);
	atomic_set(&cp->reset_task_pending_all, 0);
	atomic_set(&cp->reset_task_pending_spare, 0);
	atomic_set(&cp->reset_task_pending_mtu, 0);
#endif
	INIT_WORK(&cp->reset_task, cas_reset_task);

	/* default link parameters from the module option, else autoneg */
	if (link_mode >= 0 && link_mode < 6)
		cp->link_cntl = link_modes[link_mode];
	else
		cp->link_cntl = BMCR_ANENABLE;
	cp->lstate = link_down;
	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
	netif_carrier_off(cp->dev);
	cp->timer_ticks = 0;

	/* give us access to cassini registers */
	cp->regs = pci_iomap(pdev, 0, casreg_len);
	if (!cp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		goto err_out_free_res;
	}
	cp->casreg_len = casreg_len;

	pci_save_state(pdev);
	cas_check_pci_invariants(cp);
	cas_hard_reset(cp);
	cas_reset(cp, 0);
	if (cas_check_invariants(cp))
		goto err_out_iounmap;
	if (cp->cas_flags & CAS_FLAG_SATURN)
		if (cas_saturn_firmware_init(cp))
			goto err_out_iounmap;

	cp->init_block = (struct cas_init_block *)
		pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
				     &cp->block_dvma);
	if (!cp->init_block) {
		dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
		goto err_out_iounmap;
	}

	for (i = 0; i < N_TX_RINGS; i++)
		cp->init_txds[i] = cp->init_block->txds[i];

	for (i = 0; i < N_RX_DESC_RINGS; i++)
		cp->init_rxds[i] = cp->init_block->rxds[i];

	for (i = 0; i < N_RX_COMP_RINGS; i++)
		cp->init_rxcs[i] = cp->init_block->rxcs[i];

	for (i = 0; i < N_RX_FLOWS; i++)
		skb_queue_head_init(&cp->rx_flows[i]);

	dev->netdev_ops = &cas_netdev_ops;
	dev->ethtool_ops = &cas_ethtool_ops;
	dev->watchdog_timeo = CAS_TX_TIMEOUT;

#ifdef USE_NAPI
	netif_napi_add(dev, &cp->napi, cas_poll, 64);
#endif
	dev->irq = pdev->irq;
	dev->dma = 0;

	/* Cassini features: checksum offload + SG unless disabled */
	if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;

	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;

	if (register_netdev(dev)) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_free_consistent;
	}

	i = readl(cp->regs + REG_BIM_CFG);
	netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n",
		    (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
		    (i & BIM_CFG_32BIT) ? "32" : "64",
		    (i & BIM_CFG_66MHZ) ? "66" : "33",
		    (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
		    dev->dev_addr);

	pci_set_drvdata(pdev, dev);
	cp->hw_running = 1;
	cas_entropy_reset(cp);
	cas_phy_init(cp);
	cas_begin_auto_negotiation(cp, NULL);
	return 0;

err_out_free_consistent:
	pci_free_consistent(pdev, sizeof(struct cas_init_block),
			    cp->init_block, cp->block_dvma);

err_out_iounmap:
	mutex_lock(&cp->pm_mutex);
	if (cp->hw_running)
		cas_shutdown(cp);
	mutex_unlock(&cp->pm_mutex);

	pci_iounmap(pdev, cp->regs);


err_out_free_res:
	pci_release_regions(pdev);

err_write_cacheline:
	/* restore the cacheline size we may have changed above; a
	 * value of 0 restores the power-on default
	 */
	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);

err_out_free_netdev:
	free_netdev(dev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return -ENODEV;
}
5181
/* PCI remove: tear everything down in reverse of cas_init_one.  Any
 * pending reset work is cancelled before the chip is shut down.
 */
static void __devexit cas_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct cas *cp;
	if (!dev)
		return;

	cp = netdev_priv(dev);
	unregister_netdev(dev);

	/* Saturn firmware image, if one was loaded */
	if (cp->fw_data)
		vfree(cp->fw_data);

	mutex_lock(&cp->pm_mutex);
	cancel_work_sync(&cp->reset_task);
	if (cp->hw_running)
		cas_shutdown(cp);
	mutex_unlock(&cp->pm_mutex);

#if 1
	if (cp->orig_cacheline_size) {
		/* restore the cacheline size that probe overwrote
		 * (non-zero only if it was actually changed)
		 */
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				      cp->orig_cacheline_size);
	}
#endif
	pci_free_consistent(pdev, sizeof(struct cas_init_block),
			    cp->init_block, cp->block_dvma);
	pci_iounmap(pdev, cp->regs);
	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
5218
5219#ifdef CONFIG_PM
/* PM suspend: if the interface is open, detach it, reset the chip and
 * drop the ring contents, then power the hardware down.  pm_mutex
 * keeps open/close/ioctl out while we do this.
 */
static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct cas *cp = netdev_priv(dev);
	unsigned long flags;

	mutex_lock(&cp->pm_mutex);

	/* if the device is opened, put it in a quiescent state */
	if (cp->opened) {
		netif_device_detach(dev);

		cas_lock_all_save(cp, flags);

		/* soft reset + ring cleanup; the allocated buffers are
		 * kept around so resume can rebuild the rings without
		 * reallocating
		 */
		cas_reset(cp, 0);
		cas_clean_rings(cp);
		cas_unlock_all_restore(cp, flags);
	}

	if (cp->hw_running)
		cas_shutdown(cp);
	mutex_unlock(&cp->pm_mutex);

	return 0;
}
5250
/* PM resume: hard-reset the chip, and if the interface was open,
 * rebuild the rings, reinitialize the hardware and reattach the
 * netdev.
 */
static int cas_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct cas *cp = netdev_priv(dev);

	netdev_info(dev, "resuming\n");

	mutex_lock(&cp->pm_mutex);
	cas_hard_reset(cp);
	if (cp->opened) {
		unsigned long flags;
		cas_lock_all_save(cp, flags);
		cas_reset(cp, 0);
		cp->hw_running = 1;
		cas_clean_rings(cp);
		cas_init_hw(cp, 1);
		cas_unlock_all_restore(cp, flags);

		netif_device_attach(dev);
	}
	mutex_unlock(&cp->pm_mutex);
	return 0;
}
5274#endif
5275
/* PCI driver glue; suspend/resume only when power management is built. */
static struct pci_driver cas_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= cas_pci_tbl,
	.probe		= cas_init_one,
	.remove		= __devexit_p(cas_remove_one),
#ifdef CONFIG_PM
	.suspend	= cas_suspend,
	.resume		= cas_resume
#endif
};
5286
5287static int __init cas_init(void)
5288{
5289 if (linkdown_timeout > 0)
5290 link_transition_timeout = linkdown_timeout * HZ;
5291 else
5292 link_transition_timeout = 0;
5293
5294 return pci_register_driver(&cas_driver);
5295}
5296
/* Module unload: unregister the PCI driver (cas_remove_one runs for
 * every bound device).
 */
static void __exit cas_cleanup(void)
{
	pci_unregister_driver(&cas_driver);
}
5301
5302module_init(cas_init);
5303module_exit(cas_cleanup);
5304