/* cassini.c: Sun Microsystems Cassini(+) ethernet driver. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/mii.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/firmware.h>

#include <net/checksum.h>

#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#define cas_page_map(x)		kmap_atomic((x))
#define cas_page_unmap(x)	kunmap_atomic((x))
#define CAS_NCPUS		num_online_cpus()

#define cas_skb_release(x)	netif_rx(x)

#define USE_HP_WORKAROUND
#define HP_WORKAROUND_DEFAULT
#define CAS_HP_ALT_FIRMWARE	cas_prog_null

#include "cassini.h"
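
/* Compile-time options: TX completion writeback, RX interrupt blanking,
 * optional extra PCI interrupt vectors, and RX copy/batching thresholds.
 */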
#define USE_TX_COMPWB
#define USE_CSMA_CD_PROTO
#define USE_RX_BLANK
#undef USE_ENTROPY_DEV

#undef USE_PCI_INTB
#undef USE_PCI_INTC
#undef USE_PCI_INTD
#undef USE_QOS

#undef USE_VPD_DEBUG

#define USE_PAGE_ORDER
#define RX_DONT_BATCH	0
#define RX_COPY_ALWAYS	0
#define RX_COPY_MIN	64
#undef RX_COUNT_BUFFERS

#define DRV_MODULE_NAME		"cassini"
#define DRV_MODULE_VERSION	"1.6"
#define DRV_MODULE_RELDATE	"21 May 2008"

#define CAS_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define CAS_TX_TIMEOUT			(HZ)
#define CAS_LINK_TIMEOUT		(22*HZ/10)
#define CAS_LINK_FAST_TIMEOUT		(1)

#define STOP_TRIES_PHY	1000
#define STOP_TRIES	5000

#define CAS_MIN_FRAME			97
#define CAS_1000MB_MIN_FRAME		255
#define CAS_MIN_MTU			60
#define CAS_MAX_MTU			min(((cp->page_size << 1) - 0x50), 9000)

#if 1
#else
#define CAS_RESET_MTU			1
#define CAS_RESET_ALL			2
#define CAS_RESET_SPARE			3
#endif
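
/* Driver version string and module parameters: debug message bitmap,
 * forced link mode, and the minimum interval (in seconds) between resets
 * issued for the PCS link-down condition (disabled when not positive).
 */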
static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

static int cassini_debug = -1;
static int link_mode;

MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("sun/cassini.bin");
module_param(cassini_debug, int, 0);
MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
module_param(link_mode, int, 0);
MODULE_PARM_DESC(link_mode, "default link mode");

#define DEFAULT_LINKDOWN_TIMEOUT 5

static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
module_param(linkdown_timeout, int, 0);
MODULE_PARM_DESC(linkdown_timeout,
"min reset interval in sec. for PCS linkdown issue; disabled if not positive");

static int link_transition_timeout;

static u16 link_modes[] = {
	BMCR_ANENABLE,
	0,
	BMCR_SPEED100,
	BMCR_FULLDPLX,
	BMCR_SPEED100|BMCR_FULLDPLX,
	CAS_BMCR_SPEED1000|BMCR_FULLDPLX
};

static const struct pci_device_id cas_pci_tbl[] = {
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, cas_pci_tbl);

static void cas_set_link_modes(struct cas *cp);
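
/* TX ring locking helpers: the per-ring TX locks are always taken in
 * ascending ring order (with the device lock held first by cas_lock_all)
 * and released in the reverse order to avoid lock-ordering problems.
 */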
static inline void cas_lock_tx(struct cas *cp)
{
	int i;

	for (i = 0; i < N_TX_RINGS; i++)
		spin_lock_nested(&cp->tx_lock[i], i);
}

static inline void cas_lock_all(struct cas *cp)
{
	spin_lock_irq(&cp->lock);
	cas_lock_tx(cp);
}

#define cas_lock_all_save(cp, flags) \
do { \
	struct cas *xxxcp = (cp); \
	spin_lock_irqsave(&xxxcp->lock, flags); \
	cas_lock_tx(xxxcp); \
} while (0)

static inline void cas_unlock_tx(struct cas *cp)
{
	int i;

	for (i = N_TX_RINGS; i > 0; i--)
		spin_unlock(&cp->tx_lock[i - 1]);
}

static inline void cas_unlock_all(struct cas *cp)
{
	cas_unlock_tx(cp);
	spin_unlock_irq(&cp->lock);
}

#define cas_unlock_all_restore(cp, flags) \
do { \
	struct cas *xxxcp = (cp); \
	cas_unlock_tx(xxxcp); \
	spin_unlock_irqrestore(&xxxcp->lock, flags); \
} while (0)
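
/* Mask interrupts for one completion ring; ring 0 masks the primary
 * interrupt register, the others use the REG_PLUS per-ring masks.
 */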
static void cas_disable_irq(struct cas *cp, const int ring)
{
	if (ring == 0) {
		writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
		return;
	}

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
		case 1:
#endif
#ifdef USE_PCI_INTC
		case 2:
#endif
#ifdef USE_PCI_INTD
		case 3:
#endif
			writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
			       cp->regs + REG_PLUS_INTRN_MASK(ring));
			break;
#endif
		default:
			writel(INTRN_MASK_CLEAR_ALL, cp->regs +
			       REG_PLUS_INTRN_MASK(ring));
			break;
		}
	}
}

static inline void cas_mask_intr(struct cas *cp)
{
	int i;

	for (i = 0; i < N_RX_COMP_RINGS; i++)
		cas_disable_irq(cp, i);
}
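
/* Unmask interrupts for a completion ring; ring 0 uses the primary
 * interrupt mask register, the others their REG_PLUS per-ring masks.
 */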
static void cas_enable_irq(struct cas *cp, const int ring)
{
	if (ring == 0) {
		writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
		return;
	}

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
		case 1:
#endif
#ifdef USE_PCI_INTC
		case 2:
#endif
#ifdef USE_PCI_INTD
		case 3:
#endif
			writel(INTRN_MASK_RX_EN, cp->regs +
			       REG_PLUS_INTRN_MASK(ring));
			break;
#endif
		default:
			break;
		}
	}
}

static inline void cas_unmask_intr(struct cas *cp)
{
	int i;

	for (i = 0; i < N_RX_COMP_RINGS; i++)
		cas_enable_irq(cp, i);
}
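
/* Entropy-device hooks; compiled out unless USE_ENTROPY_DEV is defined. */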
static inline void cas_entropy_gather(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
		return;

	batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
			    readl(cp->regs + REG_ENTROPY_IV),
			    sizeof(uint64_t)*8);
#endif
}

static inline void cas_entropy_reset(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
		return;

	writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
	       cp->regs + REG_BIM_LOCAL_DEV_EN);
	writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
	writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);

	if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
		cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
#endif
}
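
/* PHY register access through the MIF frame register: issue the frame and
 * poll for the turnaround bit, giving up after STOP_TRIES_PHY attempts.
 */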
static u16 cas_phy_read(struct cas *cp, int reg)
{
	u32 cmd;
	int limit = STOP_TRIES_PHY;

	cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
	cmd |= MIF_FRAME_TURN_AROUND_MSB;
	writel(cmd, cp->regs + REG_MIF_FRAME);

	while (limit-- > 0) {
		udelay(10);
		cmd = readl(cp->regs + REG_MIF_FRAME);
		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
			return cmd & MIF_FRAME_DATA_MASK;
	}
	return 0xFFFF;
}

static int cas_phy_write(struct cas *cp, int reg, u16 val)
{
	int limit = STOP_TRIES_PHY;
	u32 cmd;

	cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
	cmd |= MIF_FRAME_TURN_AROUND_MSB;
	cmd |= val & MIF_FRAME_DATA_MASK;
	writel(cmd, cp->regs + REG_MIF_FRAME);

	while (limit-- > 0) {
		udelay(10);
		cmd = readl(cp->regs + REG_MIF_FRAME);
		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
			return 0;
	}
	return -1;
}

static void cas_phy_powerup(struct cas *cp)
{
	u16 ctl = cas_phy_read(cp, MII_BMCR);

	if ((ctl & BMCR_PDOWN) == 0)
		return;
	ctl &= ~BMCR_PDOWN;
	cas_phy_write(cp, MII_BMCR, ctl);
}

static void cas_phy_powerdown(struct cas *cp)
{
	u16 ctl = cas_phy_read(cp, MII_BMCR);

	if (ctl & BMCR_PDOWN)
		return;
	ctl |= BMCR_PDOWN;
	cas_phy_write(cp, MII_BMCR, ctl);
}
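
/* RX buffer pages are allocated at cp->page_order and DMA-mapped for the
 * device; cas_page_free() unmaps and releases them again.
 */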
static int cas_page_free(struct cas *cp, cas_page_t *page)
{
	pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
		       PCI_DMA_FROMDEVICE);
	__free_pages(page->buffer, cp->page_order);
	kfree(page);
	return 0;
}

#ifdef RX_COUNT_BUFFERS
#define RX_USED_ADD(x, y)	((x)->used += (y))
#define RX_USED_SET(x, y)	((x)->used = (y))
#else
#define RX_USED_ADD(x, y)
#define RX_USED_SET(x, y)
#endif

static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
{
	cas_page_t *page;

	page = kmalloc(sizeof(cas_page_t), flags);
	if (!page)
		return NULL;

	INIT_LIST_HEAD(&page->list);
	RX_USED_SET(page, 0);
	page->buffer = alloc_pages(flags, cp->page_order);
	if (!page->buffer)
		goto page_err;
	page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
				      cp->page_size, PCI_DMA_FROMDEVICE);
	return page;

page_err:
	kfree(page);
	return NULL;
}
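
/* Spare page pool used to replenish the RX descriptor rings. */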
static void cas_spare_init(struct cas *cp)
{
	spin_lock(&cp->rx_inuse_lock);
	INIT_LIST_HEAD(&cp->rx_inuse_list);
	spin_unlock(&cp->rx_inuse_lock);

	spin_lock(&cp->rx_spare_lock);
	INIT_LIST_HEAD(&cp->rx_spare_list);
	cp->rx_spares_needed = RX_SPARE_COUNT;
	spin_unlock(&cp->rx_spare_lock);
}

static void cas_spare_free(struct cas *cp)
{
	struct list_head list, *elem, *tmp;

	INIT_LIST_HEAD(&list);
	spin_lock(&cp->rx_spare_lock);
	list_splice_init(&cp->rx_spare_list, &list);
	spin_unlock(&cp->rx_spare_lock);
	list_for_each_safe(elem, tmp, &list) {
		cas_page_free(cp, list_entry(elem, cas_page_t, list));
	}

	INIT_LIST_HEAD(&list);
#if 1
	spin_lock(&cp->rx_inuse_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_inuse_lock);
#else
	spin_lock(&cp->rx_spare_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_spare_lock);
#endif
	list_for_each_safe(elem, tmp, &list) {
		cas_page_free(cp, list_entry(elem, cas_page_t, list));
	}
}
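
/* Reclaim in-use pages whose reference count has dropped back to one
 * (the network stack no longer holds them) and use them to refill the
 * spare list, allocating fresh pages for any remaining shortfall.
 */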
static void cas_spare_recover(struct cas *cp, const gfp_t flags)
{
	struct list_head list, *elem, *tmp;
	int needed, i;

	INIT_LIST_HEAD(&list);
	spin_lock(&cp->rx_inuse_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_inuse_lock);

	list_for_each_safe(elem, tmp, &list) {
		cas_page_t *page = list_entry(elem, cas_page_t, list);

		if (page_count(page->buffer) > 1)
			continue;

		list_del(elem);
		spin_lock(&cp->rx_spare_lock);
		if (cp->rx_spares_needed > 0) {
			list_add(elem, &cp->rx_spare_list);
			cp->rx_spares_needed--;
			spin_unlock(&cp->rx_spare_lock);
		} else {
			spin_unlock(&cp->rx_spare_lock);
			cas_page_free(cp, page);
		}
	}

	if (!list_empty(&list)) {
		spin_lock(&cp->rx_inuse_lock);
		list_splice(&list, &cp->rx_inuse_list);
		spin_unlock(&cp->rx_inuse_lock);
	}

	spin_lock(&cp->rx_spare_lock);
	needed = cp->rx_spares_needed;
	spin_unlock(&cp->rx_spare_lock);
	if (!needed)
		return;

	INIT_LIST_HEAD(&list);
	i = 0;
	while (i < needed) {
		cas_page_t *spare = cas_page_alloc(cp, flags);
		if (!spare)
			break;
		list_add(&spare->list, &list);
		i++;
	}

	spin_lock(&cp->rx_spare_lock);
	list_splice(&list, &cp->rx_spare_list);
	cp->rx_spares_needed -= i;
	spin_unlock(&cp->rx_spare_lock);
}
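
/* Pull a page off the spare list; if the list is empty an immediate
 * recovery is attempted, and the reset task is kicked periodically to
 * replenish spares.
 */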
static cas_page_t *cas_page_dequeue(struct cas *cp)
{
	struct list_head *entry;
	int recover;

	spin_lock(&cp->rx_spare_lock);
	if (list_empty(&cp->rx_spare_list)) {
		spin_unlock(&cp->rx_spare_lock);
		cas_spare_recover(cp, GFP_ATOMIC);
		spin_lock(&cp->rx_spare_lock);
		if (list_empty(&cp->rx_spare_list)) {
			netif_err(cp, rx_err, cp->dev,
				  "no spare buffers available\n");
			spin_unlock(&cp->rx_spare_lock);
			return NULL;
		}
	}

	entry = cp->rx_spare_list.next;
	list_del(entry);
	recover = ++cp->rx_spares_needed;
	spin_unlock(&cp->rx_spare_lock);

	if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
#if 1
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_spare);
		schedule_work(&cp->reset_task);
#else
		atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
		schedule_work(&cp->reset_task);
#endif
	}
	return list_entry(entry, cas_page_t, list);
}
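
/* Enable or disable MIF hardware polling of the PHY's BMSR register. */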
static void cas_mif_poll(struct cas *cp, const int enable)
{
	u32 cfg;

	cfg = readl(cp->regs + REG_MIF_CFG);
	cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);

	if (cp->phy_type & CAS_PHY_MII_MDIO1)
		cfg |= MIF_CFG_PHY_SELECT;

	if (enable) {
		cfg |= MIF_CFG_POLL_EN;
		cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
		cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
	}
	writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
	       cp->regs + REG_MIF_MASK);
	writel(cfg, cp->regs + REG_MIF_CFG);
}
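
/* Start (re)negotiation of the link, either through the PCS/SERDES
 * registers or the MII PHY, honouring a forced mode from ethtool; if the
 * configuration changed while the link was up, a chip reset is scheduled
 * instead.
 */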
static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
{
	u16 ctl;
#if 1
	int lcntl;
	int changed = 0;
	int oldstate = cp->lstate;
	int link_was_not_down = !(oldstate == link_down);
#endif

	if (!ep)
		goto start_aneg;
	lcntl = cp->link_cntl;
	if (ep->autoneg == AUTONEG_ENABLE)
		cp->link_cntl = BMCR_ANENABLE;
	else {
		u32 speed = ethtool_cmd_speed(ep);
		cp->link_cntl = 0;
		if (speed == SPEED_100)
			cp->link_cntl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			cp->link_cntl |= CAS_BMCR_SPEED1000;
		if (ep->duplex == DUPLEX_FULL)
			cp->link_cntl |= BMCR_FULLDPLX;
	}
#if 1
	changed = (lcntl != cp->link_cntl);
#endif
start_aneg:
	if (cp->lstate == link_up) {
		netdev_info(cp->dev, "PCS link down\n");
	} else {
		if (changed) {
			netdev_info(cp->dev, "link configuration changed\n");
		}
	}
	cp->lstate = link_down;
	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
	if (!cp->hw_running)
		return;
#if 1
	if (oldstate == link_up)
		netif_carrier_off(cp->dev);
	if (changed && link_was_not_down) {
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_all);
		schedule_work(&cp->reset_task);
		cp->timer_ticks = 0;
		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
		return;
	}
#endif
	if (cp->phy_type & CAS_PHY_SERDES) {
		u32 val = readl(cp->regs + REG_PCS_MII_CTRL);

		if (cp->link_cntl & BMCR_ANENABLE) {
			val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
			cp->lstate = link_aneg;
		} else {
			if (cp->link_cntl & BMCR_FULLDPLX)
				val |= PCS_MII_CTRL_DUPLEX;
			val &= ~PCS_MII_AUTONEG_EN;
			cp->lstate = link_force_ok;
		}
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		writel(val, cp->regs + REG_PCS_MII_CTRL);

	} else {
		cas_mif_poll(cp, 0);
		ctl = cas_phy_read(cp, MII_BMCR);
		ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
			 CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
		ctl |= cp->link_cntl;
		if (ctl & BMCR_ANENABLE) {
			ctl |= BMCR_ANRESTART;
			cp->lstate = link_aneg;
		} else {
			cp->lstate = link_force_ok;
		}
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		cas_phy_write(cp, MII_BMCR, ctl);
		cas_mif_poll(cp, 1);
	}

	cp->timer_ticks = 0;
	mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
}
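
/* Reset the MII PHY and wait for the reset bit to self-clear. */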
static int cas_reset_mii_phy(struct cas *cp)
{
	int limit = STOP_TRIES_PHY;
	u16 val;

	cas_phy_write(cp, MII_BMCR, BMCR_RESET);
	udelay(100);
	while (--limit) {
		val = cas_phy_read(cp, MII_BMCR);
		if ((val & BMCR_RESET) == 0)
			break;
		udelay(10);
	}
	return limit <= 0;
}
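
/* Saturn (DP83065) PHY firmware handling: the image is fetched with
 * request_firmware("sun/cassini.bin") at init time and later written into
 * the PHY's internal memory one byte at a time over MII.
 */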
static void cas_saturn_firmware_init(struct cas *cp)
{
	const struct firmware *fw;
	const char fw_name[] = "sun/cassini.bin";
	int err;

	if (PHY_NS_DP83065 != cp->phy_id)
		return;

	err = request_firmware(&fw, fw_name, &cp->pdev->dev);
	if (err) {
		pr_err("Failed to load firmware \"%s\"\n",
		       fw_name);
		return;
	}
	if (fw->size < 2) {
		pr_err("bogus length %zu in \"%s\"\n",
		       fw->size, fw_name);
		goto out;
	}
	cp->fw_load_addr = fw->data[1] << 8 | fw->data[0];
	cp->fw_size = fw->size - 2;
	cp->fw_data = vmalloc(cp->fw_size);
	if (!cp->fw_data)
		goto out;
	memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
out:
	release_firmware(fw);
}

static void cas_saturn_firmware_load(struct cas *cp)
{
	int i;

	if (!cp->fw_data)
		return;

	cas_phy_powerdown(cp);

	cas_phy_write(cp, DP83065_MII_MEM, 0x0);

	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
	cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
	cas_phy_write(cp, DP83065_MII_REGD, 0x82);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
	cas_phy_write(cp, DP83065_MII_REGD, 0x0);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
	cas_phy_write(cp, DP83065_MII_REGD, 0x39);

	cas_phy_write(cp, DP83065_MII_MEM, 0x1);
	cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
	for (i = 0; i < cp->fw_size; i++)
		cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);

	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
	cas_phy_write(cp, DP83065_MII_REGD, 0x1);
}
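
/* Bring up the PHY: MII PHYs get vendor-specific fixups and
 * 10/100/1000 + pause advertisement; otherwise the PCS/SERDES path is
 * reset and configured for full-duplex with pause.
 */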
static void cas_phy_init(struct cas *cp)
{
	u16 val;

	if (CAS_PHY_MII(cp->phy_type)) {
		writel(PCS_DATAPATH_MODE_MII,
		       cp->regs + REG_PCS_DATAPATH_MODE);

		cas_mif_poll(cp, 0);
		cas_reset_mii_phy(cp);

		if (PHY_LUCENT_B0 == cp->phy_id) {
			cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
			cas_phy_write(cp, MII_BMCR, 0x00f1);
			cas_phy_write(cp, LUCENT_MII_REG, 0x0);

		} else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
			cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);

		} else if (PHY_BROADCOM_5411 == cp->phy_id) {
			val = cas_phy_read(cp, BROADCOM_MII_REG4);
			val = cas_phy_read(cp, BROADCOM_MII_REG4);
			if (val & 0x0080) {
				cas_phy_write(cp, BROADCOM_MII_REG4,
					      val & ~0x0080);
			}

		} else if (cp->cas_flags & CAS_FLAG_SATURN) {
			writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
			       SATURN_PCFG_FSI : 0x0,
			       cp->regs + REG_SATURN_PCFG);

			if (PHY_NS_DP83065 == cp->phy_id) {
				cas_saturn_firmware_load(cp);
			}
			cas_phy_powerup(cp);
		}

		val = cas_phy_read(cp, MII_BMCR);
		val &= ~BMCR_ANENABLE;
		cas_phy_write(cp, MII_BMCR, val);
		udelay(10);

		cas_phy_write(cp, MII_ADVERTISE,
			      cas_phy_read(cp, MII_ADVERTISE) |
			      (ADVERTISE_10HALF | ADVERTISE_10FULL |
			       ADVERTISE_100HALF | ADVERTISE_100FULL |
			       CAS_ADVERTISE_PAUSE |
			       CAS_ADVERTISE_ASYM_PAUSE));

		if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
			val = cas_phy_read(cp, CAS_MII_1000_CTRL);
			val &= ~CAS_ADVERTISE_1000HALF;
			val |= CAS_ADVERTISE_1000FULL;
			cas_phy_write(cp, CAS_MII_1000_CTRL, val);
		}

	} else {
		u32 val;
		int limit;

		writel(PCS_DATAPATH_MODE_SERDES,
		       cp->regs + REG_PCS_DATAPATH_MODE);

		if (cp->cas_flags & CAS_FLAG_SATURN)
			writel(0, cp->regs + REG_SATURN_PCFG);

		val = readl(cp->regs + REG_PCS_MII_CTRL);
		val |= PCS_MII_RESET;
		writel(val, cp->regs + REG_PCS_MII_CTRL);

		limit = STOP_TRIES;
		while (--limit > 0) {
			udelay(10);
			if ((readl(cp->regs + REG_PCS_MII_CTRL) &
			     PCS_MII_RESET) == 0)
				break;
		}
		if (limit <= 0)
			netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
				    readl(cp->regs + REG_PCS_STATE_MACHINE));

		writel(0x0, cp->regs + REG_PCS_CFG);

		val = readl(cp->regs + REG_PCS_MII_ADVERT);
		val &= ~PCS_MII_ADVERT_HD;
		val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
			PCS_MII_ADVERT_ASYM_PAUSE);
		writel(val, cp->regs + REG_PCS_MII_ADVERT);

		writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);

		writel(PCS_SERDES_CTRL_SYNCD_EN,
		       cp->regs + REG_PCS_SERDES_CTRL);
	}
}
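
/* Check PCS link state, handling up/down transitions and deciding whether
 * the caller should schedule a chip reset (returns 1 if so).
 */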
static int cas_pcs_link_check(struct cas *cp)
{
	u32 stat, state_machine;
	int retval = 0;

	stat = readl(cp->regs + REG_PCS_MII_STATUS);
	if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
		stat = readl(cp->regs + REG_PCS_MII_STATUS);

	if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
		     PCS_MII_STATUS_REMOTE_FAULT)) ==
	    (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
		netif_info(cp, link, cp->dev, "PCS RemoteFault\n");

	state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
	if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
		stat &= ~PCS_MII_STATUS_LINK_STATUS;
	} else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
		stat |= PCS_MII_STATUS_LINK_STATUS;
	}

	if (stat & PCS_MII_STATUS_LINK_STATUS) {
		if (cp->lstate != link_up) {
			if (cp->opened) {
				cp->lstate = link_up;
				cp->link_transition = LINK_TRANSITION_LINK_UP;

				cas_set_link_modes(cp);
				netif_carrier_on(cp->dev);
			}
		}
	} else if (cp->lstate == link_up) {
		cp->lstate = link_down;
		if (link_transition_timeout != 0 &&
		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
		    !cp->link_transition_jiffies_valid) {
			retval = 1;
			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
			cp->link_transition_jiffies = jiffies;
			cp->link_transition_jiffies_valid = 1;
		} else {
			cp->link_transition = LINK_TRANSITION_ON_FAILURE;
		}
		netif_carrier_off(cp->dev);
		if (cp->opened)
			netif_info(cp, link, cp->dev, "PCS link down\n");

		if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
			stat = readl(cp->regs + REG_PCS_SERDES_STATE);
			if (stat == 0x03)
				return 1;
		}
	} else if (cp->lstate == link_down) {
		if (link_transition_timeout != 0 &&
		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
		    !cp->link_transition_jiffies_valid) {
			retval = 1;
			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
			cp->link_transition_jiffies = jiffies;
			cp->link_transition_jiffies_valid = 1;
		} else {
			cp->link_transition = LINK_TRANSITION_STILL_FAILED;
		}
	}

	return retval;
}

static int cas_pcs_interrupt(struct net_device *dev,
			     struct cas *cp, u32 status)
{
	u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);

	if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
		return 0;
	return cas_pcs_link_check(cp);
}
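
/* TX MAC interrupt: record FIFO underruns, oversize packets and collision
 * counter wraparounds in the ring-0 statistics.
 */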
static int cas_txmac_interrupt(struct net_device *dev,
			       struct cas *cp, u32 status)
{
	u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);

	if (!txmac_stat)
		return 0;

	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
		     "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);

	if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
	    !(txmac_stat & ~MAC_TX_DEFER_TIMER))
		return 0;

	spin_lock(&cp->stat_lock[0]);
	if (txmac_stat & MAC_TX_UNDERRUN) {
		netdev_err(dev, "TX MAC xmit underrun\n");
		cp->net_stats[0].tx_fifo_errors++;
	}

	if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
		netdev_err(dev, "TX MAC max packet size error\n");
		cp->net_stats[0].tx_errors++;
	}

	if (txmac_stat & MAC_TX_COLL_NORMAL)
		cp->net_stats[0].collisions += 0x10000;

	if (txmac_stat & MAC_TX_COLL_EXCESS) {
		cp->net_stats[0].tx_aborted_errors += 0x10000;
		cp->net_stats[0].collisions += 0x10000;
	}

	if (txmac_stat & MAC_TX_COLL_LATE) {
		cp->net_stats[0].tx_aborted_errors += 0x10000;
		cp->net_stats[0].collisions += 0x10000;
	}
	spin_unlock(&cp->stat_lock[0]);

	return 0;
}
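
/* Load a header-parser (HP) program into the chip's instruction RAM. */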
static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
{
	cas_hp_inst_t *inst;
	u32 val;
	int i;

	i = 0;
	while ((inst = firmware) && inst->note) {
		writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);

		val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
		val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);

		val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
		val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
		val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
		val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
		val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
		val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
		val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);

		val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
		++firmware;
		++i;
	}
}
1201
1202static void cas_init_rx_dma(struct cas *cp)
1203{
1204 u64 desc_dma = cp->block_dvma;
1205 u32 val;
1206 int i, size;
1207
1208
1209 val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
1210 val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
1211 val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
1212 if ((N_RX_DESC_RINGS > 1) &&
1213 (cp->cas_flags & CAS_FLAG_REG_PLUS))
1214 val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
1215 writel(val, cp->regs + REG_RX_CFG);
1216
1217 val = (unsigned long) cp->init_rxds[0] -
1218 (unsigned long) cp->init_block;
1219 writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
1220 writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
1221 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
1222
1223 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1224
1225
1226
1227 val = (unsigned long) cp->init_rxds[1] -
1228 (unsigned long) cp->init_block;
1229 writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
1230 writel((desc_dma + val) & 0xffffffff, cp->regs +
1231 REG_PLUS_RX_DB1_LOW);
1232 writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
1233 REG_PLUS_RX_KICK1);
1234 }
1235
1236
1237 val = (unsigned long) cp->init_rxcs[0] -
1238 (unsigned long) cp->init_block;
1239 writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
1240 writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);
1241
1242 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1243
1244 for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
1245 val = (unsigned long) cp->init_rxcs[i] -
1246 (unsigned long) cp->init_block;
1247 writel((desc_dma + val) >> 32, cp->regs +
1248 REG_PLUS_RX_CBN_HI(i));
1249 writel((desc_dma + val) & 0xffffffff, cp->regs +
1250 REG_PLUS_RX_CBN_LOW(i));
1251 }
1252 }
1253
1254
1255
1256
1257
1258 readl(cp->regs + REG_INTR_STATUS_ALIAS);
1259 writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
1260 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1261 for (i = 1; i < N_RX_COMP_RINGS; i++)
1262 readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));
1263
1264
1265 if (N_RX_COMP_RINGS > 1)
1266 writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
1267 cp->regs + REG_PLUS_ALIASN_CLEAR(1));
1268
1269 for (i = 2; i < N_RX_COMP_RINGS; i++)
1270 writel(INTR_RX_DONE_ALT,
1271 cp->regs + REG_PLUS_ALIASN_CLEAR(i));
1272 }
1273
1274
1275 val = CAS_BASE(RX_PAUSE_THRESH_OFF,
1276 cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
1277 val |= CAS_BASE(RX_PAUSE_THRESH_ON,
1278 cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
1279 writel(val, cp->regs + REG_RX_PAUSE_THRESH);
1280
1281
1282 for (i = 0; i < 64; i++) {
1283 writel(i, cp->regs + REG_RX_TABLE_ADDR);
1284 writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
1285 writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
1286 writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
1287 }
1288
1289
1290 writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
1291 writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);
1292
1293
1294#ifdef USE_RX_BLANK
1295 val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
1296 val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
1297 writel(val, cp->regs + REG_RX_BLANK);
1298#else
1299 writel(0x0, cp->regs + REG_RX_BLANK);
1300#endif
1301
1302
1303
1304
1305
1306
1307
1308 val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
1309 writel(val, cp->regs + REG_RX_AE_THRESH);
1310 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1311 val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
1312 writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
1313 }
1314
1315
1316
1317
1318 writel(0x0, cp->regs + REG_RX_RED);
1319
1320
1321 val = 0;
1322 if (cp->page_size == 0x1000)
1323 val = 0x1;
1324 else if (cp->page_size == 0x2000)
1325 val = 0x2;
1326 else if (cp->page_size == 0x4000)
1327 val = 0x3;
1328
1329
1330 size = cp->dev->mtu + 64;
1331 if (size > cp->page_size)
1332 size = cp->page_size;
1333
1334 if (size <= 0x400)
1335 i = 0x0;
1336 else if (size <= 0x800)
1337 i = 0x1;
1338 else if (size <= 0x1000)
1339 i = 0x2;
1340 else
1341 i = 0x3;
1342
1343 cp->mtu_stride = 1 << (i + 10);
1344 val = CAS_BASE(RX_PAGE_SIZE, val);
1345 val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
1346 val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
1347 val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
1348 writel(val, cp->regs + REG_RX_PAGE_SIZE);
1349
1350
1351 if (CAS_HP_FIRMWARE == cas_prog_null)
1352 return;
1353
1354 val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
1355 val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
1356 val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
1357 writel(val, cp->regs + REG_HP_CFG);
1358}
1359
1360static inline void cas_rxc_init(struct cas_rx_comp *rxc)
1361{
1362 memset(rxc, 0, sizeof(*rxc));
1363 rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
1364}
1365
1366
1367
1368
1369
1370static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
1371{
1372 cas_page_t *page = cp->rx_pages[1][index];
1373 cas_page_t *new;
1374
1375 if (page_count(page->buffer) == 1)
1376 return page;
1377
1378 new = cas_page_dequeue(cp);
1379 if (new) {
1380 spin_lock(&cp->rx_inuse_lock);
1381 list_add(&page->list, &cp->rx_inuse_list);
1382 spin_unlock(&cp->rx_inuse_lock);
1383 }
1384 return new;
1385}
1386
1387
1388static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
1389 const int index)
1390{
1391 cas_page_t **page0 = cp->rx_pages[0];
1392 cas_page_t **page1 = cp->rx_pages[1];
1393
1394
1395 if (page_count(page0[index]->buffer) > 1) {
1396 cas_page_t *new = cas_page_spare(cp, index);
1397 if (new) {
1398 page1[index] = page0[index];
1399 page0[index] = new;
1400 }
1401 }
1402 RX_USED_SET(page0[index], 0);
1403 return page0[index];
1404}
1405
1406static void cas_clean_rxds(struct cas *cp)
1407{
1408
1409 struct cas_rx_desc *rxd = cp->init_rxds[0];
1410 int i, size;
1411
1412
1413 for (i = 0; i < N_RX_FLOWS; i++) {
1414 struct sk_buff *skb;
1415 while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
1416 cas_skb_release(skb);
1417 }
1418 }
1419
1420
1421 size = RX_DESC_RINGN_SIZE(0);
1422 for (i = 0; i < size; i++) {
1423 cas_page_t *page = cas_page_swap(cp, 0, i);
1424 rxd[i].buffer = cpu_to_le64(page->dma_addr);
1425 rxd[i].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
1426 CAS_BASE(RX_INDEX_RING, 0));
1427 }
1428
1429 cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4;
1430 cp->rx_last[0] = 0;
1431 cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
1432}
1433
1434static void cas_clean_rxcs(struct cas *cp)
1435{
1436 int i, j;
1437
1438
1439 memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
1440 memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
1441 for (i = 0; i < N_RX_COMP_RINGS; i++) {
1442 struct cas_rx_comp *rxc = cp->init_rxcs[i];
1443 for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
1444 cas_rxc_init(rxc + j);
1445 }
1446 }
1447}
1448
1449#if 0
1450
1451
1452
1453
1454
1455
1456static int cas_rxmac_reset(struct cas *cp)
1457{
1458 struct net_device *dev = cp->dev;
1459 int limit;
1460 u32 val;
1461
1462
1463 writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1464 for (limit = 0; limit < STOP_TRIES; limit++) {
1465 if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
1466 break;
1467 udelay(10);
1468 }
1469 if (limit == STOP_TRIES) {
1470 netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
1471 return 1;
1472 }
1473
1474
1475 writel(0, cp->regs + REG_RX_CFG);
1476 for (limit = 0; limit < STOP_TRIES; limit++) {
1477 if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
1478 break;
1479 udelay(10);
1480 }
1481 if (limit == STOP_TRIES) {
1482 netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
1483 return 1;
1484 }
1485
1486 mdelay(5);
1487
1488
1489 writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
1490 for (limit = 0; limit < STOP_TRIES; limit++) {
1491 if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
1492 break;
1493 udelay(10);
1494 }
1495 if (limit == STOP_TRIES) {
1496 netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
1497 return 1;
1498 }
1499
1500
1501 cas_clean_rxds(cp);
1502 cas_clean_rxcs(cp);
1503
1504
1505 cas_init_rx_dma(cp);
1506
1507
1508 val = readl(cp->regs + REG_RX_CFG);
1509 writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
1510 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
1511 val = readl(cp->regs + REG_MAC_RX_CFG);
1512 writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1513 return 0;
1514}
1515#endif
1516
1517static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
1518 u32 status)
1519{
1520 u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);
1521
1522 if (!stat)
1523 return 0;
1524
1525 netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);
1526
1527
1528 spin_lock(&cp->stat_lock[0]);
1529 if (stat & MAC_RX_ALIGN_ERR)
1530 cp->net_stats[0].rx_frame_errors += 0x10000;
1531
1532 if (stat & MAC_RX_CRC_ERR)
1533 cp->net_stats[0].rx_crc_errors += 0x10000;
1534
1535 if (stat & MAC_RX_LEN_ERR)
1536 cp->net_stats[0].rx_length_errors += 0x10000;
1537
1538 if (stat & MAC_RX_OVERFLOW) {
1539 cp->net_stats[0].rx_over_errors++;
1540 cp->net_stats[0].rx_fifo_errors++;
1541 }
1542
1543
1544
1545
1546 spin_unlock(&cp->stat_lock[0]);
1547 return 0;
1548}
1549
1550static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
1551 u32 status)
1552{
1553 u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);
1554
1555 if (!stat)
1556 return 0;
1557
1558 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1559 "mac interrupt, stat: 0x%x\n", stat);
1560
1561
1562
1563
1564
1565 if (stat & MAC_CTRL_PAUSE_STATE)
1566 cp->pause_entered++;
1567
1568 if (stat & MAC_CTRL_PAUSE_RECEIVED)
1569 cp->pause_last_time_recvd = (stat >> 16);
1570
1571 return 0;
1572}
1573
1574
1575
1576static inline int cas_mdio_link_not_up(struct cas *cp)
1577{
1578 u16 val;
1579
1580 switch (cp->lstate) {
1581 case link_force_ret:
1582 netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
1583 cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
1584 cp->timer_ticks = 5;
1585 cp->lstate = link_force_ok;
1586 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1587 break;
1588
1589 case link_aneg:
1590 val = cas_phy_read(cp, MII_BMCR);
1591
1592
1593
1594
1595 val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
1596 val |= BMCR_FULLDPLX;
1597 val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
1598 CAS_BMCR_SPEED1000 : BMCR_SPEED100;
1599 cas_phy_write(cp, MII_BMCR, val);
1600 cp->timer_ticks = 5;
1601 cp->lstate = link_force_try;
1602 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1603 break;
1604
1605 case link_force_try:
1606
1607 val = cas_phy_read(cp, MII_BMCR);
1608 cp->timer_ticks = 5;
1609 if (val & CAS_BMCR_SPEED1000) {
1610 val &= ~CAS_BMCR_SPEED1000;
1611 val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
1612 cas_phy_write(cp, MII_BMCR, val);
1613 break;
1614 }
1615
1616 if (val & BMCR_SPEED100) {
1617 if (val & BMCR_FULLDPLX)
1618 val &= ~BMCR_FULLDPLX;
1619 else {
1620 val &= ~BMCR_SPEED100;
1621 }
1622 cas_phy_write(cp, MII_BMCR, val);
1623 break;
1624 }
1625 default:
1626 break;
1627 }
1628 return 0;
1629}
1630
1631
1632
1633static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
1634{
1635 int restart;
1636
1637 if (bmsr & BMSR_LSTATUS) {
1638
1639
1640
1641
1642
1643 if ((cp->lstate == link_force_try) &&
1644 (cp->link_cntl & BMCR_ANENABLE)) {
1645 cp->lstate = link_force_ret;
1646 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1647 cas_mif_poll(cp, 0);
1648 cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
1649 cp->timer_ticks = 5;
1650 if (cp->opened)
1651 netif_info(cp, link, cp->dev,
1652 "Got link after fallback, retrying autoneg once...\n");
1653 cas_phy_write(cp, MII_BMCR,
1654 cp->link_fcntl | BMCR_ANENABLE |
1655 BMCR_ANRESTART);
1656 cas_mif_poll(cp, 1);
1657
1658 } else if (cp->lstate != link_up) {
1659 cp->lstate = link_up;
1660 cp->link_transition = LINK_TRANSITION_LINK_UP;
1661
1662 if (cp->opened) {
1663 cas_set_link_modes(cp);
1664 netif_carrier_on(cp->dev);
1665 }
1666 }
1667 return 0;
1668 }
1669
1670
1671
1672
1673 restart = 0;
1674 if (cp->lstate == link_up) {
1675 cp->lstate = link_down;
1676 cp->link_transition = LINK_TRANSITION_LINK_DOWN;
1677
1678 netif_carrier_off(cp->dev);
1679 if (cp->opened)
1680 netif_info(cp, link, cp->dev, "Link down\n");
1681 restart = 1;
1682
1683 } else if (++cp->timer_ticks > 10)
1684 cas_mdio_link_not_up(cp);
1685
1686 return restart;
1687}
1688
1689static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
1690 u32 status)
1691{
1692 u32 stat = readl(cp->regs + REG_MIF_STATUS);
1693 u16 bmsr;
1694
1695
1696 if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
1697 return 0;
1698
1699 bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
1700 return cas_mii_link_check(cp, bmsr);
1701}
1702
1703static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
1704 u32 status)
1705{
1706 u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);
1707
1708 if (!stat)
1709 return 0;
1710
1711 netdev_err(dev, "PCI error [%04x:%04x]",
1712 stat, readl(cp->regs + REG_BIM_DIAG));
1713
1714
1715 if ((stat & PCI_ERR_BADACK) &&
1716 ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
1717 pr_cont(" <No ACK64# during ABS64 cycle>");
1718
1719 if (stat & PCI_ERR_DTRTO)
1720 pr_cont(" <Delayed transaction timeout>");
1721 if (stat & PCI_ERR_OTHER)
1722 pr_cont(" <other>");
1723 if (stat & PCI_ERR_BIM_DMA_WRITE)
1724 pr_cont(" <BIM DMA 0 write req>");
1725 if (stat & PCI_ERR_BIM_DMA_READ)
1726 pr_cont(" <BIM DMA 0 read req>");
1727 pr_cont("\n");
1728
1729 if (stat & PCI_ERR_OTHER) {
1730 u16 cfg;
1731
1732
1733
1734
1735 pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
1736 netdev_err(dev, "Read PCI cfg space status [%04x]\n", cfg);
1737 if (cfg & PCI_STATUS_PARITY)
1738 netdev_err(dev, "PCI parity error detected\n");
1739 if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
1740 netdev_err(dev, "PCI target abort\n");
1741 if (cfg & PCI_STATUS_REC_TARGET_ABORT)
1742 netdev_err(dev, "PCI master acks target abort\n");
1743 if (cfg & PCI_STATUS_REC_MASTER_ABORT)
1744 netdev_err(dev, "PCI master abort\n");
1745 if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
1746 netdev_err(dev, "PCI system error SERR#\n");
1747 if (cfg & PCI_STATUS_DETECTED_PARITY)
1748 netdev_err(dev, "PCI parity error\n");
1749
1750
1751 cfg &= (PCI_STATUS_PARITY |
1752 PCI_STATUS_SIG_TARGET_ABORT |
1753 PCI_STATUS_REC_TARGET_ABORT |
1754 PCI_STATUS_REC_MASTER_ABORT |
1755 PCI_STATUS_SIG_SYSTEM_ERROR |
1756 PCI_STATUS_DETECTED_PARITY);
1757 pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
1758 }
1759
1760
1761 return 1;
1762}
1763
1764
1765
1766
1767
1768
1769static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
1770 u32 status)
1771{
1772 if (status & INTR_RX_TAG_ERROR) {
1773
1774 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1775 "corrupt rx tag framing\n");
1776 spin_lock(&cp->stat_lock[0]);
1777 cp->net_stats[0].rx_errors++;
1778 spin_unlock(&cp->stat_lock[0]);
1779 goto do_reset;
1780 }
1781
1782 if (status & INTR_RX_LEN_MISMATCH) {
1783
1784 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1785 "length mismatch for rx frame\n");
1786 spin_lock(&cp->stat_lock[0]);
1787 cp->net_stats[0].rx_errors++;
1788 spin_unlock(&cp->stat_lock[0]);
1789 goto do_reset;
1790 }
1791
1792 if (status & INTR_PCS_STATUS) {
1793 if (cas_pcs_interrupt(dev, cp, status))
1794 goto do_reset;
1795 }
1796
1797 if (status & INTR_TX_MAC_STATUS) {
1798 if (cas_txmac_interrupt(dev, cp, status))
1799 goto do_reset;
1800 }
1801
1802 if (status & INTR_RX_MAC_STATUS) {
1803 if (cas_rxmac_interrupt(dev, cp, status))
1804 goto do_reset;
1805 }
1806
1807 if (status & INTR_MAC_CTRL_STATUS) {
1808 if (cas_mac_interrupt(dev, cp, status))
1809 goto do_reset;
1810 }
1811
1812 if (status & INTR_MIF_STATUS) {
1813 if (cas_mif_interrupt(dev, cp, status))
1814 goto do_reset;
1815 }
1816
1817 if (status & INTR_PCI_ERROR_STATUS) {
1818 if (cas_pci_interrupt(dev, cp, status))
1819 goto do_reset;
1820 }
1821 return 0;
1822
1823do_reset:
1824#if 1
1825 atomic_inc(&cp->reset_task_pending);
1826 atomic_inc(&cp->reset_task_pending_all);
1827 netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
1828 schedule_work(&cp->reset_task);
1829#else
1830 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
1831 netdev_err(dev, "reset called in cas_abnormal_irq\n");
1832 schedule_work(&cp->reset_task);
1833#endif
1834 return 1;
1835}
1836
1837
1838
1839
1840#define CAS_TABORT(x) (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
1841#define CAS_ROUND_PAGE(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)
1842static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
1843 const int len)
1844{
1845 unsigned long off = addr + len;
1846
1847 if (CAS_TABORT(cp) == 1)
1848 return 0;
1849 if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
1850 return 0;
1851 return TX_TARGET_ABORT_LEN;
1852}
1853
1854static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
1855{
1856 struct cas_tx_desc *txds;
1857 struct sk_buff **skbs;
1858 struct net_device *dev = cp->dev;
1859 int entry, count;
1860
1861 spin_lock(&cp->tx_lock[ring]);
1862 txds = cp->init_txds[ring];
1863 skbs = cp->tx_skbs[ring];
1864 entry = cp->tx_old[ring];
1865
1866 count = TX_BUFF_COUNT(ring, entry, limit);
1867 while (entry != limit) {
1868 struct sk_buff *skb = skbs[entry];
1869 dma_addr_t daddr;
1870 u32 dlen;
1871 int frag;
1872
1873 if (!skb) {
1874
1875 entry = TX_DESC_NEXT(ring, entry);
1876 continue;
1877 }
1878
1879
1880 count -= skb_shinfo(skb)->nr_frags +
1881 + cp->tx_tiny_use[ring][entry].nbufs + 1;
1882 if (count < 0)
1883 break;
1884
1885 netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
1886 "tx[%d] done, slot %d\n", ring, entry);
1887
1888 skbs[entry] = NULL;
1889 cp->tx_tiny_use[ring][entry].nbufs = 0;
1890
1891 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1892 struct cas_tx_desc *txd = txds + entry;
1893
1894 daddr = le64_to_cpu(txd->buffer);
1895 dlen = CAS_VAL(TX_DESC_BUFLEN,
1896 le64_to_cpu(txd->control));
1897 pci_unmap_page(cp->pdev, daddr, dlen,
1898 PCI_DMA_TODEVICE);
1899 entry = TX_DESC_NEXT(ring, entry);
1900
1901
1902 if (cp->tx_tiny_use[ring][entry].used) {
1903 cp->tx_tiny_use[ring][entry].used = 0;
1904 entry = TX_DESC_NEXT(ring, entry);
1905 }
1906 }
1907
1908 spin_lock(&cp->stat_lock[ring]);
1909 cp->net_stats[ring].tx_packets++;
1910 cp->net_stats[ring].tx_bytes += skb->len;
1911 spin_unlock(&cp->stat_lock[ring]);
1912 dev_kfree_skb_irq(skb);
1913 }
1914 cp->tx_old[ring] = entry;
1915
1916
1917
1918
1919
1920 if (netif_queue_stopped(dev) &&
1921 (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
1922 netif_wake_queue(dev);
1923 spin_unlock(&cp->tx_lock[ring]);
1924}
1925
1926static void cas_tx(struct net_device *dev, struct cas *cp,
1927 u32 status)
1928{
1929 int limit, ring;
1930#ifdef USE_TX_COMPWB
1931 u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
1932#endif
1933 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1934 "tx interrupt, status: 0x%x, %llx\n",
1935 status, (unsigned long long)compwb);
1936
1937 for (ring = 0; ring < N_TX_RINGS; ring++) {
1938#ifdef USE_TX_COMPWB
1939
1940 limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
1941 CAS_VAL(TX_COMPWB_LSB, compwb);
1942 compwb = TX_COMPWB_NEXT(compwb);
1943#else
1944 limit = readl(cp->regs + REG_TX_COMPN(ring));
1945#endif
1946 if (cp->tx_old[ring] != limit)
1947 cas_tx_ringN(cp, ring, limit);
1948 }
1949}
1950
1951
1952static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
1953 int entry, const u64 *words,
1954 struct sk_buff **skbref)
1955{
1956 int dlen, hlen, len, i, alloclen;
1957 int off, swivel = RX_SWIVEL_OFF_VAL;
1958 struct cas_page *page;
1959 struct sk_buff *skb;
1960 void *addr, *crcaddr;
1961 __sum16 csum;
1962 char *p;
1963
1964 hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
1965 dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
1966 len = hlen + dlen;
1967
1968 if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
1969 alloclen = len;
1970 else
1971 alloclen = max(hlen, RX_COPY_MIN);
1972
1973 skb = netdev_alloc_skb(cp->dev, alloclen + swivel + cp->crc_size);
1974 if (skb == NULL)
1975 return -1;
1976
1977 *skbref = skb;
1978 skb_reserve(skb, swivel);
1979
1980 p = skb->data;
1981 addr = crcaddr = NULL;
1982 if (hlen) {
1983 i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
1984 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
1985 off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
1986 swivel;
1987
1988 i = hlen;
1989 if (!dlen)
1990 i += cp->crc_size;
1991 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
1992 PCI_DMA_FROMDEVICE);
1993 addr = cas_page_map(page->buffer);
1994 memcpy(p, addr + off, i);
1995 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
1996 PCI_DMA_FROMDEVICE);
1997 cas_page_unmap(addr);
1998 RX_USED_ADD(page, 0x100);
1999 p += hlen;
2000 swivel = 0;
2001 }
2002
2003
2004 if (alloclen < (hlen + dlen)) {
2005 skb_frag_t *frag = skb_shinfo(skb)->frags;
2006
2007
2008 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2009 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2010 off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2011
2012 hlen = min(cp->page_size - off, dlen);
2013 if (hlen < 0) {
2014 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
2015 "rx page overflow: %d\n", hlen);
2016 dev_kfree_skb_irq(skb);
2017 return -1;
2018 }
2019 i = hlen;
2020 if (i == dlen)
2021 i += cp->crc_size;
2022 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
2023 PCI_DMA_FROMDEVICE);
2024
2025
2026 swivel = 0;
2027 if (p == (char *) skb->data) {
2028 addr = cas_page_map(page->buffer);
2029 memcpy(p, addr + off, RX_COPY_MIN);
2030 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
2031 PCI_DMA_FROMDEVICE);
2032 cas_page_unmap(addr);
2033 off += RX_COPY_MIN;
2034 swivel = RX_COPY_MIN;
2035 RX_USED_ADD(page, cp->mtu_stride);
2036 } else {
2037 RX_USED_ADD(page, hlen);
2038 }
2039 skb_put(skb, alloclen);
2040
2041 skb_shinfo(skb)->nr_frags++;
2042 skb->data_len += hlen - swivel;
2043 skb->truesize += hlen - swivel;
2044 skb->len += hlen - swivel;
2045
2046 __skb_frag_set_page(frag, page->buffer);
2047 __skb_frag_ref(frag);
2048 frag->page_offset = off;
2049 skb_frag_size_set(frag, hlen - swivel);
2050
2051
2052 if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2053 hlen = dlen;
2054 off = 0;
2055
2056 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2057 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2058 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
2059 hlen + cp->crc_size,
2060 PCI_DMA_FROMDEVICE);
2061 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
2062 hlen + cp->crc_size,
2063 PCI_DMA_FROMDEVICE);
2064
2065 skb_shinfo(skb)->nr_frags++;
2066 skb->data_len += hlen;
2067 skb->len += hlen;
2068 frag++;
2069
2070 __skb_frag_set_page(frag, page->buffer);
2071 __skb_frag_ref(frag);
2072 frag->page_offset = 0;
2073 skb_frag_size_set(frag, hlen);
2074 RX_USED_ADD(page, hlen + cp->crc_size);
2075 }
2076
2077 if (cp->crc_size) {
2078 addr = cas_page_map(page->buffer);
2079 crcaddr = addr + off + hlen;
2080 }
2081
2082 } else {
2083
2084 if (!dlen)
2085 goto end_copy_pkt;
2086
2087 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2088 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2089 off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2090 hlen = min(cp->page_size - off, dlen);
2091 if (hlen < 0) {
2092 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
2093 "rx page overflow: %d\n", hlen);
2094 dev_kfree_skb_irq(skb);
2095 return -1;
2096 }
2097 i = hlen;
2098 if (i == dlen)
2099 i += cp->crc_size;
2100 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
2101 PCI_DMA_FROMDEVICE);
2102 addr = cas_page_map(page->buffer);
2103 memcpy(p, addr + off, i);
2104 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
2105 PCI_DMA_FROMDEVICE);
2106 cas_page_unmap(addr);
2107 if (p == (char *) skb->data)
2108 RX_USED_ADD(page, cp->mtu_stride);
2109 else
2110 RX_USED_ADD(page, i);
2111
2112
2113 if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2114 p += hlen;
2115 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2116 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2117 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
2118 dlen + cp->crc_size,
2119 PCI_DMA_FROMDEVICE);
2120 addr = cas_page_map(page->buffer);
2121 memcpy(p, addr, dlen + cp->crc_size);
2122 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
2123 dlen + cp->crc_size,
2124 PCI_DMA_FROMDEVICE);
2125 cas_page_unmap(addr);
2126 RX_USED_ADD(page, dlen + cp->crc_size);
2127 }
2128end_copy_pkt:
2129 if (cp->crc_size) {
2130 addr = NULL;
2131 crcaddr = skb->data + alloclen;
2132 }
2133 skb_put(skb, alloclen);
2134 }
2135
2136 csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
2137 if (cp->crc_size) {
2138
2139 csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
2140 csum_unfold(csum)));
2141 if (addr)
2142 cas_page_unmap(addr);
2143 }
2144 skb->protocol = eth_type_trans(skb, cp->dev);
2145 if (skb->protocol == htons(ETH_P_IP)) {
2146 skb->csum = csum_unfold(~csum);
2147 skb->ip_summed = CHECKSUM_COMPLETE;
2148 } else
2149 skb_checksum_none_assert(skb);
2150 return len;
2151}
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
2169 struct sk_buff *skb)
2170{
2171 int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
2172 struct sk_buff_head *flow = &cp->rx_flows[flowid];
2173
2174
2175
2176
2177
2178 __skb_queue_tail(flow, skb);
2179 if (words[0] & RX_COMP1_RELEASE_FLOW) {
2180 while ((skb = __skb_dequeue(flow))) {
2181 cas_skb_release(skb);
2182 }
2183 }
2184}
2185
2186
2187
2188
2189static void cas_post_page(struct cas *cp, const int ring, const int index)
2190{
2191 cas_page_t *new;
2192 int entry;
2193
2194 entry = cp->rx_old[ring];
2195
2196 new = cas_page_swap(cp, ring, index);
2197 cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
2198 cp->init_rxds[ring][entry].index =
2199 cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
2200 CAS_BASE(RX_INDEX_RING, ring));
2201
2202 entry = RX_DESC_ENTRY(ring, entry + 1);
2203 cp->rx_old[ring] = entry;
2204
2205 if (entry % 4)
2206 return;
2207
2208 if (ring == 0)
2209 writel(entry, cp->regs + REG_RX_KICK);
2210 else if ((N_RX_DESC_RINGS > 1) &&
2211 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2212 writel(entry, cp->regs + REG_PLUS_RX_KICK1);
2213}
2214
2215
2216
2217static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
2218{
2219 unsigned int entry, last, count, released;
2220 int cluster;
2221 cas_page_t **page = cp->rx_pages[ring];
2222
2223 entry = cp->rx_old[ring];
2224
2225 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
2226 "rxd[%d] interrupt, done: %d\n", ring, entry);
2227
2228 cluster = -1;
2229 count = entry & 0x3;
2230 last = RX_DESC_ENTRY(ring, num ? entry + num - 4: entry - 4);
2231 released = 0;
2232 while (entry != last) {
2233
2234 if (page_count(page[entry]->buffer) > 1) {
2235 cas_page_t *new = cas_page_dequeue(cp);
2236 if (!new) {
2237
2238
2239
2240 cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
2241 if (!timer_pending(&cp->link_timer))
2242 mod_timer(&cp->link_timer, jiffies +
2243 CAS_LINK_FAST_TIMEOUT);
2244 cp->rx_old[ring] = entry;
2245 cp->rx_last[ring] = num ? num - released : 0;
2246 return -ENOMEM;
2247 }
2248 spin_lock(&cp->rx_inuse_lock);
2249 list_add(&page[entry]->list, &cp->rx_inuse_list);
2250 spin_unlock(&cp->rx_inuse_lock);
2251 cp->init_rxds[ring][entry].buffer =
2252 cpu_to_le64(new->dma_addr);
2253 page[entry] = new;
2254
2255 }
2256
2257 if (++count == 4) {
2258 cluster = entry;
2259 count = 0;
2260 }
2261 released++;
2262 entry = RX_DESC_ENTRY(ring, entry + 1);
2263 }
2264 cp->rx_old[ring] = entry;
2265
2266 if (cluster < 0)
2267 return 0;
2268
2269 if (ring == 0)
2270 writel(cluster, cp->regs + REG_RX_KICK);
2271 else if ((N_RX_DESC_RINGS > 1) &&
2272 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2273 writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
2274 return 0;
2275}
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290static int cas_rx_ringN(struct cas *cp, int ring, int budget)
2291{
2292 struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
2293 int entry, drops;
2294 int npackets = 0;
2295
2296 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
2297 "rx[%d] interrupt, done: %d/%d\n",
2298 ring,
2299 readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);
2300
2301 entry = cp->rx_new[ring];
2302 drops = 0;
2303 while (1) {
2304 struct cas_rx_comp *rxc = rxcs + entry;
2305 struct sk_buff *uninitialized_var(skb);
2306 int type, len;
2307 u64 words[4];
2308 int i, dring;
2309
2310 words[0] = le64_to_cpu(rxc->word1);
2311 words[1] = le64_to_cpu(rxc->word2);
2312 words[2] = le64_to_cpu(rxc->word3);
2313 words[3] = le64_to_cpu(rxc->word4);
2314
2315
2316 type = CAS_VAL(RX_COMP1_TYPE, words[0]);
2317 if (type == 0)
2318 break;
2319
2320
2321 if (words[3] & RX_COMP4_ZERO) {
2322 break;
2323 }
2324
2325
2326 if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
2327 spin_lock(&cp->stat_lock[ring]);
2328 cp->net_stats[ring].rx_errors++;
2329 if (words[3] & RX_COMP4_LEN_MISMATCH)
2330 cp->net_stats[ring].rx_length_errors++;
2331 if (words[3] & RX_COMP4_BAD)
2332 cp->net_stats[ring].rx_crc_errors++;
2333 spin_unlock(&cp->stat_lock[ring]);
2334
2335
2336 drop_it:
2337 spin_lock(&cp->stat_lock[ring]);
2338 ++cp->net_stats[ring].rx_dropped;
2339 spin_unlock(&cp->stat_lock[ring]);
2340 goto next;
2341 }
2342
2343 len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
2344 if (len < 0) {
2345 ++drops;
2346 goto drop_it;
2347 }
2348
2349
2350
2351
2352 if (RX_DONT_BATCH || (type == 0x2)) {
2353
2354 cas_skb_release(skb);
2355 } else {
2356 cas_rx_flow_pkt(cp, words, skb);
2357 }
2358
2359 spin_lock(&cp->stat_lock[ring]);
2360 cp->net_stats[ring].rx_packets++;
2361 cp->net_stats[ring].rx_bytes += len;
2362 spin_unlock(&cp->stat_lock[ring]);
2363
2364 next:
2365 npackets++;
2366
2367
2368 if (words[0] & RX_COMP1_RELEASE_HDR) {
2369 i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
2370 dring = CAS_VAL(RX_INDEX_RING, i);
2371 i = CAS_VAL(RX_INDEX_NUM, i);
2372 cas_post_page(cp, dring, i);
2373 }
2374
2375 if (words[0] & RX_COMP1_RELEASE_DATA) {
2376 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2377 dring = CAS_VAL(RX_INDEX_RING, i);
2378 i = CAS_VAL(RX_INDEX_NUM, i);
2379 cas_post_page(cp, dring, i);
2380 }
2381
2382 if (words[0] & RX_COMP1_RELEASE_NEXT) {
2383 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2384 dring = CAS_VAL(RX_INDEX_RING, i);
2385 i = CAS_VAL(RX_INDEX_NUM, i);
2386 cas_post_page(cp, dring, i);
2387 }
2388
2389
2390 entry = RX_COMP_ENTRY(ring, entry + 1 +
2391 CAS_VAL(RX_COMP1_SKIP, words[0]));
2392#ifdef USE_NAPI
2393 if (budget && (npackets >= budget))
2394 break;
2395#endif
2396 }
2397 cp->rx_new[ring] = entry;
2398
2399 if (drops)
2400 netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
2401 return npackets;
2402}
2403
2404
2405
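/* Return consumed RX completion entries to the hardware: reinitialize
 * every entry between rx_cur and rx_new and write the new tail index
 * to the completion tail register for @ring.
 */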
2406static void cas_post_rxcs_ringN(struct net_device *dev,
2407 struct cas *cp, int ring)
2408{
2409 struct cas_rx_comp *rxc = cp->init_rxcs[ring];
2410 int last, entry;
2411
2412 last = cp->rx_cur[ring];
2413 entry = cp->rx_new[ring];
2414 netif_printk(cp, intr, KERN_DEBUG, dev,
2415 "rxc[%d] interrupt, done: %d/%d\n",
2416 ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);
2417
2418
2419 while (last != entry) {
2420 cas_rxc_init(rxc + last);
2421 last = RX_COMP_ENTRY(ring, last + 1);
2422 }
2423 cp->rx_cur[ring] = last;
2424
2425 if (ring == 0)
2426 writel(last, cp->regs + REG_RX_COMP_TAIL);
2427 else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
2428 writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
2429}

/* The Cassini+ parts can distribute work over several PCI interrupts.
 * The handlers below service the optional third and fourth vectors
 * (USE_PCI_INTC/USE_PCI_INTD), which are dedicated to RX completion
 * rings 2 and 3 on REG_PLUS chips; both are compiled out by default.
 */
2436#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
2437static inline void cas_handle_irqN(struct net_device *dev,
2438 struct cas *cp, const u32 status,
2439 const int ring)
2440{
2441 if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
2442 cas_post_rxcs_ringN(dev, cp, ring);
2443}
2444
2445static irqreturn_t cas_interruptN(int irq, void *dev_id)
2446{
2447 struct net_device *dev = dev_id;
2448 struct cas *cp = netdev_priv(dev);
2449 unsigned long flags;
2450 int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
2451 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
2452
2453
2454 if (status == 0)
2455 return IRQ_NONE;
2456
2457 spin_lock_irqsave(&cp->lock, flags);
2458 if (status & INTR_RX_DONE_ALT) {
2459#ifdef USE_NAPI
2460 cas_mask_intr(cp);
2461 napi_schedule(&cp->napi);
2462#else
2463 cas_rx_ringN(cp, ring, 0);
2464#endif
2465 status &= ~INTR_RX_DONE_ALT;
2466 }
2467
2468 if (status)
2469 cas_handle_irqN(dev, cp, status, ring);
2470 spin_unlock_irqrestore(&cp->lock, flags);
2471 return IRQ_HANDLED;
2472}
2473#endif
2474
2475#ifdef USE_PCI_INTB
2476
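/* PCI INTB service routines: handle buffer-unavailable and
 * almost-empty events on RX descriptor ring 1 and drain RX completion
 * ring 1.  Only built when the optional second interrupt
 * (USE_PCI_INTB) is enabled.
 */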
2477static inline void cas_handle_irq1(struct cas *cp, const u32 status)
2478{
2479 if (status & INTR_RX_BUF_UNAVAIL_1) {
2480
2481
2482 cas_post_rxds_ringN(cp, 1, 0);
2483 spin_lock(&cp->stat_lock[1]);
2484 cp->net_stats[1].rx_dropped++;
2485 spin_unlock(&cp->stat_lock[1]);
2486 }
2487
2488 if (status & INTR_RX_BUF_AE_1)
2489 cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
2490 RX_AE_FREEN_VAL(1));
2491
2492 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
 cas_post_rxcs_ringN(cp->dev, cp, 1);
2494}
2495
2496
2497static irqreturn_t cas_interrupt1(int irq, void *dev_id)
2498{
2499 struct net_device *dev = dev_id;
2500 struct cas *cp = netdev_priv(dev);
2501 unsigned long flags;
2502 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2503
2504
2505 if (status == 0)
2506 return IRQ_NONE;
2507
2508 spin_lock_irqsave(&cp->lock, flags);
2509 if (status & INTR_RX_DONE_ALT) {
2510#ifdef USE_NAPI
2511 cas_mask_intr(cp);
2512 napi_schedule(&cp->napi);
2513#else
2514 cas_rx_ringN(cp, 1, 0);
2515#endif
2516 status &= ~INTR_RX_DONE_ALT;
2517 }
2518 if (status)
2519 cas_handle_irq1(cp, status);
2520 spin_unlock_irqrestore(&cp->lock, flags);
2521 return IRQ_HANDLED;
2522}
2523#endif
2524
2525static inline void cas_handle_irq(struct net_device *dev,
2526 struct cas *cp, const u32 status)
2527{
2528
2529 if (status & INTR_ERROR_MASK)
2530 cas_abnormal_irq(dev, cp, status);
2531
2532 if (status & INTR_RX_BUF_UNAVAIL) {
2533
2534
2535
2536 cas_post_rxds_ringN(cp, 0, 0);
2537 spin_lock(&cp->stat_lock[0]);
2538 cp->net_stats[0].rx_dropped++;
2539 spin_unlock(&cp->stat_lock[0]);
2540 } else if (status & INTR_RX_BUF_AE) {
2541 cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
2542 RX_AE_FREEN_VAL(0));
2543 }
2544
2545 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2546 cas_post_rxcs_ringN(dev, cp, 0);
2547}
2548
2549static irqreturn_t cas_interrupt(int irq, void *dev_id)
2550{
2551 struct net_device *dev = dev_id;
2552 struct cas *cp = netdev_priv(dev);
2553 unsigned long flags;
2554 u32 status = readl(cp->regs + REG_INTR_STATUS);
2555
2556 if (status == 0)
2557 return IRQ_NONE;
2558
2559 spin_lock_irqsave(&cp->lock, flags);
2560 if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
2561 cas_tx(dev, cp, status);
2562 status &= ~(INTR_TX_ALL | INTR_TX_INTME);
2563 }
2564
2565 if (status & INTR_RX_DONE) {
2566#ifdef USE_NAPI
2567 cas_mask_intr(cp);
2568 napi_schedule(&cp->napi);
2569#else
2570 cas_rx_ringN(cp, 0, 0);
2571#endif
2572 status &= ~INTR_RX_DONE;
2573 }
2574
2575 if (status)
2576 cas_handle_irq(dev, cp, status);
2577 spin_unlock_irqrestore(&cp->lock, flags);
2578 return IRQ_HANDLED;
2579}
2580
2581
2582#ifdef USE_NAPI
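/* NAPI poll: reap TX completions, process each RX completion ring up
 * to its share of @budget, service any remaining status bits, and
 * re-enable interrupts when the budget was not exhausted.
 */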
2583static int cas_poll(struct napi_struct *napi, int budget)
2584{
2585 struct cas *cp = container_of(napi, struct cas, napi);
2586 struct net_device *dev = cp->dev;
2587 int i, enable_intr, credits;
2588 u32 status = readl(cp->regs + REG_INTR_STATUS);
2589 unsigned long flags;
2590
2591 spin_lock_irqsave(&cp->lock, flags);
2592 cas_tx(dev, cp, status);
2593 spin_unlock_irqrestore(&cp->lock, flags);
2594
2595
2596
2597
2598
2599
2600
2601
2602 enable_intr = 1;
2603 credits = 0;
2604 for (i = 0; i < N_RX_COMP_RINGS; i++) {
2605 int j;
2606 for (j = 0; j < N_RX_COMP_RINGS; j++) {
2607 credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
2608 if (credits >= budget) {
2609 enable_intr = 0;
2610 goto rx_comp;
2611 }
2612 }
2613 }
2614
2615rx_comp:
2616
2617 spin_lock_irqsave(&cp->lock, flags);
2618 if (status)
2619 cas_handle_irq(dev, cp, status);
2620
2621#ifdef USE_PCI_INTB
2622 if (N_RX_COMP_RINGS > 1) {
2623 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2624 if (status)
 cas_handle_irq1(cp, status);
2626 }
2627#endif
2628
2629#ifdef USE_PCI_INTC
2630 if (N_RX_COMP_RINGS > 2) {
2631 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
2632 if (status)
2633 cas_handle_irqN(dev, cp, status, 2);
2634 }
2635#endif
2636
2637#ifdef USE_PCI_INTD
2638 if (N_RX_COMP_RINGS > 3) {
2639 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
2640 if (status)
2641 cas_handle_irqN(dev, cp, status, 3);
2642 }
2643#endif
2644 spin_unlock_irqrestore(&cp->lock, flags);
2645 if (enable_intr) {
2646 napi_complete(napi);
2647 cas_unmask_intr(cp);
2648 }
2649 return credits;
2650}
2651#endif
2652
2653#ifdef CONFIG_NET_POLL_CONTROLLER
2654static void cas_netpoll(struct net_device *dev)
2655{
2656 struct cas *cp = netdev_priv(dev);
2657
2658 cas_disable_irq(cp, 0);
2659 cas_interrupt(cp->pdev->irq, dev);
2660 cas_enable_irq(cp, 0);
2661
2662#ifdef USE_PCI_INTB
2663 if (N_RX_COMP_RINGS > 1) {
2664
2665 }
2666#endif
2667#ifdef USE_PCI_INTC
2668 if (N_RX_COMP_RINGS > 2) {
2669
2670 }
2671#endif
2672#ifdef USE_PCI_INTD
2673 if (N_RX_COMP_RINGS > 3) {
2674
2675 }
2676#endif
2677}
2678#endif
2679
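/* TX watchdog: dump the MIF/MAC/TX/RX/header-parser state registers
 * for debugging and schedule a full chip reset via reset_task.
 */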
2680static void cas_tx_timeout(struct net_device *dev)
2681{
2682 struct cas *cp = netdev_priv(dev);
2683
2684 netdev_err(dev, "transmit timed out, resetting\n");
2685 if (!cp->hw_running) {
2686 netdev_err(dev, "hrm.. hw not running!\n");
2687 return;
2688 }
2689
2690 netdev_err(dev, "MIF_STATE[%08x]\n",
2691 readl(cp->regs + REG_MIF_STATE_MACHINE));
2692
2693 netdev_err(dev, "MAC_STATE[%08x]\n",
2694 readl(cp->regs + REG_MAC_STATE_MACHINE));
2695
2696 netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
2697 readl(cp->regs + REG_TX_CFG),
2698 readl(cp->regs + REG_MAC_TX_STATUS),
2699 readl(cp->regs + REG_MAC_TX_CFG),
2700 readl(cp->regs + REG_TX_FIFO_PKT_CNT),
2701 readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
2702 readl(cp->regs + REG_TX_FIFO_READ_PTR),
2703 readl(cp->regs + REG_TX_SM_1),
2704 readl(cp->regs + REG_TX_SM_2));
2705
2706 netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
2707 readl(cp->regs + REG_RX_CFG),
2708 readl(cp->regs + REG_MAC_RX_STATUS),
2709 readl(cp->regs + REG_MAC_RX_CFG));
2710
2711 netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n",
2712 readl(cp->regs + REG_HP_STATE_MACHINE),
2713 readl(cp->regs + REG_HP_STATUS0),
2714 readl(cp->regs + REG_HP_STATUS1),
2715 readl(cp->regs + REG_HP_STATUS2));
2716
2717#if 1
2718 atomic_inc(&cp->reset_task_pending);
2719 atomic_inc(&cp->reset_task_pending_all);
2720 schedule_work(&cp->reset_task);
2721#else
2722 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
2723 schedule_work(&cp->reset_task);
2724#endif
2725}
2726
2727static inline int cas_intme(int ring, int entry)
2728{
2729
2730 if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)))
2731 return 1;
2732 return 0;
2733}
2734
2735
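/* Fill one TX descriptor with the DMA address, length and control
 * bits.  An interrupt is requested twice per ring traversal
 * (cas_intme()) and TX_DESC_EOF marks the final fragment of a packet.
 */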
2736static void cas_write_txd(struct cas *cp, int ring, int entry,
2737 dma_addr_t mapping, int len, u64 ctrl, int last)
2738{
2739 struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
2740
2741 ctrl |= CAS_BASE(TX_DESC_BUFLEN, len);
2742 if (cas_intme(ring, entry))
2743 ctrl |= TX_DESC_INTME;
2744 if (last)
2745 ctrl |= TX_DESC_EOF;
2746 txd->control = cpu_to_le64(ctrl);
2747 txd->buffer = cpu_to_le64(mapping);
2748}
2749
2750static inline void *tx_tiny_buf(struct cas *cp, const int ring,
2751 const int entry)
2752{
2753 return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
2754}
2755
2756static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
2757 const int entry, const int tentry)
2758{
2759 cp->tx_tiny_use[ring][tentry].nbufs++;
2760 cp->tx_tiny_use[ring][entry].used = 1;
2761 return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
2762}
2763
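/* Queue one skb on TX ring @ring: DMA-map the linear head and each
 * fragment, bouncing the tail of any buffer through the per-ring tiny
 * buffer when cas_calc_tabort() flags it, write the descriptors and
 * kick the ring.  Returns 1 and stops the queue if too few
 * descriptors are free.
 */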
2764static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2765 struct sk_buff *skb)
2766{
2767 struct net_device *dev = cp->dev;
2768 int entry, nr_frags, frag, tabort, tentry;
2769 dma_addr_t mapping;
2770 unsigned long flags;
2771 u64 ctrl;
2772 u32 len;
2773
2774 spin_lock_irqsave(&cp->tx_lock[ring], flags);
2775
2776
2777 if (TX_BUFFS_AVAIL(cp, ring) <=
2778 CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
2779 netif_stop_queue(dev);
2780 spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2781 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
2782 return 1;
2783 }
2784
2785 ctrl = 0;
2786 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2787 const u64 csum_start_off = skb_checksum_start_offset(skb);
2788 const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
2789
2790 ctrl = TX_DESC_CSUM_EN |
2791 CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
2792 CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
2793 }
2794
2795 entry = cp->tx_new[ring];
2796 cp->tx_skbs[ring][entry] = skb;
2797
2798 nr_frags = skb_shinfo(skb)->nr_frags;
2799 len = skb_headlen(skb);
2800 mapping = pci_map_page(cp->pdev, virt_to_page(skb->data),
2801 offset_in_page(skb->data), len,
2802 PCI_DMA_TODEVICE);
2803
2804 tentry = entry;
2805 tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
2806 if (unlikely(tabort)) {
2807
2808 cas_write_txd(cp, ring, entry, mapping, len - tabort,
2809 ctrl | TX_DESC_SOF, 0);
2810 entry = TX_DESC_NEXT(ring, entry);
2811
2812 skb_copy_from_linear_data_offset(skb, len - tabort,
2813 tx_tiny_buf(cp, ring, entry), tabort);
2814 mapping = tx_tiny_map(cp, ring, entry, tentry);
2815 cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
2816 (nr_frags == 0));
2817 } else {
2818 cas_write_txd(cp, ring, entry, mapping, len, ctrl |
2819 TX_DESC_SOF, (nr_frags == 0));
2820 }
2821 entry = TX_DESC_NEXT(ring, entry);
2822
2823 for (frag = 0; frag < nr_frags; frag++) {
2824 const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
2825
2826 len = skb_frag_size(fragp);
2827 mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
2828 DMA_TO_DEVICE);
2829
2830 tabort = cas_calc_tabort(cp, fragp->page_offset, len);
2831 if (unlikely(tabort)) {
2832 void *addr;
2833
2834
2835 cas_write_txd(cp, ring, entry, mapping, len - tabort,
2836 ctrl, 0);
2837 entry = TX_DESC_NEXT(ring, entry);
2838
2839 addr = cas_page_map(skb_frag_page(fragp));
2840 memcpy(tx_tiny_buf(cp, ring, entry),
2841 addr + fragp->page_offset + len - tabort,
2842 tabort);
2843 cas_page_unmap(addr);
2844 mapping = tx_tiny_map(cp, ring, entry, tentry);
2845 len = tabort;
2846 }
2847
2848 cas_write_txd(cp, ring, entry, mapping, len, ctrl,
2849 (frag + 1 == nr_frags));
2850 entry = TX_DESC_NEXT(ring, entry);
2851 }
2852
2853 cp->tx_new[ring] = entry;
2854 if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
2855 netif_stop_queue(dev);
2856
2857 netif_printk(cp, tx_queued, KERN_DEBUG, dev,
2858 "tx[%d] queued, slot %d, skblen %d, avail %d\n",
2859 ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
2860 writel(entry, cp->regs + REG_TX_KICKN(ring));
2861 spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2862 return 0;
2863}
2864
2865static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
2866{
2867 struct cas *cp = netdev_priv(dev);
2868
2869
2870
2871
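 /* 'ring' is only a round-robin hint for spreading packets across
 * the TX rings; races on it merely affect which ring is picked.
 */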
2872 static int ring;
2873
2874 if (skb_padto(skb, cp->min_frame_size))
2875 return NETDEV_TX_OK;
2876
2877
2878
2879
2880 if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
2881 return NETDEV_TX_BUSY;
2882 return NETDEV_TX_OK;
2883}
2884
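/* Program the TX DMA engine: the completion write-back address (when
 * USE_TX_COMPWB is set), the base address of each descriptor ring and
 * the per-ring maximum burst sizes.  DMA itself is enabled later, in
 * cas_start_dma().
 */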
2885static void cas_init_tx_dma(struct cas *cp)
2886{
2887 u64 desc_dma = cp->block_dvma;
2888 unsigned long off;
2889 u32 val;
2890 int i;
2891
2892
2893#ifdef USE_TX_COMPWB
2894 off = offsetof(struct cas_init_block, tx_compwb);
2895 writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
2896 writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
2897#endif
2898
2899
2900
2901
2902 val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
2903 TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
2904 TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
2905 TX_CFG_INTR_COMPWB_DIS;
2906
2907
2908 for (i = 0; i < MAX_TX_RINGS; i++) {
2909 off = (unsigned long) cp->init_txds[i] -
2910 (unsigned long) cp->init_block;
2911
2912 val |= CAS_TX_RINGN_BASE(i);
2913 writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
2914 writel((desc_dma + off) & 0xffffffff, cp->regs +
2915 REG_TX_DBN_LOW(i));
2916
2917
2918
2919 }
2920 writel(val, cp->regs + REG_TX_CFG);
2921
2922
2923
2924
2925#ifdef USE_QOS
2926 writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2927 writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
2928 writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
2929 writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
2930#else
2931 writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2932 writel(0x800, cp->regs + REG_TX_MAXBURST_1);
2933 writel(0x800, cp->regs + REG_TX_MAXBURST_2);
2934 writel(0x800, cp->regs + REG_TX_MAXBURST_3);
2935#endif
2936}
2937
2938
2939static inline void cas_init_dma(struct cas *cp)
2940{
2941 cas_init_tx_dma(cp);
2942 cas_init_rx_dma(cp);
2943}
2944
2945static void cas_process_mc_list(struct cas *cp)
2946{
2947 u16 hash_table[16];
2948 u32 crc;
2949 struct netdev_hw_addr *ha;
2950 int i = 1;
2951
2952 memset(hash_table, 0, sizeof(hash_table));
2953 netdev_for_each_mc_addr(ha, cp->dev) {
2954 if (i <= CAS_MC_EXACT_MATCH_SIZE) {
 /* program the first CAS_MC_EXACT_MATCH_SIZE addresses into
 * the alternate MAC address (exact match) registers
 */
2958 writel((ha->addr[4] << 8) | ha->addr[5],
2959 cp->regs + REG_MAC_ADDRN(i*3 + 0));
2960 writel((ha->addr[2] << 8) | ha->addr[3],
2961 cp->regs + REG_MAC_ADDRN(i*3 + 1));
2962 writel((ha->addr[0] << 8) | ha->addr[1],
2963 cp->regs + REG_MAC_ADDRN(i*3 + 2));
2964 i++;
2965 }
2966 else {
 /* hash the rest into the 256-bit hash table: the top eight
 * bits of the little-endian CRC select one bit spread across
 * sixteen 16-bit registers
 */
2970 crc = ether_crc_le(ETH_ALEN, ha->addr);
2971 crc >>= 24;
2972 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
2973 }
2974 }
2975 for (i = 0; i < 16; i++)
2976 writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i));
2977}
2978
2979
2980static u32 cas_setup_multicast(struct cas *cp)
2981{
2982 u32 rxcfg = 0;
2983 int i;
2984
2985 if (cp->dev->flags & IFF_PROMISC) {
2986 rxcfg |= MAC_RX_CFG_PROMISC_EN;
2987
2988 } else if (cp->dev->flags & IFF_ALLMULTI) {
 for (i = 0; i < 16; i++)
2990 writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
2991 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
2992
2993 } else {
2994 cas_process_mc_list(cp);
2995 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
2996 }
2997
2998 return rxcfg;
2999}
3000
3001
3002static void cas_clear_mac_err(struct cas *cp)
3003{
3004 writel(0, cp->regs + REG_MAC_COLL_NORMAL);
3005 writel(0, cp->regs + REG_MAC_COLL_FIRST);
3006 writel(0, cp->regs + REG_MAC_COLL_EXCESS);
3007 writel(0, cp->regs + REG_MAC_COLL_LATE);
3008 writel(0, cp->regs + REG_MAC_TIMER_DEFER);
3009 writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK);
3010 writel(0, cp->regs + REG_MAC_RECV_FRAME);
3011 writel(0, cp->regs + REG_MAC_LEN_ERR);
3012 writel(0, cp->regs + REG_MAC_ALIGN_ERR);
3013 writel(0, cp->regs + REG_MAC_FCS_ERR);
3014 writel(0, cp->regs + REG_MAC_RX_CODE_ERR);
3015}
3016
3017
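/* Reset the TX and RX MACs and poll until the reset bits self-clear,
 * warning if either side fails to come out of reset.
 */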
3018static void cas_mac_reset(struct cas *cp)
3019{
3020 int i;
3021
3022
3023 writel(0x1, cp->regs + REG_MAC_TX_RESET);
3024 writel(0x1, cp->regs + REG_MAC_RX_RESET);
3025
3026
3027 i = STOP_TRIES;
3028 while (i-- > 0) {
3029 if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
3030 break;
3031 udelay(10);
3032 }
3033
3034
3035 i = STOP_TRIES;
3036 while (i-- > 0) {
3037 if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
3038 break;
3039 udelay(10);
3040 }
3041
3042 if (readl(cp->regs + REG_MAC_TX_RESET) |
3043 readl(cp->regs + REG_MAC_RX_RESET))
3044 netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n",
3045 readl(cp->regs + REG_MAC_TX_RESET),
3046 readl(cp->regs + REG_MAC_RX_RESET),
3047 readl(cp->regs + REG_MAC_STATE_MACHINE));
3048}
3049
3050
3051
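/* Bring the MAC to a known state: reset it, then program inter-packet
 * gaps, slot time, frame size limits, the station address and address
 * filters, the multicast state and the MAC interrupt masks.
 */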
3052static void cas_init_mac(struct cas *cp)
3053{
3054 unsigned char *e = &cp->dev->dev_addr[0];
3055 int i;
3056 cas_mac_reset(cp);
3057
3058
3059 writel(CAWR_RR_DIS, cp->regs + REG_CAWR);
3060
3061#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
3062
3063
3064
3065 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
3066 writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
3067#endif
3068
3069 writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);
3070
3071 writel(0x00, cp->regs + REG_MAC_IPG0);
3072 writel(0x08, cp->regs + REG_MAC_IPG1);
3073 writel(0x04, cp->regs + REG_MAC_IPG2);
3074
3075
3076 writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3077
3078
3079 writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);
3080
3081
3082
3083
3084
3085 writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
3086 CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
3087 (CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
3088 cp->regs + REG_MAC_FRAMESIZE_MAX);
3089
3090
3091
3092
3093
3094 if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
3095 writel(0x41, cp->regs + REG_MAC_PA_SIZE);
3096 else
3097 writel(0x07, cp->regs + REG_MAC_PA_SIZE);
3098 writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
3099 writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
3100 writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);
3101
3102 writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);
3103
3104 writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
3105 writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
3106 writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
3107 writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
3108 writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);
3109
3110
3111 for (i = 0; i < 45; i++)
3112 writel(0x0, cp->regs + REG_MAC_ADDRN(i));
3113
3114 writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
3115 writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
3116 writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));
3117
3118 writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
3119 writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
3120 writel(0x0180, cp->regs + REG_MAC_ADDRN(44));
3121
3122 cp->mac_rx_cfg = cas_setup_multicast(cp);
3123
3124 spin_lock(&cp->stat_lock[N_TX_RINGS]);
3125 cas_clear_mac_err(cp);
3126 spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3127
3128
3129
3130
3131
3132 writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
3133 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
3134
3135
3136
3137
3138 writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
3139}
3140
3141
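/* Choose the RX FIFO occupancy thresholds used to generate and release
 * flow-control pause frames, based on the FIFO size and current MTU.
 */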
3142static void cas_init_pause_thresholds(struct cas *cp)
3143{
3144
3145
3146
3147 if (cp->rx_fifo_size <= (2 * 1024)) {
3148 cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
3149 } else {
3150 int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
3151 if (max_frame * 3 > cp->rx_fifo_size) {
3152 cp->rx_pause_off = 7104;
3153 cp->rx_pause_on = 960;
3154 } else {
3155 int off = (cp->rx_fifo_size - (max_frame * 2));
3156 int on = off - max_frame;
3157 cp->rx_pause_off = off;
3158 cp->rx_pause_on = on;
3159 }
3160 }
3161}
3162
3163static int cas_vpd_match(const void __iomem *p, const char *str)
3164{
3165 int len = strlen(str) + 1;
3166 int i;
3167
3168 for (i = 0; i < len; i++) {
3169 if (readb(p + i) != str[i])
3170 return 0;
3171 }
3172 return 1;
3173}

/* Walk the expansion ROM VPD to find the factory MAC address and the
 * PHY type.  @dev_addr is filled with the "local-mac-address" entry
 * selected by @offset (the PCI slot); the return value is a CAS_PHY_*
 * type, with "pcs" entries selecting CAS_PHY_SERDES.  If no address is
 * found (or, on sparc, supplied by the firmware device tree), a random
 * address with the Sun OUI 08:00:20 is generated instead.
 */
3187static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
3188 const int offset)
3189{
3190 void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
3191 void __iomem *base, *kstart;
3192 int i, len;
3193 int found = 0;
3194#define VPD_FOUND_MAC 0x01
3195#define VPD_FOUND_PHY 0x02
3196
3197 int phy_type = CAS_PHY_MII_MDIO0;
3198 int mac_off = 0;
3199
3200#if defined(CONFIG_SPARC)
3201 const unsigned char *addr;
3202#endif
3203
3204
3205 writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
3206 cp->regs + REG_BIM_LOCAL_DEV_EN);
3207
3208
3209 if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
3210 goto use_random_mac_addr;
3211
3212
3213 base = NULL;
3214 for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
3215
3216 if ((readb(p + i + 0) == 0x50) &&
3217 (readb(p + i + 1) == 0x43) &&
3218 (readb(p + i + 2) == 0x49) &&
3219 (readb(p + i + 3) == 0x52)) {
3220 base = p + (readb(p + i + 8) |
3221 (readb(p + i + 9) << 8));
3222 break;
3223 }
3224 }
3225
3226 if (!base || (readb(base) != 0x82))
3227 goto use_random_mac_addr;
3228
3229 i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
3230 while (i < EXPANSION_ROM_SIZE) {
3231 if (readb(base + i) != 0x90)
3232 goto use_random_mac_addr;
3233
3234
3235 len = readb(base + i + 1) | (readb(base + i + 2) << 8);
3236
3237
3238 kstart = base + i + 3;
3239 p = kstart;
3240 while ((p - kstart) < len) {
3241 int klen = readb(p + 2);
3242 int j;
3243 char type;
3244
3245 p += 3;

 /* VPD read-only data is a series of keyword descriptors.  We
 * only look at instance ('I') entries: readb(p + 3) gives the
 * data type ('B' binary, 'S' string), readb(p + 4) the keyword
 * length and the keyword name starts at p + 5.  The entries of
 * interest are the binary "local-mac-address" (factory MAC) and
 * the strings "phy-type"/"phy-interface", whose value "pcs"
 * selects the SERDES PHY.
 */
3284 if (readb(p) != 'I')
3285 goto next;
3286
3287
3288 type = readb(p + 3);
3289 if (type == 'B') {
3290 if ((klen == 29) && readb(p + 4) == 6 &&
3291 cas_vpd_match(p + 5,
3292 "local-mac-address")) {
3293 if (mac_off++ > offset)
3294 goto next;
3295
3296
3297 for (j = 0; j < 6; j++)
3298 dev_addr[j] =
3299 readb(p + 23 + j);
3300 goto found_mac;
3301 }
3302 }
3303
3304 if (type != 'S')
3305 goto next;
3306
3307#ifdef USE_ENTROPY_DEV
3308 if ((klen == 24) &&
3309 cas_vpd_match(p + 5, "entropy-dev") &&
3310 cas_vpd_match(p + 17, "vms110")) {
3311 cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
3312 goto next;
3313 }
3314#endif
3315
3316 if (found & VPD_FOUND_PHY)
3317 goto next;
3318
3319 if ((klen == 18) && readb(p + 4) == 4 &&
3320 cas_vpd_match(p + 5, "phy-type")) {
3321 if (cas_vpd_match(p + 14, "pcs")) {
3322 phy_type = CAS_PHY_SERDES;
3323 goto found_phy;
3324 }
3325 }
3326
3327 if ((klen == 23) && readb(p + 4) == 4 &&
3328 cas_vpd_match(p + 5, "phy-interface")) {
3329 if (cas_vpd_match(p + 19, "pcs")) {
3330 phy_type = CAS_PHY_SERDES;
3331 goto found_phy;
3332 }
3333 }
3334found_mac:
3335 found |= VPD_FOUND_MAC;
3336 goto next;
3337
3338found_phy:
3339 found |= VPD_FOUND_PHY;
3340
3341next:
3342 p += klen;
3343 }
3344 i += len + 3;
3345 }
3346
3347use_random_mac_addr:
3348 if (found & VPD_FOUND_MAC)
3349 goto done;
3350
3351#if defined(CONFIG_SPARC)
3352 addr = of_get_property(cp->of_node, "local-mac-address", NULL);
3353 if (addr != NULL) {
3354 memcpy(dev_addr, addr, ETH_ALEN);
3355 goto done;
3356 }
3357#endif
3358
3359
3360 pr_info("MAC address not found in ROM VPD\n");
3361 dev_addr[0] = 0x08;
3362 dev_addr[1] = 0x00;
3363 dev_addr[2] = 0x20;
3364 get_random_bytes(dev_addr + 3, 3);
3365
3366done:
3367 writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3368 return phy_type;
3369}
3370
3371
3372static void cas_check_pci_invariants(struct cas *cp)
3373{
3374 struct pci_dev *pdev = cp->pdev;
3375
3376 cp->cas_flags = 0;
3377 if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
3378 (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
3379 if (pdev->revision >= CAS_ID_REVPLUS)
3380 cp->cas_flags |= CAS_FLAG_REG_PLUS;
3381 if (pdev->revision < CAS_ID_REVPLUS02u)
3382 cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
3383
3384
3385
3386
3387 if (pdev->revision < CAS_ID_REV2)
3388 cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
3389 } else {
3390
3391 cp->cas_flags |= CAS_FLAG_REG_PLUS;
3392
3393
3394
3395
3396 if ((pdev->vendor == PCI_VENDOR_ID_NS) &&
3397 (pdev->device == PCI_DEVICE_ID_NS_SATURN))
3398 cp->cas_flags |= CAS_FLAG_SATURN;
3399 }
3400}
3401
3402
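/* Discover per-board parameters: the page order usable for RX buffers,
 * the FIFO sizes, the MAC address and PHY type from VPD, and (for MII
 * PHYs) the PHY address and gigabit capability.  Returns -1 if no MII
 * PHY responds.
 */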
3403static int cas_check_invariants(struct cas *cp)
3404{
3405 struct pci_dev *pdev = cp->pdev;
3406 u32 cfg;
3407 int i;
3408
3409
3410 cp->page_order = 0;
3411#ifdef USE_PAGE_ORDER
3412 if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
3413
3414 struct page *page = alloc_pages(GFP_ATOMIC,
3415 CAS_JUMBO_PAGE_SHIFT -
3416 PAGE_SHIFT);
3417 if (page) {
3418 __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
3419 cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
3420 } else {
 pr_info("MTU limited to %d bytes\n", CAS_MAX_MTU);
3422 }
3423 }
3424#endif
3425 cp->page_size = (PAGE_SIZE << cp->page_order);
3426
3427
3428 cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
3429 cp->rx_fifo_size = RX_FIFO_SIZE;
3430
3431
3432
3433
3434 cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
3435 PCI_SLOT(pdev->devfn));
3436 if (cp->phy_type & CAS_PHY_SERDES) {
3437 cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3438 return 0;
3439 }
3440
3441
3442 cfg = readl(cp->regs + REG_MIF_CFG);
3443 if (cfg & MIF_CFG_MDIO_1) {
3444 cp->phy_type = CAS_PHY_MII_MDIO1;
3445 } else if (cfg & MIF_CFG_MDIO_0) {
3446 cp->phy_type = CAS_PHY_MII_MDIO0;
3447 }
3448
3449 cas_mif_poll(cp, 0);
3450 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3451
3452 for (i = 0; i < 32; i++) {
3453 u32 phy_id;
3454 int j;
3455
3456 for (j = 0; j < 3; j++) {
3457 cp->phy_addr = i;
3458 phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
3459 phy_id |= cas_phy_read(cp, MII_PHYSID2);
3460 if (phy_id && (phy_id != 0xFFFFFFFF)) {
3461 cp->phy_id = phy_id;
3462 goto done;
3463 }
3464 }
3465 }
3466 pr_err("MII phy did not respond [%08x]\n",
3467 readl(cp->regs + REG_MIF_STATE_MACHINE));
3468 return -1;
3469
3470done:
3471
3472 cfg = cas_phy_read(cp, MII_BMSR);
3473 if ((cfg & CAS_BMSR_1000_EXTEND) &&
3474 cas_phy_read(cp, CAS_MII_1000_EXTEND))
3475 cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3476 return 0;
3477}
3478
3479
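/* Enable TX/RX DMA and the TX/RX MACs, wait for the MAC enable bits to
 * take effect, unmask interrupts and prime the RX kick and completion
 * tail registers.
 */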
3480static inline void cas_start_dma(struct cas *cp)
3481{
3482 int i;
3483 u32 val;
3484 int txfailed = 0;
3485
3486
3487 val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
3488 writel(val, cp->regs + REG_TX_CFG);
3489 val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
3490 writel(val, cp->regs + REG_RX_CFG);
3491
3492
3493 val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
3494 writel(val, cp->regs + REG_MAC_TX_CFG);
3495 val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
3496 writel(val, cp->regs + REG_MAC_RX_CFG);
3497
3498 i = STOP_TRIES;
3499 while (i-- > 0) {
3500 val = readl(cp->regs + REG_MAC_TX_CFG);
3501 if ((val & MAC_TX_CFG_EN))
3502 break;
3503 udelay(10);
3504 }
3505 if (i < 0) txfailed = 1;
3506 i = STOP_TRIES;
3507 while (i-- > 0) {
3508 val = readl(cp->regs + REG_MAC_RX_CFG);
3509 if ((val & MAC_RX_CFG_EN)) {
3510 if (txfailed) {
3511 netdev_err(cp->dev,
3512 "enabling mac failed [tx:%08x:%08x]\n",
3513 readl(cp->regs + REG_MIF_STATE_MACHINE),
3514 readl(cp->regs + REG_MAC_STATE_MACHINE));
3515 }
3516 goto enable_rx_done;
3517 }
3518 udelay(10);
3519 }
3520 netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
3521 (txfailed ? "tx,rx" : "rx"),
3522 readl(cp->regs + REG_MIF_STATE_MACHINE),
3523 readl(cp->regs + REG_MAC_STATE_MACHINE));
3524
3525enable_rx_done:
3526 cas_unmask_intr(cp);
3527 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
3528 writel(0, cp->regs + REG_RX_COMP_TAIL);
3529
3530 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
3531 if (N_RX_DESC_RINGS > 1)
3532 writel(RX_DESC_RINGN_SIZE(1) - 4,
3533 cp->regs + REG_PLUS_RX_KICK1);
3534
3535 for (i = 1; i < N_RX_COMP_RINGS; i++)
3536 writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
3537 }
3538}
3539
3540
3541static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
3542 int *pause)
3543{
3544 u32 val = readl(cp->regs + REG_PCS_MII_LPA);
3545 *fd = (val & PCS_MII_LPA_FD) ? 1 : 0;
3546 *pause = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00;
3547 if (val & PCS_MII_LPA_ASYM_PAUSE)
3548 *pause |= 0x10;
3549 *spd = 1000;
3550}
3551
3552
3553static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
3554 int *pause)
3555{
3556 u32 val;
3557
3558 *fd = 0;
3559 *spd = 10;
3560 *pause = 0;
3561
3562
3563 val = cas_phy_read(cp, MII_LPA);
3564 if (val & CAS_LPA_PAUSE)
3565 *pause = 0x01;
3566
3567 if (val & CAS_LPA_ASYM_PAUSE)
3568 *pause |= 0x10;
3569
3570 if (val & LPA_DUPLEX)
3571 *fd = 1;
3572 if (val & LPA_100)
3573 *spd = 100;
3574
3575 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
3576 val = cas_phy_read(cp, CAS_MII_1000_STATUS);
3577 if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF))
3578 *spd = 1000;
3579 if (val & CAS_LPA_1000FULL)
3580 *fd = 1;
3581 }
3582}
3583
/* Apply the negotiated (or forced) speed, duplex and pause settings to
 * the MAC and XIF, adjust the slot time and minimum frame size for
 * half-duplex gigabit carrier extension, and start DMA.
 */
3589static void cas_set_link_modes(struct cas *cp)
3590{
3591 u32 val;
3592 int full_duplex, speed, pause;
3593
3594 full_duplex = 0;
3595 speed = 10;
3596 pause = 0;
3597
3598 if (CAS_PHY_MII(cp->phy_type)) {
3599 cas_mif_poll(cp, 0);
3600 val = cas_phy_read(cp, MII_BMCR);
3601 if (val & BMCR_ANENABLE) {
3602 cas_read_mii_link_mode(cp, &full_duplex, &speed,
3603 &pause);
3604 } else {
3605 if (val & BMCR_FULLDPLX)
3606 full_duplex = 1;
3607
3608 if (val & BMCR_SPEED100)
3609 speed = 100;
3610 else if (val & CAS_BMCR_SPEED1000)
3611 speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
3612 1000 : 100;
3613 }
3614 cas_mif_poll(cp, 1);
3615
3616 } else {
3617 val = readl(cp->regs + REG_PCS_MII_CTRL);
3618 cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
3619 if ((val & PCS_MII_AUTONEG_EN) == 0) {
3620 if (val & PCS_MII_CTRL_DUPLEX)
3621 full_duplex = 1;
3622 }
3623 }
3624
3625 netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
3626 speed, full_duplex ? "full" : "half");
3627
3628 val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
3629 if (CAS_PHY_MII(cp->phy_type)) {
3630 val |= MAC_XIF_MII_BUFFER_OUTPUT_EN;
3631 if (!full_duplex)
3632 val |= MAC_XIF_DISABLE_ECHO;
3633 }
3634 if (full_duplex)
3635 val |= MAC_XIF_FDPLX_LED;
3636 if (speed == 1000)
3637 val |= MAC_XIF_GMII_MODE;
3638 writel(val, cp->regs + REG_MAC_XIF_CFG);
3639
3640
3641 val = MAC_TX_CFG_IPG_EN;
3642 if (full_duplex) {
3643 val |= MAC_TX_CFG_IGNORE_CARRIER;
3644 val |= MAC_TX_CFG_IGNORE_COLL;
3645 } else {
3646#ifndef USE_CSMA_CD_PROTO
3647 val |= MAC_TX_CFG_NEVER_GIVE_UP_EN;
3648 val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM;
3649#endif
3650 }
3651
3652
3653
3654
3655
3656
3657
3658 if ((speed == 1000) && !full_duplex) {
3659 writel(val | MAC_TX_CFG_CARRIER_EXTEND,
3660 cp->regs + REG_MAC_TX_CFG);
3661
3662 val = readl(cp->regs + REG_MAC_RX_CFG);
3663 val &= ~MAC_RX_CFG_STRIP_FCS;
3664 writel(val | MAC_RX_CFG_CARRIER_EXTEND,
3665 cp->regs + REG_MAC_RX_CFG);
3666
3667 writel(0x200, cp->regs + REG_MAC_SLOT_TIME);
3668
3669 cp->crc_size = 4;
3670
3671 cp->min_frame_size = CAS_1000MB_MIN_FRAME;
3672
3673 } else {
3674 writel(val, cp->regs + REG_MAC_TX_CFG);
3675
3676
3677
3678
3679 val = readl(cp->regs + REG_MAC_RX_CFG);
3680 if (full_duplex) {
3681 val |= MAC_RX_CFG_STRIP_FCS;
3682 cp->crc_size = 0;
3683 cp->min_frame_size = CAS_MIN_MTU;
3684 } else {
3685 val &= ~MAC_RX_CFG_STRIP_FCS;
3686 cp->crc_size = 4;
3687 cp->min_frame_size = CAS_MIN_FRAME;
3688 }
3689 writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
3690 cp->regs + REG_MAC_RX_CFG);
3691 writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3692 }
3693
3694 if (netif_msg_link(cp)) {
3695 if (pause & 0x01) {
3696 netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
3697 cp->rx_fifo_size,
3698 cp->rx_pause_off,
3699 cp->rx_pause_on);
3700 } else if (pause & 0x10) {
3701 netdev_info(cp->dev, "TX pause enabled\n");
3702 } else {
3703 netdev_info(cp->dev, "Pause is disabled\n");
3704 }
3705 }
3706
3707 val = readl(cp->regs + REG_MAC_CTRL_CFG);
3708 val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
3709 if (pause) {
3710 val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
3711 if (pause & 0x01) {
3712 val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
3713 }
3714 }
3715 writel(val, cp->regs + REG_MAC_CTRL_CFG);
3716 cas_start_dma(cp);
3717}
3718
3719
3720static void cas_init_hw(struct cas *cp, int restart_link)
3721{
3722 if (restart_link)
3723 cas_phy_init(cp);
3724
3725 cas_init_pause_thresholds(cp);
3726 cas_init_mac(cp);
3727 cas_init_dma(cp);
3728
3729 if (restart_link) {
3730
3731 cp->timer_ticks = 0;
3732 cas_begin_auto_negotiation(cp, NULL);
3733 } else if (cp->lstate == link_up) {
3734 cas_set_link_modes(cp);
3735 netif_carrier_on(cp->dev);
3736 }
3737}
3738
3739
3740
3741
3742
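/* Force a full chip reset through the BIM local-device soft reset bit
 * and restore the saved PCI configuration space afterwards.
 */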
3743static void cas_hard_reset(struct cas *cp)
3744{
3745 writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3746 udelay(20);
3747 pci_restore_state(cp->pdev);
3748}
3749
3750
3751static void cas_global_reset(struct cas *cp, int blkflag)
3752{
3753 int limit;
3754
3755
3756 if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
3757
3758
3759
3760
3761
3762
3763 writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
3764 cp->regs + REG_SW_RESET);
3765 } else {
3766 writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
3767 }
3768
3769
3770 mdelay(3);
3771
3772 limit = STOP_TRIES;
3773 while (limit-- > 0) {
3774 u32 val = readl(cp->regs + REG_SW_RESET);
3775 if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
3776 goto done;
3777 udelay(10);
3778 }
3779 netdev_err(cp->dev, "sw reset failed\n");
3780
3781done:
3782
3783 writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
3784 BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
3785
3786
3787
3788
3789
3790 writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
3791 PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
3792 PCI_ERR_BIM_DMA_READ), cp->regs +
3793 REG_PCI_ERR_STATUS_MASK);
3794
3795
3796
3797
3798 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3799}
3800
3801static void cas_reset(struct cas *cp, int blkflag)
3802{
3803 u32 val;
3804
3805 cas_mask_intr(cp);
3806 cas_global_reset(cp, blkflag);
3807 cas_mac_reset(cp);
3808 cas_entropy_reset(cp);
3809
3810
3811 val = readl(cp->regs + REG_TX_CFG);
3812 val &= ~TX_CFG_DMA_EN;
3813 writel(val, cp->regs + REG_TX_CFG);
3814
3815 val = readl(cp->regs + REG_RX_CFG);
3816 val &= ~RX_CFG_DMA_EN;
3817 writel(val, cp->regs + REG_RX_CFG);
3818
3819
3820 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
3821 (CAS_HP_ALT_FIRMWARE == cas_prog_null)) {
3822 cas_load_firmware(cp, CAS_HP_FIRMWARE);
3823 } else {
3824 cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
3825 }
3826
3827
3828 spin_lock(&cp->stat_lock[N_TX_RINGS]);
3829 cas_clear_mac_err(cp);
3830 spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3831}
3832
3833
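/* Quiesce the chip: stop the link timer, wait for any outstanding
 * reset work, then reset the hardware (and power down the PHY on
 * Saturn boards) with all locks held.
 */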
3834static void cas_shutdown(struct cas *cp)
3835{
3836 unsigned long flags;
3837
3838
3839 cp->hw_running = 0;
3840
3841 del_timer_sync(&cp->link_timer);
3842
3843
3844#if 0
3845 while (atomic_read(&cp->reset_task_pending_mtu) ||
3846 atomic_read(&cp->reset_task_pending_spare) ||
3847 atomic_read(&cp->reset_task_pending_all))
3848 schedule();
3849
3850#else
3851 while (atomic_read(&cp->reset_task_pending))
3852 schedule();
3853#endif
3854
3855 cas_lock_all_save(cp, flags);
3856 cas_reset(cp, 0);
3857 if (cp->cas_flags & CAS_FLAG_SATURN)
3858 cas_phy_powerdown(cp);
3859 cas_unlock_all_restore(cp, flags);
3860}
3861
3862static int cas_change_mtu(struct net_device *dev, int new_mtu)
3863{
3864 struct cas *cp = netdev_priv(dev);
3865
3866 if (new_mtu < CAS_MIN_MTU || new_mtu > CAS_MAX_MTU)
3867 return -EINVAL;
3868
3869 dev->mtu = new_mtu;
3870 if (!netif_running(dev) || !netif_device_present(dev))
3871 return 0;
3872
3873
3874#if 1
3875 atomic_inc(&cp->reset_task_pending);
3876 if ((cp->phy_type & CAS_PHY_SERDES)) {
3877 atomic_inc(&cp->reset_task_pending_all);
3878 } else {
3879 atomic_inc(&cp->reset_task_pending_mtu);
3880 }
3881 schedule_work(&cp->reset_task);
3882#else
3883 atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
3884 CAS_RESET_ALL : CAS_RESET_MTU);
3885 pr_err("reset called in cas_change_mtu\n");
3886 schedule_work(&cp->reset_task);
3887#endif
3888
3889 flush_work(&cp->reset_task);
3890 return 0;
3891}
3892
3893static void cas_clean_txd(struct cas *cp, int ring)
3894{
3895 struct cas_tx_desc *txd = cp->init_txds[ring];
3896 struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
3897 u64 daddr, dlen;
3898 int i, size;
3899
3900 size = TX_DESC_RINGN_SIZE(ring);
3901 for (i = 0; i < size; i++) {
3902 int frag;
3903
3904 if (skbs[i] == NULL)
3905 continue;
3906
3907 skb = skbs[i];
3908 skbs[i] = NULL;
3909
3910 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
3911 int ent = i & (size - 1);
3912
3913
3914
3915
3916 daddr = le64_to_cpu(txd[ent].buffer);
3917 dlen = CAS_VAL(TX_DESC_BUFLEN,
3918 le64_to_cpu(txd[ent].control));
3919 pci_unmap_page(cp->pdev, daddr, dlen,
3920 PCI_DMA_TODEVICE);
3921
3922 if (frag != skb_shinfo(skb)->nr_frags) {
3923 i++;
3924
3925
3926
3927
3928 ent = i & (size - 1);
3929 if (cp->tx_tiny_use[ring][ent].used)
3930 i++;
3931 }
3932 }
3933 dev_kfree_skb_any(skb);
3934 }
3935
3936
3937 memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
3938}
3939
3940
3941static inline void cas_free_rx_desc(struct cas *cp, int ring)
3942{
3943 cas_page_t **page = cp->rx_pages[ring];
3944 int i, size;
3945
3946 size = RX_DESC_RINGN_SIZE(ring);
3947 for (i = 0; i < size; i++) {
3948 if (page[i]) {
3949 cas_page_free(cp, page[i]);
3950 page[i] = NULL;
3951 }
3952 }
3953}
3954
3955static void cas_free_rxds(struct cas *cp)
3956{
3957 int i;
3958
3959 for (i = 0; i < N_RX_DESC_RINGS; i++)
3960 cas_free_rx_desc(cp, i);
3961}
3962
3963
3964static void cas_clean_rings(struct cas *cp)
3965{
3966 int i;
3967
3968
3969 memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
3970 memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
3971 for (i = 0; i < N_TX_RINGS; i++)
3972 cas_clean_txd(cp, i);
3973
3974
3975 memset(cp->init_block, 0, sizeof(struct cas_init_block));
3976 cas_clean_rxds(cp);
3977 cas_clean_rxcs(cp);
3978}
3979
3980
3981static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
3982{
3983 cas_page_t **page = cp->rx_pages[ring];
3984 int size, i = 0;
3985
3986 size = RX_DESC_RINGN_SIZE(ring);
3987 for (i = 0; i < size; i++) {
3988 if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
3989 return -1;
3990 }
3991 return 0;
3992}
3993
3994static int cas_alloc_rxds(struct cas *cp)
3995{
3996 int i;
3997
3998 for (i = 0; i < N_RX_DESC_RINGS; i++) {
3999 if (cas_alloc_rx_desc(cp, i) < 0) {
4000 cas_free_rxds(cp);
4001 return -1;
4002 }
4003 }
4004 return 0;
4005}
4006
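/* Deferred reset handler.  Recovers spare RX pages and, unless only
 * spare recovery was requested, resets the chip and re-initializes the
 * hardware; a full reset (including PCS) is done only when an "all"
 * reset is pending.
 */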
4007static void cas_reset_task(struct work_struct *work)
4008{
4009 struct cas *cp = container_of(work, struct cas, reset_task);
4010#if 0
4011 int pending = atomic_read(&cp->reset_task_pending);
4012#else
4013 int pending_all = atomic_read(&cp->reset_task_pending_all);
4014 int pending_spare = atomic_read(&cp->reset_task_pending_spare);
4015 int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);
4016
4017 if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) {
4018
4019
4020
4021 atomic_dec(&cp->reset_task_pending);
4022 return;
4023 }
4024#endif
4025
4026
4027
4028
4029 if (cp->hw_running) {
4030 unsigned long flags;
4031
4032
4033 netif_device_detach(cp->dev);
4034 cas_lock_all_save(cp, flags);
4035
4036 if (cp->opened) {
4037
4038
4039
4040
4041 cas_spare_recover(cp, GFP_ATOMIC);
4042 }
4043#if 1
4044
4045 if (!pending_all && !pending_mtu)
4046 goto done;
4047#else
4048 if (pending == CAS_RESET_SPARE)
4049 goto done;
4050#endif
4051
4052
4053
4054
4055
4056
4057
4058#if 1
4059 cas_reset(cp, !(pending_all > 0));
4060 if (cp->opened)
4061 cas_clean_rings(cp);
4062 cas_init_hw(cp, (pending_all > 0));
4063#else
4064 cas_reset(cp, !(pending == CAS_RESET_ALL));
4065 if (cp->opened)
4066 cas_clean_rings(cp);
4067 cas_init_hw(cp, pending == CAS_RESET_ALL);
4068#endif
4069
4070done:
4071 cas_unlock_all_restore(cp, flags);
4072 netif_device_attach(cp->dev);
4073 }
4074#if 1
4075 atomic_sub(pending_all, &cp->reset_task_pending_all);
4076 atomic_sub(pending_spare, &cp->reset_task_pending_spare);
4077 atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
4078 atomic_dec(&cp->reset_task_pending);
4079#else
4080 atomic_set(&cp->reset_task_pending, 0);
4081#endif
4082}
4083
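/* Periodic link timer: retries any deferred RX descriptor posting,
 * polls the MII or PCS link state, watches for a wedged TX MAC and
 * schedules a chip reset when something looks wrong, then re-arms
 * itself.
 */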
4084static void cas_link_timer(unsigned long data)
4085{
4086 struct cas *cp = (struct cas *) data;
4087 int mask, pending = 0, reset = 0;
4088 unsigned long flags;
4089
4090 if (link_transition_timeout != 0 &&
4091 cp->link_transition_jiffies_valid &&
4092 ((jiffies - cp->link_transition_jiffies) >
4093 (link_transition_timeout))) {
4094
4095
4096
4097
4098 cp->link_transition_jiffies_valid = 0;
4099 }
4100
4101 if (!cp->hw_running)
4102 return;
4103
4104 spin_lock_irqsave(&cp->lock, flags);
4105 cas_lock_tx(cp);
4106 cas_entropy_gather(cp);
4107
4108
4109
4110
4111#if 1
4112 if (atomic_read(&cp->reset_task_pending_all) ||
4113 atomic_read(&cp->reset_task_pending_spare) ||
4114 atomic_read(&cp->reset_task_pending_mtu))
4115 goto done;
4116#else
4117 if (atomic_read(&cp->reset_task_pending))
4118 goto done;
4119#endif
4120
4121
4122 if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) {
4123 int i, rmask;
4124
4125 for (i = 0; i < MAX_RX_DESC_RINGS; i++) {
4126 rmask = CAS_FLAG_RXD_POST(i);
4127 if ((mask & rmask) == 0)
4128 continue;
4129
4130
4131 if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
4132 pending = 1;
4133 continue;
4134 }
4135 cp->cas_flags &= ~rmask;
4136 }
4137 }
4138
4139 if (CAS_PHY_MII(cp->phy_type)) {
4140 u16 bmsr;
4141 cas_mif_poll(cp, 0);
4142 bmsr = cas_phy_read(cp, MII_BMSR);
4143
4144
4145
4146
4147
4148 bmsr = cas_phy_read(cp, MII_BMSR);
4149 cas_mif_poll(cp, 1);
4150 readl(cp->regs + REG_MIF_STATUS);
4151 reset = cas_mii_link_check(cp, bmsr);
4152 } else {
4153 reset = cas_pcs_link_check(cp);
4154 }
4155
4156 if (reset)
4157 goto done;
4158
4159
4160 if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
4161 u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
4162 u32 wptr, rptr;
4163 int tlm = CAS_VAL(MAC_SM_TLM, val);
4164
4165 if (((tlm == 0x5) || (tlm == 0x3)) &&
4166 (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
4167 netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
4168 "tx err: MAC_STATE[%08x]\n", val);
4169 reset = 1;
4170 goto done;
4171 }
4172
4173 val = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
4174 wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
4175 rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
4176 if ((val == 0) && (wptr != rptr)) {
4177 netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
4178 "tx err: TX_FIFO[%08x:%08x:%08x]\n",
4179 val, wptr, rptr);
4180 reset = 1;
4181 }
4182
4183 if (reset)
4184 cas_hard_reset(cp);
4185 }
4186
4187done:
4188 if (reset) {
4189#if 1
4190 atomic_inc(&cp->reset_task_pending);
4191 atomic_inc(&cp->reset_task_pending_all);
4192 schedule_work(&cp->reset_task);
4193#else
4194 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
4195 pr_err("reset called in cas_link_timer\n");
4196 schedule_work(&cp->reset_task);
4197#endif
4198 }
4199
4200 if (!pending)
4201 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
4202 cas_unlock_tx(cp);
4203 spin_unlock_irqrestore(&cp->lock, flags);
4204}

/* The "tiny" TX buffers are small per-ring DMA-coherent bounce areas.
 * cas_xmit_tx_ringN() copies the tail of a transmit buffer into them
 * whenever cas_calc_tabort() flags the buffer layout as problematic
 * for the chip (the "tabort" workaround).
 */
4209static void cas_tx_tiny_free(struct cas *cp)
4210{
4211 struct pci_dev *pdev = cp->pdev;
4212 int i;
4213
4214 for (i = 0; i < N_TX_RINGS; i++) {
4215 if (!cp->tx_tiny_bufs[i])
4216 continue;
4217
4218 pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
4219 cp->tx_tiny_bufs[i],
4220 cp->tx_tiny_dvma[i]);
4221 cp->tx_tiny_bufs[i] = NULL;
4222 }
4223}
4224
4225static int cas_tx_tiny_alloc(struct cas *cp)
4226{
4227 struct pci_dev *pdev = cp->pdev;
4228 int i;
4229
4230 for (i = 0; i < N_TX_RINGS; i++) {
4231 cp->tx_tiny_bufs[i] =
4232 pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
4233 &cp->tx_tiny_dvma[i]);
4234 if (!cp->tx_tiny_bufs[i]) {
4235 cas_tx_tiny_free(cp);
4236 return -1;
4237 }
4238 }
4239 return 0;
4240}
4241
4242
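/* Bring the interface up: reset the chip if it was not already
 * running, allocate the tiny TX buffers, RX pages and spare pool,
 * request the interrupt and initialize the hardware.
 */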
4243static int cas_open(struct net_device *dev)
4244{
4245 struct cas *cp = netdev_priv(dev);
4246 int hw_was_up, err;
4247 unsigned long flags;
4248
4249 mutex_lock(&cp->pm_mutex);
4250
4251 hw_was_up = cp->hw_running;
4252
4253
4254
4255
4256 if (!cp->hw_running) {
4257
4258 cas_lock_all_save(cp, flags);
4259
4260
4261
4262
4263
4264 cas_reset(cp, 0);
4265 cp->hw_running = 1;
4266 cas_unlock_all_restore(cp, flags);
4267 }
4268
4269 err = -ENOMEM;
4270 if (cas_tx_tiny_alloc(cp) < 0)
4271 goto err_unlock;
4272
4273
4274 if (cas_alloc_rxds(cp) < 0)
4275 goto err_tx_tiny;
4276
4277
4278 cas_spare_init(cp);
4279 cas_spare_recover(cp, GFP_KERNEL);
4280
4281
4282
4283
4284
4285
4286 if (request_irq(cp->pdev->irq, cas_interrupt,
4287 IRQF_SHARED, dev->name, (void *) dev)) {
4288 netdev_err(cp->dev, "failed to request irq !\n");
4289 err = -EAGAIN;
4290 goto err_spare;
4291 }
4292
4293#ifdef USE_NAPI
4294 napi_enable(&cp->napi);
4295#endif
4296
4297 cas_lock_all_save(cp, flags);
4298 cas_clean_rings(cp);
4299 cas_init_hw(cp, !hw_was_up);
4300 cp->opened = 1;
4301 cas_unlock_all_restore(cp, flags);
4302
4303 netif_start_queue(dev);
4304 mutex_unlock(&cp->pm_mutex);
4305 return 0;
4306
4307err_spare:
4308 cas_spare_free(cp);
4309 cas_free_rxds(cp);
4310err_tx_tiny:
4311 cas_tx_tiny_free(cp);
4312err_unlock:
4313 mutex_unlock(&cp->pm_mutex);
4314 return err;
4315}
4316
4317static int cas_close(struct net_device *dev)
4318{
4319 unsigned long flags;
4320 struct cas *cp = netdev_priv(dev);
4321
4322#ifdef USE_NAPI
4323 napi_disable(&cp->napi);
4324#endif
4325
4326 mutex_lock(&cp->pm_mutex);
4327
4328 netif_stop_queue(dev);
4329
4330
4331 cas_lock_all_save(cp, flags);
4332 cp->opened = 0;
4333 cas_reset(cp, 0);
4334 cas_phy_init(cp);
4335 cas_begin_auto_negotiation(cp, NULL);
4336 cas_clean_rings(cp);
4337 cas_unlock_all_restore(cp, flags);
4338
4339 free_irq(cp->pdev->irq, (void *) dev);
4340 cas_spare_free(cp);
4341 cas_free_rxds(cp);
4342 cas_tx_tiny_free(cp);
4343 mutex_unlock(&cp->pm_mutex);
4344 return 0;
4345}
4346
4347static struct {
4348 const char name[ETH_GSTRING_LEN];
4349} ethtool_cassini_statnames[] = {
4350 {"collisions"},
4351 {"rx_bytes"},
4352 {"rx_crc_errors"},
4353 {"rx_dropped"},
4354 {"rx_errors"},
4355 {"rx_fifo_errors"},
4356 {"rx_frame_errors"},
4357 {"rx_length_errors"},
4358 {"rx_over_errors"},
4359 {"rx_packets"},
4360 {"tx_aborted_errors"},
4361 {"tx_bytes"},
4362 {"tx_dropped"},
4363 {"tx_errors"},
4364 {"tx_fifo_errors"},
4365 {"tx_packets"}
4366};
4367#define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames)
4368
4369static struct {
4370 const int offsets;
4371} ethtool_register_table[] = {
4372 {-MII_BMSR},
4373 {-MII_BMCR},
4374 {REG_CAWR},
4375 {REG_INF_BURST},
4376 {REG_BIM_CFG},
4377 {REG_RX_CFG},
4378 {REG_HP_CFG},
4379 {REG_MAC_TX_CFG},
4380 {REG_MAC_RX_CFG},
4381 {REG_MAC_CTRL_CFG},
4382 {REG_MAC_XIF_CFG},
4383 {REG_MIF_CFG},
4384 {REG_PCS_CFG},
4385 {REG_SATURN_PCFG},
4386 {REG_PCS_MII_STATUS},
4387 {REG_PCS_STATE_MACHINE},
4388 {REG_MAC_COLL_EXCESS},
4389 {REG_MAC_COLL_LATE}
4390};
4391#define CAS_REG_LEN ARRAY_SIZE(ethtool_register_table)
4392#define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN)
4393
4394static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
4395{
4396 u8 *p;
4397 int i;
4398 unsigned long flags;
4399
4400 spin_lock_irqsave(&cp->lock, flags);
 for (i = 0, p = ptr; i < len; i++, p += sizeof(u32)) {
4402 u16 hval;
4403 u32 val;
4404 if (ethtool_register_table[i].offsets < 0) {
4405 hval = cas_phy_read(cp,
4406 -ethtool_register_table[i].offsets);
4407 val = hval;
4408 } else {
 val = readl(cp->regs + ethtool_register_table[i].offsets);
4410 }
4411 memcpy(p, (u8 *)&val, sizeof(u32));
4412 }
4413 spin_unlock_irqrestore(&cp->lock, flags);
4414}
4415
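/* Fold the MAC hardware error counters and the per-ring software
 * counters into the aggregate entry at net_stats[N_TX_RINGS] and
 * return it.
 */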
4416static struct net_device_stats *cas_get_stats(struct net_device *dev)
4417{
4418 struct cas *cp = netdev_priv(dev);
4419 struct net_device_stats *stats = cp->net_stats;
4420 unsigned long flags;
4421 int i;
4422 unsigned long tmp;
4423
4424
4425 if (!cp->hw_running)
4426 return stats + N_TX_RINGS;
4427
4428
4429
4430
4431
4432
4433
4434
4435
4436 spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
4437 stats[N_TX_RINGS].rx_crc_errors +=
4438 readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
4439 stats[N_TX_RINGS].rx_frame_errors +=
4440 readl(cp->regs + REG_MAC_ALIGN_ERR) &0xffff;
4441 stats[N_TX_RINGS].rx_length_errors +=
4442 readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
4443#if 1
4444 tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
4445 (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff);
4446 stats[N_TX_RINGS].tx_aborted_errors += tmp;
4447 stats[N_TX_RINGS].collisions +=
4448 tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
4449#else
4450 stats[N_TX_RINGS].tx_aborted_errors +=
4451 readl(cp->regs + REG_MAC_COLL_EXCESS);
4452 stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
4453 readl(cp->regs + REG_MAC_COLL_LATE);
4454#endif
4455 cas_clear_mac_err(cp);
4456
4457
4458 spin_lock(&cp->stat_lock[0]);
4459 stats[N_TX_RINGS].collisions += stats[0].collisions;
4460 stats[N_TX_RINGS].rx_over_errors += stats[0].rx_over_errors;
4461 stats[N_TX_RINGS].rx_frame_errors += stats[0].rx_frame_errors;
4462 stats[N_TX_RINGS].rx_fifo_errors += stats[0].rx_fifo_errors;
4463 stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors;
4464 stats[N_TX_RINGS].tx_fifo_errors += stats[0].tx_fifo_errors;
4465 spin_unlock(&cp->stat_lock[0]);
4466
4467 for (i = 0; i < N_TX_RINGS; i++) {
4468 spin_lock(&cp->stat_lock[i]);
4469 stats[N_TX_RINGS].rx_length_errors +=
4470 stats[i].rx_length_errors;
4471 stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
4472 stats[N_TX_RINGS].rx_packets += stats[i].rx_packets;
4473 stats[N_TX_RINGS].tx_packets += stats[i].tx_packets;
4474 stats[N_TX_RINGS].rx_bytes += stats[i].rx_bytes;
4475 stats[N_TX_RINGS].tx_bytes += stats[i].tx_bytes;
4476 stats[N_TX_RINGS].rx_errors += stats[i].rx_errors;
4477 stats[N_TX_RINGS].tx_errors += stats[i].tx_errors;
4478 stats[N_TX_RINGS].rx_dropped += stats[i].rx_dropped;
4479 stats[N_TX_RINGS].tx_dropped += stats[i].tx_dropped;
4480 memset(stats + i, 0, sizeof(struct net_device_stats));
4481 spin_unlock(&cp->stat_lock[i]);
4482 }
4483 spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags);
4484 return stats + N_TX_RINGS;
4485}
4486
4487
4488static void cas_set_multicast(struct net_device *dev)
4489{
4490 struct cas *cp = netdev_priv(dev);
4491 u32 rxcfg, rxcfg_new;
4492 unsigned long flags;
4493 int limit = STOP_TRIES;
4494
4495 if (!cp->hw_running)
4496 return;
4497
4498 spin_lock_irqsave(&cp->lock, flags);
4499 rxcfg = readl(cp->regs + REG_MAC_RX_CFG);
4500
4501
4502 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4503 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
4504 if (!limit--)
4505 break;
4506 udelay(10);
4507 }
4508
4509
4510 limit = STOP_TRIES;
4511 rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN);
4512 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4513 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) {
4514 if (!limit--)
4515 break;
4516 udelay(10);
4517 }
4518
4519
4520 cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp);
4521 rxcfg |= rxcfg_new;
4522 writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
4523 spin_unlock_irqrestore(&cp->lock, flags);
4524}
4525
4526static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4527{
4528 struct cas *cp = netdev_priv(dev);
4529 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
4530 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
4531 strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
4532}
4533
4534static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4535{
4536 struct cas *cp = netdev_priv(dev);
4537 u16 bmcr;
4538 int full_duplex, speed, pause;
4539 unsigned long flags;
4540 enum link_state linkstate = link_up;
4541
4542 cmd->advertising = 0;
4543 cmd->supported = SUPPORTED_Autoneg;
4544 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
4545 cmd->supported |= SUPPORTED_1000baseT_Full;
4546 cmd->advertising |= ADVERTISED_1000baseT_Full;
4547 }
4548
4549
4550 spin_lock_irqsave(&cp->lock, flags);
4551 bmcr = 0;
4552 linkstate = cp->lstate;
4553 if (CAS_PHY_MII(cp->phy_type)) {
4554 cmd->port = PORT_MII;
4555 cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ?
4556 XCVR_INTERNAL : XCVR_EXTERNAL;
4557 cmd->phy_address = cp->phy_addr;
4558 cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
4559 ADVERTISED_10baseT_Half |
4560 ADVERTISED_10baseT_Full |
4561 ADVERTISED_100baseT_Half |
4562 ADVERTISED_100baseT_Full;
4563
4564 cmd->supported |=
4565 (SUPPORTED_10baseT_Half |
4566 SUPPORTED_10baseT_Full |
4567 SUPPORTED_100baseT_Half |
4568 SUPPORTED_100baseT_Full |
4569 SUPPORTED_TP | SUPPORTED_MII);
4570
4571 if (cp->hw_running) {
4572 cas_mif_poll(cp, 0);
4573 bmcr = cas_phy_read(cp, MII_BMCR);
4574 cas_read_mii_link_mode(cp, &full_duplex,
4575 &speed, &pause);
4576 cas_mif_poll(cp, 1);
4577 }
4578
4579 } else {
4580 cmd->port = PORT_FIBRE;
4581 cmd->transceiver = XCVR_INTERNAL;
4582 cmd->phy_address = 0;
4583 cmd->supported |= SUPPORTED_FIBRE;
4584 cmd->advertising |= ADVERTISED_FIBRE;
4585
4586 if (cp->hw_running) {
4587
4588 bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
4589 cas_read_pcs_link_mode(cp, &full_duplex,
4590 &speed, &pause);
4591 }
4592 }
4593 spin_unlock_irqrestore(&cp->lock, flags);
4594
4595 if (bmcr & BMCR_ANENABLE) {
4596 cmd->advertising |= ADVERTISED_Autoneg;
4597 cmd->autoneg = AUTONEG_ENABLE;
4598 ethtool_cmd_speed_set(cmd, ((speed == 10) ?
4599 SPEED_10 :
4600 ((speed == 1000) ?
4601 SPEED_1000 : SPEED_100)));
4602 cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
4603 } else {
4604 cmd->autoneg = AUTONEG_DISABLE;
4605 ethtool_cmd_speed_set(cmd, ((bmcr & CAS_BMCR_SPEED1000) ?
4606 SPEED_1000 :
4607 ((bmcr & BMCR_SPEED100) ?
4608 SPEED_100 : SPEED_10)));
4609 cmd->duplex =
4610 (bmcr & BMCR_FULLDPLX) ?
4611 DUPLEX_FULL : DUPLEX_HALF;
4612 }
4613 if (linkstate != link_up) {
4614
4615
4616
4617
4618
4619
4620
4621
4622
4623
4624 if (cp->link_cntl & BMCR_ANENABLE) {
4625 ethtool_cmd_speed_set(cmd, 0);
4626 cmd->duplex = 0xff;
4627 } else {
4628 ethtool_cmd_speed_set(cmd, SPEED_10);
4629 if (cp->link_cntl & BMCR_SPEED100) {
4630 ethtool_cmd_speed_set(cmd, SPEED_100);
4631 } else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
4632 ethtool_cmd_speed_set(cmd, SPEED_1000);
4633 }
4634 cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)?
4635 DUPLEX_FULL : DUPLEX_HALF;
4636 }
4637 }
4638 return 0;
4639}
4640
4641static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4642{
4643 struct cas *cp = netdev_priv(dev);
4644 unsigned long flags;
4645 u32 speed = ethtool_cmd_speed(cmd);
4646
4647
4648 if (cmd->autoneg != AUTONEG_ENABLE &&
4649 cmd->autoneg != AUTONEG_DISABLE)
4650 return -EINVAL;
4651
4652 if (cmd->autoneg == AUTONEG_DISABLE &&
4653 ((speed != SPEED_1000 &&
4654 speed != SPEED_100 &&
4655 speed != SPEED_10) ||
4656 (cmd->duplex != DUPLEX_HALF &&
4657 cmd->duplex != DUPLEX_FULL)))
4658 return -EINVAL;
4659
4660
4661 spin_lock_irqsave(&cp->lock, flags);
4662 cas_begin_auto_negotiation(cp, cmd);
4663 spin_unlock_irqrestore(&cp->lock, flags);
4664 return 0;
4665}
4666
4667static int cas_nway_reset(struct net_device *dev)
4668{
4669 struct cas *cp = netdev_priv(dev);
4670 unsigned long flags;
4671
4672 if ((cp->link_cntl & BMCR_ANENABLE) == 0)
4673 return -EINVAL;
4674
4675
4676 spin_lock_irqsave(&cp->lock, flags);
4677 cas_begin_auto_negotiation(cp, NULL);
4678 spin_unlock_irqrestore(&cp->lock, flags);
4679
4680 return 0;
4681}
4682
4683static u32 cas_get_link(struct net_device *dev)
4684{
4685 struct cas *cp = netdev_priv(dev);
4686 return cp->lstate == link_up;
4687}
4688
4689static u32 cas_get_msglevel(struct net_device *dev)
4690{
4691 struct cas *cp = netdev_priv(dev);
4692 return cp->msg_enable;
4693}
4694
4695static void cas_set_msglevel(struct net_device *dev, u32 value)
4696{
4697 struct cas *cp = netdev_priv(dev);
4698 cp->msg_enable = value;
4699}
4700
4701static int cas_get_regs_len(struct net_device *dev)
4702{
4703 struct cas *cp = netdev_priv(dev);
4704 return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len: CAS_MAX_REGS;
4705}
4706
4707static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
4708 void *p)
4709{
4710 struct cas *cp = netdev_priv(dev);
4711 regs->version = 0;
4712
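	/* Dump regs->len / sizeof(u32) registers into the caller's buffer. */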
4713 cas_read_regs(cp, p, regs->len / sizeof(u32));
4714}
4715
4716static int cas_get_sset_count(struct net_device *dev, int sset)
4717{
4718 switch (sset) {
4719 case ETH_SS_STATS:
4720 return CAS_NUM_STAT_KEYS;
4721 default:
4722 return -EOPNOTSUPP;
4723 }
4724}
4725
4726static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4727{
	memcpy(data, &ethtool_cassini_statnames,
4729 CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
4730}
4731
4732static void cas_get_ethtool_stats(struct net_device *dev,
4733 struct ethtool_stats *estats, u64 *data)
4734{
4735 struct cas *cp = netdev_priv(dev);
4736 struct net_device_stats *stats = cas_get_stats(cp->dev);
4737 int i = 0;
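	/* The order of these entries must match ethtool_cassini_statnames. */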
4738 data[i++] = stats->collisions;
4739 data[i++] = stats->rx_bytes;
4740 data[i++] = stats->rx_crc_errors;
4741 data[i++] = stats->rx_dropped;
4742 data[i++] = stats->rx_errors;
4743 data[i++] = stats->rx_fifo_errors;
4744 data[i++] = stats->rx_frame_errors;
4745 data[i++] = stats->rx_length_errors;
4746 data[i++] = stats->rx_over_errors;
4747 data[i++] = stats->rx_packets;
4748 data[i++] = stats->tx_aborted_errors;
4749 data[i++] = stats->tx_bytes;
4750 data[i++] = stats->tx_dropped;
4751 data[i++] = stats->tx_errors;
4752 data[i++] = stats->tx_fifo_errors;
4753 data[i++] = stats->tx_packets;
4754 BUG_ON(i != CAS_NUM_STAT_KEYS);
4755}
4756
4757static const struct ethtool_ops cas_ethtool_ops = {
4758 .get_drvinfo = cas_get_drvinfo,
4759 .get_settings = cas_get_settings,
4760 .set_settings = cas_set_settings,
4761 .nway_reset = cas_nway_reset,
4762 .get_link = cas_get_link,
4763 .get_msglevel = cas_get_msglevel,
4764 .set_msglevel = cas_set_msglevel,
4765 .get_regs_len = cas_get_regs_len,
4766 .get_regs = cas_get_regs,
4767 .get_sset_count = cas_get_sset_count,
4768 .get_strings = cas_get_strings,
4769 .get_ethtool_stats = cas_get_ethtool_stats,
4770};
4771
4772static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4773{
4774 struct cas *cp = netdev_priv(dev);
4775 struct mii_ioctl_data *data = if_mii(ifr);
4776 unsigned long flags;
4777 int rc = -EOPNOTSUPP;
4778
4779
4780
4781
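	/* Hold the PM mutex while doing ioctl's or we may collide
	 * with open/close and power management (suspend/resume).
	 */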
4782 mutex_lock(&cp->pm_mutex);
4783 switch (cmd) {
4784 case SIOCGMIIPHY:
4785 data->phy_id = cp->phy_addr;
		/* fall through to read the register for the reported PHY */

4788 case SIOCGMIIREG:
4789 spin_lock_irqsave(&cp->lock, flags);
4790 cas_mif_poll(cp, 0);
4791 data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
4792 cas_mif_poll(cp, 1);
4793 spin_unlock_irqrestore(&cp->lock, flags);
4794 rc = 0;
4795 break;
4796
4797 case SIOCSMIIREG:
4798 spin_lock_irqsave(&cp->lock, flags);
4799 cas_mif_poll(cp, 0);
4800 rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
4801 cas_mif_poll(cp, 1);
4802 spin_unlock_irqrestore(&cp->lock, flags);
4803 break;
4804 default:
4805 break;
4806 }
4807
4808 mutex_unlock(&cp->pm_mutex);
4809 return rc;
4810}
4811
4812
/* When the Cassini sits behind what appears to be an Intel 31154 PCI-X
 * bridge (vendor 0x8086, device 0x537c), it is the only device on that
 * secondary bus, so the bridge can be tuned aggressively for it.
 */
4816static void cas_program_bridge(struct pci_dev *cas_pdev)
4817{
4818 struct pci_dev *pdev = cas_pdev->bus->self;
4819 u32 val;
4820
4821 if (!pdev)
4822 return;
4823
4824 if (pdev->vendor != 0x8086 || pdev->device != 0x537c)
4825 return;
4826
	/* Clear what looks like a bus-parking control bit in the bridge's
	 * secondary arbiter register: bit 10 of the byte at offset 0x41,
	 * accessed here as bit 18 of the 32-bit word at offset 0x40.
	 */
4832 pci_read_config_dword(pdev, 0x40, &val);
4833 val &= ~0x00040000;
4834 pci_write_config_dword(pdev, 0x40, val);
4835
4836
	/* The 16-bit register at offset 0x50 appears to control the
	 * bridge's multi-transaction timer, i.e. how long a grant is held
	 * after a transaction completes.  With only the Cassini below the
	 * bridge there is nothing to arbitrate against, so generous values
	 * are programmed.
	 */
4858 pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff);
4859
4860
	/* The 16-bit register at offset 0x52 appears to control the
	 * bridge's read-prefetch policy.  Each 3-bit multiplier field
	 * (bits 15:13, 12:10, 9:7 and 6:4) is set to its maximum and the
	 * low four enable bits are all turned on, since bandwidth sharing
	 * is not a concern with a single device below the bridge.
	 */
4880 pci_write_config_word(pdev, 0x52,
4881 (0x7 << 13) |
4882 (0x7 << 10) |
4883 (0x7 << 7) |
4884 (0x7 << 4) |
4885 (0xf << 0));
4886
4887
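	/* Force the bridge's cache line size to 8 dwords (32 bytes). */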
4888 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
4889
	/* Max out the latency timer so the Cassini can run long bursts. */
4893 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff);
4894}
4895
4896static const struct net_device_ops cas_netdev_ops = {
4897 .ndo_open = cas_open,
4898 .ndo_stop = cas_close,
4899 .ndo_start_xmit = cas_start_xmit,
4900 .ndo_get_stats = cas_get_stats,
4901 .ndo_set_rx_mode = cas_set_multicast,
4902 .ndo_do_ioctl = cas_ioctl,
4903 .ndo_tx_timeout = cas_tx_timeout,
4904 .ndo_change_mtu = cas_change_mtu,
4905 .ndo_set_mac_address = eth_mac_addr,
4906 .ndo_validate_addr = eth_validate_addr,
4907#ifdef CONFIG_NET_POLL_CONTROLLER
4908 .ndo_poll_controller = cas_netpoll,
4909#endif
4910};
4911
4912static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4913{
4914 static int cas_version_printed = 0;
4915 unsigned long casreg_len;
4916 struct net_device *dev;
4917 struct cas *cp;
4918 int i, err, pci_using_dac;
4919 u16 pci_cmd;
4920 u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
4921
4922 if (cas_version_printed++ == 0)
4923 pr_info("%s", version);
4924
4925 err = pci_enable_device(pdev);
4926 if (err) {
4927 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
4928 return err;
4929 }
4930
4931 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
4932 dev_err(&pdev->dev, "Cannot find proper PCI device "
4933 "base address, aborting\n");
4934 err = -ENODEV;
4935 goto err_out_disable_pdev;
4936 }
4937
4938 dev = alloc_etherdev(sizeof(*cp));
4939 if (!dev) {
4940 err = -ENOMEM;
4941 goto err_out_disable_pdev;
4942 }
4943 SET_NETDEV_DEV(dev, &pdev->dev);
4944
4945 err = pci_request_regions(pdev, dev->name);
4946 if (err) {
4947 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
4948 goto err_out_free_netdev;
4949 }
4950 pci_set_master(pdev);
4951
	/* Enable parity error response, mask SERR reporting, and try to
	 * turn on memory-write-invalidate.
	 */
4956 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
4957 pci_cmd &= ~PCI_COMMAND_SERR;
4958 pci_cmd |= PCI_COMMAND_PARITY;
4959 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
4960 if (pci_try_set_mwi(pdev))
4961 pr_warn("Could not enable MWI for %s\n", pci_name(pdev));
4962
4963 cas_program_bridge(pdev);
4964
	/* If the PCI cache line size is smaller than the driver's preferred
	 * value, bump it up here; the original value is restored when the
	 * device is removed.
	 */
4971#if 1
4972 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
4973 &orig_cacheline_size);
4974 if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
4975 cas_cacheline_size =
4976 (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
4977 CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
4978 if (pci_write_config_byte(pdev,
4979 PCI_CACHE_LINE_SIZE,
4980 cas_cacheline_size)) {
4981 dev_err(&pdev->dev, "Could not set PCI cache "
4982 "line size\n");
4983 goto err_write_cacheline;
4984 }
4985 }
4986#endif
4987
4988
4989
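	/* Configure DMA masks: prefer 64-bit addressing, fall back to 32-bit. */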
4990 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4991 pci_using_dac = 1;
4992 err = pci_set_consistent_dma_mask(pdev,
4993 DMA_BIT_MASK(64));
4994 if (err < 0) {
4995 dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
4996 "for consistent allocations\n");
4997 goto err_out_free_res;
4998 }
4999
5000 } else {
5001 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5002 if (err) {
5003 dev_err(&pdev->dev, "No usable DMA configuration, "
5004 "aborting\n");
5005 goto err_out_free_res;
5006 }
5007 pci_using_dac = 0;
5008 }
5009
5010 casreg_len = pci_resource_len(pdev, 0);
5011
5012 cp = netdev_priv(dev);
5013 cp->pdev = pdev;
5014#if 1
	/* A value of zero means the cache line size was never changed and
	 * nothing needs to be restored at remove time.
	 */
5016 cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size: 0;
5017#endif
5018 cp->dev = dev;
5019 cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
5020 cassini_debug;
5021
5022#if defined(CONFIG_SPARC)
5023 cp->of_node = pci_device_to_OF_node(pdev);
5024#endif
5025
5026 cp->link_transition = LINK_TRANSITION_UNKNOWN;
5027 cp->link_transition_jiffies_valid = 0;
5028
5029 spin_lock_init(&cp->lock);
5030 spin_lock_init(&cp->rx_inuse_lock);
5031 spin_lock_init(&cp->rx_spare_lock);
5032 for (i = 0; i < N_TX_RINGS; i++) {
5033 spin_lock_init(&cp->stat_lock[i]);
5034 spin_lock_init(&cp->tx_lock[i]);
5035 }
5036 spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
5037 mutex_init(&cp->pm_mutex);
5038
5039 init_timer(&cp->link_timer);
5040 cp->link_timer.function = cas_link_timer;
5041 cp->link_timer.data = (unsigned long) cp;
5042
5043#if 1
	/* Make the initial state of the reset-task counters explicit before
	 * the reset work can ever be scheduled.
	 */
5047 atomic_set(&cp->reset_task_pending, 0);
5048 atomic_set(&cp->reset_task_pending_all, 0);
5049 atomic_set(&cp->reset_task_pending_spare, 0);
5050 atomic_set(&cp->reset_task_pending_mtu, 0);
5051#endif
5052 INIT_WORK(&cp->reset_task, cas_reset_task);
5053
5054
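	/* Default link parameters: use the link_mode module option if it is
	 * valid, otherwise autonegotiate.
	 */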
5055 if (link_mode >= 0 && link_mode < 6)
5056 cp->link_cntl = link_modes[link_mode];
5057 else
5058 cp->link_cntl = BMCR_ANENABLE;
5059 cp->lstate = link_down;
5060 cp->link_transition = LINK_TRANSITION_LINK_DOWN;
5061 netif_carrier_off(cp->dev);
5062 cp->timer_ticks = 0;
5063
5064
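	/* Map the Cassini register space (BAR 0). */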
5065 cp->regs = pci_iomap(pdev, 0, casreg_len);
5066 if (!cp->regs) {
5067 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
5068 goto err_out_free_res;
5069 }
5070 cp->casreg_len = casreg_len;
5071
5072 pci_save_state(pdev);
5073 cas_check_pci_invariants(cp);
5074 cas_hard_reset(cp);
5075 cas_reset(cp, 0);
5076 if (cas_check_invariants(cp))
5077 goto err_out_iounmap;
5078 if (cp->cas_flags & CAS_FLAG_SATURN)
5079 cas_saturn_firmware_init(cp);
5080
5081 cp->init_block = (struct cas_init_block *)
5082 pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
5083 &cp->block_dvma);
5084 if (!cp->init_block) {
5085 dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
5086 goto err_out_iounmap;
5087 }
5088
5089 for (i = 0; i < N_TX_RINGS; i++)
5090 cp->init_txds[i] = cp->init_block->txds[i];
5091
5092 for (i = 0; i < N_RX_DESC_RINGS; i++)
5093 cp->init_rxds[i] = cp->init_block->rxds[i];
5094
5095 for (i = 0; i < N_RX_COMP_RINGS; i++)
5096 cp->init_rxcs[i] = cp->init_block->rxcs[i];
5097
5098 for (i = 0; i < N_RX_FLOWS; i++)
5099 skb_queue_head_init(&cp->rx_flows[i]);
5100
5101 dev->netdev_ops = &cas_netdev_ops;
5102 dev->ethtool_ops = &cas_ethtool_ops;
5103 dev->watchdog_timeo = CAS_TX_TIMEOUT;
5104
5105#ifdef USE_NAPI
5106 netif_napi_add(dev, &cp->napi, cas_poll, 64);
5107#endif
5108 dev->irq = pdev->irq;
5109 dev->dma = 0;
5110
5111
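	/* Advertise hardware checksumming and scatter/gather only when the
	 * chip actually supports checksum offload.
	 */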
5112 if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
5113 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
5114
5115 if (pci_using_dac)
5116 dev->features |= NETIF_F_HIGHDMA;
5117
5118 if (register_netdev(dev)) {
5119 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
5120 goto err_out_free_consistent;
5121 }
5122
5123 i = readl(cp->regs + REG_BIM_CFG);
5124 netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n",
5125 (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
5126 (i & BIM_CFG_32BIT) ? "32" : "64",
5127 (i & BIM_CFG_66MHZ) ? "66" : "33",
5128 (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
5129 dev->dev_addr);
5130
5131 pci_set_drvdata(pdev, dev);
5132 cp->hw_running = 1;
5133 cas_entropy_reset(cp);
5134 cas_phy_init(cp);
5135 cas_begin_auto_negotiation(cp, NULL);
5136 return 0;
5137
5138err_out_free_consistent:
5139 pci_free_consistent(pdev, sizeof(struct cas_init_block),
5140 cp->init_block, cp->block_dvma);
5141
5142err_out_iounmap:
5143 mutex_lock(&cp->pm_mutex);
5144 if (cp->hw_running)
5145 cas_shutdown(cp);
5146 mutex_unlock(&cp->pm_mutex);
5147
5148 pci_iounmap(pdev, cp->regs);
5149
5150
5151err_out_free_res:
5152 pci_release_regions(pdev);
5153
5154err_write_cacheline:
	/* Try to restore the original cache line size in case the failure
	 * happened after it was changed.
	 */
5158 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);
5159
5160err_out_free_netdev:
5161 free_netdev(dev);
5162
5163err_out_disable_pdev:
5164 pci_disable_device(pdev);
5165 return -ENODEV;
5166}
5167
5168static void cas_remove_one(struct pci_dev *pdev)
5169{
5170 struct net_device *dev = pci_get_drvdata(pdev);
5171 struct cas *cp;
5172 if (!dev)
5173 return;
5174
5175 cp = netdev_priv(dev);
5176 unregister_netdev(dev);
5177
5178 vfree(cp->fw_data);
5179
5180 mutex_lock(&cp->pm_mutex);
5181 cancel_work_sync(&cp->reset_task);
5182 if (cp->hw_running)
5183 cas_shutdown(cp);
5184 mutex_unlock(&cp->pm_mutex);
5185
5186#if 1
5187 if (cp->orig_cacheline_size) {
		/* Restore the cache line size that was modified at probe time. */
5191 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
5192 cp->orig_cacheline_size);
5193 }
5194#endif
5195 pci_free_consistent(pdev, sizeof(struct cas_init_block),
5196 cp->init_block, cp->block_dvma);
5197 pci_iounmap(pdev, cp->regs);
5198 free_netdev(dev);
5199 pci_release_regions(pdev);
5200 pci_disable_device(pdev);
5201}
5202
5203#ifdef CONFIG_PM
5204static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
5205{
5206 struct net_device *dev = pci_get_drvdata(pdev);
5207 struct cas *cp = netdev_priv(dev);
5208 unsigned long flags;
5209
5210 mutex_lock(&cp->pm_mutex);
5211
5212
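	/* If the interface is up, detach it and quiesce the hardware before
	 * powering down.
	 */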
5213 if (cp->opened) {
5214 netif_device_detach(dev);
5215
5216 cas_lock_all_save(cp, flags);
5217
		/* Passing 0 as the second argument of cas_reset() should be
		 * fine here: on resume, cas_init_hw() is called with its
		 * second argument set, which restarts autonegotiation.
		 */
5223 cas_reset(cp, 0);
5224 cas_clean_rings(cp);
5225 cas_unlock_all_restore(cp, flags);
5226 }
5227
5228 if (cp->hw_running)
5229 cas_shutdown(cp);
5230 mutex_unlock(&cp->pm_mutex);
5231
5232 return 0;
5233}
5234
5235static int cas_resume(struct pci_dev *pdev)
5236{
5237 struct net_device *dev = pci_get_drvdata(pdev);
5238 struct cas *cp = netdev_priv(dev);
5239
5240 netdev_info(dev, "resuming\n");
5241
5242 mutex_lock(&cp->pm_mutex);
5243 cas_hard_reset(cp);
5244 if (cp->opened) {
5245 unsigned long flags;
5246 cas_lock_all_save(cp, flags);
5247 cas_reset(cp, 0);
5248 cp->hw_running = 1;
5249 cas_clean_rings(cp);
5250 cas_init_hw(cp, 1);
5251 cas_unlock_all_restore(cp, flags);
5252
5253 netif_device_attach(dev);
5254 }
5255 mutex_unlock(&cp->pm_mutex);
5256 return 0;
5257}
5258#endif
5259
5260static struct pci_driver cas_driver = {
5261 .name = DRV_MODULE_NAME,
5262 .id_table = cas_pci_tbl,
5263 .probe = cas_init_one,
5264 .remove = cas_remove_one,
5265#ifdef CONFIG_PM
5266 .suspend = cas_suspend,
5267 .resume = cas_resume
5268#endif
5269};
5270
5271static int __init cas_init(void)
5272{
5273 if (linkdown_timeout > 0)
5274 link_transition_timeout = linkdown_timeout * HZ;
5275 else
5276 link_transition_timeout = 0;
5277
5278 return pci_register_driver(&cas_driver);
5279}
5280
5281static void __exit cas_cleanup(void)
5282{
5283 pci_unregister_driver(&cas_driver);
5284}
5285
5286module_init(cas_init);
5287module_exit(cas_cleanup);
5288