/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
 *
 * Driver for the Sun Cassini and Cassini+ PCI gigabit ethernet controllers,
 * including the National Semiconductor Saturn variant.
 */
67#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
68
69#include <linux/module.h>
70#include <linux/kernel.h>
71#include <linux/types.h>
72#include <linux/compiler.h>
73#include <linux/slab.h>
74#include <linux/delay.h>
75#include <linux/init.h>
76#include <linux/interrupt.h>
77#include <linux/vmalloc.h>
78#include <linux/ioport.h>
79#include <linux/pci.h>
80#include <linux/mm.h>
81#include <linux/highmem.h>
82#include <linux/list.h>
83#include <linux/dma-mapping.h>
84
85#include <linux/netdevice.h>
86#include <linux/etherdevice.h>
87#include <linux/skbuff.h>
88#include <linux/ethtool.h>
89#include <linux/crc32.h>
90#include <linux/random.h>
91#include <linux/mii.h>
92#include <linux/ip.h>
93#include <linux/tcp.h>
94#include <linux/mutex.h>
95#include <linux/firmware.h>
96
97#include <net/checksum.h>
98
99#include <linux/atomic.h>
100#include <asm/io.h>
101#include <asm/byteorder.h>
102#include <linux/uaccess.h>
103
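/* Wrappers for temporarily mapping RX buffer pages into kernel virtual
 * address space while data is copied out of them.
 */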
104#define cas_page_map(x) kmap_atomic((x))
105#define cas_page_unmap(x) kunmap_atomic((x))
106#define CAS_NCPUS num_online_cpus()
107
108#define cas_skb_release(x) netif_rx(x)
109
110
111#define USE_HP_WORKAROUND
112#define HP_WORKAROUND_DEFAULT
113#define CAS_HP_ALT_FIRMWARE cas_prog_null
114
115#include "cassini.h"
116
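/* Compile-time feature selection: TX completion write-back, RX interrupt
 * blanking, optional use of the extra PCI interrupts (INTB/C/D), etc.
 */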
117#define USE_TX_COMPWB
118#define USE_CSMA_CD_PROTO
119#define USE_RX_BLANK
120#undef USE_ENTROPY_DEV
121
122
123
124
125#undef USE_PCI_INTB
126#undef USE_PCI_INTC
127#undef USE_PCI_INTD
128#undef USE_QOS
129
130#undef USE_VPD_DEBUG
131
132
133#define USE_PAGE_ORDER
134#define RX_DONT_BATCH 0
135#define RX_COPY_ALWAYS 0
136#define RX_COPY_MIN 64
137#undef RX_COUNT_BUFFERS
138
139#define DRV_MODULE_NAME "cassini"
140#define DRV_MODULE_VERSION "1.6"
141#define DRV_MODULE_RELDATE "21 May 2008"
142
143#define CAS_DEF_MSG_ENABLE \
144 (NETIF_MSG_DRV | \
145 NETIF_MSG_PROBE | \
146 NETIF_MSG_LINK | \
147 NETIF_MSG_TIMER | \
148 NETIF_MSG_IFDOWN | \
149 NETIF_MSG_IFUP | \
150 NETIF_MSG_RX_ERR | \
151 NETIF_MSG_TX_ERR)
152
153
154
155
156#define CAS_TX_TIMEOUT (HZ)
157#define CAS_LINK_TIMEOUT (22*HZ/10)
158#define CAS_LINK_FAST_TIMEOUT (1)
159
160
161
162
163#define STOP_TRIES_PHY 1000
164#define STOP_TRIES 5000
165
166
167
168
169
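/* Frame size limits.  The maximum MTU is bounded by twice the RX page size
 * minus 0x50 bytes of overhead, capped at 9000 bytes.
 */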
170#define CAS_MIN_FRAME 97
171#define CAS_1000MB_MIN_FRAME 255
172#define CAS_MIN_MTU 60
173#define CAS_MAX_MTU min(((cp->page_size << 1) - 0x50), 9000)
174
175#if 1
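/* The CAS_RESET_* codes in the #else branch below are unused in this
 * configuration; separate atomic counters (reset_task_pending_*) are used
 * instead to track which kinds of reset are pending.
 */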
176
177
178
179
180#else
181#define CAS_RESET_MTU 1
182#define CAS_RESET_ALL 2
183#define CAS_RESET_SPARE 3
184#endif
185
186static char version[] =
187 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
188
189static int cassini_debug = -1;
190static int link_mode;
191
192MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
193MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
194MODULE_LICENSE("GPL");
195MODULE_FIRMWARE("sun/cassini.bin");
196module_param(cassini_debug, int, 0);
197MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
198module_param(link_mode, int, 0);
199MODULE_PARM_DESC(link_mode, "default link mode");
200
201
202
203
204
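/* Default value (in seconds) of the minimum interval between the resets used
 * to work around the PCS link-down issue; see the linkdown_timeout module
 * parameter below.
 */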
205#define DEFAULT_LINKDOWN_TIMEOUT 5
206
207
208
209static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
210module_param(linkdown_timeout, int, 0);
211MODULE_PARM_DESC(linkdown_timeout,
212"min reset interval in sec. for PCS linkdown issue; disabled if not positive");
213
214
215
216
217
218
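/* Jiffies-based counterpart of linkdown_timeout, presumably set from it at
 * module init (outside this section); a value of 0 disables the PCS reset
 * workaround.
 */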
219static int link_transition_timeout;
220
221
222
/* Values selectable with the link_mode module parameter. */
static u16 link_modes[] = {
	BMCR_ANENABLE,			 /* 0 : autoneg */
	0,				 /* 1 : 10bt half duplex */
	BMCR_SPEED100,			 /* 2 : 100bt half duplex */
	BMCR_FULLDPLX,			 /* 3 : 10bt full duplex */
	BMCR_SPEED100|BMCR_FULLDPLX,	 /* 4 : 100bt full duplex */
	CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
};
231
232static const struct pci_device_id cas_pci_tbl[] = {
233 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
234 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
235 { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
236 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
237 { 0, }
238};
239
240MODULE_DEVICE_TABLE(pci, cas_pci_tbl);
241
242static void cas_set_link_modes(struct cas *cp);
243
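/* All TX ring locks are taken in ring order; the per-ring lockdep subclass
 * keeps the nested acquisition from triggering false positives.
 */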
244static inline void cas_lock_tx(struct cas *cp)
245{
246 int i;
247
248 for (i = 0; i < N_TX_RINGS; i++)
249 spin_lock_nested(&cp->tx_lock[i], i);
250}
251
252static inline void cas_lock_all(struct cas *cp)
253{
254 spin_lock_irq(&cp->lock);
255 cas_lock_tx(cp);
256}
257
258
259
260
261
262
263
264
265
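/* irqsave variants of cas_lock_all()/cas_unlock_all(), written as macros so
 * that the caller's flags variable is used directly.
 */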
266#define cas_lock_all_save(cp, flags) \
267do { \
268 struct cas *xxxcp = (cp); \
269 spin_lock_irqsave(&xxxcp->lock, flags); \
270 cas_lock_tx(xxxcp); \
271} while (0)
272
273static inline void cas_unlock_tx(struct cas *cp)
274{
275 int i;
276
277 for (i = N_TX_RINGS; i > 0; i--)
278 spin_unlock(&cp->tx_lock[i - 1]);
279}
280
281static inline void cas_unlock_all(struct cas *cp)
282{
283 cas_unlock_tx(cp);
284 spin_unlock_irq(&cp->lock);
285}
286
287#define cas_unlock_all_restore(cp, flags) \
288do { \
289 struct cas *xxxcp = (cp); \
290 cas_unlock_tx(xxxcp); \
291 spin_unlock_irqrestore(&xxxcp->lock, flags); \
292} while (0)
293
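/* Ring 0 masks the whole chip via the main interrupt mask register; on
 * Cassini+ (REG_PLUS) the other completion rings have their own per-ring
 * mask registers.
 */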
294static void cas_disable_irq(struct cas *cp, const int ring)
295{
296
297 if (ring == 0) {
298 writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
299 return;
300 }
301
302
303 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
304 switch (ring) {
305#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
306#ifdef USE_PCI_INTB
307 case 1:
308#endif
309#ifdef USE_PCI_INTC
310 case 2:
311#endif
312#ifdef USE_PCI_INTD
313 case 3:
314#endif
315 writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
316 cp->regs + REG_PLUS_INTRN_MASK(ring));
317 break;
318#endif
319 default:
320 writel(INTRN_MASK_CLEAR_ALL, cp->regs +
321 REG_PLUS_INTRN_MASK(ring));
322 break;
323 }
324 }
325}
326
327static inline void cas_mask_intr(struct cas *cp)
328{
329 int i;
330
331 for (i = 0; i < N_RX_COMP_RINGS; i++)
332 cas_disable_irq(cp, i);
333}
334
335static void cas_enable_irq(struct cas *cp, const int ring)
336{
337 if (ring == 0) {
338 writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
339 return;
340 }
341
342 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
343 switch (ring) {
344#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
345#ifdef USE_PCI_INTB
346 case 1:
347#endif
348#ifdef USE_PCI_INTC
349 case 2:
350#endif
351#ifdef USE_PCI_INTD
352 case 3:
353#endif
354 writel(INTRN_MASK_RX_EN, cp->regs +
355 REG_PLUS_INTRN_MASK(ring));
356 break;
357#endif
358 default:
359 break;
360 }
361 }
362}
363
364static inline void cas_unmask_intr(struct cas *cp)
365{
366 int i;
367
368 for (i = 0; i < N_RX_COMP_RINGS; i++)
369 cas_enable_irq(cp, i);
370}
371
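/* The chip has an on-board entropy source; this support is compiled out
 * unless USE_ENTROPY_DEV is defined (the batch_entropy_store() interface it
 * used no longer exists in current kernels).
 */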
372static inline void cas_entropy_gather(struct cas *cp)
373{
374#ifdef USE_ENTROPY_DEV
375 if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
376 return;
377
378 batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
379 readl(cp->regs + REG_ENTROPY_IV),
380 sizeof(uint64_t)*8);
381#endif
382}
383
384static inline void cas_entropy_reset(struct cas *cp)
385{
386#ifdef USE_ENTROPY_DEV
387 if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
388 return;
389
390 writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
391 cp->regs + REG_BIM_LOCAL_DEV_EN);
392 writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
393 writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);
394
395
396 if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
397 cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
398#endif
399}
400
401
402
403
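/* PHY register access goes through the MIF frame register: issue the
 * read/write command and poll for the turn-around bit to signal completion.
 * cas_phy_read() returns 0xFFFF on timeout.
 */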
404static u16 cas_phy_read(struct cas *cp, int reg)
405{
406 u32 cmd;
407 int limit = STOP_TRIES_PHY;
408
409 cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
410 cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
411 cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
412 cmd |= MIF_FRAME_TURN_AROUND_MSB;
413 writel(cmd, cp->regs + REG_MIF_FRAME);
414
415
416 while (limit-- > 0) {
417 udelay(10);
418 cmd = readl(cp->regs + REG_MIF_FRAME);
419 if (cmd & MIF_FRAME_TURN_AROUND_LSB)
420 return cmd & MIF_FRAME_DATA_MASK;
421 }
422 return 0xFFFF;
423}
424
425static int cas_phy_write(struct cas *cp, int reg, u16 val)
426{
427 int limit = STOP_TRIES_PHY;
428 u32 cmd;
429
430 cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
431 cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
432 cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
433 cmd |= MIF_FRAME_TURN_AROUND_MSB;
434 cmd |= val & MIF_FRAME_DATA_MASK;
435 writel(cmd, cp->regs + REG_MIF_FRAME);
436
437
438 while (limit-- > 0) {
439 udelay(10);
440 cmd = readl(cp->regs + REG_MIF_FRAME);
441 if (cmd & MIF_FRAME_TURN_AROUND_LSB)
442 return 0;
443 }
444 return -1;
445}
446
447static void cas_phy_powerup(struct cas *cp)
448{
449 u16 ctl = cas_phy_read(cp, MII_BMCR);
450
451 if ((ctl & BMCR_PDOWN) == 0)
452 return;
453 ctl &= ~BMCR_PDOWN;
454 cas_phy_write(cp, MII_BMCR, ctl);
455}
456
457static void cas_phy_powerdown(struct cas *cp)
458{
459 u16 ctl = cas_phy_read(cp, MII_BMCR);
460
461 if (ctl & BMCR_PDOWN)
462 return;
463 ctl |= BMCR_PDOWN;
464 cas_phy_write(cp, MII_BMCR, ctl);
465}
466
467
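/* Local page allocation for RX buffers: the chip is handed whole DMA-mapped
 * pages of order cp->page_order, tracked with cas_page_t.
 */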
468static int cas_page_free(struct cas *cp, cas_page_t *page)
469{
470 pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
471 PCI_DMA_FROMDEVICE);
472 __free_pages(page->buffer, cp->page_order);
473 kfree(page);
474 return 0;
475}
476
477#ifdef RX_COUNT_BUFFERS
478#define RX_USED_ADD(x, y) ((x)->used += (y))
479#define RX_USED_SET(x, y) ((x)->used = (y))
480#else
481#define RX_USED_ADD(x, y)
482#define RX_USED_SET(x, y)
483#endif
484
485
486
487
488static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
489{
490 cas_page_t *page;
491
492 page = kmalloc(sizeof(cas_page_t), flags);
493 if (!page)
494 return NULL;
495
496 INIT_LIST_HEAD(&page->list);
497 RX_USED_SET(page, 0);
498 page->buffer = alloc_pages(flags, cp->page_order);
499 if (!page->buffer)
500 goto page_err;
501 page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
502 cp->page_size, PCI_DMA_FROMDEVICE);
503 return page;
504
505page_err:
506 kfree(page);
507 return NULL;
508}
509
510
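/* Spare buffer management: rx_spare_list holds pre-allocated replacement
 * pages, while rx_inuse_list holds pages that the network stack may still be
 * referencing and that get recycled later.
 */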
511static void cas_spare_init(struct cas *cp)
512{
513 spin_lock(&cp->rx_inuse_lock);
514 INIT_LIST_HEAD(&cp->rx_inuse_list);
515 spin_unlock(&cp->rx_inuse_lock);
516
517 spin_lock(&cp->rx_spare_lock);
518 INIT_LIST_HEAD(&cp->rx_spare_list);
519 cp->rx_spares_needed = RX_SPARE_COUNT;
520 spin_unlock(&cp->rx_spare_lock);
521}
522
523
524static void cas_spare_free(struct cas *cp)
525{
526 struct list_head list, *elem, *tmp;
527
528
529 INIT_LIST_HEAD(&list);
530 spin_lock(&cp->rx_spare_lock);
531 list_splice_init(&cp->rx_spare_list, &list);
532 spin_unlock(&cp->rx_spare_lock);
533 list_for_each_safe(elem, tmp, &list) {
534 cas_page_free(cp, list_entry(elem, cas_page_t, list));
535 }
536
537 INIT_LIST_HEAD(&list);
538#if 1
539
540
541
542
543 spin_lock(&cp->rx_inuse_lock);
544 list_splice_init(&cp->rx_inuse_list, &list);
545 spin_unlock(&cp->rx_inuse_lock);
546#else
547 spin_lock(&cp->rx_spare_lock);
548 list_splice_init(&cp->rx_inuse_list, &list);
549 spin_unlock(&cp->rx_spare_lock);
550#endif
551 list_for_each_safe(elem, tmp, &list) {
552 cas_page_free(cp, list_entry(elem, cas_page_t, list));
553 }
554}
555
556
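/* Replenish the spare list: pages on the in-use list whose refcount has
 * dropped back to 1 are recycled, and new pages are allocated for whatever
 * deficit remains.
 */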
557static void cas_spare_recover(struct cas *cp, const gfp_t flags)
558{
559 struct list_head list, *elem, *tmp;
560 int needed, i;
561
562
563
564
565
566
567 INIT_LIST_HEAD(&list);
568 spin_lock(&cp->rx_inuse_lock);
569 list_splice_init(&cp->rx_inuse_list, &list);
570 spin_unlock(&cp->rx_inuse_lock);
571
572 list_for_each_safe(elem, tmp, &list) {
573 cas_page_t *page = list_entry(elem, cas_page_t, list);
574
575
576
577
578
579
580
581
582
583
584
585
586
587 if (page_count(page->buffer) > 1)
588 continue;
589
590 list_del(elem);
591 spin_lock(&cp->rx_spare_lock);
592 if (cp->rx_spares_needed > 0) {
593 list_add(elem, &cp->rx_spare_list);
594 cp->rx_spares_needed--;
595 spin_unlock(&cp->rx_spare_lock);
596 } else {
597 spin_unlock(&cp->rx_spare_lock);
598 cas_page_free(cp, page);
599 }
600 }
601
602
603 if (!list_empty(&list)) {
604 spin_lock(&cp->rx_inuse_lock);
605 list_splice(&list, &cp->rx_inuse_list);
606 spin_unlock(&cp->rx_inuse_lock);
607 }
608
609 spin_lock(&cp->rx_spare_lock);
610 needed = cp->rx_spares_needed;
611 spin_unlock(&cp->rx_spare_lock);
612 if (!needed)
613 return;
614
615
616 INIT_LIST_HEAD(&list);
617 i = 0;
618 while (i < needed) {
619 cas_page_t *spare = cas_page_alloc(cp, flags);
620 if (!spare)
621 break;
622 list_add(&spare->list, &list);
623 i++;
624 }
625
626 spin_lock(&cp->rx_spare_lock);
627 list_splice(&list, &cp->rx_spare_list);
628 cp->rx_spares_needed -= i;
629 spin_unlock(&cp->rx_spare_lock);
630}
631
632
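/* Pull a page off the spare list for the RX rings.  Every
 * RX_SPARE_RECOVER_VAL removals the reset task is scheduled so the pool can
 * be replenished from process context.
 */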
633static cas_page_t *cas_page_dequeue(struct cas *cp)
634{
635 struct list_head *entry;
636 int recover;
637
638 spin_lock(&cp->rx_spare_lock);
639 if (list_empty(&cp->rx_spare_list)) {
640
641 spin_unlock(&cp->rx_spare_lock);
642 cas_spare_recover(cp, GFP_ATOMIC);
643 spin_lock(&cp->rx_spare_lock);
644 if (list_empty(&cp->rx_spare_list)) {
645 netif_err(cp, rx_err, cp->dev,
646 "no spare buffers available\n");
647 spin_unlock(&cp->rx_spare_lock);
648 return NULL;
649 }
650 }
651
652 entry = cp->rx_spare_list.next;
653 list_del(entry);
654 recover = ++cp->rx_spares_needed;
655 spin_unlock(&cp->rx_spare_lock);
656
657
658 if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
659#if 1
660 atomic_inc(&cp->reset_task_pending);
661 atomic_inc(&cp->reset_task_pending_spare);
662 schedule_work(&cp->reset_task);
663#else
664 atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
665 schedule_work(&cp->reset_task);
666#endif
667 }
668 return list_entry(entry, cas_page_t, list);
669}
670
671
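/* Enable or disable hardware MIF polling of the PHY's BMSR so that link
 * changes show up as MIF interrupts instead of requiring software polling.
 */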
672static void cas_mif_poll(struct cas *cp, const int enable)
673{
674 u32 cfg;
675
676 cfg = readl(cp->regs + REG_MIF_CFG);
677 cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);
678
679 if (cp->phy_type & CAS_PHY_MII_MDIO1)
680 cfg |= MIF_CFG_PHY_SELECT;
681
682
683 if (enable) {
684 cfg |= MIF_CFG_POLL_EN;
685 cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
686 cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
687 }
688 writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
689 cp->regs + REG_MIF_MASK);
690 writel(cfg, cp->regs + REG_MIF_CFG);
691}
692
693
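/* Start autonegotiation or forced-mode link configuration.  A NULL ep reuses
 * the current cp->link_cntl; if the settings changed while the link was not
 * down, a full chip reset is scheduled instead of just poking the PHY/PCS.
 */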
694static void cas_begin_auto_negotiation(struct cas *cp,
695 const struct ethtool_link_ksettings *ep)
696{
697 u16 ctl;
698#if 1
699 int lcntl;
700 int changed = 0;
701 int oldstate = cp->lstate;
702 int link_was_not_down = !(oldstate == link_down);
703#endif
704
705 if (!ep)
706 goto start_aneg;
707 lcntl = cp->link_cntl;
708 if (ep->base.autoneg == AUTONEG_ENABLE) {
709 cp->link_cntl = BMCR_ANENABLE;
710 } else {
711 u32 speed = ep->base.speed;
712 cp->link_cntl = 0;
713 if (speed == SPEED_100)
714 cp->link_cntl |= BMCR_SPEED100;
715 else if (speed == SPEED_1000)
716 cp->link_cntl |= CAS_BMCR_SPEED1000;
717 if (ep->base.duplex == DUPLEX_FULL)
718 cp->link_cntl |= BMCR_FULLDPLX;
719 }
720#if 1
721 changed = (lcntl != cp->link_cntl);
722#endif
723start_aneg:
724 if (cp->lstate == link_up) {
725 netdev_info(cp->dev, "PCS link down\n");
726 } else {
727 if (changed) {
728 netdev_info(cp->dev, "link configuration changed\n");
729 }
730 }
731 cp->lstate = link_down;
732 cp->link_transition = LINK_TRANSITION_LINK_DOWN;
733 if (!cp->hw_running)
734 return;
735#if 1
736
737
738
739
740
741 if (oldstate == link_up)
742 netif_carrier_off(cp->dev);
743 if (changed && link_was_not_down) {
744
745
746
747
748
749 atomic_inc(&cp->reset_task_pending);
750 atomic_inc(&cp->reset_task_pending_all);
751 schedule_work(&cp->reset_task);
752 cp->timer_ticks = 0;
753 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
754 return;
755 }
756#endif
757 if (cp->phy_type & CAS_PHY_SERDES) {
758 u32 val = readl(cp->regs + REG_PCS_MII_CTRL);
759
760 if (cp->link_cntl & BMCR_ANENABLE) {
761 val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
762 cp->lstate = link_aneg;
763 } else {
764 if (cp->link_cntl & BMCR_FULLDPLX)
765 val |= PCS_MII_CTRL_DUPLEX;
766 val &= ~PCS_MII_AUTONEG_EN;
767 cp->lstate = link_force_ok;
768 }
769 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
770 writel(val, cp->regs + REG_PCS_MII_CTRL);
771
772 } else {
773 cas_mif_poll(cp, 0);
774 ctl = cas_phy_read(cp, MII_BMCR);
775 ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
776 CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
777 ctl |= cp->link_cntl;
778 if (ctl & BMCR_ANENABLE) {
779 ctl |= BMCR_ANRESTART;
780 cp->lstate = link_aneg;
781 } else {
782 cp->lstate = link_force_ok;
783 }
784 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
785 cas_phy_write(cp, MII_BMCR, ctl);
786 cas_mif_poll(cp, 1);
787 }
788
789 cp->timer_ticks = 0;
790 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
791}
792
793
794static int cas_reset_mii_phy(struct cas *cp)
795{
796 int limit = STOP_TRIES_PHY;
797 u16 val;
798
799 cas_phy_write(cp, MII_BMCR, BMCR_RESET);
800 udelay(100);
801 while (--limit) {
802 val = cas_phy_read(cp, MII_BMCR);
803 if ((val & BMCR_RESET) == 0)
804 break;
805 udelay(10);
806 }
807 return limit <= 0;
808}
809
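/* Saturn boards with the NS DP83065 PHY need firmware ("sun/cassini.bin").
 * The first two bytes of the image are the little-endian load address; the
 * rest is the data that gets downloaded into the PHY later.
 */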
810static void cas_saturn_firmware_init(struct cas *cp)
811{
812 const struct firmware *fw;
813 const char fw_name[] = "sun/cassini.bin";
814 int err;
815
816 if (PHY_NS_DP83065 != cp->phy_id)
817 return;
818
819 err = request_firmware(&fw, fw_name, &cp->pdev->dev);
820 if (err) {
821 pr_err("Failed to load firmware \"%s\"\n",
822 fw_name);
823 return;
824 }
825 if (fw->size < 2) {
826 pr_err("bogus length %zu in \"%s\"\n",
827 fw->size, fw_name);
828 goto out;
829 }
830 cp->fw_load_addr= fw->data[1] << 8 | fw->data[0];

831 cp->fw_size = fw->size - 2;
832 cp->fw_data = vmalloc(cp->fw_size);
833 if (!cp->fw_data)
834 goto out;
835 memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
836out:
837 release_firmware(fw);
838}
839
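/* Download the firmware into the DP83065 over MII: select the PHY's memory
 * space, program a few vendor-specified setup registers, write the image one
 * byte at a time, then flag the firmware as ready to run.
 */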
840static void cas_saturn_firmware_load(struct cas *cp)
841{
842 int i;
843
844 if (!cp->fw_data)
845 return;
846
847 cas_phy_powerdown(cp);
848
849
850 cas_phy_write(cp, DP83065_MII_MEM, 0x0);
851
852
853 cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
854 cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
855 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
856 cas_phy_write(cp, DP83065_MII_REGD, 0x82);
857 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
858 cas_phy_write(cp, DP83065_MII_REGD, 0x0);
859 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
860 cas_phy_write(cp, DP83065_MII_REGD, 0x39);
861
862
863 cas_phy_write(cp, DP83065_MII_MEM, 0x1);
864 cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
865 for (i = 0; i < cp->fw_size; i++)
866 cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);
867
868
869 cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
870 cas_phy_write(cp, DP83065_MII_REGD, 0x1);
871}
872
873
874
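/* Bring up the PHY: either an external MII PHY (with per-vendor workarounds
 * for some Lucent and Broadcom parts and the Saturn-specific config), or the
 * internal PCS/SERDES used for fiber, and program the advertisement
 * registers accordingly.
 */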
875static void cas_phy_init(struct cas *cp)
876{
877 u16 val;
878
879
880 if (CAS_PHY_MII(cp->phy_type)) {
881 writel(PCS_DATAPATH_MODE_MII,
882 cp->regs + REG_PCS_DATAPATH_MODE);
883
884 cas_mif_poll(cp, 0);
885 cas_reset_mii_phy(cp);
886
887 if (PHY_LUCENT_B0 == cp->phy_id) {
888
889 cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
890 cas_phy_write(cp, MII_BMCR, 0x00f1);
891 cas_phy_write(cp, LUCENT_MII_REG, 0x0);
892
893 } else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
894
895 cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
896 cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
897 cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
898 cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
899 cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
900 cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
901 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
902 cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
903 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
904 cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
905 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);
906
907 } else if (PHY_BROADCOM_5411 == cp->phy_id) {
908 val = cas_phy_read(cp, BROADCOM_MII_REG4);
909 val = cas_phy_read(cp, BROADCOM_MII_REG4);
910 if (val & 0x0080) {
911
912 cas_phy_write(cp, BROADCOM_MII_REG4,
913 val & ~0x0080);
914 }
915
916 } else if (cp->cas_flags & CAS_FLAG_SATURN) {
917 writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
918 SATURN_PCFG_FSI : 0x0,
919 cp->regs + REG_SATURN_PCFG);
920
921
922
923
924
925 if (PHY_NS_DP83065 == cp->phy_id) {
926 cas_saturn_firmware_load(cp);
927 }
928 cas_phy_powerup(cp);
929 }
930
931
932 val = cas_phy_read(cp, MII_BMCR);
933 val &= ~BMCR_ANENABLE;
934 cas_phy_write(cp, MII_BMCR, val);
935 udelay(10);
936
937 cas_phy_write(cp, MII_ADVERTISE,
938 cas_phy_read(cp, MII_ADVERTISE) |
939 (ADVERTISE_10HALF | ADVERTISE_10FULL |
940 ADVERTISE_100HALF | ADVERTISE_100FULL |
941 CAS_ADVERTISE_PAUSE |
942 CAS_ADVERTISE_ASYM_PAUSE));
943
944 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
945
946
947
948 val = cas_phy_read(cp, CAS_MII_1000_CTRL);
949 val &= ~CAS_ADVERTISE_1000HALF;
950 val |= CAS_ADVERTISE_1000FULL;
951 cas_phy_write(cp, CAS_MII_1000_CTRL, val);
952 }
953
954 } else {
955
956 u32 val;
957 int limit;
958
959 writel(PCS_DATAPATH_MODE_SERDES,
960 cp->regs + REG_PCS_DATAPATH_MODE);
961
962
963 if (cp->cas_flags & CAS_FLAG_SATURN)
964 writel(0, cp->regs + REG_SATURN_PCFG);
965
966
967 val = readl(cp->regs + REG_PCS_MII_CTRL);
968 val |= PCS_MII_RESET;
969 writel(val, cp->regs + REG_PCS_MII_CTRL);
970
971 limit = STOP_TRIES;
972 while (--limit > 0) {
973 udelay(10);
974 if ((readl(cp->regs + REG_PCS_MII_CTRL) &
975 PCS_MII_RESET) == 0)
976 break;
977 }
978 if (limit <= 0)
979 netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
980 readl(cp->regs + REG_PCS_STATE_MACHINE));
981
982
983
984
985 writel(0x0, cp->regs + REG_PCS_CFG);
986
987
988 val = readl(cp->regs + REG_PCS_MII_ADVERT);
989 val &= ~PCS_MII_ADVERT_HD;
990 val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
991 PCS_MII_ADVERT_ASYM_PAUSE);
992 writel(val, cp->regs + REG_PCS_MII_ADVERT);
993
994
995 writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);
996
997
998 writel(PCS_SERDES_CTRL_SYNCD_EN,
999 cp->regs + REG_PCS_SERDES_CTRL);
1000 }
1001}
1002
1003
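/* Check the PCS link state.  Returns 1 when the caller should schedule a
 * chip reset, which is part of the workaround for the PCS link-down issue
 * governed by link_transition_timeout.
 */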
1004static int cas_pcs_link_check(struct cas *cp)
1005{
1006 u32 stat, state_machine;
1007 int retval = 0;
1008
1009
1010
1011
1012
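	/* The link status bit is latched; when the link has gone down and come
	 * back up, a second read is needed to observe the current state.
	 */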
1013 stat = readl(cp->regs + REG_PCS_MII_STATUS);
1014 if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
1015 stat = readl(cp->regs + REG_PCS_MII_STATUS);
1016
1017
1018
1019
1020 if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
1021 PCS_MII_STATUS_REMOTE_FAULT)) ==
1022 (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
1023 netif_info(cp, link, cp->dev, "PCS RemoteFault\n");
1024
1025
1026
1027
1028 state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
1029 if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
1030 stat &= ~PCS_MII_STATUS_LINK_STATUS;
1031 } else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
1032 stat |= PCS_MII_STATUS_LINK_STATUS;
1033 }
1034
1035 if (stat & PCS_MII_STATUS_LINK_STATUS) {
1036 if (cp->lstate != link_up) {
1037 if (cp->opened) {
1038 cp->lstate = link_up;
1039 cp->link_transition = LINK_TRANSITION_LINK_UP;
1040
1041 cas_set_link_modes(cp);
1042 netif_carrier_on(cp->dev);
1043 }
1044 }
1045 } else if (cp->lstate == link_up) {
1046 cp->lstate = link_down;
1047 if (link_transition_timeout != 0 &&
1048 cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
1049 !cp->link_transition_jiffies_valid) {
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062 retval = 1;
1063 cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
1064 cp->link_transition_jiffies = jiffies;
1065 cp->link_transition_jiffies_valid = 1;
1066 } else {
1067 cp->link_transition = LINK_TRANSITION_ON_FAILURE;
1068 }
1069 netif_carrier_off(cp->dev);
1070 if (cp->opened)
1071 netif_info(cp, link, cp->dev, "PCS link down\n");
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081 if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
1082
1083 stat = readl(cp->regs + REG_PCS_SERDES_STATE);
1084 if (stat == 0x03)
1085 return 1;
1086 }
1087 } else if (cp->lstate == link_down) {
1088 if (link_transition_timeout != 0 &&
1089 cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
1090 !cp->link_transition_jiffies_valid) {
1091
1092
1093
1094
1095
1096 retval = 1;
1097 cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
1098 cp->link_transition_jiffies = jiffies;
1099 cp->link_transition_jiffies_valid = 1;
1100 } else {
1101 cp->link_transition = LINK_TRANSITION_STILL_FAILED;
1102 }
1103 }
1104
1105 return retval;
1106}
1107
1108static int cas_pcs_interrupt(struct net_device *dev,
1109 struct cas *cp, u32 status)
1110{
1111 u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);
1112
1113 if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
1114 return 0;
1115 return cas_pcs_link_check(cp);
1116}
1117
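/* TX MAC interrupt: most of these bits signal that a 16-bit hardware counter
 * wrapped, hence the += 0x10000 adjustments below.
 */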
1118static int cas_txmac_interrupt(struct net_device *dev,
1119 struct cas *cp, u32 status)
1120{
1121 u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);
1122
1123 if (!txmac_stat)
1124 return 0;
1125
1126 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1127 "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);
1128
1129
1130
1131
1132 if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
1133 !(txmac_stat & ~MAC_TX_DEFER_TIMER))
1134 return 0;
1135
1136 spin_lock(&cp->stat_lock[0]);
1137 if (txmac_stat & MAC_TX_UNDERRUN) {
1138 netdev_err(dev, "TX MAC xmit underrun\n");
1139 cp->net_stats[0].tx_fifo_errors++;
1140 }
1141
1142 if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
1143 netdev_err(dev, "TX MAC max packet size error\n");
1144 cp->net_stats[0].tx_errors++;
1145 }
1146
1147
1148
1149
1150 if (txmac_stat & MAC_TX_COLL_NORMAL)
1151 cp->net_stats[0].collisions += 0x10000;
1152
1153 if (txmac_stat & MAC_TX_COLL_EXCESS) {
1154 cp->net_stats[0].tx_aborted_errors += 0x10000;
1155 cp->net_stats[0].collisions += 0x10000;
1156 }
1157
1158 if (txmac_stat & MAC_TX_COLL_LATE) {
1159 cp->net_stats[0].tx_aborted_errors += 0x10000;
1160 cp->net_stats[0].collisions += 0x10000;
1161 }
1162 spin_unlock(&cp->stat_lock[0]);
1163
1164
1165
1166
1167 return 0;
1168}
1169
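/* Program the header-parser (HP) instruction RAM with the given microcode
 * table, one instruction per RAM slot.
 */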
1170static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
1171{
1172 cas_hp_inst_t *inst;
1173 u32 val;
1174 int i;
1175
1176 i = 0;
1177 while ((inst = firmware) && inst->note) {
1178 writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);
1179
1180 val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
1181 val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
1182 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);
1183
1184 val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
1185 val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
1186 val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
1187 val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
1188 val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
1189 val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
1190 val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
1191 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);
1192
1193 val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
1194 val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
1195 val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
1196 val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
1197 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
1198 ++firmware;
1199 ++i;
1200 }
1201}
1202
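/* Program the RX DMA engine: descriptor and completion ring base addresses,
 * pause thresholds, interrupt blanking, page size / MTU stride, and the
 * header parser configuration.
 */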
1203static void cas_init_rx_dma(struct cas *cp)
1204{
1205 u64 desc_dma = cp->block_dvma;
1206 u32 val;
1207 int i, size;
1208
1209
1210 val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
1211 val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
1212 val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
1213 if ((N_RX_DESC_RINGS > 1) &&
1214 (cp->cas_flags & CAS_FLAG_REG_PLUS))
1215 val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
1216 writel(val, cp->regs + REG_RX_CFG);
1217
1218 val = (unsigned long) cp->init_rxds[0] -
1219 (unsigned long) cp->init_block;
1220 writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
1221 writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
1222 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
1223
1224 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1225
1226
1227
1228 val = (unsigned long) cp->init_rxds[1] -
1229 (unsigned long) cp->init_block;
1230 writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
1231 writel((desc_dma + val) & 0xffffffff, cp->regs +
1232 REG_PLUS_RX_DB1_LOW);
1233 writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
1234 REG_PLUS_RX_KICK1);
1235 }
1236
1237
1238 val = (unsigned long) cp->init_rxcs[0] -
1239 (unsigned long) cp->init_block;
1240 writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
1241 writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);
1242
1243 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1244
1245 for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
1246 val = (unsigned long) cp->init_rxcs[i] -
1247 (unsigned long) cp->init_block;
1248 writel((desc_dma + val) >> 32, cp->regs +
1249 REG_PLUS_RX_CBN_HI(i));
1250 writel((desc_dma + val) & 0xffffffff, cp->regs +
1251 REG_PLUS_RX_CBN_LOW(i));
1252 }
1253 }
1254
1255
1256
1257
1258
1259 readl(cp->regs + REG_INTR_STATUS_ALIAS);
1260 writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
1261 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1262 for (i = 1; i < N_RX_COMP_RINGS; i++)
1263 readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));
1264
1265
1266 if (N_RX_COMP_RINGS > 1)
1267 writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
1268 cp->regs + REG_PLUS_ALIASN_CLEAR(1));
1269
1270 for (i = 2; i < N_RX_COMP_RINGS; i++)
1271 writel(INTR_RX_DONE_ALT,
1272 cp->regs + REG_PLUS_ALIASN_CLEAR(i));
1273 }
1274
1275
1276 val = CAS_BASE(RX_PAUSE_THRESH_OFF,
1277 cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
1278 val |= CAS_BASE(RX_PAUSE_THRESH_ON,
1279 cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
1280 writel(val, cp->regs + REG_RX_PAUSE_THRESH);
1281
1282
1283 for (i = 0; i < 64; i++) {
1284 writel(i, cp->regs + REG_RX_TABLE_ADDR);
1285 writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
1286 writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
1287 writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
1288 }
1289
1290
1291 writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
1292 writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);
1293
1294
1295#ifdef USE_RX_BLANK
1296 val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
1297 val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
1298 writel(val, cp->regs + REG_RX_BLANK);
1299#else
1300 writel(0x0, cp->regs + REG_RX_BLANK);
1301#endif
1302
1303
1304
1305
1306
1307
1308
1309 val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
1310 writel(val, cp->regs + REG_RX_AE_THRESH);
1311 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1312 val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
1313 writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
1314 }
1315
1316
1317
1318
1319 writel(0x0, cp->regs + REG_RX_RED);
1320
1321
1322 val = 0;
1323 if (cp->page_size == 0x1000)
1324 val = 0x1;
1325 else if (cp->page_size == 0x2000)
1326 val = 0x2;
1327 else if (cp->page_size == 0x4000)
1328 val = 0x3;
1329
1330
1331 size = cp->dev->mtu + 64;
1332 if (size > cp->page_size)
1333 size = cp->page_size;
1334
1335 if (size <= 0x400)
1336 i = 0x0;
1337 else if (size <= 0x800)
1338 i = 0x1;
1339 else if (size <= 0x1000)
1340 i = 0x2;
1341 else
1342 i = 0x3;
1343
1344 cp->mtu_stride = 1 << (i + 10);
1345 val = CAS_BASE(RX_PAGE_SIZE, val);
1346 val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
1347 val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
1348 val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
1349 writel(val, cp->regs + REG_RX_PAGE_SIZE);
1350
1351
1352 if (CAS_HP_FIRMWARE == cas_prog_null)
1353 return;
1354
1355 val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
1356 val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
1357 val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
1358 writel(val, cp->regs + REG_HP_CFG);
1359}
1360
1361static inline void cas_rxc_init(struct cas_rx_comp *rxc)
1362{
1363 memset(rxc, 0, sizeof(*rxc));
1364 rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
1365}
1366
1367
1368
1369
1370
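/* A page whose page_count() is still 1 is no longer referenced by any skb
 * fragment and can be reused directly; otherwise swap in a spare page and
 * park the old one on the in-use list.
 */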
1371static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
1372{
1373 cas_page_t *page = cp->rx_pages[1][index];
1374 cas_page_t *new;
1375
1376 if (page_count(page->buffer) == 1)
1377 return page;
1378
1379 new = cas_page_dequeue(cp);
1380 if (new) {
1381 spin_lock(&cp->rx_inuse_lock);
1382 list_add(&page->list, &cp->rx_inuse_list);
1383 spin_unlock(&cp->rx_inuse_lock);
1384 }
1385 return new;
1386}
1387
1388
1389static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
1390 const int index)
1391{
1392 cas_page_t **page0 = cp->rx_pages[0];
1393 cas_page_t **page1 = cp->rx_pages[1];
1394
1395
1396 if (page_count(page0[index]->buffer) > 1) {
1397 cas_page_t *new = cas_page_spare(cp, index);
1398 if (new) {
1399 page1[index] = page0[index];
1400 page0[index] = new;
1401 }
1402 }
1403 RX_USED_SET(page0[index], 0);
1404 return page0[index];
1405}
1406
1407static void cas_clean_rxds(struct cas *cp)
1408{
1409
1410 struct cas_rx_desc *rxd = cp->init_rxds[0];
1411 int i, size;
1412
1413
1414 for (i = 0; i < N_RX_FLOWS; i++) {
1415 struct sk_buff *skb;
1416 while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
1417 cas_skb_release(skb);
1418 }
1419 }
1420
1421
1422 size = RX_DESC_RINGN_SIZE(0);
1423 for (i = 0; i < size; i++) {
1424 cas_page_t *page = cas_page_swap(cp, 0, i);
1425 rxd[i].buffer = cpu_to_le64(page->dma_addr);
1426 rxd[i].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
1427 CAS_BASE(RX_INDEX_RING, 0));
1428 }
1429
1430 cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4;
1431 cp->rx_last[0] = 0;
1432 cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
1433}
1434
1435static void cas_clean_rxcs(struct cas *cp)
1436{
1437 int i, j;
1438
1439
1440 memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
1441 memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
1442 for (i = 0; i < N_RX_COMP_RINGS; i++) {
1443 struct cas_rx_comp *rxc = cp->init_rxcs[i];
1444 for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
1445 cas_rxc_init(rxc + j);
1446 }
1447 }
1448}
1449
1450#if 0
1451
1452
1453
1454
1455
1456
1457static int cas_rxmac_reset(struct cas *cp)
1458{
1459 struct net_device *dev = cp->dev;
1460 int limit;
1461 u32 val;
1462
1463
1464 writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1465 for (limit = 0; limit < STOP_TRIES; limit++) {
1466 if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
1467 break;
1468 udelay(10);
1469 }
1470 if (limit == STOP_TRIES) {
1471 netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
1472 return 1;
1473 }
1474
1475
1476 writel(0, cp->regs + REG_RX_CFG);
1477 for (limit = 0; limit < STOP_TRIES; limit++) {
1478 if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
1479 break;
1480 udelay(10);
1481 }
1482 if (limit == STOP_TRIES) {
1483 netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
1484 return 1;
1485 }
1486
1487 mdelay(5);
1488
1489
1490 writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
1491 for (limit = 0; limit < STOP_TRIES; limit++) {
1492 if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
1493 break;
1494 udelay(10);
1495 }
1496 if (limit == STOP_TRIES) {
1497 netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
1498 return 1;
1499 }
1500
1501
1502 cas_clean_rxds(cp);
1503 cas_clean_rxcs(cp);
1504
1505
1506 cas_init_rx_dma(cp);
1507
1508
1509 val = readl(cp->regs + REG_RX_CFG);
1510 writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
1511 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
1512 val = readl(cp->regs + REG_MAC_RX_CFG);
1513 writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1514 return 0;
1515}
1516#endif
1517
1518static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
1519 u32 status)
1520{
1521 u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);
1522
1523 if (!stat)
1524 return 0;
1525
1526 netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);
1527
1528
1529 spin_lock(&cp->stat_lock[0]);
1530 if (stat & MAC_RX_ALIGN_ERR)
1531 cp->net_stats[0].rx_frame_errors += 0x10000;
1532
1533 if (stat & MAC_RX_CRC_ERR)
1534 cp->net_stats[0].rx_crc_errors += 0x10000;
1535
1536 if (stat & MAC_RX_LEN_ERR)
1537 cp->net_stats[0].rx_length_errors += 0x10000;
1538
1539 if (stat & MAC_RX_OVERFLOW) {
1540 cp->net_stats[0].rx_over_errors++;
1541 cp->net_stats[0].rx_fifo_errors++;
1542 }
1543
1544
1545
1546
1547 spin_unlock(&cp->stat_lock[0]);
1548 return 0;
1549}
1550
1551static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
1552 u32 status)
1553{
1554 u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);
1555
1556 if (!stat)
1557 return 0;
1558
1559 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1560 "mac interrupt, stat: 0x%x\n", stat);
1561
1562
1563
1564
1565
1566 if (stat & MAC_CTRL_PAUSE_STATE)
1567 cp->pause_entered++;
1568
1569 if (stat & MAC_CTRL_PAUSE_RECEIVED)
1570 cp->pause_last_time_recvd = (stat >> 16);
1571
1572 return 0;
1573}
1574
1575
1576
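/* Autonegotiation fallback ladder used while the link stays down: first
 * force the highest supported speed at full duplex, then step down through
 * 100 full, 100 half and finally 10 Mb, re-evaluating every few link-timer
 * ticks.
 */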
1577static inline int cas_mdio_link_not_up(struct cas *cp)
1578{
1579 u16 val;
1580
1581 switch (cp->lstate) {
1582 case link_force_ret:
1583 netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
1584 cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
1585 cp->timer_ticks = 5;
1586 cp->lstate = link_force_ok;
1587 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1588 break;
1589
1590 case link_aneg:
1591 val = cas_phy_read(cp, MII_BMCR);
1592
1593
1594
1595
1596 val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
1597 val |= BMCR_FULLDPLX;
1598 val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
1599 CAS_BMCR_SPEED1000 : BMCR_SPEED100;
1600 cas_phy_write(cp, MII_BMCR, val);
1601 cp->timer_ticks = 5;
1602 cp->lstate = link_force_try;
1603 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1604 break;
1605
1606 case link_force_try:
1607
1608 val = cas_phy_read(cp, MII_BMCR);
1609 cp->timer_ticks = 5;
1610 if (val & CAS_BMCR_SPEED1000) {
1611 val &= ~CAS_BMCR_SPEED1000;
1612 val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
1613 cas_phy_write(cp, MII_BMCR, val);
1614 break;
1615 }
1616
1617 if (val & BMCR_SPEED100) {
1618 if (val & BMCR_FULLDPLX)
1619 val &= ~BMCR_FULLDPLX;
1620 else {
1621 val &= ~BMCR_SPEED100;
1622 }
1623 cas_phy_write(cp, MII_BMCR, val);
1624 break;
1625 }
1626 default:
1627 break;
1628 }
1629 return 0;
1630}
1631
1632
1633
1634static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
1635{
1636 int restart;
1637
1638 if (bmsr & BMSR_LSTATUS) {
1639
1640
1641
1642
1643
1644 if ((cp->lstate == link_force_try) &&
1645 (cp->link_cntl & BMCR_ANENABLE)) {
1646 cp->lstate = link_force_ret;
1647 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1648 cas_mif_poll(cp, 0);
1649 cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
1650 cp->timer_ticks = 5;
1651 if (cp->opened)
1652 netif_info(cp, link, cp->dev,
1653 "Got link after fallback, retrying autoneg once...\n");
1654 cas_phy_write(cp, MII_BMCR,
1655 cp->link_fcntl | BMCR_ANENABLE |
1656 BMCR_ANRESTART);
1657 cas_mif_poll(cp, 1);
1658
1659 } else if (cp->lstate != link_up) {
1660 cp->lstate = link_up;
1661 cp->link_transition = LINK_TRANSITION_LINK_UP;
1662
1663 if (cp->opened) {
1664 cas_set_link_modes(cp);
1665 netif_carrier_on(cp->dev);
1666 }
1667 }
1668 return 0;
1669 }
1670
1671
1672
1673
1674 restart = 0;
1675 if (cp->lstate == link_up) {
1676 cp->lstate = link_down;
1677 cp->link_transition = LINK_TRANSITION_LINK_DOWN;
1678
1679 netif_carrier_off(cp->dev);
1680 if (cp->opened)
1681 netif_info(cp, link, cp->dev, "Link down\n");
1682 restart = 1;
1683
1684 } else if (++cp->timer_ticks > 10)
1685 cas_mdio_link_not_up(cp);
1686
1687 return restart;
1688}
1689
1690static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
1691 u32 status)
1692{
1693 u32 stat = readl(cp->regs + REG_MIF_STATUS);
1694 u16 bmsr;
1695
1696
1697 if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
1698 return 0;
1699
1700 bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
1701 return cas_mii_link_check(cp, bmsr);
1702}
1703
1704static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
1705 u32 status)
1706{
1707 u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);
1708
1709 if (!stat)
1710 return 0;
1711
1712 netdev_err(dev, "PCI error [%04x:%04x]",
1713 stat, readl(cp->regs + REG_BIM_DIAG));
1714
1715
1716 if ((stat & PCI_ERR_BADACK) &&
1717 ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
1718 pr_cont(" <No ACK64# during ABS64 cycle>");
1719
1720 if (stat & PCI_ERR_DTRTO)
1721 pr_cont(" <Delayed transaction timeout>");
1722 if (stat & PCI_ERR_OTHER)
1723 pr_cont(" <other>");
1724 if (stat & PCI_ERR_BIM_DMA_WRITE)
1725 pr_cont(" <BIM DMA 0 write req>");
1726 if (stat & PCI_ERR_BIM_DMA_READ)
1727 pr_cont(" <BIM DMA 0 read req>");
1728 pr_cont("\n");
1729
1730 if (stat & PCI_ERR_OTHER) {
1731 u16 cfg;
1732
1733
1734
1735
1736 pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
1737 netdev_err(dev, "Read PCI cfg space status [%04x]\n", cfg);
1738 if (cfg & PCI_STATUS_PARITY)
1739 netdev_err(dev, "PCI parity error detected\n");
1740 if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
1741 netdev_err(dev, "PCI target abort\n");
1742 if (cfg & PCI_STATUS_REC_TARGET_ABORT)
1743 netdev_err(dev, "PCI master acks target abort\n");
1744 if (cfg & PCI_STATUS_REC_MASTER_ABORT)
1745 netdev_err(dev, "PCI master abort\n");
1746 if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
1747 netdev_err(dev, "PCI system error SERR#\n");
1748 if (cfg & PCI_STATUS_DETECTED_PARITY)
1749 netdev_err(dev, "PCI parity error\n");
1750
1751
1752 cfg &= (PCI_STATUS_PARITY |
1753 PCI_STATUS_SIG_TARGET_ABORT |
1754 PCI_STATUS_REC_TARGET_ABORT |
1755 PCI_STATUS_REC_MASTER_ABORT |
1756 PCI_STATUS_SIG_SYSTEM_ERROR |
1757 PCI_STATUS_DETECTED_PARITY);
1758 pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
1759 }
1760
1761
1762 return 1;
1763}
1764
1765
1766
1767
1768
1769
1770static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
1771 u32 status)
1772{
1773 if (status & INTR_RX_TAG_ERROR) {
1774
1775 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1776 "corrupt rx tag framing\n");
1777 spin_lock(&cp->stat_lock[0]);
1778 cp->net_stats[0].rx_errors++;
1779 spin_unlock(&cp->stat_lock[0]);
1780 goto do_reset;
1781 }
1782
1783 if (status & INTR_RX_LEN_MISMATCH) {
1784
1785 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1786 "length mismatch for rx frame\n");
1787 spin_lock(&cp->stat_lock[0]);
1788 cp->net_stats[0].rx_errors++;
1789 spin_unlock(&cp->stat_lock[0]);
1790 goto do_reset;
1791 }
1792
1793 if (status & INTR_PCS_STATUS) {
1794 if (cas_pcs_interrupt(dev, cp, status))
1795 goto do_reset;
1796 }
1797
1798 if (status & INTR_TX_MAC_STATUS) {
1799 if (cas_txmac_interrupt(dev, cp, status))
1800 goto do_reset;
1801 }
1802
1803 if (status & INTR_RX_MAC_STATUS) {
1804 if (cas_rxmac_interrupt(dev, cp, status))
1805 goto do_reset;
1806 }
1807
1808 if (status & INTR_MAC_CTRL_STATUS) {
1809 if (cas_mac_interrupt(dev, cp, status))
1810 goto do_reset;
1811 }
1812
1813 if (status & INTR_MIF_STATUS) {
1814 if (cas_mif_interrupt(dev, cp, status))
1815 goto do_reset;
1816 }
1817
1818 if (status & INTR_PCI_ERROR_STATUS) {
1819 if (cas_pci_interrupt(dev, cp, status))
1820 goto do_reset;
1821 }
1822 return 0;
1823
1824do_reset:
1825#if 1
1826 atomic_inc(&cp->reset_task_pending);
1827 atomic_inc(&cp->reset_task_pending_all);
1828 netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
1829 schedule_work(&cp->reset_task);
1830#else
1831 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
1832 netdev_err(dev, "reset called in cas_abnormal_irq\n");
1833 schedule_work(&cp->reset_task);
1834#endif
1835 return 1;
1836}
1837
1838
1839
1840
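/* Workaround for a PCI target-abort problem on chips flagged with
 * CAS_FLAG_TARGET_ABORT: if a TX buffer ends within TX_TARGET_ABORT_LEN
 * bytes of a page boundary, that tail has to be bounced through a tiny
 * buffer instead of being DMA'd in place.
 */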
1841#define CAS_TABORT(x) (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
1842#define CAS_ROUND_PAGE(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)
1843static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
1844 const int len)
1845{
1846 unsigned long off = addr + len;
1847
1848 if (CAS_TABORT(cp) == 1)
1849 return 0;
1850 if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
1851 return 0;
1852 return TX_TARGET_ABORT_LEN;
1853}
1854
1855static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
1856{
1857 struct cas_tx_desc *txds;
1858 struct sk_buff **skbs;
1859 struct net_device *dev = cp->dev;
1860 int entry, count;
1861
1862 spin_lock(&cp->tx_lock[ring]);
1863 txds = cp->init_txds[ring];
1864 skbs = cp->tx_skbs[ring];
1865 entry = cp->tx_old[ring];
1866
1867 count = TX_BUFF_COUNT(ring, entry, limit);
1868 while (entry != limit) {
1869 struct sk_buff *skb = skbs[entry];
1870 dma_addr_t daddr;
1871 u32 dlen;
1872 int frag;
1873
1874 if (!skb) {
1875
1876 entry = TX_DESC_NEXT(ring, entry);
1877 continue;
1878 }
1879
1880
		count -= skb_shinfo(skb)->nr_frags +
			 cp->tx_tiny_use[ring][entry].nbufs + 1;
1883 if (count < 0)
1884 break;
1885
1886 netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
1887 "tx[%d] done, slot %d\n", ring, entry);
1888
1889 skbs[entry] = NULL;
1890 cp->tx_tiny_use[ring][entry].nbufs = 0;
1891
1892 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1893 struct cas_tx_desc *txd = txds + entry;
1894
1895 daddr = le64_to_cpu(txd->buffer);
1896 dlen = CAS_VAL(TX_DESC_BUFLEN,
1897 le64_to_cpu(txd->control));
1898 pci_unmap_page(cp->pdev, daddr, dlen,
1899 PCI_DMA_TODEVICE);
1900 entry = TX_DESC_NEXT(ring, entry);
1901
1902
1903 if (cp->tx_tiny_use[ring][entry].used) {
1904 cp->tx_tiny_use[ring][entry].used = 0;
1905 entry = TX_DESC_NEXT(ring, entry);
1906 }
1907 }
1908
1909 spin_lock(&cp->stat_lock[ring]);
1910 cp->net_stats[ring].tx_packets++;
1911 cp->net_stats[ring].tx_bytes += skb->len;
1912 spin_unlock(&cp->stat_lock[ring]);
1913 dev_kfree_skb_irq(skb);
1914 }
1915 cp->tx_old[ring] = entry;
1916
1917
1918
1919
1920
1921 if (netif_queue_stopped(dev) &&
1922 (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
1923 netif_wake_queue(dev);
1924 spin_unlock(&cp->tx_lock[ring]);
1925}
1926
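/* Reclaim completed TX descriptors.  With USE_TX_COMPWB the completion index
 * comes from the DMA'd write-back block rather than a register read.
 */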
1927static void cas_tx(struct net_device *dev, struct cas *cp,
1928 u32 status)
1929{
1930 int limit, ring;
1931#ifdef USE_TX_COMPWB
1932 u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
1933#endif
1934 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1935 "tx interrupt, status: 0x%x, %llx\n",
1936 status, (unsigned long long)compwb);
1937
1938 for (ring = 0; ring < N_TX_RINGS; ring++) {
1939#ifdef USE_TX_COMPWB
1940
1941 limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
1942 CAS_VAL(TX_COMPWB_LSB, compwb);
1943 compwb = TX_COMPWB_NEXT(compwb);
1944#else
1945 limit = readl(cp->regs + REG_TX_COMPN(ring));
1946#endif
1947 if (cp->tx_old[ring] != limit)
1948 cas_tx_ringN(cp, ring, limit);
1949 }
1950}
1951
1952
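/* Build an skb from an RX completion.  Small packets (and the header-split
 * portion) are copied out of the RX page; larger payloads are attached as
 * page fragments.  Returns the packet length, or -1 on failure.
 */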
1953static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
1954 int entry, const u64 *words,
1955 struct sk_buff **skbref)
1956{
1957 int dlen, hlen, len, i, alloclen;
1958 int off, swivel = RX_SWIVEL_OFF_VAL;
1959 struct cas_page *page;
1960 struct sk_buff *skb;
1961 void *addr, *crcaddr;
1962 __sum16 csum;
1963 char *p;
1964
1965 hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
1966 dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
1967 len = hlen + dlen;
1968
1969 if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
1970 alloclen = len;
1971 else
1972 alloclen = max(hlen, RX_COPY_MIN);
1973
1974 skb = netdev_alloc_skb(cp->dev, alloclen + swivel + cp->crc_size);
1975 if (skb == NULL)
1976 return -1;
1977
1978 *skbref = skb;
1979 skb_reserve(skb, swivel);
1980
1981 p = skb->data;
1982 addr = crcaddr = NULL;
1983 if (hlen) {
1984 i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
1985 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
1986 off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
1987 swivel;
1988
1989 i = hlen;
1990 if (!dlen)
1991 i += cp->crc_size;
1992 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
1993 PCI_DMA_FROMDEVICE);
1994 addr = cas_page_map(page->buffer);
1995 memcpy(p, addr + off, i);
1996 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
1997 PCI_DMA_FROMDEVICE);
1998 cas_page_unmap(addr);
1999 RX_USED_ADD(page, 0x100);
2000 p += hlen;
2001 swivel = 0;
2002 }
2003
2004
2005 if (alloclen < (hlen + dlen)) {
2006 skb_frag_t *frag = skb_shinfo(skb)->frags;
2007
2008
2009 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2010 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2011 off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2012
2013 hlen = min(cp->page_size - off, dlen);
2014 if (hlen < 0) {
2015 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
2016 "rx page overflow: %d\n", hlen);
2017 dev_kfree_skb_irq(skb);
2018 return -1;
2019 }
2020 i = hlen;
2021 if (i == dlen)
2022 i += cp->crc_size;
2023 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
2024 PCI_DMA_FROMDEVICE);
2025
2026
2027 swivel = 0;
2028 if (p == (char *) skb->data) {
2029 addr = cas_page_map(page->buffer);
2030 memcpy(p, addr + off, RX_COPY_MIN);
2031 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
2032 PCI_DMA_FROMDEVICE);
2033 cas_page_unmap(addr);
2034 off += RX_COPY_MIN;
2035 swivel = RX_COPY_MIN;
2036 RX_USED_ADD(page, cp->mtu_stride);
2037 } else {
2038 RX_USED_ADD(page, hlen);
2039 }
2040 skb_put(skb, alloclen);
2041
2042 skb_shinfo(skb)->nr_frags++;
2043 skb->data_len += hlen - swivel;
2044 skb->truesize += hlen - swivel;
2045 skb->len += hlen - swivel;
2046
2047 __skb_frag_set_page(frag, page->buffer);
2048 __skb_frag_ref(frag);
2049 frag->page_offset = off;
2050 skb_frag_size_set(frag, hlen - swivel);
2051
2052
2053 if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2054 hlen = dlen;
2055 off = 0;
2056
2057 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2058 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2059 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
2060 hlen + cp->crc_size,
2061 PCI_DMA_FROMDEVICE);
2062 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
2063 hlen + cp->crc_size,
2064 PCI_DMA_FROMDEVICE);
2065
2066 skb_shinfo(skb)->nr_frags++;
2067 skb->data_len += hlen;
2068 skb->len += hlen;
2069 frag++;
2070
2071 __skb_frag_set_page(frag, page->buffer);
2072 __skb_frag_ref(frag);
2073 frag->page_offset = 0;
2074 skb_frag_size_set(frag, hlen);
2075 RX_USED_ADD(page, hlen + cp->crc_size);
2076 }
2077
2078 if (cp->crc_size) {
2079 addr = cas_page_map(page->buffer);
2080 crcaddr = addr + off + hlen;
2081 }
2082
2083 } else {
2084
2085 if (!dlen)
2086 goto end_copy_pkt;
2087
2088 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2089 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2090 off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2091 hlen = min(cp->page_size - off, dlen);
2092 if (hlen < 0) {
2093 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
2094 "rx page overflow: %d\n", hlen);
2095 dev_kfree_skb_irq(skb);
2096 return -1;
2097 }
2098 i = hlen;
2099 if (i == dlen)
2100 i += cp->crc_size;
2101 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
2102 PCI_DMA_FROMDEVICE);
2103 addr = cas_page_map(page->buffer);
2104 memcpy(p, addr + off, i);
2105 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
2106 PCI_DMA_FROMDEVICE);
2107 cas_page_unmap(addr);
2108 if (p == (char *) skb->data)
2109 RX_USED_ADD(page, cp->mtu_stride);
2110 else
2111 RX_USED_ADD(page, i);
2112
2113
2114 if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2115 p += hlen;
2116 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2117 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2118 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
2119 dlen + cp->crc_size,
2120 PCI_DMA_FROMDEVICE);
2121 addr = cas_page_map(page->buffer);
2122 memcpy(p, addr, dlen + cp->crc_size);
2123 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
2124 dlen + cp->crc_size,
2125 PCI_DMA_FROMDEVICE);
2126 cas_page_unmap(addr);
2127 RX_USED_ADD(page, dlen + cp->crc_size);
2128 }
2129end_copy_pkt:
2130 if (cp->crc_size) {
2131 addr = NULL;
2132 crcaddr = skb->data + alloclen;
2133 }
2134 skb_put(skb, alloclen);
2135 }
2136
2137 csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
2138 if (cp->crc_size) {
2139
2140 csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
2141 csum_unfold(csum)));
2142 if (addr)
2143 cas_page_unmap(addr);
2144 }
2145 skb->protocol = eth_type_trans(skb, cp->dev);
2146 if (skb->protocol == htons(ETH_P_IP)) {
2147 skb->csum = csum_unfold(~csum);
2148 skb->ip_summed = CHECKSUM_COMPLETE;
2149 } else
2150 skb_checksum_none_assert(skb);
2151 return len;
2152}
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
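/* Completions tagged with a flow id are queued per flow and only released to
 * the stack when the hardware signals RX_COMP1_RELEASE_FLOW, which keeps the
 * packets of a flow batched together in order.
 */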
2169static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
2170 struct sk_buff *skb)
2171{
2172 int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
2173 struct sk_buff_head *flow = &cp->rx_flows[flowid];
2174
2175
2176
2177
2178
2179 __skb_queue_tail(flow, skb);
2180 if (words[0] & RX_COMP1_RELEASE_FLOW) {
2181 while ((skb = __skb_dequeue(flow))) {
2182 cas_skb_release(skb);
2183 }
2184 }
2185}
2186
2187
2188
2189
2190static void cas_post_page(struct cas *cp, const int ring, const int index)
2191{
2192 cas_page_t *new;
2193 int entry;
2194
2195 entry = cp->rx_old[ring];
2196
2197 new = cas_page_swap(cp, ring, index);
2198 cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
2199 cp->init_rxds[ring][entry].index =
2200 cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
2201 CAS_BASE(RX_INDEX_RING, ring));
2202
2203 entry = RX_DESC_ENTRY(ring, entry + 1);
2204 cp->rx_old[ring] = entry;
2205
2206 if (entry % 4)
2207 return;
2208
2209 if (ring == 0)
2210 writel(entry, cp->regs + REG_RX_KICK);
2211 else if ((N_RX_DESC_RINGS > 1) &&
2212 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2213 writel(entry, cp->regs + REG_PLUS_RX_KICK1);
2214}
2215
2216
2217
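/* Refill RX descriptor ring entries.  If a replacement page can't be
 * allocated, the shortfall is recorded and the link timer is armed to retry
 * the refill later.
 */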
2218static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
2219{
2220 unsigned int entry, last, count, released;
2221 int cluster;
2222 cas_page_t **page = cp->rx_pages[ring];
2223
2224 entry = cp->rx_old[ring];
2225
2226 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
2227 "rxd[%d] interrupt, done: %d\n", ring, entry);
2228
2229 cluster = -1;
2230 count = entry & 0x3;
2231 last = RX_DESC_ENTRY(ring, num ? entry + num - 4: entry - 4);
2232 released = 0;
2233 while (entry != last) {
2234
2235 if (page_count(page[entry]->buffer) > 1) {
2236 cas_page_t *new = cas_page_dequeue(cp);
2237 if (!new) {
2238
2239
2240
2241 cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
2242 if (!timer_pending(&cp->link_timer))
2243 mod_timer(&cp->link_timer, jiffies +
2244 CAS_LINK_FAST_TIMEOUT);
2245 cp->rx_old[ring] = entry;
2246 cp->rx_last[ring] = num ? num - released : 0;
2247 return -ENOMEM;
2248 }
2249 spin_lock(&cp->rx_inuse_lock);
2250 list_add(&page[entry]->list, &cp->rx_inuse_list);
2251 spin_unlock(&cp->rx_inuse_lock);
2252 cp->init_rxds[ring][entry].buffer =
2253 cpu_to_le64(new->dma_addr);
2254 page[entry] = new;
2255
2256 }
2257
2258 if (++count == 4) {
2259 cluster = entry;
2260 count = 0;
2261 }
2262 released++;
2263 entry = RX_DESC_ENTRY(ring, entry + 1);
2264 }
2265 cp->rx_old[ring] = entry;
2266
2267 if (cluster < 0)
2268 return 0;
2269
2270 if (ring == 0)
2271 writel(cluster, cp->regs + REG_RX_KICK);
2272 else if ((N_RX_DESC_RINGS > 1) &&
2273 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2274 writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
2275 return 0;
2276}
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
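/* Process an RX completion ring.  Each completion can reference up to three
 * buffer pages (header, data, next), which are handed back to the descriptor
 * rings as they are consumed.  Returns the number of packets processed.
 */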
2291static int cas_rx_ringN(struct cas *cp, int ring, int budget)
2292{
2293 struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
2294 int entry, drops;
2295 int npackets = 0;
2296
2297 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
2298 "rx[%d] interrupt, done: %d/%d\n",
2299 ring,
2300 readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);
2301
2302 entry = cp->rx_new[ring];
2303 drops = 0;
2304 while (1) {
2305 struct cas_rx_comp *rxc = rxcs + entry;
2306 struct sk_buff *uninitialized_var(skb);
2307 int type, len;
2308 u64 words[4];
2309 int i, dring;
2310
2311 words[0] = le64_to_cpu(rxc->word1);
2312 words[1] = le64_to_cpu(rxc->word2);
2313 words[2] = le64_to_cpu(rxc->word3);
2314 words[3] = le64_to_cpu(rxc->word4);
2315
2316
2317 type = CAS_VAL(RX_COMP1_TYPE, words[0]);
2318 if (type == 0)
2319 break;
2320
2321
2322 if (words[3] & RX_COMP4_ZERO) {
2323 break;
2324 }
2325
2326
2327 if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
2328 spin_lock(&cp->stat_lock[ring]);
2329 cp->net_stats[ring].rx_errors++;
2330 if (words[3] & RX_COMP4_LEN_MISMATCH)
2331 cp->net_stats[ring].rx_length_errors++;
2332 if (words[3] & RX_COMP4_BAD)
2333 cp->net_stats[ring].rx_crc_errors++;
2334 spin_unlock(&cp->stat_lock[ring]);
2335
2336
2337 drop_it:
2338 spin_lock(&cp->stat_lock[ring]);
2339 ++cp->net_stats[ring].rx_dropped;
2340 spin_unlock(&cp->stat_lock[ring]);
2341 goto next;
2342 }
2343
2344 len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
2345 if (len < 0) {
2346 ++drops;
2347 goto drop_it;
2348 }
2349
2350
2351
2352
2353 if (RX_DONT_BATCH || (type == 0x2)) {
2354
2355 cas_skb_release(skb);
2356 } else {
2357 cas_rx_flow_pkt(cp, words, skb);
2358 }
2359
2360 spin_lock(&cp->stat_lock[ring]);
2361 cp->net_stats[ring].rx_packets++;
2362 cp->net_stats[ring].rx_bytes += len;
2363 spin_unlock(&cp->stat_lock[ring]);
2364
2365 next:
2366 npackets++;
2367
2368
2369 if (words[0] & RX_COMP1_RELEASE_HDR) {
2370 i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
2371 dring = CAS_VAL(RX_INDEX_RING, i);
2372 i = CAS_VAL(RX_INDEX_NUM, i);
2373 cas_post_page(cp, dring, i);
2374 }
2375
2376 if (words[0] & RX_COMP1_RELEASE_DATA) {
2377 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2378 dring = CAS_VAL(RX_INDEX_RING, i);
2379 i = CAS_VAL(RX_INDEX_NUM, i);
2380 cas_post_page(cp, dring, i);
2381 }
2382
2383 if (words[0] & RX_COMP1_RELEASE_NEXT) {
2384 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2385 dring = CAS_VAL(RX_INDEX_RING, i);
2386 i = CAS_VAL(RX_INDEX_NUM, i);
2387 cas_post_page(cp, dring, i);
2388 }
2389
2390
2391 entry = RX_COMP_ENTRY(ring, entry + 1 +
2392 CAS_VAL(RX_COMP1_SKIP, words[0]));
2393#ifdef USE_NAPI
2394 if (budget && (npackets >= budget))
2395 break;
2396#endif
2397 }
2398 cp->rx_new[ring] = entry;
2399
2400 if (drops)
2401 netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
2402 return npackets;
2403}
2404
2405
2406
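/* Re-arm the RX completion ring: reinitialize all entries consumed since
 * the last pass and advance the hardware completion tail pointer.
 */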
2407static void cas_post_rxcs_ringN(struct net_device *dev,
2408 struct cas *cp, int ring)
2409{
2410 struct cas_rx_comp *rxc = cp->init_rxcs[ring];
2411 int last, entry;
2412
2413 last = cp->rx_cur[ring];
2414 entry = cp->rx_new[ring];
2415 netif_printk(cp, intr, KERN_DEBUG, dev,
2416 "rxc[%d] interrupt, done: %d/%d\n",
2417 ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);
2418
2419
2420 while (last != entry) {
2421 cas_rxc_init(rxc + last);
2422 last = RX_COMP_ENTRY(ring, last + 1);
2423 }
2424 cp->rx_cur[ring] = last;
2425
2426 if (ring == 0)
2427 writel(last, cp->regs + REG_RX_COMP_TAIL);
2428 else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
2429 writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
2430}
2431
2432
2433
2434
2435
2436
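/* Handlers for the extra PCI interrupt vectors (INTC/INTD) that Cassini+
 * can use to service RX completion rings 2 and 3 independently.
 */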
2437#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
2438static inline void cas_handle_irqN(struct net_device *dev,
2439 struct cas *cp, const u32 status,
2440 const int ring)
2441{
2442 if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
2443 cas_post_rxcs_ringN(dev, cp, ring);
2444}
2445
2446static irqreturn_t cas_interruptN(int irq, void *dev_id)
2447{
2448 struct net_device *dev = dev_id;
2449 struct cas *cp = netdev_priv(dev);
2450 unsigned long flags;
2451 int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
2452 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
2453
2454
2455 if (status == 0)
2456 return IRQ_NONE;
2457
2458 spin_lock_irqsave(&cp->lock, flags);
2459 if (status & INTR_RX_DONE_ALT) {
2460#ifdef USE_NAPI
2461 cas_mask_intr(cp);
2462 napi_schedule(&cp->napi);
2463#else
2464 cas_rx_ringN(cp, ring, 0);
2465#endif
2466 status &= ~INTR_RX_DONE_ALT;
2467 }
2468
2469 if (status)
2470 cas_handle_irqN(dev, cp, status, ring);
2471 spin_unlock_irqrestore(&cp->lock, flags);
2472 return IRQ_HANDLED;
2473}
2474#endif
2475
2476#ifdef USE_PCI_INTB
2477
2478static inline void cas_handle_irq1(struct cas *cp, const u32 status)
2479{
2480 if (status & INTR_RX_BUF_UNAVAIL_1) {
2481
2482
2483 cas_post_rxds_ringN(cp, 1, 0);
2484 spin_lock(&cp->stat_lock[1]);
2485 cp->net_stats[1].rx_dropped++;
2486 spin_unlock(&cp->stat_lock[1]);
2487 }
2488
2489 if (status & INTR_RX_BUF_AE_1)
2490 cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
2491 RX_AE_FREEN_VAL(1));
2492
2493 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2494 cas_post_rxcs_ringN(cp->dev, cp, 1);

2495}
2496
2497
2498static irqreturn_t cas_interrupt1(int irq, void *dev_id)
2499{
2500 struct net_device *dev = dev_id;
2501 struct cas *cp = netdev_priv(dev);
2502 unsigned long flags;
2503 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2504
2505
2506 if (status == 0)
2507 return IRQ_NONE;
2508
2509 spin_lock_irqsave(&cp->lock, flags);
2510 if (status & INTR_RX_DONE_ALT) {
2511#ifdef USE_NAPI
2512 cas_mask_intr(cp);
2513 napi_schedule(&cp->napi);
2514#else
2515 cas_rx_ringN(cp, 1, 0);
2516#endif
2517 status &= ~INTR_RX_DONE_ALT;
2518 }
2519 if (status)
2520 cas_handle_irq1(cp, status);
2521 spin_unlock_irqrestore(&cp->lock, flags);
2522 return IRQ_HANDLED;
2523}
2524#endif
2525
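/* Slow-path service for the primary interrupt: abnormal errors, RX buffer
 * exhaustion (repost descriptors) and completion-ring almost-full
 * conditions.
 */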
2526static inline void cas_handle_irq(struct net_device *dev,
2527 struct cas *cp, const u32 status)
2528{
2529
2530 if (status & INTR_ERROR_MASK)
2531 cas_abnormal_irq(dev, cp, status);
2532
2533 if (status & INTR_RX_BUF_UNAVAIL) {
2534
2535
2536
2537 cas_post_rxds_ringN(cp, 0, 0);
2538 spin_lock(&cp->stat_lock[0]);
2539 cp->net_stats[0].rx_dropped++;
2540 spin_unlock(&cp->stat_lock[0]);
2541 } else if (status & INTR_RX_BUF_AE) {
2542 cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
2543 RX_AE_FREEN_VAL(0));
2544 }
2545
2546 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2547 cas_post_rxcs_ringN(dev, cp, 0);
2548}
2549
2550static irqreturn_t cas_interrupt(int irq, void *dev_id)
2551{
2552 struct net_device *dev = dev_id;
2553 struct cas *cp = netdev_priv(dev);
2554 unsigned long flags;
2555 u32 status = readl(cp->regs + REG_INTR_STATUS);
2556
2557 if (status == 0)
2558 return IRQ_NONE;
2559
2560 spin_lock_irqsave(&cp->lock, flags);
2561 if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
2562 cas_tx(dev, cp, status);
2563 status &= ~(INTR_TX_ALL | INTR_TX_INTME);
2564 }
2565
2566 if (status & INTR_RX_DONE) {
2567#ifdef USE_NAPI
2568 cas_mask_intr(cp);
2569 napi_schedule(&cp->napi);
2570#else
2571 cas_rx_ringN(cp, 0, 0);
2572#endif
2573 status &= ~INTR_RX_DONE;
2574 }
2575
2576 if (status)
2577 cas_handle_irq(dev, cp, status);
2578 spin_unlock_irqrestore(&cp->lock, flags);
2579 return IRQ_HANDLED;
2580}
2581
2582
2583#ifdef USE_NAPI
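/* NAPI poll: reap TX completions under the lock, then share the budget
 * across the RX completion rings, and finally handle any leftover status
 * bits for each interrupt vector.  Interrupts are re-enabled only when
 * the budget was not exhausted.
 */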
2584static int cas_poll(struct napi_struct *napi, int budget)
2585{
2586 struct cas *cp = container_of(napi, struct cas, napi);
2587 struct net_device *dev = cp->dev;
2588 int i, enable_intr, credits;
2589 u32 status = readl(cp->regs + REG_INTR_STATUS);
2590 unsigned long flags;
2591
2592 spin_lock_irqsave(&cp->lock, flags);
2593 cas_tx(dev, cp, status);
2594 spin_unlock_irqrestore(&cp->lock, flags);
2595
2596
2597
2598
2599
2600
2601
2602
2603 enable_intr = 1;
2604 credits = 0;
2605 for (i = 0; i < N_RX_COMP_RINGS; i++) {
2606 int j;
2607 for (j = 0; j < N_RX_COMP_RINGS; j++) {
2608 credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
2609 if (credits >= budget) {
2610 enable_intr = 0;
2611 goto rx_comp;
2612 }
2613 }
2614 }
2615
2616rx_comp:
2617
2618 spin_lock_irqsave(&cp->lock, flags);
2619 if (status)
2620 cas_handle_irq(dev, cp, status);
2621
2622#ifdef USE_PCI_INTB
2623 if (N_RX_COMP_RINGS > 1) {
2624 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2625 if (status)
2626 cas_handle_irq1(cp, status);
2627 }
2628#endif
2629
2630#ifdef USE_PCI_INTC
2631 if (N_RX_COMP_RINGS > 2) {
2632 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
2633 if (status)
2634 cas_handle_irqN(dev, cp, status, 2);
2635 }
2636#endif
2637
2638#ifdef USE_PCI_INTD
2639 if (N_RX_COMP_RINGS > 3) {
2640 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
2641 if (status)
2642 cas_handle_irqN(dev, cp, status, 3);
2643 }
2644#endif
2645 spin_unlock_irqrestore(&cp->lock, flags);
2646 if (enable_intr) {
2647 napi_complete(napi);
2648 cas_unmask_intr(cp);
2649 }
2650 return credits;
2651}
2652#endif
2653
2654#ifdef CONFIG_NET_POLL_CONTROLLER
2655static void cas_netpoll(struct net_device *dev)
2656{
2657 struct cas *cp = netdev_priv(dev);
2658
2659 cas_disable_irq(cp, 0);
2660 cas_interrupt(cp->pdev->irq, dev);
2661 cas_enable_irq(cp, 0);
2662
2663#ifdef USE_PCI_INTB
2664 if (N_RX_COMP_RINGS > 1) {
2665
2666 }
2667#endif
2668#ifdef USE_PCI_INTC
2669 if (N_RX_COMP_RINGS > 2) {
2670
2671 }
2672#endif
2673#ifdef USE_PCI_INTD
2674 if (N_RX_COMP_RINGS > 3) {
2675
2676 }
2677#endif
2678}
2679#endif
2680
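/* TX watchdog: dump the MAC, TX, RX and header-parser state for debugging
 * and schedule a full chip reset from process context.
 */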
2681static void cas_tx_timeout(struct net_device *dev)
2682{
2683 struct cas *cp = netdev_priv(dev);
2684
2685 netdev_err(dev, "transmit timed out, resetting\n");
2686 if (!cp->hw_running) {
2687 netdev_err(dev, "hrm.. hw not running!\n");
2688 return;
2689 }
2690
2691 netdev_err(dev, "MIF_STATE[%08x]\n",
2692 readl(cp->regs + REG_MIF_STATE_MACHINE));
2693
2694 netdev_err(dev, "MAC_STATE[%08x]\n",
2695 readl(cp->regs + REG_MAC_STATE_MACHINE));
2696
2697 netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
2698 readl(cp->regs + REG_TX_CFG),
2699 readl(cp->regs + REG_MAC_TX_STATUS),
2700 readl(cp->regs + REG_MAC_TX_CFG),
2701 readl(cp->regs + REG_TX_FIFO_PKT_CNT),
2702 readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
2703 readl(cp->regs + REG_TX_FIFO_READ_PTR),
2704 readl(cp->regs + REG_TX_SM_1),
2705 readl(cp->regs + REG_TX_SM_2));
2706
2707 netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
2708 readl(cp->regs + REG_RX_CFG),
2709 readl(cp->regs + REG_MAC_RX_STATUS),
2710 readl(cp->regs + REG_MAC_RX_CFG));
2711
2712 netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n",
2713 readl(cp->regs + REG_HP_STATE_MACHINE),
2714 readl(cp->regs + REG_HP_STATUS0),
2715 readl(cp->regs + REG_HP_STATUS1),
2716 readl(cp->regs + REG_HP_STATUS2));
2717
2718#if 1
2719 atomic_inc(&cp->reset_task_pending);
2720 atomic_inc(&cp->reset_task_pending_all);
2721 schedule_work(&cp->reset_task);
2722#else
2723 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
2724 schedule_work(&cp->reset_task);
2725#endif
2726}
2727
2728static inline int cas_intme(int ring, int entry)
2729{
2730
2731 if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)))
2732 return 1;
2733 return 0;
2734}
2735
2736
2737static void cas_write_txd(struct cas *cp, int ring, int entry,
2738 dma_addr_t mapping, int len, u64 ctrl, int last)
2739{
2740 struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
2741
2742 ctrl |= CAS_BASE(TX_DESC_BUFLEN, len);
2743 if (cas_intme(ring, entry))
2744 ctrl |= TX_DESC_INTME;
2745 if (last)
2746 ctrl |= TX_DESC_EOF;
2747 txd->control = cpu_to_le64(ctrl);
2748 txd->buffer = cpu_to_le64(mapping);
2749}
2750
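/* Per-ring "tiny" bounce buffers.  cas_calc_tabort() flags buffers whose
 * tail apparently trips a TX DMA restriction near a page boundary; the
 * offending tail bytes are copied into one of these pre-mapped buffers
 * and described with an extra descriptor instead.
 */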
2751static inline void *tx_tiny_buf(struct cas *cp, const int ring,
2752 const int entry)
2753{
2754 return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
2755}
2756
2757static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
2758 const int entry, const int tentry)
2759{
2760 cp->tx_tiny_use[ring][tentry].nbufs++;
2761 cp->tx_tiny_use[ring][entry].used = 1;
2762 return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
2763}
2764
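/* Queue one skb on TX ring @ring: reserve descriptors, set up checksum
 * offload for CHECKSUM_PARTIAL, map the linear data and all fragments
 * (bouncing tails through the tiny buffers where needed) and kick the
 * ring.  Returns nonzero if the ring was unexpectedly full.
 */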
2765static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2766 struct sk_buff *skb)
2767{
2768 struct net_device *dev = cp->dev;
2769 int entry, nr_frags, frag, tabort, tentry;
2770 dma_addr_t mapping;
2771 unsigned long flags;
2772 u64 ctrl;
2773 u32 len;
2774
2775 spin_lock_irqsave(&cp->tx_lock[ring], flags);
2776
2777
2778 if (TX_BUFFS_AVAIL(cp, ring) <=
2779 CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
2780 netif_stop_queue(dev);
2781 spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2782 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
2783 return 1;
2784 }
2785
2786 ctrl = 0;
2787 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2788 const u64 csum_start_off = skb_checksum_start_offset(skb);
2789 const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
2790
2791 ctrl = TX_DESC_CSUM_EN |
2792 CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
2793 CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
2794 }
2795
2796 entry = cp->tx_new[ring];
2797 cp->tx_skbs[ring][entry] = skb;
2798
2799 nr_frags = skb_shinfo(skb)->nr_frags;
2800 len = skb_headlen(skb);
2801 mapping = pci_map_page(cp->pdev, virt_to_page(skb->data),
2802 offset_in_page(skb->data), len,
2803 PCI_DMA_TODEVICE);
2804
2805 tentry = entry;
2806 tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
2807 if (unlikely(tabort)) {
2808
2809 cas_write_txd(cp, ring, entry, mapping, len - tabort,
2810 ctrl | TX_DESC_SOF, 0);
2811 entry = TX_DESC_NEXT(ring, entry);
2812
2813 skb_copy_from_linear_data_offset(skb, len - tabort,
2814 tx_tiny_buf(cp, ring, entry), tabort);
2815 mapping = tx_tiny_map(cp, ring, entry, tentry);
2816 cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
2817 (nr_frags == 0));
2818 } else {
2819 cas_write_txd(cp, ring, entry, mapping, len, ctrl |
2820 TX_DESC_SOF, (nr_frags == 0));
2821 }
2822 entry = TX_DESC_NEXT(ring, entry);
2823
2824 for (frag = 0; frag < nr_frags; frag++) {
2825 const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
2826
2827 len = skb_frag_size(fragp);
2828 mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
2829 DMA_TO_DEVICE);
2830
2831 tabort = cas_calc_tabort(cp, fragp->page_offset, len);
2832 if (unlikely(tabort)) {
2833 void *addr;
2834
2835
2836 cas_write_txd(cp, ring, entry, mapping, len - tabort,
2837 ctrl, 0);
2838 entry = TX_DESC_NEXT(ring, entry);
2839
2840 addr = cas_page_map(skb_frag_page(fragp));
2841 memcpy(tx_tiny_buf(cp, ring, entry),
2842 addr + fragp->page_offset + len - tabort,
2843 tabort);
2844 cas_page_unmap(addr);
2845 mapping = tx_tiny_map(cp, ring, entry, tentry);
2846 len = tabort;
2847 }
2848
2849 cas_write_txd(cp, ring, entry, mapping, len, ctrl,
2850 (frag + 1 == nr_frags));
2851 entry = TX_DESC_NEXT(ring, entry);
2852 }
2853
2854 cp->tx_new[ring] = entry;
2855 if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
2856 netif_stop_queue(dev);
2857
2858 netif_printk(cp, tx_queued, KERN_DEBUG, dev,
2859 "tx[%d] queued, slot %d, skblen %d, avail %d\n",
2860 ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
2861 writel(entry, cp->regs + REG_TX_KICKN(ring));
2862 spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2863 return 0;
2864}
2865
2866static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
2867{
2868 struct cas *cp = netdev_priv(dev);
2869
2870
2871
2872
2873 static int ring;
2874
2875 if (skb_padto(skb, cp->min_frame_size))
2876 return NETDEV_TX_OK;
2877
2878
2879
2880
2881 if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
2882 return NETDEV_TX_BUSY;
2883 return NETDEV_TX_OK;
2884}
2885
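/* Program the TX DMA engine: completion write-back address, per-ring
 * descriptor base addresses, the TX configuration word and the per-ring
 * maximum burst sizes.
 */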
2886static void cas_init_tx_dma(struct cas *cp)
2887{
2888 u64 desc_dma = cp->block_dvma;
2889 unsigned long off;
2890 u32 val;
2891 int i;
2892
2893
2894#ifdef USE_TX_COMPWB
2895 off = offsetof(struct cas_init_block, tx_compwb);
2896 writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
2897 writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
2898#endif
2899
2900
2901
2902
2903 val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
2904 TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
2905 TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
2906 TX_CFG_INTR_COMPWB_DIS;
2907
2908
2909 for (i = 0; i < MAX_TX_RINGS; i++) {
2910 off = (unsigned long) cp->init_txds[i] -
2911 (unsigned long) cp->init_block;
2912
2913 val |= CAS_TX_RINGN_BASE(i);
2914 writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
2915 writel((desc_dma + off) & 0xffffffff, cp->regs +
2916 REG_TX_DBN_LOW(i));
2917
2918
2919
2920 }
2921 writel(val, cp->regs + REG_TX_CFG);
2922
2923
2924
2925
2926#ifdef USE_QOS
2927 writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2928 writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
2929 writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
2930 writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
2931#else
2932 writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2933 writel(0x800, cp->regs + REG_TX_MAXBURST_1);
2934 writel(0x800, cp->regs + REG_TX_MAXBURST_2);
2935 writel(0x800, cp->regs + REG_TX_MAXBURST_3);
2936#endif
2937}
2938
2939
2940static inline void cas_init_dma(struct cas *cp)
2941{
2942 cas_init_tx_dma(cp);
2943 cas_init_rx_dma(cp);
2944}
2945
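/* Load the multicast filter: the first CAS_MC_EXACT_MATCH_SIZE addresses
 * go into exact-match registers, anything beyond that is folded into the
 * 256-bit hash table.
 */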
2946static void cas_process_mc_list(struct cas *cp)
2947{
2948 u16 hash_table[16];
2949 u32 crc;
2950 struct netdev_hw_addr *ha;
2951 int i = 1;
2952
2953 memset(hash_table, 0, sizeof(hash_table));
2954 netdev_for_each_mc_addr(ha, cp->dev) {
2955 if (i <= CAS_MC_EXACT_MATCH_SIZE) {
2956
2957
2958
2959 writel((ha->addr[4] << 8) | ha->addr[5],
2960 cp->regs + REG_MAC_ADDRN(i*3 + 0));
2961 writel((ha->addr[2] << 8) | ha->addr[3],
2962 cp->regs + REG_MAC_ADDRN(i*3 + 1));
2963 writel((ha->addr[0] << 8) | ha->addr[1],
2964 cp->regs + REG_MAC_ADDRN(i*3 + 2));
2965 i++;
2966 }
2967 else {
2968
2969
2970
2971 crc = ether_crc_le(ETH_ALEN, ha->addr);
2972 crc >>= 24;
2973 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
2974 }
2975 }
2976 for (i = 0; i < 16; i++)
2977 writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i));
2978}
2979
2980
2981static u32 cas_setup_multicast(struct cas *cp)
2982{
2983 u32 rxcfg = 0;
2984 int i;
2985
2986 if (cp->dev->flags & IFF_PROMISC) {
2987 rxcfg |= MAC_RX_CFG_PROMISC_EN;
2988
2989 } else if (cp->dev->flags & IFF_ALLMULTI) {
2990 for (i=0; i < 16; i++)
2991 writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
2992 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
2993
2994 } else {
2995 cas_process_mc_list(cp);
2996 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
2997 }
2998
2999 return rxcfg;
3000}
3001
3002
3003static void cas_clear_mac_err(struct cas *cp)
3004{
3005 writel(0, cp->regs + REG_MAC_COLL_NORMAL);
3006 writel(0, cp->regs + REG_MAC_COLL_FIRST);
3007 writel(0, cp->regs + REG_MAC_COLL_EXCESS);
3008 writel(0, cp->regs + REG_MAC_COLL_LATE);
3009 writel(0, cp->regs + REG_MAC_TIMER_DEFER);
3010 writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK);
3011 writel(0, cp->regs + REG_MAC_RECV_FRAME);
3012 writel(0, cp->regs + REG_MAC_LEN_ERR);
3013 writel(0, cp->regs + REG_MAC_ALIGN_ERR);
3014 writel(0, cp->regs + REG_MAC_FCS_ERR);
3015 writel(0, cp->regs + REG_MAC_RX_CODE_ERR);
3016}
3017
3018
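/* Reset the TX and RX MAC blocks and poll until the self-clearing reset
 * bits drop.
 */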
3019static void cas_mac_reset(struct cas *cp)
3020{
3021 int i;
3022
3023
3024 writel(0x1, cp->regs + REG_MAC_TX_RESET);
3025 writel(0x1, cp->regs + REG_MAC_RX_RESET);
3026
3027
3028 i = STOP_TRIES;
3029 while (i-- > 0) {
3030 if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
3031 break;
3032 udelay(10);
3033 }
3034
3035
3036 i = STOP_TRIES;
3037 while (i-- > 0) {
3038 if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
3039 break;
3040 udelay(10);
3041 }
3042
3043 if (readl(cp->regs + REG_MAC_TX_RESET) |
3044 readl(cp->regs + REG_MAC_RX_RESET))
3045 netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n",
3046 readl(cp->regs + REG_MAC_TX_RESET),
3047 readl(cp->regs + REG_MAC_RX_RESET),
3048 readl(cp->regs + REG_MAC_STATE_MACHINE));
3049}
3050
3051
3052
3053static void cas_init_mac(struct cas *cp)
3054{
3055 unsigned char *e = &cp->dev->dev_addr[0];
3056 int i;
3057 cas_mac_reset(cp);
3058
3059
3060 writel(CAWR_RR_DIS, cp->regs + REG_CAWR);
3061
3062#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
3063
3064
3065
3066 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
3067 writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
3068#endif
3069
3070 writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);
3071
3072 writel(0x00, cp->regs + REG_MAC_IPG0);
3073 writel(0x08, cp->regs + REG_MAC_IPG1);
3074 writel(0x04, cp->regs + REG_MAC_IPG2);
3075
3076
3077 writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3078
3079
3080 writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);
3081
3082
3083
3084
3085
3086 writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
3087 CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
3088 (CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
3089 cp->regs + REG_MAC_FRAMESIZE_MAX);
3090
3091
3092
3093
3094
3095 if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
3096 writel(0x41, cp->regs + REG_MAC_PA_SIZE);
3097 else
3098 writel(0x07, cp->regs + REG_MAC_PA_SIZE);
3099 writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
3100 writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
3101 writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);
3102
3103 writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);
3104
3105 writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
3106 writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
3107 writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
3108 writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
3109 writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);
3110
3111
3112 for (i = 0; i < 45; i++)
3113 writel(0x0, cp->regs + REG_MAC_ADDRN(i));
3114
3115 writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
3116 writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
3117 writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));
3118
3119 writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
3120 writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
3121 writel(0x0180, cp->regs + REG_MAC_ADDRN(44));
3122
3123 cp->mac_rx_cfg = cas_setup_multicast(cp);
3124
3125 spin_lock(&cp->stat_lock[N_TX_RINGS]);
3126 cas_clear_mac_err(cp);
3127 spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3128
3129
3130
3131
3132
3133 writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
3134 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
3135
3136
3137
3138
3139 writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
3140}
3141
3142
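/* Derive the RX FIFO pause-frame thresholds: tiny FIFOs pause
 * immediately; otherwise pause-off is the FIFO size minus two
 * maximum-sized frames and pause-on one frame below that, with fixed
 * fallback values when three maximum frames do not fit.
 */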
3143static void cas_init_pause_thresholds(struct cas *cp)
3144{
3145
3146
3147
3148 if (cp->rx_fifo_size <= (2 * 1024)) {
3149 cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
3150 } else {
3151 int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
3152 if (max_frame * 3 > cp->rx_fifo_size) {
3153 cp->rx_pause_off = 7104;
3154 cp->rx_pause_on = 960;
3155 } else {
3156 int off = (cp->rx_fifo_size - (max_frame * 2));
3157 int on = off - max_frame;
3158 cp->rx_pause_off = off;
3159 cp->rx_pause_on = on;
3160 }
3161 }
3162}
3163
3164static int cas_vpd_match(const void __iomem *p, const char *str)
3165{
3166 int len = strlen(str) + 1;
3167 int i;
3168
3169 for (i = 0; i < len; i++) {
3170 if (readb(p + i) != str[i])
3171 return 0;
3172 }
3173 return 1;
3174}
3175
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
3187
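/* Fetch the MAC address and PHY type from the expansion ROM VPD: verify
 * the 0x55aa ROM signature, locate the PCI data structure ("PCIR"), then
 * walk the VPD-R keywords.  @offset selects which local-mac-address entry
 * to use on multi-port cards.  Falls back to the OpenFirmware property on
 * SPARC, or to a random address in the Sun OUI.
 */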
3188static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
3189 const int offset)
3190{
3191 void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
3192 void __iomem *base, *kstart;
3193 int i, len;
3194 int found = 0;
3195#define VPD_FOUND_MAC 0x01
3196#define VPD_FOUND_PHY 0x02
3197
3198 int phy_type = CAS_PHY_MII_MDIO0;
3199 int mac_off = 0;
3200
3201#if defined(CONFIG_SPARC)
3202 const unsigned char *addr;
3203#endif
3204
3205
3206 writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
3207 cp->regs + REG_BIM_LOCAL_DEV_EN);
3208
3209
3210 if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
3211 goto use_random_mac_addr;
3212
3213
3214 base = NULL;
3215 for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
3216
3217 if ((readb(p + i + 0) == 0x50) &&
3218 (readb(p + i + 1) == 0x43) &&
3219 (readb(p + i + 2) == 0x49) &&
3220 (readb(p + i + 3) == 0x52)) {
3221 base = p + (readb(p + i + 8) |
3222 (readb(p + i + 9) << 8));
3223 break;
3224 }
3225 }
3226
3227 if (!base || (readb(base) != 0x82))
3228 goto use_random_mac_addr;
3229
3230 i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
3231 while (i < EXPANSION_ROM_SIZE) {
3232 if (readb(base + i) != 0x90)
3233 goto use_random_mac_addr;
3234
3235
3236 len = readb(base + i + 1) | (readb(base + i + 2) << 8);
3237
3238
3239 kstart = base + i + 3;
3240 p = kstart;
3241 while ((p - kstart) < len) {
3242 int klen = readb(p + 2);
3243 int j;
3244 char type;
3245
3246 p += 3;
3247
3248
3249
3250
3251
3252
3253
3254
3255
3256
3257
3258
3259
3260
3261
3262
3263
3264
3265
3266
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
3279
3280
3281
3282
3283
3284
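/* Only instance ('I') entries are interesting: a binary ('B')
 * "local-mac-address" entry supplies the station address, while a string
 * ('S') "phy-type" or "phy-interface" entry of "pcs" selects the SERDES
 * PHY instead of MII.
 */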
3285 if (readb(p) != 'I')
3286 goto next;
3287
3288
3289 type = readb(p + 3);
3290 if (type == 'B') {
3291 if ((klen == 29) && readb(p + 4) == 6 &&
3292 cas_vpd_match(p + 5,
3293 "local-mac-address")) {
3294 if (mac_off++ > offset)
3295 goto next;
3296
3297
3298 for (j = 0; j < 6; j++)
3299 dev_addr[j] =
3300 readb(p + 23 + j);
3301 goto found_mac;
3302 }
3303 }
3304
3305 if (type != 'S')
3306 goto next;
3307
3308#ifdef USE_ENTROPY_DEV
3309 if ((klen == 24) &&
3310 cas_vpd_match(p + 5, "entropy-dev") &&
3311 cas_vpd_match(p + 17, "vms110")) {
3312 cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
3313 goto next;
3314 }
3315#endif
3316
3317 if (found & VPD_FOUND_PHY)
3318 goto next;
3319
3320 if ((klen == 18) && readb(p + 4) == 4 &&
3321 cas_vpd_match(p + 5, "phy-type")) {
3322 if (cas_vpd_match(p + 14, "pcs")) {
3323 phy_type = CAS_PHY_SERDES;
3324 goto found_phy;
3325 }
3326 }
3327
3328 if ((klen == 23) && readb(p + 4) == 4 &&
3329 cas_vpd_match(p + 5, "phy-interface")) {
3330 if (cas_vpd_match(p + 19, "pcs")) {
3331 phy_type = CAS_PHY_SERDES;
3332 goto found_phy;
3333 }
3334 }
3335found_mac:
3336 found |= VPD_FOUND_MAC;
3337 goto next;
3338
3339found_phy:
3340 found |= VPD_FOUND_PHY;
3341
3342next:
3343 p += klen;
3344 }
3345 i += len + 3;
3346 }
3347
3348use_random_mac_addr:
3349 if (found & VPD_FOUND_MAC)
3350 goto done;
3351
3352#if defined(CONFIG_SPARC)
3353 addr = of_get_property(cp->of_node, "local-mac-address", NULL);
3354 if (addr != NULL) {
3355 memcpy(dev_addr, addr, ETH_ALEN);
3356 goto done;
3357 }
3358#endif
3359
3360
3361 pr_info("MAC address not found in ROM VPD\n");
3362 dev_addr[0] = 0x08;
3363 dev_addr[1] = 0x00;
3364 dev_addr[2] = 0x20;
3365 get_random_bytes(dev_addr + 3, 3);
3366
3367done:
3368 writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3369 return phy_type;
3370}
3371
3372
3373static void cas_check_pci_invariants(struct cas *cp)
3374{
3375 struct pci_dev *pdev = cp->pdev;
3376
3377 cp->cas_flags = 0;
3378 if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
3379 (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
3380 if (pdev->revision >= CAS_ID_REVPLUS)
3381 cp->cas_flags |= CAS_FLAG_REG_PLUS;
3382 if (pdev->revision < CAS_ID_REVPLUS02u)
3383 cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
3384
3385
3386
3387
3388 if (pdev->revision < CAS_ID_REV2)
3389 cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
3390 } else {
3391
3392 cp->cas_flags |= CAS_FLAG_REG_PLUS;
3393
3394
3395
3396
3397 if ((pdev->vendor == PCI_VENDOR_ID_NS) &&
3398 (pdev->device == PCI_DEVICE_ID_NS_SATURN))
3399 cp->cas_flags |= CAS_FLAG_SATURN;
3400 }
3401}
3402
3403
3404static int cas_check_invariants(struct cas *cp)
3405{
3406 struct pci_dev *pdev = cp->pdev;
3407 u32 cfg;
3408 int i;
3409
3410
3411 cp->page_order = 0;
3412#ifdef USE_PAGE_ORDER
3413 if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
3414
3415 struct page *page = alloc_pages(GFP_ATOMIC,
3416 CAS_JUMBO_PAGE_SHIFT -
3417 PAGE_SHIFT);
3418 if (page) {
3419 __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
3420 cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
3421 } else {
3422 printk(KERN_INFO "MTU limited to %d bytes\n", CAS_MAX_MTU);
3423 }
3424 }
3425#endif
3426 cp->page_size = (PAGE_SIZE << cp->page_order);
3427
3428
3429 cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
3430 cp->rx_fifo_size = RX_FIFO_SIZE;
3431
3432
3433
3434
3435 cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
3436 PCI_SLOT(pdev->devfn));
3437 if (cp->phy_type & CAS_PHY_SERDES) {
3438 cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3439 return 0;
3440 }
3441
3442
3443 cfg = readl(cp->regs + REG_MIF_CFG);
3444 if (cfg & MIF_CFG_MDIO_1) {
3445 cp->phy_type = CAS_PHY_MII_MDIO1;
3446 } else if (cfg & MIF_CFG_MDIO_0) {
3447 cp->phy_type = CAS_PHY_MII_MDIO0;
3448 }
3449
3450 cas_mif_poll(cp, 0);
3451 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3452
3453 for (i = 0; i < 32; i++) {
3454 u32 phy_id;
3455 int j;
3456
3457 for (j = 0; j < 3; j++) {
3458 cp->phy_addr = i;
3459 phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
3460 phy_id |= cas_phy_read(cp, MII_PHYSID2);
3461 if (phy_id && (phy_id != 0xFFFFFFFF)) {
3462 cp->phy_id = phy_id;
3463 goto done;
3464 }
3465 }
3466 }
3467 pr_err("MII phy did not respond [%08x]\n",
3468 readl(cp->regs + REG_MIF_STATE_MACHINE));
3469 return -1;
3470
3471done:
3472
3473 cfg = cas_phy_read(cp, MII_BMSR);
3474 if ((cfg & CAS_BMSR_1000_EXTEND) &&
3475 cas_phy_read(cp, CAS_MII_1000_EXTEND))
3476 cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3477 return 0;
3478}
3479
3480
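/* Enable the TX/RX DMA engines and MACs, verify that the enables stick,
 * then prime the RX kick and completion tail registers.
 */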
3481static inline void cas_start_dma(struct cas *cp)
3482{
3483 int i;
3484 u32 val;
3485 int txfailed = 0;
3486
3487
3488 val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
3489 writel(val, cp->regs + REG_TX_CFG);
3490 val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
3491 writel(val, cp->regs + REG_RX_CFG);
3492
3493
3494 val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
3495 writel(val, cp->regs + REG_MAC_TX_CFG);
3496 val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
3497 writel(val, cp->regs + REG_MAC_RX_CFG);
3498
3499 i = STOP_TRIES;
3500 while (i-- > 0) {
3501 val = readl(cp->regs + REG_MAC_TX_CFG);
3502 if ((val & MAC_TX_CFG_EN))
3503 break;
3504 udelay(10);
3505 }
3506 if (i < 0) txfailed = 1;
3507 i = STOP_TRIES;
3508 while (i-- > 0) {
3509 val = readl(cp->regs + REG_MAC_RX_CFG);
3510 if ((val & MAC_RX_CFG_EN)) {
3511 if (txfailed) {
3512 netdev_err(cp->dev,
3513 "enabling mac failed [tx:%08x:%08x]\n",
3514 readl(cp->regs + REG_MIF_STATE_MACHINE),
3515 readl(cp->regs + REG_MAC_STATE_MACHINE));
3516 }
3517 goto enable_rx_done;
3518 }
3519 udelay(10);
3520 }
3521 netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
3522 (txfailed ? "tx,rx" : "rx"),
3523 readl(cp->regs + REG_MIF_STATE_MACHINE),
3524 readl(cp->regs + REG_MAC_STATE_MACHINE));
3525
3526enable_rx_done:
3527 cas_unmask_intr(cp);
3528 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
3529 writel(0, cp->regs + REG_RX_COMP_TAIL);
3530
3531 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
3532 if (N_RX_DESC_RINGS > 1)
3533 writel(RX_DESC_RINGN_SIZE(1) - 4,
3534 cp->regs + REG_PLUS_RX_KICK1);
3535
3536 for (i = 1; i < N_RX_COMP_RINGS; i++)
3537 writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
3538 }
3539}
3540
3541
3542static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
3543 int *pause)
3544{
3545 u32 val = readl(cp->regs + REG_PCS_MII_LPA);
3546 *fd = (val & PCS_MII_LPA_FD) ? 1 : 0;
3547 *pause = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00;
3548 if (val & PCS_MII_LPA_ASYM_PAUSE)
3549 *pause |= 0x10;
3550 *spd = 1000;
3551}
3552
3553
3554static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
3555 int *pause)
3556{
3557 u32 val;
3558
3559 *fd = 0;
3560 *spd = 10;
3561 *pause = 0;
3562
3563
3564 val = cas_phy_read(cp, MII_LPA);
3565 if (val & CAS_LPA_PAUSE)
3566 *pause = 0x01;
3567
3568 if (val & CAS_LPA_ASYM_PAUSE)
3569 *pause |= 0x10;
3570
3571 if (val & LPA_DUPLEX)
3572 *fd = 1;
3573 if (val & LPA_100)
3574 *spd = 100;
3575
3576 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
3577 val = cas_phy_read(cp, CAS_MII_1000_STATUS);
3578 if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF))
3579 *spd = 1000;
3580 if (val & CAS_LPA_1000FULL)
3581 *fd = 1;
3582 }
3583}
3584
3585
3586
3587
3588
3589
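/* Read the resolved (negotiated or forced) speed, duplex and pause
 * settings from the MII PHY or the PCS, program the XIF and MAC
 * accordingly (including carrier extension for half-duplex gigabit and
 * FCS stripping), set up pause frames and start DMA.
 */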
3590static void cas_set_link_modes(struct cas *cp)
3591{
3592 u32 val;
3593 int full_duplex, speed, pause;
3594
3595 full_duplex = 0;
3596 speed = 10;
3597 pause = 0;
3598
3599 if (CAS_PHY_MII(cp->phy_type)) {
3600 cas_mif_poll(cp, 0);
3601 val = cas_phy_read(cp, MII_BMCR);
3602 if (val & BMCR_ANENABLE) {
3603 cas_read_mii_link_mode(cp, &full_duplex, &speed,
3604 &pause);
3605 } else {
3606 if (val & BMCR_FULLDPLX)
3607 full_duplex = 1;
3608
3609 if (val & BMCR_SPEED100)
3610 speed = 100;
3611 else if (val & CAS_BMCR_SPEED1000)
3612 speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
3613 1000 : 100;
3614 }
3615 cas_mif_poll(cp, 1);
3616
3617 } else {
3618 val = readl(cp->regs + REG_PCS_MII_CTRL);
3619 cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
3620 if ((val & PCS_MII_AUTONEG_EN) == 0) {
3621 if (val & PCS_MII_CTRL_DUPLEX)
3622 full_duplex = 1;
3623 }
3624 }
3625
3626 netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
3627 speed, full_duplex ? "full" : "half");
3628
3629 val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
3630 if (CAS_PHY_MII(cp->phy_type)) {
3631 val |= MAC_XIF_MII_BUFFER_OUTPUT_EN;
3632 if (!full_duplex)
3633 val |= MAC_XIF_DISABLE_ECHO;
3634 }
3635 if (full_duplex)
3636 val |= MAC_XIF_FDPLX_LED;
3637 if (speed == 1000)
3638 val |= MAC_XIF_GMII_MODE;
3639 writel(val, cp->regs + REG_MAC_XIF_CFG);
3640
3641
3642 val = MAC_TX_CFG_IPG_EN;
3643 if (full_duplex) {
3644 val |= MAC_TX_CFG_IGNORE_CARRIER;
3645 val |= MAC_TX_CFG_IGNORE_COLL;
3646 } else {
3647#ifndef USE_CSMA_CD_PROTO
3648 val |= MAC_TX_CFG_NEVER_GIVE_UP_EN;
3649 val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM;
3650#endif
3651 }
3652
3653
3654
3655
3656
3657
3658
3659 if ((speed == 1000) && !full_duplex) {
3660 writel(val | MAC_TX_CFG_CARRIER_EXTEND,
3661 cp->regs + REG_MAC_TX_CFG);
3662
3663 val = readl(cp->regs + REG_MAC_RX_CFG);
3664 val &= ~MAC_RX_CFG_STRIP_FCS;
3665 writel(val | MAC_RX_CFG_CARRIER_EXTEND,
3666 cp->regs + REG_MAC_RX_CFG);
3667
3668 writel(0x200, cp->regs + REG_MAC_SLOT_TIME);
3669
3670 cp->crc_size = 4;
3671
3672 cp->min_frame_size = CAS_1000MB_MIN_FRAME;
3673
3674 } else {
3675 writel(val, cp->regs + REG_MAC_TX_CFG);
3676
3677
3678
3679
3680 val = readl(cp->regs + REG_MAC_RX_CFG);
3681 if (full_duplex) {
3682 val |= MAC_RX_CFG_STRIP_FCS;
3683 cp->crc_size = 0;
3684 cp->min_frame_size = CAS_MIN_MTU;
3685 } else {
3686 val &= ~MAC_RX_CFG_STRIP_FCS;
3687 cp->crc_size = 4;
3688 cp->min_frame_size = CAS_MIN_FRAME;
3689 }
3690 writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
3691 cp->regs + REG_MAC_RX_CFG);
3692 writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3693 }
3694
3695 if (netif_msg_link(cp)) {
3696 if (pause & 0x01) {
3697 netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
3698 cp->rx_fifo_size,
3699 cp->rx_pause_off,
3700 cp->rx_pause_on);
3701 } else if (pause & 0x10) {
3702 netdev_info(cp->dev, "TX pause enabled\n");
3703 } else {
3704 netdev_info(cp->dev, "Pause is disabled\n");
3705 }
3706 }
3707
3708 val = readl(cp->regs + REG_MAC_CTRL_CFG);
3709 val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
3710 if (pause) {
3711 val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
3712 if (pause & 0x01) {
3713 val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
3714 }
3715 }
3716 writel(val, cp->regs + REG_MAC_CTRL_CFG);
3717 cas_start_dma(cp);
3718}
3719
3720
3721static void cas_init_hw(struct cas *cp, int restart_link)
3722{
3723 if (restart_link)
3724 cas_phy_init(cp);
3725
3726 cas_init_pause_thresholds(cp);
3727 cas_init_mac(cp);
3728 cas_init_dma(cp);
3729
3730 if (restart_link) {
3731
3732 cp->timer_ticks = 0;
3733 cas_begin_auto_negotiation(cp, NULL);
3734 } else if (cp->lstate == link_up) {
3735 cas_set_link_modes(cp);
3736 netif_carrier_on(cp->dev);
3737 }
3738}
3739
3740
3741
3742
3743
3744static void cas_hard_reset(struct cas *cp)
3745{
3746 writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3747 udelay(20);
3748 pci_restore_state(cp->pdev);
3749}
3750
3751
3752static void cas_global_reset(struct cas *cp, int blkflag)
3753{
3754 int limit;
3755
3756
3757 if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
3758
3759
3760
3761
3762
3763
3764 writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
3765 cp->regs + REG_SW_RESET);
3766 } else {
3767 writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
3768 }
3769
3770
3771 mdelay(3);
3772
3773 limit = STOP_TRIES;
3774 while (limit-- > 0) {
3775 u32 val = readl(cp->regs + REG_SW_RESET);
3776 if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
3777 goto done;
3778 udelay(10);
3779 }
3780 netdev_err(cp->dev, "sw reset failed\n");
3781
3782done:
3783
3784 writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
3785 BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
3786
3787
3788
3789
3790
3791 writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
3792 PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
3793 PCI_ERR_BIM_DMA_READ), cp->regs +
3794 REG_PCI_ERR_STATUS_MASK);
3795
3796
3797
3798
3799 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3800}
3801
3802static void cas_reset(struct cas *cp, int blkflag)
3803{
3804 u32 val;
3805
3806 cas_mask_intr(cp);
3807 cas_global_reset(cp, blkflag);
3808 cas_mac_reset(cp);
3809 cas_entropy_reset(cp);
3810
3811
3812 val = readl(cp->regs + REG_TX_CFG);
3813 val &= ~TX_CFG_DMA_EN;
3814 writel(val, cp->regs + REG_TX_CFG);
3815
3816 val = readl(cp->regs + REG_RX_CFG);
3817 val &= ~RX_CFG_DMA_EN;
3818 writel(val, cp->regs + REG_RX_CFG);
3819
3820
3821 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
3822 (CAS_HP_ALT_FIRMWARE == cas_prog_null)) {
3823 cas_load_firmware(cp, CAS_HP_FIRMWARE);
3824 } else {
3825 cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
3826 }
3827
3828
3829 spin_lock(&cp->stat_lock[N_TX_RINGS]);
3830 cas_clear_mac_err(cp);
3831 spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3832}
3833
3834
3835static void cas_shutdown(struct cas *cp)
3836{
3837 unsigned long flags;
3838
3839
3840 cp->hw_running = 0;
3841
3842 del_timer_sync(&cp->link_timer);
3843
3844
3845#if 0
3846 while (atomic_read(&cp->reset_task_pending_mtu) ||
3847 atomic_read(&cp->reset_task_pending_spare) ||
3848 atomic_read(&cp->reset_task_pending_all))
3849 schedule();
3850
3851#else
3852 while (atomic_read(&cp->reset_task_pending))
3853 schedule();
3854#endif
3855
3856 cas_lock_all_save(cp, flags);
3857 cas_reset(cp, 0);
3858 if (cp->cas_flags & CAS_FLAG_SATURN)
3859 cas_phy_powerdown(cp);
3860 cas_unlock_all_restore(cp, flags);
3861}
3862
3863static int cas_change_mtu(struct net_device *dev, int new_mtu)
3864{
3865 struct cas *cp = netdev_priv(dev);
3866
3867 dev->mtu = new_mtu;
3868 if (!netif_running(dev) || !netif_device_present(dev))
3869 return 0;
3870
3871
3872#if 1
3873 atomic_inc(&cp->reset_task_pending);
3874 if ((cp->phy_type & CAS_PHY_SERDES)) {
3875 atomic_inc(&cp->reset_task_pending_all);
3876 } else {
3877 atomic_inc(&cp->reset_task_pending_mtu);
3878 }
3879 schedule_work(&cp->reset_task);
3880#else
3881 atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
3882 CAS_RESET_ALL : CAS_RESET_MTU);
3883 pr_err("reset called in cas_change_mtu\n");
3884 schedule_work(&cp->reset_task);
3885#endif
3886
3887 flush_work(&cp->reset_task);
3888 return 0;
3889}
3890
3891static void cas_clean_txd(struct cas *cp, int ring)
3892{
3893 struct cas_tx_desc *txd = cp->init_txds[ring];
3894 struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
3895 u64 daddr, dlen;
3896 int i, size;
3897
3898 size = TX_DESC_RINGN_SIZE(ring);
3899 for (i = 0; i < size; i++) {
3900 int frag;
3901
3902 if (skbs[i] == NULL)
3903 continue;
3904
3905 skb = skbs[i];
3906 skbs[i] = NULL;
3907
3908 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
3909 int ent = i & (size - 1);
3910
3911
3912
3913
3914 daddr = le64_to_cpu(txd[ent].buffer);
3915 dlen = CAS_VAL(TX_DESC_BUFLEN,
3916 le64_to_cpu(txd[ent].control));
3917 pci_unmap_page(cp->pdev, daddr, dlen,
3918 PCI_DMA_TODEVICE);
3919
3920 if (frag != skb_shinfo(skb)->nr_frags) {
3921 i++;
3922
3923
3924
3925
3926 ent = i & (size - 1);
3927 if (cp->tx_tiny_use[ring][ent].used)
3928 i++;
3929 }
3930 }
3931 dev_kfree_skb_any(skb);
3932 }
3933
3934
3935 memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
3936}
3937
3938
3939static inline void cas_free_rx_desc(struct cas *cp, int ring)
3940{
3941 cas_page_t **page = cp->rx_pages[ring];
3942 int i, size;
3943
3944 size = RX_DESC_RINGN_SIZE(ring);
3945 for (i = 0; i < size; i++) {
3946 if (page[i]) {
3947 cas_page_free(cp, page[i]);
3948 page[i] = NULL;
3949 }
3950 }
3951}
3952
3953static void cas_free_rxds(struct cas *cp)
3954{
3955 int i;
3956
3957 for (i = 0; i < N_RX_DESC_RINGS; i++)
3958 cas_free_rx_desc(cp, i);
3959}
3960
3961
3962static void cas_clean_rings(struct cas *cp)
3963{
3964 int i;
3965
3966
3967 memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
3968 memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
3969 for (i = 0; i < N_TX_RINGS; i++)
3970 cas_clean_txd(cp, i);
3971
3972
3973 memset(cp->init_block, 0, sizeof(struct cas_init_block));
3974 cas_clean_rxds(cp);
3975 cas_clean_rxcs(cp);
3976}
3977
3978
3979static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
3980{
3981 cas_page_t **page = cp->rx_pages[ring];
3982 int size, i = 0;
3983
3984 size = RX_DESC_RINGN_SIZE(ring);
3985 for (i = 0; i < size; i++) {
3986 if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
3987 return -1;
3988 }
3989 return 0;
3990}
3991
3992static int cas_alloc_rxds(struct cas *cp)
3993{
3994 int i;
3995
3996 for (i = 0; i < N_RX_DESC_RINGS; i++) {
3997 if (cas_alloc_rx_desc(cp, i) < 0) {
3998 cas_free_rxds(cp);
3999 return -1;
4000 }
4001 }
4002 return 0;
4003}
4004
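/* Deferred reset handler.  Detaches the netdev, recovers spare RX pages,
 * and unless only spare recovery was requested performs a chip reset,
 * cleans the rings and re-initializes the hardware before re-attaching.
 */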
4005static void cas_reset_task(struct work_struct *work)
4006{
4007 struct cas *cp = container_of(work, struct cas, reset_task);
4008#if 0
4009 int pending = atomic_read(&cp->reset_task_pending);
4010#else
4011 int pending_all = atomic_read(&cp->reset_task_pending_all);
4012 int pending_spare = atomic_read(&cp->reset_task_pending_spare);
4013 int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);
4014
4015 if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) {
4016
4017
4018
4019 atomic_dec(&cp->reset_task_pending);
4020 return;
4021 }
4022#endif
4023
4024
4025
4026
4027 if (cp->hw_running) {
4028 unsigned long flags;
4029
4030
4031 netif_device_detach(cp->dev);
4032 cas_lock_all_save(cp, flags);
4033
4034 if (cp->opened) {
4035
4036
4037
4038
4039 cas_spare_recover(cp, GFP_ATOMIC);
4040 }
4041#if 1
4042
4043 if (!pending_all && !pending_mtu)
4044 goto done;
4045#else
4046 if (pending == CAS_RESET_SPARE)
4047 goto done;
4048#endif
4049
4050
4051
4052
4053
4054
4055
4056#if 1
4057 cas_reset(cp, !(pending_all > 0));
4058 if (cp->opened)
4059 cas_clean_rings(cp);
4060 cas_init_hw(cp, (pending_all > 0));
4061#else
4062 cas_reset(cp, !(pending == CAS_RESET_ALL));
4063 if (cp->opened)
4064 cas_clean_rings(cp);
4065 cas_init_hw(cp, pending == CAS_RESET_ALL);
4066#endif
4067
4068done:
4069 cas_unlock_all_restore(cp, flags);
4070 netif_device_attach(cp->dev);
4071 }
4072#if 1
4073 atomic_sub(pending_all, &cp->reset_task_pending_all);
4074 atomic_sub(pending_spare, &cp->reset_task_pending_spare);
4075 atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
4076 atomic_dec(&cp->reset_task_pending);
4077#else
4078 atomic_set(&cp->reset_task_pending, 0);
4079#endif
4080}
4081
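/* Periodic link timer: gather entropy, retry any deferred RX descriptor
 * posts, check MII/PCS link state and watch for a wedged TX MAC or FIFO,
 * scheduling a full reset if anything looks stuck.
 */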
4082static void cas_link_timer(struct timer_list *t)
4083{
4084 struct cas *cp = from_timer(cp, t, link_timer);
4085 int mask, pending = 0, reset = 0;
4086 unsigned long flags;
4087
4088 if (link_transition_timeout != 0 &&
4089 cp->link_transition_jiffies_valid &&
4090 ((jiffies - cp->link_transition_jiffies) >
4091 (link_transition_timeout))) {
4092
4093
4094
4095
4096 cp->link_transition_jiffies_valid = 0;
4097 }
4098
4099 if (!cp->hw_running)
4100 return;
4101
4102 spin_lock_irqsave(&cp->lock, flags);
4103 cas_lock_tx(cp);
4104 cas_entropy_gather(cp);
4105
4106
4107
4108
4109#if 1
4110 if (atomic_read(&cp->reset_task_pending_all) ||
4111 atomic_read(&cp->reset_task_pending_spare) ||
4112 atomic_read(&cp->reset_task_pending_mtu))
4113 goto done;
4114#else
4115 if (atomic_read(&cp->reset_task_pending))
4116 goto done;
4117#endif
4118
4119
4120 if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) {
4121 int i, rmask;
4122
4123 for (i = 0; i < MAX_RX_DESC_RINGS; i++) {
4124 rmask = CAS_FLAG_RXD_POST(i);
4125 if ((mask & rmask) == 0)
4126 continue;
4127
4128
4129 if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
4130 pending = 1;
4131 continue;
4132 }
4133 cp->cas_flags &= ~rmask;
4134 }
4135 }
4136
4137 if (CAS_PHY_MII(cp->phy_type)) {
4138 u16 bmsr;
4139 cas_mif_poll(cp, 0);
4140 bmsr = cas_phy_read(cp, MII_BMSR);
4141
4142
4143
4144
4145
4146 bmsr = cas_phy_read(cp, MII_BMSR);
4147 cas_mif_poll(cp, 1);
4148 readl(cp->regs + REG_MIF_STATUS);
4149 reset = cas_mii_link_check(cp, bmsr);
4150 } else {
4151 reset = cas_pcs_link_check(cp);
4152 }
4153
4154 if (reset)
4155 goto done;
4156
4157
4158 if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
4159 u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
4160 u32 wptr, rptr;
4161 int tlm = CAS_VAL(MAC_SM_TLM, val);
4162
4163 if (((tlm == 0x5) || (tlm == 0x3)) &&
4164 (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
4165 netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
4166 "tx err: MAC_STATE[%08x]\n", val);
4167 reset = 1;
4168 goto done;
4169 }
4170
4171 val = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
4172 wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
4173 rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
4174 if ((val == 0) && (wptr != rptr)) {
4175 netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
4176 "tx err: TX_FIFO[%08x:%08x:%08x]\n",
4177 val, wptr, rptr);
4178 reset = 1;
4179 }
4180
4181 if (reset)
4182 cas_hard_reset(cp);
4183 }
4184
4185done:
4186 if (reset) {
4187#if 1
4188 atomic_inc(&cp->reset_task_pending);
4189 atomic_inc(&cp->reset_task_pending_all);
4190 schedule_work(&cp->reset_task);
4191#else
4192 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
4193 pr_err("reset called in cas_link_timer\n");
4194 schedule_work(&cp->reset_task);
4195#endif
4196 }
4197
4198 if (!pending)
4199 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
4200 cas_unlock_tx(cp);
4201 spin_unlock_irqrestore(&cp->lock, flags);
4202}
4203
4204
4205
4206
4207static void cas_tx_tiny_free(struct cas *cp)
4208{
4209 struct pci_dev *pdev = cp->pdev;
4210 int i;
4211
4212 for (i = 0; i < N_TX_RINGS; i++) {
4213 if (!cp->tx_tiny_bufs[i])
4214 continue;
4215
4216 pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
4217 cp->tx_tiny_bufs[i],
4218 cp->tx_tiny_dvma[i]);
4219 cp->tx_tiny_bufs[i] = NULL;
4220 }
4221}
4222
4223static int cas_tx_tiny_alloc(struct cas *cp)
4224{
4225 struct pci_dev *pdev = cp->pdev;
4226 int i;
4227
4228 for (i = 0; i < N_TX_RINGS; i++) {
4229 cp->tx_tiny_bufs[i] =
4230 pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
4231 &cp->tx_tiny_dvma[i]);
4232 if (!cp->tx_tiny_bufs[i]) {
4233 cas_tx_tiny_free(cp);
4234 return -1;
4235 }
4236 }
4237 return 0;
4238}
4239
4240
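/* open(): power up the chip if it was down, allocate the tiny TX bounce
 * buffers, RX pages and spare pool, hook up the interrupt and then
 * initialize the hardware with all locks held.
 */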
4241static int cas_open(struct net_device *dev)
4242{
4243 struct cas *cp = netdev_priv(dev);
4244 int hw_was_up, err;
4245 unsigned long flags;
4246
4247 mutex_lock(&cp->pm_mutex);
4248
4249 hw_was_up = cp->hw_running;
4250
4251
4252
4253
4254 if (!cp->hw_running) {
4255
4256 cas_lock_all_save(cp, flags);
4257
4258
4259
4260
4261
4262 cas_reset(cp, 0);
4263 cp->hw_running = 1;
4264 cas_unlock_all_restore(cp, flags);
4265 }
4266
4267 err = -ENOMEM;
4268 if (cas_tx_tiny_alloc(cp) < 0)
4269 goto err_unlock;
4270
4271
4272 if (cas_alloc_rxds(cp) < 0)
4273 goto err_tx_tiny;
4274
4275
4276 cas_spare_init(cp);
4277 cas_spare_recover(cp, GFP_KERNEL);
4278
4279
4280
4281
4282
4283
4284 if (request_irq(cp->pdev->irq, cas_interrupt,
4285 IRQF_SHARED, dev->name, (void *) dev)) {
4286 netdev_err(cp->dev, "failed to request irq !\n");
4287 err = -EAGAIN;
4288 goto err_spare;
4289 }
4290
4291#ifdef USE_NAPI
4292 napi_enable(&cp->napi);
4293#endif
4294
4295 cas_lock_all_save(cp, flags);
4296 cas_clean_rings(cp);
4297 cas_init_hw(cp, !hw_was_up);
4298 cp->opened = 1;
4299 cas_unlock_all_restore(cp, flags);
4300
4301 netif_start_queue(dev);
4302 mutex_unlock(&cp->pm_mutex);
4303 return 0;
4304
4305err_spare:
4306 cas_spare_free(cp);
4307 cas_free_rxds(cp);
4308err_tx_tiny:
4309 cas_tx_tiny_free(cp);
4310err_unlock:
4311 mutex_unlock(&cp->pm_mutex);
4312 return err;
4313}
4314
4315static int cas_close(struct net_device *dev)
4316{
4317 unsigned long flags;
4318 struct cas *cp = netdev_priv(dev);
4319
4320#ifdef USE_NAPI
4321 napi_disable(&cp->napi);
4322#endif
4323
4324 mutex_lock(&cp->pm_mutex);
4325
4326 netif_stop_queue(dev);
4327
4328
4329 cas_lock_all_save(cp, flags);
4330 cp->opened = 0;
4331 cas_reset(cp, 0);
4332 cas_phy_init(cp);
4333 cas_begin_auto_negotiation(cp, NULL);
4334 cas_clean_rings(cp);
4335 cas_unlock_all_restore(cp, flags);
4336
4337 free_irq(cp->pdev->irq, (void *) dev);
4338 cas_spare_free(cp);
4339 cas_free_rxds(cp);
4340 cas_tx_tiny_free(cp);
4341 mutex_unlock(&cp->pm_mutex);
4342 return 0;
4343}
4344
4345static struct {
4346 const char name[ETH_GSTRING_LEN];
4347} ethtool_cassini_statnames[] = {
4348 {"collisions"},
4349 {"rx_bytes"},
4350 {"rx_crc_errors"},
4351 {"rx_dropped"},
4352 {"rx_errors"},
4353 {"rx_fifo_errors"},
4354 {"rx_frame_errors"},
4355 {"rx_length_errors"},
4356 {"rx_over_errors"},
4357 {"rx_packets"},
4358 {"tx_aborted_errors"},
4359 {"tx_bytes"},
4360 {"tx_dropped"},
4361 {"tx_errors"},
4362 {"tx_fifo_errors"},
4363 {"tx_packets"}
4364};
4365#define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames)
4366
4367static struct {
4368 const int offsets;
4369} ethtool_register_table[] = {
4370 {-MII_BMSR},
4371 {-MII_BMCR},
4372 {REG_CAWR},
4373 {REG_INF_BURST},
4374 {REG_BIM_CFG},
4375 {REG_RX_CFG},
4376 {REG_HP_CFG},
4377 {REG_MAC_TX_CFG},
4378 {REG_MAC_RX_CFG},
4379 {REG_MAC_CTRL_CFG},
4380 {REG_MAC_XIF_CFG},
4381 {REG_MIF_CFG},
4382 {REG_PCS_CFG},
4383 {REG_SATURN_PCFG},
4384 {REG_PCS_MII_STATUS},
4385 {REG_PCS_STATE_MACHINE},
4386 {REG_MAC_COLL_EXCESS},
4387 {REG_MAC_COLL_LATE}
4388};
4389#define CAS_REG_LEN ARRAY_SIZE(ethtool_register_table)
4390#define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN)
4391
4392static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
4393{
4394 u8 *p;
4395 int i;
4396 unsigned long flags;
4397
4398 spin_lock_irqsave(&cp->lock, flags);
4399 for (i = 0, p = ptr; i < len ; i ++, p += sizeof(u32)) {
4400 u16 hval;
4401 u32 val;
4402 if (ethtool_register_table[i].offsets < 0) {
4403 hval = cas_phy_read(cp,
4404 -ethtool_register_table[i].offsets);
4405 val = hval;
4406 } else {
4407 val= readl(cp->regs+ethtool_register_table[i].offsets);
4408 }
4409 memcpy(p, (u8 *)&val, sizeof(u32));
4410 }
4411 spin_unlock_irqrestore(&cp->lock, flags);
4412}
4413
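/* Fold the hardware MAC error counters and the per-ring software counters
 * into the aggregate slot at stats[N_TX_RINGS], clearing the per-ring
 * counters as they are consumed.
 */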
4414static struct net_device_stats *cas_get_stats(struct net_device *dev)
4415{
4416 struct cas *cp = netdev_priv(dev);
4417 struct net_device_stats *stats = cp->net_stats;
4418 unsigned long flags;
4419 int i;
4420 unsigned long tmp;
4421
4422
4423 if (!cp->hw_running)
4424 return stats + N_TX_RINGS;
4425
4426
4427
4428
4429
4430
4431
4432
4433
4434 spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
4435 stats[N_TX_RINGS].rx_crc_errors +=
4436 readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
4437 stats[N_TX_RINGS].rx_frame_errors +=
4438 readl(cp->regs + REG_MAC_ALIGN_ERR) &0xffff;
4439 stats[N_TX_RINGS].rx_length_errors +=
4440 readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
4441#if 1
4442 tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
4443 (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff);
4444 stats[N_TX_RINGS].tx_aborted_errors += tmp;
4445 stats[N_TX_RINGS].collisions +=
4446 tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
4447#else
4448 stats[N_TX_RINGS].tx_aborted_errors +=
4449 readl(cp->regs + REG_MAC_COLL_EXCESS);
4450 stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
4451 readl(cp->regs + REG_MAC_COLL_LATE);
4452#endif
4453 cas_clear_mac_err(cp);
4454
4455
4456 spin_lock(&cp->stat_lock[0]);
4457 stats[N_TX_RINGS].collisions += stats[0].collisions;
4458 stats[N_TX_RINGS].rx_over_errors += stats[0].rx_over_errors;
4459 stats[N_TX_RINGS].rx_frame_errors += stats[0].rx_frame_errors;
4460 stats[N_TX_RINGS].rx_fifo_errors += stats[0].rx_fifo_errors;
4461 stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors;
4462 stats[N_TX_RINGS].tx_fifo_errors += stats[0].tx_fifo_errors;
4463 spin_unlock(&cp->stat_lock[0]);
4464
4465 for (i = 0; i < N_TX_RINGS; i++) {
4466 spin_lock(&cp->stat_lock[i]);
4467 stats[N_TX_RINGS].rx_length_errors +=
4468 stats[i].rx_length_errors;
4469 stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
4470 stats[N_TX_RINGS].rx_packets += stats[i].rx_packets;
4471 stats[N_TX_RINGS].tx_packets += stats[i].tx_packets;
4472 stats[N_TX_RINGS].rx_bytes += stats[i].rx_bytes;
4473 stats[N_TX_RINGS].tx_bytes += stats[i].tx_bytes;
4474 stats[N_TX_RINGS].rx_errors += stats[i].rx_errors;
4475 stats[N_TX_RINGS].tx_errors += stats[i].tx_errors;
4476 stats[N_TX_RINGS].rx_dropped += stats[i].rx_dropped;
4477 stats[N_TX_RINGS].tx_dropped += stats[i].tx_dropped;
4478 memset(stats + i, 0, sizeof(struct net_device_stats));
4479 spin_unlock(&cp->stat_lock[i]);
4480 }
4481 spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags);
4482 return stats + N_TX_RINGS;
4483}
4484
4485
4486static void cas_set_multicast(struct net_device *dev)
4487{
4488 struct cas *cp = netdev_priv(dev);
4489 u32 rxcfg, rxcfg_new;
4490 unsigned long flags;
4491 int limit = STOP_TRIES;
4492
4493 if (!cp->hw_running)
4494 return;
4495
4496 spin_lock_irqsave(&cp->lock, flags);
4497 rxcfg = readl(cp->regs + REG_MAC_RX_CFG);
4498
4499
4500 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4501 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
4502 if (!limit--)
4503 break;
4504 udelay(10);
4505 }
4506
4507
4508 limit = STOP_TRIES;
4509 rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN);
4510 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4511 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) {
4512 if (!limit--)
4513 break;
4514 udelay(10);
4515 }
4516
4517
4518 cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp);
4519 rxcfg |= rxcfg_new;
4520 writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
4521 spin_unlock_irqrestore(&cp->lock, flags);
4522}
4523
4524static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4525{
4526 struct cas *cp = netdev_priv(dev);
4527 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
4528 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
4529 strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
4530}
4531
4532static int cas_get_link_ksettings(struct net_device *dev,
4533 struct ethtool_link_ksettings *cmd)
4534{
4535 struct cas *cp = netdev_priv(dev);
4536 u16 bmcr;
4537 int full_duplex, speed, pause;
4538 unsigned long flags;
4539 enum link_state linkstate = link_up;
4540 u32 supported, advertising;
4541
4542 advertising = 0;
4543 supported = SUPPORTED_Autoneg;
4544 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
4545 supported |= SUPPORTED_1000baseT_Full;
4546 advertising |= ADVERTISED_1000baseT_Full;
4547 }
4548
4549
4550 spin_lock_irqsave(&cp->lock, flags);
4551 bmcr = 0;
4552 linkstate = cp->lstate;
4553 if (CAS_PHY_MII(cp->phy_type)) {
4554 cmd->base.port = PORT_MII;
4555 cmd->base.phy_address = cp->phy_addr;
4556 advertising |= ADVERTISED_TP | ADVERTISED_MII |
4557 ADVERTISED_10baseT_Half |
4558 ADVERTISED_10baseT_Full |
4559 ADVERTISED_100baseT_Half |
4560 ADVERTISED_100baseT_Full;
4561
4562 supported |=
4563 (SUPPORTED_10baseT_Half |
4564 SUPPORTED_10baseT_Full |
4565 SUPPORTED_100baseT_Half |
4566 SUPPORTED_100baseT_Full |
4567 SUPPORTED_TP | SUPPORTED_MII);
4568
4569 if (cp->hw_running) {
4570 cas_mif_poll(cp, 0);
4571 bmcr = cas_phy_read(cp, MII_BMCR);
4572 cas_read_mii_link_mode(cp, &full_duplex,
4573 &speed, &pause);
4574 cas_mif_poll(cp, 1);
4575 }
4576
4577 } else {
4578 cmd->base.port = PORT_FIBRE;
4579 cmd->base.phy_address = 0;
4580 supported |= SUPPORTED_FIBRE;
4581 advertising |= ADVERTISED_FIBRE;
4582
4583 if (cp->hw_running) {
4584
4585 bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
4586 cas_read_pcs_link_mode(cp, &full_duplex,
4587 &speed, &pause);
4588 }
4589 }
4590 spin_unlock_irqrestore(&cp->lock, flags);
4591
4592 if (bmcr & BMCR_ANENABLE) {
4593 advertising |= ADVERTISED_Autoneg;
4594 cmd->base.autoneg = AUTONEG_ENABLE;
4595 cmd->base.speed = ((speed == 10) ?
4596 SPEED_10 :
4597 ((speed == 1000) ?
4598 SPEED_1000 : SPEED_100));
4599 cmd->base.duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
4600 } else {
4601 cmd->base.autoneg = AUTONEG_DISABLE;
4602 cmd->base.speed = ((bmcr & CAS_BMCR_SPEED1000) ?
4603 SPEED_1000 :
4604 ((bmcr & BMCR_SPEED100) ?
4605 SPEED_100 : SPEED_10));
4606 cmd->base.duplex = (bmcr & BMCR_FULLDPLX) ?
4607 DUPLEX_FULL : DUPLEX_HALF;
4608 }
4609 if (linkstate != link_up) {
4610
4611
4612
4613
4614
4615
4616
4617
4618
4619
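/* Link is down: report the user-configured settings (unknown speed and
 * duplex under autonegotiation) rather than stale negotiated values.
 */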
4620 if (cp->link_cntl & BMCR_ANENABLE) {
4621 cmd->base.speed = 0;
4622 cmd->base.duplex = 0xff;
4623 } else {
4624 cmd->base.speed = SPEED_10;
4625 if (cp->link_cntl & BMCR_SPEED100) {
4626 cmd->base.speed = SPEED_100;
4627 } else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
4628 cmd->base.speed = SPEED_1000;
4629 }
4630 cmd->base.duplex = (cp->link_cntl & BMCR_FULLDPLX) ?
4631 DUPLEX_FULL : DUPLEX_HALF;
4632 }
4633 }
4634
4635 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
4636 supported);
4637 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
4638 advertising);
4639
4640 return 0;
4641}
4642
4643static int cas_set_link_ksettings(struct net_device *dev,
4644 const struct ethtool_link_ksettings *cmd)
4645{
4646 struct cas *cp = netdev_priv(dev);
4647 unsigned long flags;
4648 u32 speed = cmd->base.speed;
4649
4650
4651 if (cmd->base.autoneg != AUTONEG_ENABLE &&
4652 cmd->base.autoneg != AUTONEG_DISABLE)
4653 return -EINVAL;
4654
4655 if (cmd->base.autoneg == AUTONEG_DISABLE &&
4656 ((speed != SPEED_1000 &&
4657 speed != SPEED_100 &&
4658 speed != SPEED_10) ||
4659 (cmd->base.duplex != DUPLEX_HALF &&
4660 cmd->base.duplex != DUPLEX_FULL)))
4661 return -EINVAL;
4662
4663
4664 spin_lock_irqsave(&cp->lock, flags);
4665 cas_begin_auto_negotiation(cp, cmd);
4666 spin_unlock_irqrestore(&cp->lock, flags);
4667 return 0;
4668}
4669
4670static int cas_nway_reset(struct net_device *dev)
4671{
4672 struct cas *cp = netdev_priv(dev);
4673 unsigned long flags;
4674
4675 if ((cp->link_cntl & BMCR_ANENABLE) == 0)
4676 return -EINVAL;
4677
4678
4679 spin_lock_irqsave(&cp->lock, flags);
4680 cas_begin_auto_negotiation(cp, NULL);
4681 spin_unlock_irqrestore(&cp->lock, flags);
4682
4683 return 0;
4684}
4685
4686static u32 cas_get_link(struct net_device *dev)
4687{
4688 struct cas *cp = netdev_priv(dev);
4689 return cp->lstate == link_up;
4690}
4691
4692static u32 cas_get_msglevel(struct net_device *dev)
4693{
4694 struct cas *cp = netdev_priv(dev);
4695 return cp->msg_enable;
4696}
4697
4698static void cas_set_msglevel(struct net_device *dev, u32 value)
4699{
4700 struct cas *cp = netdev_priv(dev);
4701 cp->msg_enable = value;
4702}
4703
4704static int cas_get_regs_len(struct net_device *dev)
4705{
4706 struct cas *cp = netdev_priv(dev);
4707 return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len : CAS_MAX_REGS;
4708}
4709
4710static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
4711 void *p)
4712{
4713 struct cas *cp = netdev_priv(dev);
4714 regs->version = 0;
/* cas_read_regs() handles the required locking (cp->lock) itself. */
4716 cas_read_regs(cp, p, regs->len / sizeof(u32));
4717}
4718
4719static int cas_get_sset_count(struct net_device *dev, int sset)
4720{
4721 switch (sset) {
4722 case ETH_SS_STATS:
4723 return CAS_NUM_STAT_KEYS;
4724 default:
4725 return -EOPNOTSUPP;
4726 }
4727}
4728
4729static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4730{
4731 memcpy(data, &ethtool_cassini_statnames,
4732 CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
4733}
4734
4735static void cas_get_ethtool_stats(struct net_device *dev,
4736 struct ethtool_stats *estats, u64 *data)
4737{
4738 struct cas *cp = netdev_priv(dev);
4739 struct net_device_stats *stats = cas_get_stats(cp->dev);
4740 int i = 0;
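/* The order of these entries must stay in step with ethtool_cassini_statnames[] (see cas_get_strings()). */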
4741 data[i++] = stats->collisions;
4742 data[i++] = stats->rx_bytes;
4743 data[i++] = stats->rx_crc_errors;
4744 data[i++] = stats->rx_dropped;
4745 data[i++] = stats->rx_errors;
4746 data[i++] = stats->rx_fifo_errors;
4747 data[i++] = stats->rx_frame_errors;
4748 data[i++] = stats->rx_length_errors;
4749 data[i++] = stats->rx_over_errors;
4750 data[i++] = stats->rx_packets;
4751 data[i++] = stats->tx_aborted_errors;
4752 data[i++] = stats->tx_bytes;
4753 data[i++] = stats->tx_dropped;
4754 data[i++] = stats->tx_errors;
4755 data[i++] = stats->tx_fifo_errors;
4756 data[i++] = stats->tx_packets;
4757 BUG_ON(i != CAS_NUM_STAT_KEYS);
4758}
4759
4760static const struct ethtool_ops cas_ethtool_ops = {
4761 .get_drvinfo = cas_get_drvinfo,
4762 .nway_reset = cas_nway_reset,
4763 .get_link = cas_get_link,
4764 .get_msglevel = cas_get_msglevel,
4765 .set_msglevel = cas_set_msglevel,
4766 .get_regs_len = cas_get_regs_len,
4767 .get_regs = cas_get_regs,
4768 .get_sset_count = cas_get_sset_count,
4769 .get_strings = cas_get_strings,
4770 .get_ethtool_stats = cas_get_ethtool_stats,
4771 .get_link_ksettings = cas_get_link_ksettings,
4772 .set_link_ksettings = cas_set_link_ksettings,
4773};
4774
4775static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4776{
4777 struct cas *cp = netdev_priv(dev);
4778 struct mii_ioctl_data *data = if_mii(ifr);
4779 unsigned long flags;
4780 int rc = -EOPNOTSUPP;
4781
/* Hold the PM mutex while doing ioctl's or we may collide
 * with open/close and power management.
 */
4785 mutex_lock(&cp->pm_mutex);
4786 switch (cmd) {
4787 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
4788 data->phy_id = cp->phy_addr;
/* Fallthrough... */

4791 case SIOCGMIIREG: /* Read MII PHY register. */
4792 spin_lock_irqsave(&cp->lock, flags);
4793 cas_mif_poll(cp, 0);
4794 data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
4795 cas_mif_poll(cp, 1);
4796 spin_unlock_irqrestore(&cp->lock, flags);
4797 rc = 0;
4798 break;
4799
4800 case SIOCSMIIREG: /* Write MII PHY register. */
4801 spin_lock_irqsave(&cp->lock, flags);
4802 cas_mif_poll(cp, 0);
4803 rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
4804 cas_mif_poll(cp, 1);
4805 spin_unlock_irqrestore(&cp->lock, flags);
4806 break;
4807 default:
4808 break;
4809 }
4810
4811 mutex_unlock(&cp->pm_mutex);
4812 return rc;
4813}
4814
/* When the Cassini sits behind an Intel 31154 PCI bridge it is the
 * only device on that secondary bus, so the bridge's arbitration and
 * buffering settings can be tuned entirely in the Cassini's favour.
 */
4819static void cas_program_bridge(struct pci_dev *cas_pdev)
4820{
4821 struct pci_dev *pdev = cas_pdev->bus->self;
4822 u32 val;
4823
4824 if (!pdev)
4825 return;
4826
4827 if (pdev->vendor != 0x8086 || pdev->device != 0x537c)
4828 return;
4829
/* Clear bit 18 of the bridge configuration dword at offset 0x40.
 * The offsets and bit layouts used below are specific to the 31154
 * and are not described by the generic PCI config space defines.
 */
4835 pci_read_config_dword(pdev, 0x40, &val);
4836 val &= ~0x00040000;
4837 pci_write_config_dword(pdev, 0x40, val);
4838
/* Write (5 << 10) | 0x3ff to the configuration word at offset 0x50.
 * This is 31154-specific tuning; the field meanings come from the
 * bridge's documentation rather than from generic PCI definitions.
 */
4861 pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff);
4862
/* Max out the five fields packed into the configuration word at
 * offset 0x52 (four 3-bit fields and one 4-bit field).  Again this
 * is 31154-specific tuning taken from the bridge's documentation.
 */
4883 pci_write_config_word(pdev, 0x52,
4884 (0x7 << 13) |
4885 (0x7 << 10) |
4886 (0x7 << 7) |
4887 (0x7 << 4) |
4888 (0xf << 0));
4889
/* Force the cache line size register to 0x08 (8 dwords = 32 bytes). */
4891 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
4892
/* Force the latency timer to its maximum so the Cassini can hold
 * the bus for as long as it needs.
 */
4896 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff);
4897}
4898
4899static const struct net_device_ops cas_netdev_ops = {
4900 .ndo_open = cas_open,
4901 .ndo_stop = cas_close,
4902 .ndo_start_xmit = cas_start_xmit,
4903 .ndo_get_stats = cas_get_stats,
4904 .ndo_set_rx_mode = cas_set_multicast,
4905 .ndo_do_ioctl = cas_ioctl,
4906 .ndo_tx_timeout = cas_tx_timeout,
4907 .ndo_change_mtu = cas_change_mtu,
4908 .ndo_set_mac_address = eth_mac_addr,
4909 .ndo_validate_addr = eth_validate_addr,
4910#ifdef CONFIG_NET_POLL_CONTROLLER
4911 .ndo_poll_controller = cas_netpoll,
4912#endif
4913};
4914
4915static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4916{
4917 static int cas_version_printed = 0;
4918 unsigned long casreg_len;
4919 struct net_device *dev;
4920 struct cas *cp;
4921 int i, err, pci_using_dac;
4922 u16 pci_cmd;
4923 u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
4924
4925 if (cas_version_printed++ == 0)
4926 pr_info("%s", version);
4927
4928 err = pci_enable_device(pdev);
4929 if (err) {
4930 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
4931 return err;
4932 }
4933
4934 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
4935 dev_err(&pdev->dev, "Cannot find proper PCI device "
4936 "base address, aborting\n");
4937 err = -ENODEV;
4938 goto err_out_disable_pdev;
4939 }
4940
4941 dev = alloc_etherdev(sizeof(*cp));
4942 if (!dev) {
4943 err = -ENOMEM;
4944 goto err_out_disable_pdev;
4945 }
4946 SET_NETDEV_DEV(dev, &pdev->dev);
4947
4948 err = pci_request_regions(pdev, dev->name);
4949 if (err) {
4950 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
4951 goto err_out_free_netdev;
4952 }
4953 pci_set_master(pdev);
4954
/* Always enable parity error response (the chip needs it to generate
 * parity correctly), disable SERR reporting, and try to turn on
 * memory-write-invalidate.
 */
4959 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
4960 pci_cmd &= ~PCI_COMMAND_SERR;
4961 pci_cmd |= PCI_COMMAND_PARITY;
4962 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
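/* Enabling memory-write-invalidate is best effort; a failure is only worth a warning. */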
4963 if (pci_try_set_mwi(pdev))
4964 pr_warn("Could not enable MWI for %s\n", pci_name(pdev));
4965
4966 cas_program_bridge(pdev);
4967
/* If the firmware/BIOS left the cache line size below the value the
 * Cassini prefers, raise it (capped at SMP_CACHE_BYTES) and remember
 * the original value so it can be restored on error or removal.
 */
4974#if 1
4975 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
4976 &orig_cacheline_size);
4977 if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
4978 cas_cacheline_size =
4979 (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
4980 CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
4981 if (pci_write_config_byte(pdev,
4982 PCI_CACHE_LINE_SIZE,
4983 cas_cacheline_size)) {
4984 dev_err(&pdev->dev, "Could not set PCI cache "
4985 "line size\n");
4986 goto err_write_cacheline;
4987 }
4988 }
4989#endif
4990
/* Configure DMA attributes: prefer a 64-bit mask, fall back to 32-bit. */
4993 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4994 pci_using_dac = 1;
4995 err = pci_set_consistent_dma_mask(pdev,
4996 DMA_BIT_MASK(64));
4997 if (err < 0) {
4998 dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
4999 "for consistent allocations\n");
5000 goto err_out_free_res;
5001 }
5002
5003 } else {
5004 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5005 if (err) {
5006 dev_err(&pdev->dev, "No usable DMA configuration, "
5007 "aborting\n");
5008 goto err_out_free_res;
5009 }
5010 pci_using_dac = 0;
5011 }
5012
5013 casreg_len = pci_resource_len(pdev, 0);
5014
5015 cp = netdev_priv(dev);
5016 cp->pdev = pdev;
5017#if 1
/* Only remember the original cache line size if we actually changed
 * it; zero means there is nothing to restore at remove time.
 */
5019 cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size : 0;
5020#endif
5021 cp->dev = dev;
5022 cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
5023 cassini_debug;
5024
5025#if defined(CONFIG_SPARC)
5026 cp->of_node = pci_device_to_OF_node(pdev);
5027#endif
5028
5029 cp->link_transition = LINK_TRANSITION_UNKNOWN;
5030 cp->link_transition_jiffies_valid = 0;
5031
5032 spin_lock_init(&cp->lock);
5033 spin_lock_init(&cp->rx_inuse_lock);
5034 spin_lock_init(&cp->rx_spare_lock);
5035 for (i = 0; i < N_TX_RINGS; i++) {
5036 spin_lock_init(&cp->stat_lock[i]);
5037 spin_lock_init(&cp->tx_lock[i]);
5038 }
5039 spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
5040 mutex_init(&cp->pm_mutex);
5041
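/* cas_link_timer() periodically checks and services the link state. */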
5042 timer_setup(&cp->link_timer, cas_link_timer, 0);
5043
5044#if 1
/* The reset task keeps separate pending counters for the "all",
 * "spare" and "mtu" reset flavours; start them all at zero.
 */
5048 atomic_set(&cp->reset_task_pending, 0);
5049 atomic_set(&cp->reset_task_pending_all, 0);
5050 atomic_set(&cp->reset_task_pending_spare, 0);
5051 atomic_set(&cp->reset_task_pending_mtu, 0);
5052#endif
5053 INIT_WORK(&cp->reset_task, cas_reset_task);
5054
/* Initial link mode: take the module parameter if it indexes into
 * link_modes[], otherwise default to autonegotiation.
 */
5056 if (link_mode >= 0 && link_mode < 6)
5057 cp->link_cntl = link_modes[link_mode];
5058 else
5059 cp->link_cntl = BMCR_ANENABLE;
5060 cp->lstate = link_down;
5061 cp->link_transition = LINK_TRANSITION_LINK_DOWN;
5062 netif_carrier_off(cp->dev);
5063 cp->timer_ticks = 0;
5064
/* Map the Cassini register space. */
5066 cp->regs = pci_iomap(pdev, 0, casreg_len);
5067 if (!cp->regs) {
5068 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
5069 goto err_out_free_res;
5070 }
5071 cp->casreg_len = casreg_len;
5072
5073 pci_save_state(pdev);
5074 cas_check_pci_invariants(cp);
5075 cas_hard_reset(cp);
5076 cas_reset(cp, 0);
5077 if (cas_check_invariants(cp))
5078 goto err_out_iounmap;
5079 if (cp->cas_flags & CAS_FLAG_SATURN)
5080 cas_saturn_firmware_init(cp);
5081
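/* The init block bundles the TX, RX descriptor and RX completion
 * rings into a single DMA-coherent allocation.
 */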
5082 cp->init_block = (struct cas_init_block *)
5083 pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
5084 &cp->block_dvma);
5085 if (!cp->init_block) {
5086 dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
5087 goto err_out_iounmap;
5088 }
5089
5090 for (i = 0; i < N_TX_RINGS; i++)
5091 cp->init_txds[i] = cp->init_block->txds[i];
5092
5093 for (i = 0; i < N_RX_DESC_RINGS; i++)
5094 cp->init_rxds[i] = cp->init_block->rxds[i];
5095
5096 for (i = 0; i < N_RX_COMP_RINGS; i++)
5097 cp->init_rxcs[i] = cp->init_block->rxcs[i];
5098
5099 for (i = 0; i < N_RX_FLOWS; i++)
5100 skb_queue_head_init(&cp->rx_flows[i]);
5101
5102 dev->netdev_ops = &cas_netdev_ops;
5103 dev->ethtool_ops = &cas_ethtool_ops;
5104 dev->watchdog_timeo = CAS_TX_TIMEOUT;
5105
5106#ifdef USE_NAPI
5107 netif_napi_add(dev, &cp->napi, cas_poll, 64);
5108#endif
5109 dev->irq = pdev->irq;
5110 dev->dma = 0;
5111
/* Offer hardware checksumming and scatter-gather unless this chip
 * revision cannot do checksums.
 */
5113 if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
5114 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
5115
5116 if (pci_using_dac)
5117 dev->features |= NETIF_F_HIGHDMA;
5118
/* MTU range: 60 - 9000 (or less, depending on the page size). */
5120 dev->min_mtu = CAS_MIN_MTU;
5121 dev->max_mtu = CAS_MAX_MTU;
5122
5123 if (register_netdev(dev)) {
5124 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
5125 goto err_out_free_consistent;
5126 }
/* Report bus width and clock from the BIM configuration register. */
5128 i = readl(cp->regs + REG_BIM_CFG);
5129 netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n",
5130 (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
5131 (i & BIM_CFG_32BIT) ? "32" : "64",
5132 (i & BIM_CFG_66MHZ) ? "66" : "33",
5133 (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
5134 dev->dev_addr);
5135
5136 pci_set_drvdata(pdev, dev);
5137 cp->hw_running = 1;
5138 cas_entropy_reset(cp);
5139 cas_phy_init(cp);
5140 cas_begin_auto_negotiation(cp, NULL);
5141 return 0;
5142
5143err_out_free_consistent:
5144 pci_free_consistent(pdev, sizeof(struct cas_init_block),
5145 cp->init_block, cp->block_dvma);
5146
5147err_out_iounmap:
5148 mutex_lock(&cp->pm_mutex);
5149 if (cp->hw_running)
5150 cas_shutdown(cp);
5151 mutex_unlock(&cp->pm_mutex);
5152
5153 pci_iounmap(pdev, cp->regs);
5154
5155
5156err_out_free_res:
5157 pci_release_regions(pdev);
5158
5159err_write_cacheline:
/* Undo any cache line size change in case the failure happened
 * after we wrote it.
 */
5163 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);
5164
5165err_out_free_netdev:
5166 free_netdev(dev);
5167
5168err_out_disable_pdev:
5169 pci_disable_device(pdev);
5170 return -ENODEV;
5171}
5172
5173static void cas_remove_one(struct pci_dev *pdev)
5174{
5175 struct net_device *dev = pci_get_drvdata(pdev);
5176 struct cas *cp;
5177 if (!dev)
5178 return;
5179
5180 cp = netdev_priv(dev);
5181 unregister_netdev(dev);
5182
5183 vfree(cp->fw_data);
5184
5185 mutex_lock(&cp->pm_mutex);
5186 cancel_work_sync(&cp->reset_task);
5187 if (cp->hw_running)
5188 cas_shutdown(cp);
5189 mutex_unlock(&cp->pm_mutex);
5190
5191#if 1
5192 if (cp->orig_cacheline_size) {
/* Restore the cache line size that was overridden at probe time. */
5196 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
5197 cp->orig_cacheline_size);
5198 }
5199#endif
5200 pci_free_consistent(pdev, sizeof(struct cas_init_block),
5201 cp->init_block, cp->block_dvma);
5202 pci_iounmap(pdev, cp->regs);
5203 free_netdev(dev);
5204 pci_release_regions(pdev);
5205 pci_disable_device(pdev);
5206}
5207
5208#ifdef CONFIG_PM
5209static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
5210{
5211 struct net_device *dev = pci_get_drvdata(pdev);
5212 struct cas *cp = netdev_priv(dev);
5213 unsigned long flags;
5214
5215 mutex_lock(&cp->pm_mutex);
5216
/* If the interface is up, detach it and quiesce the hardware. */
5218 if (cp->opened) {
5219 netif_device_detach(dev);
5220
5221 cas_lock_all_save(cp, flags);
/* We can pass 0 as the second argument of cas_reset() here because
 * cas_resume() will call cas_init_hw() with its second argument set,
 * which restarts autonegotiation.
 */
5228 cas_reset(cp, 0);
5229 cas_clean_rings(cp);
5230 cas_unlock_all_restore(cp, flags);
5231 }
5232
5233 if (cp->hw_running)
5234 cas_shutdown(cp);
5235 mutex_unlock(&cp->pm_mutex);
5236
5237 return 0;
5238}
5239
5240static int cas_resume(struct pci_dev *pdev)
5241{
5242 struct net_device *dev = pci_get_drvdata(pdev);
5243 struct cas *cp = netdev_priv(dev);
5244
5245 netdev_info(dev, "resuming\n");
5246
5247 mutex_lock(&cp->pm_mutex);
5248 cas_hard_reset(cp);
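/* After the hard reset the chip is reprogrammed from scratch; cas_init_hw(cp, 1)
 * below also restarts autonegotiation (see the note in cas_suspend()). */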
5249 if (cp->opened) {
5250 unsigned long flags;
5251 cas_lock_all_save(cp, flags);
5252 cas_reset(cp, 0);
5253 cp->hw_running = 1;
5254 cas_clean_rings(cp);
5255 cas_init_hw(cp, 1);
5256 cas_unlock_all_restore(cp, flags);
5257
5258 netif_device_attach(dev);
5259 }
5260 mutex_unlock(&cp->pm_mutex);
5261 return 0;
5262}
5263#endif
5264
5265static struct pci_driver cas_driver = {
5266 .name = DRV_MODULE_NAME,
5267 .id_table = cas_pci_tbl,
5268 .probe = cas_init_one,
5269 .remove = cas_remove_one,
5270#ifdef CONFIG_PM
5271 .suspend = cas_suspend,
5272 .resume = cas_resume
5273#endif
5274};
5275
5276static int __init cas_init(void)
5277{
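/* linkdown_timeout is given in seconds; convert it to jiffies.  A
 * non-positive value disables the PCS link-down reset workaround. */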
5278 if (linkdown_timeout > 0)
5279 link_transition_timeout = linkdown_timeout * HZ;
5280 else
5281 link_transition_timeout = 0;
5282
5283 return pci_register_driver(&cas_driver);
5284}
5285
5286static void __exit cas_cleanup(void)
5287{
5288 pci_unregister_driver(&cas_driver);
5289}
5290
5291module_init(cas_init);
5292module_exit(cas_cleanup);
5293