1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include <linux/module.h>
20#include <linux/ioport.h>
21#include <linux/netdevice.h>
22#include <linux/etherdevice.h>
23#include <linux/init.h>
24#include <linux/skbuff.h>
25#include <linux/spinlock.h>
26#include <linux/crc32.h>
27#include <linux/mii.h>
28#include <linux/ethtool.h>
29#include <linux/delay.h>
30#include <linux/platform_device.h>
31#include <linux/irq.h>
32#include <linux/io.h>
33
34#include <asm/irq.h>
35
36#include <mach/regs-switch.h>
37#include <mach/regs-misc.h>
38
39#include "ks8695net.h"
40
/* Driver identity strings used for logging, ethtool drvinfo and the
 * platform-driver name match. */
#define MODULENAME "ks8695_ether"
#define MODULEVERSION "1.01"

/* Transmit timeout in milliseconds; also (re)used as the iteration
 * count when polling for DMA-engine reset completion.  Settable as a
 * module parameter (see bottom of file). */
static int watchdog = 5000;
48
49
50
51
52
53
54
55
56
57
/*
 * struct rx_ring_desc - Receive descriptor as laid out for the KS8695
 *                       DMA engine (all fields little-endian).
 * @status:    Ownership bit (RDES_OWN) plus completion/error flags.
 * @length:    Size of the attached receive buffer.
 * @data_ptr:  Bus address of the receive buffer.
 * @next_desc: Bus address of the next descriptor in the ring.
 */
struct rx_ring_desc {
	__le32	status;
	__le32	length;
	__le32	data_ptr;
	__le32	next_desc;
};
64
65
66
67
68
69
70
71
/*
 * struct tx_ring_desc - Transmit descriptor as laid out for the KS8695
 *                       DMA engine (all fields little-endian).
 * @owner:     Ownership bit (TDES_OWN); set last, after a wmb(), to
 *             hand the descriptor to the hardware.
 * @status:    Control flags and buffer size (TDES_IC/FS/LS/TBS).
 * @data_ptr:  Bus address of the frame to transmit.
 * @next_desc: Bus address of the next descriptor in the ring.
 */
struct tx_ring_desc {
	__le32	owner;
	__le32	status;
	__le32	data_ptr;
	__le32	next_desc;
};
78
79
80
81
82
83
84
/*
 * struct ks8695_skbuff - Driver-side bookkeeping for one ring slot.
 * @skb:     The socket buffer attached to the slot (NULL when free).
 * @dma_ptr: DMA mapping of the buffer, for dma_unmap_single().
 * @length:  Mapped length, needed to unmap and for TX byte accounting.
 */
struct ks8695_skbuff {
	struct sk_buff	*skb;
	dma_addr_t	dma_ptr;
	u32		length;
};
90
91
92
/* Ring geometry: both ring sizes are powers of two so that the _MASK
 * values can be used for cheap index wrap-around. */
#define MAX_TX_DESC 8
#define MAX_TX_DESC_MASK 0x7
#define MAX_RX_DESC 16
#define MAX_RX_DESC_MASK 0xf

/* Receive buffer size (0x700 = 1792 bytes, enough for a full
 * ethernet frame). */
#define MAX_RXBUF_SIZE 0x700

/* Both rings live in one coherent DMA allocation: TX ring first,
 * RX ring immediately after (see ks8695_probe()). */
#define TX_RING_DMA_SIZE (sizeof(struct tx_ring_desc) * MAX_TX_DESC)
#define RX_RING_DMA_SIZE (sizeof(struct rx_ring_desc) * MAX_RX_DESC)
#define RING_DMA_SIZE (TX_RING_DMA_SIZE + RX_RING_DMA_SIZE)
103
104
105
106
107
108
109
/*
 * enum ks8695_dtype - Which of the KS8695's MACs this device drives.
 * The type is deduced in ks8695_probe() from which resources the
 * platform device provides (switch regs and/or link IRQ).
 */
enum ks8695_dtype {
	KS8695_DTYPE_WAN,
	KS8695_DTYPE_LAN,
	KS8695_DTYPE_HPNA,
};
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
/*
 * struct ks8695_priv - Per-interface private driver state.
 *
 * Ring ownership model: TX slots are produced in ks8695_start_xmit()
 * under @txq_lock and reaped in the TX IRQ handler; RX slots are
 * consumed in the RX IRQ handler and refilled by
 * ks8695_refill_rxbuffers().
 */
struct ks8695_priv {
	int in_suspend;			/* set across suspend/resume */
	struct net_device *ndev;	/* back-pointer to our net_device */
	struct device *dev;		/* underlying platform device */
	enum ks8695_dtype dtype;	/* WAN/LAN/HPNA personality */
	void __iomem *io_regs;		/* mapped MAC register block */

	const char *rx_irq_name, *tx_irq_name, *link_irq_name;
	int rx_irq, tx_irq, link_irq;	/* link_irq is -1 when absent */

	struct resource *regs_req, *phyiface_req;
	void __iomem *phyiface_regs;	/* switch/WAN-PHY registers (may be NULL) */

	void *ring_base;		/* single coherent alloc holding both rings */
	dma_addr_t ring_base_dma;

	struct tx_ring_desc *tx_ring;
	int tx_ring_used;		/* descriptors currently owned by HW */
	int tx_ring_next_slot;		/* next slot to fill in start_xmit */
	dma_addr_t tx_ring_dma;
	struct ks8695_skbuff tx_buffers[MAX_TX_DESC];
	spinlock_t txq_lock;		/* protects TX ring producer state */

	struct rx_ring_desc *rx_ring;
	dma_addr_t rx_ring_dma;
	struct ks8695_skbuff rx_buffers[MAX_RX_DESC];
	int next_rx_desc_read;		/* RX ring consumer index */

	int msg_enable;			/* netif_msg_* verbosity mask */
};
178
179
180
181
182
183
184
185
/*
 * ks8695_readreg - Read a 32-bit MAC register.
 * @ksp: Device private data
 * @reg: Register offset within the MAC register block
 */
static inline u32
ks8695_readreg(struct ks8695_priv *ksp, int reg)
{
	return readl(ksp->io_regs + reg);
}
191
192
193
194
195
196
197
/*
 * ks8695_writereg - Write a 32-bit MAC register.
 * @ksp:   Device private data
 * @reg:   Register offset within the MAC register block
 * @value: Value to write
 */
static inline void
ks8695_writereg(struct ks8695_priv *ksp, int reg, u32 value)
{
	writel(value, ksp->io_regs + reg);
}
203
204
205
206
207
208
209
210
211
212
213static const char *
214ks8695_port_type(struct ks8695_priv *ksp)
215{
216 switch (ksp->dtype) {
217 case KS8695_DTYPE_LAN:
218 return "LAN";
219 case KS8695_DTYPE_WAN:
220 return "WAN";
221 case KS8695_DTYPE_HPNA:
222 return "HPNA";
223 }
224
225 return "UNKNOWN";
226}
227
228
229
230
231
232
233
234
/*
 * ks8695_update_mac - Program ndev->dev_addr into the MAC registers.
 * @ksp: Device private data
 *
 * The hardware splits the 6-byte station address across two
 * registers: MAL holds bytes 2-5, MAH holds bytes 0-1.
 */
static void
ks8695_update_mac(struct ks8695_priv *ksp)
{
	struct net_device *ndev = ksp->ndev;
	u32 machigh, maclow;

	maclow = ((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
		  (ndev->dev_addr[4] << 8)  | (ndev->dev_addr[5] << 0));
	machigh = ((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1] << 0));

	ks8695_writereg(ksp, KS8695_MAL, maclow);
	ks8695_writereg(ksp, KS8695_MAH, machigh);
}
250
251
252
253
254
255
256
257
258
259
/*
 * ks8695_refill_rxbuffers - Attach fresh skbs to empty RX ring slots.
 * @ksp: Device private data
 *
 * For every slot with no skb, allocate a MAX_RXBUF_SIZE buffer, map
 * it for DMA, fill in the descriptor, and only then — after a write
 * barrier — hand the descriptor to the hardware by setting RDES_OWN.
 * On allocation or mapping failure the loop stops early; remaining
 * slots stay empty until the next refill attempt.
 */
static void
ks8695_refill_rxbuffers(struct ks8695_priv *ksp)
{
	int buff_n;

	for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
		if (!ksp->rx_buffers[buff_n].skb) {
			struct sk_buff *skb = dev_alloc_skb(MAX_RXBUF_SIZE);
			dma_addr_t mapping;

			ksp->rx_buffers[buff_n].skb = skb;
			if (skb == NULL) {
				/* Out of memory; give up for now */
				break;
			}

			mapping = dma_map_single(ksp->dev, skb->data,
						 MAX_RXBUF_SIZE,
						 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(ksp->dev, mapping))) {
				/* Mapping failed: drop the skb and stop */
				dev_kfree_skb_irq(skb);
				ksp->rx_buffers[buff_n].skb = NULL;
				break;
			}
			ksp->rx_buffers[buff_n].dma_ptr = mapping;
			skb->dev = ksp->ndev;
			ksp->rx_buffers[buff_n].length = MAX_RXBUF_SIZE;

			/* Fill in the descriptor before publishing it */
			ksp->rx_ring[buff_n].data_ptr = cpu_to_le32(mapping);
			ksp->rx_ring[buff_n].length =
				cpu_to_le32(MAX_RXBUF_SIZE);

			wmb();

			/* Hand the slot to the hardware */
			ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
		}
	}
}
304
305
/* Number of hardware additional-address (multicast filter) slots */
#define KS8695_NR_ADDRESSES 16
307
308
309
310
311
312
313
314
315
316
317
/*
 * ks8695_init_partial_multicast - Program the hardware multicast filter.
 * @ksp:     Device private data
 * @addr:    Head of the device's multicast address list
 * @nr_addr: Number of addresses in the list
 *
 * Writes up to KS8695_NR_ADDRESSES entries into the additional-address
 * registers (AAH_E enables each slot) and clears any remaining slots.
 * The caller (ks8695_set_multicast) guarantees nr_addr fits; the
 * BUG_ON is a belt-and-braces check.
 */
static void
ks8695_init_partial_multicast(struct ks8695_priv *ksp,
			      struct dev_mc_list *addr,
			      int nr_addr)
{
	u32 low, high;
	int i;

	for (i = 0; i < nr_addr; i++, addr = addr->next) {
		/* Ran out of addresses early? */
		if (!addr)
			break;

		BUG_ON(i == KS8695_NR_ADDRESSES);

		low = (addr->dmi_addr[2] << 24) | (addr->dmi_addr[3] << 16) |
		      (addr->dmi_addr[4] << 8) | (addr->dmi_addr[5]);
		high = (addr->dmi_addr[0] << 8) | (addr->dmi_addr[1]);

		ks8695_writereg(ksp, KS8695_AAL_(i), low);
		ks8695_writereg(ksp, KS8695_AAH_(i), AAH_E | high);
	}

	/* Disable any unused slots */
	for (; i < KS8695_NR_ADDRESSES; i++) {
		ks8695_writereg(ksp, KS8695_AAL_(i), 0);
		ks8695_writereg(ksp, KS8695_AAH_(i), 0);
	}
}
347
348
349
350
351
352
353
354
355
356
357
358
/*
 * ks8695_tx_irq - Transmit-complete interrupt handler.
 * @irq:    The interrupt number
 * @dev_id: The net_device (passed through request_irq)
 *
 * Walks every TX slot and reaps those whose descriptors the hardware
 * has released (TDES_OWN clear): accounts the packet, unmaps the DMA
 * buffer and frees the skb.  Finally wakes the TX queue
 * unconditionally — if no slot was freed the next start_xmit will
 * simply return NETDEV_TX_BUSY again.
 */
static irqreturn_t
ks8695_tx_irq(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct ks8695_priv *ksp = netdev_priv(ndev);
	int buff_n;

	for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
		if (ksp->tx_buffers[buff_n].skb &&
		    !(ksp->tx_ring[buff_n].owner & cpu_to_le32(TDES_OWN))) {
			rmb();	/* read descriptor only after owner check */

			/* Update packet/byte counters */
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += ksp->tx_buffers[buff_n].length;

			/* Clear the stale buffer pointer */
			ksp->tx_ring[buff_n].data_ptr = 0;

			/* Release the mapping and the skb */
			dma_unmap_single(ksp->dev,
					 ksp->tx_buffers[buff_n].dma_ptr,
					 ksp->tx_buffers[buff_n].length,
					 DMA_TO_DEVICE);
			dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb);
			ksp->tx_buffers[buff_n].skb = NULL;
			ksp->tx_ring_used--;
		}
	}

	netif_wake_queue(ndev);

	return IRQ_HANDLED;
}
393
394
395
396
397
398
399
400
401
402
/*
 * ks8695_rx_irq - Receive interrupt handler.
 * @irq:    The interrupt number
 * @dev_id: The net_device (passed through request_irq)
 *
 * Consumes completed RX descriptors starting at next_rx_desc_read,
 * going at most once around the ring.  Good frames are passed up via
 * netif_rx(); fragmented or errored frames have their descriptor
 * handed straight back to the hardware.  Afterwards the ring is
 * refilled and the RX DMA engine is poked via KS8695_DRSC.
 *
 * NOTE(review): the whole burst is processed in hard-IRQ context with
 * no budget — a NAPI conversion would be the modern approach; confirm
 * against the target kernel before changing.
 */
static irqreturn_t
ks8695_rx_irq(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct ks8695_priv *ksp = netdev_priv(ndev);
	struct sk_buff *skb;
	int buff_n;
	u32 flags;
	int pktlen;
	int last_rx_processed = -1;

	buff_n = ksp->next_rx_desc_read;
	do {
		if (ksp->rx_buffers[buff_n].skb &&
		    !(ksp->rx_ring[buff_n].status & cpu_to_le32(RDES_OWN))) {
			rmb();	/* read flags only after owner check */
			flags = le32_to_cpu(ksp->rx_ring[buff_n].status);

			/* We expect whole frames: both first-segment and
			 * last-segment bits must be set */
			if ((flags & (RDES_FS | RDES_LS)) !=
			    (RDES_FS | RDES_LS)) {
				/* Fragmented frame — drop it */
				goto rx_failure;
			}

			if (flags & (RDES_ES | RDES_RE)) {
				/* Receive error: classify into stats.
				 * (RF — presumably runt frame — is
				 * counted as a length error here.) */
				ndev->stats.rx_errors++;
				if (flags & RDES_TL)
					ndev->stats.rx_length_errors++;
				if (flags & RDES_RF)
					ndev->stats.rx_length_errors++;
				if (flags & RDES_CE)
					ndev->stats.rx_crc_errors++;
				if (flags & RDES_RE)
					ndev->stats.rx_missed_errors++;

				goto rx_failure;
			}

			/* Frame length, minus 4 — presumably stripping
			 * the trailing FCS; confirm against the KS8695
			 * datasheet */
			pktlen = flags & RDES_FLEN;
			pktlen -= 4;

			/* Detach the skb from the slot */
			skb = ksp->rx_buffers[buff_n].skb;

			ksp->rx_buffers[buff_n].skb = NULL;
			ksp->rx_ring[buff_n].data_ptr = 0;

			/* Release the DMA mapping before touching data */
			dma_unmap_single(ksp->dev,
					 ksp->rx_buffers[buff_n].dma_ptr,
					 ksp->rx_buffers[buff_n].length,
					 DMA_FROM_DEVICE);

			/* Pass the frame up the stack */
			skb_put(skb, pktlen);
			skb->protocol = eth_type_trans(skb, ndev);
			netif_rx(skb);

			/* Account the good frame */
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += pktlen;
			goto rx_finished;

rx_failure:
			/* Descriptor stays loaded with its buffer — give
			 * it straight back to the hardware */
			ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
rx_finished:
			/* Remember how far we got */
			last_rx_processed = buff_n;
		} else {
			/* Nothing more ready — stop scanning */
			break;
		}
		buff_n = (buff_n + 1) & MAX_RX_DESC_MASK;
	} while (buff_n != ksp->next_rx_desc_read);

	/* Advance the consumer index past the last processed slot */
	if (likely(last_rx_processed != -1))
		ksp->next_rx_desc_read =
			(last_rx_processed + 1) & MAX_RX_DESC_MASK;

	/* Replace any buffers we consumed */
	ks8695_refill_rxbuffers(ksp);

	/* Kick the RX DMA engine in case it had stalled */
	ks8695_writereg(ksp, KS8695_DRSC, 0);

	return IRQ_HANDLED;
}
505
506
507
508
509
510
511
512
513
/*
 * ks8695_link_irq - WAN link-state change interrupt handler.
 * @irq:    The interrupt number
 * @dev_id: The net_device (passed through request_irq)
 *
 * Reads the WAN MAC control register and mirrors the link state into
 * the carrier flag, optionally logging speed/duplex (WMC_WSS selects
 * 100 vs 10 Mbps, WMC_WDS full vs half duplex).  Only registered for
 * WAN-type devices (link_irq != -1).
 */
static irqreturn_t
ks8695_link_irq(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct ks8695_priv *ksp = netdev_priv(ndev);
	u32 ctrl;

	ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
	if (ctrl & WMC_WLS) {
		netif_carrier_on(ndev);
		if (netif_msg_link(ksp))
			dev_info(ksp->dev,
				 "%s: Link is now up (10%sMbps/%s-duplex)\n",
				 ndev->name,
				 (ctrl & WMC_WSS) ? "0" : "",
				 (ctrl & WMC_WDS) ? "Full" : "Half");
	} else {
		netif_carrier_off(ndev);
		if (netif_msg_link(ksp))
			dev_info(ksp->dev, "%s: Link is now down.\n",
				 ndev->name);
	}

	return IRQ_HANDLED;
}
539
540
541
542
543
544
545
546
547
548
549
/*
 * ks8695_reset - Soft-reset the MAC's DMA engines.
 * @ksp: Device private data
 *
 * Requests a TX DMA reset (DTXC_TRST) and polls — up to 'watchdog'
 * milliseconds — for the bit to self-clear.  A timeout is logged but
 * otherwise ignored.  Afterwards the RX engine is configured for
 * unicast + broadcast reception and the TX engine for padding and
 * CRC append; neither engine is enabled here (see ks8695_init_net).
 */
static void
ks8695_reset(struct ks8695_priv *ksp)
{
	int reset_timeout = watchdog;

	ks8695_writereg(ksp, KS8695_DTXC, DTXC_TRST);
	while (reset_timeout--) {
		if (!(ks8695_readreg(ksp, KS8695_DTXC) & DTXC_TRST))
			break;
		msleep(1);
	}

	if (reset_timeout < 0) {
		dev_crit(ksp->dev,
			 "Timeout waiting for DMA engines to reset\n");
		/* And blithely carry on */
	}

	/* Definitely wait long enough before attempting to program
	 * the engines */
	msleep(10);

	/* RX: unicast and broadcast */
	ks8695_writereg(ksp, KS8695_DRXC, DRXC_RU | DRXC_RB);
	/* TX: pad short frames and append CRC */
	ks8695_writereg(ksp, KS8695_DTXC, DTXC_TEP | DTXC_TAC);
}
578
579
580
581
582
583
584
585
586
/*
 * ks8695_shutdown - Stop the device and release runtime resources.
 * @ksp: Device private data
 *
 * Disables both DMA engines, frees the interrupt handlers, and
 * reclaims every in-flight TX and RX buffer (clearing descriptors,
 * unmapping DMA and freeing the skbs).  Counterpart of
 * ks8695_init_net(); the rings themselves stay allocated.
 *
 * NOTE(review): this frees rx/tx/link IRQs unconditionally — if
 * ks8695_init_net() failed part-way, some of these were never
 * requested; verify the open-failure path.
 */
static void
ks8695_shutdown(struct ks8695_priv *ksp)
{
	u32 ctrl;
	int buff_n;

	/* Disable the TX DMA engine */
	ctrl = ks8695_readreg(ksp, KS8695_DTXC);
	ks8695_writereg(ksp, KS8695_DTXC, ctrl & ~DTXC_TE);

	/* Disable the RX DMA engine */
	ctrl = ks8695_readreg(ksp, KS8695_DRXC);
	ks8695_writereg(ksp, KS8695_DRXC, ctrl & ~DRXC_RE);

	/* Release the interrupt handlers */
	free_irq(ksp->rx_irq, ksp->ndev);
	free_irq(ksp->tx_irq, ksp->ndev);
	if (ksp->link_irq != -1)
		free_irq(ksp->link_irq, ksp->ndev);

	/* Reclaim any TX buffers still in flight */
	for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
		if (ksp->tx_buffers[buff_n].skb) {
			/* Take the descriptor back from the hardware */
			ksp->tx_ring[buff_n].owner = 0;
			ksp->tx_ring[buff_n].status = 0;
			ksp->tx_ring[buff_n].data_ptr = 0;

			/* Unmap and free the buffer */
			dma_unmap_single(ksp->dev,
					 ksp->tx_buffers[buff_n].dma_ptr,
					 ksp->tx_buffers[buff_n].length,
					 DMA_TO_DEVICE);
			dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb);
			ksp->tx_buffers[buff_n].skb = NULL;
		}
	}

	/* Reclaim all RX buffers */
	for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
		if (ksp->rx_buffers[buff_n].skb) {
			/* Take the descriptor back from the hardware */
			ksp->rx_ring[buff_n].status = 0;
			ksp->rx_ring[buff_n].data_ptr = 0;

			/* Unmap and free the buffer */
			dma_unmap_single(ksp->dev,
					 ksp->rx_buffers[buff_n].dma_ptr,
					 ksp->rx_buffers[buff_n].length,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_irq(ksp->rx_buffers[buff_n].skb);
			ksp->rx_buffers[buff_n].skb = NULL;
		}
	}
}
642
643
644
645
646
647
648
649
650
651
652
653static int
654ks8695_setup_irq(int irq, const char *irq_name,
655 irq_handler_t handler, struct net_device *ndev)
656{
657 int ret;
658
659 ret = request_irq(irq, handler, IRQF_SHARED, irq_name, ndev);
660
661 if (ret) {
662 dev_err(&ndev->dev, "failure to request IRQ %d\n", irq);
663 return ret;
664 }
665
666 return 0;
667}
668
669
670
671
672
673
674
675
676
/*
 * ks8695_init_net - Bring the device into a running state.
 * @ksp: Device private data
 *
 * Fills the RX ring, programs the ring base addresses, requests the
 * interrupt handlers, resets the software ring indices, and enables
 * both DMA engines.  Returns 0 on success or a request_irq() error.
 *
 * NOTE(review): on a mid-sequence IRQ request failure the earlier
 * IRQs are left requested here; callers recover via
 * ks8695_shutdown(), which frees all of them — including any never
 * requested.  Verify this error path.
 */
static int
ks8695_init_net(struct ks8695_priv *ksp)
{
	int ret;
	u32 ctrl;

	ks8695_refill_rxbuffers(ksp);

	/* Tell the hardware where the rings live */
	ks8695_writereg(ksp, KS8695_RDLB, (u32) ksp->rx_ring_dma);
	ks8695_writereg(ksp, KS8695_TDLB, (u32) ksp->tx_ring_dma);

	/* Hook up the interrupt handlers */
	ret = ks8695_setup_irq(ksp->rx_irq, ksp->rx_irq_name,
			       ks8695_rx_irq, ksp->ndev);
	if (ret)
		return ret;
	ret = ks8695_setup_irq(ksp->tx_irq, ksp->tx_irq_name,
			       ks8695_tx_irq, ksp->ndev);
	if (ret)
		return ret;
	if (ksp->link_irq != -1) {
		ret = ks8695_setup_irq(ksp->link_irq, ksp->link_irq_name,
				       ks8695_link_irq, ksp->ndev);
		if (ret)
			return ret;
	}

	/* Reset the software ring state */
	ksp->next_rx_desc_read = 0;
	ksp->tx_ring_next_slot = 0;
	ksp->tx_ring_used = 0;

	/* Enable the TX DMA engine */
	ctrl = ks8695_readreg(ksp, KS8695_DTXC);
	ks8695_writereg(ksp, KS8695_DTXC, ctrl | DTXC_TE);

	/* Enable the RX DMA engine */
	ctrl = ks8695_readreg(ksp, KS8695_DRXC);
	ks8695_writereg(ksp, KS8695_DRXC, ctrl | DRXC_RE);

	/* Kick the RX engine into life */
	ks8695_writereg(ksp, KS8695_DRSC, 0);

	return 0;
}
725
726
727
728
729
730
731
732
733static void
734ks8695_release_device(struct ks8695_priv *ksp)
735{
736
737 iounmap(ksp->io_regs);
738 if (ksp->phyiface_regs)
739 iounmap(ksp->phyiface_regs);
740
741
742 release_resource(ksp->regs_req);
743 kfree(ksp->regs_req);
744 if (ksp->phyiface_req) {
745 release_resource(ksp->phyiface_req);
746 kfree(ksp->phyiface_req);
747 }
748
749
750 dma_free_coherent(ksp->dev, RING_DMA_SIZE,
751 ksp->ring_base, ksp->ring_base_dma);
752}
753
754
755
756
757
758
759
760static u32
761ks8695_get_msglevel(struct net_device *ndev)
762{
763 struct ks8695_priv *ksp = netdev_priv(ndev);
764
765 return ksp->msg_enable;
766}
767
768
769
770
771
772
773static void
774ks8695_set_msglevel(struct net_device *ndev, u32 value)
775{
776 struct ks8695_priv *ksp = netdev_priv(ndev);
777
778 ksp->msg_enable = value;
779}
780
781
782
783
784
785
/*
 * ks8695_get_settings - ethtool hook: report link settings.
 * @ndev: The network device to read settings from
 * @cmd:  The ethtool structure to fill
 *
 * Only the WAN port is fully supported: its autonegotiation state,
 * advertised modes, speed and duplex are decoded from the WMC
 * register.  HPNA and LAN ports return -EOPNOTSUPP.
 */
static int
ks8695_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	u32 ctrl;

	/* All ports share the same basic capabilities */
	cmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
			  SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
			  SUPPORTED_TP | SUPPORTED_MII);
	cmd->transceiver = XCVR_INTERNAL;

	/* Port specific extras */
	switch (ksp->dtype) {
	case KS8695_DTYPE_HPNA:
		cmd->phy_address = 0;
		/* HPNA is always at 100Mbps, no autoneg */
		cmd->autoneg = AUTONEG_DISABLE;
		/* Not otherwise supported */
		return -EOPNOTSUPP;
	case KS8695_DTYPE_WAN:
		cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
		cmd->port = PORT_MII;
		cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause);
		cmd->phy_address = 0;

		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
		if ((ctrl & WMC_WAND) == 0) {
			/* Autonegotiation enabled: decode what we
			 * advertise and the negotiated result */
			cmd->advertising |= ADVERTISED_Autoneg;
			if (ctrl & WMC_WANA100F)
				cmd->advertising |= ADVERTISED_100baseT_Full;
			if (ctrl & WMC_WANA100H)
				cmd->advertising |= ADVERTISED_100baseT_Half;
			if (ctrl & WMC_WANA10F)
				cmd->advertising |= ADVERTISED_10baseT_Full;
			if (ctrl & WMC_WANA10H)
				cmd->advertising |= ADVERTISED_10baseT_Half;
			if (ctrl & WMC_WANAP)
				cmd->advertising |= ADVERTISED_Pause;
			cmd->autoneg = AUTONEG_ENABLE;

			cmd->speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10;
			cmd->duplex = (ctrl & WMC_WDS) ?
				DUPLEX_FULL : DUPLEX_HALF;
		} else {
			/* Autonegotiation disabled: report the forced
			 * speed/duplex settings */
			cmd->autoneg = AUTONEG_DISABLE;

			cmd->speed = (ctrl & WMC_WANF100) ?
				SPEED_100 : SPEED_10;
			cmd->duplex = (ctrl & WMC_WANFF) ?
				DUPLEX_FULL : DUPLEX_HALF;
		}
		break;
	case KS8695_DTYPE_LAN:
		return -EOPNOTSUPP;
	}

	return 0;
}
853
854
855
856
857
858
/*
 * ks8695_set_settings - ethtool hook: change link settings.
 * @ndev: The network device to configure
 * @cmd:  The requested settings
 *
 * Validates the request (10/100 Mbps, half/full duplex, MII port,
 * internal transceiver only), then for WAN ports either programs the
 * advertised modes and restarts autonegotiation, or forces the
 * requested speed/duplex.  HPNA and LAN ports are not supported.
 */
static int
ks8695_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	u32 ctrl;

	if ((cmd->speed != SPEED_10) && (cmd->speed != SPEED_100))
		return -EINVAL;
	if ((cmd->duplex != DUPLEX_HALF) && (cmd->duplex != DUPLEX_FULL))
		return -EINVAL;
	if (cmd->port != PORT_MII)
		return -EINVAL;
	if (cmd->transceiver != XCVR_INTERNAL)
		return -EINVAL;
	if ((cmd->autoneg != AUTONEG_DISABLE) &&
	    (cmd->autoneg != AUTONEG_ENABLE))
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* At least one mode must be advertised */
		if ((cmd->advertising & (ADVERTISED_10baseT_Half |
					 ADVERTISED_10baseT_Full |
					 ADVERTISED_100baseT_Half |
					 ADVERTISED_100baseT_Full)) == 0)
			return -EINVAL;

		switch (ksp->dtype) {
		case KS8695_DTYPE_HPNA:
			/* HPNA cannot autonegotiate */
			return -EINVAL;
		case KS8695_DTYPE_WAN:
			ctrl = readl(ksp->phyiface_regs + KS8695_WMC);

			/* Clear forced mode and all advertisement bits,
			 * then set exactly what was requested */
			ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H |
				  WMC_WANA10F | WMC_WANA10H);
			if (cmd->advertising & ADVERTISED_100baseT_Full)
				ctrl |= WMC_WANA100F;
			if (cmd->advertising & ADVERTISED_100baseT_Half)
				ctrl |= WMC_WANA100H;
			if (cmd->advertising & ADVERTISED_10baseT_Full)
				ctrl |= WMC_WANA10F;
			if (cmd->advertising & ADVERTISED_10baseT_Half)
				ctrl |= WMC_WANA10H;

			/* Restart autonegotiation */
			ctrl |= WMC_WANR;
			writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
			break;
		case KS8695_DTYPE_LAN:
			return -EOPNOTSUPP;
		}

	} else {
		switch (ksp->dtype) {
		case KS8695_DTYPE_HPNA:
			/* Forced modes on HPNA not supported either */
			return -EOPNOTSUPP;
		case KS8695_DTYPE_WAN:
			ctrl = readl(ksp->phyiface_regs + KS8695_WMC);

			/* Disable autoneg, clear forced bits */
			ctrl |= WMC_WAND;
			ctrl &= ~(WMC_WANF100 | WMC_WANFF);

			if (cmd->speed == SPEED_100)
				ctrl |= WMC_WANF100;
			if (cmd->duplex == DUPLEX_FULL)
				ctrl |= WMC_WANFF;

			writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
			break;
		case KS8695_DTYPE_LAN:
			return -EOPNOTSUPP;
		}
	}

	return 0;
}
947
948
949
950
951
/*
 * ks8695_nwayreset - ethtool hook: restart autonegotiation.
 * @ndev: The network device to restart autoneg on
 *
 * Only meaningful for the WAN port, and only while autonegotiation
 * is currently enabled (WMC_WAND clear) — otherwise -EINVAL.
 */
static int
ks8695_nwayreset(struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	u32 ctrl;

	switch (ksp->dtype) {
	case KS8695_DTYPE_HPNA:
		/* No autoneg on HPNA */
		return -EINVAL;
	case KS8695_DTYPE_WAN:
		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);

		if ((ctrl & WMC_WAND) == 0)
			writel(ctrl | WMC_WANR,
			       ksp->phyiface_regs + KS8695_WMC);
		else
			/* Autoneg is disabled — nothing to restart */
			return -EINVAL;
		break;
	case KS8695_DTYPE_LAN:
		return -EOPNOTSUPP;
	}

	return 0;
}
978
979
980
981
982
/*
 * ks8695_get_link - ethtool hook: report link state.
 * @ndev: The network device to report on
 *
 * HPNA is treated as always up; WAN reads the link-state bit from
 * the WMC register.
 *
 * NOTE(review): returning -EOPNOTSUPP from a u32-returning get_link
 * is reported to userspace as a large nonzero value, i.e. "link up";
 * confirm whether the LAN case should instead return a real state.
 */
static u32
ks8695_get_link(struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	u32 ctrl;

	switch (ksp->dtype) {
	case KS8695_DTYPE_HPNA:
		/* HPNA is always connected */
		return 1;
	case KS8695_DTYPE_WAN:
		/* WAN link state is in the WMC register */
		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
		return ctrl & WMC_WLS;
	case KS8695_DTYPE_LAN:
		return -EOPNOTSUPP;
	}
	return 0;
}
1002
1003
1004
1005
1006
1007
/*
 * ks8695_get_pause - ethtool hook: report pause-frame parameters.
 * @ndev:  The network device to report on
 * @param: The structure to fill
 *
 * WAN only: pause autonegotiation comes from WMC_WANAP, RX pause
 * from the RX DMA control register (DRXC_RFCE) and TX pause from
 * the TX DMA control register (DTXC_TFCE).  HPNA and LAN leave the
 * structure untouched.
 */
static void
ks8695_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	u32 ctrl;

	switch (ksp->dtype) {
	case KS8695_DTYPE_HPNA:
		/* No pause support on HPNA */
		return;
	case KS8695_DTYPE_WAN:
		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);

		/* Pause advertisement state */
		param->autoneg = (ctrl & WMC_WANAP);

		/* RX flow control */
		ctrl = ks8695_readreg(ksp, KS8695_DRXC);
		param->rx_pause = (ctrl & DRXC_RFCE);

		/* TX flow control */
		ctrl = ks8695_readreg(ksp, KS8695_DTXC);
		param->tx_pause = (ctrl & DTXC_TFCE);
		break;
	case KS8695_DTYPE_LAN:
		/* Not supported on the LAN switch port */
		return;
	}
}
1037
1038
1039
1040
1041
1042
1043
1044
/*
 * ks8695_set_pause - ethtool hook: set pause-frame parameters.
 * @ndev:  The network device to configure
 * @param: The requested pause parameters
 *
 * Not implemented on any port type.
 */
static int
ks8695_set_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
{
	return -EOPNOTSUPP;
}
1050
1051
1052
1053
1054
1055
1056static void
1057ks8695_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
1058{
1059 strlcpy(info->driver, MODULENAME, sizeof(info->driver));
1060 strlcpy(info->version, MODULEVERSION, sizeof(info->version));
1061 strlcpy(info->bus_info, dev_name(ndev->dev.parent),
1062 sizeof(info->bus_info));
1063}
1064
/* ethtool operations supported by this driver */
static const struct ethtool_ops ks8695_ethtool_ops = {
	.get_msglevel	= ks8695_get_msglevel,
	.set_msglevel	= ks8695_set_msglevel,
	.get_settings	= ks8695_get_settings,
	.set_settings	= ks8695_set_settings,
	.nway_reset	= ks8695_nwayreset,
	.get_link	= ks8695_get_link,
	.get_pauseparam = ks8695_get_pause,
	.set_pauseparam = ks8695_set_pause,
	.get_drvinfo	= ks8695_get_drvinfo,
};
1076
1077
1078
1079
1080
1081
1082
1083
/*
 * ks8695_set_mac - netdev hook: change the station address.
 * @ndev: The network device to change
 * @addr: A struct sockaddr carrying the new address
 *
 * Validates the address, copies it into ndev->dev_addr and programs
 * it into the MAC registers.  Returns -EADDRNOTAVAIL for an invalid
 * ethernet address.
 */
static int
ks8695_set_mac(struct net_device *ndev, void *addr)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	struct sockaddr *address = addr;

	if (!is_valid_ether_addr(address->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len);

	ks8695_update_mac(ksp);

	dev_dbg(ksp->dev, "%s: Updated MAC address to %pM\n",
		ndev->name, ndev->dev_addr);

	return 0;
}
1102
1103
1104
1105
1106
1107
1108
1109
1110static void
1111ks8695_set_multicast(struct net_device *ndev)
1112{
1113 struct ks8695_priv *ksp = netdev_priv(ndev);
1114 u32 ctrl;
1115
1116 ctrl = ks8695_readreg(ksp, KS8695_DRXC);
1117
1118 if (ndev->flags & IFF_PROMISC) {
1119
1120 ctrl |= DRXC_RA;
1121 } else if (ndev->flags & ~IFF_PROMISC) {
1122
1123 ctrl &= ~DRXC_RA;
1124 }
1125
1126 if (ndev->flags & IFF_ALLMULTI) {
1127
1128 ctrl |= DRXC_RM;
1129 } else if (ndev->mc_count > KS8695_NR_ADDRESSES) {
1130
1131
1132
1133 ctrl |= DRXC_RM;
1134 } else {
1135
1136 ctrl &= ~DRXC_RM;
1137 ks8695_init_partial_multicast(ksp, ndev->mc_list,
1138 ndev->mc_count);
1139 }
1140
1141 ks8695_writereg(ksp, KS8695_DRXC, ctrl);
1142}
1143
1144
1145
1146
1147
1148
1149
/*
 * ks8695_timeout - netdev hook: transmit watchdog fired.
 * @ndev: The stalled network device
 *
 * Recovers by fully shutting down and re-initialising the interface:
 * stop the queue, tear down (ks8695_shutdown), reset the DMA
 * engines, reprogram the MAC address, bring the device back up and
 * restore the multicast filter.
 *
 * NOTE(review): the return value of ks8695_init_net() is ignored
 * here — if re-init fails the queue is restarted anyway; verify.
 */
static void
ks8695_timeout(struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);

	netif_stop_queue(ndev);
	ks8695_shutdown(ksp);

	ks8695_reset(ksp);

	ks8695_update_mac(ksp);

	/* We ignore the return from this since it won't hurt to
	 * carry on regardless */
	ks8695_init_net(ksp);

	/* Reconfigure promiscuity etc */
	ks8695_set_multicast(ndev);

	/* And start the TX queue once more */
	netif_start_queue(ndev);
}
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
/*
 * ks8695_start_xmit - netdev hook: queue a frame for transmission.
 * @skb:  The packet to send
 * @ndev: The network device to send on
 *
 * Under txq_lock: takes the next free TX ring slot, maps the skb for
 * DMA, fills in the descriptor, and — after a write barrier — hands
 * it to the hardware by setting TDES_OWN, then kicks the TX DMA
 * engine.  Returns NETDEV_TX_BUSY when the ring is full or the DMA
 * mapping fails (the skb is requeued by the stack), NETDEV_TX_OK
 * otherwise.  Completion is handled in ks8695_tx_irq().
 */
static int
ks8695_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	int buff_n;
	dma_addr_t dmap;

	spin_lock_irq(&ksp->txq_lock);

	if (ksp->tx_ring_used == MAX_TX_DESC) {
		/* Ring full — ask the stack to retry later */
		spin_unlock_irq(&ksp->txq_lock);
		return NETDEV_TX_BUSY;
	}

	buff_n = ksp->tx_ring_next_slot;

	/* The slot must be free if the ring is not full */
	BUG_ON(ksp->tx_buffers[buff_n].skb);

	dmap = dma_map_single(ksp->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(ksp->dev, dmap))) {
		/* Mapping failed — retry later */
		spin_unlock_irq(&ksp->txq_lock);
		dev_dbg(ksp->dev, "%s: Could not map DMA memory for "\
			"transmission, trying later\n", ndev->name);
		return NETDEV_TX_BUSY;
	}

	ksp->tx_buffers[buff_n].dma_ptr = dmap;
	/* Remember the skb and length for the completion handler */
	ksp->tx_buffers[buff_n].skb = skb;
	ksp->tx_buffers[buff_n].length = skb->len;

	/* Fill the descriptor: interrupt-on-complete, whole frame */
	ksp->tx_ring[buff_n].data_ptr =
		cpu_to_le32(ksp->tx_buffers[buff_n].dma_ptr);
	ksp->tx_ring[buff_n].status =
		cpu_to_le32(TDES_IC | TDES_FS | TDES_LS |
			    (skb->len & TDES_TBS));

	wmb();	/* descriptor contents before ownership transfer */

	/* Hand the descriptor to the hardware */
	ksp->tx_ring[buff_n].owner = cpu_to_le32(TDES_OWN);

	if (++ksp->tx_ring_used == MAX_TX_DESC)
		netif_stop_queue(ndev);

	ndev->trans_start = jiffies;

	/* Kick the TX DMA engine */
	ks8695_writereg(ksp, KS8695_DTSC, 0);

	/* Advance the producer index */
	ksp->tx_ring_next_slot = (buff_n + 1) & MAX_TX_DESC_MASK;

	spin_unlock_irq(&ksp->txq_lock);
	return NETDEV_TX_OK;
}
1242
1243
1244
1245
1246
1247
1248
1249
/*
 * ks8695_stop - netdev hook: bring the interface down.
 * @ndev: The device being stopped
 *
 * Stops the transmit queue, drops carrier, and tears down the
 * running state (DMA engines, IRQs and buffers) via
 * ks8695_shutdown().  Always returns 0.
 */
static int
ks8695_stop(struct net_device *ndev)
{
	struct ks8695_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);

	ks8695_shutdown(priv);

	return 0;
}
1262
1263
1264
1265
1266
1267
1268
1269
1270
/*
 * ks8695_open - netdev hook: bring the interface up.
 * @ndev: The device being opened
 *
 * Validates the station address, resets the DMA engines, programs
 * the MAC address and initialises the running state.  On init
 * failure the interface is shut down again and the error returned.
 *
 * NOTE(review): ks8695_shutdown() frees all IRQs unconditionally,
 * but a failed ks8695_init_net() may not have requested them all —
 * verify this error path.
 */
static int
ks8695_open(struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	int ret;

	if (!is_valid_ether_addr(ndev->dev_addr))
		return -EADDRNOTAVAIL;

	ks8695_reset(ksp);

	ks8695_update_mac(ksp);

	ret = ks8695_init_net(ksp);
	if (ret) {
		ks8695_shutdown(ksp);
		return ret;
	}

	netif_start_queue(ndev);

	return 0;
}
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
/*
 * ks8695_init_switch - One-time setup of the LAN switch block.
 * @ksp: Device private data (phyiface_regs must be mapped)
 *
 * Programs the two switch-engine control registers.  The base value
 * 0x40819e00 and the SEC1 value 0x9400100 are taken as-is from the
 * original driver — presumably the reference configuration from the
 * KS8695 datasheet; confirm before changing.
 */
static void __devinit
ks8695_init_switch(struct ks8695_priv *ksp)
{
	u32 ctrl;

	/* Default value for SEC0 according to datasheet */
	ctrl = 0x40819e00;

	/* LED0 = Link, LED1 = Link/Activity */
	ctrl &= ~(SEC0_LLED1S | SEC0_LLED0S);
	ctrl |= (LLED0S_LINK | LLED1S_LINK_ACTIVITY);

	/* Enable the switch engine */
	ctrl |= SEC0_ENABLE;

	writel(ctrl, ksp->phyiface_regs + KS8695_SEC0);

	/* Defaults for SEC1 */
	writel(0x9400100, ksp->phyiface_regs + KS8695_SEC1);
}
1324
1325
1326
1327
1328
1329
1330
1331
/*
 * ks8695_init_wan_phy - One-time setup of the WAN PHY.
 * @ksp: Device private data (phyiface_regs must be mapped)
 *
 * Advertises all 10/100 half/full modes plus pause, selects LED
 * functions, and kicks off autonegotiation (WMC_WANR).  Also clears
 * the WAN PHY power-management and power-save registers.
 */
static void __devinit
ks8695_init_wan_phy(struct ks8695_priv *ksp)
{
	u32 ctrl;

	/* Support both 10 and 100 Mbps, full and half duplex, and pause */
	ctrl = (WMC_WANAP | WMC_WANA100F | WMC_WANA100H |
		WMC_WANA10F | WMC_WANA10H);

	/* LED0 = activity, LED1 = link */
	ctrl |= (WLED0S_ACTIVITY | WLED1S_LINK);

	/* Restart autonegotiation */
	ctrl |= WMC_WANR;

	writel(ctrl, ksp->phyiface_regs + KS8695_WMC);

	writel(0, ksp->phyiface_regs + KS8695_WPPM);
	writel(0, ksp->phyiface_regs + KS8695_PPS);
}
1352
/* net_device operations supported by this driver */
static const struct net_device_ops ks8695_netdev_ops = {
	.ndo_open		= ks8695_open,
	.ndo_stop		= ks8695_stop,
	.ndo_start_xmit		= ks8695_start_xmit,
	.ndo_tx_timeout		= ks8695_timeout,
	.ndo_set_mac_address	= ks8695_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= ks8695_set_multicast,
};
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
/*
 * ks8695_probe - Platform-driver probe for one KS8695 ethernet port.
 * @pdev: The platform device describing the port
 *
 * Allocates the net_device, claims and maps the register windows,
 * records the IRQ resources, reads the MAC address the boot loader
 * left in the hardware, allocates the descriptor rings, determines
 * the port personality (LAN switch / WAN / HPNA) from which
 * resources are present, and registers the netdev.
 *
 * NOTE(review): all failure paths funnel through one label that
 * calls ks8695_release_device(); on early failures several ksp
 * fields are still NULL/unacquired — verify the teardown guards
 * against that.
 */
static int __devinit
ks8695_probe(struct platform_device *pdev)
{
	struct ks8695_priv *ksp;
	struct net_device *ndev;
	struct resource *regs_res, *phyiface_res;
	struct resource *rxirq_res, *txirq_res, *linkirq_res;
	int ret = 0;
	int buff_n;
	u32 machigh, maclow;

	/* Initialise a net_device */
	ndev = alloc_etherdev(sizeof(struct ks8695_priv));
	if (!ndev) {
		dev_err(&pdev->dev, "could not allocate device.\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	dev_dbg(&pdev->dev, "ks8695_probe() called\n");

	/* Configure our private structure a little */
	ksp = netdev_priv(ndev);
	memset(ksp, 0, sizeof(struct ks8695_priv));

	ksp->dev = &pdev->dev;
	ksp->ndev = ndev;
	ksp->msg_enable = NETIF_MSG_LINK;

	/* Retrieve platform resources: the MAC registers are mandatory,
	 * the switch/PHY window and the link IRQ are optional and
	 * determine the port personality below */
	regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	phyiface_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);

	rxirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	txirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
	linkirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 2);

	if (!(regs_res && rxirq_res && txirq_res)) {
		dev_err(ksp->dev, "insufficient resources\n");
		ret = -ENOENT;
		goto failure;
	}

	ksp->regs_req = request_mem_region(regs_res->start,
					   resource_size(regs_res),
					   pdev->name);

	if (!ksp->regs_req) {
		dev_err(ksp->dev, "cannot claim register space\n");
		ret = -EIO;
		goto failure;
	}

	ksp->io_regs = ioremap(regs_res->start, resource_size(regs_res));

	if (!ksp->io_regs) {
		dev_err(ksp->dev, "failed to ioremap registers\n");
		ret = -EINVAL;
		goto failure;
	}

	if (phyiface_res) {
		ksp->phyiface_req =
			request_mem_region(phyiface_res->start,
					   resource_size(phyiface_res),
					   phyiface_res->name);

		if (!ksp->phyiface_req) {
			dev_err(ksp->dev,
				"cannot claim switch register space\n");
			ret = -EIO;
			goto failure;
		}

		ksp->phyiface_regs = ioremap(phyiface_res->start,
					     resource_size(phyiface_res));

		if (!ksp->phyiface_regs) {
			dev_err(ksp->dev,
				"failed to ioremap switch registers\n");
			ret = -EINVAL;
			goto failure;
		}
	}

	ksp->rx_irq = rxirq_res->start;
	ksp->rx_irq_name = rxirq_res->name ? rxirq_res->name : "Ethernet RX";
	ksp->tx_irq = txirq_res->start;
	ksp->tx_irq_name = txirq_res->name ? txirq_res->name : "Ethernet TX";
	ksp->link_irq = (linkirq_res ? linkirq_res->start : -1);
	ksp->link_irq_name = (linkirq_res && linkirq_res->name) ?
		linkirq_res->name : "Ethernet Link";

	/* Hook up our operations */
	ndev->netdev_ops = &ks8695_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);

	/* Retrieve the default MAC address from the hardware —
	 * presumably left there by the boot loader */
	machigh = ks8695_readreg(ksp, KS8695_MAH);
	maclow = ks8695_readreg(ksp, KS8695_MAL);

	ndev->dev_addr[0] = (machigh >> 8) & 0xFF;
	ndev->dev_addr[1] = machigh & 0xFF;
	ndev->dev_addr[2] = (maclow >> 24) & 0xFF;
	ndev->dev_addr[3] = (maclow >> 16) & 0xFF;
	ndev->dev_addr[4] = (maclow >> 8) & 0xFF;
	ndev->dev_addr[5] = maclow & 0xFF;

	if (!is_valid_ether_addr(ndev->dev_addr))
		dev_warn(ksp->dev, "%s: Invalid ethernet MAC address. Please "
			 "set using ifconfig\n", ndev->name);

	/* Allocate one coherent block holding both descriptor rings:
	 * TX ring first, then RX ring */
	ksp->ring_base = dma_alloc_coherent(&pdev->dev, RING_DMA_SIZE,
					    &ksp->ring_base_dma, GFP_KERNEL);
	if (!ksp->ring_base) {
		ret = -ENOMEM;
		goto failure;
	}

	/* TX ring occupies the first block */
	ksp->tx_ring = ksp->ring_base;
	ksp->tx_ring_dma = ksp->ring_base_dma;

	/* Spinlock for the TX producer side */
	spin_lock_init(&ksp->txq_lock);

	/* RX ring follows immediately after */
	ksp->rx_ring = ksp->ring_base + TX_RING_DMA_SIZE;
	ksp->rx_ring_dma = ksp->ring_base_dma + TX_RING_DMA_SIZE;

	/* Zero both rings */
	memset(ksp->tx_ring, 0, TX_RING_DMA_SIZE);
	memset(ksp->rx_ring, 0, RX_RING_DMA_SIZE);

	/* Link the TX descriptors into a ring */
	for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
		ksp->tx_ring[buff_n].next_desc =
			cpu_to_le32(ksp->tx_ring_dma +
				    (sizeof(struct tx_ring_desc) *
				     ((buff_n + 1) & MAX_TX_DESC_MASK)));
	}

	/* Link the RX descriptors into a ring */
	for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
		ksp->rx_ring[buff_n].next_desc =
			cpu_to_le32(ksp->rx_ring_dma +
				    (sizeof(struct rx_ring_desc) *
				     ((buff_n + 1) & MAX_RX_DESC_MASK)));
	}

	/* Personality: switch regs without a link IRQ = LAN switch
	 * port; with a link IRQ = WAN; neither = HPNA */
	if (ksp->phyiface_regs && ksp->link_irq == -1) {
		ks8695_init_switch(ksp);
		ksp->dtype = KS8695_DTYPE_LAN;
	} else if (ksp->phyiface_regs && ksp->link_irq != -1) {
		ks8695_init_wan_phy(ksp);
		ksp->dtype = KS8695_DTYPE_WAN;
	} else {
		/* No initialisation needed for HPNA */
		ksp->dtype = KS8695_DTYPE_HPNA;
	}

	/* And register the device */
	platform_set_drvdata(pdev, ndev);
	ret = register_netdev(ndev);

	if (ret == 0) {
		dev_info(ksp->dev, "ks8695 ethernet (%s) MAC: %pM\n",
			 ks8695_port_type(ksp), ndev->dev_addr);
	} else {
		/* Report the failure to register the net_device */
		dev_err(ksp->dev, "ks8695net: failed to register netdev.\n");
		goto failure;
	}

	/* All is well */
	return 0;

	/* Error exit path */
failure:
	ks8695_release_device(ksp);
	free_netdev(ndev);

	return ret;
}
1567
1568
1569
1570
1571
1572
1573
1574
/*
 * ks8695_drv_suspend - Platform-driver suspend hook.
 * @pdev:  The platform device being suspended
 * @state: The target power state
 *
 * Marks the device as suspended and, if the interface is running,
 * detaches it from the stack and tears down the running state.
 */
static int
ks8695_drv_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ks8695_priv *ksp = netdev_priv(ndev);

	ksp->in_suspend = 1;

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		ks8695_shutdown(ksp);
	}

	return 0;
}
1590
1591
1592
1593
1594
1595
1596
1597
/*
 * ks8695_drv_resume - Platform-driver resume hook.
 * @pdev: The platform device being resumed
 *
 * If the interface was running at suspend time, resets and
 * re-initialises the hardware, restores the multicast filter and
 * re-attaches the device to the stack.  (The return value of
 * ks8695_init_net() is ignored here, as in ks8695_timeout().)
 */
static int
ks8695_drv_resume(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ks8695_priv *ksp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		ks8695_reset(ksp);
		ks8695_init_net(ksp);
		ks8695_set_multicast(ndev);
		netif_device_attach(ndev);
	}

	ksp->in_suspend = 0;

	return 0;
}
1615
1616
1617
1618
1619
1620
1621
/*
 * ks8695_drv_remove - Platform-driver remove hook.
 * @pdev: The platform device being removed
 *
 * Unregisters the netdev, releases the hardware resources and frees
 * the net_device.
 */
static int __devexit
ks8695_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ks8695_priv *ksp = netdev_priv(ndev);

	platform_set_drvdata(pdev, NULL);

	unregister_netdev(ndev);
	ks8695_release_device(ksp);
	free_netdev(ndev);

	dev_dbg(&pdev->dev, "released and freed device\n");
	return 0;
}
1637
/* Platform driver glue; one instance is probed per ethernet port */
static struct platform_driver ks8695_driver = {
	.driver = {
		.name	= MODULENAME,
		.owner	= THIS_MODULE,
	},
	.probe		= ks8695_probe,
	.remove		= __devexit_p(ks8695_drv_remove),
	.suspend	= ks8695_drv_suspend,
	.resume		= ks8695_drv_resume,
};
1648
1649
1650
/* Module entry point: announce ourselves and register the driver */
static int __init
ks8695_init(void)
{
	printk(KERN_INFO "%s Ethernet driver, V%s\n",
	       MODULENAME, MODULEVERSION);

	return platform_driver_register(&ks8695_driver);
}
1659
/* Module exit point: unregister the platform driver */
static void __exit
ks8695_cleanup(void)
{
	platform_driver_unregister(&ks8695_driver);
}
1665
1666module_init(ks8695_init);
1667module_exit(ks8695_cleanup);
1668
1669MODULE_AUTHOR("Simtec Electronics")
1670MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver");
1671MODULE_LICENSE("GPL");
1672MODULE_ALIAS("platform:" MODULENAME);
1673
1674module_param(watchdog, int, 0400);
1675MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
1676