1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include <linux/dma-mapping.h>
20#include <linux/module.h>
21#include <linux/ioport.h>
22#include <linux/netdevice.h>
23#include <linux/etherdevice.h>
24#include <linux/interrupt.h>
25#include <linux/skbuff.h>
26#include <linux/spinlock.h>
27#include <linux/crc32.h>
28#include <linux/mii.h>
29#include <linux/ethtool.h>
30#include <linux/delay.h>
31#include <linux/platform_device.h>
32#include <linux/irq.h>
33#include <linux/io.h>
34#include <linux/slab.h>
35
36#include <asm/irq.h>
37
38#include <mach/regs-switch.h>
39#include <mach/regs-misc.h>
40#include <asm/mach/irq.h>
41#include <mach/regs-irq.h>
42
43#include "ks8695net.h"
44
#define MODULENAME "ks8695_ether"
#define MODULEVERSION "1.02"

/* Transmit timeout in milliseconds (module parameter, see bottom of file).
 * NOTE(review): also used as the reset poll iteration count in
 * ks8695_reset() — confirm the two uses are intentionally shared. */
static int watchdog = 5000;
52
53
54
55
56
57
58
59
60
61
/*
 * struct rx_ring_desc - Hardware RX DMA descriptor (little-endian, in
 * coherent memory shared with the KS8695 RX DMA engine).  Field order
 * and sizes are fixed by the hardware; do not reorder.
 */
struct rx_ring_desc {
	__le32 status;		/* ownership (RDES_OWN) and completion flags */
	__le32 length;		/* buffer length the engine may fill */
	__le32 data_ptr;	/* DMA address of the receive buffer */
	__le32 next_desc;	/* DMA address of the next descriptor (ring) */
};
68
69
70
71
72
73
74
75
/*
 * struct tx_ring_desc - Hardware TX DMA descriptor (little-endian, in
 * coherent memory shared with the KS8695 TX DMA engine).  Ownership is
 * kept in a separate word from the control/status flags.
 */
struct tx_ring_desc {
	__le32 owner;		/* TDES_OWN handed to hardware last (after wmb) */
	__le32 status;		/* control flags and buffer size (TDES_*) */
	__le32 data_ptr;	/* DMA address of the packet data */
	__le32 next_desc;	/* DMA address of the next descriptor (ring) */
};
82
83
84
85
86
87
88
/*
 * struct ks8695_skbuff - Driver-side bookkeeping for one ring slot:
 * the skb attached to a descriptor plus the DMA mapping that must be
 * unmapped when the slot is reaped.
 */
struct ks8695_skbuff {
	struct sk_buff *skb;	/* NULL when the slot is free */
	dma_addr_t dma_ptr;	/* streaming DMA mapping of skb->data */
	u32 length;		/* mapped length (needed for dma_unmap_single) */
};
94
95
96
/* Ring geometry.  The MASK values rely on the counts being powers of two
 * so slot indices can wrap with a cheap bitwise AND. */
#define MAX_TX_DESC 8
#define MAX_TX_DESC_MASK 0x7
#define MAX_RX_DESC 16
#define MAX_RX_DESC_MASK 0xf

/* NAPI poll budget weight */
#define NAPI_WEIGHT 64

/* Size of each receive buffer handed to the hardware */
#define MAX_RXBUF_SIZE 0x700

/* Both rings live in one coherent DMA allocation: TX ring first,
 * then the RX ring (see ks8695_probe). */
#define TX_RING_DMA_SIZE (sizeof(struct tx_ring_desc) * MAX_TX_DESC)
#define RX_RING_DMA_SIZE (sizeof(struct rx_ring_desc) * MAX_RX_DESC)
#define RING_DMA_SIZE (TX_RING_DMA_SIZE + RX_RING_DMA_SIZE)
110
111
112
113
114
115
116
/*
 * enum ks8695_dtype - Which flavour of KS8695 MAC this device instance is.
 * Selected in ks8695_probe from the presence of the switch register
 * window and the link IRQ.
 */
enum ks8695_dtype {
	KS8695_DTYPE_WAN,	/* WAN port: has PHY-interface regs + link IRQ */
	KS8695_DTYPE_LAN,	/* LAN port: behind the integrated switch */
	KS8695_DTYPE_HPNA,	/* HPNA port: no PHY-interface regs */
};
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
/*
 * struct ks8695_priv - Per-interface driver state (netdev_priv area).
 */
struct ks8695_priv {
	int in_suspend;			/* set across suspend/resume */
	struct net_device *ndev;	/* back-pointer to our net_device */
	struct device *dev;		/* the platform device's struct device */
	enum ks8695_dtype dtype;	/* WAN / LAN / HPNA */
	void __iomem *io_regs;		/* mapped MAC register window */

	struct napi_struct napi;

	/* IRQ numbers and the names they were requested under */
	const char *rx_irq_name, *tx_irq_name, *link_irq_name;
	int rx_irq, tx_irq, link_irq;	/* link_irq == -1 when absent */

	struct resource *regs_req, *phyiface_req;	/* claimed mem regions */
	void __iomem *phyiface_regs;	/* mapped switch/PHY window (or NULL) */

	/* One coherent allocation holding both descriptor rings */
	void *ring_base;
	dma_addr_t ring_base_dma;

	struct tx_ring_desc *tx_ring;	/* points into ring_base */
	int tx_ring_used;		/* descriptors currently owned by hw */
	int tx_ring_next_slot;		/* next slot start_xmit will fill */
	dma_addr_t tx_ring_dma;
	struct ks8695_skbuff tx_buffers[MAX_TX_DESC];
	spinlock_t txq_lock;		/* protects TX ring bookkeeping */

	struct rx_ring_desc *rx_ring;	/* points into ring_base */
	dma_addr_t rx_ring_dma;
	struct ks8695_skbuff rx_buffers[MAX_RX_DESC];
	int next_rx_desc_read;		/* next slot ks8695_rx will examine */
	spinlock_t rx_lock;		/* serialises RX IRQ vs NAPI unmask */

	int msg_enable;			/* netif_msg_* verbosity bitmap */
};
190
191
192
193
194
195
196
197
198static inline u32
199ks8695_readreg(struct ks8695_priv *ksp, int reg)
200{
201 return readl(ksp->io_regs + reg);
202}
203
204
205
206
207
208
209
210static inline void
211ks8695_writereg(struct ks8695_priv *ksp, int reg, u32 value)
212{
213 writel(value, ksp->io_regs + reg);
214}
215
216
217
218
219
220
221
222
223
224
225static const char *
226ks8695_port_type(struct ks8695_priv *ksp)
227{
228 switch (ksp->dtype) {
229 case KS8695_DTYPE_LAN:
230 return "LAN";
231 case KS8695_DTYPE_WAN:
232 return "WAN";
233 case KS8695_DTYPE_HPNA:
234 return "HPNA";
235 }
236
237 return "UNKNOWN";
238}
239
240
241
242
243
244
245
246
247static void
248ks8695_update_mac(struct ks8695_priv *ksp)
249{
250
251 struct net_device *ndev = ksp->ndev;
252 u32 machigh, maclow;
253
254 maclow = ((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
255 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5] << 0));
256 machigh = ((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1] << 0));
257
258 ks8695_writereg(ksp, KS8695_MAL, maclow);
259 ks8695_writereg(ksp, KS8695_MAH, machigh);
260
261}
262
263
264
265
266
267
268
269
270
271
/**
 * ks8695_refill_rxbuffers - Re-arm empty RX ring slots with fresh skbs
 * @ksp: The device in question
 *
 * For every RX slot without an skb, allocate and DMA-map a new buffer,
 * publish its address and length in the descriptor, and only then hand
 * the descriptor to the hardware by setting RDES_OWN.  Stops early (and
 * silently) if allocation or mapping fails; the next call retries.
 * May be called from softirq context (uses dev_kfree_skb_irq).
 */
static void
ks8695_refill_rxbuffers(struct ks8695_priv *ksp)
{
	int buff_n;

	for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
		if (!ksp->rx_buffers[buff_n].skb) {
			struct sk_buff *skb =
				netdev_alloc_skb(ksp->ndev, MAX_RXBUF_SIZE);
			dma_addr_t mapping;

			ksp->rx_buffers[buff_n].skb = skb;
			if (skb == NULL) {
				/* Failed to allocate one, perhaps
				 * we'll try again later.
				 */
				break;
			}

			mapping = dma_map_single(ksp->dev, skb->data,
						 MAX_RXBUF_SIZE,
						 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(ksp->dev, mapping))) {
				/* Failed to DMA map this SKB, try later */
				dev_kfree_skb_irq(skb);
				ksp->rx_buffers[buff_n].skb = NULL;
				break;
			}
			ksp->rx_buffers[buff_n].dma_ptr = mapping;
			ksp->rx_buffers[buff_n].length = MAX_RXBUF_SIZE;

			/* Record this into the DMA ring */
			ksp->rx_ring[buff_n].data_ptr = cpu_to_le32(mapping);
			ksp->rx_ring[buff_n].length =
				cpu_to_le32(MAX_RXBUF_SIZE);

			/* Ensure data_ptr/length are visible before the
			 * hardware is told it owns the descriptor */
			wmb();

			/* And give ownership over to the hardware */
			ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
		}
	}
}
316
317
/* Number of exact-match additional-address slots in the hardware */
#define KS8695_NR_ADDRESSES 16
319
320
321
322
323
324
325
326
327
328
329
330static void
331ks8695_init_partial_multicast(struct ks8695_priv *ksp,
332 struct net_device *ndev)
333{
334 u32 low, high;
335 int i;
336 struct netdev_hw_addr *ha;
337
338 i = 0;
339 netdev_for_each_mc_addr(ha, ndev) {
340
341 BUG_ON(i == KS8695_NR_ADDRESSES);
342
343 low = (ha->addr[2] << 24) | (ha->addr[3] << 16) |
344 (ha->addr[4] << 8) | (ha->addr[5]);
345 high = (ha->addr[0] << 8) | (ha->addr[1]);
346
347 ks8695_writereg(ksp, KS8695_AAL_(i), low);
348 ks8695_writereg(ksp, KS8695_AAH_(i), AAH_E | high);
349 i++;
350 }
351
352
353 for (; i < KS8695_NR_ADDRESSES; i++) {
354 ks8695_writereg(ksp, KS8695_AAL_(i), 0);
355 ks8695_writereg(ksp, KS8695_AAH_(i), 0);
356 }
357}
358
359
360
361
362
363
364
365
366
367
368
369
/**
 * ks8695_tx_irq - Transmit-complete interrupt handler
 * @irq: The IRQ number (unused)
 * @dev_id: The net_device for this interface
 *
 * Walks the entire TX ring reaping every slot whose skb is pending and
 * whose descriptor the hardware has given back (TDES_OWN clear):
 * accounts the packet, clears the descriptor pointer, unmaps the DMA
 * buffer and frees the skb.  Unconditionally wakes the TX queue since
 * at least one slot is normally freed per interrupt.
 */
static irqreturn_t
ks8695_tx_irq(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct ks8695_priv *ksp = netdev_priv(ndev);
	int buff_n;

	for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
		if (ksp->tx_buffers[buff_n].skb &&
		    !(ksp->tx_ring[buff_n].owner & cpu_to_le32(TDES_OWN))) {
			/* Order the ownership check before reading the
			 * rest of the descriptor */
			rmb();

			/* An SKB which is not owned by HW is present */
			/* Update the stats for the net_device */
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += ksp->tx_buffers[buff_n].length;

			/* Free the packet from the ring */
			ksp->tx_ring[buff_n].data_ptr = 0;

			/* Free the sk_buff */
			dma_unmap_single(ksp->dev,
					 ksp->tx_buffers[buff_n].dma_ptr,
					 ksp->tx_buffers[buff_n].length,
					 DMA_TO_DEVICE);
			dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb);
			ksp->tx_buffers[buff_n].skb = NULL;
			ksp->tx_ring_used--;
		}
	}

	netif_wake_queue(ndev);

	return IRQ_HANDLED;
}
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
/*
 * On the KS8695 the RX DMA interrupt's IRQ number is also its bit
 * position in the interrupt-enable register (KS8695_INTEN), so the
 * stored rx_irq doubles as the enable-bit index used by the RX IRQ
 * handler and the NAPI poll routine.
 */
static inline u32 ks8695_get_rx_enable_bit(struct ks8695_priv *ksp)
{
	return ksp->rx_irq;
}
423
424
425
426
427
428
429
430
431
/**
 * ks8695_rx_irq - Receive interrupt handler
 * @irq: The IRQ number (unused)
 * @dev_id: The net_device for this interface
 *
 * Masks this interface's RX interrupt-enable bit in the interrupt
 * controller and schedules NAPI; ks8695_poll() re-enables the bit when
 * polling completes.  rx_lock serialises the mask/unmask against the
 * poll routine.
 */
static irqreturn_t
ks8695_rx_irq(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct ks8695_priv *ksp = netdev_priv(ndev);

	spin_lock(&ksp->rx_lock);

	if (napi_schedule_prep(&ksp->napi)) {
		unsigned long status = readl(KS8695_IRQ_VA + KS8695_INTEN);
		unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
		/* disable RX interrupt while NAPI polls */
		status &= ~mask_bit;
		writel(status , KS8695_IRQ_VA + KS8695_INTEN);
		__napi_schedule(&ksp->napi);
	}

	spin_unlock(&ksp->rx_lock);
	return IRQ_HANDLED;
}
452
453
454
455
456
457
/**
 * ks8695_rx - Receive packets, called by the NAPI poll routine
 * @ksp: Private data for the KS8695 Ethernet
 * @budget: Maximum number of packets to process
 *
 * Walks the RX ring from next_rx_desc_read, consuming descriptors the
 * hardware has handed back (RDES_OWN clear).  Good frames are unmapped
 * and passed up via napi_gro_receive(); error frames are accounted and
 * their descriptor is immediately re-armed for the hardware.  Finally
 * refills any empty slots and pokes the RX DMA engine.
 *
 * Returns the number of descriptors processed (<= @budget).
 */
static int ks8695_rx(struct ks8695_priv *ksp, int budget)
{
	struct net_device *ndev = ksp->ndev;
	struct sk_buff *skb;
	int buff_n;
	u32 flags;
	int pktlen;
	int received = 0;

	buff_n = ksp->next_rx_desc_read;
	while (received < budget
			&& ksp->rx_buffers[buff_n].skb
			&& (!(ksp->rx_ring[buff_n].status &
					cpu_to_le32(RDES_OWN)))) {
		/* Order the ownership check before reading the flags */
		rmb();
		flags = le32_to_cpu(ksp->rx_ring[buff_n].status);

		/* Found an SKB which we own, this means we
		 * received a packet (or an error)
		 */
		if ((flags & (RDES_FS | RDES_LS)) !=
		    (RDES_FS | RDES_LS)) {
			/* This descriptor does not hold a whole frame
			 * (first+last); drop it */
			goto rx_failure;
		}

		if (flags & (RDES_ES | RDES_RE)) {
			/* Error frame: classify and account it */
			ndev->stats.rx_errors++;
			if (flags & RDES_TL)
				ndev->stats.rx_length_errors++;
			if (flags & RDES_RF)
				ndev->stats.rx_length_errors++;
			if (flags & RDES_CE)
				ndev->stats.rx_crc_errors++;
			if (flags & RDES_RE)
				ndev->stats.rx_missed_errors++;

			goto rx_failure;
		}

		/* Extract the frame length, minus the trailing CRC */
		pktlen = flags & RDES_FLEN;
		pktlen -= 4;

		/* Retrieve the sk_buff */
		skb = ksp->rx_buffers[buff_n].skb;

		/* Release the slot so the refill path re-arms it */
		ksp->rx_buffers[buff_n].skb = NULL;
		ksp->rx_ring[buff_n].data_ptr = 0;

		/* Hand the buffer back from the hardware */
		dma_unmap_single(ksp->dev,
				 ksp->rx_buffers[buff_n].dma_ptr,
				 ksp->rx_buffers[buff_n].length,
				 DMA_FROM_DEVICE);

		/* Pass the frame up the stack */
		skb_put(skb, pktlen);
		skb->protocol = eth_type_trans(skb, ndev);
		napi_gro_receive(&ksp->napi, skb);

		/* Update the stats */
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += pktlen;
		goto rx_finished;

rx_failure:
		/* The frame was bad: give the descriptor straight back
		 * to the hardware, keeping the existing buffer */
		ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
rx_finished:
		received++;
		buff_n = (buff_n + 1) & MAX_RX_DESC_MASK;
	}

	/* Remember where we left off for the next poll */
	ksp->next_rx_desc_read = buff_n;

	/* And refill the buffers we consumed */
	ks8695_refill_rxbuffers(ksp);

	/* Kick the RX DMA engine, in case it became suspended */
	ks8695_writereg(ksp, KS8695_DRSC, 0);

	return received;
}
551
552
553
554
555
556
557
558
559
560
561static int ks8695_poll(struct napi_struct *napi, int budget)
562{
563 struct ks8695_priv *ksp = container_of(napi, struct ks8695_priv, napi);
564 unsigned long isr = readl(KS8695_IRQ_VA + KS8695_INTEN);
565 unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
566 int work_done;
567
568 work_done = ks8695_rx(ksp, budget);
569
570 if (work_done < budget && napi_complete_done(napi, work_done)) {
571 unsigned long flags;
572
573 spin_lock_irqsave(&ksp->rx_lock, flags);
574
575 writel(isr | mask_bit, KS8695_IRQ_VA + KS8695_INTEN);
576 spin_unlock_irqrestore(&ksp->rx_lock, flags);
577 }
578 return work_done;
579}
580
581
582
583
584
585
586
587
588
589static irqreturn_t
590ks8695_link_irq(int irq, void *dev_id)
591{
592 struct net_device *ndev = (struct net_device *)dev_id;
593 struct ks8695_priv *ksp = netdev_priv(ndev);
594 u32 ctrl;
595
596 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
597 if (ctrl & WMC_WLS) {
598 netif_carrier_on(ndev);
599 if (netif_msg_link(ksp))
600 dev_info(ksp->dev,
601 "%s: Link is now up (10%sMbps/%s-duplex)\n",
602 ndev->name,
603 (ctrl & WMC_WSS) ? "0" : "",
604 (ctrl & WMC_WDS) ? "Full" : "Half");
605 } else {
606 netif_carrier_off(ndev);
607 if (netif_msg_link(ksp))
608 dev_info(ksp->dev, "%s: Link is now down.\n",
609 ndev->name);
610 }
611
612 return IRQ_HANDLED;
613}
614
615
616
617
618
619
620
621
622
623
624
/**
 * ks8695_reset - Reset a KS8695 ethernet interface
 * @ksp: The interface to reset
 *
 * Requests a TX DMA engine reset and polls for the self-clearing
 * DTXC_TRST bit (1 ms per iteration, up to 'watchdog' iterations),
 * then programs baseline RX (unicast+broadcast) and TX (pad+CRC)
 * DMA control settings.  A reset timeout is logged but not fatal.
 */
static void
ks8695_reset(struct ks8695_priv *ksp)
{
	int reset_timeout = watchdog;

	ks8695_writereg(ksp, KS8695_DTXC, DTXC_TRST);
	while (reset_timeout--) {
		if (!(ks8695_readreg(ksp, KS8695_DTXC) & DTXC_TRST))
			break;
		msleep(1);
	}

	/* reset_timeout is -1 only when the loop exhausted without
	 * seeing TRST clear */
	if (reset_timeout < 0) {
		dev_crit(ksp->dev,
			 "Timeout waiting for DMA engines to reset\n");
		/* And blithely carry on */
	}

	/* Definitely wait long enough before attempting to program
	 * the engines again */
	msleep(10);

	/* RX: unicast and broadcast */
	ks8695_writereg(ksp, KS8695_DRXC, DRXC_RU | DRXC_RB);
	/* TX: pad and add CRC */
	ks8695_writereg(ksp, KS8695_DTXC, DTXC_TEP | DTXC_TAC);
}
653
654
655
656
657
658
659
660
661
/**
 * ks8695_shutdown - Shut down a KS8695 ethernet interface
 * @ksp: The interface to shut down
 *
 * Disables the TX and RX DMA engines, releases all three IRQs, then
 * tears down every outstanding TX and RX buffer: descriptors are
 * cleared, DMA mappings unmapped and skbs freed.
 *
 * NOTE(review): this frees rx/tx/link IRQs unconditionally; if called
 * on a path where ks8695_init_net() failed part-way, some of these may
 * never have been requested — confirm callers.
 */
static void
ks8695_shutdown(struct ks8695_priv *ksp)
{
	u32 ctrl;
	int buff_n;

	/* Disable packet transmission */
	ctrl = ks8695_readreg(ksp, KS8695_DTXC);
	ks8695_writereg(ksp, KS8695_DTXC, ctrl & ~DTXC_TE);

	/* Disable packet reception */
	ctrl = ks8695_readreg(ksp, KS8695_DRXC);
	ks8695_writereg(ksp, KS8695_DRXC, ctrl & ~DRXC_RE);

	/* Release the IRQs */
	free_irq(ksp->rx_irq, ksp->ndev);
	free_irq(ksp->tx_irq, ksp->ndev);
	if (ksp->link_irq != -1)
		free_irq(ksp->link_irq, ksp->ndev);

	/* Throw away any pending TX packets */
	for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
		if (ksp->tx_buffers[buff_n].skb) {
			/* Remove this SKB from the TX ring */
			ksp->tx_ring[buff_n].owner = 0;
			ksp->tx_ring[buff_n].status = 0;
			ksp->tx_ring[buff_n].data_ptr = 0;

			/* Unmap and bin this SKB */
			dma_unmap_single(ksp->dev,
					 ksp->tx_buffers[buff_n].dma_ptr,
					 ksp->tx_buffers[buff_n].length,
					 DMA_TO_DEVICE);
			dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb);
			ksp->tx_buffers[buff_n].skb = NULL;
		}
	}

	/* Purge the RX buffers */
	for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
		if (ksp->rx_buffers[buff_n].skb) {
			/* Remove the SKB from the ring */
			ksp->rx_ring[buff_n].status = 0;
			ksp->rx_ring[buff_n].data_ptr = 0;

			/* Unmap and bin the SKB */
			dma_unmap_single(ksp->dev,
					 ksp->rx_buffers[buff_n].dma_ptr,
					 ksp->rx_buffers[buff_n].length,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_irq(ksp->rx_buffers[buff_n].skb);
			ksp->rx_buffers[buff_n].skb = NULL;
		}
	}
}
717
718
719
720
721
722
723
724
725
726
727
728static int
729ks8695_setup_irq(int irq, const char *irq_name,
730 irq_handler_t handler, struct net_device *ndev)
731{
732 int ret;
733
734 ret = request_irq(irq, handler, IRQF_SHARED, irq_name, ndev);
735
736 if (ret) {
737 dev_err(&ndev->dev, "failure to request IRQ %d\n", irq);
738 return ret;
739 }
740
741 return 0;
742}
743
744
745
746
747
748
749
750
751
/**
 * ks8695_init_net - Bring up the hardware and claim the IRQs
 * @ksp: The interface to bring up
 *
 * Fills the RX ring, programs the descriptor base addresses, requests
 * the RX/TX (and optional link) IRQs, resets the ring bookkeeping and
 * enables the TX and RX DMA engines.
 *
 * NOTE(review): if a later IRQ request fails, the IRQs already
 * requested here are not freed before returning — the caller is
 * expected to clean up via ks8695_shutdown(); verify that pairing.
 */
static int
ks8695_init_net(struct ks8695_priv *ksp)
{
	int ret;
	u32 ctrl;

	ks8695_refill_rxbuffers(ksp);

	/* Initialise the DMA engines with the ring base addresses */
	ks8695_writereg(ksp, KS8695_RDLB, (u32) ksp->rx_ring_dma);
	ks8695_writereg(ksp, KS8695_TDLB, (u32) ksp->tx_ring_dma);

	/* Request the IRQs */
	ret = ks8695_setup_irq(ksp->rx_irq, ksp->rx_irq_name,
			       ks8695_rx_irq, ksp->ndev);
	if (ret)
		return ret;
	ret = ks8695_setup_irq(ksp->tx_irq, ksp->tx_irq_name,
			       ks8695_tx_irq, ksp->ndev);
	if (ret)
		return ret;
	if (ksp->link_irq != -1) {
		ret = ks8695_setup_irq(ksp->link_irq, ksp->link_irq_name,
				       ks8695_link_irq, ksp->ndev);
		if (ret)
			return ret;
	}

	/* Reset the ring bookkeeping */
	ksp->next_rx_desc_read = 0;
	ksp->tx_ring_next_slot = 0;
	ksp->tx_ring_used = 0;

	/* Enable the TX DMA engine */
	ctrl = ks8695_readreg(ksp, KS8695_DTXC);
	ks8695_writereg(ksp, KS8695_DTXC, ctrl | DTXC_TE);

	/* Enable the RX DMA engine */
	ctrl = ks8695_readreg(ksp, KS8695_DRXC);
	ks8695_writereg(ksp, KS8695_DRXC, ctrl | DRXC_RE);
	/* Kick the RX DMA engine */
	ks8695_writereg(ksp, KS8695_DRSC, 0);

	return 0;
}
800
801
802
803
804
805
806
807
808static void
809ks8695_release_device(struct ks8695_priv *ksp)
810{
811
812 iounmap(ksp->io_regs);
813 if (ksp->phyiface_regs)
814 iounmap(ksp->phyiface_regs);
815
816
817 release_resource(ksp->regs_req);
818 kfree(ksp->regs_req);
819 if (ksp->phyiface_req) {
820 release_resource(ksp->phyiface_req);
821 kfree(ksp->phyiface_req);
822 }
823
824
825 dma_free_coherent(ksp->dev, RING_DMA_SIZE,
826 ksp->ring_base, ksp->ring_base_dma);
827}
828
829
830
831
832
833
834
835static u32
836ks8695_get_msglevel(struct net_device *ndev)
837{
838 struct ks8695_priv *ksp = netdev_priv(ndev);
839
840 return ksp->msg_enable;
841}
842
843
844
845
846
847
848static void
849ks8695_set_msglevel(struct net_device *ndev, u32 value)
850{
851 struct ks8695_priv *ksp = netdev_priv(ndev);
852
853 ksp->msg_enable = value;
854}
855
856
857
858
859
860
/**
 * ks8695_wan_get_link_ksettings - Report WAN port link settings
 * @ndev: The network device to read settings from
 * @cmd: The ethtool structure to fill
 *
 * Builds supported/advertised masks and the current speed/duplex from
 * the WAN MAC control register.  With autonegotiation enabled (WMC_WAND
 * clear) the advertised bits mirror the WMC_WANA* bits and the current
 * link parameters come from the status bits; otherwise the forced
 * speed/duplex bits are reported.
 */
static int
ks8695_wan_get_link_ksettings(struct net_device *ndev,
			      struct ethtool_link_ksettings *cmd)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	u32 ctrl;
	u32 supported, advertising;

	/* All ports on the KS8695 support these... */
	supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		     SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		     SUPPORTED_TP | SUPPORTED_MII);

	advertising = ADVERTISED_TP | ADVERTISED_MII;
	cmd->base.port = PORT_MII;
	supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause);
	cmd->base.phy_address = 0;

	ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
	if ((ctrl & WMC_WAND) == 0) {
		/* auto-negotiation is enabled */
		advertising |= ADVERTISED_Autoneg;
		if (ctrl & WMC_WANA100F)
			advertising |= ADVERTISED_100baseT_Full;
		if (ctrl & WMC_WANA100H)
			advertising |= ADVERTISED_100baseT_Half;
		if (ctrl & WMC_WANA10F)
			advertising |= ADVERTISED_10baseT_Full;
		if (ctrl & WMC_WANA10H)
			advertising |= ADVERTISED_10baseT_Half;
		if (ctrl & WMC_WANAP)
			advertising |= ADVERTISED_Pause;
		cmd->base.autoneg = AUTONEG_ENABLE;

		cmd->base.speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10;
		cmd->base.duplex = (ctrl & WMC_WDS) ?
			DUPLEX_FULL : DUPLEX_HALF;
	} else {
		/* auto-negotiation is disabled */
		cmd->base.autoneg = AUTONEG_DISABLE;

		cmd->base.speed = (ctrl & WMC_WANF100) ?
			SPEED_100 : SPEED_10;
		cmd->base.duplex = (ctrl & WMC_WANFF) ?
			DUPLEX_FULL : DUPLEX_HALF;
	}

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
915
916
917
918
919
920
/**
 * ks8695_wan_set_link_ksettings - Apply WAN port link settings
 * @ndev: The network device to configure
 * @cmd: The requested ethtool settings
 *
 * Validates the request (10/100 Mbps, half/full duplex, MII port only),
 * then either programs the autonegotiation advertisement bits and
 * restarts negotiation, or forces the requested speed/duplex by setting
 * WMC_WAND.  Returns 0 on success or -EINVAL for unsupported requests.
 */
static int
ks8695_wan_set_link_ksettings(struct net_device *ndev,
			      const struct ethtool_link_ksettings *cmd)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	u32 ctrl;
	u32 advertising;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if ((cmd->base.speed != SPEED_10) && (cmd->base.speed != SPEED_100))
		return -EINVAL;
	if ((cmd->base.duplex != DUPLEX_HALF) &&
	    (cmd->base.duplex != DUPLEX_FULL))
		return -EINVAL;
	if (cmd->base.port != PORT_MII)
		return -EINVAL;
	if ((cmd->base.autoneg != AUTONEG_DISABLE) &&
	    (cmd->base.autoneg != AUTONEG_ENABLE))
		return -EINVAL;

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		/* Must advertise at least one 10/100 mode */
		if ((advertising & (ADVERTISED_10baseT_Half |
				    ADVERTISED_10baseT_Full |
				    ADVERTISED_100baseT_Half |
				    ADVERTISED_100baseT_Full)) == 0)
			return -EINVAL;

		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);

		/* Clear forced mode and all advertisement bits, then
		 * set the requested advertisements */
		ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H |
			  WMC_WANA10F | WMC_WANA10H);
		if (advertising & ADVERTISED_100baseT_Full)
			ctrl |= WMC_WANA100F;
		if (advertising & ADVERTISED_100baseT_Half)
			ctrl |= WMC_WANA100H;
		if (advertising & ADVERTISED_10baseT_Full)
			ctrl |= WMC_WANA10F;
		if (advertising & ADVERTISED_10baseT_Half)
			ctrl |= WMC_WANA10H;

		/* force a re-negotiation */
		ctrl |= WMC_WANR;
		writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
	} else {
		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);

		/* disable auto-negotiation */
		ctrl |= WMC_WAND;
		ctrl &= ~(WMC_WANF100 | WMC_WANFF);

		if (cmd->base.speed == SPEED_100)
			ctrl |= WMC_WANF100;
		if (cmd->base.duplex == DUPLEX_FULL)
			ctrl |= WMC_WANFF;

		writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
	}

	return 0;
}
983
984
985
986
987
988static int
989ks8695_wan_nwayreset(struct net_device *ndev)
990{
991 struct ks8695_priv *ksp = netdev_priv(ndev);
992 u32 ctrl;
993
994 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
995
996 if ((ctrl & WMC_WAND) == 0)
997 writel(ctrl | WMC_WANR,
998 ksp->phyiface_regs + KS8695_WMC);
999 else
1000
1001 return -EINVAL;
1002
1003 return 0;
1004}
1005
1006
1007
1008
1009
1010
1011static void
1012ks8695_wan_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
1013{
1014 struct ks8695_priv *ksp = netdev_priv(ndev);
1015 u32 ctrl;
1016
1017 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
1018
1019
1020 param->autoneg = (ctrl & WMC_WANAP);
1021
1022
1023 ctrl = ks8695_readreg(ksp, KS8695_DRXC);
1024 param->rx_pause = (ctrl & DRXC_RFCE);
1025
1026
1027 ctrl = ks8695_readreg(ksp, KS8695_DTXC);
1028 param->tx_pause = (ctrl & DTXC_TFCE);
1029}
1030
1031
1032
1033
1034
1035
/**
 * ks8695_get_drvinfo - Report driver information for ethtool
 * @ndev: The device to describe
 * @info: The structure to fill with driver name, version and bus info
 */
static void
ks8695_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, MODULENAME, sizeof(info->driver));
	strlcpy(info->version, MODULEVERSION, sizeof(info->version));
	strlcpy(info->bus_info, dev_name(ndev->dev.parent),
		sizeof(info->bus_info));
}
1044
/* ethtool ops for the LAN and HPNA ports (no PHY-level control) */
static const struct ethtool_ops ks8695_ethtool_ops = {
	.get_msglevel	= ks8695_get_msglevel,
	.set_msglevel	= ks8695_set_msglevel,
	.get_drvinfo	= ks8695_get_drvinfo,
};
1050
/* ethtool ops for the WAN port, which exposes link control via WMC */
static const struct ethtool_ops ks8695_wan_ethtool_ops = {
	.get_msglevel	= ks8695_get_msglevel,
	.set_msglevel	= ks8695_set_msglevel,
	.nway_reset	= ks8695_wan_nwayreset,
	.get_link	= ethtool_op_get_link,
	.get_pauseparam = ks8695_wan_get_pause,
	.get_drvinfo	= ks8695_get_drvinfo,
	.get_link_ksettings = ks8695_wan_get_link_ksettings,
	.set_link_ksettings = ks8695_wan_set_link_ksettings,
};
1061
1062
1063
1064
1065
1066
1067
1068
1069static int
1070ks8695_set_mac(struct net_device *ndev, void *addr)
1071{
1072 struct ks8695_priv *ksp = netdev_priv(ndev);
1073 struct sockaddr *address = addr;
1074
1075 if (!is_valid_ether_addr(address->sa_data))
1076 return -EADDRNOTAVAIL;
1077
1078 memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len);
1079
1080 ks8695_update_mac(ksp);
1081
1082 dev_dbg(ksp->dev, "%s: Updated MAC address to %pM\n",
1083 ndev->name, ndev->dev_addr);
1084
1085 return 0;
1086}
1087
1088
1089
1090
1091
1092
1093
1094
1095static void
1096ks8695_set_multicast(struct net_device *ndev)
1097{
1098 struct ks8695_priv *ksp = netdev_priv(ndev);
1099 u32 ctrl;
1100
1101 ctrl = ks8695_readreg(ksp, KS8695_DRXC);
1102
1103 if (ndev->flags & IFF_PROMISC) {
1104
1105 ctrl |= DRXC_RA;
1106 } else if (ndev->flags & ~IFF_PROMISC) {
1107
1108 ctrl &= ~DRXC_RA;
1109 }
1110
1111 if (ndev->flags & IFF_ALLMULTI) {
1112
1113 ctrl |= DRXC_RM;
1114 } else if (netdev_mc_count(ndev) > KS8695_NR_ADDRESSES) {
1115
1116
1117
1118 ctrl |= DRXC_RM;
1119 } else {
1120
1121 ctrl &= ~DRXC_RM;
1122 ks8695_init_partial_multicast(ksp, ndev);
1123 }
1124
1125 ks8695_writereg(ksp, KS8695_DRXC, ctrl);
1126}
1127
1128
1129
1130
1131
1132
1133
/**
 * ks8695_timeout - Handle a network TX watchdog timeout
 * @ndev: The net_device which timed out
 *
 * Performs a full shutdown / reset / re-init cycle of the interface
 * to recover from a wedged TX path.
 *
 * NOTE(review): the return value of ks8695_init_net() is ignored here;
 * if re-initialisation fails the queue is restarted anyway — confirm
 * whether anything better can be done in timeout context.
 */
static void
ks8695_timeout(struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);

	netif_stop_queue(ndev);
	ks8695_shutdown(ksp);

	ks8695_reset(ksp);

	ks8695_update_mac(ksp);

	/* We ignore the return from this since it managed to init
	 * before it got here.
	 */
	ks8695_init_net(ksp);

	/* Reconfigure promiscuity etc */
	ks8695_set_multicast(ndev);

	/* And start the TX queue once more */
	netif_start_queue(ndev);
}
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
/**
 * ks8695_start_xmit - Queue one packet for transmission
 * @skb: The buffer to transmit
 * @ndev: The device used to transmit it
 *
 * Claims the next free TX ring slot, DMA-maps the packet, fills the
 * descriptor and — only after a write barrier — hands ownership to the
 * hardware via TDES_OWN, then pokes the TX DMA engine.  Returns
 * NETDEV_TX_BUSY when the ring is full or mapping fails, NETDEV_TX_OK
 * otherwise.  Serialised against the TX reap path via txq_lock.
 */
static int
ks8695_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	int buff_n;
	dma_addr_t dmap;

	spin_lock_irq(&ksp->txq_lock);

	if (ksp->tx_ring_used == MAX_TX_DESC) {
		/* Somehow we got entered when we have no room */
		spin_unlock_irq(&ksp->txq_lock);
		return NETDEV_TX_BUSY;
	}

	buff_n = ksp->tx_ring_next_slot;

	/* A free slot must never still hold an skb */
	BUG_ON(ksp->tx_buffers[buff_n].skb);

	dmap = dma_map_single(ksp->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(ksp->dev, dmap))) {
		/* Failed to DMA map this SKB, give it back for now */
		spin_unlock_irq(&ksp->txq_lock);
		dev_dbg(ksp->dev, "%s: Could not map DMA memory for "\
			"transmission, trying later\n", ndev->name);
		return NETDEV_TX_BUSY;
	}

	ksp->tx_buffers[buff_n].dma_ptr = dmap;
	/* Mapped okay, store the buffer pointer and length for later */
	ksp->tx_buffers[buff_n].skb = skb;
	ksp->tx_buffers[buff_n].length = skb->len;

	/* Fill out the TX descriptor */
	ksp->tx_ring[buff_n].data_ptr =
		cpu_to_le32(ksp->tx_buffers[buff_n].dma_ptr);
	ksp->tx_ring[buff_n].status =
		cpu_to_le32(TDES_IC | TDES_FS | TDES_LS |
			    (skb->len & TDES_TBS));

	/* Descriptor contents must be visible before ownership flips */
	wmb();

	/* Hand it over to the hardware */
	ksp->tx_ring[buff_n].owner = cpu_to_le32(TDES_OWN);

	if (++ksp->tx_ring_used == MAX_TX_DESC)
		netif_stop_queue(ndev);

	/* Kick the TX DMA in case it decided to go IDLE */
	ks8695_writereg(ksp, KS8695_DTSC, 0);

	/* And update the next ring slot */
	ksp->tx_ring_next_slot = (buff_n + 1) & MAX_TX_DESC_MASK;

	spin_unlock_irq(&ksp->txq_lock);
	return NETDEV_TX_OK;
}
1224
1225
1226
1227
1228
1229
1230
1231
1232static int
1233ks8695_stop(struct net_device *ndev)
1234{
1235 struct ks8695_priv *ksp = netdev_priv(ndev);
1236
1237 netif_stop_queue(ndev);
1238 napi_disable(&ksp->napi);
1239
1240 ks8695_shutdown(ksp);
1241
1242 return 0;
1243}
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253static int
1254ks8695_open(struct net_device *ndev)
1255{
1256 struct ks8695_priv *ksp = netdev_priv(ndev);
1257 int ret;
1258
1259 ks8695_reset(ksp);
1260
1261 ks8695_update_mac(ksp);
1262
1263 ret = ks8695_init_net(ksp);
1264 if (ret) {
1265 ks8695_shutdown(ksp);
1266 return ret;
1267 }
1268
1269 napi_enable(&ksp->napi);
1270 netif_start_queue(ndev);
1271
1272 return 0;
1273}
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
/**
 * ks8695_init_switch - Configure the KS8695 LAN switch
 * @ksp: The device whose switch to configure
 *
 * Programs the switch enable/control registers for the LAN port.
 * NOTE(review): 0x40819e00 and 0x9400100 are the default register
 * values being restored here minus the fields changed below — the
 * exact field meanings come from the KS8695 datasheet; confirm before
 * altering.
 */
static void
ks8695_init_switch(struct ks8695_priv *ksp)
{
	u32 ctrl;

	/* Default value for SEC0 according to datasheet */
	ctrl = 0x40819e00;

	/* LED0 = Speed	 LED1 = Link/Activity */
	ctrl &= ~(SEC0_LLED1S | SEC0_LLED0S);
	ctrl |= (LLED0S_LINK | LLED1S_LINK_ACTIVITY);

	/* Enable Switch */
	ctrl |= SEC0_ENABLE;

	writel(ctrl, ksp->phyiface_regs + KS8695_SEC0);

	/* Defaults for SEC1 */
	writel(0x9400100, ksp->phyiface_regs + KS8695_SEC1);
}
1304
1305
1306
1307
1308
1309
1310
1311
1312static void
1313ks8695_init_wan_phy(struct ks8695_priv *ksp)
1314{
1315 u32 ctrl;
1316
1317
1318 ctrl = (WMC_WANAP | WMC_WANA100F | WMC_WANA100H |
1319 WMC_WANA10F | WMC_WANA10H);
1320
1321
1322 ctrl |= (WLED0S_ACTIVITY | WLED1S_LINK);
1323
1324
1325 ctrl |= WMC_WANR;
1326
1327 writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
1328
1329 writel(0, ksp->phyiface_regs + KS8695_WPPM);
1330 writel(0, ksp->phyiface_regs + KS8695_PPS);
1331}
1332
/* net_device operations shared by all three port flavours */
static const struct net_device_ops ks8695_netdev_ops = {
	.ndo_open		= ks8695_open,
	.ndo_stop		= ks8695_stop,
	.ndo_start_xmit		= ks8695_start_xmit,
	.ndo_tx_timeout		= ks8695_timeout,
	.ndo_set_mac_address	= ks8695_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= ks8695_set_multicast,
};
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
/**
 * ks8695_probe - Probe and initialise a KS8695 ethernet interface
 * @pdev: The platform device to probe
 *
 * Initialise a KS8695 ethernet device from platform data: claim and
 * map the register windows, record the IRQ resources, read the
 * factory-programmed MAC address out of the hardware, allocate and
 * link the coherent descriptor rings, configure the switch or WAN PHY
 * depending on which resources are present, and register the netdev.
 *
 * Port flavour selection: switch regs + no link IRQ => LAN port;
 * switch regs + link IRQ => WAN port; neither => HPNA port.
 */
static int
ks8695_probe(struct platform_device *pdev)
{
	struct ks8695_priv *ksp;
	struct net_device *ndev;
	struct resource *regs_res, *phyiface_res;
	struct resource *rxirq_res, *txirq_res, *linkirq_res;
	int ret = 0;
	int buff_n;
	bool inv_mac_addr = false;
	u32 machigh, maclow;

	/* Initialise a net_device */
	ndev = alloc_etherdev(sizeof(struct ks8695_priv));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	dev_dbg(&pdev->dev, "ks8695_probe() called\n");

	/* Configure our private structure a little */
	ksp = netdev_priv(ndev);

	ksp->dev = &pdev->dev;
	ksp->ndev = ndev;
	ksp->msg_enable = NETIF_MSG_LINK;

	/* Retrieve resources: regs window, optional switch/PHY window,
	 * RX/TX IRQs and optional link IRQ */
	regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	phyiface_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);

	rxirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	txirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
	linkirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 2);

	if (!(regs_res && rxirq_res && txirq_res)) {
		dev_err(ksp->dev, "insufficient resources\n");
		ret = -ENOENT;
		goto failure;
	}

	ksp->regs_req = request_mem_region(regs_res->start,
					   resource_size(regs_res),
					   pdev->name);

	if (!ksp->regs_req) {
		dev_err(ksp->dev, "cannot claim register space\n");
		ret = -EIO;
		goto failure;
	}

	ksp->io_regs = ioremap(regs_res->start, resource_size(regs_res));

	if (!ksp->io_regs) {
		dev_err(ksp->dev, "failed to ioremap registers\n");
		ret = -EINVAL;
		goto failure;
	}

	if (phyiface_res) {
		ksp->phyiface_req =
			request_mem_region(phyiface_res->start,
					   resource_size(phyiface_res),
					   phyiface_res->name);

		if (!ksp->phyiface_req) {
			dev_err(ksp->dev,
				"cannot claim switch register space\n");
			ret = -EIO;
			goto failure;
		}

		ksp->phyiface_regs = ioremap(phyiface_res->start,
					     resource_size(phyiface_res));

		if (!ksp->phyiface_regs) {
			dev_err(ksp->dev,
				"failed to ioremap switch registers\n");
			ret = -EINVAL;
			goto failure;
		}
	}

	ksp->rx_irq = rxirq_res->start;
	ksp->rx_irq_name = rxirq_res->name ? rxirq_res->name : "Ethernet RX";
	ksp->tx_irq = txirq_res->start;
	ksp->tx_irq_name = txirq_res->name ? txirq_res->name : "Ethernet TX";
	ksp->link_irq = (linkirq_res ? linkirq_res->start : -1);
	ksp->link_irq_name = (linkirq_res && linkirq_res->name) ?
		linkirq_res->name : "Ethernet Link";

	/* driver system setup */
	ndev->netdev_ops = &ks8695_netdev_ops;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);

	netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT);

	/* Retrieve the default MAC addr from the chip (programmed by
	 * the bootloader or retained from a previous run) */
	machigh = ks8695_readreg(ksp, KS8695_MAH);
	maclow = ks8695_readreg(ksp, KS8695_MAL);

	ndev->dev_addr[0] = (machigh >> 8) & 0xFF;
	ndev->dev_addr[1] = machigh & 0xFF;
	ndev->dev_addr[2] = (maclow >> 24) & 0xFF;
	ndev->dev_addr[3] = (maclow >> 16) & 0xFF;
	ndev->dev_addr[4] = (maclow >> 8) & 0xFF;
	ndev->dev_addr[5] = maclow & 0xFF;

	if (!is_valid_ether_addr(ndev->dev_addr))
		inv_mac_addr = true;

	/* In order to be efficient memory-wise, we allocate both
	 * rings in one go.
	 */
	ksp->ring_base = dma_alloc_coherent(&pdev->dev, RING_DMA_SIZE,
					    &ksp->ring_base_dma, GFP_KERNEL);
	if (!ksp->ring_base) {
		ret = -ENOMEM;
		goto failure;
	}

	/* Specify the TX DMA ring buffer */
	ksp->tx_ring = ksp->ring_base;
	ksp->tx_ring_dma = ksp->ring_base_dma;

	/* And initialise the queue's lock */
	spin_lock_init(&ksp->txq_lock);
	spin_lock_init(&ksp->rx_lock);

	/* Specify the RX DMA ring buffer */
	ksp->rx_ring = ksp->ring_base + TX_RING_DMA_SIZE;
	ksp->rx_ring_dma = ksp->ring_base_dma + TX_RING_DMA_SIZE;

	/* Zero the descriptor rings */
	memset(ksp->tx_ring, 0, TX_RING_DMA_SIZE);
	memset(ksp->rx_ring, 0, RX_RING_DMA_SIZE);

	/* Build the rings: each descriptor's next pointer wraps the
	 * ring via the power-of-two mask */
	for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
		ksp->tx_ring[buff_n].next_desc =
			cpu_to_le32(ksp->tx_ring_dma +
				    (sizeof(struct tx_ring_desc) *
				     ((buff_n + 1) & MAX_TX_DESC_MASK)));
	}

	for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
		ksp->rx_ring[buff_n].next_desc =
			cpu_to_le32(ksp->rx_ring_dma +
				    (sizeof(struct rx_ring_desc) *
				     ((buff_n + 1) & MAX_RX_DESC_MASK)));
	}

	/* Initialise the port (physically) */
	if (ksp->phyiface_regs && ksp->link_irq == -1) {
		ks8695_init_switch(ksp);
		ksp->dtype = KS8695_DTYPE_LAN;
		ndev->ethtool_ops = &ks8695_ethtool_ops;
	} else if (ksp->phyiface_regs && ksp->link_irq != -1) {
		ks8695_init_wan_phy(ksp);
		ksp->dtype = KS8695_DTYPE_WAN;
		ndev->ethtool_ops = &ks8695_wan_ethtool_ops;
	} else {
		/* No initialisation since HPNA does not have a PHY */
		ksp->dtype = KS8695_DTYPE_HPNA;
		ndev->ethtool_ops = &ks8695_ethtool_ops;
	}

	/* And bring up the net_device with the net core */
	platform_set_drvdata(pdev, ndev);
	ret = register_netdev(ndev);

	if (ret == 0) {
		if (inv_mac_addr)
			dev_warn(ksp->dev, "%s: Invalid ethernet MAC address. Please set using ip\n",
				 ndev->name);
		dev_info(ksp->dev, "ks8695 ethernet (%s) MAC: %pM\n",
			 ks8695_port_type(ksp), ndev->dev_addr);
	} else {
		/* Report the failure to register the net_device */
		dev_err(ksp->dev, "ks8695net: failed to register netdev.\n");
		goto failure;
	}

	/* All is well */
	return 0;

	/* Error exit path */
failure:
	/* NOTE(review): ks8695_release_device must tolerate partially
	 * acquired resources when reached from here */
	ks8695_release_device(ksp);
	free_netdev(ndev);

	return ret;
}
1552
1553
1554
1555
1556
1557
1558
1559
1560static int
1561ks8695_drv_suspend(struct platform_device *pdev, pm_message_t state)
1562{
1563 struct net_device *ndev = platform_get_drvdata(pdev);
1564 struct ks8695_priv *ksp = netdev_priv(ndev);
1565
1566 ksp->in_suspend = 1;
1567
1568 if (netif_running(ndev)) {
1569 netif_device_detach(ndev);
1570 ks8695_shutdown(ksp);
1571 }
1572
1573 return 0;
1574}
1575
1576
1577
1578
1579
1580
1581
1582
1583static int
1584ks8695_drv_resume(struct platform_device *pdev)
1585{
1586 struct net_device *ndev = platform_get_drvdata(pdev);
1587 struct ks8695_priv *ksp = netdev_priv(ndev);
1588
1589 if (netif_running(ndev)) {
1590 ks8695_reset(ksp);
1591 ks8695_init_net(ksp);
1592 ks8695_set_multicast(ndev);
1593 netif_device_attach(ndev);
1594 }
1595
1596 ksp->in_suspend = 0;
1597
1598 return 0;
1599}
1600
1601
1602
1603
1604
1605
1606
/**
 * ks8695_drv_remove - Remove a KS8695 ethernet platform device
 * @pdev: The platform device being removed
 *
 * Tears down in the reverse order of probe: NAPI context, netdev
 * registration, hardware resources, then the net_device itself.
 */
static int
ks8695_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ks8695_priv *ksp = netdev_priv(ndev);

	netif_napi_del(&ksp->napi);

	unregister_netdev(ndev);
	ks8695_release_device(ksp);
	free_netdev(ndev);

	dev_dbg(&pdev->dev, "released and freed device\n");
	return 0;
}
1622
/* Platform driver glue; uses the legacy suspend/resume callbacks */
static struct platform_driver ks8695_driver = {
	.driver = {
		.name	= MODULENAME,
	},
	.probe		= ks8695_probe,
	.remove		= ks8695_drv_remove,
	.suspend	= ks8695_drv_suspend,
	.resume		= ks8695_drv_resume,
};
1632
module_platform_driver(ks8695_driver);

/* Module metadata */
MODULE_AUTHOR("Simtec Electronics");
MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MODULENAME);

/* Read-only at module load (0400); units are milliseconds */
module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
1642