1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include <linux/dma-mapping.h>
20#include <linux/module.h>
21#include <linux/ioport.h>
22#include <linux/netdevice.h>
23#include <linux/etherdevice.h>
24#include <linux/interrupt.h>
25#include <linux/skbuff.h>
26#include <linux/spinlock.h>
27#include <linux/crc32.h>
28#include <linux/mii.h>
29#include <linux/ethtool.h>
30#include <linux/delay.h>
31#include <linux/platform_device.h>
32#include <linux/irq.h>
33#include <linux/io.h>
34#include <linux/slab.h>
35
36#include <asm/irq.h>
37
38#include <mach/regs-switch.h>
39#include <mach/regs-misc.h>
40#include <asm/mach/irq.h>
41#include <mach/regs-irq.h>
42
43#include "ks8695net.h"
44
/* Driver identification, also reported through ethtool drvinfo. */
#define MODULENAME "ks8695_ether"
#define MODULEVERSION "1.02"

/* Transmit watchdog timeout in milliseconds (module parameter, see
 * module_param() at the bottom of this file). */
static int watchdog = 5000;
52
53
54
55
56
57
58
59
60
61
/*
 * struct rx_ring_desc - Receive descriptor as seen by the KS8695 DMA engine.
 * All fields are little-endian, as consumed by the hardware.
 */
struct rx_ring_desc {
	__le32 status;		/* ownership (RDES_OWN) and receive status flags */
	__le32 length;		/* buffer length handed to the hardware */
	__le32 data_ptr;	/* DMA address of the receive buffer */
	__le32 next_desc;	/* DMA address of the next descriptor in the ring */
};
68
69
70
71
72
73
74
75
/*
 * struct tx_ring_desc - Transmit descriptor as seen by the KS8695 DMA engine.
 * All fields are little-endian, as consumed by the hardware.
 */
struct tx_ring_desc {
	__le32 owner;		/* ownership bit (TDES_OWN) */
	__le32 status;		/* control flags and transmit buffer size */
	__le32 data_ptr;	/* DMA address of the buffer to transmit */
	__le32 next_desc;	/* DMA address of the next descriptor in the ring */
};
82
83
84
85
86
87
88
/*
 * struct ks8695_skbuff - Driver-side bookkeeping for one ring slot:
 * the skb attached to the descriptor and its DMA mapping.
 */
struct ks8695_skbuff {
	struct sk_buff *skb;	/* skb in flight (NULL when the slot is empty) */
	dma_addr_t dma_ptr;	/* bus address the skb data is mapped at */
	u32 length;		/* mapped length, needed for dma_unmap_single() */
};
94
95
96
/* Ring sizes: must be powers of two so the _MASK values work as wrap masks. */
#define MAX_TX_DESC 8
#define MAX_TX_DESC_MASK 0x7
#define MAX_RX_DESC 16
#define MAX_RX_DESC_MASK 0xf

/* NAPI weight passed to netif_napi_add(). */
#define NAPI_WEIGHT 64

/* Size of each receive buffer handed to the hardware (0x700 = 1792 bytes). */
#define MAX_RXBUF_SIZE 0x700

/* Sizes of the single DMA-coherent allocation holding both rings
 * (TX ring first, RX ring immediately after). */
#define TX_RING_DMA_SIZE (sizeof(struct tx_ring_desc) * MAX_TX_DESC)
#define RX_RING_DMA_SIZE (sizeof(struct rx_ring_desc) * MAX_RX_DESC)
#define RING_DMA_SIZE (TX_RING_DMA_SIZE + RX_RING_DMA_SIZE)
110
111
112
113
114
115
116
/*
 * enum ks8695_dtype - The type of port this private data backs.
 * Determines which ethtool ops and PHY initialisation are used.
 */
enum ks8695_dtype {
	KS8695_DTYPE_WAN,	/* WAN port with its own PHY and link IRQ */
	KS8695_DTYPE_LAN,	/* LAN port behind the internal switch */
	KS8695_DTYPE_HPNA,	/* HPNA port (no PHY) */
};
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
/*
 * struct ks8695_priv - Private driver state for one KS8695 ethernet port.
 */
struct ks8695_priv {
	int in_suspend;			/* non-zero while suspended */
	struct net_device *ndev;	/* the net_device we back */
	struct device *dev;		/* the platform device's struct device */
	enum ks8695_dtype dtype;	/* port type: WAN, LAN or HPNA */
	void __iomem *io_regs;		/* mapped MAC/DMA register window */

	struct napi_struct napi;	/* NAPI context for receive */

	const char *rx_irq_name, *tx_irq_name, *link_irq_name;	/* IRQ names */
	int rx_irq, tx_irq, link_irq;	/* IRQ numbers; link_irq may be -1 */

	struct resource *regs_req, *phyiface_req;	/* claimed MMIO regions */
	void __iomem *phyiface_regs;	/* mapped switch/PHY register window */

	void *ring_base;		/* DMA-coherent ring area (TX then RX) */
	dma_addr_t ring_base_dma;	/* bus address of ring_base */

	struct tx_ring_desc *tx_ring;	/* TX descriptor ring */
	int tx_ring_used;		/* number of in-flight TX descriptors */
	int tx_ring_next_slot;		/* next TX slot to fill */
	dma_addr_t tx_ring_dma;		/* bus address of tx_ring */
	struct ks8695_skbuff tx_buffers[MAX_TX_DESC];	/* per-slot TX skbs */
	spinlock_t txq_lock;		/* protects the TX ring state */

	struct rx_ring_desc *rx_ring;	/* RX descriptor ring */
	dma_addr_t rx_ring_dma;		/* bus address of rx_ring */
	struct ks8695_skbuff rx_buffers[MAX_RX_DESC];	/* per-slot RX skbs */
	int next_rx_desc_read;		/* next RX slot to examine */
	spinlock_t rx_lock;		/* serialises RX irq vs NAPI poll */

	int msg_enable;			/* ethtool message-level mask */
};
190
191
192
193
194
195
196
197
198static inline u32
199ks8695_readreg(struct ks8695_priv *ksp, int reg)
200{
201 return readl(ksp->io_regs + reg);
202}
203
204
205
206
207
208
209
210static inline void
211ks8695_writereg(struct ks8695_priv *ksp, int reg, u32 value)
212{
213 writel(value, ksp->io_regs + reg);
214}
215
216
217
218
219
220
221
222
223
224
225static const char *
226ks8695_port_type(struct ks8695_priv *ksp)
227{
228 switch (ksp->dtype) {
229 case KS8695_DTYPE_LAN:
230 return "LAN";
231 case KS8695_DTYPE_WAN:
232 return "WAN";
233 case KS8695_DTYPE_HPNA:
234 return "HPNA";
235 }
236
237 return "UNKNOWN";
238}
239
240
241
242
243
244
245
246
247static void
248ks8695_update_mac(struct ks8695_priv *ksp)
249{
250
251 struct net_device *ndev = ksp->ndev;
252 u32 machigh, maclow;
253
254 maclow = ((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
255 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5] << 0));
256 machigh = ((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1] << 0));
257
258 ks8695_writereg(ksp, KS8695_MAL, maclow);
259 ks8695_writereg(ksp, KS8695_MAH, machigh);
260
261}
262
263
264
265
266
267
268
269
270
271
/*
 * ks8695_refill_rxbuffers - Re-fill empty slots in the RX ring.
 * @ksp: The device private data
 *
 * For every ring slot with no skb attached, allocates and DMA-maps a
 * fresh MAX_RXBUF_SIZE buffer, then hands the descriptor back to the
 * hardware by setting RDES_OWN.  Called from the NAPI poll path, so
 * failed skbs are freed with dev_kfree_skb_irq().  Allocation or
 * mapping failure simply stops the refill; the next poll retries.
 */
static void
ks8695_refill_rxbuffers(struct ks8695_priv *ksp)
{
	/* buff_n is the slot currently being (re)filled */
	int buff_n;

	for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
		if (!ksp->rx_buffers[buff_n].skb) {
			struct sk_buff *skb =
				netdev_alloc_skb(ksp->ndev, MAX_RXBUF_SIZE);
			dma_addr_t mapping;

			ksp->rx_buffers[buff_n].skb = skb;
			if (skb == NULL) {
				/* Failed to allocate one, give up for now
				 * and retry on the next poll.
				 */
				break;
			}

			mapping = dma_map_single(ksp->dev, skb->data,
						 MAX_RXBUF_SIZE,
						 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(ksp->dev, mapping))) {
				/* Failed to DMA map this SKB: bin it */
				dev_kfree_skb_irq(skb);
				ksp->rx_buffers[buff_n].skb = NULL;
				break;
			}
			ksp->rx_buffers[buff_n].dma_ptr = mapping;
			ksp->rx_buffers[buff_n].length = MAX_RXBUF_SIZE;

			/* Record the mapping in the descriptor */
			ksp->rx_ring[buff_n].data_ptr = cpu_to_le32(mapping);
			ksp->rx_ring[buff_n].length =
				cpu_to_le32(MAX_RXBUF_SIZE);

			/* The descriptor body must be visible to the DMA
			 * engine before ownership is transferred below. */
			wmb();

			/* GIVE the buffer to the hardware */
			ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
		}
	}
}
316
317
/* Number of hardware "additional address" filter slots (AAL/AAH pairs). */
#define KS8695_NR_ADDRESSES 16
319
320
321
322
323
324
325
326
327
328
329
330static void
331ks8695_init_partial_multicast(struct ks8695_priv *ksp,
332 struct net_device *ndev)
333{
334 u32 low, high;
335 int i;
336 struct netdev_hw_addr *ha;
337
338 i = 0;
339 netdev_for_each_mc_addr(ha, ndev) {
340
341 BUG_ON(i == KS8695_NR_ADDRESSES);
342
343 low = (ha->addr[2] << 24) | (ha->addr[3] << 16) |
344 (ha->addr[4] << 8) | (ha->addr[5]);
345 high = (ha->addr[0] << 8) | (ha->addr[1]);
346
347 ks8695_writereg(ksp, KS8695_AAL_(i), low);
348 ks8695_writereg(ksp, KS8695_AAH_(i), AAH_E | high);
349 i++;
350 }
351
352
353 for (; i < KS8695_NR_ADDRESSES; i++) {
354 ks8695_writereg(ksp, KS8695_AAL_(i), 0);
355 ks8695_writereg(ksp, KS8695_AAH_(i), 0);
356 }
357}
358
359
360
361
362
363
364
365
366
367
368
369
/*
 * ks8695_tx_irq - Transmit-complete interrupt handler.
 * @irq: The IRQ number (unused)
 * @dev_id: The net_device this interrupt was registered for
 *
 * Reaps every TX descriptor the hardware has handed back (TDES_OWN
 * clear), accounts the packet, unmaps and frees the skb, and then
 * unconditionally wakes the TX queue since at least one slot is free.
 */
static irqreturn_t
ks8695_tx_irq(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct ks8695_priv *ksp = netdev_priv(ndev);
	int buff_n;

	for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
		if (ksp->tx_buffers[buff_n].skb &&
		    !(ksp->tx_ring[buff_n].owner & cpu_to_le32(TDES_OWN))) {
			/* Read the ownership test above before touching
			 * the rest of the descriptor. */
			rmb();

			/* Update the stats for the net_device */
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += ksp->tx_buffers[buff_n].length;

			/* Free the packet from the ring */
			ksp->tx_ring[buff_n].data_ptr = 0;

			/* Unmap and free the sk_buff */
			dma_unmap_single(ksp->dev,
					 ksp->tx_buffers[buff_n].dma_ptr,
					 ksp->tx_buffers[buff_n].length,
					 DMA_TO_DEVICE);
			dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb);
			ksp->tx_buffers[buff_n].skb = NULL;
			ksp->tx_ring_used--;
		}
	}

	netif_wake_queue(ndev);

	return IRQ_HANDLED;
}
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
/*
 * ks8695_get_rx_enable_bit - Bit index of this port's RX interrupt in
 *                            the KS8695_INTEN mask register.
 * @ksp: The device private data
 *
 * NOTE(review): this returns the RX IRQ number itself, which on the
 * KS8695 appears to double as the interrupt's bit position in INTEN
 * (see the mask_bit computations in ks8695_rx_irq/ks8695_poll) --
 * confirm against the SoC documentation.
 */
static inline u32 ks8695_get_rx_enable_bit(struct ks8695_priv *ksp)
{
	return ksp->rx_irq;
}
423
424
425
426
427
428
429
430
431
/*
 * ks8695_rx_irq - Receive interrupt handler.
 * @irq: The IRQ number (unused)
 * @dev_id: The net_device this interrupt was registered for
 *
 * Masks this port's RX interrupt in the global KS8695_INTEN register
 * and hands the actual receive work to NAPI.  The interrupt bit is
 * restored by ks8695_poll() once the ring has been drained.  rx_lock
 * serialises the INTEN read-modify-write against the poll routine.
 */
static irqreturn_t
ks8695_rx_irq(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct ks8695_priv *ksp = netdev_priv(ndev);

	spin_lock(&ksp->rx_lock);

	if (napi_schedule_prep(&ksp->napi)) {
		unsigned long status = readl(KS8695_IRQ_VA + KS8695_INTEN);
		unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);

		/* disable the rx interrupt until the poll routine is done */
		status &= ~mask_bit;
		writel(status , KS8695_IRQ_VA + KS8695_INTEN);
		__napi_schedule(&ksp->napi);
	}

	spin_unlock(&ksp->rx_lock);
	return IRQ_HANDLED;
}
452
453
454
455
456
457
/*
 * ks8695_rx - Receive packets from the ring.
 * @ksp: The device private data
 * @budget: Maximum number of descriptors to process in this call
 *
 * Walks the RX ring from next_rx_desc_read, handing good frames to the
 * network stack and accounting errors.  Descriptors holding bad frames
 * are returned to the hardware immediately; good frames leave an empty
 * slot which ks8695_refill_rxbuffers() re-populates afterwards.
 *
 * Returns the number of descriptors processed.
 */
static int ks8695_rx(struct ks8695_priv *ksp, int budget)
{
	struct net_device *ndev = ksp->ndev;
	struct sk_buff *skb;
	int buff_n;
	u32 flags;
	int pktlen;
	int received = 0;

	buff_n = ksp->next_rx_desc_read;
	/* Stop when the budget is exhausted, the slot has no buffer, or
	 * the hardware still owns the descriptor. */
	while (received < budget
			&& ksp->rx_buffers[buff_n].skb
			&& (!(ksp->rx_ring[buff_n].status &
					cpu_to_le32(RDES_OWN)))) {
		/* Don't read the descriptor body before the ownership
		 * bit has been observed clear. */
		rmb();
		flags = le32_to_cpu(ksp->rx_ring[buff_n].status);

		/* We own this descriptor, so it holds either a received
		 * frame or an error indication.
		 */
		if ((flags & (RDES_FS | RDES_LS)) !=
		    (RDES_FS | RDES_LS)) {
			/* Not both first and last segment: the frame
			 * spans multiple buffers, which this driver does
			 * not support, so drop it.
			 */
			goto rx_failure;
		}

		if (flags & (RDES_ES | RDES_RE)) {
			/* Error-class frame: account it and drop. */
			ndev->stats.rx_errors++;
			if (flags & RDES_TL)
				ndev->stats.rx_length_errors++;
			if (flags & RDES_RF)
				ndev->stats.rx_length_errors++;
			if (flags & RDES_CE)
				ndev->stats.rx_crc_errors++;
			if (flags & RDES_RE)
				ndev->stats.rx_missed_errors++;

			goto rx_failure;
		}

		/* Frame length, excluding the trailing 4-byte FCS */
		pktlen = flags & RDES_FLEN;
		pktlen -= 4;

		/* Retrieve the sk_buff */
		skb = ksp->rx_buffers[buff_n].skb;

		/* Clear it from the ring */
		ksp->rx_buffers[buff_n].skb = NULL;
		ksp->rx_ring[buff_n].data_ptr = 0;

		/* Unmap the buffer before handing it to the stack */
		dma_unmap_single(ksp->dev,
				 ksp->rx_buffers[buff_n].dma_ptr,
				 ksp->rx_buffers[buff_n].length,
				 DMA_FROM_DEVICE);

		/* Relinquish the SKB to the network layer */
		skb_put(skb, pktlen);
		skb->protocol = eth_type_trans(skb, ndev);
		netif_receive_skb(skb);

		/* Record stats */
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += pktlen;
		goto rx_finished;

rx_failure:
		/* The ring entry contained an error; we are re-using the
		 * buffer, so give the descriptor straight back to the
		 * hardware.
		 */
		ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
rx_finished:
		received++;
		buff_n = (buff_n + 1) & MAX_RX_DESC_MASK;
	}

	/* Remember which RX descriptor to examine next time */
	ksp->next_rx_desc_read = buff_n;

	/* Refill any slots emptied above */
	ks8695_refill_rxbuffers(ksp);

	/* Kick the RX DMA engine, in case it became suspended */
	ks8695_writereg(ksp, KS8695_DRSC, 0);

	return received;
}
551
552
553
554
555
556
557
558
559
560
/*
 * ks8695_poll - NAPI poll routine.
 * @napi: The NAPI context
 * @budget: Maximum number of packets to process
 *
 * Processes up to @budget received packets.  If the ring is drained
 * before the budget is exhausted, completes NAPI and restores the RX
 * interrupt-enable bit (snapshotted on entry) in KS8695_INTEN under
 * rx_lock, which serialises against ks8695_rx_irq().
 */
static int ks8695_poll(struct napi_struct *napi, int budget)
{
	struct ks8695_priv *ksp = container_of(napi, struct ks8695_priv, napi);
	unsigned long work_done;

	/* Snapshot the interrupt-enable state; the RX bit was cleared
	 * by the interrupt handler before scheduling us. */
	unsigned long isr = readl(KS8695_IRQ_VA + KS8695_INTEN);
	unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);

	work_done = ks8695_rx(ksp, budget);

	if (work_done < budget) {
		unsigned long flags;
		spin_lock_irqsave(&ksp->rx_lock, flags);
		__napi_complete(napi);
		/* re-enable the rx interrupt */
		writel(isr | mask_bit, KS8695_IRQ_VA + KS8695_INTEN);
		spin_unlock_irqrestore(&ksp->rx_lock, flags);
	}
	return work_done;
}
581
582
583
584
585
586
587
588
589
590static irqreturn_t
591ks8695_link_irq(int irq, void *dev_id)
592{
593 struct net_device *ndev = (struct net_device *)dev_id;
594 struct ks8695_priv *ksp = netdev_priv(ndev);
595 u32 ctrl;
596
597 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
598 if (ctrl & WMC_WLS) {
599 netif_carrier_on(ndev);
600 if (netif_msg_link(ksp))
601 dev_info(ksp->dev,
602 "%s: Link is now up (10%sMbps/%s-duplex)\n",
603 ndev->name,
604 (ctrl & WMC_WSS) ? "0" : "",
605 (ctrl & WMC_WDS) ? "Full" : "Half");
606 } else {
607 netif_carrier_off(ndev);
608 if (netif_msg_link(ksp))
609 dev_info(ksp->dev, "%s: Link is now down.\n",
610 ndev->name);
611 }
612
613 return IRQ_HANDLED;
614}
615
616
617
618
619
620
621
622
623
624
625
/*
 * ks8695_reset - Reset the KS8695 TX/RX DMA engines.
 * @ksp: The device private data
 *
 * Requests a TX DMA reset and busy-waits (up to 'watchdog' iterations
 * of 1ms) for the self-clearing reset bit, then programs default RX
 * (unicast+broadcast) and TX (pad + append CRC) DMA modes.
 */
static void
ks8695_reset(struct ks8695_priv *ksp)
{
	int reset_timeout = watchdog;

	ks8695_writereg(ksp, KS8695_DTXC, DTXC_TRST);
	while (reset_timeout--) {
		if (!(ks8695_readreg(ksp, KS8695_DTXC) & DTXC_TRST))
			break;
		msleep(1);
	}

	if (reset_timeout < 0) {
		dev_crit(ksp->dev,
			 "Timeout waiting for DMA engines to reset\n");
		/* And blithely carry on */
	}

	/* Definitely wait long enough before attempting to program
	 * the engines
	 */
	msleep(10);

	/* RX: accept unicast and broadcast */
	ks8695_writereg(ksp, KS8695_DRXC, DRXC_RU | DRXC_RB);
	/* TX: pad short frames and append CRC */
	ks8695_writereg(ksp, KS8695_DTXC, DTXC_TEP | DTXC_TAC);
}
654
655
656
657
658
659
660
661
662
/*
 * ks8695_shutdown - Shut down the TX/RX side of the device.
 * @ksp: The device private data
 *
 * Disables packet transmission and reception, releases the interrupt
 * handlers, and frees every skb still held in the TX and RX rings
 * (unmapping their DMA buffers first).
 */
static void
ks8695_shutdown(struct ks8695_priv *ksp)
{
	u32 ctrl;
	int buff_n;

	/* Disable packet transmission */
	ctrl = ks8695_readreg(ksp, KS8695_DTXC);
	ks8695_writereg(ksp, KS8695_DTXC, ctrl & ~DTXC_TE);

	/* Disable packet reception */
	ctrl = ks8695_readreg(ksp, KS8695_DRXC);
	ks8695_writereg(ksp, KS8695_DRXC, ctrl & ~DRXC_RE);

	/* Release the IRQs (link IRQ only exists on the WAN port) */
	free_irq(ksp->rx_irq, ksp->ndev);
	free_irq(ksp->tx_irq, ksp->ndev);
	if (ksp->link_irq != -1)
		free_irq(ksp->link_irq, ksp->ndev);

	/* Throw away any pending TX packets */
	for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
		if (ksp->tx_buffers[buff_n].skb) {
			/* Remove this SKB from the TX ring */
			ksp->tx_ring[buff_n].owner = 0;
			ksp->tx_ring[buff_n].status = 0;
			ksp->tx_ring[buff_n].data_ptr = 0;

			/* Unmap and bin this SKB */
			dma_unmap_single(ksp->dev,
					 ksp->tx_buffers[buff_n].dma_ptr,
					 ksp->tx_buffers[buff_n].length,
					 DMA_TO_DEVICE);
			dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb);
			ksp->tx_buffers[buff_n].skb = NULL;
		}
	}

	/* Ditto for any pending RX buffers */
	for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
		if (ksp->rx_buffers[buff_n].skb) {
			/* Remove the buffer from the RX ring */
			ksp->rx_ring[buff_n].status = 0;
			ksp->rx_ring[buff_n].data_ptr = 0;

			/* Unmap and bin the SKB */
			dma_unmap_single(ksp->dev,
					 ksp->rx_buffers[buff_n].dma_ptr,
					 ksp->rx_buffers[buff_n].length,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_irq(ksp->rx_buffers[buff_n].skb);
			ksp->rx_buffers[buff_n].skb = NULL;
		}
	}
}
718
719
720
721
722
723
724
725
726
727
728
729static int
730ks8695_setup_irq(int irq, const char *irq_name,
731 irq_handler_t handler, struct net_device *ndev)
732{
733 int ret;
734
735 ret = request_irq(irq, handler, IRQF_SHARED, irq_name, ndev);
736
737 if (ret) {
738 dev_err(&ndev->dev, "failure to request IRQ %d\n", irq);
739 return ret;
740 }
741
742 return 0;
743}
744
745
746
747
748
749
750
751
752
/*
 * ks8695_init_net - Initialise the net_device's TX/RX paths.
 * @ksp: The device private data
 *
 * Fills the RX ring, programs the descriptor base registers, requests
 * the device's IRQs, resets the ring indices and enables the DMA
 * engines.  Returns 0 on success or a request_irq() error.
 *
 * NOTE(review): if a later request_irq() fails, the IRQs already
 * requested on this path are not freed here -- the caller is expected
 * to invoke ks8695_shutdown() (as ks8695_open() does), which frees all
 * of them; confirm every caller follows that convention.
 */
static int
ks8695_init_net(struct ks8695_priv *ksp)
{
	int ret;
	u32 ctrl;

	ks8695_refill_rxbuffers(ksp);

	/* Tell the hardware where the rings live */
	ks8695_writereg(ksp, KS8695_RDLB, (u32) ksp->rx_ring_dma);
	ks8695_writereg(ksp, KS8695_TDLB, (u32) ksp->tx_ring_dma);

	/* Request the IRQs (link IRQ only for the WAN port) */
	ret = ks8695_setup_irq(ksp->rx_irq, ksp->rx_irq_name,
			       ks8695_rx_irq, ksp->ndev);
	if (ret)
		return ret;
	ret = ks8695_setup_irq(ksp->tx_irq, ksp->tx_irq_name,
			       ks8695_tx_irq, ksp->ndev);
	if (ret)
		return ret;
	if (ksp->link_irq != -1) {
		ret = ks8695_setup_irq(ksp->link_irq, ksp->link_irq_name,
				       ks8695_link_irq, ksp->ndev);
		if (ret)
			return ret;
	}

	/* Reset the ring indices */
	ksp->next_rx_desc_read = 0;
	ksp->tx_ring_next_slot = 0;
	ksp->tx_ring_used = 0;

	/* Enable packet transmission */
	ctrl = ks8695_readreg(ksp, KS8695_DTXC);

	ks8695_writereg(ksp, KS8695_DTXC, ctrl | DTXC_TE);

	/* Enable packet reception */
	ctrl = ks8695_readreg(ksp, KS8695_DRXC);

	ks8695_writereg(ksp, KS8695_DRXC, ctrl | DRXC_RE);
	/* And kick the RX DMA engine */
	ks8695_writereg(ksp, KS8695_DRSC, 0);

	/* All done */
	return 0;
}
801
802
803
804
805
806
807
808
809static void
810ks8695_release_device(struct ks8695_priv *ksp)
811{
812
813 iounmap(ksp->io_regs);
814 if (ksp->phyiface_regs)
815 iounmap(ksp->phyiface_regs);
816
817
818 release_resource(ksp->regs_req);
819 kfree(ksp->regs_req);
820 if (ksp->phyiface_req) {
821 release_resource(ksp->phyiface_req);
822 kfree(ksp->phyiface_req);
823 }
824
825
826 dma_free_coherent(ksp->dev, RING_DMA_SIZE,
827 ksp->ring_base, ksp->ring_base_dma);
828}
829
830
831
832
833
834
835
836static u32
837ks8695_get_msglevel(struct net_device *ndev)
838{
839 struct ks8695_priv *ksp = netdev_priv(ndev);
840
841 return ksp->msg_enable;
842}
843
844
845
846
847
848
849static void
850ks8695_set_msglevel(struct net_device *ndev, u32 value)
851{
852 struct ks8695_priv *ksp = netdev_priv(ndev);
853
854 ksp->msg_enable = value;
855}
856
857
858
859
860
861
/*
 * ks8695_wan_get_settings - Get link settings (WAN port only).
 * @ndev: The network device to read settings from
 * @cmd: The ethtool structure to fill in
 *
 * Reports the fixed capability set and the current autonegotiated (or
 * forced) speed/duplex, read from the WAN PHY control register.
 */
static int
ks8695_wan_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	u32 ctrl;

	/* All ports on the KS8695 support these... */
	cmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
			  SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
			  SUPPORTED_TP | SUPPORTED_MII);
	cmd->transceiver = XCVR_INTERNAL;

	cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
	cmd->port = PORT_MII;
	cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause);
	cmd->phy_address = 0;

	ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
	if ((ctrl & WMC_WAND) == 0) {
		/* auto-negotiation is enabled */
		cmd->advertising |= ADVERTISED_Autoneg;
		if (ctrl & WMC_WANA100F)
			cmd->advertising |= ADVERTISED_100baseT_Full;
		if (ctrl & WMC_WANA100H)
			cmd->advertising |= ADVERTISED_100baseT_Half;
		if (ctrl & WMC_WANA10F)
			cmd->advertising |= ADVERTISED_10baseT_Full;
		if (ctrl & WMC_WANA10H)
			cmd->advertising |= ADVERTISED_10baseT_Half;
		if (ctrl & WMC_WANAP)
			cmd->advertising |= ADVERTISED_Pause;
		cmd->autoneg = AUTONEG_ENABLE;

		/* Negotiated result: WSS = speed, WDS = duplex */
		ethtool_cmd_speed_set(cmd,
				      (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10);
		cmd->duplex = (ctrl & WMC_WDS) ?
			DUPLEX_FULL : DUPLEX_HALF;
	} else {
		/* auto-negotiation is disabled: report the forced mode */
		cmd->autoneg = AUTONEG_DISABLE;

		ethtool_cmd_speed_set(cmd, ((ctrl & WMC_WANF100) ?
					    SPEED_100 : SPEED_10));
		cmd->duplex = (ctrl & WMC_WANFF) ?
			DUPLEX_FULL : DUPLEX_HALF;
	}

	return 0;
}
911
912
913
914
915
916
/*
 * ks8695_wan_set_settings - Set link settings (WAN port only).
 * @ndev: The network device to configure
 * @cmd: The requested settings
 *
 * Validates the request, then either programs the advertised
 * autonegotiation capabilities and restarts negotiation, or disables
 * autonegotiation and forces the requested speed/duplex.
 */
static int
ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	u32 ctrl;

	if ((cmd->speed != SPEED_10) && (cmd->speed != SPEED_100))
		return -EINVAL;
	if ((cmd->duplex != DUPLEX_HALF) && (cmd->duplex != DUPLEX_FULL))
		return -EINVAL;
	if (cmd->port != PORT_MII)
		return -EINVAL;
	if (cmd->transceiver != XCVR_INTERNAL)
		return -EINVAL;
	if ((cmd->autoneg != AUTONEG_DISABLE) &&
	    (cmd->autoneg != AUTONEG_ENABLE))
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Must advertise at least one speed/duplex combination */
		if ((cmd->advertising & (ADVERTISED_10baseT_Half |
					 ADVERTISED_10baseT_Full |
					 ADVERTISED_100baseT_Half |
					 ADVERTISED_100baseT_Full)) == 0)
			return -EINVAL;

		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);

		/* Clear the forced-mode bit and old advertisements */
		ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H |
			  WMC_WANA10F | WMC_WANA10H);
		if (cmd->advertising & ADVERTISED_100baseT_Full)
			ctrl |= WMC_WANA100F;
		if (cmd->advertising & ADVERTISED_100baseT_Half)
			ctrl |= WMC_WANA100H;
		if (cmd->advertising & ADVERTISED_10baseT_Full)
			ctrl |= WMC_WANA10F;
		if (cmd->advertising & ADVERTISED_10baseT_Half)
			ctrl |= WMC_WANA10H;

		/* force a re-negotiation */
		ctrl |= WMC_WANR;
		writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
	} else {
		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);

		/* disable auto-negotiation and force the requested mode */
		ctrl |= WMC_WAND;
		ctrl &= ~(WMC_WANF100 | WMC_WANFF);

		if (cmd->speed == SPEED_100)
			ctrl |= WMC_WANF100;
		if (cmd->duplex == DUPLEX_FULL)
			ctrl |= WMC_WANFF;

		writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
	}

	return 0;
}
975
976
977
978
979
980static int
981ks8695_wan_nwayreset(struct net_device *ndev)
982{
983 struct ks8695_priv *ksp = netdev_priv(ndev);
984 u32 ctrl;
985
986 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
987
988 if ((ctrl & WMC_WAND) == 0)
989 writel(ctrl | WMC_WANR,
990 ksp->phyiface_regs + KS8695_WMC);
991 else
992
993 return -EINVAL;
994
995 return 0;
996}
997
998
999
1000
1001
1002
1003static void
1004ks8695_wan_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
1005{
1006 struct ks8695_priv *ksp = netdev_priv(ndev);
1007 u32 ctrl;
1008
1009 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
1010
1011
1012 param->autoneg = (ctrl & WMC_WANAP);
1013
1014
1015 ctrl = ks8695_readreg(ksp, KS8695_DRXC);
1016 param->rx_pause = (ctrl & DRXC_RFCE);
1017
1018
1019 ctrl = ks8695_readreg(ksp, KS8695_DTXC);
1020 param->tx_pause = (ctrl & DTXC_TFCE);
1021}
1022
1023
1024
1025
1026
1027
/*
 * ks8695_get_drvinfo - Retrieve driver information (ethtool).
 * @ndev: The network device to query
 * @info: The info structure to fill out
 */
static void
ks8695_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, MODULENAME, sizeof(info->driver));
	strlcpy(info->version, MODULEVERSION, sizeof(info->version));
	strlcpy(info->bus_info, dev_name(ndev->dev.parent),
		sizeof(info->bus_info));
}
1036
/* ethtool ops for ports with no configurable PHY (LAN and HPNA) */
static const struct ethtool_ops ks8695_ethtool_ops = {
	.get_msglevel	= ks8695_get_msglevel,
	.set_msglevel	= ks8695_set_msglevel,
	.get_drvinfo	= ks8695_get_drvinfo,
};
1042
/* ethtool ops for the WAN port, which exposes its PHY settings */
static const struct ethtool_ops ks8695_wan_ethtool_ops = {
	.get_msglevel	= ks8695_get_msglevel,
	.set_msglevel	= ks8695_set_msglevel,
	.get_settings	= ks8695_wan_get_settings,
	.set_settings	= ks8695_wan_set_settings,
	.nway_reset	= ks8695_wan_nwayreset,
	.get_link	= ethtool_op_get_link,
	.get_pauseparam = ks8695_wan_get_pause,
	.get_drvinfo	= ks8695_get_drvinfo,
};
1053
1054
1055
1056
1057
1058
1059
1060
1061static int
1062ks8695_set_mac(struct net_device *ndev, void *addr)
1063{
1064 struct ks8695_priv *ksp = netdev_priv(ndev);
1065 struct sockaddr *address = addr;
1066
1067 if (!is_valid_ether_addr(address->sa_data))
1068 return -EADDRNOTAVAIL;
1069
1070 memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len);
1071
1072 ks8695_update_mac(ksp);
1073
1074 dev_dbg(ksp->dev, "%s: Updated MAC address to %pM\n",
1075 ndev->name, ndev->dev_addr);
1076
1077 return 0;
1078}
1079
1080
1081
1082
1083
1084
1085
1086
/*
 * ks8695_set_multicast - Configure promiscuous/multicast reception.
 * @ndev: The network device to configure
 *
 * Programs promiscuous mode and the multicast filter.  If there are
 * more multicast addresses than hardware filter slots, falls back to
 * receive-all-multicast.
 */
static void
ks8695_set_multicast(struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	u32 ctrl;

	ctrl = ks8695_readreg(ksp, KS8695_DRXC);

	if (ndev->flags & IFF_PROMISC) {
		/* enable promiscuous mode */
		ctrl |= DRXC_RA;
	} else if (ndev->flags & ~IFF_PROMISC) {
		/* disable promiscuous mode
		 * NOTE(review): this test is effectively a plain "else"
		 * whenever any other flag bit is set -- confirm intent.
		 */
		ctrl &= ~DRXC_RA;
	}

	if (ndev->flags & IFF_ALLMULTI) {
		/* enable all-multicast mode */
		ctrl |= DRXC_RM;
	} else if (netdev_mc_count(ndev) > KS8695_NR_ADDRESSES) {
		/* more specific multicast addresses than the hardware
		 * filter can hold: accept all multicast instead
		 */
		ctrl |= DRXC_RM;
	} else {
		/* enable the specific multicast filter */
		ctrl &= ~DRXC_RM;
		ks8695_init_partial_multicast(ksp, ndev);
	}

	ks8695_writereg(ksp, KS8695_DRXC, ctrl);
}
1119
1120
1121
1122
1123
1124
1125
/*
 * ks8695_timeout - Handle a network transmit timeout.
 * @ndev: The net_device which timed out
 *
 * Performs a full shutdown/reset/re-init cycle to try to recover the
 * device, then restores the multicast configuration and restarts the
 * TX queue.
 */
static void
ks8695_timeout(struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);

	netif_stop_queue(ndev);
	ks8695_shutdown(ksp);

	ks8695_reset(ksp);

	ks8695_update_mac(ksp);

	/* The return value is deliberately ignored: the device managed
	 * to initialise before, so it will probably be okay to
	 * initialise again.
	 */
	ks8695_init_net(ksp);

	/* Reconfigure promiscuity etc */
	ks8695_set_multicast(ndev);

	/* And start the TX queue once more */
	netif_start_queue(ndev);
}
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
/*
 * ks8695_start_xmit - Transmit a packet.
 * @skb: The buffer to transmit
 * @ndev: The device to transmit it on
 *
 * Maps the skb for DMA, fills the next free TX descriptor and hands it
 * to the hardware.  Returns NETDEV_TX_BUSY (without consuming the skb)
 * if the ring is full or the DMA mapping fails, else NETDEV_TX_OK.
 * The TX ring state is protected by txq_lock.
 */
static int
ks8695_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	int buff_n;
	dma_addr_t dmap;

	spin_lock_irq(&ksp->txq_lock);

	if (ksp->tx_ring_used == MAX_TX_DESC) {
		/* Somehow we got entered when we have no txds */
		spin_unlock_irq(&ksp->txq_lock);
		return NETDEV_TX_BUSY;
	}

	buff_n = ksp->tx_ring_next_slot;

	/* The next slot must be empty if tx_ring_used was below max */
	BUG_ON(ksp->tx_buffers[buff_n].skb);

	dmap = dma_map_single(ksp->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(ksp->dev, dmap))) {
		/* Failed to DMA map this SKB, ask the stack to retry */
		spin_unlock_irq(&ksp->txq_lock);
		dev_dbg(ksp->dev, "%s: Could not map DMA memory for "\
			"transmission, trying later\n", ndev->name);
		return NETDEV_TX_BUSY;
	}

	ksp->tx_buffers[buff_n].dma_ptr = dmap;
	/* Mapped okay, store the buffer pointer and length for later */
	ksp->tx_buffers[buff_n].skb = skb;
	ksp->tx_buffers[buff_n].length = skb->len;

	/* Fill out the TX descriptor */
	ksp->tx_ring[buff_n].data_ptr =
		cpu_to_le32(ksp->tx_buffers[buff_n].dma_ptr);
	ksp->tx_ring[buff_n].status =
		cpu_to_le32(TDES_IC | TDES_FS | TDES_LS |
			    (skb->len & TDES_TBS));

	/* The descriptor body must be visible to the DMA engine before
	 * ownership is transferred below. */
	wmb();

	/* Hand the descriptor over to the hardware */
	ksp->tx_ring[buff_n].owner = cpu_to_le32(TDES_OWN);

	if (++ksp->tx_ring_used == MAX_TX_DESC)
		netif_stop_queue(ndev);

	/* Kick the TX DMA in case it decided to go IDLE */
	ks8695_writereg(ksp, KS8695_DTSC, 0);

	/* And update the next ring slot */
	ksp->tx_ring_next_slot = (buff_n + 1) & MAX_TX_DESC_MASK;

	spin_unlock_irq(&ksp->txq_lock);
	return NETDEV_TX_OK;
}
1216
1217
1218
1219
1220
1221
1222
1223
/*
 * ks8695_stop - Stop (bring down) the network device.
 * @ndev: The device being stopped
 *
 * Quiesces the TX queue and NAPI before shutting the hardware down.
 */
static int
ks8695_stop(struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&ksp->napi);

	ks8695_shutdown(ksp);

	return 0;
}
1236
1237
1238
1239
1240
1241
1242
1243
1244
/*
 * ks8695_open - Open (bring up) the network device.
 * @ndev: The network device to open
 *
 * Resets and initialises the hardware, then enables NAPI and the TX
 * queue.  On init failure the partially configured device is shut
 * down again (which also releases any IRQs that were requested).
 */
static int
ks8695_open(struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	int ret;

	ks8695_reset(ksp);

	ks8695_update_mac(ksp);

	ret = ks8695_init_net(ksp);
	if (ret) {
		ks8695_shutdown(ksp);
		return ret;
	}

	napi_enable(&ksp->napi);
	netif_start_queue(ndev);

	return 0;
}
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
/*
 * ks8695_init_switch - Initialise the LAN switch to known-good defaults.
 * @ksp: The device private data
 *
 * Programs the switch enable control registers.  The magic constants
 * are presumably the datasheet defaults -- TODO confirm against the
 * KS8695 documentation.
 */
static void
ks8695_init_switch(struct ks8695_priv *ksp)
{
	u32 ctrl;

	/* Default value for SEC0 (presumed datasheet default) */
	ctrl = 0x40819e00;

	/* LED0 = Link	 LED1 = Link/Activity */
	ctrl &= ~(SEC0_LLED1S | SEC0_LLED0S);
	ctrl |= (LLED0S_LINK | LLED1S_LINK_ACTIVITY);

	/* Enable the switch */
	ctrl |= SEC0_ENABLE;

	writel(ctrl, ksp->phyiface_regs + KS8695_SEC0);

	/* Defaults for SEC1 (magic value -- TODO confirm) */
	writel(0x9400100, ksp->phyiface_regs + KS8695_SEC1);
}
1296
1297
1298
1299
1300
1301
1302
1303
/*
 * ks8695_init_wan_phy - Initialise the WAN PHY to sensible defaults.
 * @ksp: The device private data
 *
 * Advertises every speed/duplex combination plus pause, configures the
 * link LEDs and restarts autonegotiation.
 */
static void
ks8695_init_wan_phy(struct ks8695_priv *ksp)
{
	u32 ctrl;

	/* Support auto-negotiation, advertise everything + pause */
	ctrl = (WMC_WANAP | WMC_WANA100F | WMC_WANA100H |
		WMC_WANA10F | WMC_WANA10H);

	/* LED0 = Activity , LED1 = Link */
	ctrl |= (WLED0S_ACTIVITY | WLED1S_LINK);

	/* Restart Auto-negotiation */
	ctrl |= WMC_WANR;

	writel(ctrl, ksp->phyiface_regs + KS8695_WMC);

	/* Clear the PHY power-management and port settings */
	writel(0, ksp->phyiface_regs + KS8695_WPPM);
	writel(0, ksp->phyiface_regs + KS8695_PPS);
}
1324
/* net_device operations for all KS8695 port types */
static const struct net_device_ops ks8695_netdev_ops = {
	.ndo_open		= ks8695_open,
	.ndo_stop		= ks8695_stop,
	.ndo_start_xmit		= ks8695_start_xmit,
	.ndo_tx_timeout		= ks8695_timeout,
	.ndo_set_mac_address	= ks8695_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= ks8695_set_multicast,
};
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
/*
 * ks8695_probe - Probe and initialise a KS8695 ethernet interface.
 * @pdev: The platform device to probe
 *
 * Locates and maps the register windows, resolves the IRQ resources,
 * reads the MAC address the bootloader left in the MAC registers,
 * allocates the DMA-coherent descriptor rings, initialises the
 * switch/PHY according to the detected port type (LAN/WAN/HPNA), and
 * finally registers the net_device.
 *
 * Returns 0 on success or a negative errno.
 */
static int
ks8695_probe(struct platform_device *pdev)
{
	struct ks8695_priv *ksp;
	struct net_device *ndev;
	struct resource *regs_res, *phyiface_res;
	struct resource *rxirq_res, *txirq_res, *linkirq_res;
	int ret = 0;
	int buff_n;
	bool inv_mac_addr = false;
	u32 machigh, maclow;

	/* Initialise a net_device (also zeroes the private data) */
	ndev = alloc_etherdev(sizeof(struct ks8695_priv));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	dev_dbg(&pdev->dev, "ks8695_probe() called\n");

	/* Configure our private structure a little */
	ksp = netdev_priv(ndev);

	ksp->dev = &pdev->dev;
	ksp->ndev = ndev;
	ksp->msg_enable = NETIF_MSG_LINK;

	/* Retrieve resources */
	regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	phyiface_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);

	rxirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	txirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
	linkirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 2);

	if (!(regs_res && rxirq_res && txirq_res)) {
		dev_err(ksp->dev, "insufficient resources\n");
		ret = -ENOENT;
		goto failure;
	}

	ksp->regs_req = request_mem_region(regs_res->start,
					   resource_size(regs_res),
					   pdev->name);

	if (!ksp->regs_req) {
		dev_err(ksp->dev, "cannot claim register space\n");
		ret = -EIO;
		goto failure;
	}

	ksp->io_regs = ioremap(regs_res->start, resource_size(regs_res));

	if (!ksp->io_regs) {
		dev_err(ksp->dev, "failed to ioremap registers\n");
		ret = -EINVAL;
		goto failure;
	}

	/* The switch/PHY window is only present for LAN/WAN ports */
	if (phyiface_res) {
		ksp->phyiface_req =
			request_mem_region(phyiface_res->start,
					   resource_size(phyiface_res),
					   phyiface_res->name);

		if (!ksp->phyiface_req) {
			dev_err(ksp->dev,
				"cannot claim switch register space\n");
			ret = -EIO;
			goto failure;
		}

		ksp->phyiface_regs = ioremap(phyiface_res->start,
					     resource_size(phyiface_res));

		if (!ksp->phyiface_regs) {
			dev_err(ksp->dev,
				"failed to ioremap switch registers\n");
			ret = -EINVAL;
			goto failure;
		}
	}

	/* Resolve the IRQs; the link IRQ is optional (WAN port only) */
	ksp->rx_irq = rxirq_res->start;
	ksp->rx_irq_name = rxirq_res->name ? rxirq_res->name : "Ethernet RX";
	ksp->tx_irq = txirq_res->start;
	ksp->tx_irq_name = txirq_res->name ? txirq_res->name : "Ethernet TX";
	ksp->link_irq = (linkirq_res ? linkirq_res->start : -1);
	ksp->link_irq_name = (linkirq_res && linkirq_res->name) ?
			linkirq_res->name : "Ethernet Link";

	/* driver system setup */
	ndev->netdev_ops = &ks8695_netdev_ops;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);

	netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT);

	/* Retrieve the default MAC address from the chip; the
	 * bootloader should have left it in the MAH/MAL registers.
	 */
	machigh = ks8695_readreg(ksp, KS8695_MAH);
	maclow = ks8695_readreg(ksp, KS8695_MAL);

	ndev->dev_addr[0] = (machigh >> 8) & 0xFF;
	ndev->dev_addr[1] = machigh & 0xFF;
	ndev->dev_addr[2] = (maclow >> 24) & 0xFF;
	ndev->dev_addr[3] = (maclow >> 16) & 0xFF;
	ndev->dev_addr[4] = (maclow >> 8) & 0xFF;
	ndev->dev_addr[5] = maclow & 0xFF;

	if (!is_valid_ether_addr(ndev->dev_addr))
		inv_mac_addr = true;

	/* In order to be efficient memory-wise, we allocate both
	 * rings in one go.
	 */
	ksp->ring_base = dma_alloc_coherent(&pdev->dev, RING_DMA_SIZE,
					    &ksp->ring_base_dma, GFP_KERNEL);
	if (!ksp->ring_base) {
		ret = -ENOMEM;
		goto failure;
	}

	/* Specify the TX DMA ring buffer */
	ksp->tx_ring = ksp->ring_base;
	ksp->tx_ring_dma = ksp->ring_base_dma;

	/* And initialise the queue's lock */
	spin_lock_init(&ksp->txq_lock);
	spin_lock_init(&ksp->rx_lock);

	/* Specify the RX DMA ring buffer (follows the TX ring) */
	ksp->rx_ring = ksp->ring_base + TX_RING_DMA_SIZE;
	ksp->rx_ring_dma = ksp->ring_base_dma + TX_RING_DMA_SIZE;

	/* Zero the descriptor rings */
	memset(ksp->tx_ring, 0, TX_RING_DMA_SIZE);
	memset(ksp->rx_ring, 0, RX_RING_DMA_SIZE);

	/* Link each descriptor to the next, wrapping at the end */
	for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
		ksp->tx_ring[buff_n].next_desc =
			cpu_to_le32(ksp->tx_ring_dma +
				    (sizeof(struct tx_ring_desc) *
				     ((buff_n + 1) & MAX_TX_DESC_MASK)));
	}

	for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
		ksp->rx_ring[buff_n].next_desc =
			cpu_to_le32(ksp->rx_ring_dma +
				    (sizeof(struct rx_ring_desc) *
				     ((buff_n + 1) & MAX_RX_DESC_MASK)));
	}

	/* Initialise the port (physically): switch regs without a link
	 * IRQ means LAN, with one means WAN, neither means HPNA */
	if (ksp->phyiface_regs && ksp->link_irq == -1) {
		ks8695_init_switch(ksp);
		ksp->dtype = KS8695_DTYPE_LAN;
		ndev->ethtool_ops = &ks8695_ethtool_ops;
	} else if (ksp->phyiface_regs && ksp->link_irq != -1) {
		ks8695_init_wan_phy(ksp);
		ksp->dtype = KS8695_DTYPE_WAN;
		ndev->ethtool_ops = &ks8695_wan_ethtool_ops;
	} else {
		/* No initialisation since HPNA does not have a PHY */
		ksp->dtype = KS8695_DTYPE_HPNA;
		ndev->ethtool_ops = &ks8695_ethtool_ops;
	}

	/* And bring up the net_device with the net core */
	platform_set_drvdata(pdev, ndev);
	ret = register_netdev(ndev);

	if (ret == 0) {
		if (inv_mac_addr)
			dev_warn(ksp->dev, "%s: Invalid ethernet MAC address. Please set using ip\n",
				 ndev->name);
		dev_info(ksp->dev, "ks8695 ethernet (%s) MAC: %pM\n",
			 ks8695_port_type(ksp), ndev->dev_addr);
	} else {
		/* Report the failure to register the net_device */
		dev_err(ksp->dev, "ks8695net: failed to register netdev.\n");
		goto failure;
	}

	/* All is well */
	return 0;

	/* Error exit path */
failure:
	ks8695_release_device(ksp);
	free_netdev(ndev);

	return ret;
}
1544
1545
1546
1547
1548
1549
1550
1551
/*
 * ks8695_drv_suspend - Suspend the KS8695 ethernet platform device.
 * @pdev: The device to suspend
 * @state: The suspend state (unused)
 *
 * Detaches the interface and shuts the hardware down if it was running.
 */
static int
ks8695_drv_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ks8695_priv *ksp = netdev_priv(ndev);

	ksp->in_suspend = 1;

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		ks8695_shutdown(ksp);
	}

	return 0;
}
1567
1568
1569
1570
1571
1572
1573
1574
/*
 * ks8695_drv_resume - Resume the KS8695 ethernet platform device.
 * @pdev: The device to resume
 *
 * Re-initialises and re-attaches the interface if it was running when
 * suspended.
 *
 * NOTE(review): the return value of ks8695_init_net() is ignored; a
 * failed resume would leave the interface attached but dead -- confirm
 * whether this is acceptable.
 */
static int
ks8695_drv_resume(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ks8695_priv *ksp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		ks8695_reset(ksp);
		ks8695_init_net(ksp);
		ks8695_set_multicast(ndev);
		netif_device_attach(ndev);
	}

	ksp->in_suspend = 0;

	return 0;
}
1592
1593
1594
1595
1596
1597
1598
/*
 * ks8695_drv_remove - Remove a KS8695 ethernet platform device.
 * @pdev: The device to remove
 *
 * Unregisters the net_device and releases every resource acquired in
 * ks8695_probe().
 */
static int
ks8695_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ks8695_priv *ksp = netdev_priv(ndev);

	netif_napi_del(&ksp->napi);

	unregister_netdev(ndev);
	ks8695_release_device(ksp);
	free_netdev(ndev);

	dev_dbg(&pdev->dev, "released and freed device\n");
	return 0;
}
1614
/* Platform driver glue; uses the legacy suspend/resume callbacks. */
static struct platform_driver ks8695_driver = {
	.driver = {
		.name	= MODULENAME,
	},
	.probe		= ks8695_probe,
	.remove		= ks8695_drv_remove,
	.suspend	= ks8695_drv_suspend,
	.resume		= ks8695_drv_resume,
};
1624
module_platform_driver(ks8695_driver);

MODULE_AUTHOR("Simtec Electronics");
MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MODULENAME);

/* 0400: readable by owner only; the timeout is fixed at module load */
module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
1634