1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include <linux/dma-mapping.h>
20#include <linux/module.h>
21#include <linux/ioport.h>
22#include <linux/netdevice.h>
23#include <linux/etherdevice.h>
24#include <linux/init.h>
25#include <linux/interrupt.h>
26#include <linux/skbuff.h>
27#include <linux/spinlock.h>
28#include <linux/crc32.h>
29#include <linux/mii.h>
30#include <linux/ethtool.h>
31#include <linux/delay.h>
32#include <linux/platform_device.h>
33#include <linux/irq.h>
34#include <linux/io.h>
35#include <linux/slab.h>
36
37#include <asm/irq.h>
38
39#include <mach/regs-switch.h>
40#include <mach/regs-misc.h>
41#include <asm/mach/irq.h>
42#include <mach/regs-irq.h>
43
44#include "ks8695net.h"
45
/* Driver identification strings */
#define MODULENAME "ks8695_ether"
#define MODULEVERSION "1.02"

/* Transmit timeout in milliseconds; exposed as a module parameter at the
 * bottom of the file, and also reused as the iteration bound while waiting
 * for the DMA engines to reset (see ks8695_reset()). */
static int watchdog = 5000;
53
54
55
56
57
58
59
60
61
62
/*
 * struct rx_ring_desc - Receive descriptor ring element
 * @status:	The status of the descriptor (RDES_OWN plus error/length
 *		flags once the MAC hands it back)
 * @length:	The length of the buffer the descriptor points at
 * @data_ptr:	DMA (bus) address of the receive buffer
 * @next_desc:	DMA address of the next descriptor in the ring
 *
 * All fields are little-endian as seen by the hardware.
 */
struct rx_ring_desc {
	__le32 status;
	__le32 length;
	__le32 data_ptr;
	__le32 next_desc;
};
69
70
71
72
73
74
75
76
/*
 * struct tx_ring_desc - Transmit descriptor ring element
 * @owner:	Ownership flag (TDES_OWN set while the MAC owns the slot)
 * @status:	Transmit control flags and buffer size (TDES_*)
 * @data_ptr:	DMA (bus) address of the buffer to transmit
 * @next_desc:	DMA address of the next descriptor in the ring
 *
 * All fields are little-endian as seen by the hardware.
 */
struct tx_ring_desc {
	__le32 owner;
	__le32 status;
	__le32 data_ptr;
	__le32 next_desc;
};
83
84
85
86
87
88
89
/*
 * struct ks8695_skbuff - sk_buff tracking for a descriptor slot
 * @skb:	The sk_buff attached to the ring entry (NULL when the slot
 *		is free)
 * @dma_ptr:	The DMA mapping of @skb's data
 * @length:	The number of bytes mapped
 */
struct ks8695_skbuff {
	struct sk_buff *skb;
	dma_addr_t dma_ptr;
	u32 length;
};
95
96
97
/* Transmit descriptor ring size/mask (size must stay a power of two,
 * the mask is used for cheap ring-index wrapping). */
#define MAX_TX_DESC 8
#define MAX_TX_DESC_MASK 0x7
/* Receive descriptor ring size/mask (same power-of-two constraint). */
#define MAX_RX_DESC 16
#define MAX_RX_DESC_MASK 0xf

/* NAPI weight registered with netif_napi_add() */
#define NAPI_WEIGHT 64

/* Size of each receive buffer handed to the MAC (0x700 = 1792 bytes) */
#define MAX_RXBUF_SIZE 0x700

/* Both descriptor rings are carved out of one coherent DMA allocation:
 * the TX ring first, then the RX ring (see ks8695_probe()). */
#define TX_RING_DMA_SIZE (sizeof(struct tx_ring_desc) * MAX_TX_DESC)
#define RX_RING_DMA_SIZE (sizeof(struct rx_ring_desc) * MAX_RX_DESC)
#define RING_DMA_SIZE (TX_RING_DMA_SIZE + RX_RING_DMA_SIZE)
111
112
113
114
115
116
117
/*
 * enum ks8695_dtype - Device type for a ks8695 instance
 *
 * The KS8695 exposes up to three ports; the WAN port has a PHY interface
 * with a link IRQ, the LAN port sits behind the internal switch, and the
 * HPNA port has neither (see the probe-time classification logic).
 */
enum ks8695_dtype {
	KS8695_DTYPE_WAN,
	KS8695_DTYPE_LAN,
	KS8695_DTYPE_HPNA,
};
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
/*
 * struct ks8695_priv - Private data for a KS8695 ethernet port
 * @in_suspend:		Flag set while the driver is suspended
 * @ndev:		The net_device this private data belongs to
 * @dev:		The platform device's struct device (used for DMA ops)
 * @dtype:		Port type (WAN/LAN/HPNA), decided at probe time
 * @io_regs:		Mapped ethernet-block register window
 * @napi:		NAPI context for receive processing
 * @rx_irq_name:	Names used when requesting the three IRQs
 * @rx_irq:		RX/TX/link IRQ numbers (link_irq is -1 when absent)
 * @regs_req:		Claimed memory regions for the register windows
 * @phyiface_regs:	Mapped PHY/switch register window (may be NULL)
 * @ring_base:		CPU address of the coherent ring allocation
 * @ring_base_dma:	Bus address of the coherent ring allocation
 * @tx_ring:		TX descriptor ring (start of @ring_base)
 * @tx_ring_used:	Number of in-flight TX descriptors
 * @tx_ring_next_slot:	Next TX slot to fill
 * @tx_ring_dma:	Bus address of the TX ring
 * @tx_buffers:		Per-slot skb/DMA bookkeeping for the TX ring
 * @txq_lock:		Protects the TX ring bookkeeping
 * @rx_ring:		RX descriptor ring (follows the TX ring)
 * @rx_ring_dma:	Bus address of the RX ring
 * @rx_buffers:		Per-slot skb/DMA bookkeeping for the RX ring
 * @next_rx_desc_read:	Next RX slot to examine
 * @rx_lock:		Serialises RX IRQ vs NAPI re-enable
 * @msg_enable:		netif message-level bitmask
 */
struct ks8695_priv {
	int in_suspend;
	struct net_device *ndev;
	struct device *dev;
	enum ks8695_dtype dtype;
	void __iomem *io_regs;

	struct napi_struct napi;

	const char *rx_irq_name, *tx_irq_name, *link_irq_name;
	int rx_irq, tx_irq, link_irq;

	struct resource *regs_req, *phyiface_req;
	void __iomem *phyiface_regs;

	void *ring_base;
	dma_addr_t ring_base_dma;

	struct tx_ring_desc *tx_ring;
	int tx_ring_used;
	int tx_ring_next_slot;
	dma_addr_t tx_ring_dma;
	struct ks8695_skbuff tx_buffers[MAX_TX_DESC];
	spinlock_t txq_lock;

	struct rx_ring_desc *rx_ring;
	dma_addr_t rx_ring_dma;
	struct ks8695_skbuff rx_buffers[MAX_RX_DESC];
	int next_rx_desc_read;
	spinlock_t rx_lock;

	int msg_enable;
};
191
192
193
194
195
196
197
198
199static inline u32
200ks8695_readreg(struct ks8695_priv *ksp, int reg)
201{
202 return readl(ksp->io_regs + reg);
203}
204
205
206
207
208
209
210
211static inline void
212ks8695_writereg(struct ks8695_priv *ksp, int reg, u32 value)
213{
214 writel(value, ksp->io_regs + reg);
215}
216
217
218
219
220
221
222
223
224
225
226static const char *
227ks8695_port_type(struct ks8695_priv *ksp)
228{
229 switch (ksp->dtype) {
230 case KS8695_DTYPE_LAN:
231 return "LAN";
232 case KS8695_DTYPE_WAN:
233 return "WAN";
234 case KS8695_DTYPE_HPNA:
235 return "HPNA";
236 }
237
238 return "UNKNOWN";
239}
240
241
242
243
244
245
246
247
248static void
249ks8695_update_mac(struct ks8695_priv *ksp)
250{
251
252 struct net_device *ndev = ksp->ndev;
253 u32 machigh, maclow;
254
255 maclow = ((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
256 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5] << 0));
257 machigh = ((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1] << 0));
258
259 ks8695_writereg(ksp, KS8695_MAL, maclow);
260 ks8695_writereg(ksp, KS8695_MAH, machigh);
261
262}
263
264
265
266
267
268
269
270
271
272
/*
 * ks8695_refill_rxbuffers - Re-fill empty slots in the RX buffer ring
 * @ksp: The device private data
 *
 * For every ring slot without an skb, allocate a buffer, map it for DMA
 * and hand the descriptor back to the MAC by setting RDES_OWN.  On
 * allocation or mapping failure the refill simply stops; the remaining
 * slots are retried on the next call.
 */
static void
ks8695_refill_rxbuffers(struct ks8695_priv *ksp)
{
	int buff_n;

	for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
		if (!ksp->rx_buffers[buff_n].skb) {
			struct sk_buff *skb = dev_alloc_skb(MAX_RXBUF_SIZE);
			dma_addr_t mapping;

			ksp->rx_buffers[buff_n].skb = skb;
			if (skb == NULL) {
				/* Out of memory: leave the slot empty and
				 * let a later refill pass retry it. */
				break;
			}

			mapping = dma_map_single(ksp->dev, skb->data,
						 MAX_RXBUF_SIZE,
						 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(ksp->dev, mapping))) {
				/* Failed to DMA-map this skb: free it */
				dev_kfree_skb_irq(skb);
				ksp->rx_buffers[buff_n].skb = NULL;
				break;
			}
			ksp->rx_buffers[buff_n].dma_ptr = mapping;
			skb->dev = ksp->ndev;
			ksp->rx_buffers[buff_n].length = MAX_RXBUF_SIZE;

			/* Record the mapping in the descriptor */
			ksp->rx_ring[buff_n].data_ptr = cpu_to_le32(mapping);
			ksp->rx_ring[buff_n].length =
				cpu_to_le32(MAX_RXBUF_SIZE);

			/* Descriptor fields must reach memory before
			 * ownership is handed to the hardware. */
			wmb();

			/* Give the descriptor to the MAC */
			ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
		}
	}
}
317
318
/* Number of hardware additional-address (multicast filter) slots */
#define KS8695_NR_ADDRESSES 16
320
321
322
323
324
325
326
327
328
329
330
/*
 * ks8695_init_partial_multicast - Program the additional-address registers
 * @ksp: The device private data
 * @ndev: The network device whose multicast list to install
 *
 * Writes each multicast address from @ndev into one of the
 * KS8695_NR_ADDRESSES hardware slots (AAL/AAH pairs, with the AAH_E
 * enable bit set) and clears any unused slots.  Callers must ensure
 * the list fits: overflow is a driver bug and trips BUG_ON().
 */
static void
ks8695_init_partial_multicast(struct ks8695_priv *ksp,
			      struct net_device *ndev)
{
	u32 low, high;
	int i;
	struct netdev_hw_addr *ha;

	i = 0;
	netdev_for_each_mc_addr(ha, ndev) {
		/* Caller (ks8695_set_multicast) only reaches here when the
		 * list fits in the hardware slots. */
		BUG_ON(i == KS8695_NR_ADDRESSES);

		low = (ha->addr[2] << 24) | (ha->addr[3] << 16) |
		      (ha->addr[4] << 8) | (ha->addr[5]);
		high = (ha->addr[0] << 8) | (ha->addr[1]);

		ks8695_writereg(ksp, KS8695_AAL_(i), low);
		ks8695_writereg(ksp, KS8695_AAH_(i), AAH_E | high);
		i++;
	}

	/* Clear the remaining (disabled) slots */
	for (; i < KS8695_NR_ADDRESSES; i++) {
		ks8695_writereg(ksp, KS8695_AAL_(i), 0);
		ks8695_writereg(ksp, KS8695_AAH_(i), 0);
	}
}
359
360
361
362
363
364
365
366
367
368
369
370
/*
 * ks8695_tx_irq - Transmit-complete interrupt handler
 * @irq: The IRQ which fired
 * @dev_id: The net_device registered with request_irq()
 *
 * Walks the whole TX ring reclaiming every slot the MAC has finished
 * with (TDES_OWN cleared while an skb is still attached): accounts the
 * packet, unmaps its DMA buffer, frees the skb, and finally wakes the
 * transmit queue.
 */
static irqreturn_t
ks8695_tx_irq(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct ks8695_priv *ksp = netdev_priv(ndev);
	int buff_n;

	for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
		if (ksp->tx_buffers[buff_n].skb &&
		    !(ksp->tx_ring[buff_n].owner & cpu_to_le32(TDES_OWN))) {
			/* The ownership test must complete before the rest
			 * of the descriptor is read. */
			rmb();

			/* Update the stats for the net_device */
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += ksp->tx_buffers[buff_n].length;

			/* Clear the pointer the hardware saw */
			ksp->tx_ring[buff_n].data_ptr = 0;

			/* Free the DMA mapping and the skb */
			dma_unmap_single(ksp->dev,
					 ksp->tx_buffers[buff_n].dma_ptr,
					 ksp->tx_buffers[buff_n].length,
					 DMA_TO_DEVICE);
			dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb);
			ksp->tx_buffers[buff_n].skb = NULL;
			ksp->tx_ring_used--;
		}
	}

	netif_wake_queue(ndev);

	return IRQ_HANDLED;
}
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420static inline u32 ks8695_get_rx_enable_bit(struct ks8695_priv *ksp)
421{
422 return ksp->rx_irq;
423}
424
425
426
427
428
429
430
431
432
/*
 * ks8695_rx_irq - Receive interrupt handler
 * @irq: The IRQ which fired
 * @dev_id: The net_device registered with request_irq()
 *
 * Masks this port's RX bit in the global interrupt-enable register and
 * schedules the NAPI poll routine, which does the actual receive work
 * and re-enables the interrupt when done.  rx_lock serialises the mask
 * manipulation against ks8695_poll().
 */
static irqreturn_t
ks8695_rx_irq(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct ks8695_priv *ksp = netdev_priv(ndev);

	spin_lock(&ksp->rx_lock);

	if (napi_schedule_prep(&ksp->napi)) {
		unsigned long status = readl(KS8695_IRQ_VA + KS8695_INTEN);
		unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);

		/* Disable the RX interrupt until NAPI polling completes */
		status &= ~mask_bit;
		writel(status , KS8695_IRQ_VA + KS8695_INTEN);
		__napi_schedule(&ksp->napi);
	}

	spin_unlock(&ksp->rx_lock);
	return IRQ_HANDLED;
}
453
454
455
456
457
458
/*
 * ks8695_rx - Receive packets called by NAPI poll method
 * @ksp: The device private data
 * @budget: Maximum number of packets to process
 *
 * Walks the RX ring from next_rx_desc_read, handing completed frames up
 * the stack and recycling errored descriptors.  Returns the number of
 * descriptors processed.  Successful frames leave their slot skb-less;
 * ks8695_refill_rxbuffers() re-arms those slots at the end.
 */
static int ks8695_rx(struct ks8695_priv *ksp, int budget)
{
	struct net_device *ndev = ksp->ndev;
	struct sk_buff *skb;
	int buff_n;
	u32 flags;
	int pktlen;
	int received = 0;

	buff_n = ksp->next_rx_desc_read;
	while (received < budget
			&& ksp->rx_buffers[buff_n].skb
			&& (!(ksp->rx_ring[buff_n].status &
					cpu_to_le32(RDES_OWN)))) {
			/* The ownership test must complete before the rest
			 * of the descriptor is read. */
			rmb();
			flags = le32_to_cpu(ksp->rx_ring[buff_n].status);

			/* Found a descriptor we own: a frame (or fragment)
			 * has been received. */
			if ((flags & (RDES_FS | RDES_LS)) !=
			    (RDES_FS | RDES_LS)) {
				/* Not both first AND last segment, so this
				 * is a spanning frame we cannot handle:
				 * recycle the descriptor. */
				goto rx_failure;
			}

			if (flags & (RDES_ES | RDES_RE)) {
				/* It's an error frame: account it, then
				 * recycle the descriptor. */
				ndev->stats.rx_errors++;
				if (flags & RDES_TL)
					ndev->stats.rx_length_errors++;
				if (flags & RDES_RF)
					ndev->stats.rx_length_errors++;
				if (flags & RDES_CE)
					ndev->stats.rx_crc_errors++;
				if (flags & RDES_RE)
					ndev->stats.rx_missed_errors++;

				goto rx_failure;
			}

			pktlen = flags & RDES_FLEN;
			/* Strip the trailing 4 bytes (presumably the FCS —
			 * TODO confirm against the KS8695 datasheet). */
			pktlen -= 4;

			/* Retrieve the sk_buff associated with this slot */
			skb = ksp->rx_buffers[buff_n].skb;

			/* Release the slot; the refill pass will re-arm it */
			ksp->rx_buffers[buff_n].skb = NULL;
			ksp->rx_ring[buff_n].data_ptr = 0;

			/* Unmap the buffer before the CPU touches the data */
			dma_unmap_single(ksp->dev,
					 ksp->rx_buffers[buff_n].dma_ptr,
					 ksp->rx_buffers[buff_n].length,
					 DMA_FROM_DEVICE);

			/* Hand the frame up the stack */
			skb_put(skb, pktlen);
			skb->protocol = eth_type_trans(skb, ndev);
			netif_receive_skb(skb);

			/* Update the stats for the net_device */
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += pktlen;
			goto rx_finished;

rx_failure:
			/* Descriptor is recycled as-is: give it straight
			 * back to the MAC. */
			ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
rx_finished:
			received++;
			buff_n = (buff_n + 1) & MAX_RX_DESC_MASK;
	}

	/* Remember where the walk stopped for the next poll */
	ksp->next_rx_desc_read = buff_n;

	/* Re-arm the slots emptied by successful receives */
	ks8695_refill_rxbuffers(ksp);

	/* Kick the RX DMA engine in case it had run dry */
	ks8695_writereg(ksp, KS8695_DRSC, 0);

	return received;
}
552
553
554
555
556
557
558
559
560
561
562static int ks8695_poll(struct napi_struct *napi, int budget)
563{
564 struct ks8695_priv *ksp = container_of(napi, struct ks8695_priv, napi);
565 unsigned long work_done;
566
567 unsigned long isr = readl(KS8695_IRQ_VA + KS8695_INTEN);
568 unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
569
570 work_done = ks8695_rx(ksp, budget);
571
572 if (work_done < budget) {
573 unsigned long flags;
574 spin_lock_irqsave(&ksp->rx_lock, flags);
575 __napi_complete(napi);
576
577 writel(isr | mask_bit, KS8695_IRQ_VA + KS8695_INTEN);
578 spin_unlock_irqrestore(&ksp->rx_lock, flags);
579 }
580 return work_done;
581}
582
583
584
585
586
587
588
589
590
591static irqreturn_t
592ks8695_link_irq(int irq, void *dev_id)
593{
594 struct net_device *ndev = (struct net_device *)dev_id;
595 struct ks8695_priv *ksp = netdev_priv(ndev);
596 u32 ctrl;
597
598 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
599 if (ctrl & WMC_WLS) {
600 netif_carrier_on(ndev);
601 if (netif_msg_link(ksp))
602 dev_info(ksp->dev,
603 "%s: Link is now up (10%sMbps/%s-duplex)\n",
604 ndev->name,
605 (ctrl & WMC_WSS) ? "0" : "",
606 (ctrl & WMC_WDS) ? "Full" : "Half");
607 } else {
608 netif_carrier_off(ndev);
609 if (netif_msg_link(ksp))
610 dev_info(ksp->dev, "%s: Link is now down.\n",
611 ndev->name);
612 }
613
614 return IRQ_HANDLED;
615}
616
617
618
619
620
621
622
623
624
625
626
/*
 * ks8695_reset - Reset a KS8695 ethernet port
 * @ksp: The device private data
 *
 * Issues a TX-DMA soft reset and polls (up to 'watchdog' milliseconds)
 * for it to complete, then programs the default DMA engine settings
 * without enabling the engines (that happens in ks8695_init_net()).
 */
static void
ks8695_reset(struct ks8695_priv *ksp)
{
	int reset_timeout = watchdog;

	ks8695_writereg(ksp, KS8695_DTXC, DTXC_TRST);
	while (reset_timeout--) {
		/* TRST self-clears when the reset has completed */
		if (!(ks8695_readreg(ksp, KS8695_DTXC) & DTXC_TRST))
			break;
		msleep(1);
	}

	if (reset_timeout < 0) {
		dev_crit(ksp->dev,
			 "Timeout waiting for DMA engines to reset\n");
		/* NOTE(review): we carry on regardless — confirm the
		 * hardware is usable after a failed reset. */
	}

	/* Definitely wait long enough before attempting to program
	 * the engines */
	msleep(10);

	/* RX: unicast and broadcast */
	ks8695_writereg(ksp, KS8695_DRXC, DRXC_RU | DRXC_RB);
	/* TX: pad short frames and append CRC */
	ks8695_writereg(ksp, KS8695_DTXC, DTXC_TEP | DTXC_TAC);
}
655
656
657
658
659
660
661
662
663
/*
 * ks8695_shutdown - Shut down a KS8695 ethernet port
 * @ksp: The device private data
 *
 * Disables the TX and RX DMA engines, releases all three IRQs
 * (rx/tx unconditionally, link only when present) and frees every
 * skb still attached to either descriptor ring.
 */
static void
ks8695_shutdown(struct ks8695_priv *ksp)
{
	u32 ctrl;
	int buff_n;

	/* Disable TX DMA */
	ctrl = ks8695_readreg(ksp, KS8695_DTXC);
	ks8695_writereg(ksp, KS8695_DTXC, ctrl & ~DTXC_TE);

	/* Disable RX DMA */
	ctrl = ks8695_readreg(ksp, KS8695_DRXC);
	ks8695_writereg(ksp, KS8695_DRXC, ctrl & ~DRXC_RE);

	/* Release the IRQs */
	free_irq(ksp->rx_irq, ksp->ndev);
	free_irq(ksp->tx_irq, ksp->ndev);
	if (ksp->link_irq != -1)
		free_irq(ksp->link_irq, ksp->ndev);

	/* Throw away any pending TX buffers */
	for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
		if (ksp->tx_buffers[buff_n].skb) {
			/* Reclaim the descriptor from the hardware */
			ksp->tx_ring[buff_n].owner = 0;
			ksp->tx_ring[buff_n].status = 0;
			ksp->tx_ring[buff_n].data_ptr = 0;

			/* Unmap and free the skb */
			dma_unmap_single(ksp->dev,
					 ksp->tx_buffers[buff_n].dma_ptr,
					 ksp->tx_buffers[buff_n].length,
					 DMA_TO_DEVICE);
			dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb);
			ksp->tx_buffers[buff_n].skb = NULL;
		}
	}

	/* Likewise the RX buffers */
	for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
		if (ksp->rx_buffers[buff_n].skb) {
			/* Reclaim the descriptor from the hardware */
			ksp->rx_ring[buff_n].status = 0;
			ksp->rx_ring[buff_n].data_ptr = 0;

			/* Unmap and free the skb */
			dma_unmap_single(ksp->dev,
					 ksp->rx_buffers[buff_n].dma_ptr,
					 ksp->rx_buffers[buff_n].length,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_irq(ksp->rx_buffers[buff_n].skb);
			ksp->rx_buffers[buff_n].skb = NULL;
		}
	}
}
719
720
721
722
723
724
725
726
727
728
729
730static int
731ks8695_setup_irq(int irq, const char *irq_name,
732 irq_handler_t handler, struct net_device *ndev)
733{
734 int ret;
735
736 ret = request_irq(irq, handler, IRQF_SHARED, irq_name, ndev);
737
738 if (ret) {
739 dev_err(&ndev->dev, "failure to request IRQ %d\n", irq);
740 return ret;
741 }
742
743 return 0;
744}
745
746
747
748
749
750
751
752
753
/*
 * ks8695_init_net - Prepare a port for use after a reset
 * @ksp: The device private data
 *
 * Fills the RX ring, tells the hardware where both rings live, requests
 * the IRQs, resets the ring bookkeeping and enables the DMA engines.
 * Returns 0 on success or a request_irq() error code.
 *
 * NOTE(review): if a later request_irq() fails, the earlier IRQs remain
 * requested here; the caller (ks8695_open) reacts by calling
 * ks8695_shutdown(), which frees the rx/tx IRQs unconditionally —
 * verify this pairing never frees an IRQ that was not successfully
 * requested.
 */
static int
ks8695_init_net(struct ks8695_priv *ksp)
{
	int ret;
	u32 ctrl;

	ks8695_refill_rxbuffers(ksp);

	/* Tell the MAC where the rings are */
	ks8695_writereg(ksp, KS8695_RDLB, (u32) ksp->rx_ring_dma);
	ks8695_writereg(ksp, KS8695_TDLB, (u32) ksp->tx_ring_dma);

	/* Request the IRQs (link IRQ only when the port has one) */
	ret = ks8695_setup_irq(ksp->rx_irq, ksp->rx_irq_name,
			       ks8695_rx_irq, ksp->ndev);
	if (ret)
		return ret;
	ret = ks8695_setup_irq(ksp->tx_irq, ksp->tx_irq_name,
			       ks8695_tx_irq, ksp->ndev);
	if (ret)
		return ret;
	if (ksp->link_irq != -1) {
		ret = ks8695_setup_irq(ksp->link_irq, ksp->link_irq_name,
				       ks8695_link_irq, ksp->ndev);
		if (ret)
			return ret;
	}

	/* Reset the ring bookkeeping */
	ksp->next_rx_desc_read = 0;
	ksp->tx_ring_next_slot = 0;
	ksp->tx_ring_used = 0;

	/* Enable the TX DMA engine */
	ctrl = ks8695_readreg(ksp, KS8695_DTXC);
	ks8695_writereg(ksp, KS8695_DTXC, ctrl | DTXC_TE);

	/* Enable the RX DMA engine */
	ctrl = ks8695_readreg(ksp, KS8695_DRXC);
	ks8695_writereg(ksp, KS8695_DRXC, ctrl | DRXC_RE);

	/* Kick the RX DMA engine */
	ks8695_writereg(ksp, KS8695_DRSC, 0);

	return 0;
}
802
803
804
805
806
807
808
809
810static void
811ks8695_release_device(struct ks8695_priv *ksp)
812{
813
814 iounmap(ksp->io_regs);
815 if (ksp->phyiface_regs)
816 iounmap(ksp->phyiface_regs);
817
818
819 release_resource(ksp->regs_req);
820 kfree(ksp->regs_req);
821 if (ksp->phyiface_req) {
822 release_resource(ksp->phyiface_req);
823 kfree(ksp->phyiface_req);
824 }
825
826
827 dma_free_coherent(ksp->dev, RING_DMA_SIZE,
828 ksp->ring_base, ksp->ring_base_dma);
829}
830
831
832
833
834
835
836
837static u32
838ks8695_get_msglevel(struct net_device *ndev)
839{
840 struct ks8695_priv *ksp = netdev_priv(ndev);
841
842 return ksp->msg_enable;
843}
844
845
846
847
848
849
850static void
851ks8695_set_msglevel(struct net_device *ndev, u32 value)
852{
853 struct ks8695_priv *ksp = netdev_priv(ndev);
854
855 ksp->msg_enable = value;
856}
857
858
859
860
861
862
/*
 * ks8695_wan_get_settings - ethtool hook: report the WAN port settings
 * @ndev: The network device to read from
 * @cmd: The ethtool structure to fill out
 *
 * Builds the supported/advertised masks from the WAN MII control
 * register (WMC).  Speed and duplex are taken from the status bits when
 * autonegotiation is enabled, and from the forced-mode bits otherwise.
 */
static int
ks8695_wan_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	u32 ctrl;

	/* All ports on the KS8695 support these... */
	cmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
			  SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
			  SUPPORTED_TP | SUPPORTED_MII);
	cmd->transceiver = XCVR_INTERNAL;

	cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
	cmd->port = PORT_MII;
	cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause);
	cmd->phy_address = 0;

	ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
	if ((ctrl & WMC_WAND) == 0) {
		/* Autonegotiation is enabled */
		cmd->advertising |= ADVERTISED_Autoneg;
		if (ctrl & WMC_WANA100F)
			cmd->advertising |= ADVERTISED_100baseT_Full;
		if (ctrl & WMC_WANA100H)
			cmd->advertising |= ADVERTISED_100baseT_Half;
		if (ctrl & WMC_WANA10F)
			cmd->advertising |= ADVERTISED_10baseT_Full;
		if (ctrl & WMC_WANA10H)
			cmd->advertising |= ADVERTISED_10baseT_Half;
		if (ctrl & WMC_WANAP)
			cmd->advertising |= ADVERTISED_Pause;
		cmd->autoneg = AUTONEG_ENABLE;

		/* Negotiated speed/duplex from the status bits */
		ethtool_cmd_speed_set(cmd,
				      (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10);
		cmd->duplex = (ctrl & WMC_WDS) ?
			DUPLEX_FULL : DUPLEX_HALF;
	} else {
		/* Autonegotiation disabled: report the forced settings */
		cmd->autoneg = AUTONEG_DISABLE;

		ethtool_cmd_speed_set(cmd, ((ctrl & WMC_WANF100) ?
					    SPEED_100 : SPEED_10));
		cmd->duplex = (ctrl & WMC_WANFF) ?
			DUPLEX_FULL : DUPLEX_HALF;
	}

	return 0;
}
912
913
914
915
916
917
/*
 * ks8695_wan_set_settings - ethtool hook: set the WAN port settings
 * @ndev: The network device to configure
 * @cmd: The settings to apply
 *
 * Validates the request, then either re-programs the autonegotiation
 * advertisement bits and restarts negotiation (WMC_WANR), or forces the
 * requested speed/duplex.  Returns 0 on success, -EINVAL on an
 * unsupported combination.
 *
 * NOTE(review): this reads cmd->speed directly rather than
 * ethtool_cmd_speed(cmd), so the speed_hi bits are ignored — harmless
 * for 10/100 but worth confirming against the ethtool API in use.
 */
static int
ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	u32 ctrl;

	/* Only 10/100, half/full over internal MII is possible */
	if ((cmd->speed != SPEED_10) && (cmd->speed != SPEED_100))
		return -EINVAL;
	if ((cmd->duplex != DUPLEX_HALF) && (cmd->duplex != DUPLEX_FULL))
		return -EINVAL;
	if (cmd->port != PORT_MII)
		return -EINVAL;
	if (cmd->transceiver != XCVR_INTERNAL)
		return -EINVAL;
	if ((cmd->autoneg != AUTONEG_DISABLE) &&
	    (cmd->autoneg != AUTONEG_ENABLE))
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* At least one mode must be advertised */
		if ((cmd->advertising & (ADVERTISED_10baseT_Half |
					 ADVERTISED_10baseT_Full |
					 ADVERTISED_100baseT_Half |
					 ADVERTISED_100baseT_Full)) == 0)
			return -EINVAL;

		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);

		/* Rebuild the advertisement bits from scratch */
		ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H |
			  WMC_WANA10F | WMC_WANA10H);
		if (cmd->advertising & ADVERTISED_100baseT_Full)
			ctrl |= WMC_WANA100F;
		if (cmd->advertising & ADVERTISED_100baseT_Half)
			ctrl |= WMC_WANA100H;
		if (cmd->advertising & ADVERTISED_10baseT_Full)
			ctrl |= WMC_WANA10F;
		if (cmd->advertising & ADVERTISED_10baseT_Half)
			ctrl |= WMC_WANA10H;

		/* Restart the negotiation */
		ctrl |= WMC_WANR;
		writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
	} else {
		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);

		/* Disable autonegotiation and clear the forced bits */
		ctrl |= WMC_WAND;
		ctrl &= ~(WMC_WANF100 | WMC_WANFF);

		if (cmd->speed == SPEED_100)
			ctrl |= WMC_WANF100;
		if (cmd->duplex == DUPLEX_FULL)
			ctrl |= WMC_WANFF;

		writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
	}

	return 0;
}
976
977
978
979
980
981static int
982ks8695_wan_nwayreset(struct net_device *ndev)
983{
984 struct ks8695_priv *ksp = netdev_priv(ndev);
985 u32 ctrl;
986
987 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
988
989 if ((ctrl & WMC_WAND) == 0)
990 writel(ctrl | WMC_WANR,
991 ksp->phyiface_regs + KS8695_WMC);
992 else
993
994 return -EINVAL;
995
996 return 0;
997}
998
999
1000
1001
1002
1003
1004static void
1005ks8695_wan_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
1006{
1007 struct ks8695_priv *ksp = netdev_priv(ndev);
1008 u32 ctrl;
1009
1010 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
1011
1012
1013 param->autoneg = (ctrl & WMC_WANAP);
1014
1015
1016 ctrl = ks8695_readreg(ksp, KS8695_DRXC);
1017 param->rx_pause = (ctrl & DRXC_RFCE);
1018
1019
1020 ctrl = ks8695_readreg(ksp, KS8695_DTXC);
1021 param->tx_pause = (ctrl & DTXC_TFCE);
1022}
1023
1024
1025
1026
1027
1028
/*
 * ks8695_get_drvinfo - ethtool hook: report driver information
 * @ndev: The network device being queried
 * @info: The info structure to fill out
 */
static void
ks8695_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, MODULENAME, sizeof(info->driver));
	strlcpy(info->version, MODULEVERSION, sizeof(info->version));
	strlcpy(info->bus_info, dev_name(ndev->dev.parent),
		sizeof(info->bus_info));
}
1037
/* ethtool operations for the LAN and HPNA ports (no PHY access) */
static const struct ethtool_ops ks8695_ethtool_ops = {
	.get_msglevel	= ks8695_get_msglevel,
	.set_msglevel	= ks8695_set_msglevel,
	.get_drvinfo	= ks8695_get_drvinfo,
};
1043
/* ethtool operations for the WAN port, which additionally supports
 * speed/duplex control and autonegotiation via the WMC register */
static const struct ethtool_ops ks8695_wan_ethtool_ops = {
	.get_msglevel	= ks8695_get_msglevel,
	.set_msglevel	= ks8695_set_msglevel,
	.get_settings	= ks8695_wan_get_settings,
	.set_settings	= ks8695_wan_set_settings,
	.nway_reset	= ks8695_wan_nwayreset,
	.get_link	= ethtool_op_get_link,
	.get_pauseparam = ks8695_wan_get_pause,
	.get_drvinfo	= ks8695_get_drvinfo,
};
1054
1055
1056
1057
1058
1059
1060
1061
1062static int
1063ks8695_set_mac(struct net_device *ndev, void *addr)
1064{
1065 struct ks8695_priv *ksp = netdev_priv(ndev);
1066 struct sockaddr *address = addr;
1067
1068 if (!is_valid_ether_addr(address->sa_data))
1069 return -EADDRNOTAVAIL;
1070
1071 memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len);
1072
1073 ks8695_update_mac(ksp);
1074
1075 dev_dbg(ksp->dev, "%s: Updated MAC address to %pM\n",
1076 ndev->name, ndev->dev_addr);
1077
1078 return 0;
1079}
1080
1081
1082
1083
1084
1085
1086
1087
/*
 * ks8695_set_multicast - netdev hook: apply RX filtering mode
 * @ndev: The network device whose flags/multicast list to apply
 *
 * Sets promiscuous (DRXC_RA) and all-multicast (DRXC_RM) receive modes
 * from the device flags.  When the multicast list fits in the hardware
 * address slots, the partial filter is programmed instead of RM.
 */
static void
ks8695_set_multicast(struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	u32 ctrl;

	ctrl = ks8695_readreg(ksp, KS8695_DRXC);

	if (ndev->flags & IFF_PROMISC) {
		/* Receive all frames */
		ctrl |= DRXC_RA;
	} else if (ndev->flags & ~IFF_PROMISC) {
		/* NOTE(review): this condition is true for any non-zero
		 * flag other than IFF_PROMISC; it appears intended as a
		 * plain "else" (clear promiscuous mode) — confirm. */
		ctrl &= ~DRXC_RA;
	}

	if (ndev->flags & IFF_ALLMULTI) {
		/* Receive all multicast frames */
		ctrl |= DRXC_RM;
	} else if (netdev_mc_count(ndev) > KS8695_NR_ADDRESSES) {
		/* More multicast addresses than hardware filter slots:
		 * fall back to accepting all multicast frames. */
		ctrl |= DRXC_RM;
	} else {
		/* The list fits: program the partial hardware filter */
		ctrl &= ~DRXC_RM;
		ks8695_init_partial_multicast(ksp, ndev);
	}

	ks8695_writereg(ksp, KS8695_DRXC, ctrl);
}
1120
1121
1122
1123
1124
1125
1126
/*
 * ks8695_timeout - netdev hook: transmit watchdog fired
 * @ndev: The net_device whose transmitter stalled
 *
 * Performs a full stop/reset/reinitialise cycle of the port and
 * restarts the transmit queue.  Any packets queued in the rings when
 * the watchdog fires are discarded by ks8695_shutdown().
 */
static void
ks8695_timeout(struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);

	netif_stop_queue(ndev);
	ks8695_shutdown(ksp);

	ks8695_reset(ksp);

	/* Reset clears the MAC registers, so re-program the address */
	ks8695_update_mac(ksp);

	/* NOTE(review): the return value of ks8695_init_net() is
	 * ignored here — if an IRQ re-request fails the queue is still
	 * restarted; confirm this is acceptable. */
	ks8695_init_net(ksp);

	/* Re-apply the receive filtering mode */
	ks8695_set_multicast(ndev);

	/* And restart the transmit queue */
	netif_start_queue(ndev);
}
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
/*
 * ks8695_start_xmit - netdev hook: queue a packet for transmission
 * @skb: The buffer to transmit
 * @ndev: The device to transmit on
 *
 * Maps the skb for DMA, fills the next free TX descriptor, hands it to
 * the MAC (TDES_OWN) and kicks the TX DMA engine.  Returns
 * NETDEV_TX_OK on success, NETDEV_TX_BUSY when the ring is full or the
 * DMA mapping fails (the skb is retained by the stack for a retry).
 */
static int
ks8695_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	int buff_n;
	dma_addr_t dmap;

	spin_lock_irq(&ksp->txq_lock);

	if (ksp->tx_ring_used == MAX_TX_DESC) {
		/* No slot free: ask the stack to retry later */
		spin_unlock_irq(&ksp->txq_lock);
		return NETDEV_TX_BUSY;
	}

	buff_n = ksp->tx_ring_next_slot;

	/* The slot must be free, or the accounting above is broken */
	BUG_ON(ksp->tx_buffers[buff_n].skb);

	dmap = dma_map_single(ksp->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(ksp->dev, dmap))) {
		/* Mapping failed: leave the skb with the stack to retry */
		spin_unlock_irq(&ksp->txq_lock);
		dev_dbg(ksp->dev, "%s: Could not map DMA memory for "\
			"transmission, trying later\n", ndev->name);
		return NETDEV_TX_BUSY;
	}

	ksp->tx_buffers[buff_n].dma_ptr = dmap;
	/* Mapping is OK, so commit the skb to the slot */
	ksp->tx_buffers[buff_n].skb = skb;
	ksp->tx_buffers[buff_n].length = skb->len;

	/* Fill out the TX descriptor: whole frame in one buffer,
	 * interrupt on completion */
	ksp->tx_ring[buff_n].data_ptr =
		cpu_to_le32(ksp->tx_buffers[buff_n].dma_ptr);
	ksp->tx_ring[buff_n].status =
		cpu_to_le32(TDES_IC | TDES_FS | TDES_LS |
			    (skb->len & TDES_TBS));

	/* Descriptor fields must reach memory before ownership passes
	 * to the hardware. */
	wmb();

	/* Hand the descriptor to the MAC */
	ksp->tx_ring[buff_n].owner = cpu_to_le32(TDES_OWN);

	if (++ksp->tx_ring_used == MAX_TX_DESC)
		netif_stop_queue(ndev);

	/* Kick the TX DMA engine */
	ks8695_writereg(ksp, KS8695_DTSC, 0);

	/* Advance to the next ring slot */
	ksp->tx_ring_next_slot = (buff_n + 1) & MAX_TX_DESC_MASK;

	spin_unlock_irq(&ksp->txq_lock);
	return NETDEV_TX_OK;
}
1217
1218
1219
1220
1221
1222
1223
1224
1225static int
1226ks8695_stop(struct net_device *ndev)
1227{
1228 struct ks8695_priv *ksp = netdev_priv(ndev);
1229
1230 netif_stop_queue(ndev);
1231 napi_disable(&ksp->napi);
1232
1233 ks8695_shutdown(ksp);
1234
1235 return 0;
1236}
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246static int
1247ks8695_open(struct net_device *ndev)
1248{
1249 struct ks8695_priv *ksp = netdev_priv(ndev);
1250 int ret;
1251
1252 if (!is_valid_ether_addr(ndev->dev_addr))
1253 return -EADDRNOTAVAIL;
1254
1255 ks8695_reset(ksp);
1256
1257 ks8695_update_mac(ksp);
1258
1259 ret = ks8695_init_net(ksp);
1260 if (ret) {
1261 ks8695_shutdown(ksp);
1262 return ret;
1263 }
1264
1265 napi_enable(&ksp->napi);
1266 netif_start_queue(ndev);
1267
1268 return 0;
1269}
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
/*
 * ks8695_init_switch - Initialise the internal switch (LAN port)
 * @ksp: The device private data
 *
 * Programs the switch enable/configuration registers and the LED
 * modes.  Only called at probe time for the LAN port.
 */
static void __devinit
ks8695_init_switch(struct ks8695_priv *ksp)
{
	u32 ctrl;

	/* Default switch configuration value.
	 * NOTE(review): magic constant carried over from the original
	 * driver — decode against the KS8695 SEC0 register layout. */
	ctrl = 0x40819e00;

	/* LED0 = Link, LED1 = Link/Activity */
	ctrl &= ~(SEC0_LLED1S | SEC0_LLED0S);
	ctrl |= (LLED0S_LINK | LLED1S_LINK_ACTIVITY);

	/* Enable the switch */
	ctrl |= SEC0_ENABLE;

	writel(ctrl, ksp->phyiface_regs + KS8695_SEC0);

	/* Defaults for the secondary switch control register.
	 * NOTE(review): magic constant — see SEC1 register layout. */
	writel(0x9400100, ksp->phyiface_regs + KS8695_SEC1);
}
1300
1301
1302
1303
1304
1305
1306
1307
/*
 * ks8695_init_wan_phy - Initialise the WAN PHY at probe time
 * @ksp: The device private data
 *
 * Advertises all 10/100 modes plus pause, configures the LED modes and
 * restarts autonegotiation.
 */
static void __devinit
ks8695_init_wan_phy(struct ks8695_priv *ksp)
{
	u32 ctrl;

	/* Advertise pause and all 10/100 half/full modes */
	ctrl = (WMC_WANAP | WMC_WANA100F | WMC_WANA100H |
		WMC_WANA10F | WMC_WANA10H);

	/* LED0 = Activity , LED1 = Link */
	ctrl |= (WLED0S_ACTIVITY | WLED1S_LINK);

	/* Restart Auto-negotiation */
	ctrl |= WMC_WANR;

	writel(ctrl, ksp->phyiface_regs + KS8695_WMC);

	/* Clear the PHY power-management and port-power-save registers */
	writel(0, ksp->phyiface_regs + KS8695_WPPM);
	writel(0, ksp->phyiface_regs + KS8695_PPS);
}
1328
/* net_device operations shared by all three port types */
static const struct net_device_ops ks8695_netdev_ops = {
	.ndo_open		= ks8695_open,
	.ndo_stop		= ks8695_stop,
	.ndo_start_xmit		= ks8695_start_xmit,
	.ndo_tx_timeout		= ks8695_timeout,
	.ndo_set_mac_address	= ks8695_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= ks8695_set_multicast,
};
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
/*
 * ks8695_probe - Probe and initialise a KS8695 ethernet interface
 * @pdev: The platform device to probe
 *
 * Gathers the platform resources (two memory regions, two or three
 * IRQs), maps the registers, reads the MAC address out of the hardware,
 * allocates the coherent descriptor rings, classifies the port
 * (LAN/WAN/HPNA) from which resources are present, and registers the
 * net_device.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): the early error paths jump to 'failure' with some of
 * the resources still unclaimed/NULL — verify ks8695_release_device()
 * tolerates partially-initialised private data.
 */
static int __devinit
ks8695_probe(struct platform_device *pdev)
{
	struct ks8695_priv *ksp;
	struct net_device *ndev;
	struct resource *regs_res, *phyiface_res;
	struct resource *rxirq_res, *txirq_res, *linkirq_res;
	int ret = 0;
	int buff_n;
	u32 machigh, maclow;

	/* Initialise a net_device (priv area is zeroed by the core) */
	ndev = alloc_etherdev(sizeof(struct ks8695_priv));
	if (!ndev) {
		dev_err(&pdev->dev, "could not allocate device.\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	dev_dbg(&pdev->dev, "ks8695_probe() called\n");

	/* Configure our private structure a little */
	ksp = netdev_priv(ndev);

	ksp->dev = &pdev->dev;
	ksp->ndev = ndev;
	ksp->msg_enable = NETIF_MSG_LINK;

	/* Retrieve resources: region 0 = MAC registers, region 1 =
	 * optional PHY/switch registers; IRQs 0/1 = RX/TX, IRQ 2 =
	 * optional link-change (WAN only). */
	regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	phyiface_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);

	rxirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	txirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
	linkirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 2);

	if (!(regs_res && rxirq_res && txirq_res)) {
		dev_err(ksp->dev, "insufficient resources\n");
		ret = -ENOENT;
		goto failure;
	}

	ksp->regs_req = request_mem_region(regs_res->start,
					   resource_size(regs_res),
					   pdev->name);

	if (!ksp->regs_req) {
		dev_err(ksp->dev, "cannot claim register space\n");
		ret = -EIO;
		goto failure;
	}

	ksp->io_regs = ioremap(regs_res->start, resource_size(regs_res));

	if (!ksp->io_regs) {
		dev_err(ksp->dev, "failed to ioremap registers\n");
		ret = -EINVAL;
		goto failure;
	}

	if (phyiface_res) {
		ksp->phyiface_req =
			request_mem_region(phyiface_res->start,
					   resource_size(phyiface_res),
					   phyiface_res->name);

		if (!ksp->phyiface_req) {
			dev_err(ksp->dev,
				"cannot claim switch register space\n");
			ret = -EIO;
			goto failure;
		}

		ksp->phyiface_regs = ioremap(phyiface_res->start,
					     resource_size(phyiface_res));

		if (!ksp->phyiface_regs) {
			dev_err(ksp->dev,
				"failed to ioremap switch registers\n");
			ret = -EINVAL;
			goto failure;
		}
	}

	ksp->rx_irq = rxirq_res->start;
	ksp->rx_irq_name = rxirq_res->name ? rxirq_res->name : "Ethernet RX";
	ksp->tx_irq = txirq_res->start;
	ksp->tx_irq_name = txirq_res->name ? txirq_res->name : "Ethernet TX";
	ksp->link_irq = (linkirq_res ? linkirq_res->start : -1);
	ksp->link_irq_name = (linkirq_res && linkirq_res->name) ?
		linkirq_res->name : "Ethernet Link";

	/* driver system setup */
	ndev->netdev_ops = &ks8695_netdev_ops;
	ndev->watchdog_timeo	 = msecs_to_jiffies(watchdog);

	netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT);

	/* Retrieve the default MAC address from the chip, as programmed
	 * by the bootloader or a previous driver instance. */
	machigh = ks8695_readreg(ksp, KS8695_MAH);
	maclow = ks8695_readreg(ksp, KS8695_MAL);

	ndev->dev_addr[0] = (machigh >> 8) & 0xFF;
	ndev->dev_addr[1] = machigh & 0xFF;
	ndev->dev_addr[2] = (maclow >> 24) & 0xFF;
	ndev->dev_addr[3] = (maclow >> 16) & 0xFF;
	ndev->dev_addr[4] = (maclow >> 8) & 0xFF;
	ndev->dev_addr[5] = maclow & 0xFF;

	if (!is_valid_ether_addr(ndev->dev_addr))
		dev_warn(ksp->dev, "%s: Invalid ethernet MAC address. Please "
			 "set using ifconfig\n", ndev->name);

	/* In order to be efficient memory-wise, we allocate both rings
	 * in one go: the TX ring first, the RX ring directly after. */
	ksp->ring_base = dma_alloc_coherent(&pdev->dev, RING_DMA_SIZE,
					    &ksp->ring_base_dma, GFP_KERNEL);
	if (!ksp->ring_base) {
		ret = -ENOMEM;
		goto failure;
	}

	/* Specify the TX DMA ring buffer */
	ksp->tx_ring = ksp->ring_base;
	ksp->tx_ring_dma = ksp->ring_base_dma;

	/* And initialise the queue's lock */
	spin_lock_init(&ksp->txq_lock);
	spin_lock_init(&ksp->rx_lock);

	/* Specify the RX DMA ring buffer */
	ksp->rx_ring = ksp->ring_base + TX_RING_DMA_SIZE;
	ksp->rx_ring_dma = ksp->ring_base_dma + TX_RING_DMA_SIZE;

	/* Zero the descriptor rings */
	memset(ksp->tx_ring, 0, TX_RING_DMA_SIZE);
	memset(ksp->rx_ring, 0, RX_RING_DMA_SIZE);

	/* Build the circular linked lists of descriptors */
	for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
		ksp->tx_ring[buff_n].next_desc =
			cpu_to_le32(ksp->tx_ring_dma +
				    (sizeof(struct tx_ring_desc) *
				     ((buff_n + 1) & MAX_TX_DESC_MASK)));
	}

	for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
		ksp->rx_ring[buff_n].next_desc =
			cpu_to_le32(ksp->rx_ring_dma +
				    (sizeof(struct rx_ring_desc) *
				     ((buff_n + 1) & MAX_RX_DESC_MASK)));
	}

	/* Classify the port: switch registers without a link IRQ means
	 * LAN, with one means WAN, neither means HPNA. */
	if (ksp->phyiface_regs && ksp->link_irq == -1) {
		ks8695_init_switch(ksp);
		ksp->dtype = KS8695_DTYPE_LAN;
		SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
	} else if (ksp->phyiface_regs && ksp->link_irq != -1) {
		ks8695_init_wan_phy(ksp);
		ksp->dtype = KS8695_DTYPE_WAN;
		SET_ETHTOOL_OPS(ndev, &ks8695_wan_ethtool_ops);
	} else {
		/* No initialisation since HPNA does not have a PHY */
		ksp->dtype = KS8695_DTYPE_HPNA;
		SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
	}

	/* And bring up the net_device with the net core */
	platform_set_drvdata(pdev, ndev);
	ret = register_netdev(ndev);

	if (ret == 0) {
		dev_info(ksp->dev, "ks8695 ethernet (%s) MAC: %pM\n",
			 ks8695_port_type(ksp), ndev->dev_addr);
	} else {
		/* Report the failure to register the net_device */
		dev_err(ksp->dev, "ks8695net: failed to register netdev.\n");
		goto failure;
	}

	/* All is well */
	return 0;

	/* Error exit path */
failure:
	ks8695_release_device(ksp);
	free_netdev(ndev);

	return ret;
}
1547
1548
1549
1550
1551
1552
1553
1554
1555static int
1556ks8695_drv_suspend(struct platform_device *pdev, pm_message_t state)
1557{
1558 struct net_device *ndev = platform_get_drvdata(pdev);
1559 struct ks8695_priv *ksp = netdev_priv(ndev);
1560
1561 ksp->in_suspend = 1;
1562
1563 if (netif_running(ndev)) {
1564 netif_device_detach(ndev);
1565 ks8695_shutdown(ksp);
1566 }
1567
1568 return 0;
1569}
1570
1571
1572
1573
1574
1575
1576
1577
1578static int
1579ks8695_drv_resume(struct platform_device *pdev)
1580{
1581 struct net_device *ndev = platform_get_drvdata(pdev);
1582 struct ks8695_priv *ksp = netdev_priv(ndev);
1583
1584 if (netif_running(ndev)) {
1585 ks8695_reset(ksp);
1586 ks8695_init_net(ksp);
1587 ks8695_set_multicast(ndev);
1588 netif_device_attach(ndev);
1589 }
1590
1591 ksp->in_suspend = 0;
1592
1593 return 0;
1594}
1595
1596
1597
1598
1599
1600
1601
/*
 * ks8695_drv_remove - Platform remove hook
 * @pdev: The device being removed
 *
 * Unregisters the net_device, releases all hardware resources and
 * frees the net_device.
 */
static int __devexit
ks8695_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ks8695_priv *ksp = netdev_priv(ndev);

	platform_set_drvdata(pdev, NULL);
	netif_napi_del(&ksp->napi);

	unregister_netdev(ndev);
	ks8695_release_device(ksp);
	free_netdev(ndev);

	dev_dbg(&pdev->dev, "released and freed device\n");
	return 0;
}
1618
/* Platform driver glue; binds by name to "ks8695_ether" devices */
static struct platform_driver ks8695_driver = {
	.driver = {
		.name	= MODULENAME,
		.owner	= THIS_MODULE,
	},
	.probe		= ks8695_probe,
	.remove		= __devexit_p(ks8695_drv_remove),
	.suspend	= ks8695_drv_suspend,
	.resume		= ks8695_drv_resume,
};
1629
1630
1631
/* Module entry point: announce ourselves and register the driver */
static int __init
ks8695_init(void)
{
	printk(KERN_INFO "%s Ethernet driver, V%s\n",
	       MODULENAME, MODULEVERSION);

	return platform_driver_register(&ks8695_driver);
}
1640
/* Module exit point: unregister the platform driver */
static void __exit
ks8695_cleanup(void)
{
	platform_driver_unregister(&ks8695_driver);
}
1646
module_init(ks8695_init);
module_exit(ks8695_cleanup);

MODULE_AUTHOR("Simtec Electronics");
MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MODULENAME);

/* Read-only module parameter: TX watchdog timeout (and DMA reset
 * poll bound) in milliseconds */
module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
1657