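/* ks8851_mll.c - driver for the Micrel KS8851 MLL ethernet controller
 *
 * Handles the 16-bit, memory-mapped ("MLL") bus variant of the KS8851
 * single-port ethernet chip, as opposed to the SPI-attached variant.
 */
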
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/cache.h>
#include <linux/crc32.h>
#include <linux/crc32poly.h>
#include <linux/mii.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/ks8851_mll.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_net.h>

#include "ks8851.h"

#define	DRV_NAME	"ks8851_mll"

static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };

#define MAX_RECV_FRAMES		255
#define MAX_BUF_SIZE		2048
#define TX_BUF_SIZE		2000
#define RX_BUF_SIZE		2000

#define RXCR1_FILTER_MASK	(RXCR1_RXINVF | RXCR1_RXAE | \
				 RXCR1_RXMAFMA | RXCR1_RXPAFMA)
#define RXQCR_CMD_CNTL		(RXQCR_RXFCTE | RXQCR_ADRFE)

#define ENUM_BUS_NONE		0
#define ENUM_BUS_8BIT		1
#define ENUM_BUS_16BIT		2
#define ENUM_BUS_32BIT		3

#define MAX_MCAST_LST		32
#define HW_MCAST_SIZE		8

/* TX frame header pushed into the QMU ahead of each frame:
 * txw[0] is the control word, txw[1] is the frame byte count.
 */
union ks_tx_hdr {
	u8	txb[4];
	__le16	txw[2];
};

/* Per-frame status/length pair read from the RX frame header registers. */
struct type_frame_head {
	u16	sts;	/* frame status (RXFHSR) */
	u16	len;	/* frame byte count (RXFHBCR) */
};

/* Driver private data for one KS8851 MLL device. */
struct ks_net {
	struct net_device	*netdev;
	void __iomem		*hw_addr;	/* data register window */
	void __iomem		*hw_addr_cmd;	/* command register window */
	union ks_tx_hdr		txh ____cacheline_aligned;
	struct mutex		lock;		/* serialises register access from process context */
	struct platform_device	*pdev;
	struct mii_if_info	mii;
	struct type_frame_head	*frame_head_info;	/* per-frame RX status/length table */
	spinlock_t		statelock;	/* protects the TX path */
	u32			msg_enable;
	u32			frame_cnt;	/* frames pending in the RX queue */
	int			bus_width;	/* ENUM_BUS_* */

	u16			rc_rxqcr;	/* cached RXQCR value */
	u16			rc_txcr;	/* cached TXCR value */
	u16			rc_ier;		/* interrupt sources to enable in IER */
	u16			sharedbus;	/* non-zero if the chip sits on a shared bus */
	u16			cmd_reg_cache;	/* last value written to the command register */
	u16			cmd_reg_cache_int;	/* command register saved across the ISR */
	u16			promiscuous;
	u16			all_mcast;
	u16			mcast_lst_size;	/* number of entries in mcast_lst */
	u8			mcast_lst[MAX_MCAST_LST][ETH_ALEN];
	u8			mcast_bits[HW_MCAST_SIZE];	/* multicast hash table bits */
	u8			mac_addr[6];
	u8			fid;
	u8			extra_byte;	/* bus-width dependent padding in front of RX data */
	u8			enabled;	/* QMU (TX/RX engines) enabled */
};

static int msg_enable;

#define BE3	0x8000	/* byte enable 3 */
#define BE2	0x4000	/* byte enable 2 */
#define BE1	0x2000	/* byte enable 1 */
#define BE0	0x1000	/* byte enable 0 */
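
/* The KS8851 MLL is accessed through two 16-bit windows: a command
 * register (hw_addr_cmd) and a data register (hw_addr).  A register
 * access first writes the register offset, together with the BEx byte
 * enable bits selecting the active byte lanes, to the command window
 * and then reads or writes the data window.  The last command word is
 * kept in cmd_reg_cache so the interrupt handler can restore it if it
 * interrupts an access in progress (see ks_save_cmd_reg() and
 * ks_restore_cmd_reg()).
 */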

/* ks_rdreg8 - read an 8-bit register from the device */
static u8 ks_rdreg8(struct ks_net *ks, int offset)
{
	u16 data;
	u8 shift_bit = offset & 0x03;
	u8 shift_data = (offset & 1) << 3;

	ks->cmd_reg_cache = (u16)offset | (u16)(BE0 << shift_bit);
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
	data = ioread16(ks->hw_addr);
	return (u8)(data >> shift_data);
}

/* ks_rdreg16 - read a 16-bit register from the device */
static u16 ks_rdreg16(struct ks_net *ks, int offset)
{
	ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
	return ioread16(ks->hw_addr);
}

/* ks_wrreg8 - write an 8-bit value to a device register */
static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
{
	u8 shift_bit = (offset & 0x03);
	u16 value_write = (u16)(value << ((offset & 1) << 3));

	ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit);
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
	iowrite16(value_write, ks->hw_addr);
}

/* ks_wrreg16 - write a 16-bit value to a device register */
static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
{
	ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
	iowrite16(value, ks->hw_addr);
}

/* ks_inblk - read a block of data (as 16-bit words) from the QMU */
static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
{
	len >>= 1;
	while (len--)
		*wptr++ = (u16)ioread16(ks->hw_addr);
}

/* ks_outblk - write a block of data (as 16-bit words) to the QMU */
static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
{
	len >>= 1;
	while (len--)
		iowrite16(*wptr++, ks->hw_addr);
}

static void ks_disable_int(struct ks_net *ks)
{
	ks_wrreg16(ks, KS_IER, 0x0000);
}

static void ks_enable_int(struct ks_net *ks)
{
	ks_wrreg16(ks, KS_IER, ks->rc_ier);
}

/* ks_tx_fifo_space - return the free space in the TX FIFO, in bytes */
static inline u16 ks_tx_fifo_space(struct ks_net *ks)
{
	return ks_rdreg16(ks, KS_TXMIR) & 0x1fff;
}
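
/* The interrupt handler shares the command/data window with process
 * context.  The current command register is saved on entry to the ISR
 * and restored on exit so that a register access interrupted half way
 * through can resume where it left off.
 */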
static inline void ks_save_cmd_reg(struct ks_net *ks)
{
	ks->cmd_reg_cache_int = ks->cmd_reg_cache;
}

static inline void ks_restore_cmd_reg(struct ks_net *ks)
{
	ks->cmd_reg_cache = ks->cmd_reg_cache_int;
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
}

/* ks_set_powermode - set the device power mode (one of the PMECR_PM_* values) */
static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode)
{
	unsigned pmecr;

	netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);

	ks_rdreg16(ks, KS_GRR);
	pmecr = ks_rdreg16(ks, KS_PMECR);
	pmecr &= ~PMECR_PM_MASK;
	pmecr |= pwrmode;

	ks_wrreg16(ks, KS_PMECR, pmecr);
}

/* ks_read_config - read the chip configuration (bus width, shared bus) */
static void ks_read_config(struct ks_net *ks)
{
	u16 reg_data = 0;

	/* an 8-bit read works regardless of the configured bus width */
	reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF;
	reg_data |= ks_rdreg8(ks, KS_CCR + 1) << 8;

	/* remember whether the chip sits on a shared bus */
	ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;

	/* the amount of padding in front of QMU data depends on the
	 * bus width, so remember how many bytes to skip on RX reads
	 */
	if (reg_data & CCR_8BIT) {
		ks->bus_width = ENUM_BUS_8BIT;
		ks->extra_byte = 1;
	} else if (reg_data & CCR_16BIT) {
		ks->bus_width = ENUM_BUS_16BIT;
		ks->extra_byte = 2;
	} else {
		ks->bus_width = ENUM_BUS_32BIT;
		ks->extra_byte = 4;
	}
}

/* ks_soft_reset - issue one of the GRR soft-reset operations to the device */
static void ks_soft_reset(struct ks_net *ks, unsigned op)
{
	/* disable interrupts first */
	ks_wrreg16(ks, KS_IER, 0x0000);
	ks_wrreg16(ks, KS_GRR, op);
	mdelay(10);	/* wait a short time for the reset to take effect */
	ks_wrreg16(ks, KS_GRR, 0);
	mdelay(1);	/* wait for the condition to clear */
}

static void ks_enable_qmu(struct ks_net *ks)
{
	u16 w;

	/* enable QMU transmit (TXCR) */
	w = ks_rdreg16(ks, KS_TXCR);
	ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);

	/* enable the RX frame count threshold (RXQCR) */
	w = ks_rdreg16(ks, KS_RXQCR);
	ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);

	/* enable QMU receive (RXCR1) */
	w = ks_rdreg16(ks, KS_RXCR1);
	ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
	ks->enabled = true;
}

static void ks_disable_qmu(struct ks_net *ks)
{
	u16 w;

	/* disable QMU transmit (TXCR) */
	w = ks_rdreg16(ks, KS_TXCR);
	w &= ~TXCR_TXE;
	ks_wrreg16(ks, KS_TXCR, w);

	/* disable QMU receive (RXCR1) */
	w = ks_rdreg16(ks, KS_RXCR1);
	w &= ~RXCR1_RXE;
	ks_wrreg16(ks, KS_RXCR1, w);

	ks->enabled = false;
}
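
/**
 * ks_read_qmu - read one frame out of the QMU RX FIFO
 * @ks: device state
 * @buf: destination buffer
 * @len: frame length reported by the RX frame header
 *
 * Points the RX frame data pointer at the start of the frame with
 * auto-increment enabled, switches the FIFO into pseudo-DMA access
 * (RXQCR_SDA), skips the bus-width dependent dummy data together with
 * the status and byte-count words, then copies the frame itself,
 * rounded up to a 4-byte multiple, before restoring normal register
 * access.
 */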
static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
{
	u32 r = ks->extra_byte & 0x1;
	u32 w = ks->extra_byte - r;

	/* 1. enter pseudo-DMA mode */
	ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
	ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);

	/* 2. read (and discard) the prepended dummy, status and
	 *    byte-count data
	 */
	if (unlikely(r))
		ioread8(ks->hw_addr);
	ks_inblk(ks, buf, w + 2 + 2);

	/* 3. read the packet data, padded to a 4-byte multiple */
	ks_inblk(ks, buf, ALIGN(len, 4));

	/* 4. leave pseudo-DMA mode */
	ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
}
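
/**
 * ks_rcv - read pending frames from the device and pass them to the stack
 * @ks: device state
 * @netdev: network device being serviced
 *
 * Reads the number of pending frames, snapshots the status and length
 * of each one, then copies each valid frame into a freshly allocated
 * skb.  Frames that are marked invalid, zero-length or larger than
 * RX_BUF_SIZE are released from the FIFO (RXQCR_RRXEF) and counted as
 * dropped.
 */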
static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
{
	u32 i;
	struct type_frame_head *frame_hdr = ks->frame_head_info;
	struct sk_buff *skb;

	ks->frame_cnt = ks_rdreg16(ks, KS_RXFCTR) >> 8;

	/* read the header information of every pending frame first */
	for (i = 0; i < ks->frame_cnt; i++) {
		/* received frame status */
		frame_hdr->sts = ks_rdreg16(ks, KS_RXFHSR);
		/* received frame length */
		frame_hdr->len = ks_rdreg16(ks, KS_RXFHBCR);
		frame_hdr++;
	}

	frame_hdr = ks->frame_head_info;
	while (ks->frame_cnt--) {
		if (unlikely(!(frame_hdr->sts & RXFSHR_RXFV) ||
			     frame_hdr->len >= RX_BUF_SIZE ||
			     frame_hdr->len <= 0)) {
			/* discard the frame and update the error counters */
			ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
			netdev->stats.rx_dropped++;
			if (!(frame_hdr->sts & RXFSHR_RXFV))
				netdev->stats.rx_frame_errors++;
			else
				netdev->stats.rx_length_errors++;
			frame_hdr++;
			continue;
		}

		skb = netdev_alloc_skb(netdev, frame_hdr->len + 16);
		if (likely(skb)) {
			skb_reserve(skb, 2);
			/* read the data block including the trailing 4-byte CRC */
			ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
			skb_put(skb, frame_hdr->len - 4);
			skb->protocol = eth_type_trans(skb, netdev);
			netif_rx(skb);
			/* exclude the CRC from the byte count */
			netdev->stats.rx_bytes += frame_hdr->len - 4;
			netdev->stats.rx_packets++;
		} else {
			ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
			netdev->stats.rx_dropped++;
		}
		frame_hdr++;
	}
}

/* ks_update_link_status - reflect the PHY link state in the carrier flag */
static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
{
	u32 link_up_status;

	if (ks_rdreg16(ks, KS_P1SR) & P1SR_LINK_GOOD) {
		netif_carrier_on(netdev);
		link_up_status = true;
	} else {
		netif_carrier_off(netdev);
		link_up_status = false;
	}

	netif_dbg(ks, link, ks->netdev,
		  "%s: %s\n", __func__, link_up_status ? "UP" : "DOWN");
}
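
/**
 * ks_irq - device interrupt handler
 * @irq: interrupt number
 * @pw: the net_device passed to request_irq()
 *
 * Acknowledges the pending interrupt sources and services them:
 * received frames (RXI), link change (LCI), transmit done (TXI),
 * link-up detect wake events (LDI) and receiver overrun (RXOI).
 * The command register is saved on entry and restored on exit so a
 * register access interrupted in process context is not corrupted.
 */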
static irqreturn_t ks_irq(int irq, void *pw)
{
	struct net_device *netdev = pw;
	struct ks_net *ks = netdev_priv(netdev);
	u16 status;

	/* this must be the first action in the handler */
	ks_save_cmd_reg(ks);

	status = ks_rdreg16(ks, KS_ISR);
	if (unlikely(!status)) {
		ks_restore_cmd_reg(ks);
		return IRQ_NONE;
	}

	ks_wrreg16(ks, KS_ISR, status);

	if (likely(status & IRQ_RXI))
		ks_rcv(ks, netdev);

	if (unlikely(status & IRQ_LCI))
		ks_update_link_status(netdev, ks);

	if (unlikely(status & IRQ_TXI))
		netif_wake_queue(netdev);

	if (unlikely(status & IRQ_LDI)) {
		/* acknowledge the link wake-up event */
		u16 pmecr = ks_rdreg16(ks, KS_PMECR);

		pmecr &= ~PMECR_WKEVT_MASK;
		ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
	}

	if (unlikely(status & IRQ_RXOI))
		ks->netdev->stats.rx_over_errors++;

	/* this must be the last action in the handler */
	ks_restore_cmd_reg(ks);
	return IRQ_HANDLED;
}
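
/**
 * ks_net_open - open the network device
 * @netdev: network device being opened
 *
 * Requests the (active-low, level-triggered) interrupt line, wakes the
 * chip from soft power-down, acknowledges any stale interrupts, then
 * enables the interrupt sources and the QMU and starts the transmit
 * queue.
 */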
static int ks_net_open(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	int err;

#define	KS_INT_FLAGS	IRQF_TRIGGER_LOW

	netif_dbg(ks, ifup, ks->netdev, "%s - entry\n", __func__);

	err = request_irq(netdev->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev);
	if (err) {
		pr_err("Failed to request IRQ: %d: %d\n", netdev->irq, err);
		return err;
	}

	/* wake the chip from soft power-down */
	ks_set_powermode(ks, PMECR_PM_NORMAL);
	mdelay(1);	/* wait for the normal power mode to take effect */

	/* clear any stale interrupts, then enable the ones we use */
	ks_wrreg16(ks, KS_ISR, 0xffff);
	ks_enable_int(ks);
	ks_enable_qmu(ks);
	netif_start_queue(ks->netdev);

	netif_dbg(ks, ifup, ks->netdev, "network device up\n");

	return 0;
}

/**
 * ks_net_stop - close the network device
 * @netdev: network device being closed
 *
 * Stops the transmit queue, masks and acknowledges all interrupts,
 * shuts down the QMU and puts the chip into soft power-down before
 * releasing the interrupt line.
 */
static int ks_net_stop(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);

	netif_info(ks, ifdown, netdev, "shutting down\n");

	netif_stop_queue(netdev);

	mutex_lock(&ks->lock);

	/* turn off the IRQs and acknowledge anything outstanding */
	ks_wrreg16(ks, KS_IER, 0x0000);
	ks_wrreg16(ks, KS_ISR, 0xffff);

	/* shut down the RX/TX QMU */
	ks_disable_qmu(ks);

	/* soft power-down to save power */
	ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
	free_irq(netdev->irq, netdev);
	mutex_unlock(&ks->lock);
	return 0;
}
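
/**
 * ks_write_qmu - write one frame into the QMU TX FIFO
 * @ks: device state
 * @pdata: frame data
 * @len: frame length in bytes
 *
 * Builds the two-word TX header (control word and byte count), copies
 * the header and the frame data into the FIFO in pseudo-DMA mode, then
 * issues a manual enqueue (TXQCR_METFE) and busy-waits until the
 * device has accepted the frame.
 */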
static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
{
	/* TX control word and byte count */
	ks->txh.txw[0] = 0;
	ks->txh.txw[1] = cpu_to_le16(len);

	/* enter pseudo-DMA mode, copy the header and the frame data */
	ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
	ks_outblk(ks, ks->txh.txw, 4);
	ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
	ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);

	/* start the transfer and wait until the frame has been enqueued */
	ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
	while (ks_rdreg16(ks, KS_TXQCR) & TXQCR_METFE)
		;
}
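
/**
 * ks_start_xmit - transmit one skb
 * @skb: socket buffer to send
 * @netdev: network device
 *
 * Interrupts are kept off while the frame is copied into the TX FIFO.
 * If the FIFO does not have room for the frame plus its overhead, the
 * skb is left queued and NETDEV_TX_BUSY is returned so the stack will
 * retry later.
 */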
static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	netdev_tx_t retv = NETDEV_TX_OK;
	struct ks_net *ks = netdev_priv(netdev);

	disable_irq(netdev->irq);
	ks_disable_int(ks);
	spin_lock(&ks->statelock);

	/* the chip needs extra room in the TX FIFO beyond the frame
	 * itself (frame header, dword alignment and CRC), hence the
	 * skb->len + 12 check
	 */
	if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) {
		ks_write_qmu(ks, skb->data, skb->len);
		/* update TX statistics */
		netdev->stats.tx_bytes += skb->len;
		netdev->stats.tx_packets++;
		dev_kfree_skb(skb);
	} else {
		retv = NETDEV_TX_BUSY;
	}
	spin_unlock(&ks->statelock);
	ks_enable_int(ks);
	enable_irq(netdev->irq);
	return retv;
}

/* ks_start_rx - enable the QMU receive engine (RXCR1_RXE) */
static void ks_start_rx(struct ks_net *ks)
{
	u16 cntl;

	cntl = ks_rdreg16(ks, KS_RXCR1);
	cntl |= RXCR1_RXE;
	ks_wrreg16(ks, KS_RXCR1, cntl);
}

/* ks_stop_rx - disable the QMU receive engine */
static void ks_stop_rx(struct ks_net *ks)
{
	u16 cntl;

	cntl = ks_rdreg16(ks, KS_RXCR1);
	cntl &= ~RXCR1_RXE;
	ks_wrreg16(ks, KS_RXCR1, cntl);
}

static unsigned long const ethernet_polynomial = CRC32_POLY_BE;

/* ether_gen_crc - bitwise, MSB-first CRC-32 over @length bytes of @data */
static unsigned long ether_gen_crc(int length, u8 *data)
{
	long crc = -1;

	while (--length >= 0) {
		u8 current_octet = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
			crc = (crc << 1) ^
				((crc < 0) ^ (current_octet & 1) ?
				ethernet_polynomial : 0);
		}
	}
	return (unsigned long)crc;
}
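
/**
 * ks_set_grpaddr - program the multicast hash table from the address list
 * @ks: device state
 *
 * For each address in mcast_lst the top six bits of its CRC-32 select
 * one bit of the 64-bit hash table; the resulting eight bytes are then
 * written, two at a time, into the hash table registers starting at
 * KS_MAHTR0.
 */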
static void ks_set_grpaddr(struct ks_net *ks)
{
	u8 i;
	u32 index, position, value;

	memset(ks->mcast_bits, 0, sizeof(u8) * HW_MCAST_SIZE);

	for (i = 0; i < ks->mcast_lst_size; i++) {
		position = (ether_gen_crc(6, ks->mcast_lst[i]) >> 26) & 0x3f;
		index = position >> 3;
		value = 1 << (position & 7);
		ks->mcast_bits[index] |= (u8)value;
	}

	for (i = 0; i < HW_MCAST_SIZE; i++) {
		if (i & 1) {
			ks_wrreg16(ks, (u16)((KS_MAHTR0 + i) & ~1),
				   (ks->mcast_bits[i] << 8) |
				   ks->mcast_bits[i - 1]);
		}
	}
}

/* ks_clear_mcast - clear the multicast hash table, in software and hardware */
static void ks_clear_mcast(struct ks_net *ks)
{
	u16 i, mcast_size;

	for (i = 0; i < HW_MCAST_SIZE; i++)
		ks->mcast_bits[i] = 0;

	/* the 64-bit hash table spans four 16-bit registers, the same
	 * ones that ks_set_grpaddr() programs
	 */
	mcast_size = HW_MCAST_SIZE >> 1;
	for (i = 0; i < mcast_size; i++)
		ks_wrreg16(ks, KS_MAHTR0 + (2 * i), 0);
}

/* ks_set_promis - configure RXCR1 for promiscuous or normal filtering */
static void ks_set_promis(struct ks_net *ks, u16 promiscuous_mode)
{
	u16 cntl;

	ks->promiscuous = promiscuous_mode;
	ks_stop_rx(ks);	/* stop receiving while reconfiguring */
	cntl = ks_rdreg16(ks, KS_RXCR1);

	cntl &= ~RXCR1_FILTER_MASK;
	if (promiscuous_mode)
		/* enable promiscuous mode */
		cntl |= RXCR1_RXAE | RXCR1_RXINVF;
	else
		/* normal mode: perfect address match filtering */
		cntl |= RXCR1_RXPAFMA;

	ks_wrreg16(ks, KS_RXCR1, cntl);

	if (ks->enabled)
		ks_start_rx(ks);
}

/* ks_set_mcast - enable or disable "accept all multicast" mode */
static void ks_set_mcast(struct ks_net *ks, u16 mcast)
{
	u16 cntl;

	ks->all_mcast = mcast;
	ks_stop_rx(ks);	/* stop receiving while reconfiguring */
	cntl = ks_rdreg16(ks, KS_RXCR1);
	cntl &= ~RXCR1_FILTER_MASK;
	if (mcast)
		/* accept all multicast frames */
		cntl |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
	else
		/* normal mode: perfect address match filtering */
		cntl |= RXCR1_RXPAFMA;

	ks_wrreg16(ks, KS_RXCR1, cntl);

	if (ks->enabled)
		ks_start_rx(ks);
}

/* ks_set_rx_mode - ndo_set_rx_mode handler: program the RX filtering */
static void ks_set_rx_mode(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	struct netdev_hw_addr *ha;

	/* turn promiscuous or all-multicast mode on or off */
	if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC)
		ks_set_promis(ks,
			      (u16)((netdev->flags & IFF_PROMISC) == IFF_PROMISC));
	else if ((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI)
		ks_set_mcast(ks,
			     (u16)((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI));
	else
		ks_set_promis(ks, false);

	if ((netdev->flags & IFF_MULTICAST) && netdev_mc_count(netdev)) {
		if (netdev_mc_count(netdev) <= MAX_MCAST_LST) {
			int i = 0;

			netdev_for_each_mc_addr(ha, netdev) {
				if (i >= MAX_MCAST_LST)
					break;
				memcpy(ks->mcast_lst[i++], ha->addr, ETH_ALEN);
			}
			ks->mcast_lst_size = (u8)i;
			ks_set_grpaddr(ks);
		} else {
			/* the list is too big for the hardware filter,
			 * so accept all multicast instead
			 */
			ks->mcast_lst_size = MAX_MCAST_LST;
			ks_set_mcast(ks, true);
		}
	} else {
		ks->mcast_lst_size = 0;
		ks_clear_mcast(ks);
	}
}

/* ks_set_mac - write a new hardware MAC address to the MARH/MARM/MARL registers */
static void ks_set_mac(struct ks_net *ks, u8 *data)
{
	u16 *pw = (u16 *)data;
	u16 w, u;

	ks_stop_rx(ks);	/* stop receiving while reconfiguring */

	/* each 16-bit word is byte-swapped before being written to the
	 * MAC address registers
	 */
	u = *pw++;
	w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
	ks_wrreg16(ks, KS_MARH, w);

	u = *pw++;
	w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
	ks_wrreg16(ks, KS_MARM, w);

	u = *pw;
	w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
	ks_wrreg16(ks, KS_MARL, w);

	memcpy(ks->mac_addr, data, ETH_ALEN);

	if (ks->enabled)
		ks_start_rx(ks);
}

/* ks_set_mac_address - ndo_set_mac_address handler */
static int ks_set_mac_address(struct net_device *netdev, void *paddr)
{
	struct ks_net *ks = netdev_priv(netdev);
	struct sockaddr *addr = paddr;
	u8 *da;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	da = (u8 *)netdev->dev_addr;

	ks_set_mac(ks, da);
	return 0;
}

static int ks_net_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
	struct ks_net *ks = netdev_priv(netdev);

	if (!netif_running(netdev))
		return -EINVAL;

	return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
}

static const struct net_device_ops ks_netdev_ops = {
	.ndo_open		= ks_net_open,
	.ndo_stop		= ks_net_stop,
	.ndo_do_ioctl		= ks_net_ioctl,
	.ndo_start_xmit		= ks_start_xmit,
	.ndo_set_mac_address	= ks_set_mac_address,
	.ndo_set_rx_mode	= ks_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
};

/* ethtool support */

static void ks_get_drvinfo(struct net_device *netdev,
			   struct ethtool_drvinfo *di)
{
	strlcpy(di->driver, DRV_NAME, sizeof(di->driver));
	strlcpy(di->version, "1.00", sizeof(di->version));
	strlcpy(di->bus_info, dev_name(netdev->dev.parent),
		sizeof(di->bus_info));
}

static u32 ks_get_msglevel(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);

	return ks->msg_enable;
}

static void ks_set_msglevel(struct net_device *netdev, u32 to)
{
	struct ks_net *ks = netdev_priv(netdev);

	ks->msg_enable = to;
}

static int ks_get_link_ksettings(struct net_device *netdev,
				 struct ethtool_link_ksettings *cmd)
{
	struct ks_net *ks = netdev_priv(netdev);

	mii_ethtool_get_link_ksettings(&ks->mii, cmd);

	return 0;
}

static int ks_set_link_ksettings(struct net_device *netdev,
				 const struct ethtool_link_ksettings *cmd)
{
	struct ks_net *ks = netdev_priv(netdev);

	return mii_ethtool_set_link_ksettings(&ks->mii, cmd);
}

static u32 ks_get_link(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);

	return mii_link_ok(&ks->mii);
}

static int ks_nway_reset(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);

	return mii_nway_restart(&ks->mii);
}

static const struct ethtool_ops ks_ethtool_ops = {
	.get_drvinfo		= ks_get_drvinfo,
	.get_msglevel		= ks_get_msglevel,
	.set_msglevel		= ks_set_msglevel,
	.get_link		= ks_get_link,
	.nway_reset		= ks_nway_reset,
	.get_link_ksettings	= ks_get_link_ksettings,
	.set_link_ksettings	= ks_set_link_ksettings,
};
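
/* MII interface controls
 *
 * The generic MII layer addresses PHY registers by their standard MII
 * register numbers; ks_phy_reg() translates those numbers into the
 * corresponding port 1 PHY registers of the KS8851.  Registers without
 * an equivalent map to 0 and read back as 0.
 */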
static int ks_phy_reg(int reg)
{
	switch (reg) {
	case MII_BMCR:
		return KS_P1MBCR;
	case MII_BMSR:
		return KS_P1MBSR;
	case MII_PHYSID1:
		return KS_PHY1ILR;
	case MII_PHYSID2:
		return KS_PHY1IHR;
	case MII_ADVERTISE:
		return KS_P1ANAR;
	case MII_LPA:
		return KS_P1ANLPR;
	}

	return 0x0;
}
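
/**
 * ks_phy_read - MII read callback for the mii_if_info interface
 * @netdev: network device
 * @phy_addr: PHY address (unused, the PHY is internal)
 * @reg: MII register number
 *
 * Reads the chip register that corresponds to @reg, taking the
 * register lock so the access does not race with other process
 * context users.  Unsupported registers read back as 0.
 */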
static int ks_phy_read(struct net_device *netdev, int phy_addr, int reg)
{
	struct ks_net *ks = netdev_priv(netdev);
	int ksreg;
	int result;

	ksreg = ks_phy_reg(reg);
	if (!ksreg)
		return 0x0;	/* no error return allowed, so use zero */

	mutex_lock(&ks->lock);
	result = ks_rdreg16(ks, ksreg);
	mutex_unlock(&ks->lock);

	return result;
}

/* ks_phy_write - MII write callback for the mii_if_info interface */
static void ks_phy_write(struct net_device *netdev,
			 int phy, int reg, int value)
{
	struct ks_net *ks = netdev_priv(netdev);
	int ksreg;

	ksreg = ks_phy_reg(reg);
	if (ksreg) {
		mutex_lock(&ks->lock);
		ks_wrreg16(ks, ksreg, value);
		mutex_unlock(&ks->lock);
	}
}

/**
 * ks_read_selftest - read the built-in memory selftest result
 * @ks: device state
 *
 * Reads the memory BIST result register.  Returns 0 if the test
 * passed (or has not finished), otherwise a bitmask with bit 0 set
 * for a TX memory failure and bit 1 set for an RX memory failure.
 */
static int ks_read_selftest(struct ks_net *ks)
{
	unsigned both_done = MBIR_TXMBF | MBIR_RXMBF;
	int ret = 0;
	unsigned rd;

	rd = ks_rdreg16(ks, KS_MBIR);

	if ((rd & both_done) != both_done) {
		netdev_warn(ks->netdev, "Memory selftest not finished\n");
		return 0;
	}

	if (rd & MBIR_TXMBFA) {
		netdev_err(ks->netdev, "TX memory selftest fails\n");
		ret |= 1;
	}

	if (rd & MBIR_RXMBFA) {
		netdev_err(ks->netdev, "RX memory selftest fails\n");
		ret |= 2;
	}

	if (!ret)
		netdev_info(ks->netdev, "the selftest passes\n");
	return ret;
}
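
/**
 * ks_setup - program the default chip configuration
 * @ks: device state
 *
 * Sets up the TX and RX frame data pointers with auto-increment, the
 * RX frame count threshold, the default RXQCR command bits, the port 1
 * PHY duplex default, the TX flow control/padding/CRC options and the
 * RX filtering mode matching the current promiscuous/all-multicast
 * state.  Called before the interface is brought up.
 */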
static void ks_setup(struct ks_net *ks)
{
	u16 w;

	/* setup transmit frame data pointer auto-increment (TXFDPR) */
	ks_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI);

	/* setup receive frame data pointer auto-increment (RXFDPR) */
	ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);

	/* setup the receive frame threshold - 1 frame (RXFCTR) */
	ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_RXFCT_MASK);

	/* setup the RxQ command control (RXQCR) */
	ks->rc_rxqcr = RXQCR_CMD_CNTL;
	ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);

	/* default the PHY force mode to half duplex in case
	 * auto-negotiation fails
	 */
	w = ks_rdreg16(ks, KS_P1MBCR);
	w &= ~BMCR_FULLDPLX;
	ks_wrreg16(ks, KS_P1MBCR, w);

	/* transmit flow control, frame padding, CRC and IP checksum generation */
	w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
	ks_wrreg16(ks, KS_TXCR, w);

	/* receive flow control, broadcast/unicast/multicast frames and
	 * IP frame checksum checking; the filter mode depends on the
	 * current promiscuous/all-multicast state
	 */
	w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE | RXCR1_RXME | RXCR1_RXIPFCC;

	if (ks->promiscuous)		/* promiscuous mode */
		w |= (RXCR1_RXAE | RXCR1_RXINVF);
	else if (ks->all_mcast)		/* all multicast mode */
		w |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
	else				/* normal mode */
		w |= RXCR1_RXPAFMA;

	ks_wrreg16(ks, KS_RXCR1, w);
}

/* ks_setup_int - acknowledge pending interrupts and select the ones to use */
static void ks_setup_int(struct ks_net *ks)
{
	ks->rc_ier = 0x00;
	/* clear the interrupt status */
	ks_wrreg16(ks, KS_ISR, 0xffff);

	/* link change, transmit done and receive interrupts; the value
	 * is written to IER when the device is opened (ks_enable_int())
	 */
	ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI);
}

/* ks_hw_init - initialise software state and the RX frame header table */
static int ks_hw_init(struct ks_net *ks)
{
#define	MHEADER_SIZE	(sizeof(struct type_frame_head) * MAX_RECV_FRAMES)
	ks->promiscuous = 0;
	ks->all_mcast = 0;
	ks->mcast_lst_size = 0;

	ks->frame_head_info = devm_kmalloc(&ks->pdev->dev, MHEADER_SIZE,
					   GFP_KERNEL);
	if (!ks->frame_head_info)
		return false;

	ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS);
	return true;
}

#if defined(CONFIG_OF)
static const struct of_device_id ks8851_ml_dt_ids[] = {
	{ .compatible = "micrel,ks8851-mll" },
	{ }
};
MODULE_DEVICE_TABLE(of, ks8851_ml_dt_ids);
#endif
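
/**
 * ks8851_probe - platform bus probe
 * @pdev: platform device describing the controller
 *
 * Maps the data and command register windows, reads the chip ID and
 * runs the memory selftest, registers the net_device, then resets and
 * configures the chip and programs the MAC address, taken from the
 * device tree, from platform data, or randomly generated as a last
 * resort.
 */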
static int ks8851_probe(struct platform_device *pdev)
{
	int err;
	struct resource *io_d, *io_c;
	struct net_device *netdev;
	struct ks_net *ks;
	u16 id, data;
	const char *mac;

	netdev = alloc_etherdev(sizeof(struct ks_net));
	if (!netdev)
		return -ENOMEM;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	ks = netdev_priv(netdev);
	ks->netdev = netdev;

	/* map the data and command register windows */
	io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ks->hw_addr = devm_ioremap_resource(&pdev->dev, io_d);
	if (IS_ERR(ks->hw_addr)) {
		err = PTR_ERR(ks->hw_addr);
		goto err_free;
	}

	io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	ks->hw_addr_cmd = devm_ioremap_resource(&pdev->dev, io_c);
	if (IS_ERR(ks->hw_addr_cmd)) {
		err = PTR_ERR(ks->hw_addr_cmd);
		goto err_free;
	}

	netdev->irq = platform_get_irq(pdev, 0);
	if (netdev->irq < 0) {
		err = netdev->irq;
		goto err_free;
	}

	ks->pdev = pdev;

	mutex_init(&ks->lock);
	spin_lock_init(&ks->statelock);

	netdev->netdev_ops = &ks_netdev_ops;
	netdev->ethtool_ops = &ks_ethtool_ops;

	/* setup MII state */
	ks->mii.dev		= netdev;
	ks->mii.phy_id		= 1;
	ks->mii.phy_id_mask	= 1;
	ks->mii.reg_num_mask	= 0xf;
	ks->mii.mdio_read	= ks_phy_read;
	ks->mii.mdio_write	= ks_phy_write;

	netdev_info(netdev, "message enable is %d\n", msg_enable);
	/* set the default message enable */
	ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
						     NETIF_MSG_PROBE |
						     NETIF_MSG_LINK));
	ks_read_config(ks);

	/* simple check for a valid chip being connected to the bus */
	if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
		netdev_err(netdev, "failed to read device ID\n");
		err = -ENODEV;
		goto err_free;
	}

	if (ks_read_selftest(ks)) {
		netdev_err(netdev, "chip memory selftest failed\n");
		err = -ENODEV;
		goto err_free;
	}

	err = register_netdev(netdev);
	if (err)
		goto err_free;

	platform_set_drvdata(pdev, netdev);

	ks_soft_reset(ks, GRR_GSR);
	ks_hw_init(ks);
	ks_disable_qmu(ks);
	ks_setup(ks);
	ks_setup_int(ks);

	/* output pin drive strength: 16 mA */
	data = ks_rdreg16(ks, KS_OBCR);
	ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16mA);

	/* overwrite the default MAC address */
	if (pdev->dev.of_node) {
		mac = of_get_mac_address(pdev->dev.of_node);
		if (mac)
			memcpy(ks->mac_addr, mac, ETH_ALEN);
	} else {
		struct ks8851_mll_platform_data *pdata;

		pdata = dev_get_platdata(&pdev->dev);
		if (!pdata) {
			netdev_err(netdev, "No platform data\n");
			err = -ENODEV;
			goto err_pdata;
		}
		memcpy(ks->mac_addr, pdata->mac_addr, ETH_ALEN);
	}
	if (!is_valid_ether_addr(ks->mac_addr)) {
		/* use a random MAC since the one supplied is not valid */
		eth_random_addr(ks->mac_addr);
		netdev_info(netdev, "Using random mac address\n");
	}
	netdev_info(netdev, "Mac address is: %pM\n", ks->mac_addr);

	memcpy(netdev->dev_addr, ks->mac_addr, ETH_ALEN);

	ks_set_mac(ks, netdev->dev_addr);

	id = ks_rdreg16(ks, KS_CIDER);

	netdev_info(netdev, "Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
		    (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
	return 0;

err_pdata:
	unregister_netdev(netdev);
err_free:
	free_netdev(netdev);
	return err;
}

static int ks8851_remove(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);

	unregister_netdev(netdev);
	free_netdev(netdev);
	return 0;
}

static struct platform_driver ks8851_platform_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table	= of_match_ptr(ks8851_ml_dt_ids),
	},
	.probe = ks8851_probe,
	.remove = ks8851_remove,
};

module_platform_driver(ks8851_platform_driver);

MODULE_DESCRIPTION("KS8851 MLL Network driver");
MODULE_AUTHOR("David Choi <david.choi@micrel.com>");
MODULE_LICENSE("GPL");
module_param_named(message, msg_enable, int, 0);
MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");