1
2
3
4
5
6
7
8
9
10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13#include <linux/interrupt.h>
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/netdevice.h>
17#include <linux/etherdevice.h>
18#include <linux/ethtool.h>
19#include <linux/cache.h>
20#include <linux/crc32.h>
21#include <linux/crc32poly.h>
22#include <linux/mii.h>
23#include <linux/platform_device.h>
24#include <linux/delay.h>
25#include <linux/slab.h>
26#include <linux/ks8851_mll.h>
27#include <linux/of.h>
28#include <linux/of_device.h>
29#include <linux/of_net.h>
30
31#include "ks8851.h"
32
33#define DRV_NAME "ks8851_mll"
34
35static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
36#define MAX_RECV_FRAMES 255
37#define MAX_BUF_SIZE 2048
38#define TX_BUF_SIZE 2000
39#define RX_BUF_SIZE 2000
40
41#define RXCR1_FILTER_MASK (RXCR1_RXINVF | RXCR1_RXAE | \
42 RXCR1_RXMAFMA | RXCR1_RXPAFMA)
43#define RXQCR_CMD_CNTL (RXQCR_RXFCTE|RXQCR_ADRFE)
44
45#define ENUM_BUS_NONE 0
46#define ENUM_BUS_8BIT 1
47#define ENUM_BUS_16BIT 2
48#define ENUM_BUS_32BIT 3
49
50#define MAX_MCAST_LST 32
51#define HW_MCAST_SIZE 8
52
53
54
55
56
57
58
59
60
61
/* 4-byte header prepended to every frame written into the TX QMU:
 * word 0 is the control word, word 1 the byte count (little-endian).
 */
union ks_tx_hdr {
	u8 txb[4];
	__le16 txw[2];
};
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
/* Snapshot of one received frame's QMU header, taken in ks_rcv(). */
struct type_frame_head {
	u16 sts;	/* frame status word (KS_RXFHSR) */
	u16 len;	/* frame byte count (KS_RXFHBCR) */
};
113
/* ks_net - per-device driver private state.
 *
 * Register access works in two steps: the register address plus
 * byte-enable bits is written through @hw_addr_cmd, then data is moved
 * through @hw_addr.  The last command word is cached in @cmd_reg_cache
 * so the IRQ handler can save/restore it around its own accesses.
 */
struct ks_net {
	struct net_device *netdev;
	void __iomem *hw_addr;			/* data window */
	void __iomem *hw_addr_cmd;		/* command/address window */
	union ks_tx_hdr txh ____cacheline_aligned;	/* TX header scratch */
	struct mutex lock;			/* serialises register access from process context */
	struct platform_device *pdev;
	struct mii_if_info mii;			/* generic MII glue */
	struct type_frame_head *frame_head_info;	/* RX header snapshots (MAX_RECV_FRAMES) */
	spinlock_t statelock;			/* protects the TX path */
	u32 msg_enable;				/* NETIF_MSG_* bitmask */
	u32 frame_cnt;				/* pending RX frames, from RXFCTR */
	int bus_width;				/* ENUM_BUS_* as read from CCR */

	u16 rc_rxqcr;				/* cached RXQCR value */
	u16 rc_txcr;				/* cached TXCR value */
	u16 rc_ier;				/* cached interrupt-enable mask */
	u16 sharedbus;				/* CCR reports a shared bus */
	u16 cmd_reg_cache;			/* last word written to hw_addr_cmd */
	u16 cmd_reg_cache_int;			/* saved copy for the IRQ handler */
	u16 promiscuous;			/* promiscuous mode requested */
	u16 all_mcast;				/* accept-all-multicast requested */
	u16 mcast_lst_size;			/* entries used in mcast_lst */
	u8 mcast_lst[MAX_MCAST_LST][ETH_ALEN];	/* multicast address list */
	u8 mcast_bits[HW_MCAST_SIZE];		/* multicast hash table shadow */
	u8 mac_addr[6];				/* MAC currently programmed */
	u8 fid;					/* frame id counter */
	u8 extra_byte;				/* dummy bytes before RX data (bus-width dependent) */
	u8 enabled;				/* QMU running (see ks_enable_qmu) */
};
144
145static int msg_enable;
146
147#define BE3 0x8000
148#define BE2 0x4000
149#define BE1 0x2000
150#define BE0 0x1000
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
/**
 * ks_rdreg8 - read an 8-bit register through the 16-bit bus
 * @ks: device private data
 * @offset: register address
 *
 * The bus only moves 16-bit words, so the address is written to the
 * command window together with a byte-enable bit (BE0 shifted by the low
 * address bits) selecting the byte lane, and the wanted byte is then
 * extracted from the 16-bit read (odd offsets live in the upper byte).
 */
static u8 ks_rdreg8(struct ks_net *ks, int offset)
{
	u16 data;
	u8 shift_bit = offset & 0x03;
	/* 8 for odd offsets: the byte sits in bits 15:8 of the word */
	u8 shift_data = (offset & 1) << 3;
	ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit);
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
	data = ioread16(ks->hw_addr);
	return (u8)(data >> shift_data);
}
176
177
178
179
180
181
182
183
184
/**
 * ks_rdreg16 - read a 16-bit register
 * @ks: device private data
 * @offset: register address (16-bit aligned)
 *
 * Selects the word lane with BE1|BE0 (shifted up for the high word of a
 * 32-bit location), then reads the data window.
 */
static u16 ks_rdreg16(struct ks_net *ks, int offset)
{
	ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
	return ioread16(ks->hw_addr);
}
191
192
193
194
195
196
197
198
/**
 * ks_wrreg8 - write an 8-bit register through the 16-bit bus
 * @ks: device private data
 * @offset: register address
 * @value: byte to write
 *
 * Mirror of ks_rdreg8(): positions @value in the correct byte lane of
 * the 16-bit word and sets the matching byte-enable bit.
 */
static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
{
	u8 shift_bit = (offset & 0x03);
	/* move the byte into bits 15:8 for odd offsets */
	u16 value_write = (u16)(value << ((offset & 1) << 3));
	ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit);
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
	iowrite16(value_write, ks->hw_addr);
}
207
208
209
210
211
212
213
214
215
/**
 * ks_wrreg16 - write a 16-bit register
 * @ks: device private data
 * @offset: register address (16-bit aligned)
 * @value: word to write
 */
static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
{
	ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
	iowrite16(value, ks->hw_addr);
}
222
223
224
225
226
227
228
229
230static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
231{
232 len >>= 1;
233 while (len--)
234 *wptr++ = (u16)ioread16(ks->hw_addr);
235}
236
237
238
239
240
241
242
243
244static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
245{
246 len >>= 1;
247 while (len--)
248 iowrite16(*wptr++, ks->hw_addr);
249}
250
/* Mask all device interrupt sources (does not touch the cached mask). */
static void ks_disable_int(struct ks_net *ks)
{
	ks_wrreg16(ks, KS_IER, 0x0000);
}
255
/* Re-enable the interrupt sources cached in rc_ier (see ks_setup_int). */
static void ks_enable_int(struct ks_net *ks)
{
	ks_wrreg16(ks, KS_IER, ks->rc_ier);
}
260
261
262
263
264
265
/**
 * ks_tx_fifo_space - free bytes in the TX memory
 * @ks: device private data
 *
 * The byte count occupies the low 13 bits of TXMIR; mask the rest off.
 */
static inline u16 ks_tx_fifo_space(struct ks_net *ks)
{
	return ks_rdreg16(ks, KS_TXMIR) & 0x1fff;
}
270
271
272
273
274
275
/**
 * ks_save_cmd_reg - remember the last command word before IRQ work
 * @ks: device private data
 *
 * The IRQ handler performs its own register accesses, which overwrite
 * the command window; save the cached value so process context can be
 * resumed where it left off (see ks_restore_cmd_reg).
 */
static inline void ks_save_cmd_reg(struct ks_net *ks)
{
	ks->cmd_reg_cache_int = ks->cmd_reg_cache;
}
283
284
285
286
287
288
289
/**
 * ks_restore_cmd_reg - rewrite the command word saved by ks_save_cmd_reg
 * @ks: device private data
 *
 * Restores both the cached copy and the hardware command window, so an
 * interrupted process-context register access sees a consistent state.
 */
static inline void ks_restore_cmd_reg(struct ks_net *ks)
{
	ks->cmd_reg_cache = ks->cmd_reg_cache_int;
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
}
295
296
297
298
299
300
301
302
303static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode)
304{
305 unsigned pmecr;
306
307 netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
308
309 ks_rdreg16(ks, KS_GRR);
310 pmecr = ks_rdreg16(ks, KS_PMECR);
311 pmecr &= ~PMECR_PM_MASK;
312 pmecr |= pwrmode;
313
314 ks_wrreg16(ks, KS_PMECR, pmecr);
315}
316
317
318
319
320
321
322static void ks_read_config(struct ks_net *ks)
323{
324 u16 reg_data = 0;
325
326
327 reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF;
328 reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8;
329
330
331 ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;
332
333
334
335
336
337 if (reg_data & CCR_8BIT) {
338 ks->bus_width = ENUM_BUS_8BIT;
339 ks->extra_byte = 1;
340 } else if (reg_data & CCR_16BIT) {
341 ks->bus_width = ENUM_BUS_16BIT;
342 ks->extra_byte = 2;
343 } else {
344 ks->bus_width = ENUM_BUS_32BIT;
345 ks->extra_byte = 4;
346 }
347}
348
349
350
351
352
353
354
355
356
357
358
359
360
361
/**
 * ks_soft_reset - issue a software reset via the GRR register
 * @ks: device private data
 * @op: reset operation bits (e.g. GRR_GSR for a global soft reset)
 *
 * Interrupts are masked first because the reset clears IER.  The delays
 * give the chip time to perform and then leave the reset state.
 */
static void ks_soft_reset(struct ks_net *ks, unsigned op)
{
	/* Disable interrupt first */
	ks_wrreg16(ks, KS_IER, 0x0000);
	ks_wrreg16(ks, KS_GRR, op);
	mdelay(10);	/* wait a short time to effect reset */
	ks_wrreg16(ks, KS_GRR, 0);
	mdelay(1);	/* wait for condition to clear */
}
371
372
/* Start the QMU: enable transmit, RX frame-count threshold interrupt
 * with auto-dequeue, and finally receive.
 */
static void ks_enable_qmu(struct ks_net *ks)
{
	u16 w;

	w = ks_rdreg16(ks, KS_TXCR);
	/* Enables QMU Transmit (TXCR). */
	ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);

	/* Enable RX Frame Count Threshold and Auto-Dequeue RXQ Frame
	 * Enable: the chip will interrupt once the programmed number of
	 * frames (see KS_RXFCTR in ks_setup) has been received.
	 */
	w = ks_rdreg16(ks, KS_RXQCR);
	ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);

	/* Enables QMU Receive (RXCR1). */
	w = ks_rdreg16(ks, KS_RXCR1);
	ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
	ks->enabled = true;
}
394
395static void ks_disable_qmu(struct ks_net *ks)
396{
397 u16 w;
398
399 w = ks_rdreg16(ks, KS_TXCR);
400
401
402 w &= ~TXCR_TXE;
403 ks_wrreg16(ks, KS_TXCR, w);
404
405
406 w = ks_rdreg16(ks, KS_RXCR1);
407 w &= ~RXCR1_RXE ;
408 ks_wrreg16(ks, KS_RXCR1, w);
409
410 ks->enabled = false;
411
412}
413
414
415
416
417
418
419
420
421
422
423
424
/**
 * ks_read_qmu - read one frame out of the RX FIFO
 * @ks: device private data
 * @buf: destination buffer (16-bit aligned)
 * @len: frame length in bytes (including CRC)
 *
 * Resets the RX frame pointer, opens FIFO data access (RXQCR_SDA),
 * discards the bus-width-dependent dummy bytes plus the 2-byte status
 * and 2-byte length words, then streams the dword-aligned frame data,
 * and finally closes FIFO access again.
 */
static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
{
	/* odd dummy byte (8-bit bus) handled separately via ioread8 */
	u32 r = ks->extra_byte & 0x1;
	u32 w = ks->extra_byte - r;

	/* 1. set sudo DMA mode */
	ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
	ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);

	/* 2. read prepend data: dummy bytes, 2-byte status, 2-byte length.
	 * buf is used as scratch here and overwritten by the real data
	 * read below.
	 */
	if (unlikely(r))
		ioread8(ks->hw_addr);
	ks_inblk(ks, buf, w + 2 + 2);

	/* 3. read pkt data, rounded up to a dword boundary */
	ks_inblk(ks, buf, ALIGN(len, 4));

	/* 4. reset sudo DMA Mode */
	ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
}
451
452
453
454
455
456
457
458
459
460
461static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
462{
463 u32 i;
464 struct type_frame_head *frame_hdr = ks->frame_head_info;
465 struct sk_buff *skb;
466
467 ks->frame_cnt = ks_rdreg16(ks, KS_RXFCTR) >> 8;
468
469
470 for (i = 0; i < ks->frame_cnt; i++) {
471
472 frame_hdr->sts = ks_rdreg16(ks, KS_RXFHSR);
473
474 frame_hdr->len = ks_rdreg16(ks, KS_RXFHBCR);
475 frame_hdr++;
476 }
477
478 frame_hdr = ks->frame_head_info;
479 while (ks->frame_cnt--) {
480 if (unlikely(!(frame_hdr->sts & RXFSHR_RXFV) ||
481 frame_hdr->len >= RX_BUF_SIZE ||
482 frame_hdr->len <= 0)) {
483
484
485 ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
486 netdev->stats.rx_dropped++;
487 if (!(frame_hdr->sts & RXFSHR_RXFV))
488 netdev->stats.rx_frame_errors++;
489 else
490 netdev->stats.rx_length_errors++;
491 frame_hdr++;
492 continue;
493 }
494
495 skb = netdev_alloc_skb(netdev, frame_hdr->len + 16);
496 if (likely(skb)) {
497 skb_reserve(skb, 2);
498
499 ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
500 skb_put(skb, frame_hdr->len - 4);
501 skb->protocol = eth_type_trans(skb, netdev);
502 netif_rx(skb);
503
504 netdev->stats.rx_bytes += frame_hdr->len - 4;
505 netdev->stats.rx_packets++;
506 } else {
507 ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
508 netdev->stats.rx_dropped++;
509 }
510 frame_hdr++;
511 }
512}
513
514
515
516
517
518
519
520
521static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
522{
523
524 u32 link_up_status;
525 if (ks_rdreg16(ks, KS_P1SR) & P1SR_LINK_GOOD) {
526 netif_carrier_on(netdev);
527 link_up_status = true;
528 } else {
529 netif_carrier_off(netdev);
530 link_up_status = false;
531 }
532 netif_dbg(ks, link, ks->netdev,
533 "%s: %s\n", __func__, link_up_status ? "UP" : "DOWN");
534}
535
536
537
538
539
540
541
542
543
544
545
546
/**
 * ks_irq - device interrupt handler
 * @irq: interrupt number
 * @pw: the net_device passed to request_irq()
 *
 * Reads and acknowledges ISR, then services each pending source:
 * received frames, link change, TX done (wake the queue), link-detect
 * wakeup (clear the wake event in PMECR) and RX overrun accounting.
 * The command register is saved/restored around the handler because its
 * register accesses would otherwise clobber one in progress in process
 * context.
 */
static irqreturn_t ks_irq(int irq, void *pw)
{
	struct net_device *netdev = pw;
	struct ks_net *ks = netdev_priv(netdev);
	u16 status;

	/* this should be the first in IRQ handler */
	ks_save_cmd_reg(ks);

	status = ks_rdreg16(ks, KS_ISR);
	if (unlikely(!status)) {
		/* not ours - possibly a shared line */
		ks_restore_cmd_reg(ks);
		return IRQ_NONE;
	}

	/* acknowledge every source we are about to handle */
	ks_wrreg16(ks, KS_ISR, status);

	if (likely(status & IRQ_RXI))
		ks_rcv(ks, netdev);

	if (unlikely(status & IRQ_LCI))
		ks_update_link_status(netdev, ks);

	if (unlikely(status & IRQ_TXI))
		netif_wake_queue(netdev);

	if (unlikely(status & IRQ_LDI)) {
		/* clear the link-change wake event */
		u16 pmecr = ks_rdreg16(ks, KS_PMECR);
		pmecr &= ~PMECR_WKEVT_MASK;
		ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
	}

	if (unlikely(status & IRQ_RXOI))
		ks->netdev->stats.rx_over_errors++;

	/* this should be the last in IRQ handler */
	ks_restore_cmd_reg(ks);
	return IRQ_HANDLED;
}
586
587
588
589
590
591
592
593
594
/**
 * ks_net_open - .ndo_open: bring the interface up
 * @netdev: network device
 *
 * Requests the (level-triggered) interrupt, wakes the chip into normal
 * power mode, clears and enables interrupts, starts the QMU and lets
 * the stack transmit.
 *
 * Returns 0 on success or the request_irq() error.
 */
static int ks_net_open(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	int err;

#define	KS_INT_FLAGS	IRQF_TRIGGER_LOW

	netif_dbg(ks, ifup, ks->netdev, "%s - entry\n", __func__);

	/* wire up the interrupt before touching the hardware */
	err = request_irq(netdev->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev);

	if (err) {
		pr_err("Failed to request IRQ: %d: %d\n", netdev->irq, err);
		return err;
	}

	/* wake up powermode to normal mode */
	ks_set_powermode(ks, PMECR_PM_NORMAL);
	mdelay(1);	/* wait for normal mode to take effect */

	/* clear any stale interrupt status, then enable the sources */
	ks_wrreg16(ks, KS_ISR, 0xffff);
	ks_enable_int(ks);
	ks_enable_qmu(ks);
	netif_start_queue(ks->netdev);

	netif_dbg(ks, ifup, ks->netdev, "network device up\n");

	return 0;
}
628
629
630
631
632
633
634
635
636
/**
 * ks_net_stop - .ndo_stop: take the interface down
 * @netdev: network device
 *
 * Stops the TX queue, masks and acknowledges all interrupts, halts the
 * QMU, puts the chip into soft power-down and releases the IRQ.
 */
static int ks_net_stop(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);

	netif_info(ks, ifdown, netdev, "shutting down\n");

	netif_stop_queue(netdev);

	mutex_lock(&ks->lock);

	/* turn off the IRQs and ack any outstanding */
	ks_wrreg16(ks, KS_IER, 0x0000);
	ks_wrreg16(ks, KS_ISR, 0xffff);

	/* shutdown RX/TX QMU */
	ks_disable_qmu(ks);

	/* set powermode to soft power down to save power */
	ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
	free_irq(netdev->irq, netdev);
	mutex_unlock(&ks->lock);
	return 0;
}
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
/**
 * ks_write_qmu - copy one frame into the TX FIFO and start transmission
 * @ks: device private data
 * @pdata: frame data
 * @len: frame length in bytes
 *
 * Builds the 4-byte TX header (control word + little-endian length),
 * opens FIFO data access via RXQCR_SDA, streams the header and the
 * dword-aligned payload, then triggers a manual enqueue (TXQCR_METFE)
 * and waits for the chip to accept the frame.
 */
static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
{
	/* start header at txb[0] to align txw entries */
	ks->txh.txw[0] = 0;
	ks->txh.txw[1] = cpu_to_le16(len);

	/* 1. open FIFO data access */
	ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
	/* 2. write the 4-byte status/length header */
	ks_outblk(ks, ks->txh.txw, 4);
	/* 3. write packet data, rounded up to a dword boundary */
	ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
	/* 4. close FIFO data access */
	ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
	/* 5. enqueue the frame from the TX buffer into the TXQ */
	ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
	/* 6. wait for METFE to self-clear.
	 * NOTE(review): unbounded busy-wait; a wedged chip would hang
	 * here - consider a bounded poll.
	 */
	while (ks_rdreg16(ks, KS_TXQCR) & TXQCR_METFE)
		;
}
695
696
697
698
699
700
701
702
703
704
/**
 * ks_start_xmit - .ndo_start_xmit: transmit one skb
 * @skb: buffer to transmit
 * @netdev: network device
 *
 * Runs with the device IRQ disabled and the state spinlock held so the
 * TX FIFO write is not interleaved with the interrupt handler's
 * register accesses.  The frame is only queued if the FIFO has room for
 * it plus header/alignment overhead (len + 12).
 *
 * Return: NETDEV_TX_OK, or NETDEV_TX_BUSY when the FIFO is full.
 * NOTE(review): the BUSY path does not call netif_stop_queue(), so the
 * core will immediately requeue - confirm this is intentional.
 */
static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	netdev_tx_t retv = NETDEV_TX_OK;
	struct ks_net *ks = netdev_priv(netdev);

	disable_irq(netdev->irq);
	ks_disable_int(ks);
	spin_lock(&ks->statelock);

	/* Extra space are required:
	 * 4 byte header, 4 byte alignment, plus 4 byte dword boundary
	 */
	if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) {
		ks_write_qmu(ks, skb->data, skb->len);
		/* add tx statistics */
		netdev->stats.tx_bytes += skb->len;
		netdev->stats.tx_packets++;
		dev_kfree_skb(skb);
	} else
		retv = NETDEV_TX_BUSY;
	spin_unlock(&ks->statelock);
	ks_enable_int(ks);
	enable_irq(netdev->irq);
	return retv;
}
731
732
733
734
735
736
737static void ks_start_rx(struct ks_net *ks)
738{
739 u16 cntl;
740
741
742 cntl = ks_rdreg16(ks, KS_RXCR1);
743 cntl |= RXCR1_RXE ;
744 ks_wrreg16(ks, KS_RXCR1, cntl);
745}
746
747
748
749
750
751
752static void ks_stop_rx(struct ks_net *ks)
753{
754 u16 cntl;
755
756
757 cntl = ks_rdreg16(ks, KS_RXCR1);
758 cntl &= ~RXCR1_RXE ;
759 ks_wrreg16(ks, KS_RXCR1, cntl);
760
761}
762
/* big-endian CRC-32 polynomial, used for the multicast hash */
static unsigned long const ethernet_polynomial = CRC32_POLY_BE;

/* Bit-serial CRC-32 over @length bytes, LSB of each octet first (the
 * Ethernet convention); ks_set_grpaddr() derives the 6-bit multicast
 * hash index from the top of the result.
 *
 * NOTE(review): the feedback test is `crc < 0`, i.e. the sign bit of a
 * `long`.  On 64-bit platforms that is bit 63 rather than bit 31 of the
 * classic 32-bit formulation - confirm the hash still matches what the
 * hardware computes, or consider ether_crc() from <linux/crc32.h>
 * (already included by this file).
 */
static unsigned long ether_gen_crc(int length, u8 *data)
{
	long crc = -1;	/* preset to all ones */
	while (--length >= 0) {
		u8 current_octet = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
			crc = (crc << 1) ^
				((crc < 0) ^ (current_octet & 1) ?
				ethernet_polynomial : 0);
		}
	}
	return (unsigned long)crc;
}
780
781
782
783
784
785
/**
 * ks_set_grpaddr - program the multicast hash table from mcast_lst
 * @ks: device private data
 *
 * Rebuilds the 64-bit hash shadow (mcast_bits) from the stored
 * multicast addresses - the hash index is the top 6 bits of the
 * Ethernet CRC - then writes the shadow into the four 16-bit MAHTR
 * registers, two bytes at a time.
 */
static void ks_set_grpaddr(struct ks_net *ks)
{
	u8 i;
	u32 index, position, value;

	memset(ks->mcast_bits, 0, sizeof(u8) * HW_MCAST_SIZE);

	for (i = 0; i < ks->mcast_lst_size; i++) {
		/* 6-bit hash from the top of the CRC */
		position = (ether_gen_crc(6, ks->mcast_lst[i]) >> 26) & 0x3f;
		index = position >> 3;		/* byte within the shadow */
		value = 1 << (position & 7);	/* bit within the byte */
		ks->mcast_bits[index] |= (u8)value;
	}

	/* write shadow bytes pairwise into the 16-bit hash registers */
	for (i = 0; i < HW_MCAST_SIZE; i++) {
		if (i & 1) {
			ks_wrreg16(ks, (u16)((KS_MAHTR0 + i) & ~1),
				(ks->mcast_bits[i] << 8) |
				ks->mcast_bits[i - 1]);
		}
	}
}
808
809
810
811
812
813
814
815
816static void ks_clear_mcast(struct ks_net *ks)
817{
818 u16 i, mcast_size;
819 for (i = 0; i < HW_MCAST_SIZE; i++)
820 ks->mcast_bits[i] = 0;
821
822 mcast_size = HW_MCAST_SIZE >> 2;
823 for (i = 0; i < mcast_size; i++)
824 ks_wrreg16(ks, KS_MAHTR0 + (2*i), 0);
825}
826
827static void ks_set_promis(struct ks_net *ks, u16 promiscuous_mode)
828{
829 u16 cntl;
830 ks->promiscuous = promiscuous_mode;
831 ks_stop_rx(ks);
832 cntl = ks_rdreg16(ks, KS_RXCR1);
833
834 cntl &= ~RXCR1_FILTER_MASK;
835 if (promiscuous_mode)
836
837 cntl |= RXCR1_RXAE | RXCR1_RXINVF;
838 else
839
840 cntl |= RXCR1_RXPAFMA;
841
842 ks_wrreg16(ks, KS_RXCR1, cntl);
843
844 if (ks->enabled)
845 ks_start_rx(ks);
846
847}
848
849static void ks_set_mcast(struct ks_net *ks, u16 mcast)
850{
851 u16 cntl;
852
853 ks->all_mcast = mcast;
854 ks_stop_rx(ks);
855 cntl = ks_rdreg16(ks, KS_RXCR1);
856 cntl &= ~RXCR1_FILTER_MASK;
857 if (mcast)
858
859 cntl |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
860 else
861
862
863
864
865 cntl |= RXCR1_RXPAFMA;
866
867 ks_wrreg16(ks, KS_RXCR1, cntl);
868
869 if (ks->enabled)
870 ks_start_rx(ks);
871}
872
/**
 * ks_set_rx_mode - .ndo_set_rx_mode: apply the netdev filter flags
 * @netdev: network device
 *
 * Translates IFF_PROMISC / IFF_ALLMULTI into the RXCR1 filter bits and
 * programs the multicast hash table from the device's multicast list
 * (falling back to accept-all-multicast when the list exceeds
 * MAX_MCAST_LST entries).
 */
static void ks_set_rx_mode(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	struct netdev_hw_addr *ha;

	/* Turn on/off promiscuous mode. */
	if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC)
		ks_set_promis(ks,
			(u16)((netdev->flags & IFF_PROMISC) == IFF_PROMISC));
	/* Turn on/off all mcast mode. */
	else if ((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI)
		ks_set_mcast(ks,
			(u16)((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI));
	else
		ks_set_promis(ks, false);

	if ((netdev->flags & IFF_MULTICAST) && netdev_mc_count(netdev)) {
		if (netdev_mc_count(netdev) <= MAX_MCAST_LST) {
			int i = 0;

			/* copy the addresses, bounded by MAX_MCAST_LST */
			netdev_for_each_mc_addr(ha, netdev) {
				if (i >= MAX_MCAST_LST)
					break;
				memcpy(ks->mcast_lst[i++], ha->addr, ETH_ALEN);
			}
			ks->mcast_lst_size = (u8)i;
			ks_set_grpaddr(ks);
		} else {
			/* list is too big for perfect filtering -
			 * accept all multicast instead
			 */
			ks->mcast_lst_size = MAX_MCAST_LST;
			ks_set_mcast(ks, true);
		}
	} else {
		ks->mcast_lst_size = 0;
		ks_clear_mcast(ks);
	}
}
913
914static void ks_set_mac(struct ks_net *ks, u8 *data)
915{
916 u16 *pw = (u16 *)data;
917 u16 w, u;
918
919 ks_stop_rx(ks);
920
921 u = *pw++;
922 w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
923 ks_wrreg16(ks, KS_MARH, w);
924
925 u = *pw++;
926 w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
927 ks_wrreg16(ks, KS_MARM, w);
928
929 u = *pw;
930 w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
931 ks_wrreg16(ks, KS_MARL, w);
932
933 memcpy(ks->mac_addr, data, ETH_ALEN);
934
935 if (ks->enabled)
936 ks_start_rx(ks);
937}
938
939static int ks_set_mac_address(struct net_device *netdev, void *paddr)
940{
941 struct ks_net *ks = netdev_priv(netdev);
942 struct sockaddr *addr = paddr;
943 u8 *da;
944
945 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
946
947 da = (u8 *)netdev->dev_addr;
948
949 ks_set_mac(ks, da);
950 return 0;
951}
952
953static int ks_net_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
954{
955 struct ks_net *ks = netdev_priv(netdev);
956
957 if (!netif_running(netdev))
958 return -EINVAL;
959
960 return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
961}
962
/* net_device operations for the KS8851-16MLL */
static const struct net_device_ops ks_netdev_ops = {
	.ndo_open = ks_net_open,
	.ndo_stop = ks_net_stop,
	.ndo_do_ioctl = ks_net_ioctl,
	.ndo_start_xmit = ks_start_xmit,
	.ndo_set_mac_address = ks_set_mac_address,
	.ndo_set_rx_mode = ks_set_rx_mode,
	.ndo_validate_addr = eth_validate_addr,
};
972
973
974
/* ethtool .get_drvinfo: report driver name, version and bus location */
static void ks_get_drvinfo(struct net_device *netdev,
 struct ethtool_drvinfo *di)
{
	strlcpy(di->driver, DRV_NAME, sizeof(di->driver));
	strlcpy(di->version, "1.00", sizeof(di->version));
	strlcpy(di->bus_info, dev_name(netdev->dev.parent),
		sizeof(di->bus_info));
}
983
/* ethtool .get_msglevel: report the current NETIF_MSG_* bitmask */
static u32 ks_get_msglevel(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	return ks->msg_enable;
}
989
/* ethtool .set_msglevel: set the NETIF_MSG_* bitmask */
static void ks_set_msglevel(struct net_device *netdev, u32 to)
{
	struct ks_net *ks = netdev_priv(netdev);
	ks->msg_enable = to;
}
995
/* ethtool .get_link_ksettings: delegate to the generic MII layer */
static int ks_get_link_ksettings(struct net_device *netdev,
 struct ethtool_link_ksettings *cmd)
{
	struct ks_net *ks = netdev_priv(netdev);

	mii_ethtool_get_link_ksettings(&ks->mii, cmd);

	return 0;
}
1005
/* ethtool .set_link_ksettings: delegate to the generic MII layer */
static int ks_set_link_ksettings(struct net_device *netdev,
 const struct ethtool_link_ksettings *cmd)
{
	struct ks_net *ks = netdev_priv(netdev);
	return mii_ethtool_set_link_ksettings(&ks->mii, cmd);
}
1012
/* ethtool .get_link: report link state via the generic MII layer */
static u32 ks_get_link(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	return mii_link_ok(&ks->mii);
}
1018
/* ethtool .nway_reset: restart autonegotiation via the MII layer */
static int ks_nway_reset(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	return mii_nway_restart(&ks->mii);
}
1024
/* ethtool operations for the KS8851-16MLL */
static const struct ethtool_ops ks_ethtool_ops = {
	.get_drvinfo = ks_get_drvinfo,
	.get_msglevel = ks_get_msglevel,
	.set_msglevel = ks_set_msglevel,
	.get_link = ks_get_link,
	.nway_reset = ks_nway_reset,
	.get_link_ksettings = ks_get_link_ksettings,
	.set_link_ksettings = ks_set_link_ksettings,
};
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045static int ks_phy_reg(int reg)
1046{
1047 switch (reg) {
1048 case MII_BMCR:
1049 return KS_P1MBCR;
1050 case MII_BMSR:
1051 return KS_P1MBSR;
1052 case MII_PHYSID1:
1053 return KS_PHY1ILR;
1054 case MII_PHYSID2:
1055 return KS_PHY1IHR;
1056 case MII_ADVERTISE:
1057 return KS_P1ANAR;
1058 case MII_LPA:
1059 return KS_P1ANLPR;
1060 }
1061
1062 return 0x0;
1063}
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080static int ks_phy_read(struct net_device *netdev, int phy_addr, int reg)
1081{
1082 struct ks_net *ks = netdev_priv(netdev);
1083 int ksreg;
1084 int result;
1085
1086 ksreg = ks_phy_reg(reg);
1087 if (!ksreg)
1088 return 0x0;
1089
1090 mutex_lock(&ks->lock);
1091 result = ks_rdreg16(ks, ksreg);
1092 mutex_unlock(&ks->lock);
1093
1094 return result;
1095}
1096
1097static void ks_phy_write(struct net_device *netdev,
1098 int phy, int reg, int value)
1099{
1100 struct ks_net *ks = netdev_priv(netdev);
1101 int ksreg;
1102
1103 ksreg = ks_phy_reg(reg);
1104 if (ksreg) {
1105 mutex_lock(&ks->lock);
1106 ks_wrreg16(ks, ksreg, value);
1107 mutex_unlock(&ks->lock);
1108 }
1109}
1110
1111
1112
1113
1114
1115
1116
1117static int ks_read_selftest(struct ks_net *ks)
1118{
1119 unsigned both_done = MBIR_TXMBF | MBIR_RXMBF;
1120 int ret = 0;
1121 unsigned rd;
1122
1123 rd = ks_rdreg16(ks, KS_MBIR);
1124
1125 if ((rd & both_done) != both_done) {
1126 netdev_warn(ks->netdev, "Memory selftest not finished\n");
1127 return 0;
1128 }
1129
1130 if (rd & MBIR_TXMBFA) {
1131 netdev_err(ks->netdev, "TX memory selftest fails\n");
1132 ret |= 1;
1133 }
1134
1135 if (rd & MBIR_RXMBFA) {
1136 netdev_err(ks->netdev, "RX memory selftest fails\n");
1137 ret |= 2;
1138 }
1139
1140 netdev_info(ks->netdev, "the selftest passes\n");
1141 return ret;
1142}
1143
/**
 * ks_setup - program the default QMU / filter configuration
 * @ks: device private data
 *
 * Configures TX/RX FIFO pointer auto-increment, a one-frame RX
 * interrupt threshold, the cached RXQCR command bits, and the TXCR /
 * RXCR1 feature bits matching the currently requested filter mode.
 */
static void ks_setup(struct ks_net *ks)
{
	u16 w;

	/* TX FIFO write pointer auto-increments */
	ks_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI);

	/* RX FIFO read pointer auto-increments */
	ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);

	/* interrupt after every single received frame */
	ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_RXFCT_MASK);

	/* cache and program the RXQ command bits */
	ks->rc_rxqcr = RXQCR_CMD_CNTL;
	ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);

	/* NOTE(review): this clears the full-duplex bit in the port-1
	 * PHY control register - confirm half duplex is the intended
	 * default for this board.
	 */
	w = ks_rdreg16(ks, KS_P1MBCR);
	w &= ~BMCR_FULLDPLX;
	ks_wrreg16(ks, KS_P1MBCR, w);

	/* TX: flow control, padding, CRC and IP checksum generation */
	w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
	ks_wrreg16(ks, KS_TXCR, w);

	/* RX: flow control, broadcast/unicast/multicast, IP checksum check */
	w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE | RXCR1_RXME | RXCR1_RXIPFCC;

	if (ks->promiscuous)		/* bypass the address filter entirely */
		w |= (RXCR1_RXAE | RXCR1_RXINVF);
	else if (ks->all_mcast)		/* accept all multicast */
		w |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
	else				/* perfect-address match only */
		w |= RXCR1_RXPAFMA;

	ks_wrreg16(ks, KS_RXCR1, w);
}
1189
1190
1191static void ks_setup_int(struct ks_net *ks)
1192{
1193 ks->rc_ier = 0x00;
1194
1195 ks_wrreg16(ks, KS_ISR, 0xffff);
1196
1197
1198 ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI);
1199}
1200
/**
 * ks_hw_init - initialise driver state and program the default MAC
 * @ks: device private data
 *
 * Allocates the per-frame RX header snapshot array (device-managed, so
 * no explicit free) and programs the compiled-in default MAC address.
 *
 * Return: true on success, false if the allocation failed.
 * NOTE(review): bool-as-int return, and the caller in ks8851_probe()
 * never checks it - consider returning 0/-ENOMEM and checking.
 */
static int ks_hw_init(struct ks_net *ks)
{
#define	MHEADER_SIZE	(sizeof(struct type_frame_head) * MAX_RECV_FRAMES)
	ks->promiscuous = 0;
	ks->all_mcast = 0;
	ks->mcast_lst_size = 0;

	ks->frame_head_info = devm_kmalloc(&ks->pdev->dev, MHEADER_SIZE,
					   GFP_KERNEL);
	if (!ks->frame_head_info)
		return false;

	ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS);
	return true;
}
1216
#if defined(CONFIG_OF)
/* devicetree match table: binds to "micrel,ks8851-mll" nodes */
static const struct of_device_id ks8851_ml_dt_ids[] = {
	{ .compatible = "micrel,ks8851-mll" },
	{ }
};
MODULE_DEVICE_TABLE(of, ks8851_ml_dt_ids);
#endif
1224
1225static int ks8851_probe(struct platform_device *pdev)
1226{
1227 int err;
1228 struct resource *io_d, *io_c;
1229 struct net_device *netdev;
1230 struct ks_net *ks;
1231 u16 id, data;
1232 const char *mac;
1233
1234 netdev = alloc_etherdev(sizeof(struct ks_net));
1235 if (!netdev)
1236 return -ENOMEM;
1237
1238 SET_NETDEV_DEV(netdev, &pdev->dev);
1239
1240 ks = netdev_priv(netdev);
1241 ks->netdev = netdev;
1242
1243 io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1244 ks->hw_addr = devm_ioremap_resource(&pdev->dev, io_d);
1245 if (IS_ERR(ks->hw_addr)) {
1246 err = PTR_ERR(ks->hw_addr);
1247 goto err_free;
1248 }
1249
1250 io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1251 ks->hw_addr_cmd = devm_ioremap_resource(&pdev->dev, io_c);
1252 if (IS_ERR(ks->hw_addr_cmd)) {
1253 err = PTR_ERR(ks->hw_addr_cmd);
1254 goto err_free;
1255 }
1256
1257 netdev->irq = platform_get_irq(pdev, 0);
1258
1259 if ((int)netdev->irq < 0) {
1260 err = netdev->irq;
1261 goto err_free;
1262 }
1263
1264 ks->pdev = pdev;
1265
1266 mutex_init(&ks->lock);
1267 spin_lock_init(&ks->statelock);
1268
1269 netdev->netdev_ops = &ks_netdev_ops;
1270 netdev->ethtool_ops = &ks_ethtool_ops;
1271
1272
1273 ks->mii.dev = netdev;
1274 ks->mii.phy_id = 1,
1275 ks->mii.phy_id_mask = 1;
1276 ks->mii.reg_num_mask = 0xf;
1277 ks->mii.mdio_read = ks_phy_read;
1278 ks->mii.mdio_write = ks_phy_write;
1279
1280 netdev_info(netdev, "message enable is %d\n", msg_enable);
1281
1282 ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
1283 NETIF_MSG_PROBE |
1284 NETIF_MSG_LINK));
1285 ks_read_config(ks);
1286
1287
1288 if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
1289 netdev_err(netdev, "failed to read device ID\n");
1290 err = -ENODEV;
1291 goto err_free;
1292 }
1293
1294 if (ks_read_selftest(ks)) {
1295 netdev_err(netdev, "failed to read device ID\n");
1296 err = -ENODEV;
1297 goto err_free;
1298 }
1299
1300 err = register_netdev(netdev);
1301 if (err)
1302 goto err_free;
1303
1304 platform_set_drvdata(pdev, netdev);
1305
1306 ks_soft_reset(ks, GRR_GSR);
1307 ks_hw_init(ks);
1308 ks_disable_qmu(ks);
1309 ks_setup(ks);
1310 ks_setup_int(ks);
1311
1312 data = ks_rdreg16(ks, KS_OBCR);
1313 ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16mA);
1314
1315
1316 if (pdev->dev.of_node) {
1317 mac = of_get_mac_address(pdev->dev.of_node);
1318 if (!IS_ERR(mac))
1319 ether_addr_copy(ks->mac_addr, mac);
1320 } else {
1321 struct ks8851_mll_platform_data *pdata;
1322
1323 pdata = dev_get_platdata(&pdev->dev);
1324 if (!pdata) {
1325 netdev_err(netdev, "No platform data\n");
1326 err = -ENODEV;
1327 goto err_pdata;
1328 }
1329 memcpy(ks->mac_addr, pdata->mac_addr, ETH_ALEN);
1330 }
1331 if (!is_valid_ether_addr(ks->mac_addr)) {
1332
1333 eth_random_addr(ks->mac_addr);
1334 netdev_info(netdev, "Using random mac address\n");
1335 }
1336 netdev_info(netdev, "Mac address is: %pM\n", ks->mac_addr);
1337
1338 memcpy(netdev->dev_addr, ks->mac_addr, ETH_ALEN);
1339
1340 ks_set_mac(ks, netdev->dev_addr);
1341
1342 id = ks_rdreg16(ks, KS_CIDER);
1343
1344 netdev_info(netdev, "Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
1345 (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
1346 return 0;
1347
1348err_pdata:
1349 unregister_netdev(netdev);
1350err_free:
1351 free_netdev(netdev);
1352 return err;
1353}
1354
/* platform driver remove: undo registration; the register mappings and
 * frame-header buffer are device-managed and released automatically.
 */
static int ks8851_remove(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);

	unregister_netdev(netdev);
	free_netdev(netdev);
	return 0;

}
1364
/* platform driver glue; of_match_ptr() compiles to NULL without CONFIG_OF */
static struct platform_driver ks8851_platform_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table	= of_match_ptr(ks8851_ml_dt_ids),
	},
	.probe = ks8851_probe,
	.remove = ks8851_remove,
};
1373
module_platform_driver(ks8851_platform_driver);

MODULE_DESCRIPTION("KS8851 MLL Network driver");
MODULE_AUTHOR("David Choi <david.choi@micrel.com>");
MODULE_LICENSE("GPL");
/* "message" feeds netif_msg_init(); bits select NETIF_MSG_* categories */
module_param_named(message, msg_enable, int, 0);
MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
1381
1382