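/*
 * ks8851_mll: Micrel KS8851 MLL (16-bit parallel bus) Ethernet network driver.
 */
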
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/cache.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/ks8851_mll.h>

#define DRV_NAME "ks8851_mll"

static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
#define MAX_RECV_FRAMES 255
#define MAX_BUF_SIZE 2048
#define TX_BUF_SIZE 2000
#define RX_BUF_SIZE 2000

#define KS_CCR 0x08
#define CCR_EEPROM (1 << 9)
#define CCR_SPI (1 << 8)
#define CCR_8BIT (1 << 7)
#define CCR_16BIT (1 << 6)
#define CCR_32BIT (1 << 5)
#define CCR_SHARED (1 << 4)
#define CCR_32PIN (1 << 0)

#define KS_MARL 0x10
#define KS_MARM 0x12
#define KS_MARH 0x14

#define KS_OBCR 0x20
#define OBCR_ODS_16MA (1 << 6)

#define KS_EEPCR 0x22
#define EEPCR_EESA (1 << 4)
#define EEPCR_EESB (1 << 3)
#define EEPCR_EEDO (1 << 2)
#define EEPCR_EESCK (1 << 1)
#define EEPCR_EECS (1 << 0)

#define KS_MBIR 0x24
#define MBIR_TXMBF (1 << 12)
#define MBIR_TXMBFA (1 << 11)
#define MBIR_RXMBF (1 << 4)
#define MBIR_RXMBFA (1 << 3)

#define KS_GRR 0x26
#define GRR_QMU (1 << 1)
#define GRR_GSR (1 << 0)

#define KS_WFCR 0x2A
#define WFCR_MPRXE (1 << 7)
#define WFCR_WF3E (1 << 3)
#define WFCR_WF2E (1 << 2)
#define WFCR_WF1E (1 << 1)
#define WFCR_WF0E (1 << 0)

#define KS_WF0CRC0 0x30
#define KS_WF0CRC1 0x32
#define KS_WF0BM0 0x34
#define KS_WF0BM1 0x36
#define KS_WF0BM2 0x38
#define KS_WF0BM3 0x3A

#define KS_WF1CRC0 0x40
#define KS_WF1CRC1 0x42
#define KS_WF1BM0 0x44
#define KS_WF1BM1 0x46
#define KS_WF1BM2 0x48
#define KS_WF1BM3 0x4A

#define KS_WF2CRC0 0x50
#define KS_WF2CRC1 0x52
#define KS_WF2BM0 0x54
#define KS_WF2BM1 0x56
#define KS_WF2BM2 0x58
#define KS_WF2BM3 0x5A

#define KS_WF3CRC0 0x60
#define KS_WF3CRC1 0x62
#define KS_WF3BM0 0x64
#define KS_WF3BM1 0x66
#define KS_WF3BM2 0x68
#define KS_WF3BM3 0x6A

#define KS_TXCR 0x70
#define TXCR_TCGICMP (1 << 8)
#define TXCR_TCGUDP (1 << 7)
#define TXCR_TCGTCP (1 << 6)
#define TXCR_TCGIP (1 << 5)
#define TXCR_FTXQ (1 << 4)
#define TXCR_TXFCE (1 << 3)
#define TXCR_TXPE (1 << 2)
#define TXCR_TXCRC (1 << 1)
#define TXCR_TXE (1 << 0)

#define KS_TXSR 0x72
#define TXSR_TXLC (1 << 13)
#define TXSR_TXMC (1 << 12)
#define TXSR_TXFID_MASK (0x3f << 0)
#define TXSR_TXFID_SHIFT (0)
#define TXSR_TXFID_GET(_v) (((_v) >> 0) & 0x3f)

#define KS_RXCR1 0x74
#define RXCR1_FRXQ (1 << 15)
#define RXCR1_RXUDPFCC (1 << 14)
#define RXCR1_RXTCPFCC (1 << 13)
#define RXCR1_RXIPFCC (1 << 12)
#define RXCR1_RXPAFMA (1 << 11)
#define RXCR1_RXFCE (1 << 10)
#define RXCR1_RXEFE (1 << 9)
#define RXCR1_RXMAFMA (1 << 8)
#define RXCR1_RXBE (1 << 7)
#define RXCR1_RXME (1 << 6)
#define RXCR1_RXUE (1 << 5)
#define RXCR1_RXAE (1 << 4)
#define RXCR1_RXINVF (1 << 1)
#define RXCR1_RXE (1 << 0)
#define RXCR1_FILTER_MASK (RXCR1_RXINVF | RXCR1_RXAE | \
			   RXCR1_RXMAFMA | RXCR1_RXPAFMA)

#define KS_RXCR2 0x76
#define RXCR2_SRDBL_MASK (0x7 << 5)
#define RXCR2_SRDBL_SHIFT (5)
#define RXCR2_SRDBL_4B (0x0 << 5)
#define RXCR2_SRDBL_8B (0x1 << 5)
#define RXCR2_SRDBL_16B (0x2 << 5)
#define RXCR2_SRDBL_32B (0x3 << 5)

#define RXCR2_IUFFP (1 << 4)
#define RXCR2_RXIUFCEZ (1 << 3)
#define RXCR2_UDPLFE (1 << 2)
#define RXCR2_RXICMPFCC (1 << 1)
#define RXCR2_RXSAF (1 << 0)

#define KS_TXMIR 0x78

#define KS_RXFHSR 0x7C
#define RXFSHR_RXFV (1 << 15)
#define RXFSHR_RXICMPFCS (1 << 13)
#define RXFSHR_RXIPFCS (1 << 12)
#define RXFSHR_RXTCPFCS (1 << 11)
#define RXFSHR_RXUDPFCS (1 << 10)
#define RXFSHR_RXBF (1 << 7)
#define RXFSHR_RXMF (1 << 6)
#define RXFSHR_RXUF (1 << 5)
#define RXFSHR_RXMR (1 << 4)
#define RXFSHR_RXFT (1 << 3)
#define RXFSHR_RXFTL (1 << 2)
#define RXFSHR_RXRF (1 << 1)
#define RXFSHR_RXCE (1 << 0)
#define RXFSHR_ERR (RXFSHR_RXCE | RXFSHR_RXRF |\
		    RXFSHR_RXFTL | RXFSHR_RXMR |\
		    RXFSHR_RXICMPFCS | RXFSHR_RXIPFCS |\
		    RXFSHR_RXTCPFCS)
#define KS_RXFHBCR 0x7E
#define RXFHBCR_CNT_MASK 0x0FFF

#define KS_TXQCR 0x80
#define TXQCR_AETFE (1 << 2)
#define TXQCR_TXQMAM (1 << 1)
#define TXQCR_METFE (1 << 0)

#define KS_RXQCR 0x82
#define RXQCR_RXDTTS (1 << 12)
#define RXQCR_RXDBCTS (1 << 11)
#define RXQCR_RXFCTS (1 << 10)
#define RXQCR_RXIPHTOE (1 << 9)
#define RXQCR_RXDTTE (1 << 7)
#define RXQCR_RXDBCTE (1 << 6)
#define RXQCR_RXFCTE (1 << 5)
#define RXQCR_ADRFE (1 << 4)
#define RXQCR_SDA (1 << 3)
#define RXQCR_RRXEF (1 << 0)
#define RXQCR_CMD_CNTL (RXQCR_RXFCTE|RXQCR_ADRFE)

#define KS_TXFDPR 0x84
#define TXFDPR_TXFPAI (1 << 14)
#define TXFDPR_TXFP_MASK (0x7ff << 0)
#define TXFDPR_TXFP_SHIFT (0)

#define KS_RXFDPR 0x86
#define RXFDPR_RXFPAI (1 << 14)

#define KS_RXDTTR 0x8C
#define KS_RXDBCTR 0x8E

#define KS_IER 0x90
#define KS_ISR 0x92
#define IRQ_LCI (1 << 15)
#define IRQ_TXI (1 << 14)
#define IRQ_RXI (1 << 13)
#define IRQ_RXOI (1 << 11)
#define IRQ_TXPSI (1 << 9)
#define IRQ_RXPSI (1 << 8)
#define IRQ_TXSAI (1 << 6)
#define IRQ_RXWFDI (1 << 5)
#define IRQ_RXMPDI (1 << 4)
#define IRQ_LDI (1 << 3)
#define IRQ_EDI (1 << 2)
#define IRQ_SPIBEI (1 << 1)
#define IRQ_DEDI (1 << 0)

#define KS_RXFCTR 0x9C
#define RXFCTR_THRESHOLD_MASK 0x00FF

#define KS_RXFC 0x9D
#define RXFCTR_RXFC_MASK (0xff << 8)
#define RXFCTR_RXFC_SHIFT (8)
#define RXFCTR_RXFC_GET(_v) (((_v) >> 8) & 0xff)
#define RXFCTR_RXFCT_MASK (0xff << 0)
#define RXFCTR_RXFCT_SHIFT (0)

#define KS_TXNTFSR 0x9E

#define KS_MAHTR0 0xA0
#define KS_MAHTR1 0xA2
#define KS_MAHTR2 0xA4
#define KS_MAHTR3 0xA6

#define KS_FCLWR 0xB0
#define KS_FCHWR 0xB2
#define KS_FCOWR 0xB4

#define KS_CIDER 0xC0
#define CIDER_ID 0x8870
#define CIDER_REV_MASK (0x7 << 1)
#define CIDER_REV_SHIFT (1)
#define CIDER_REV_GET(_v) (((_v) >> 1) & 0x7)

#define KS_CGCR 0xC6
#define KS_IACR 0xC8
#define IACR_RDEN (1 << 12)
#define IACR_TSEL_MASK (0x3 << 10)
#define IACR_TSEL_SHIFT (10)
#define IACR_TSEL_MIB (0x3 << 10)
#define IACR_ADDR_MASK (0x1f << 0)
#define IACR_ADDR_SHIFT (0)

#define KS_IADLR 0xD0
#define KS_IAHDR 0xD2

#define KS_PMECR 0xD4
#define PMECR_PME_DELAY (1 << 14)
#define PMECR_PME_POL (1 << 12)
#define PMECR_WOL_WAKEUP (1 << 11)
#define PMECR_WOL_MAGICPKT (1 << 10)
#define PMECR_WOL_LINKUP (1 << 9)
#define PMECR_WOL_ENERGY (1 << 8)
#define PMECR_AUTO_WAKE_EN (1 << 7)
#define PMECR_WAKEUP_NORMAL (1 << 6)
#define PMECR_WKEVT_MASK (0xf << 2)
#define PMECR_WKEVT_SHIFT (2)
#define PMECR_WKEVT_GET(_v) (((_v) >> 2) & 0xf)
#define PMECR_WKEVT_ENERGY (0x1 << 2)
#define PMECR_WKEVT_LINK (0x2 << 2)
#define PMECR_WKEVT_MAGICPKT (0x4 << 2)
#define PMECR_WKEVT_FRAME (0x8 << 2)
#define PMECR_PM_MASK (0x3 << 0)
#define PMECR_PM_SHIFT (0)
#define PMECR_PM_NORMAL (0x0 << 0)
#define PMECR_PM_ENERGY (0x1 << 0)
#define PMECR_PM_SOFTDOWN (0x2 << 0)
#define PMECR_PM_POWERSAVE (0x3 << 0)

#define KS_P1MBCR 0xE4
#define P1MBCR_FORCE_FDX (1 << 8)

#define KS_P1MBSR 0xE6
#define P1MBSR_AN_COMPLETE (1 << 5)
#define P1MBSR_AN_CAPABLE (1 << 3)
#define P1MBSR_LINK_UP (1 << 2)

#define KS_PHY1ILR 0xE8
#define KS_PHY1IHR 0xEA
#define KS_P1ANAR 0xEC
#define KS_P1ANLPR 0xEE

#define KS_P1SCLMD 0xF4
#define P1SCLMD_LEDOFF (1 << 15)
#define P1SCLMD_TXIDS (1 << 14)
#define P1SCLMD_RESTARTAN (1 << 13)
#define P1SCLMD_DISAUTOMDIX (1 << 10)
#define P1SCLMD_FORCEMDIX (1 << 9)
#define P1SCLMD_AUTONEGEN (1 << 7)
#define P1SCLMD_FORCE100 (1 << 6)
#define P1SCLMD_FORCEFDX (1 << 5)
#define P1SCLMD_ADV_FLOW (1 << 4)
#define P1SCLMD_ADV_100BT_FDX (1 << 3)
#define P1SCLMD_ADV_100BT_HDX (1 << 2)
#define P1SCLMD_ADV_10BT_FDX (1 << 1)
#define P1SCLMD_ADV_10BT_HDX (1 << 0)

#define KS_P1CR 0xF6
#define P1CR_HP_MDIX (1 << 15)
#define P1CR_REV_POL (1 << 13)
#define P1CR_OP_100M (1 << 10)
#define P1CR_OP_FDX (1 << 9)
#define P1CR_OP_MDI (1 << 7)
#define P1CR_AN_DONE (1 << 6)
#define P1CR_LINK_GOOD (1 << 5)
#define P1CR_PNTR_FLOW (1 << 4)
#define P1CR_PNTR_100BT_FDX (1 << 3)
#define P1CR_PNTR_100BT_HDX (1 << 2)
#define P1CR_PNTR_10BT_FDX (1 << 1)
#define P1CR_PNTR_10BT_HDX (1 << 0)

#define TXFR_TXIC (1 << 15)
#define TXFR_TXFID_MASK (0x3f << 0)
#define TXFR_TXFID_SHIFT (0)

#define KS_P1SR 0xF8
#define P1SR_HP_MDIX (1 << 15)
#define P1SR_REV_POL (1 << 13)
#define P1SR_OP_100M (1 << 10)
#define P1SR_OP_FDX (1 << 9)
#define P1SR_OP_MDI (1 << 7)
#define P1SR_AN_DONE (1 << 6)
#define P1SR_LINK_GOOD (1 << 5)
#define P1SR_PNTR_FLOW (1 << 4)
#define P1SR_PNTR_100BT_FDX (1 << 3)
#define P1SR_PNTR_100BT_HDX (1 << 2)
#define P1SR_PNTR_10BT_FDX (1 << 1)
#define P1SR_PNTR_10BT_HDX (1 << 0)

#define ENUM_BUS_NONE 0
#define ENUM_BUS_8BIT 1
#define ENUM_BUS_16BIT 2
#define ENUM_BUS_32BIT 3

#define MAX_MCAST_LST 32
#define HW_MCAST_SIZE 8

union ks_tx_hdr {
	u8 txb[4];
	__le16 txw[2];
};

struct type_frame_head {
	u16 sts;
	u16 len;
};

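/*
 * struct ks_net - driver private data
 *
 * Holds the mapped data/command windows, cached copies of the command,
 * TXCR, RXQCR and IER registers, the RX frame header table filled in by
 * the interrupt handler, MII state and the multicast/promiscuous
 * configuration for one KS8851 MLL device.
 */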
struct ks_net {
	struct net_device *netdev;
	void __iomem *hw_addr;
	void __iomem *hw_addr_cmd;
	union ks_tx_hdr txh ____cacheline_aligned;
	struct mutex lock;
	struct platform_device *pdev;
	struct mii_if_info mii;
	struct type_frame_head *frame_head_info;
	spinlock_t statelock;
	u32 msg_enable;
	u32 frame_cnt;
	int bus_width;

	u16 rc_rxqcr;
	u16 rc_txcr;
	u16 rc_ier;
	u16 sharedbus;
	u16 cmd_reg_cache;
	u16 cmd_reg_cache_int;
	u16 promiscuous;
	u16 all_mcast;
	u16 mcast_lst_size;
	u8 mcast_lst[MAX_MCAST_LST][ETH_ALEN];
	u8 mcast_bits[HW_MCAST_SIZE];
	u8 mac_addr[6];
	u8 fid;
	u8 extra_byte;
	u8 enabled;
};

static int msg_enable;

#define BE3 0x8000
#define BE2 0x4000
#define BE1 0x2000
#define BE0 0x1000

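/**
 * ks_rdreg8 - read an 8-bit register from the device
 * @ks: the chip state
 * @offset: register address
 *
 * Selects the byte lane for @offset via the command window, then reads
 * the 16-bit data window and returns the addressed byte.
 */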
static u8 ks_rdreg8(struct ks_net *ks, int offset)
{
	u16 data;
	u8 shift_bit = offset & 0x03;
	u8 shift_data = (offset & 1) << 3;
	ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit);
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
	data = ioread16(ks->hw_addr);
	return (u8)(data >> shift_data);
}

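/**
 * ks_rdreg16 - read a 16-bit register from the device
 * @ks: the chip state
 * @offset: register address
 *
 * Writes the register address (with both byte enables) to the command
 * window and returns the value read from the data window.
 */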
static u16 ks_rdreg16(struct ks_net *ks, int offset)
{
	ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
	return ioread16(ks->hw_addr);
}

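/**
 * ks_wrreg8 - write an 8-bit value to a device register
 * @ks: the chip state
 * @offset: register address
 * @value: value to write
 */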
static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
{
	u8 shift_bit = (offset & 0x03);
	u16 value_write = (u16)(value << ((offset & 1) << 3));
	ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit);
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
	iowrite16(value_write, ks->hw_addr);
}

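/**
 * ks_wrreg16 - write a 16-bit value to a device register
 * @ks: the chip state
 * @offset: register address
 * @value: value to write
 */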
static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
{
	ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
	iowrite16(value, ks->hw_addr);
}

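/**
 * ks_inblk - read a block of data from the QMU, 16 bits at a time
 * @ks: the chip state
 * @wptr: destination buffer
 * @len: length in bytes (rounded down to a multiple of 2)
 */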
static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
{
	len >>= 1;
	while (len--)
		*wptr++ = (u16)ioread16(ks->hw_addr);
}

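/**
 * ks_outblk - write a block of data to the QMU, 16 bits at a time
 * @ks: the chip state
 * @wptr: source buffer
 * @len: length in bytes (rounded down to a multiple of 2)
 */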
static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
{
	len >>= 1;
	while (len--)
		iowrite16(*wptr++, ks->hw_addr);
}

static void ks_disable_int(struct ks_net *ks)
{
	ks_wrreg16(ks, KS_IER, 0x0000);
}

static void ks_enable_int(struct ks_net *ks)
{
	ks_wrreg16(ks, KS_IER, ks->rc_ier);
}

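/**
 * ks_tx_fifo_space - return the available hardware TX buffer size
 * @ks: the chip state
 */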
static inline u16 ks_tx_fifo_space(struct ks_net *ks)
{
	return ks_rdreg16(ks, KS_TXMIR) & 0x1fff;
}

/**
 * ks_save_cmd_reg - save the current command register contents
 * @ks: the chip state
 *
 * The command register holds the last register address written to the
 * command window; the interrupt handler saves it on entry and restores
 * it on exit so an interrupted register access is not corrupted.
 */
static inline void ks_save_cmd_reg(struct ks_net *ks)
{
	ks->cmd_reg_cache_int = ks->cmd_reg_cache;
}

/**
 * ks_restore_cmd_reg - restore the saved command register contents
 * @ks: the chip state
 */
static inline void ks_restore_cmd_reg(struct ks_net *ks)
{
	ks->cmd_reg_cache = ks->cmd_reg_cache_int;
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
}

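/**
 * ks_set_powermode - set the power mode of the device
 * @ks: the chip state
 * @pwrmode: one of the PMECR_PM_* power modes
 */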
static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode)
{
	unsigned pmecr;

	netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);

	ks_rdreg16(ks, KS_GRR);
	pmecr = ks_rdreg16(ks, KS_PMECR);
	pmecr &= ~PMECR_PM_MASK;
	pmecr |= pwrmode;

	ks_wrreg16(ks, KS_PMECR, pmecr);
}

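/**
 * ks_read_config - read the chip configuration straps
 * @ks: the chip state
 *
 * Reads CCR and records the shared-bus flag, the bus width and the
 * number of dummy bytes that precede RX data in DMA mode.
 */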
static void ks_read_config(struct ks_net *ks)
{
	u16 reg_data = 0;

	reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF;
	reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8;

	ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;

	if (reg_data & CCR_8BIT) {
		ks->bus_width = ENUM_BUS_8BIT;
		ks->extra_byte = 1;
	} else if (reg_data & CCR_16BIT) {
		ks->bus_width = ENUM_BUS_16BIT;
		ks->extra_byte = 2;
	} else {
		ks->bus_width = ENUM_BUS_32BIT;
		ks->extra_byte = 4;
	}
}

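/**
 * ks_soft_reset - issue a software reset to the device
 * @ks: the chip state
 * @op: the GRR reset bit(s) to pulse (GRR_GSR and/or GRR_QMU)
 */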
static void ks_soft_reset(struct ks_net *ks, unsigned op)
{
	/* disable interrupts first */
	ks_wrreg16(ks, KS_IER, 0x0000);
	ks_wrreg16(ks, KS_GRR, op);
	mdelay(10);	/* wait a short time for the reset to take effect */
	ks_wrreg16(ks, KS_GRR, 0);
	mdelay(1);	/* wait for the condition to clear */
}

static void ks_enable_qmu(struct ks_net *ks)
{
	u16 w;

	/* enable QMU transmit */
	w = ks_rdreg16(ks, KS_TXCR);
	ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);

	/* enable the RX frame count threshold interrupt source */
	w = ks_rdreg16(ks, KS_RXQCR);
	ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);

	/* enable QMU receive */
	w = ks_rdreg16(ks, KS_RXCR1);
	ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
	ks->enabled = true;
}

static void ks_disable_qmu(struct ks_net *ks)
{
	u16 w;

	/* disable QMU transmit */
	w = ks_rdreg16(ks, KS_TXCR);
	w &= ~TXCR_TXE;
	ks_wrreg16(ks, KS_TXCR, w);

	/* disable QMU receive */
	w = ks_rdreg16(ks, KS_RXCR1);
	w &= ~RXCR1_RXE;
	ks_wrreg16(ks, KS_RXCR1, w);

	ks->enabled = false;
}

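/**
 * ks_read_qmu - read one received frame out of the QMU
 * @ks: the chip state
 * @buf: destination buffer
 * @len: frame length in bytes (rounded up to a multiple of 4)
 *
 * Resets the RX frame pointer, enters pseudo-DMA (direct FIFO access)
 * mode, consumes the dummy and status/length prepend bytes, copies the
 * frame data into @buf and leaves pseudo-DMA mode again.
 */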
static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
{
	u32 r = ks->extra_byte & 0x1;
	u32 w = ks->extra_byte - r;

	/* 1. set pseudo DMA mode */
	ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
	ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);

	/* 2. read the prepended dummy and status/length bytes */
	if (unlikely(r))
		ioread8(ks->hw_addr);
	ks_inblk(ks, buf, w + 2 + 2);

	/* 3. read the packet data */
	ks_inblk(ks, buf, ALIGN(len, 4));

	/* 4. leave pseudo DMA mode */
	ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
}

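/**
 * ks_rcv - read and deliver the frames waiting in the QMU
 * @ks: the chip state
 * @netdev: the network device being used
 *
 * Reads the pending frame count, snapshots the status and byte count of
 * each frame, then copies the good frames into freshly allocated skbs
 * and hands them to the stack; bad frames are released with the RRXEF
 * command and accounted as errors.
 */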
static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
{
	u32 i;
	struct type_frame_head *frame_hdr = ks->frame_head_info;
	struct sk_buff *skb;

	ks->frame_cnt = ks_rdreg16(ks, KS_RXFCTR) >> 8;

	/* read all the frame headers first */
	for (i = 0; i < ks->frame_cnt; i++) {
		/* received packet status */
		frame_hdr->sts = ks_rdreg16(ks, KS_RXFHSR);
		/* received packet length */
		frame_hdr->len = ks_rdreg16(ks, KS_RXFHBCR);
		frame_hdr++;
	}

	frame_hdr = ks->frame_head_info;
	while (ks->frame_cnt--) {
		if (unlikely(!(frame_hdr->sts & RXFSHR_RXFV) ||
			     frame_hdr->len >= RX_BUF_SIZE ||
			     frame_hdr->len <= 0)) {

			/* discard the bad frame and record the error */
			ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
			netdev->stats.rx_dropped++;
			if (!(frame_hdr->sts & RXFSHR_RXFV))
				netdev->stats.rx_frame_errors++;
			else
				netdev->stats.rx_length_errors++;
			frame_hdr++;
			continue;
		}

		skb = netdev_alloc_skb(netdev, frame_hdr->len + 16);
		if (likely(skb)) {
			skb_reserve(skb, 2);
			/* read the data block, including the 4-byte CRC */
			ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
			skb_put(skb, frame_hdr->len - 4);
			skb->protocol = eth_type_trans(skb, netdev);
			netif_rx(skb);
			/* exclude the CRC from the byte count */
			netdev->stats.rx_bytes += frame_hdr->len - 4;
			netdev->stats.rx_packets++;
		} else {
			ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
			netdev->stats.rx_dropped++;
		}
		frame_hdr++;
	}
}

/**
 * ks_update_link_status - link status update
 * @netdev: the network device being used
 * @ks: the chip state
 */
static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
{
	/* check the status of the link */
	u32 link_up_status;

	if (ks_rdreg16(ks, KS_P1SR) & P1SR_LINK_GOOD) {
		netif_carrier_on(netdev);
		link_up_status = true;
	} else {
		netif_carrier_off(netdev);
		link_up_status = false;
	}
	netif_dbg(ks, link, ks->netdev,
		  "%s: %s\n", __func__, link_up_status ? "UP" : "DOWN");
}

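/**
 * ks_irq - device interrupt handler
 * @irq: interrupt number
 * @pw: the net_device passed to request_irq()
 *
 * Acknowledges the pending interrupt sources and dispatches them:
 * received frames, link changes, transmit completion and RX overruns.
 */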
static irqreturn_t ks_irq(int irq, void *pw)
{
	struct net_device *netdev = pw;
	struct ks_net *ks = netdev_priv(netdev);
	u16 status;

	/* save the command register so it can be restored on exit */
	ks_save_cmd_reg(ks);

	status = ks_rdreg16(ks, KS_ISR);
	if (unlikely(!status)) {
		ks_restore_cmd_reg(ks);
		return IRQ_NONE;
	}

	ks_wrreg16(ks, KS_ISR, status);

	if (likely(status & IRQ_RXI))
		ks_rcv(ks, netdev);

	if (unlikely(status & IRQ_LCI))
		ks_update_link_status(netdev, ks);

	if (unlikely(status & IRQ_TXI))
		netif_wake_queue(netdev);

	if (unlikely(status & IRQ_LDI)) {
		u16 pmecr = ks_rdreg16(ks, KS_PMECR);
		pmecr &= ~PMECR_WKEVT_MASK;
		ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
	}

	if (unlikely(status & IRQ_RXOI))
		ks->netdev->stats.rx_over_errors++;

	/* this should be the last thing done in the handler */
	ks_restore_cmd_reg(ks);
	return IRQ_HANDLED;
}

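/**
 * ks_net_open - open network device
 * @netdev: the network device being opened
 *
 * Called when the network device is marked active, such as a user
 * executing 'ifconfig up' on the device. Requests the interrupt line,
 * wakes the chip up and enables the QMU.
 */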
static int ks_net_open(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	int err;

#define KS_INT_FLAGS (IRQF_DISABLED|IRQF_TRIGGER_LOW)

	netif_dbg(ks, ifup, ks->netdev, "%s - entry\n", __func__);

	err = request_irq(netdev->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev);
	if (err) {
		pr_err("Failed to request IRQ: %d: %d\n", netdev->irq, err);
		return err;
	}

	/* wake the chip up into normal (fully powered) mode */
	ks_set_powermode(ks, PMECR_PM_NORMAL);
	mdelay(1);	/* wait for the power mode change to take effect */

	/* clear and enable interrupts, then start the QMU */
	ks_wrreg16(ks, KS_ISR, 0xffff);
	ks_enable_int(ks);
	ks_enable_qmu(ks);
	netif_start_queue(ks->netdev);

	netif_dbg(ks, ifup, ks->netdev, "network device up\n");

	return 0;
}

/**
 * ks_net_stop - close network device
 * @netdev: the network device being closed
 */
static int ks_net_stop(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);

	netif_info(ks, ifdown, netdev, "shutting down\n");

	netif_stop_queue(netdev);

	mutex_lock(&ks->lock);

	/* turn off the IRQs and ack any outstanding */
	ks_wrreg16(ks, KS_IER, 0x0000);
	ks_wrreg16(ks, KS_ISR, 0xffff);

	/* shutdown RX/TX QMU */
	ks_disable_qmu(ks);

	/* set powermode to soft power down to save power */
	ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
	free_irq(netdev->irq, netdev);
	mutex_unlock(&ks->lock);
	return 0;
}

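/**
 * ks_write_qmu - write one frame into the QMU TX FIFO
 * @ks: the chip state
 * @pdata: frame data
 * @len: frame length in bytes
 *
 * Builds the control-word/byte-count header, copies the frame into the
 * TX FIFO in pseudo-DMA mode, then issues a manual-enqueue command and
 * busy-waits until the chip has accepted the frame.
 */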
static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
{
	/* control word (no interrupt on completion), then the byte count */
	ks->txh.txw[0] = 0;
	ks->txh.txw[1] = cpu_to_le16(len);

	/* 1. set pseudo DMA mode */
	ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
	/* 2. write the status/length header */
	ks_outblk(ks, ks->txh.txw, 4);
	/* 3. write the packet data */
	ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
	/* 4. leave pseudo DMA mode */
	ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
	/* 5. enqueue the frame with the manual-enqueue command */
	ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
	/* 6. wait until the TXQ command has been processed */
	while (ks_rdreg16(ks, KS_TXQCR) & TXQCR_METFE)
		;
}

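/**
 * ks_start_xmit - transmit a packet
 * @skb: the buffer to transmit
 * @netdev: the device used to transmit the packet
 *
 * Called by the network layer to transmit @skb. The interrupt is masked
 * while the frame is copied into the TX FIFO; if there is not enough
 * FIFO space the packet is left queued and NETDEV_TX_BUSY is returned.
 */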
static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	int retv = NETDEV_TX_OK;
	struct ks_net *ks = netdev_priv(netdev);

	disable_irq(netdev->irq);
	ks_disable_int(ks);
	spin_lock(&ks->statelock);

	/*
	 * Extra space is needed beyond the frame itself: 4 bytes for the
	 * status/length header, 4 bytes for the CRC and up to 4 bytes of
	 * alignment padding.
	 */
	if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) {
		ks_write_qmu(ks, skb->data, skb->len);
		/* add tx statistics */
		netdev->stats.tx_bytes += skb->len;
		netdev->stats.tx_packets++;
		dev_kfree_skb(skb);
	} else
		retv = NETDEV_TX_BUSY;
	spin_unlock(&ks->statelock);
	ks_enable_int(ks);
	enable_irq(netdev->irq);
	return retv;
}

/**
 * ks_start_rx - enable packet reception (RXCR1)
 * @ks: the chip state
 */
static void ks_start_rx(struct ks_net *ks)
{
	u16 cntl;

	cntl = ks_rdreg16(ks, KS_RXCR1);
	cntl |= RXCR1_RXE;
	ks_wrreg16(ks, KS_RXCR1, cntl);
}

/**
 * ks_stop_rx - disable packet reception (RXCR1)
 * @ks: the chip state
 */
static void ks_stop_rx(struct ks_net *ks)
{
	u16 cntl;

	cntl = ks_rdreg16(ks, KS_RXCR1);
	cntl &= ~RXCR1_RXE;
	ks_wrreg16(ks, KS_RXCR1, cntl);
}

static unsigned long const ethernet_polynomial = 0x04c11db7U;

static unsigned long ether_gen_crc(int length, u8 *data)
{
	long crc = -1;
	while (--length >= 0) {
		u8 current_octet = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
			crc = (crc << 1) ^
				((crc < 0) ^ (current_octet & 1) ?
				 ethernet_polynomial : 0);
		}
	}
	return (unsigned long)crc;
}

/**
 * ks_set_grpaddr - set multicast information
 * @ks: the chip state
 *
 * Hashes each address in the multicast list into the 64-bit group
 * address table and writes the table to the MAHTR registers.
 */
static void ks_set_grpaddr(struct ks_net *ks)
{
	u8 i;
	u32 index, position, value;

	memset(ks->mcast_bits, 0, sizeof(u8) * HW_MCAST_SIZE);

	for (i = 0; i < ks->mcast_lst_size; i++) {
		position = (ether_gen_crc(6, ks->mcast_lst[i]) >> 26) & 0x3f;
		index = position >> 3;
		value = 1 << (position & 7);
		ks->mcast_bits[index] |= (u8)value;
	}

	for (i = 0; i < HW_MCAST_SIZE; i++) {
		if (i & 1) {
			ks_wrreg16(ks, (u16)((KS_MAHTR0 + i) & ~1),
				   (ks->mcast_bits[i] << 8) |
				   ks->mcast_bits[i - 1]);
		}
	}
}

/**
 * ks_clear_mcast - clear multicast information
 * @ks: the chip state
 */
static void ks_clear_mcast(struct ks_net *ks)
{
	u16 i, mcast_size;

	for (i = 0; i < HW_MCAST_SIZE; i++)
		ks->mcast_bits[i] = 0;

	mcast_size = HW_MCAST_SIZE >> 2;
	for (i = 0; i < mcast_size; i++)
		ks_wrreg16(ks, KS_MAHTR0 + (2*i), 0);
}

static void ks_set_promis(struct ks_net *ks, u16 promiscuous_mode)
{
	u16 cntl;

	ks->promiscuous = promiscuous_mode;
	ks_stop_rx(ks);	/* first stop rx */
	cntl = ks_rdreg16(ks, KS_RXCR1);

	cntl &= ~RXCR1_FILTER_MASK;
	if (promiscuous_mode)
		/* enable promiscuous mode */
		cntl |= RXCR1_RXAE | RXCR1_RXINVF;
	else
		/* disable promiscuous mode (default normal mode) */
		cntl |= RXCR1_RXPAFMA;

	ks_wrreg16(ks, KS_RXCR1, cntl);

	if (ks->enabled)
		ks_start_rx(ks);
}

static void ks_set_mcast(struct ks_net *ks, u16 mcast)
{
	u16 cntl;

	ks->all_mcast = mcast;
	ks_stop_rx(ks);	/* first stop rx */
	cntl = ks_rdreg16(ks, KS_RXCR1);
	cntl &= ~RXCR1_FILTER_MASK;
	if (mcast)
		/* enable "perfect match plus all multicast" mode */
		cntl |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
	else
		/* back to perfect-address-match only (default mode) */
		cntl |= RXCR1_RXPAFMA;

	ks_wrreg16(ks, KS_RXCR1, cntl);

	if (ks->enabled)
		ks_start_rx(ks);
}

static void ks_set_rx_mode(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	struct netdev_hw_addr *ha;

	/* Turn on/off promiscuous mode. */
	if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC)
		ks_set_promis(ks,
			(u16)((netdev->flags & IFF_PROMISC) == IFF_PROMISC));
	/* Turn on/off all multicast mode. */
	else if ((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI)
		ks_set_mcast(ks,
			(u16)((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI));
	else
		ks_set_promis(ks, false);

	if ((netdev->flags & IFF_MULTICAST) && netdev_mc_count(netdev)) {
		if (netdev_mc_count(netdev) <= MAX_MCAST_LST) {
			int i = 0;

			netdev_for_each_mc_addr(ha, netdev) {
				if (i >= MAX_MCAST_LST)
					break;
				memcpy(ks->mcast_lst[i++], ha->addr, ETH_ALEN);
			}
			ks->mcast_lst_size = (u8)i;
			ks_set_grpaddr(ks);
		} else {
			/*
			 * The list is too big to program, so fall back to
			 * the "all multicast" mode.
			 */
			ks->mcast_lst_size = MAX_MCAST_LST;
			ks_set_mcast(ks, true);
		}
	} else {
		ks->mcast_lst_size = 0;
		ks_clear_mcast(ks);
	}
}

static void ks_set_mac(struct ks_net *ks, u8 *data)
{
	u16 *pw = (u16 *)data;
	u16 w, u;

	ks_stop_rx(ks);	/* first stop rx */

	u = *pw++;
	w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
	ks_wrreg16(ks, KS_MARH, w);

	u = *pw++;
	w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
	ks_wrreg16(ks, KS_MARM, w);

	u = *pw;
	w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
	ks_wrreg16(ks, KS_MARL, w);

	memcpy(ks->mac_addr, data, ETH_ALEN);

	if (ks->enabled)
		ks_start_rx(ks);
}

static int ks_set_mac_address(struct net_device *netdev, void *paddr)
{
	struct ks_net *ks = netdev_priv(netdev);
	struct sockaddr *addr = paddr;
	u8 *da;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	da = (u8 *)netdev->dev_addr;

	ks_set_mac(ks, da);
	return 0;
}

static int ks_net_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
	struct ks_net *ks = netdev_priv(netdev);

	if (!netif_running(netdev))
		return -EINVAL;

	return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
}

static const struct net_device_ops ks_netdev_ops = {
	.ndo_open = ks_net_open,
	.ndo_stop = ks_net_stop,
	.ndo_do_ioctl = ks_net_ioctl,
	.ndo_start_xmit = ks_start_xmit,
	.ndo_set_mac_address = ks_set_mac_address,
	.ndo_set_rx_mode = ks_set_rx_mode,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
};

/* ethtool support */

static void ks_get_drvinfo(struct net_device *netdev,
			   struct ethtool_drvinfo *di)
{
	strlcpy(di->driver, DRV_NAME, sizeof(di->driver));
	strlcpy(di->version, "1.00", sizeof(di->version));
	strlcpy(di->bus_info, dev_name(netdev->dev.parent),
		sizeof(di->bus_info));
}

static u32 ks_get_msglevel(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	return ks->msg_enable;
}

static void ks_set_msglevel(struct net_device *netdev, u32 to)
{
	struct ks_net *ks = netdev_priv(netdev);
	ks->msg_enable = to;
}

static int ks_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct ks_net *ks = netdev_priv(netdev);
	return mii_ethtool_gset(&ks->mii, cmd);
}

static int ks_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct ks_net *ks = netdev_priv(netdev);
	return mii_ethtool_sset(&ks->mii, cmd);
}

static u32 ks_get_link(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	return mii_link_ok(&ks->mii);
}

static int ks_nway_reset(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	return mii_nway_restart(&ks->mii);
}

static const struct ethtool_ops ks_ethtool_ops = {
	.get_drvinfo = ks_get_drvinfo,
	.get_msglevel = ks_get_msglevel,
	.set_msglevel = ks_set_msglevel,
	.get_settings = ks_get_settings,
	.set_settings = ks_set_settings,
	.get_link = ks_get_link,
	.nway_reset = ks_nway_reset,
};

/* MII interface controls */

static int ks_phy_reg(int reg)
{
	switch (reg) {
	case MII_BMCR:
		return KS_P1MBCR;
	case MII_BMSR:
		return KS_P1MBSR;
	case MII_PHYSID1:
		return KS_PHY1ILR;
	case MII_PHYSID2:
		return KS_PHY1IHR;
	case MII_ADVERTISE:
		return KS_P1ANAR;
	case MII_LPA:
		return KS_P1ANLPR;
	}

	return 0x0;
}

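/**
 * ks_phy_read - MII interface PHY register read
 * @netdev: the network device the PHY is on
 * @phy_addr: address of PHY (ignored, there is only one)
 * @reg: the MII register to read
 *
 * Reads the PHY register specified by @reg. Registers that the device
 * does not implement read back as zero.
 */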
static int ks_phy_read(struct net_device *netdev, int phy_addr, int reg)
{
	struct ks_net *ks = netdev_priv(netdev);
	int ksreg;
	int result;

	ksreg = ks_phy_reg(reg);
	if (!ksreg)
		return 0x0;	/* no error return allowed, so use zero */

	mutex_lock(&ks->lock);
	result = ks_rdreg16(ks, ksreg);
	mutex_unlock(&ks->lock);

	return result;
}

static void ks_phy_write(struct net_device *netdev,
			 int phy, int reg, int value)
{
	struct ks_net *ks = netdev_priv(netdev);
	int ksreg;

	ksreg = ks_phy_reg(reg);
	if (ksreg) {
		mutex_lock(&ks->lock);
		ks_wrreg16(ks, ksreg, value);
		mutex_unlock(&ks->lock);
	}
}

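/**
 * ks_read_selftest - read the built-in memory selftest result
 * @ks: the chip state
 *
 * Reads the memory BIST information register and reports whether the
 * internal TX and RX memories passed their selftest. Returns non-zero
 * if either memory reports a failure.
 */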
static int ks_read_selftest(struct ks_net *ks)
{
	unsigned both_done = MBIR_TXMBF | MBIR_RXMBF;
	int ret = 0;
	unsigned rd;

	rd = ks_rdreg16(ks, KS_MBIR);

	if ((rd & both_done) != both_done) {
		netdev_warn(ks->netdev, "Memory selftest not finished\n");
		return 0;
	}

	if (rd & MBIR_TXMBFA) {
		netdev_err(ks->netdev, "TX memory selftest failed\n");
		ret |= 1;
	}

	if (rd & MBIR_RXMBFA) {
		netdev_err(ks->netdev, "RX memory selftest failed\n");
		ret |= 2;
	}

	if (!ret)
		netdev_info(ks->netdev, "memory selftest passed\n");
	return ret;
}

static void ks_setup(struct ks_net *ks)
{
	u16 w;

	/* setup transmit frame data pointer auto-increment (TXFDPR) */
	ks_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI);

	/* setup receive frame data pointer auto-increment (RXFDPR) */
	ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);

	/* setup receive frame threshold - 1 frame (RXFCTR) */
	ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK);

	/* setup RxQ command control (RXQCR) */
	ks->rc_rxqcr = RXQCR_CMD_CNTL;
	ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);

	/*
	 * if the link ends up forced (auto-negotiation fails), fall back
	 * to half duplex rather than the chip default of full duplex
	 */
	w = ks_rdreg16(ks, KS_P1MBCR);
	w &= ~P1MBCR_FORCE_FDX;
	ks_wrreg16(ks, KS_P1MBCR, w);

	w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
	ks_wrreg16(ks, KS_TXCR, w);

	w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE | RXCR1_RXME | RXCR1_RXIPFCC;

	if (ks->promiscuous)		/* promiscuous mode */
		w |= (RXCR1_RXAE | RXCR1_RXINVF);
	else if (ks->all_mcast)		/* pass all multicast */
		w |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
	else				/* perfect address match only */
		w |= RXCR1_RXPAFMA;

	ks_wrreg16(ks, KS_RXCR1, w);
}

static void ks_setup_int(struct ks_net *ks)
{
	ks->rc_ier = 0x00;
	/* Clear the interrupt status of the hardware. */
	ks_wrreg16(ks, KS_ISR, 0xffff);

	/* Enable the interrupts of the hardware. */
	ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI);
}

static int ks_hw_init(struct ks_net *ks)
{
#define MHEADER_SIZE (sizeof(struct type_frame_head) * MAX_RECV_FRAMES)
	ks->promiscuous = 0;
	ks->all_mcast = 0;
	ks->mcast_lst_size = 0;

	ks->frame_head_info = kmalloc(MHEADER_SIZE, GFP_KERNEL);
	if (!ks->frame_head_info)
		return false;

	ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS);
	return true;
}

static int ks8851_probe(struct platform_device *pdev)
{
	int err = -ENOMEM;
	struct resource *io_d, *io_c;
	struct net_device *netdev;
	struct ks_net *ks;
	u16 id, data;
	struct ks8851_mll_platform_data *pdata;

	io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);

	if (!request_mem_region(io_d->start, resource_size(io_d), DRV_NAME))
		goto err_mem_region;

	if (!request_mem_region(io_c->start, resource_size(io_c), DRV_NAME))
		goto err_mem_region1;

	netdev = alloc_etherdev(sizeof(struct ks_net));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	ks = netdev_priv(netdev);
	ks->netdev = netdev;
	ks->hw_addr = ioremap(io_d->start, resource_size(io_d));
	if (!ks->hw_addr)
		goto err_ioremap;

	ks->hw_addr_cmd = ioremap(io_c->start, resource_size(io_c));
	if (!ks->hw_addr_cmd)
		goto err_ioremap1;

	netdev->irq = platform_get_irq(pdev, 0);
	if ((int)netdev->irq < 0) {
		err = netdev->irq;
		goto err_get_irq;
	}

	ks->pdev = pdev;

	mutex_init(&ks->lock);
	spin_lock_init(&ks->statelock);

	netdev->netdev_ops = &ks_netdev_ops;
	netdev->ethtool_ops = &ks_ethtool_ops;

	/* setup mii state */
	ks->mii.dev = netdev;
	ks->mii.phy_id = 1;
	ks->mii.phy_id_mask = 1;
	ks->mii.reg_num_mask = 0xf;
	ks->mii.mdio_read = ks_phy_read;
	ks->mii.mdio_write = ks_phy_write;

	netdev_info(netdev, "message enable is %d\n", msg_enable);
	/* set the default message enable */
	ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
						     NETIF_MSG_PROBE |
						     NETIF_MSG_LINK));
	ks_read_config(ks);

	/* simple check for a valid chip being connected to the bus */
	if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
		netdev_err(netdev, "failed to read device ID\n");
		err = -ENODEV;
		goto err_register;
	}

	if (ks_read_selftest(ks)) {
		netdev_err(netdev, "failed memory selftest\n");
		err = -ENODEV;
		goto err_register;
	}

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	platform_set_drvdata(pdev, netdev);

	ks_soft_reset(ks, GRR_GSR);
	ks_hw_init(ks);
	ks_disable_qmu(ks);
	ks_setup(ks);
	ks_setup_int(ks);

	data = ks_rdreg16(ks, KS_OBCR);
	ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);

	/* overwrite the default MAC address with the platform data */
	pdata = pdev->dev.platform_data;
	if (!pdata) {
		netdev_err(netdev, "No platform data\n");
		err = -ENODEV;
		goto err_pdata;
	}
	memcpy(ks->mac_addr, pdata->mac_addr, 6);
	if (!is_valid_ether_addr(ks->mac_addr)) {
		/* use a random MAC address if none was supplied */
		eth_random_addr(ks->mac_addr);
		netdev_info(netdev, "Using random mac address\n");
	}
	netdev_info(netdev, "Mac address is: %pM\n", ks->mac_addr);

	memcpy(netdev->dev_addr, ks->mac_addr, ETH_ALEN);

	ks_set_mac(ks, netdev->dev_addr);

	id = ks_rdreg16(ks, KS_CIDER);

	netdev_info(netdev, "Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
		    (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
	return 0;

err_pdata:
	unregister_netdev(netdev);
err_register:
err_get_irq:
	iounmap(ks->hw_addr_cmd);
err_ioremap1:
	iounmap(ks->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	release_mem_region(io_c->start, resource_size(io_c));
err_mem_region1:
	release_mem_region(io_d->start, resource_size(io_d));
err_mem_region:
	return err;
}

static int ks8851_remove(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct ks_net *ks = netdev_priv(netdev);
	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	kfree(ks->frame_head_info);
	unregister_netdev(netdev);
	iounmap(ks->hw_addr);
	free_netdev(netdev);
	release_mem_region(iomem->start, resource_size(iomem));
	platform_set_drvdata(pdev, NULL);
	return 0;
}

static struct platform_driver ks8851_platform_driver = {
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
	.probe = ks8851_probe,
	.remove = ks8851_remove,
};

module_platform_driver(ks8851_platform_driver);

MODULE_DESCRIPTION("KS8851 MLL Network driver");
MODULE_AUTHOR("David Choi <david.choi@micrel.com>");
MODULE_LICENSE("GPL");
module_param_named(message, msg_enable, int, 0);
MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");