/*
 *  SuperH Ethernet device driver
 *
 *  Copyright (C) 2006-2008 Nobuhiro Iwamatsu
 *  Copyright (C) 2008-2009 Renesas Solutions Corp.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms and conditions of the GNU General Public License,
 *  version 2, as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 *  more details.
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 *  The full GNU General Public License is included in this distribution in
 *  the file called "COPYING".
 */
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <asm/cacheflush.h>

#include "sh_eth.h"

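/* There is CPU dependent code */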
#if defined(CONFIG_CPU_SUBTYPE_SH7724)
#define SH_ETH_RESET_DEFAULT	1
static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;

	if (mdp->duplex)	/* Full */
		ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
	else			/* Half */
		ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;

	switch (mdp->speed) {
	case 10: /* 10BASE */
		ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_RTM, ioaddr + ECMR);
		break;
	case 100: /* 100BASE */
		ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_RTM, ioaddr + ECMR);
		break;
	default:
		break;
	}
}

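/* SH7724 */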
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
};

#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
#define SH_ETH_HAS_TSU	1
static void sh_eth_chip_reset(struct net_device *ndev)
{
	/* reset device */
	ctrl_outl(ARSTR_ARSTR, ARSTR);
	mdelay(1);
}

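/* software reset the E-DMAC, then clear the descriptor pointer registers */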
static void sh_eth_reset(struct net_device *ndev)
{
	u32 ioaddr = ndev->base_addr;
	int cnt = 100;

	ctrl_outl(EDSR_ENALL, ioaddr + EDSR);
	ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
	while (cnt > 0) {
		if (!(ctrl_inl(ioaddr + EDMR) & 0x3))
			break;
		mdelay(1);
		cnt--;
	}
	if (cnt == 0)
		printk(KERN_ERR "Device reset failed\n");

	/* Table Init */
	ctrl_outl(0x0, ioaddr + TDLAR);
	ctrl_outl(0x0, ioaddr + TDFAR);
	ctrl_outl(0x0, ioaddr + TDFXR);
	ctrl_outl(0x0, ioaddr + TDFFR);
	ctrl_outl(0x0, ioaddr + RDLAR);
	ctrl_outl(0x0, ioaddr + RDFAR);
	ctrl_outl(0x0, ioaddr + RDFXR);
	ctrl_outl(0x0, ioaddr + RDFFR);
}

static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;

	if (mdp->duplex)	/* Full */
		ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
	else			/* Half */
		ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;

	switch (mdp->speed) {
	case 10: /* 10BASE */
		ctrl_outl(GECMR_10, ioaddr + GECMR);
		break;
	case 100: /* 100BASE */
		ctrl_outl(GECMR_100, ioaddr + GECMR);
		break;
	case 1000: /* 1000BASE */
		ctrl_outl(GECMR_1000, ioaddr + GECMR);
		break;
	default:
		break;
	}
}

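/* SH7763 */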
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,
	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE |
			  EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
};

#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
#define SH_ETH_RESET_DEFAULT	1
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
#define SH_ETH_RESET_DEFAULT	1
#define SH_ETH_HAS_TSU	1
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
};
#endif

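/* fill in any cpu_data fields that the CPU-specific definitions above left at zero */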
static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
	if (!cd->ecsr_value)
		cd->ecsr_value = DEFAULT_ECSR_INIT;

	if (!cd->ecsipr_value)
		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

	if (!cd->fcftr_value)
		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
				  DEFAULT_FIFO_F_D_RFD;

	if (!cd->fdr_value)
		cd->fdr_value = DEFAULT_FDR_INIT;

	if (!cd->rmcr_value)
		cd->rmcr_value = DEFAULT_RMCR_VALUE;

	if (!cd->tx_check)
		cd->tx_check = DEFAULT_TX_CHECK;

	if (!cd->eesr_err_check)
		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;

	if (!cd->tx_error_check)
		cd->tx_error_check = DEFAULT_TX_ERROR_CHECK;
}

#if defined(SH_ETH_RESET_DEFAULT)
/* Chip Reset */
static void sh_eth_reset(struct net_device *ndev)
{
	u32 ioaddr = ndev->base_addr;

	ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
	mdelay(3);
	ctrl_outl(ctrl_inl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR);
}
#endif

#if defined(CONFIG_CPU_SH4)
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	int reserve;

	reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
	if (reserve)
		skb_reserve(skb, reserve);
}
#else
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
}
#endif

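/* CPU <-> EDMAC endian convert */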
static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return cpu_to_le32(x);
	case EDMAC_BIG_ENDIAN:
		return cpu_to_be32(x);
	}
	return x;
}

static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return le32_to_cpu(x);
	case EDMAC_BIG_ENDIAN:
		return be32_to_cpu(x);
	}
	return x;
}

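/* Program the hardware MAC address from dev->dev_addr. */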
static void update_mac_address(struct net_device *ndev)
{
	u32 ioaddr = ndev->base_addr;

	ctrl_outl((ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		  (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]),
		  ioaddr + MAHR);
	ctrl_outl((ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]),
		  ioaddr + MALR);
}

/*
 * Read the MAC address currently programmed in the MAHR/MALR registers
 * (typically set by the boot loader) into dev->dev_addr.
 */
static void read_mac_address(struct net_device *ndev)
{
	u32 ioaddr = ndev->base_addr;

	ndev->dev_addr[0] = (ctrl_inl(ioaddr + MAHR) >> 24);
	ndev->dev_addr[1] = (ctrl_inl(ioaddr + MAHR) >> 16) & 0xFF;
	ndev->dev_addr[2] = (ctrl_inl(ioaddr + MAHR) >> 8) & 0xFF;
	ndev->dev_addr[3] = (ctrl_inl(ioaddr + MAHR) & 0xFF);
	ndev->dev_addr[4] = (ctrl_inl(ioaddr + MALR) >> 8) & 0xFF;
	ndev->dev_addr[5] = (ctrl_inl(ioaddr + MALR) & 0xFF);
}

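/* control masks for the bit-banged MDIO interface in the PIR register */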
struct bb_info {
	struct mdiobb_ctrl ctrl;
	u32 addr;	/* PIR register address */
	u32 mmd_msk;	/* MII management data direction */
	u32 mdo_msk;	/* MII data out */
	u32 mdi_msk;	/* MII data in */
	u32 mdc_msk;	/* MII clock */
};

/* PHY bit set */
static void bb_set(u32 addr, u32 msk)
{
	ctrl_outl(ctrl_inl(addr) | msk, addr);
}

/* PHY bit clear */
static void bb_clr(u32 addr, u32 msk)
{
	ctrl_outl((ctrl_inl(addr) & ~msk), addr);
}

/* PHY bit read */
static int bb_read(u32 addr, u32 msk)
{
	return (ctrl_inl(addr) & msk) != 0;
}

/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bit)
		bb_set(bitbang->addr, bitbang->mmd_msk);
	else
		bb_clr(bitbang->addr, bitbang->mmd_msk);
}

/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdo_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdo_msk);
}

/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	return bb_read(bitbang->addr, bitbang->mdi_msk);
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdc_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdc_msk);
}

/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = sh_mdc_ctrl,
	.set_mdio_dir = sh_mmd_ctrl,
	.set_mdio_data = sh_set_mdio,
	.get_mdio_data = sh_get_mdio,
};

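/* free skb and descriptor buffer */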
static void sh_eth_ring_free(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;

	/* Free Rx skb ringbuffer */
	if (mdp->rx_skbuff) {
		for (i = 0; i < RX_RING_SIZE; i++) {
			if (mdp->rx_skbuff[i])
				dev_kfree_skb(mdp->rx_skbuff[i]);
		}
	}
	kfree(mdp->rx_skbuff);

	/* Free Tx skb ringbuffer */
	if (mdp->tx_skbuff) {
		for (i = 0; i < TX_RING_SIZE; i++) {
			if (mdp->tx_skbuff[i])
				dev_kfree_skb(mdp->tx_skbuff[i]);
		}
	}
	kfree(mdp->tx_skbuff);
}

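/* format skb and descriptor buffer */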
static void sh_eth_ring_format(struct net_device *ndev)
{
	u32 ioaddr = ndev->base_addr;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
	int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;

	mdp->cur_rx = mdp->cur_tx = 0;
	mdp->dirty_rx = mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < RX_RING_SIZE; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = dev_alloc_skb(mdp->rx_buf_sz);
		mdp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
				DMA_FROM_DEVICE);
		skb->dev = ndev; /* Mark as being used by this device. */
		sh_eth_set_receive_align(skb);

		/* RX descriptor */
		rxdesc = &mdp->rx_ring[i];
		rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

		/* The size of the buffer is 16 byte boundary. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
		/* Rx descriptor address set */
		if (i == 0) {
			ctrl_outl(mdp->rx_desc_dma, ioaddr + RDLAR);
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
			ctrl_outl(mdp->rx_desc_dma, ioaddr + RDFAR);
#endif
		}
	}

	mdp->dirty_rx = (u32) (i - RX_RING_SIZE);

	/* Mark the last entry as wrapping the ring. */
	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < TX_RING_SIZE; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		txdesc->buffer_length = 0;
		if (i == 0) {
			/* Tx descriptor address set */
			ctrl_outl(mdp->tx_desc_dma, ioaddr + TDLAR);
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
			ctrl_outl(mdp->tx_desc_dma, ioaddr + TDFAR);
#endif
		}
	}

	txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}

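/* Get skb and descriptor buffer */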
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize, ret = 0;

	/*
	 * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));

	/*
	 * Allocate RX and TX skb rings; zeroed allocation so the free path
	 * never sees uninitialized skb pointers.
	 */
	mdp->rx_skbuff = kcalloc(RX_RING_SIZE, sizeof(*mdp->rx_skbuff),
				GFP_KERNEL);
	if (!mdp->rx_skbuff) {
		dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
		ret = -ENOMEM;
		return ret;
	}

	mdp->tx_skbuff = kcalloc(TX_RING_SIZE, sizeof(*mdp->tx_skbuff),
				GFP_KERNEL);
	if (!mdp->tx_skbuff) {
		dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
		ret = -ENOMEM;
		goto skb_ring_free;
	}

	/* Allocate all Rx descriptors. */
	rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
			GFP_KERNEL);

	if (!mdp->rx_ring) {
		dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n",
			rx_ringsize);
		ret = -ENOMEM;
		goto skb_ring_free;
	}

	mdp->dirty_rx = 0;

	/* Allocate all Tx descriptors. */
	tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
			GFP_KERNEL);
	if (!mdp->tx_ring) {
		dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n",
			tx_ringsize);
		ret = -ENOMEM;
		goto desc_ring_free;
	}
	return ret;

desc_ring_free:
	/* free DMA buffer */
	dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);

skb_ring_free:
	/* Free Rx and Tx skb ring buffers */
	sh_eth_ring_free(ndev);

	return ret;
}

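/* device init function */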
static int sh_eth_dev_init(struct net_device *ndev)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;
	u_int32_t rx_int_var, tx_int_var;
	u32 val;

	/* Soft Reset */
	sh_eth_reset(ndev);

	/* Descriptor format */
	sh_eth_ring_format(ndev);
	if (mdp->cd->rpadir)
		ctrl_outl(mdp->cd->rpadir_value, ioaddr + RPADIR);

	/* all sh_eth int mask */
	ctrl_outl(0, ioaddr + EESIPR);

#if defined(__LITTLE_ENDIAN__)
	if (mdp->cd->hw_swap)
		ctrl_outl(EDMR_EL, ioaddr + EDMR);
	else
#endif
		ctrl_outl(0, ioaddr + EDMR);

	/* FIFO size set */
	ctrl_outl(mdp->cd->fdr_value, ioaddr + FDR);
	ctrl_outl(0, ioaddr + TFTR);

	/* Frame recv control */
	ctrl_outl(mdp->cd->rmcr_value, ioaddr + RMCR);

	rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
	tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
	ctrl_outl(rx_int_var | tx_int_var, ioaddr + TRSCER);

	if (mdp->cd->bculr)
		ctrl_outl(0x800, ioaddr + BCULR);	/* Burst cycle set */

	ctrl_outl(mdp->cd->fcftr_value, ioaddr + FCFTR);

	if (!mdp->cd->no_trimd)
		ctrl_outl(0, ioaddr + TRIMD);

	/* Recv frame limit set register */
	ctrl_outl(RFLR_VALUE, ioaddr + RFLR);

	/* clear, then enable the E-DMAC interrupts */
	ctrl_outl(ctrl_inl(ioaddr + EESR), ioaddr + EESR);
	ctrl_outl(mdp->cd->eesipr_value, ioaddr + EESIPR);

	/* PAUSE Prohibition */
	val = (ctrl_inl(ioaddr + ECMR) & ECMR_DM) |
		ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

	ctrl_outl(val, ioaddr + ECMR);

	if (mdp->cd->set_rate)
		mdp->cd->set_rate(ndev);

	/* E-MAC Status Register clear */
	ctrl_outl(mdp->cd->ecsr_value, ioaddr + ECSR);

	/* E-MAC Interrupt Enable register */
	ctrl_outl(mdp->cd->ecsipr_value, ioaddr + ECSIPR);

	/* Set MAC address */
	update_mac_address(ndev);

	/* mask reset */
	if (mdp->cd->apr)
		ctrl_outl(APR_AP, ioaddr + APR);
	if (mdp->cd->mpr)
		ctrl_outl(MPR_MP, ioaddr + MPR);
	if (mdp->cd->tpauser)
		ctrl_outl(TPAUSER_UNLIMITED, ioaddr + TPAUSER);

	/* Setting the Rx mode will start the Rx process. */
	ctrl_outl(EDRRR_R, ioaddr + EDRRR);

	netif_start_queue(ndev);

	return ret;
}

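/* free Tx skb function */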
static int sh_eth_txfree(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	int freeNum = 0;
	int entry = 0;

	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
		entry = mdp->dirty_tx % TX_RING_SIZE;
		txdesc = &mdp->tx_ring[entry];
		if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
			break;
		/* Free the original skb. */
		if (mdp->tx_skbuff[entry]) {
			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
			mdp->tx_skbuff[entry] = NULL;
			freeNum++;
		}
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		if (entry >= TX_RING_SIZE - 1)
			txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);

		mdp->stats.tx_packets++;
		mdp->stats.tx_bytes += txdesc->buffer_length;
	}
	return freeNum;
}

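/* Packet receive function */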
static int sh_eth_rx(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;

	int entry = mdp->cur_rx % RX_RING_SIZE;
	int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
	struct sk_buff *skb;
	u16 pkt_len = 0;
	u32 desc_status;

	rxdesc = &mdp->rx_ring[entry];
	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
		desc_status = edmac_to_cpu(mdp, rxdesc->status);
		pkt_len = rxdesc->frame_length;

		if (--boguscnt < 0)
			break;

		if (!(desc_status & RDFEND))
			mdp->stats.rx_length_errors++;

		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
			mdp->stats.rx_errors++;
			if (desc_status & RD_RFS1)
				mdp->stats.rx_crc_errors++;
			if (desc_status & RD_RFS2)
				mdp->stats.rx_frame_errors++;
			if (desc_status & RD_RFS3)
				mdp->stats.rx_length_errors++;
			if (desc_status & RD_RFS4)
				mdp->stats.rx_length_errors++;
			if (desc_status & RD_RFS6)
				mdp->stats.rx_missed_errors++;
			if (desc_status & RD_RFS10)
				mdp->stats.rx_over_errors++;
		} else {
			if (!mdp->cd->hw_swap)
				sh_eth_soft_swap(
					phys_to_virt(ALIGN(rxdesc->addr, 4)),
					pkt_len + 2);
			skb = mdp->rx_skbuff[entry];
			mdp->rx_skbuff[entry] = NULL;
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			netif_rx(skb);
			mdp->stats.rx_packets++;
			mdp->stats.rx_bytes += pkt_len;
		}
		rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
		entry = (++mdp->cur_rx) % RX_RING_SIZE;
		rxdesc = &mdp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
		entry = mdp->dirty_rx % RX_RING_SIZE;
		rxdesc = &mdp->rx_ring[entry];
		/* The size of the buffer is 16 byte boundary. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);

		if (mdp->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(mdp->rx_buf_sz);
			mdp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
					DMA_FROM_DEVICE);
			skb->dev = ndev;
			sh_eth_set_receive_align(skb);

			skb->ip_summed = CHECKSUM_NONE;
			rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		}
		if (entry >= RX_RING_SIZE - 1)
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
		else
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP);
	}

	/* Restart Rx engine if stopped. */
	/* If we don't need to check status, don't. -KDU */
	if (!(ctrl_inl(ndev->base_addr + EDRRR) & EDRRR_R))
		ctrl_outl(EDRRR_R, ndev->base_addr + EDRRR);

	return 0;
}

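/* error control function */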
static void sh_eth_error(struct net_device *ndev, int intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;
	u32 felic_stat;
	u32 link_stat;
	u32 mask;

	if (intr_status & EESR_ECI) {
		felic_stat = ctrl_inl(ioaddr + ECSR);
		ctrl_outl(felic_stat, ioaddr + ECSR);	/* clear int */
		if (felic_stat & ECSR_ICD)
			mdp->stats.tx_carrier_errors++;
		if (felic_stat & ECSR_LCHNG) {
			/* Link Changed */
			if (mdp->cd->no_psr || mdp->no_ether_link) {
				if (mdp->link == PHY_DOWN)
					link_stat = 0;
				else
					link_stat = PHY_ST_LINK;
			} else {
				link_stat = (ctrl_inl(ioaddr + PSR));
				if (mdp->ether_link_active_low)
					link_stat = ~link_stat;
			}
			if (!(link_stat & PHY_ST_LINK)) {
				/* Link Down : disable tx and rx */
				ctrl_outl(ctrl_inl(ioaddr + ECMR) &
					  ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
			} else {
				/* Link Up */
				ctrl_outl(ctrl_inl(ioaddr + EESIPR) &
					  ~DMAC_M_ECI, ioaddr + EESIPR);
				/* clear int */
				ctrl_outl(ctrl_inl(ioaddr + ECSR),
					  ioaddr + ECSR);
				ctrl_outl(ctrl_inl(ioaddr + EESIPR) |
					  DMAC_M_ECI, ioaddr + EESIPR);
				/* enable tx and rx */
				ctrl_outl(ctrl_inl(ioaddr + ECMR) |
					  (ECMR_RE | ECMR_TE), ioaddr + ECMR);
			}
		}
	}

	if (intr_status & EESR_TWB) {
		/* Write-back end: unused write back interrupt */
		if (intr_status & EESR_TABT)	/* Transmit Abort int */
			mdp->stats.tx_aborted_errors++;
	}

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			mdp->stats.rx_frame_errors++;
			dev_err(&ndev->dev, "Receive Frame Overflow\n");
		}
	}

	if (!mdp->cd->no_ade) {
		if (intr_status & EESR_ADE && intr_status & EESR_TDE &&
		    intr_status & EESR_TFE)
			mdp->stats.tx_fifo_errors++;
	}

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		mdp->stats.rx_over_errors++;

		if (ctrl_inl(ioaddr + EDRRR) ^ EDRRR_R)
			ctrl_outl(EDRRR_R, ioaddr + EDRRR);
		dev_err(&ndev->dev, "Receive Descriptor Empty\n");
	}
	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		mdp->stats.rx_fifo_errors++;
		dev_err(&ndev->dev, "Receive FIFO Overflow\n");
	}

	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
	if (mdp->cd->no_ade)
		mask &= ~EESR_ADE;
	if (intr_status & mask) {
		/* Tx error */
		u32 edtrr = ctrl_inl(ndev->base_addr + EDTRR);
		/* dmesg */
		dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
				intr_status, mdp->cur_tx);
		dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
				mdp->dirty_tx, (u32) ndev->state, edtrr);
		/* dirty buffer free */
		sh_eth_txfree(ndev);

		/* SH7712 BUG */
		if (edtrr ^ EDTRR_TRNS) {
			/* tx dma start */
			ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
		}
		/* wakeup */
		netif_wake_queue(ndev);
	}
}

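/* top-level interrupt handler: dispatches Rx, Tx-done and error handling */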
static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
	struct net_device *ndev = netdev;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_cpu_data *cd = mdp->cd;
	irqreturn_t ret = IRQ_NONE;
	u32 ioaddr, intr_status = 0;

	ioaddr = ndev->base_addr;
	spin_lock(&mdp->lock);

	/* Get interrupt status */
	intr_status = ctrl_inl(ioaddr + EESR);
	/* Clear the interrupt sources we are going to handle */
	if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
			EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
			cd->tx_check | cd->eesr_err_check)) {
		ctrl_outl(intr_status, ioaddr + EESR);
		ret = IRQ_HANDLED;
	} else
		goto other_irq;

	if (intr_status & (EESR_FRC |	/* Frame recv */
			EESR_RMAF |	/* Multicast address recv */
			EESR_RRF |	/* Bit frame recv */
			EESR_RTLF |	/* Long frame recv */
			EESR_RTSF |	/* Short frame recv */
			EESR_PRE |	/* PHY-LSI recv error */
			EESR_CERF)) {	/* Recv frame CRC error */
		sh_eth_rx(ndev);
	}

	/* Tx done */
	if (intr_status & cd->tx_check) {
		sh_eth_txfree(ndev);
		netif_wake_queue(ndev);
	}

	if (intr_status & cd->eesr_err_check)
		sh_eth_error(ndev, intr_status);

other_irq:
	spin_unlock(&mdp->lock);

	return ret;
}

static void sh_eth_timer(unsigned long data)
{
	struct net_device *ndev = (struct net_device *)data;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	mod_timer(&mdp->timer, jiffies + (10 * HZ));
}

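/* PHY state control function */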
static void sh_eth_adjust_link(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;
	u32 ioaddr = ndev->base_addr;
	int new_state = 0;

	if (phydev->link != PHY_DOWN) {
		if (phydev->duplex != mdp->duplex) {
			new_state = 1;
			mdp->duplex = phydev->duplex;
			if (mdp->cd->set_duplex)
				mdp->cd->set_duplex(ndev);
		}

		if (phydev->speed != mdp->speed) {
			new_state = 1;
			mdp->speed = phydev->speed;
			if (mdp->cd->set_rate)
				mdp->cd->set_rate(ndev);
		}
		if (mdp->link == PHY_DOWN) {
			ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_TXF)
					| ECMR_DM, ioaddr + ECMR);
			new_state = 1;
			mdp->link = phydev->link;
		}
	} else if (mdp->link) {
		new_state = 1;
		mdp->link = PHY_DOWN;
		mdp->speed = 0;
		mdp->duplex = -1;
	}

	if (new_state)
		phy_print_status(phydev);
}

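/* PHY init function */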
static int sh_eth_phy_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	char phy_id[MII_BUS_ID_SIZE + 3];
	struct phy_device *phydev = NULL;

	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
		mdp->mii_bus->id, mdp->phy_id);

	mdp->link = PHY_DOWN;
	mdp->speed = 0;
	mdp->duplex = -1;

	/* Try connect to PHY */
	phydev = phy_connect(ndev, phy_id, &sh_eth_adjust_link,
				0, PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev)) {
		dev_err(&ndev->dev, "phy_connect failed\n");
		return PTR_ERR(phydev);
	}

	dev_info(&ndev->dev, "attached phy %i to driver %s\n",
		phydev->addr, phydev->drv->name);

	mdp->phydev = phydev;

	return 0;
}

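/* PHY control start function */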
static int sh_eth_phy_start(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	ret = sh_eth_phy_init(ndev);
	if (ret)
		return ret;

	/* reset phy - this also wakes it from PDOWN */
	phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
	phy_start(mdp->phydev);

	return 0;
}

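/* network device open function */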
static int sh_eth_open(struct net_device *ndev)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	ret = request_irq(ndev->irq, &sh_eth_interrupt,
#if defined(CONFIG_CPU_SUBTYPE_SH7763) || defined(CONFIG_CPU_SUBTYPE_SH7764)
				IRQF_SHARED,
#else
				0,
#endif
				ndev->name, ndev);
	if (ret) {
		dev_err(&ndev->dev, "Cannot assign IRQ number\n");
		return ret;
	}

	/* Descriptor set */
	ret = sh_eth_ring_init(ndev);
	if (ret)
		goto out_free_irq;

	/* device init */
	ret = sh_eth_dev_init(ndev);
	if (ret)
		goto out_free_irq;

	/* PHY control start */
	ret = sh_eth_phy_start(ndev);
	if (ret)
		goto out_free_irq;

	/* Set the timer to check for link beat. */
	init_timer(&mdp->timer);
	mdp->timer.expires = jiffies + (24 * HZ) / 10; /* 2.4 sec. */
	setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev);

	return ret;

out_free_irq:
	free_irq(ndev->irq, ndev);
	return ret;
}

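/* Timeout function */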
static void sh_eth_tx_timeout(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;
	struct sh_eth_rxdesc *rxdesc;
	int i;

	netif_stop_queue(ndev);

	/* warning message out */
	printk(KERN_WARNING "%s: transmit timed out, status %8.8x,"
	       " resetting...\n", ndev->name, (int)ctrl_inl(ioaddr + EESR));

	/* tx_errors count up */
	mdp->stats.tx_errors++;

	/* timer off */
	del_timer_sync(&mdp->timer);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rxdesc = &mdp->rx_ring[i];
		rxdesc->status = 0;
		rxdesc->addr = 0xBADF00D0;
		if (mdp->rx_skbuff[i])
			dev_kfree_skb(mdp->rx_skbuff[i]);
		mdp->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (mdp->tx_skbuff[i])
			dev_kfree_skb(mdp->tx_skbuff[i]);
		mdp->tx_skbuff[i] = NULL;
	}

	/* device init */
	sh_eth_dev_init(ndev);

	/* timer on */
	mdp->timer.expires = jiffies + (24 * HZ) / 10; /* 2.4 sec. */
	add_timer(&mdp->timer);
}

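/* Packet transmit function */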
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	u32 entry;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
		if (!sh_eth_txfree(ndev)) {
			netif_stop_queue(ndev);
			spin_unlock_irqrestore(&mdp->lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	spin_unlock_irqrestore(&mdp->lock, flags);

	entry = mdp->cur_tx % TX_RING_SIZE;
	mdp->tx_skbuff[entry] = skb;
	txdesc = &mdp->tx_ring[entry];
	txdesc->addr = virt_to_phys(skb->data);
	/* soft swap. */
	if (!mdp->cd->hw_swap)
		sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
				 skb->len + 2);
	/* write back */
	__flush_purge_region(skb->data, skb->len);
	if (skb->len < ETHERSMALL)
		txdesc->buffer_length = ETHERSMALL;
	else
		txdesc->buffer_length = skb->len;

	if (entry >= TX_RING_SIZE - 1)
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
	else
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

	mdp->cur_tx++;

	if (!(ctrl_inl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
		ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);

	ndev->trans_start = jiffies;

	return NETDEV_TX_OK;
}

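/* device close function */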
static int sh_eth_close(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;
	int ringsize;

	netif_stop_queue(ndev);

	/* Disable interrupts by clearing the interrupt mask. */
	ctrl_outl(0x0000, ioaddr + EESIPR);

	/* Stop the chip's Tx and Rx processes. */
	ctrl_outl(0, ioaddr + EDTRR);
	ctrl_outl(0, ioaddr + EDRRR);

	/* PHY Disconnect */
	if (mdp->phydev) {
		phy_stop(mdp->phydev);
		phy_disconnect(mdp->phydev);
	}

	free_irq(ndev->irq, ndev);

	del_timer_sync(&mdp->timer);

	/* Free all the skbuffs in the Rx queue. */
	sh_eth_ring_free(ndev);

	/* free Rx descriptor DMA buffer */
	ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
	dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);

	/* free Tx descriptor DMA buffer */
	ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
	dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);

	return 0;
}

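/* read the hardware statistics counters, which clear on write */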
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;

	mdp->stats.tx_dropped += ctrl_inl(ioaddr + TROCR);
	ctrl_outl(0, ioaddr + TROCR);	/* (write clear) */
	mdp->stats.collisions += ctrl_inl(ioaddr + CDCR);
	ctrl_outl(0, ioaddr + CDCR);	/* (write clear) */
	mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + LCCR);
	ctrl_outl(0, ioaddr + LCCR);	/* (write clear) */
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
	mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CERCR);
	ctrl_outl(0, ioaddr + CERCR);	/* (write clear) */
	mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CEECR);
	ctrl_outl(0, ioaddr + CEECR);	/* (write clear) */
#else
	mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CNDCR);
	ctrl_outl(0, ioaddr + CNDCR);	/* (write clear) */
#endif
	return &mdp->stats;
}

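/* ioctl to device function */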
static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
				int cmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, if_mii(rq), cmd);
}

#if defined(SH_ETH_HAS_TSU)
/* Multicast reception directions set */
static void sh_eth_set_multicast_list(struct net_device *ndev)
{
	u32 ioaddr = ndev->base_addr;

	if (ndev->flags & IFF_PROMISC) {
		/* Set promiscuous. */
		ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_MCT) | ECMR_PRM,
			  ioaddr + ECMR);
	} else {
		/* Normal, unicast/broadcast + multicast mode. */
		ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_PRM) | ECMR_MCT,
			  ioaddr + ECMR);
	}
}

/* SuperH's TSU register init function */
static void sh_eth_tsu_init(u32 ioaddr)
{
	ctrl_outl(0, ioaddr + TSU_FWEN0);	/* Disable forward (0->1) */
	ctrl_outl(0, ioaddr + TSU_FWEN1);	/* Disable forward (1->0) */
	ctrl_outl(0, ioaddr + TSU_FCM);		/* forward fifo 3k-3k */
	ctrl_outl(0xc, ioaddr + TSU_BSYSL0);
	ctrl_outl(0xc, ioaddr + TSU_BSYSL1);
	ctrl_outl(0, ioaddr + TSU_PRISL0);
	ctrl_outl(0, ioaddr + TSU_PRISL1);
	ctrl_outl(0, ioaddr + TSU_FWSL0);
	ctrl_outl(0, ioaddr + TSU_FWSL1);
	ctrl_outl(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC);
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
	ctrl_outl(0, ioaddr + TSU_QTAG0);	/* Disable QTAG(0->1) */
	ctrl_outl(0, ioaddr + TSU_QTAG1);	/* Disable QTAG(1->0) */
#else
	ctrl_outl(0, ioaddr + TSU_QTAGM0);	/* Disable QTAG(0->1) */
	ctrl_outl(0, ioaddr + TSU_QTAGM1);	/* Disable QTAG(1->0) */
#endif
	ctrl_outl(0, ioaddr + TSU_FWSR);	/* all interrupt status clear */
	ctrl_outl(0, ioaddr + TSU_FWINMK);	/* Disable all interrupt */
	ctrl_outl(0, ioaddr + TSU_TEN);		/* Disable all CAM entry */
	ctrl_outl(0, ioaddr + TSU_POST1);	/* Disable CAM entry [ 0- 7] */
	ctrl_outl(0, ioaddr + TSU_POST2);	/* Disable CAM entry [ 8-15] */
	ctrl_outl(0, ioaddr + TSU_POST3);	/* Disable CAM entry [16-23] */
	ctrl_outl(0, ioaddr + TSU_POST4);	/* Disable CAM entry [24-31] */
}
#endif /* SH_ETH_HAS_TSU */

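/* MDIO bus release function */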
static int sh_mdio_release(struct net_device *ndev)
{
	struct mii_bus *bus = dev_get_drvdata(&ndev->dev);

	/* unregister mdio bus */
	mdiobus_unregister(bus);

	/* remove mdio bus info from net_device */
	dev_set_drvdata(&ndev->dev, NULL);

	/* free bitbang info */
	free_mdio_bitbang(bus);

	return 0;
}

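/* MDIO bus init function */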
static int sh_mdio_init(struct net_device *ndev, int id)
{
	int ret, i;
	struct bb_info *bitbang;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* create bit control struct for PHY */
	bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
	if (!bitbang) {
		ret = -ENOMEM;
		goto out;
	}

	/* bitbang init */
	bitbang->addr = ndev->base_addr + PIR;
	bitbang->mdi_msk = 0x08;
	bitbang->mdo_msk = 0x04;
	bitbang->mmd_msk = 0x02;
	bitbang->mdc_msk = 0x01;
	bitbang->ctrl.ops = &bb_ops;

	/* MII controller setting */
	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
	if (!mdp->mii_bus) {
		ret = -ENOMEM;
		goto out_free_bitbang;
	}

	/* Hook up MII support for ethtool */
	mdp->mii_bus->name = "sh_mii";
	mdp->mii_bus->parent = &ndev->dev;
	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%x", id);

	/* PHY IRQ */
	mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
	if (!mdp->mii_bus->irq) {
		ret = -ENOMEM;
		goto out_free_bus;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		mdp->mii_bus->irq[i] = PHY_POLL;

	/* register mdio bus */
	ret = mdiobus_register(mdp->mii_bus);
	if (ret)
		goto out_free_irq;

	dev_set_drvdata(&ndev->dev, mdp->mii_bus);

	return 0;

out_free_irq:
	kfree(mdp->mii_bus->irq);

out_free_bus:
	free_mdio_bitbang(mdp->mii_bus);

out_free_bitbang:
	kfree(bitbang);

out:
	return ret;
}

static const struct net_device_ops sh_eth_netdev_ops = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
#if defined(SH_ETH_HAS_TSU)
	.ndo_set_multicast_list	= sh_eth_set_multicast_list,
#endif
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};

static int sh_eth_drv_probe(struct platform_device *pdev)
{
	int ret, i, devno = 0;
	struct resource *res;
	struct net_device *ndev = NULL;
	struct sh_eth_private *mdp;
	struct sh_eth_plat_data *pd;

	/* get base addr */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(res == NULL)) {
		dev_err(&pdev->dev, "invalid resource\n");
		ret = -EINVAL;
		goto out;
	}

	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
	if (!ndev) {
		dev_err(&pdev->dev, "Could not allocate device.\n");
		ret = -ENOMEM;
		goto out;
	}

	/* The sh Ether-specific entries in the device structure. */
	ndev->base_addr = res->start;
	devno = pdev->id;
	if (devno < 0)
		devno = 0;

	ndev->dma = -1;
	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		ret = -ENODEV;
		goto out_release;
	}
	ndev->irq = ret;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* Fill in the fields of the device structure with ethernet values. */
	ether_setup(ndev);

	mdp = netdev_priv(ndev);
	spin_lock_init(&mdp->lock);

	pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
	/* get PHY ID */
	mdp->phy_id = pd->phy;
	/* EDMAC endian */
	mdp->edmac_endian = pd->edmac_endian;
	mdp->no_ether_link = pd->no_ether_link;
	mdp->ether_link_active_low = pd->ether_link_active_low;

	/* set cpu data */
	mdp->cd = &sh_eth_my_cpu_data;
	sh_eth_set_default_cpu_data(mdp->cd);

	/* set function */
	ndev->netdev_ops = &sh_eth_netdev_ops;
	ndev->watchdog_timeo = TX_TIMEOUT;

	mdp->post_rx = POST_RX >> (devno << 1);
	mdp->post_fw = POST_FW >> (devno << 1);

	/* read and set MAC address */
	read_mac_address(ndev);

	/* First device only init */
	if (!devno) {
		/* chip reset */
		if (mdp->cd->chip_reset)
			mdp->cd->chip_reset(ndev);

#if defined(SH_ETH_HAS_TSU)
		/* TSU init (Init only) */
		sh_eth_tsu_init(SH_TSU_ADDR);
#endif
	}

	/* network device register */
	ret = register_netdev(ndev);
	if (ret)
		goto out_release;

	/* mdio bus init */
	ret = sh_mdio_init(ndev, pdev->id);
	if (ret)
		goto out_unregister;

	/* print device information */
	pr_info("Base address at 0x%x, ",
	       (u32)ndev->base_addr);

	for (i = 0; i < 5; i++)
		printk("%02X:", ndev->dev_addr[i]);
	printk("%02X, IRQ %d.\n", ndev->dev_addr[i], ndev->irq);

	platform_set_drvdata(pdev, ndev);

	return ret;

out_unregister:
	unregister_netdev(ndev);

out_release:
	/* net_device free */
	if (ndev)
		free_netdev(ndev);

out:
	return ret;
}

static int sh_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	sh_mdio_release(ndev);
	unregister_netdev(ndev);
	flush_scheduled_work();

	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver sh_eth_driver = {
	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.driver = {
		   .name = CARDNAME,
	},
};

static int __init sh_eth_init(void)
{
	return platform_driver_register(&sh_eth_driver);
}

static void __exit sh_eth_cleanup(void)
{
	platform_driver_unregister(&sh_eth_driver);
}

module_init(sh_eth_init);
module_exit(sh_eth_cleanup);

MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");