/*
 * PPC 4xx OCP EMAC (on-chip Ethernet) driver core.
 *
 * Author: Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 * Licensed under the GPL (see MODULE_LICENSE below).
 */
27#include <linux/module.h>
28#include <linux/sched.h>
29#include <linux/string.h>
30#include <linux/errno.h>
31#include <linux/delay.h>
32#include <linux/types.h>
33#include <linux/pci.h>
34#include <linux/etherdevice.h>
35#include <linux/skbuff.h>
36#include <linux/crc32.h>
37#include <linux/ethtool.h>
38#include <linux/mii.h>
39#include <linux/bitops.h>
40#include <linux/workqueue.h>
41#include <linux/of.h>
42
43#include <asm/processor.h>
44#include <asm/io.h>
45#include <asm/dma.h>
46#include <asm/uaccess.h>
47#include <asm/dcr.h>
48#include <asm/dcr-regs.h>
49
50#include "core.h"
69#define DRV_NAME "emac"
70#define DRV_VERSION "3.54"
71#define DRV_DESC "PPC 4xx OCP EMAC driver"
72
73MODULE_DESCRIPTION(DRV_DESC);
74MODULE_AUTHOR
75 ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
76MODULE_LICENSE("GPL");

/* PPC64 has no cacheable_memcpy(); fall back to a plain memcpy() */
81#ifdef CONFIG_PPC64
82#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
83#endif

/* Minimum number of free TX descriptors required to wake up the TX queue */
86#define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)

/*
 * If a received packet is smaller than this, allocate a fresh small skb and
 * copy the data into it instead of passing the ring buffer up the stack.
 */
91#define EMAC_RX_COPY_THRESH CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD

/*
 * Several EMACs may share the same set of MDIO lines, so keep track of
 * which PHY addresses are already claimed.
 */
101static u32 busy_phy_map;
102static DEFINE_MUTEX(emac_phy_map_lock);

/*
 * Wait queue used during probe to wait for dependent devices (MAL,
 * ZMII/RGMII, TAH, MDIO and previously probed EMACs) to show up.
 */
107static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);

/*
 * Boot-time list of EMAC device-tree nodes, used to keep the probe (and
 * thus interface) ordering of the EMAC instances stable.
 */
121#define EMAC_BOOT_LIST_SIZE 4
122static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];

/* How long to wait for dependent devices during probe */
125#define EMAC_PROBE_DEP_TIMEOUT (HZ * 5)
126
127
128
129
130static inline void emac_report_timeout_error(struct emac_instance *dev,
131 const char *error)
132{
133 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
134 EMAC_FTR_460EX_PHY_CLK_FIX |
135 EMAC_FTR_440EP_PHY_CLK_FIX))
136 DBG(dev, "%s" NL, error);
137 else if (net_ratelimit())
138 printk(KERN_ERR "%s: %s\n", dev->ofdev->node->full_name, error);
139}
140

/*
 * PHY clock workaround (EMAC_FTR_440EP_PHY_CLK_FIX): switch the EMAC RX
 * clock to the TX clock, or back to its default source, via SDR0_MFR.
 */
145static inline void emac_rx_clk_tx(struct emac_instance *dev)
146{
147#ifdef CONFIG_PPC_DCR_NATIVE
148 if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
149 dcri_clrset(SDR0, SDR0_MFR,
150 0, SDR0_MFR_ECS >> dev->cell_index);
151#endif
152}
153
154static inline void emac_rx_clk_default(struct emac_instance *dev)
155{
156#ifdef CONFIG_PPC_DCR_NATIVE
157 if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
158 dcri_clrset(SDR0, SDR0_MFR,
159 SDR0_MFR_ECS >> dev->cell_index, 0);
160#endif
161}

/* PHY link polling intervals, in jiffies */
164#define PHY_POLL_LINK_ON HZ
165#define PHY_POLL_LINK_OFF (HZ / 5)

/*
 * Graceful TX/RX stop timeouts, in microseconds: roughly one maximum-size
 * frame time at each link speed.
 */
170#define STOP_TIMEOUT_10 1230
171#define STOP_TIMEOUT_100 124
172#define STOP_TIMEOUT_1000 13
173#define STOP_TIMEOUT_1000_JUMBO 73

/* Multicast address used by 802.3x PAUSE frames, joined at open time */
175static unsigned char default_mcast_addr[] = {
176 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
177};

/*
 * Ethtool statistics names; the order must match struct emac_stats followed
 * by struct emac_error_stats (see emac_ethtool_get_ethtool_stats()).
 */
180static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
181 "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
182 "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
183 "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
184 "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
185 "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
186 "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
187 "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
188 "rx_bad_packet", "rx_runt_packet", "rx_short_event",
189 "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
190 "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
191 "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
192 "tx_bd_excessive_collisions", "tx_bd_late_collision",
193 "tx_bd_multple_collisions", "tx_bd_single_collision",
194 "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
195 "tx_errors"
196};
197
198static irqreturn_t emac_irq(int irq, void *dev_instance);
199static void emac_clean_tx_ring(struct emac_instance *dev);
200static void __emac_set_multicast_list(struct emac_instance *dev);
201
202static inline int emac_phy_supports_gige(int phy_mode)
203{
204 return phy_mode == PHY_MODE_GMII ||
205 phy_mode == PHY_MODE_RGMII ||
206 phy_mode == PHY_MODE_SGMII ||
207 phy_mode == PHY_MODE_TBI ||
208 phy_mode == PHY_MODE_RTBI;
209}
210
211static inline int emac_phy_gpcs(int phy_mode)
212{
213 return phy_mode == PHY_MODE_SGMII ||
214 phy_mode == PHY_MODE_TBI ||
215 phy_mode == PHY_MODE_RTBI;
216}
217
218static inline void emac_tx_enable(struct emac_instance *dev)
219{
220 struct emac_regs __iomem *p = dev->emacp;
221 u32 r;
222
223 DBG(dev, "tx_enable" NL);
224
225 r = in_be32(&p->mr0);
226 if (!(r & EMAC_MR0_TXE))
227 out_be32(&p->mr0, r | EMAC_MR0_TXE);
228}
229
230static void emac_tx_disable(struct emac_instance *dev)
231{
232 struct emac_regs __iomem *p = dev->emacp;
233 u32 r;
234
235 DBG(dev, "tx_disable" NL);
236
237 r = in_be32(&p->mr0);
238 if (r & EMAC_MR0_TXE) {
239 int n = dev->stop_timeout;
240 out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
241 while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
242 udelay(1);
243 --n;
244 }
245 if (unlikely(!n))
246 emac_report_timeout_error(dev, "TX disable timeout");
247 }
248}
249
250static void emac_rx_enable(struct emac_instance *dev)
251{
252 struct emac_regs __iomem *p = dev->emacp;
253 u32 r;
254
255 if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
256 goto out;
257
258 DBG(dev, "rx_enable" NL);
259
260 r = in_be32(&p->mr0);
261 if (!(r & EMAC_MR0_RXE)) {
262 if (unlikely(!(r & EMAC_MR0_RXI))) {
			/* Wait for a previous async disable to finish */
264 int n = dev->stop_timeout;
265 while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
266 udelay(1);
267 --n;
268 }
269 if (unlikely(!n))
270 emac_report_timeout_error(dev,
271 "RX disable timeout");
272 }
273 out_be32(&p->mr0, r | EMAC_MR0_RXE);
274 }
275 out:
276 ;
277}
278
279static void emac_rx_disable(struct emac_instance *dev)
280{
281 struct emac_regs __iomem *p = dev->emacp;
282 u32 r;
283
284 DBG(dev, "rx_disable" NL);
285
286 r = in_be32(&p->mr0);
287 if (r & EMAC_MR0_RXE) {
288 int n = dev->stop_timeout;
289 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
290 while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
291 udelay(1);
292 --n;
293 }
294 if (unlikely(!n))
295 emac_report_timeout_error(dev, "RX disable timeout");
296 }
297}
298
299static inline void emac_netif_stop(struct emac_instance *dev)
300{
301 netif_tx_lock_bh(dev->ndev);
302 netif_addr_lock(dev->ndev);
303 dev->no_mcast = 1;
304 netif_addr_unlock(dev->ndev);
305 netif_tx_unlock_bh(dev->ndev);
306 dev->ndev->trans_start = jiffies;
307 mal_poll_disable(dev->mal, &dev->commac);
308 netif_tx_disable(dev->ndev);
309}
310
311static inline void emac_netif_start(struct emac_instance *dev)
312{
313 netif_tx_lock_bh(dev->ndev);
314 netif_addr_lock(dev->ndev);
315 dev->no_mcast = 0;
316 if (dev->mcast_pending && netif_running(dev->ndev))
317 __emac_set_multicast_list(dev);
318 netif_addr_unlock(dev->ndev);
319 netif_tx_unlock_bh(dev->ndev);
320
321 netif_wake_queue(dev->ndev);
322
323
324
325
326
327
328 mal_poll_enable(dev->mal, &dev->commac);
329}
330
331static inline void emac_rx_disable_async(struct emac_instance *dev)
332{
333 struct emac_regs __iomem *p = dev->emacp;
334 u32 r;
335
336 DBG(dev, "rx_disable_async" NL);
337
338 r = in_be32(&p->mr0);
339 if (r & EMAC_MR0_RXE)
340 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
341}
342
343static int emac_reset(struct emac_instance *dev)
344{
345 struct emac_regs __iomem *p = dev->emacp;
346 int n = 20;
347
348 DBG(dev, "reset" NL);
349
350 if (!dev->reset_failed) {
		/* Quiesce RX and TX before soft-resetting the MAC, unless a
		 * previous reset attempt already timed out.
		 */
354 emac_rx_disable(dev);
355 emac_tx_disable(dev);
356 }
357
358#ifdef CONFIG_PPC_DCR_NATIVE
	/* 460EX PHY clock workaround: flip the SDR0_ETH_CFG clock select
	 * while the MAC is held in reset.
	 */
360 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
361 dcri_clrset(SDR0, SDR0_ETH_CFG,
362 0, SDR0_ETH_CFG_ECS << dev->cell_index);
363#endif
364
365 out_be32(&p->mr0, EMAC_MR0_SRST);
366 while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
367 --n;
368
369#ifdef CONFIG_PPC_DCR_NATIVE
	/* 460EX PHY clock workaround: restore the clock select after reset */
371 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
372 dcri_clrset(SDR0, SDR0_ETH_CFG,
373 SDR0_ETH_CFG_ECS << dev->cell_index, 0);
374#endif
375
376 if (n) {
377 dev->reset_failed = 0;
378 return 0;
379 } else {
380 emac_report_timeout_error(dev, "reset timeout");
381 dev->reset_failed = 1;
382 return -ETIMEDOUT;
383 }
384}
385
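/*
 * Program the EMAC group address hash table (GAHT) from the interface's
 * multicast list: each address is CRC-hashed to a slot number, and the
 * corresponding bit is set in one of the GAHT registers.
 */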
386static void emac_hash_mc(struct emac_instance *dev)
387{
388 const int regs = EMAC_XAHT_REGS(dev);
389 u32 *gaht_base = emac_gaht_base(dev);
390 u32 gaht_temp[regs];
391 struct dev_mc_list *dmi;
392 int i;
393
394 DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);
395
396 memset(gaht_temp, 0, sizeof (gaht_temp));
397
398 for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
399 int slot, reg, mask;
400 DBG2(dev, "mc %pM" NL, dmi->dmi_addr);
401
402 slot = EMAC_XAHT_CRC_TO_SLOT(dev, ether_crc(ETH_ALEN, dmi->dmi_addr));
403 reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
404 mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);
405
406 gaht_temp[reg] |= mask;
407 }
408
409 for (i = 0; i < regs; i++)
410 out_be32(gaht_base + i, gaht_temp[i]);
411}
412
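/*
 * Build the receive mode register (RMR) value from the interface flags:
 * promiscuous, receive-all-multicast (also used when the multicast list
 * exceeds the hash table), or hash-filtered multicast.
 */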
413static inline u32 emac_iff2rmr(struct net_device *ndev)
414{
415 struct emac_instance *dev = netdev_priv(ndev);
416 u32 r;
417
418 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
419
420 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
421 r |= EMAC4_RMR_BASE;
422 else
423 r |= EMAC_RMR_BASE;
424
425 if (ndev->flags & IFF_PROMISC)
426 r |= EMAC_RMR_PME;
427 else if (ndev->flags & IFF_ALLMULTI ||
428 (ndev->mc_count > EMAC_XAHT_SLOTS(dev)))
429 r |= EMAC_RMR_PMME;
430 else if (ndev->mc_count > 0)
431 r |= EMAC_RMR_MAE;
432
433 return r;
434}
435
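/*
 * Base MR1 (mode register 1) value: encode the TX and RX FIFO sizes (and,
 * for EMAC4, the OPB bus frequency) on top of a fixed set of mode bits.
 */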
436static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
437{
438 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
439
440 DBG2(dev, "__emac_calc_base_mr1" NL);
441
442 switch(tx_size) {
443 case 2048:
444 ret |= EMAC_MR1_TFS_2K;
445 break;
446 default:
447 printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
448 dev->ndev->name, tx_size);
449 }
450
451 switch(rx_size) {
452 case 16384:
453 ret |= EMAC_MR1_RFS_16K;
454 break;
455 case 4096:
456 ret |= EMAC_MR1_RFS_4K;
457 break;
458 default:
459 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
460 dev->ndev->name, rx_size);
461 }
462
463 return ret;
464}
465
466static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
467{
468 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
469 EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
470
471 DBG2(dev, "__emac4_calc_base_mr1" NL);
472
473 switch(tx_size) {
474 case 16384:
475 ret |= EMAC4_MR1_TFS_16K;
476 break;
477 case 4096:
478 ret |= EMAC4_MR1_TFS_4K;
479 break;
480 case 2048:
481 ret |= EMAC4_MR1_TFS_2K;
482 break;
483 default:
484 printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
485 dev->ndev->name, tx_size);
486 }
487
488 switch(rx_size) {
489 case 16384:
490 ret |= EMAC4_MR1_RFS_16K;
491 break;
492 case 4096:
493 ret |= EMAC4_MR1_RFS_4K;
494 break;
495 case 2048:
496 ret |= EMAC4_MR1_RFS_2K;
497 break;
498 default:
499 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
500 dev->ndev->name, rx_size);
501 }
502
503 return ret;
504}
505
506static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
507{
508 return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
509 __emac4_calc_base_mr1(dev, tx_size, rx_size) :
510 __emac_calc_base_mr1(dev, tx_size, rx_size);
511}
512
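/* TX request threshold: the TRTR register is programmed in 64-byte units,
 * minus one, at a bit position that differs between EMAC4 and older cores.
 */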
513static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
514{
515 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
516 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
517 else
518 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
519}
520
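/* RX FIFO low/high watermarks, given in FIFO entries; the field layout of
 * the RWMR register differs between EMAC4 and older cores.
 */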
521static inline u32 emac_calc_rwmr(struct emac_instance *dev,
522 unsigned int low, unsigned int high)
523{
524 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
525 return (low << 22) | ( (high & 0x3ff) << 6);
526 else
527 return (low << 23) | ( (high & 0x1ff) << 7);
528}
529
530static int emac_configure(struct emac_instance *dev)
531{
532 struct emac_regs __iomem *p = dev->emacp;
533 struct net_device *ndev = dev->ndev;
534 int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
535 u32 r, mr1 = 0;
536
537 DBG(dev, "configure" NL);
538
539 if (!link) {
540 out_be32(&p->mr1, in_be32(&p->mr1)
541 | EMAC_MR1_FDE | EMAC_MR1_ILE);
542 udelay(100);
543 } else if (emac_reset(dev) < 0)
544 return -ETIMEDOUT;
545
546 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
547 tah_reset(dev->tah_dev);
548
549 DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
550 link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

	/* Default FIFO sizes */
553 tx_size = dev->tx_fifo_size;
554 rx_size = dev->rx_fifo_size;

	/* No link: force full duplex and internal loopback */
557 if (!link)
558 mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

	/* Check for full duplex */
561 else if (dev->phy.duplex == DUPLEX_FULL)
562 mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

	/* Configure MAC speed; default timings are for 10 Mbps */
565 dev->stop_timeout = STOP_TIMEOUT_10;
566 switch (dev->phy.speed) {
567 case SPEED_1000:
568 if (emac_phy_gpcs(dev->phy.mode)) {
569 mr1 |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_MF_IPPA(
570 (dev->phy.gpcs_address != 0xffffffff) ?
571 dev->phy.gpcs_address : dev->phy.address);
572
573
574
575
576 out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
577 } else
578 mr1 |= EMAC_MR1_MF_1000;
579
580
581 tx_size = dev->tx_fifo_size_gige;
582 rx_size = dev->rx_fifo_size_gige;
583
584 if (dev->ndev->mtu > ETH_DATA_LEN) {
585 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
586 mr1 |= EMAC4_MR1_JPSM;
587 else
588 mr1 |= EMAC_MR1_JPSM;
589 dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
590 } else
591 dev->stop_timeout = STOP_TIMEOUT_1000;
592 break;
593 case SPEED_100:
594 mr1 |= EMAC_MR1_MF_100;
595 dev->stop_timeout = STOP_TIMEOUT_100;
596 break;
597 default:
598 break;
599 }
600
601 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
602 rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
603 dev->phy.speed);
604 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
605 zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

	/* Configure flow control, unless this core has the 40x
	 * no-flow-control erratum.
	 */
610 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
611 dev->phy.duplex == DUPLEX_FULL) {
612 if (dev->phy.pause)
613 mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
614 else if (dev->phy.asym_pause)
615 mr1 |= EMAC_MR1_APP;
616 }
617
618
619 mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
620 out_be32(&p->mr1, mr1);

	/* Set the individual (station) MAC address */
623 out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
624 out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
625 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
626 ndev->dev_addr[5]);

	/* VLAN Tag Protocol Identifier */
629 out_be32(&p->vtpid, 0x8100);

	/* Receive mode register and multicast hash filter */
632 r = emac_iff2rmr(ndev);
633 if (r & EMAC_RMR_MAE)
634 emac_hash_mc(dev);
635 out_be32(&p->rmr, r);

	/* TX FIFO request thresholds (TMR1) and TX request threshold (TRTR) */
638 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
639 r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
640 tx_size / 2 / dev->fifo_entry_size);
641 else
642 r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
643 tx_size / 2 / dev->fifo_entry_size);
644 out_be32(&p->tmr1, r);
645 out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));

	/* RX FIFO low/high watermarks (1/8 and 1/4 of the FIFO here), used by
	 * the MAC for its flow-control decisions.
	 */
666 r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
667 rx_size / 4 / dev->fifo_entry_size);
668 out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
671 out_be32(&p->ptr, 0xffff);

	/* Enable the interrupts we care about */
674 r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
675 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
676 EMAC_ISR_IRE | EMAC_ISR_TE;
677 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE;
680 out_be32(&p->iser, r);

	/* The internal GPCS "PHY" needs to be re-initialised after an EMAC
	 * reset.
	 */
683 if (emac_phy_gpcs(dev->phy.mode)) {
684 if (dev->phy.gpcs_address != 0xffffffff)
685 emac_mii_reset_gpcs(&dev->phy);
686 else
687 emac_mii_reset_phy(&dev->phy);
688 }
689
690 return 0;
691}
692
693static void emac_reinitialize(struct emac_instance *dev)
694{
695 DBG(dev, "reinitialize" NL);
696
697 emac_netif_stop(dev);
698 if (!emac_configure(dev)) {
699 emac_tx_enable(dev);
700 emac_rx_enable(dev);
701 }
702 emac_netif_start(dev);
703}
704
705static void emac_full_tx_reset(struct emac_instance *dev)
706{
707 DBG(dev, "full_tx_reset" NL);
708
709 emac_tx_disable(dev);
710 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
711 emac_clean_tx_ring(dev);
712 dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;
713
714 emac_configure(dev);
715
716 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
717 emac_tx_enable(dev);
718 emac_rx_enable(dev);
719}
720
721static void emac_reset_work(struct work_struct *work)
722{
723 struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);
724
725 DBG(dev, "reset_work" NL);
726
727 mutex_lock(&dev->link_lock);
728 if (dev->opened) {
729 emac_netif_stop(dev);
730 emac_full_tx_reset(dev);
731 emac_netif_start(dev);
732 }
733 mutex_unlock(&dev->link_lock);
734}
735
736static void emac_tx_timeout(struct net_device *ndev)
737{
738 struct emac_instance *dev = netdev_priv(ndev);
739
740 DBG(dev, "tx_timeout" NL);
741
742 schedule_work(&dev->reset_work);
743}
744
745
746static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
747{
748 int done = !!(stacr & EMAC_STACR_OC);
749
750 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
751 done = !done;
752
753 return done;
754};
755
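/*
 * MDIO access goes through the STA control register (STACR): wait for any
 * previous operation to finish, issue the read/write command with the PHY
 * and register addresses, then poll for completion and check for PHY errors.
 * The MDIO lines may be multiplexed through a ZMII/RGMII bridge, which is
 * claimed for the duration of the access.
 */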
756static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
757{
758 struct emac_regs __iomem *p = dev->emacp;
759 u32 r = 0;
760 int n, err = -ETIMEDOUT;
761
762 mutex_lock(&dev->mdio_lock);
763
764 DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);
765
766
767 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
768 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
769 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
770 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
771
772
773 n = 20;
774 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
775 udelay(1);
776 if (!--n) {
777 DBG2(dev, " -> timeout wait idle\n");
778 goto bail;
779 }
780 }
781
782
783 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
784 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
785 else
786 r = EMAC_STACR_BASE(dev->opb_bus_freq);
787 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
788 r |= EMAC_STACR_OC;
789 if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
790 r |= EMACX_STACR_STAC_READ;
791 else
792 r |= EMAC_STACR_STAC_READ;
793 r |= (reg & EMAC_STACR_PRA_MASK)
794 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
795 out_be32(&p->stacr, r);
796
797
798 n = 200;
799 while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
800 udelay(1);
801 if (!--n) {
802 DBG2(dev, " -> timeout wait complete\n");
803 goto bail;
804 }
805 }
806
807 if (unlikely(r & EMAC_STACR_PHYE)) {
808 DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
809 err = -EREMOTEIO;
810 goto bail;
811 }
812
813 r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
814
815 DBG2(dev, "mdio_read -> %04x" NL, r);
816 err = 0;
817 bail:
818 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
819 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
820 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
821 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
822 mutex_unlock(&dev->mdio_lock);
823
824 return err == 0 ? r : err;
825}
826
827static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
828 u16 val)
829{
830 struct emac_regs __iomem *p = dev->emacp;
831 u32 r = 0;
832 int n, err = -ETIMEDOUT;
833
834 mutex_lock(&dev->mdio_lock);
835
836 DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);
837
838
839 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
840 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
841 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
842 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
843
844
845 n = 20;
846 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
847 udelay(1);
848 if (!--n) {
849 DBG2(dev, " -> timeout wait idle\n");
850 goto bail;
851 }
852 }
853
854
855 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
856 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
857 else
858 r = EMAC_STACR_BASE(dev->opb_bus_freq);
859 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
860 r |= EMAC_STACR_OC;
861 if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
862 r |= EMACX_STACR_STAC_WRITE;
863 else
864 r |= EMAC_STACR_STAC_WRITE;
865 r |= (reg & EMAC_STACR_PRA_MASK) |
866 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
867 (val << EMAC_STACR_PHYD_SHIFT);
868 out_be32(&p->stacr, r);
869
870
871 n = 200;
872 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
873 udelay(1);
874 if (!--n) {
875 DBG2(dev, " -> timeout wait complete\n");
876 goto bail;
877 }
878 }
879 err = 0;
880 bail:
881 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
882 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
883 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
884 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
885 mutex_unlock(&dev->mdio_lock);
886}
887
888static int emac_mdio_read(struct net_device *ndev, int id, int reg)
889{
890 struct emac_instance *dev = netdev_priv(ndev);
891 int res;
892
893 res = __emac_mdio_read((dev->mdio_instance &&
894 dev->phy.gpcs_address != id) ?
895 dev->mdio_instance : dev,
896 (u8) id, (u8) reg);
897 return res;
898}
899
900static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
901{
902 struct emac_instance *dev = netdev_priv(ndev);
903
904 __emac_mdio_write((dev->mdio_instance &&
905 dev->phy.gpcs_address != id) ?
906 dev->mdio_instance : dev,
907 (u8) id, (u8) reg, (u16) val);
908}
909
910
911static void __emac_set_multicast_list(struct emac_instance *dev)
912{
913 struct emac_regs __iomem *p = dev->emacp;
914 u32 rmr = emac_iff2rmr(dev->ndev);
915
916 DBG(dev, "__multicast %08x" NL, rmr);
	/* Briefly disable the receiver while the hash table and RMR are
	 * updated, rather than doing a full EMAC reset.
	 */
935 dev->mcast_pending = 0;
936 emac_rx_disable(dev);
937 if (rmr & EMAC_RMR_MAE)
938 emac_hash_mc(dev);
939 out_be32(&p->rmr, rmr);
940 emac_rx_enable(dev);
941}
942
943
944static void emac_set_multicast_list(struct net_device *ndev)
945{
946 struct emac_instance *dev = netdev_priv(ndev);
947
948 DBG(dev, "multicast" NL);
949
950 BUG_ON(!netif_running(dev->ndev));
951
952 if (dev->no_mcast) {
953 dev->mcast_pending = 1;
954 return;
955 }
956 __emac_set_multicast_list(dev);
957}
958
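/*
 * Re-size the RX ring for a new MTU: RX is stopped, queued descriptors are
 * dropped, and larger skbs are allocated if the new frame size requires it.
 */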
959static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
960{
961 int rx_sync_size = emac_rx_sync_size(new_mtu);
962 int rx_skb_size = emac_rx_skb_size(new_mtu);
963 int i, ret = 0;
964
965 mutex_lock(&dev->link_lock);
966 emac_netif_stop(dev);
967 emac_rx_disable(dev);
968 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
969
970 if (dev->rx_sg_skb) {
971 ++dev->estats.rx_dropped_resize;
972 dev_kfree_skb(dev->rx_sg_skb);
973 dev->rx_sg_skb = NULL;
974 }
975
976
977
978
979
980 for (i = 0; i < NUM_RX_BUFF; ++i) {
981 if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
982 ++dev->estats.rx_dropped_resize;
983
984 dev->rx_desc[i].data_len = 0;
985 dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
986 (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
987 }
988
989
990 if (rx_skb_size <= dev->rx_skb_size)
991 goto skip;
992
993
994 for (i = 0; i < NUM_RX_BUFF; ++i) {
995 struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
996 if (!skb) {
997 ret = -ENOMEM;
998 goto oom;
999 }
1000
1001 BUG_ON(!dev->rx_skb[i]);
1002 dev_kfree_skb(dev->rx_skb[i]);
1003
1004 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
1005 dev->rx_desc[i].data_ptr =
1006 dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
1007 DMA_FROM_DEVICE) + 2;
1008 dev->rx_skb[i] = skb;
1009 }
1010 skip:
1011
1012 if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
1013
1014 set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1015
1016 dev->ndev->mtu = new_mtu;
1017 emac_full_tx_reset(dev);
1018 }
1019
1020 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
1021 oom:
1022
1023 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1024 dev->rx_slot = 0;
1025 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1026 emac_rx_enable(dev);
1027 emac_netif_start(dev);
1028 mutex_unlock(&dev->link_lock);
1029
1030 return ret;
1031}
1032
1033
1034static int emac_change_mtu(struct net_device *ndev, int new_mtu)
1035{
1036 struct emac_instance *dev = netdev_priv(ndev);
1037 int ret = 0;
1038
1039 if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
1040 return -EINVAL;
1041
1042 DBG(dev, "change_mtu(%d)" NL, new_mtu);
1043
1044 if (netif_running(ndev)) {
1045
1046 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
1047 ret = emac_resize_rx_ring(dev, new_mtu);
1048 }
1049
1050 if (!ret) {
1051 ndev->mtu = new_mtu;
1052 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
1053 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
1054 }
1055
1056 return ret;
1057}
1058
1059static void emac_clean_tx_ring(struct emac_instance *dev)
1060{
1061 int i;
1062
1063 for (i = 0; i < NUM_TX_BUFF; ++i) {
1064 if (dev->tx_skb[i]) {
1065 dev_kfree_skb(dev->tx_skb[i]);
1066 dev->tx_skb[i] = NULL;
1067 if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
1068 ++dev->estats.tx_dropped;
1069 }
1070 dev->tx_desc[i].ctrl = 0;
1071 dev->tx_desc[i].data_ptr = 0;
1072 }
1073}
1074
1075static void emac_clean_rx_ring(struct emac_instance *dev)
1076{
1077 int i;
1078
1079 for (i = 0; i < NUM_RX_BUFF; ++i)
1080 if (dev->rx_skb[i]) {
1081 dev->rx_desc[i].ctrl = 0;
1082 dev_kfree_skb(dev->rx_skb[i]);
1083 dev->rx_skb[i] = NULL;
1084 dev->rx_desc[i].data_ptr = 0;
1085 }
1086
1087 if (dev->rx_sg_skb) {
1088 dev_kfree_skb(dev->rx_sg_skb);
1089 dev->rx_sg_skb = NULL;
1090 }
1091}
1092
1093static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
1094 gfp_t flags)
1095{
1096 struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
1097 if (unlikely(!skb))
1098 return -ENOMEM;
1099
1100 dev->rx_skb[slot] = skb;
1101 dev->rx_desc[slot].data_len = 0;
1102
1103 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
1104 dev->rx_desc[slot].data_ptr =
1105 dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
1106 DMA_FROM_DEVICE) + 2;
1107 wmb();
1108 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1109 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1110
1111 return 0;
1112}
1113
1114static void emac_print_link_status(struct emac_instance *dev)
1115{
1116 if (netif_carrier_ok(dev->ndev))
1117 printk(KERN_INFO "%s: link is up, %d %s%s\n",
1118 dev->ndev->name, dev->phy.speed,
1119 dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1120 dev->phy.pause ? ", pause enabled" :
1121 dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1122 else
1123 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1124}
1125
1126
1127static int emac_open(struct net_device *ndev)
1128{
1129 struct emac_instance *dev = netdev_priv(ndev);
1130 int err, i;
1131
1132 DBG(dev, "open" NL);
1133
1134
1135 err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
1136 if (err) {
1137 printk(KERN_ERR "%s: failed to request IRQ %d\n",
1138 ndev->name, dev->emac_irq);
1139 return err;
1140 }
1141
1142
1143 for (i = 0; i < NUM_RX_BUFF; ++i)
1144 if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
1145 printk(KERN_ERR "%s: failed to allocate RX ring\n",
1146 ndev->name);
1147 goto oom;
1148 }
1149
1150 dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
1151 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1152 dev->rx_sg_skb = NULL;
1153
1154 mutex_lock(&dev->link_lock);
1155 dev->opened = 1;
1156
1157
1158
1159 if (dev->phy.address >= 0) {
1160 int link_poll_interval;
1161 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1162 dev->phy.def->ops->read_link(&dev->phy);
1163 emac_rx_clk_default(dev);
1164 netif_carrier_on(dev->ndev);
1165 link_poll_interval = PHY_POLL_LINK_ON;
1166 } else {
1167 emac_rx_clk_tx(dev);
1168 netif_carrier_off(dev->ndev);
1169 link_poll_interval = PHY_POLL_LINK_OFF;
1170 }
1171 dev->link_polling = 1;
1172 wmb();
1173 schedule_delayed_work(&dev->link_work, link_poll_interval);
1174 emac_print_link_status(dev);
1175 } else
1176 netif_carrier_on(dev->ndev);

	/* Required for PAUSE frame support */
1179 dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);
1180
1181 emac_configure(dev);
1182 mal_poll_add(dev->mal, &dev->commac);
1183 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
1184 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
1185 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1186 emac_tx_enable(dev);
1187 emac_rx_enable(dev);
1188 emac_netif_start(dev);
1189
1190 mutex_unlock(&dev->link_lock);
1191
1192 return 0;
1193 oom:
1194 emac_clean_rx_ring(dev);
1195 free_irq(dev->emac_irq, dev);
1196
1197 return -ENOMEM;
1198}
1199
1200
1201#if 0
1202static int emac_link_differs(struct emac_instance *dev)
1203{
1204 u32 r = in_be32(&dev->emacp->mr1);
1205
1206 int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
1207 int speed, pause, asym_pause;
1208
1209 if (r & EMAC_MR1_MF_1000)
1210 speed = SPEED_1000;
1211 else if (r & EMAC_MR1_MF_100)
1212 speed = SPEED_100;
1213 else
1214 speed = SPEED_10;
1215
1216 switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
1217 case (EMAC_MR1_EIFC | EMAC_MR1_APP):
1218 pause = 1;
1219 asym_pause = 0;
1220 break;
1221 case EMAC_MR1_APP:
1222 pause = 0;
1223 asym_pause = 1;
1224 break;
1225 default:
1226 pause = asym_pause = 0;
1227 }
1228 return speed != dev->phy.speed || duplex != dev->phy.duplex ||
1229 pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
1230}
1231#endif
1232
1233static void emac_link_timer(struct work_struct *work)
1234{
1235 struct emac_instance *dev =
1236 container_of(to_delayed_work(work),
1237 struct emac_instance, link_work);
1238 int link_poll_interval;
1239
1240 mutex_lock(&dev->link_lock);
1241 DBG2(dev, "link timer" NL);
1242
1243 if (!dev->opened)
1244 goto bail;
1245
1246 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1247 if (!netif_carrier_ok(dev->ndev)) {
1248 emac_rx_clk_default(dev);
1249
1250 dev->phy.def->ops->read_link(&dev->phy);
1251
1252 netif_carrier_on(dev->ndev);
1253 emac_netif_stop(dev);
1254 emac_full_tx_reset(dev);
1255 emac_netif_start(dev);
1256 emac_print_link_status(dev);
1257 }
1258 link_poll_interval = PHY_POLL_LINK_ON;
1259 } else {
1260 if (netif_carrier_ok(dev->ndev)) {
1261 emac_rx_clk_tx(dev);
1262 netif_carrier_off(dev->ndev);
1263 netif_tx_disable(dev->ndev);
1264 emac_reinitialize(dev);
1265 emac_print_link_status(dev);
1266 }
1267 link_poll_interval = PHY_POLL_LINK_OFF;
1268 }
1269 schedule_delayed_work(&dev->link_work, link_poll_interval);
1270 bail:
1271 mutex_unlock(&dev->link_lock);
1272}
1273
1274static void emac_force_link_update(struct emac_instance *dev)
1275{
1276 netif_carrier_off(dev->ndev);
1277 smp_rmb();
1278 if (dev->link_polling) {
1279 cancel_rearming_delayed_work(&dev->link_work);
1280 if (dev->link_polling)
1281 schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
1282 }
1283}
1284
1285
1286static int emac_close(struct net_device *ndev)
1287{
1288 struct emac_instance *dev = netdev_priv(ndev);
1289
1290 DBG(dev, "close" NL);
1291
1292 if (dev->phy.address >= 0) {
1293 dev->link_polling = 0;
1294 cancel_rearming_delayed_work(&dev->link_work);
1295 }
1296 mutex_lock(&dev->link_lock);
1297 emac_netif_stop(dev);
1298 dev->opened = 0;
1299 mutex_unlock(&dev->link_lock);
1300
1301 emac_rx_disable(dev);
1302 emac_tx_disable(dev);
1303 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
1304 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
1305 mal_poll_del(dev->mal, &dev->commac);
1306
1307 emac_clean_tx_ring(dev);
1308 emac_clean_rx_ring(dev);
1309
1310 free_irq(dev->emac_irq, dev);
1311
1312 netif_carrier_off(ndev);
1313
1314 return 0;
1315}
1316
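/* Request TX checksum offload from the TAH when the stack asks for it */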
1317static inline u16 emac_tx_csum(struct emac_instance *dev,
1318 struct sk_buff *skb)
1319{
1320 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
1321 (skb->ip_summed == CHECKSUM_PARTIAL)) {
1322 ++dev->stats.tx_packets_csum;
1323 return EMAC_TX_CTRL_TAH_CSUM;
1324 }
1325 return 0;
1326}
1327
1328static inline int emac_xmit_finish(struct emac_instance *dev, int len)
1329{
1330 struct emac_regs __iomem *p = dev->emacp;
1331 struct net_device *ndev = dev->ndev;
1332
1333
1334
1335
1336
1337 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
1338 out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
1339 else
1340 out_be32(&p->tmr0, EMAC_TMR0_XMIT);
1341
1342 if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
1343 netif_stop_queue(ndev);
1344 DBG2(dev, "stopped TX queue" NL);
1345 }
1346
1347 ndev->trans_start = jiffies;
1348 ++dev->stats.tx_packets;
1349 dev->stats.tx_bytes += len;
1350
1351 return NETDEV_TX_OK;
1352}
1353
1354
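/* Hard transmit path for the non-SG case: one descriptor per packet */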
1355static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1356{
1357 struct emac_instance *dev = netdev_priv(ndev);
1358 unsigned int len = skb->len;
1359 int slot;
1360
1361 u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1362 MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);
1363
1364 slot = dev->tx_slot++;
1365 if (dev->tx_slot == NUM_TX_BUFF) {
1366 dev->tx_slot = 0;
1367 ctrl |= MAL_TX_CTRL_WRAP;
1368 }
1369
1370 DBG2(dev, "xmit(%u) %d" NL, len, slot);
1371
1372 dev->tx_skb[slot] = skb;
1373 dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
1374 skb->data, len,
1375 DMA_TO_DEVICE);
1376 dev->tx_desc[slot].data_len = (u16) len;
1377 wmb();
1378 dev->tx_desc[slot].ctrl = ctrl;
1379
1380 return emac_xmit_finish(dev, len);
1381}
1382
1383static inline int emac_xmit_split(struct emac_instance *dev, int slot,
1384 u32 pd, int len, int last, u16 base_ctrl)
1385{
1386 while (1) {
1387 u16 ctrl = base_ctrl;
1388 int chunk = min(len, MAL_MAX_TX_SIZE);
1389 len -= chunk;
1390
1391 slot = (slot + 1) % NUM_TX_BUFF;
1392
1393 if (last && !len)
1394 ctrl |= MAL_TX_CTRL_LAST;
1395 if (slot == NUM_TX_BUFF - 1)
1396 ctrl |= MAL_TX_CTRL_WRAP;
1397
1398 dev->tx_skb[slot] = NULL;
1399 dev->tx_desc[slot].data_ptr = pd;
1400 dev->tx_desc[slot].data_len = (u16) chunk;
1401 dev->tx_desc[slot].ctrl = ctrl;
1402 ++dev->tx_cnt;
1403
1404 if (!len)
1405 break;
1406
1407 pd += chunk;
1408 }
1409 return slot;
1410}
1411
1412
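/*
 * Scatter/gather transmit: the linear part and each fragment may be split
 * into several MAL_MAX_TX_SIZE chunks; the first descriptor is only marked
 * ready (via its ctrl word) once the whole chain has been set up.
 */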
1413static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
1414{
1415 struct emac_instance *dev = netdev_priv(ndev);
1416 int nr_frags = skb_shinfo(skb)->nr_frags;
1417 int len = skb->len, chunk;
1418 int slot, i;
1419 u16 ctrl;
1420 u32 pd;
1421
1422
1423 if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
1424 return emac_start_xmit(skb, ndev);
1425
1426 len -= skb->data_len;
1427
1428
1429
1430
1431
1432 if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
1433 goto stop_queue;
1434
1435 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1436 emac_tx_csum(dev, skb);
1437 slot = dev->tx_slot;
1438
1439
1440 dev->tx_skb[slot] = NULL;
1441 chunk = min(len, MAL_MAX_TX_SIZE);
1442 dev->tx_desc[slot].data_ptr = pd =
1443 dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
1444 dev->tx_desc[slot].data_len = (u16) chunk;
1445 len -= chunk;
1446 if (unlikely(len))
1447 slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
1448 ctrl);
1449
1450 for (i = 0; i < nr_frags; ++i) {
1451 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
1452 len = frag->size;
1453
1454 if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
1455 goto undo_frame;
1456
1457 pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
1458 DMA_TO_DEVICE);
1459
1460 slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
1461 ctrl);
1462 }
1463
1464 DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);
1465
1466
1467 dev->tx_skb[slot] = skb;
1468
1469
1470 if (dev->tx_slot == NUM_TX_BUFF - 1)
1471 ctrl |= MAL_TX_CTRL_WRAP;
1472 wmb();
1473 dev->tx_desc[dev->tx_slot].ctrl = ctrl;
1474 dev->tx_slot = (slot + 1) % NUM_TX_BUFF;
1475
1476 return emac_xmit_finish(dev, skb->len);
1477
1478 undo_frame:
1479
1480
1481
1482 while (slot != dev->tx_slot) {
1483 dev->tx_desc[slot].ctrl = 0;
1484 --dev->tx_cnt;
1485 if (--slot < 0)
1486 slot = NUM_TX_BUFF - 1;
1487 }
1488 ++dev->estats.tx_undo;
1489
1490 stop_queue:
1491 netif_stop_queue(ndev);
1492 DBG2(dev, "stopped TX queue" NL);
1493 return NETDEV_TX_BUSY;
1494}
1495
1496
1497static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
1498{
1499 struct emac_error_stats *st = &dev->estats;
1500
1501 DBG(dev, "BD TX error %04x" NL, ctrl);
1502
1503 ++st->tx_bd_errors;
1504 if (ctrl & EMAC_TX_ST_BFCS)
1505 ++st->tx_bd_bad_fcs;
1506 if (ctrl & EMAC_TX_ST_LCS)
1507 ++st->tx_bd_carrier_loss;
1508 if (ctrl & EMAC_TX_ST_ED)
1509 ++st->tx_bd_excessive_deferral;
1510 if (ctrl & EMAC_TX_ST_EC)
1511 ++st->tx_bd_excessive_collisions;
1512 if (ctrl & EMAC_TX_ST_LC)
1513 ++st->tx_bd_late_collision;
1514 if (ctrl & EMAC_TX_ST_MC)
1515 ++st->tx_bd_multple_collisions;
1516 if (ctrl & EMAC_TX_ST_SC)
1517 ++st->tx_bd_single_collision;
1518 if (ctrl & EMAC_TX_ST_UR)
1519 ++st->tx_bd_underrun;
1520 if (ctrl & EMAC_TX_ST_SQE)
1521 ++st->tx_bd_sqe;
1522}
1523
1524static void emac_poll_tx(void *param)
1525{
1526 struct emac_instance *dev = param;
1527 u32 bad_mask;
1528
1529 DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);
1530
1531 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
1532 bad_mask = EMAC_IS_BAD_TX_TAH;
1533 else
1534 bad_mask = EMAC_IS_BAD_TX;
1535
1536 netif_tx_lock_bh(dev->ndev);
1537 if (dev->tx_cnt) {
1538 u16 ctrl;
1539 int slot = dev->ack_slot, n = 0;
1540 again:
1541 ctrl = dev->tx_desc[slot].ctrl;
1542 if (!(ctrl & MAL_TX_CTRL_READY)) {
1543 struct sk_buff *skb = dev->tx_skb[slot];
1544 ++n;
1545
1546 if (skb) {
1547 dev_kfree_skb(skb);
1548 dev->tx_skb[slot] = NULL;
1549 }
1550 slot = (slot + 1) % NUM_TX_BUFF;
1551
1552 if (unlikely(ctrl & bad_mask))
1553 emac_parse_tx_error(dev, ctrl);
1554
1555 if (--dev->tx_cnt)
1556 goto again;
1557 }
1558 if (n) {
1559 dev->ack_slot = slot;
1560 if (netif_queue_stopped(dev->ndev) &&
1561 dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
1562 netif_wake_queue(dev->ndev);
1563
1564 DBG2(dev, "tx %d pkts" NL, n);
1565 }
1566 }
1567 netif_tx_unlock_bh(dev->ndev);
1568}
1569
1570static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
1571 int len)
1572{
1573 struct sk_buff *skb = dev->rx_skb[slot];
1574
1575 DBG2(dev, "recycle %d %d" NL, slot, len);
1576
1577 if (len)
1578 dma_map_single(&dev->ofdev->dev, skb->data - 2,
1579 EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
1580
1581 dev->rx_desc[slot].data_len = 0;
1582 wmb();
1583 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1584 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1585}
1586
1587static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
1588{
1589 struct emac_error_stats *st = &dev->estats;
1590
1591 DBG(dev, "BD RX error %04x" NL, ctrl);
1592
1593 ++st->rx_bd_errors;
1594 if (ctrl & EMAC_RX_ST_OE)
1595 ++st->rx_bd_overrun;
1596 if (ctrl & EMAC_RX_ST_BP)
1597 ++st->rx_bd_bad_packet;
1598 if (ctrl & EMAC_RX_ST_RP)
1599 ++st->rx_bd_runt_packet;
1600 if (ctrl & EMAC_RX_ST_SE)
1601 ++st->rx_bd_short_event;
1602 if (ctrl & EMAC_RX_ST_AE)
1603 ++st->rx_bd_alignment_error;
1604 if (ctrl & EMAC_RX_ST_BFCS)
1605 ++st->rx_bd_bad_fcs;
1606 if (ctrl & EMAC_RX_ST_PTL)
1607 ++st->rx_bd_packet_too_long;
1608 if (ctrl & EMAC_RX_ST_ORE)
1609 ++st->rx_bd_out_of_range;
1610 if (ctrl & EMAC_RX_ST_IRE)
1611 ++st->rx_bd_in_range;
1612}
1613
1614static inline void emac_rx_csum(struct emac_instance *dev,
1615 struct sk_buff *skb, u16 ctrl)
1616{
1617#ifdef CONFIG_IBM_NEW_EMAC_TAH
1618 if (!ctrl && dev->tah_dev) {
1619 skb->ip_summed = CHECKSUM_UNNECESSARY;
1620 ++dev->stats.rx_packets_csum;
1621 }
1622#endif
1623}
1624
1625static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
1626{
1627 if (likely(dev->rx_sg_skb != NULL)) {
1628 int len = dev->rx_desc[slot].data_len;
1629 int tot_len = dev->rx_sg_skb->len + len;
1630
1631 if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
1632 ++dev->estats.rx_dropped_mtu;
1633 dev_kfree_skb(dev->rx_sg_skb);
1634 dev->rx_sg_skb = NULL;
1635 } else {
1636 cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
1637 dev->rx_skb[slot]->data, len);
1638 skb_put(dev->rx_sg_skb, len);
1639 emac_recycle_rx_skb(dev, slot, len);
1640 return 0;
1641 }
1642 }
1643 emac_recycle_rx_skb(dev, slot, 0);
1644 return -1;
1645}
1646
1647
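/*
 * RX poll (NAPI-style, driven by the MAL): walk the ring until an empty
 * descriptor or the budget is hit, copying small packets and handling
 * multi-descriptor (scatter/gather) receives.
 */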
1648static int emac_poll_rx(void *param, int budget)
1649{
1650 struct emac_instance *dev = param;
1651 int slot = dev->rx_slot, received = 0;
1652
1653 DBG2(dev, "poll_rx(%d)" NL, budget);
1654
1655 again:
1656 while (budget > 0) {
1657 int len;
1658 struct sk_buff *skb;
1659 u16 ctrl = dev->rx_desc[slot].ctrl;
1660
1661 if (ctrl & MAL_RX_CTRL_EMPTY)
1662 break;
1663
1664 skb = dev->rx_skb[slot];
1665 mb();
1666 len = dev->rx_desc[slot].data_len;
1667
1668 if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
1669 goto sg;
1670
1671 ctrl &= EMAC_BAD_RX_MASK;
1672 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1673 emac_parse_rx_error(dev, ctrl);
1674 ++dev->estats.rx_dropped_error;
1675 emac_recycle_rx_skb(dev, slot, 0);
1676 len = 0;
1677 goto next;
1678 }
1679
1680 if (len < ETH_HLEN) {
1681 ++dev->estats.rx_dropped_stack;
1682 emac_recycle_rx_skb(dev, slot, len);
1683 goto next;
1684 }
1685
1686 if (len && len < EMAC_RX_COPY_THRESH) {
1687 struct sk_buff *copy_skb =
1688 alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
1689 if (unlikely(!copy_skb))
1690 goto oom;
1691
1692 skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
1693 cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
1694 len + 2);
1695 emac_recycle_rx_skb(dev, slot, len);
1696 skb = copy_skb;
1697 } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
1698 goto oom;
1699
1700 skb_put(skb, len);
1701 push_packet:
1702 skb->dev = dev->ndev;
1703 skb->protocol = eth_type_trans(skb, dev->ndev);
1704 emac_rx_csum(dev, skb, ctrl);
1705
1706 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
1707 ++dev->estats.rx_dropped_stack;
1708 next:
1709 ++dev->stats.rx_packets;
1710 skip:
1711 dev->stats.rx_bytes += len;
1712 slot = (slot + 1) % NUM_RX_BUFF;
1713 --budget;
1714 ++received;
1715 continue;
1716 sg:
1717 if (ctrl & MAL_RX_CTRL_FIRST) {
1718 BUG_ON(dev->rx_sg_skb);
1719 if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
1720 DBG(dev, "rx OOM %d" NL, slot);
1721 ++dev->estats.rx_dropped_oom;
1722 emac_recycle_rx_skb(dev, slot, 0);
1723 } else {
1724 dev->rx_sg_skb = skb;
1725 skb_put(skb, len);
1726 }
1727 } else if (!emac_rx_sg_append(dev, slot) &&
1728 (ctrl & MAL_RX_CTRL_LAST)) {
1729
1730 skb = dev->rx_sg_skb;
1731 dev->rx_sg_skb = NULL;
1732
1733 ctrl &= EMAC_BAD_RX_MASK;
1734 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1735 emac_parse_rx_error(dev, ctrl);
1736 ++dev->estats.rx_dropped_error;
1737 dev_kfree_skb(skb);
1738 len = 0;
1739 } else
1740 goto push_packet;
1741 }
1742 goto skip;
1743 oom:
1744 DBG(dev, "rx OOM %d" NL, slot);
1745
1746 ++dev->estats.rx_dropped_oom;
1747 emac_recycle_rx_skb(dev, slot, 0);
1748 goto next;
1749 }
1750
1751 if (received) {
1752 DBG2(dev, "rx %d BDs" NL, received);
1753 dev->rx_slot = slot;
1754 }
1755
1756 if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
1757 mb();
1758 if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
1759 DBG2(dev, "rx restart" NL);
1760 received = 0;
1761 goto again;
1762 }
1763
1764 if (dev->rx_sg_skb) {
1765 DBG2(dev, "dropping partial rx packet" NL);
1766 ++dev->estats.rx_dropped_error;
1767 dev_kfree_skb(dev->rx_sg_skb);
1768 dev->rx_sg_skb = NULL;
1769 }
1770
1771 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1772 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1773 emac_rx_enable(dev);
1774 dev->rx_slot = 0;
1775 }
1776 return received;
1777}
1778
1779
1780static int emac_peek_rx(void *param)
1781{
1782 struct emac_instance *dev = param;
1783
1784 return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1785}
1786
1787
1788static int emac_peek_rx_sg(void *param)
1789{
1790 struct emac_instance *dev = param;
1791
1792 int slot = dev->rx_slot;
1793 while (1) {
1794 u16 ctrl = dev->rx_desc[slot].ctrl;
1795 if (ctrl & MAL_RX_CTRL_EMPTY)
1796 return 0;
1797 else if (ctrl & MAL_RX_CTRL_LAST)
1798 return 1;
1799
1800 slot = (slot + 1) % NUM_RX_BUFF;
1801
1802
1803 if (unlikely(slot == dev->rx_slot))
1804 return 0;
1805 }
1806}
1807
1808
1809static void emac_rxde(void *param)
1810{
1811 struct emac_instance *dev = param;
1812
1813 ++dev->estats.rx_stopped;
1814 emac_rx_disable_async(dev);
1815}
1816
1817
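/* EMAC error interrupt: read and acknowledge the ISR, update error stats */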
1818static irqreturn_t emac_irq(int irq, void *dev_instance)
1819{
1820 struct emac_instance *dev = dev_instance;
1821 struct emac_regs __iomem *p = dev->emacp;
1822 struct emac_error_stats *st = &dev->estats;
1823 u32 isr;
1824
1825 spin_lock(&dev->lock);
1826
1827 isr = in_be32(&p->isr);
1828 out_be32(&p->isr, isr);
1829
1830 DBG(dev, "isr = %08x" NL, isr);
1831
1832 if (isr & EMAC4_ISR_TXPE)
1833 ++st->tx_parity;
1834 if (isr & EMAC4_ISR_RXPE)
1835 ++st->rx_parity;
1836 if (isr & EMAC4_ISR_TXUE)
1837 ++st->tx_underrun;
1838 if (isr & EMAC4_ISR_RXOE)
1839 ++st->rx_fifo_overrun;
1840 if (isr & EMAC_ISR_OVR)
1841 ++st->rx_overrun;
1842 if (isr & EMAC_ISR_BP)
1843 ++st->rx_bad_packet;
1844 if (isr & EMAC_ISR_RP)
1845 ++st->rx_runt_packet;
1846 if (isr & EMAC_ISR_SE)
1847 ++st->rx_short_event;
1848 if (isr & EMAC_ISR_ALE)
1849 ++st->rx_alignment_error;
1850 if (isr & EMAC_ISR_BFCS)
1851 ++st->rx_bad_fcs;
1852 if (isr & EMAC_ISR_PTLE)
1853 ++st->rx_packet_too_long;
1854 if (isr & EMAC_ISR_ORE)
1855 ++st->rx_out_of_range;
1856 if (isr & EMAC_ISR_IRE)
1857 ++st->rx_in_range;
1858 if (isr & EMAC_ISR_SQE)
1859 ++st->tx_sqe;
1860 if (isr & EMAC_ISR_TE)
1861 ++st->tx_errors;
1862
1863 spin_unlock(&dev->lock);
1864
1865 return IRQ_HANDLED;
1866}
1867
1868static struct net_device_stats *emac_stats(struct net_device *ndev)
1869{
1870 struct emac_instance *dev = netdev_priv(ndev);
1871 struct emac_stats *st = &dev->stats;
1872 struct emac_error_stats *est = &dev->estats;
1873 struct net_device_stats *nst = &dev->nstats;
1874 unsigned long flags;
1875
1876 DBG2(dev, "stats" NL);
1877
1878
1879 spin_lock_irqsave(&dev->lock, flags);
1880 nst->rx_packets = (unsigned long)st->rx_packets;
1881 nst->rx_bytes = (unsigned long)st->rx_bytes;
1882 nst->tx_packets = (unsigned long)st->tx_packets;
1883 nst->tx_bytes = (unsigned long)st->tx_bytes;
1884 nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1885 est->rx_dropped_error +
1886 est->rx_dropped_resize +
1887 est->rx_dropped_mtu);
1888 nst->tx_dropped = (unsigned long)est->tx_dropped;
1889
1890 nst->rx_errors = (unsigned long)est->rx_bd_errors;
1891 nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1892 est->rx_fifo_overrun +
1893 est->rx_overrun);
1894 nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1895 est->rx_alignment_error);
1896 nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
1897 est->rx_bad_fcs);
1898 nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
1899 est->rx_bd_short_event +
1900 est->rx_bd_packet_too_long +
1901 est->rx_bd_out_of_range +
1902 est->rx_bd_in_range +
1903 est->rx_runt_packet +
1904 est->rx_short_event +
1905 est->rx_packet_too_long +
1906 est->rx_out_of_range +
1907 est->rx_in_range);
1908
1909 nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
1910 nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
1911 est->tx_underrun);
1912 nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
1913 nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
1914 est->tx_bd_excessive_collisions +
1915 est->tx_bd_late_collision +
1916 est->tx_bd_multple_collisions);
1917 spin_unlock_irqrestore(&dev->lock, flags);
1918 return nst;
1919}
1920
1921static struct mal_commac_ops emac_commac_ops = {
1922 .poll_tx = &emac_poll_tx,
1923 .poll_rx = &emac_poll_rx,
1924 .peek_rx = &emac_peek_rx,
1925 .rxde = &emac_rxde,
1926};
1927
1928static struct mal_commac_ops emac_commac_sg_ops = {
1929 .poll_tx = &emac_poll_tx,
1930 .poll_rx = &emac_poll_rx,
1931 .peek_rx = &emac_peek_rx_sg,
1932 .rxde = &emac_rxde,
1933};
1934
1935
1936static int emac_ethtool_get_settings(struct net_device *ndev,
1937 struct ethtool_cmd *cmd)
1938{
1939 struct emac_instance *dev = netdev_priv(ndev);
1940
1941 cmd->supported = dev->phy.features;
1942 cmd->port = PORT_MII;
1943 cmd->phy_address = dev->phy.address;
1944 cmd->transceiver =
1945 dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
1946
1947 mutex_lock(&dev->link_lock);
1948 cmd->advertising = dev->phy.advertising;
1949 cmd->autoneg = dev->phy.autoneg;
1950 cmd->speed = dev->phy.speed;
1951 cmd->duplex = dev->phy.duplex;
1952 mutex_unlock(&dev->link_lock);
1953
1954 return 0;
1955}
1956
1957static int emac_ethtool_set_settings(struct net_device *ndev,
1958 struct ethtool_cmd *cmd)
1959{
1960 struct emac_instance *dev = netdev_priv(ndev);
1961 u32 f = dev->phy.features;
1962
1963 DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
1964 cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1965
1966
1967 if (dev->phy.address < 0)
1968 return -EOPNOTSUPP;
1969 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1970 return -EINVAL;
1971 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1972 return -EINVAL;
1973 if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1974 return -EINVAL;
1975
1976 if (cmd->autoneg == AUTONEG_DISABLE) {
1977 switch (cmd->speed) {
1978 case SPEED_10:
1979 if (cmd->duplex == DUPLEX_HALF
1980 && !(f & SUPPORTED_10baseT_Half))
1981 return -EINVAL;
1982 if (cmd->duplex == DUPLEX_FULL
1983 && !(f & SUPPORTED_10baseT_Full))
1984 return -EINVAL;
1985 break;
1986 case SPEED_100:
1987 if (cmd->duplex == DUPLEX_HALF
1988 && !(f & SUPPORTED_100baseT_Half))
1989 return -EINVAL;
1990 if (cmd->duplex == DUPLEX_FULL
1991 && !(f & SUPPORTED_100baseT_Full))
1992 return -EINVAL;
1993 break;
1994 case SPEED_1000:
1995 if (cmd->duplex == DUPLEX_HALF
1996 && !(f & SUPPORTED_1000baseT_Half))
1997 return -EINVAL;
1998 if (cmd->duplex == DUPLEX_FULL
1999 && !(f & SUPPORTED_1000baseT_Full))
2000 return -EINVAL;
2001 break;
2002 default:
2003 return -EINVAL;
2004 }
2005
2006 mutex_lock(&dev->link_lock);
2007 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
2008 cmd->duplex);
2009 mutex_unlock(&dev->link_lock);
2010
2011 } else {
2012 if (!(f & SUPPORTED_Autoneg))
2013 return -EINVAL;
2014
2015 mutex_lock(&dev->link_lock);
2016 dev->phy.def->ops->setup_aneg(&dev->phy,
2017 (cmd->advertising & f) |
2018 (dev->phy.advertising &
2019 (ADVERTISED_Pause |
2020 ADVERTISED_Asym_Pause)));
2021 mutex_unlock(&dev->link_lock);
2022 }
2023 emac_force_link_update(dev);
2024
2025 return 0;
2026}
2027
2028static void emac_ethtool_get_ringparam(struct net_device *ndev,
2029 struct ethtool_ringparam *rp)
2030{
2031 rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
2032 rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
2033}
2034
2035static void emac_ethtool_get_pauseparam(struct net_device *ndev,
2036 struct ethtool_pauseparam *pp)
2037{
2038 struct emac_instance *dev = netdev_priv(ndev);
2039
2040 mutex_lock(&dev->link_lock);
2041 if ((dev->phy.features & SUPPORTED_Autoneg) &&
2042 (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
2043 pp->autoneg = 1;
2044
2045 if (dev->phy.duplex == DUPLEX_FULL) {
2046 if (dev->phy.pause)
2047 pp->rx_pause = pp->tx_pause = 1;
2048 else if (dev->phy.asym_pause)
2049 pp->tx_pause = 1;
2050 }
2051 mutex_unlock(&dev->link_lock);
2052}
2053
2054static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
2055{
2056 struct emac_instance *dev = netdev_priv(ndev);
2057
2058 return dev->tah_dev != NULL;
2059}
2060
2061static int emac_get_regs_len(struct emac_instance *dev)
2062{
2063 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
2064 return sizeof(struct emac_ethtool_regs_subhdr) +
2065 EMAC4_ETHTOOL_REGS_SIZE(dev);
2066 else
2067 return sizeof(struct emac_ethtool_regs_subhdr) +
2068 EMAC_ETHTOOL_REGS_SIZE(dev);
2069}
2070
2071static int emac_ethtool_get_regs_len(struct net_device *ndev)
2072{
2073 struct emac_instance *dev = netdev_priv(ndev);
2074 int size;
2075
2076 size = sizeof(struct emac_ethtool_regs_hdr) +
2077 emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
2078 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2079 size += zmii_get_regs_len(dev->zmii_dev);
2080 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2081 size += rgmii_get_regs_len(dev->rgmii_dev);
2082 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2083 size += tah_get_regs_len(dev->tah_dev);
2084
2085 return size;
2086}
2087
2088static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2089{
2090 struct emac_ethtool_regs_subhdr *hdr = buf;
2091
2092 hdr->index = dev->cell_index;
2093 if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2094 hdr->version = EMAC4_ETHTOOL_REGS_VER;
2095 memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
2096 return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev));
2097 } else {
2098 hdr->version = EMAC_ETHTOOL_REGS_VER;
2099 memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
2100 return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev));
2101 }
2102}
2103
2104static void emac_ethtool_get_regs(struct net_device *ndev,
2105 struct ethtool_regs *regs, void *buf)
2106{
2107 struct emac_instance *dev = netdev_priv(ndev);
2108 struct emac_ethtool_regs_hdr *hdr = buf;
2109
2110 hdr->components = 0;
2111 buf = hdr + 1;
2112
2113 buf = mal_dump_regs(dev->mal, buf);
2114 buf = emac_dump_regs(dev, buf);
2115 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
2116 hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
2117 buf = zmii_dump_regs(dev->zmii_dev, buf);
2118 }
2119 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2120 hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
2121 buf = rgmii_dump_regs(dev->rgmii_dev, buf);
2122 }
2123 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
2124 hdr->components |= EMAC_ETHTOOL_REGS_TAH;
2125 buf = tah_dump_regs(dev->tah_dev, buf);
2126 }
2127}
2128
2129static int emac_ethtool_nway_reset(struct net_device *ndev)
2130{
2131 struct emac_instance *dev = netdev_priv(ndev);
2132 int res = 0;
2133
2134 DBG(dev, "nway_reset" NL);
2135
2136 if (dev->phy.address < 0)
2137 return -EOPNOTSUPP;
2138
2139 mutex_lock(&dev->link_lock);
2140 if (!dev->phy.autoneg) {
2141 res = -EINVAL;
2142 goto out;
2143 }
2144
2145 dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2146 out:
2147 mutex_unlock(&dev->link_lock);
2148 emac_force_link_update(dev);
2149 return res;
2150}
2151
2152static int emac_ethtool_get_stats_count(struct net_device *ndev)
2153{
2154 return EMAC_ETHTOOL_STATS_COUNT;
2155}
2156
2157static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2158 u8 * buf)
2159{
2160 if (stringset == ETH_SS_STATS)
2161 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
2162}
2163
2164static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2165 struct ethtool_stats *estats,
2166 u64 * tmp_stats)
2167{
2168 struct emac_instance *dev = netdev_priv(ndev);
2169
2170 memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2171 tmp_stats += sizeof(dev->stats) / sizeof(u64);
2172 memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
2173}
2174
2175static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2176 struct ethtool_drvinfo *info)
2177{
2178 struct emac_instance *dev = netdev_priv(ndev);
2179
2180 strcpy(info->driver, "ibm_emac");
2181 strcpy(info->version, DRV_VERSION);
2182 info->fw_version[0] = '\0';
2183 sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
2184 dev->cell_index, dev->ofdev->node->full_name);
2185 info->n_stats = emac_ethtool_get_stats_count(ndev);
2186 info->regdump_len = emac_ethtool_get_regs_len(ndev);
2187}
2188
2189static const struct ethtool_ops emac_ethtool_ops = {
2190 .get_settings = emac_ethtool_get_settings,
2191 .set_settings = emac_ethtool_set_settings,
2192 .get_drvinfo = emac_ethtool_get_drvinfo,
2193
2194 .get_regs_len = emac_ethtool_get_regs_len,
2195 .get_regs = emac_ethtool_get_regs,
2196
2197 .nway_reset = emac_ethtool_nway_reset,
2198
2199 .get_ringparam = emac_ethtool_get_ringparam,
2200 .get_pauseparam = emac_ethtool_get_pauseparam,
2201
2202 .get_rx_csum = emac_ethtool_get_rx_csum,
2203
2204 .get_strings = emac_ethtool_get_strings,
2205 .get_stats_count = emac_ethtool_get_stats_count,
2206 .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
2207
2208 .get_link = ethtool_op_get_link,
2209 .get_tx_csum = ethtool_op_get_tx_csum,
2210 .get_sg = ethtool_op_get_sg,
2211};
2212
2213static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2214{
2215 struct emac_instance *dev = netdev_priv(ndev);
2216 struct mii_ioctl_data *data = if_mii(rq);
2217
2218 DBG(dev, "ioctl %08x" NL, cmd);
2219
2220 if (dev->phy.address < 0)
2221 return -EOPNOTSUPP;
2222
2223 switch (cmd) {
2224 case SIOCGMIIPHY:
2225 data->phy_id = dev->phy.address;
		/* Fall through */
2227 case SIOCGMIIREG:
2228 data->val_out = emac_mdio_read(ndev, dev->phy.address,
2229 data->reg_num);
2230 return 0;
2231
2232 case SIOCSMIIREG:
2233 emac_mdio_write(ndev, dev->phy.address, data->reg_num,
2234 data->val_in);
2235 return 0;
2236 default:
2237 return -EOPNOTSUPP;
2238 }
2239}
2240
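/* Bookkeeping for one probe-time dependency (MAL, ZMII, RGMII, TAH, MDIO or the previous EMAC) */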
2241struct emac_depentry {
2242 u32 phandle;
2243 struct device_node *node;
2244 struct of_device *ofdev;
2245 void *drvdata;
2246};
2247
2248#define EMAC_DEP_MAL_IDX 0
2249#define EMAC_DEP_ZMII_IDX 1
2250#define EMAC_DEP_RGMII_IDX 2
2251#define EMAC_DEP_TAH_IDX 3
2252#define EMAC_DEP_MDIO_IDX 4
2253#define EMAC_DEP_PREV_IDX 5
2254#define EMAC_DEP_COUNT 6
2255
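/* Try to resolve every dependency to a bound device; returns non-zero once all of them are present */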
2256static int __devinit emac_check_deps(struct emac_instance *dev,
2257 struct emac_depentry *deps)
2258{
2259 int i, there = 0;
2260 struct device_node *np;
2261
2262 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2263
2264 if (deps[i].phandle == 0) {
2265 there++;
2266 continue;
2267 }
2268
2269 if (i == EMAC_DEP_PREV_IDX) {
2270 np = *(dev->blist - 1);
2271 if (np == NULL) {
2272 deps[i].phandle = 0;
2273 there++;
2274 continue;
2275 }
2276 if (deps[i].node == NULL)
2277 deps[i].node = of_node_get(np);
2278 }
2279 if (deps[i].node == NULL)
2280 deps[i].node = of_find_node_by_phandle(deps[i].phandle);
2281 if (deps[i].node == NULL)
2282 continue;
2283 if (deps[i].ofdev == NULL)
2284 deps[i].ofdev = of_find_device_by_node(deps[i].node);
2285 if (deps[i].ofdev == NULL)
2286 continue;
2287 if (deps[i].drvdata == NULL)
2288 deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
2289 if (deps[i].drvdata != NULL)
2290 there++;
2291 }
2292 return (there == EMAC_DEP_COUNT);
2293}
2294
2295static void emac_put_deps(struct emac_instance *dev)
2296{
2297 if (dev->mal_dev)
2298 of_dev_put(dev->mal_dev);
2299 if (dev->zmii_dev)
2300 of_dev_put(dev->zmii_dev);
2301 if (dev->rgmii_dev)
2302 of_dev_put(dev->rgmii_dev);
2303 if (dev->mdio_dev)
2304 of_dev_put(dev->mdio_dev);
2305 if (dev->tah_dev)
2306 of_dev_put(dev->tah_dev);
2307}
2308
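/* Bus notifier: wake up probes waiting on their dependencies whenever a driver binds */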
2309static int __devinit emac_of_bus_notify(struct notifier_block *nb,
2310 unsigned long action, void *data)
2311{
2312
2313 if (action == BUS_NOTIFY_BOUND_DRIVER)
2314 wake_up_all(&emac_probe_wait);
2315 return 0;
2316}
2317
2318static struct notifier_block emac_of_bus_notifier __devinitdata = {
2319 .notifier_call = emac_of_bus_notify
2320};
2321
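/* Wait, with a timeout, for every device this EMAC depends on to be probed */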
2322static int __devinit emac_wait_deps(struct emac_instance *dev)
2323{
2324 struct emac_depentry deps[EMAC_DEP_COUNT];
2325 int i, err;
2326
2327 memset(&deps, 0, sizeof(deps));
2328
2329 deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
2330 deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
2331 deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
2332 if (dev->tah_ph)
2333 deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
2334 if (dev->mdio_ph)
2335 deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
2336 if (dev->blist && dev->blist > emac_boot_list)
2337 deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
2338 bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
2339 wait_event_timeout(emac_probe_wait,
2340 emac_check_deps(dev, deps),
2341 EMAC_PROBE_DEP_TIMEOUT);
2342 bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
2343 err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
2344 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2345 if (deps[i].node)
2346 of_node_put(deps[i].node);
2347 if (err && deps[i].ofdev)
2348 of_dev_put(deps[i].ofdev);
2349 }
2350 if (err == 0) {
2351 dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
2352 dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
2353 dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
2354 dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
2355 dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
2356 }
2357 if (deps[EMAC_DEP_PREV_IDX].ofdev)
2358 of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
2359 return err;
2360}
2361
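/* Read a u32 device-tree property; optionally report an error when a mandatory one is missing */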
2362static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
2363 u32 *val, int fatal)
2364{
2365 int len;
2366 const u32 *prop = of_get_property(np, name, &len);
2367 if (prop == NULL || len < sizeof(u32)) {
2368 if (fatal)
2369 printk(KERN_ERR "%s: missing %s property\n",
2370 np->full_name, name);
2371 return -ENODEV;
2372 }
2373 *val = *prop;
2374 return 0;
2375}
2376
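/* Find and initialize the PHY, or handle the PHY-less and GPCS special cases */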
2377static int __devinit emac_init_phy(struct emac_instance *dev)
2378{
2379 struct device_node *np = dev->ofdev->node;
2380 struct net_device *ndev = dev->ndev;
2381 u32 phy_map, adv;
2382 int i;
2383
2384 dev->phy.dev = ndev;
2385 dev->phy.mode = dev->phy_mode;
2386
2387
2388
2389
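 /* Neither phy-address nor phy-map given: PHY-less configuration */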
2390 if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
2391 emac_reset(dev);
2392
2393
2394
2395
2396 dev->phy.address = -1;
2397 dev->phy.features = SUPPORTED_MII;
2398 if (emac_phy_supports_gige(dev->phy_mode))
2399 dev->phy.features |= SUPPORTED_1000baseT_Full;
2400 else
2401 dev->phy.features |= SUPPORTED_100baseT_Full;
2402 dev->phy.pause = 1;
2403
2404 return 0;
2405 }
2406
2407 mutex_lock(&emac_phy_map_lock);
2408 phy_map = dev->phy_map | busy_phy_map;
2409
2410 DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
2411
2412 dev->phy.mdio_read = emac_mdio_read;
2413 dev->phy.mdio_write = emac_mdio_write;
2414
2415
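 /* 440GX PHY clock workaround: set SDR0_MFR_ECS while probing the PHY */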
2416#ifdef CONFIG_PPC_DCR_NATIVE
2417 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2418 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2419#endif
2420
2421 emac_rx_clk_tx(dev);
2422
2423
2424#ifdef CONFIG_PPC_DCR_NATIVE
2425 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2426 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2427#endif
2428
2429
2430
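 /* GPCS-based modes: use the gpcs-address from the device-tree, or fall back to the EMAC cell index as the PHY address */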
2431 if (emac_phy_gpcs(dev->phy.mode)) {
2432
2433
2434
2435
2436
2437
2438
2439
2440 dev->phy.gpcs_address = dev->gpcs_address;
2441 if (dev->phy.gpcs_address == 0xffffffff)
2442 dev->phy.address = dev->cell_index;
2443 }
2444
2445 emac_configure(dev);
2446
2447 if (dev->phy_address != 0xffffffff)
2448 phy_map = ~(1 << dev->phy_address);
2449
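 /* Scan all 32 MDIO addresses, skipping busy or non-responding ones */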
2450 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2451 if (!(phy_map & 1)) {
2452 int r;
2453 busy_phy_map |= 1 << i;
2454
2455
2456 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2457 if (r == 0xffff || r < 0)
2458 continue;
2459 if (!emac_mii_phy_probe(&dev->phy, i))
2460 break;
2461 }
2462
2463
2464#ifdef CONFIG_PPC_DCR_NATIVE
2465 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2466 dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
2467#endif
2468 mutex_unlock(&emac_phy_map_lock);
2469 if (i == 0x20) {
2470 printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
2471 return -ENXIO;
2472 }
2473
2474
2475 if (dev->phy.def->ops->init)
2476 dev->phy.def->ops->init(&dev->phy);
2477
2478
2479 dev->phy.def->features &= ~dev->phy_feat_exc;
2480
2481
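 /* Configure the link: autonegotiate when the PHY supports it, otherwise force the best speed/duplex it offers */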
2482 if (dev->phy.features & SUPPORTED_Autoneg) {
2483 adv = dev->phy.features;
2484 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
2485 adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2486
2487 dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2488 } else {
2489 u32 f = dev->phy.def->features;
2490 int speed = SPEED_10, fd = DUPLEX_HALF;
2491
2492
2493 if (f & SUPPORTED_1000baseT_Full) {
2494 speed = SPEED_1000;
2495 fd = DUPLEX_FULL;
2496 } else if (f & SUPPORTED_1000baseT_Half)
2497 speed = SPEED_1000;
2498 else if (f & SUPPORTED_100baseT_Full) {
2499 speed = SPEED_100;
2500 fd = DUPLEX_FULL;
2501 } else if (f & SUPPORTED_100baseT_Half)
2502 speed = SPEED_100;
2503 else if (f & SUPPORTED_10baseT_Full)
2504 fd = DUPLEX_FULL;
2505
2506
2507 dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
2508 }
2509 return 0;
2510}
2511
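/* Parse the EMAC device-tree node and fill in the per-instance configuration */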
2512static int __devinit emac_init_config(struct emac_instance *dev)
2513{
2514 struct device_node *np = dev->ofdev->node;
2515 const void *p;
2516 unsigned int plen;
2517 const char *pm, *phy_modes[] = {
2518 [PHY_MODE_NA] = "",
2519 [PHY_MODE_MII] = "mii",
2520 [PHY_MODE_RMII] = "rmii",
2521 [PHY_MODE_SMII] = "smii",
2522 [PHY_MODE_RGMII] = "rgmii",
2523 [PHY_MODE_TBI] = "tbi",
2524 [PHY_MODE_GMII] = "gmii",
2525 [PHY_MODE_RTBI] = "rtbi",
2526 [PHY_MODE_SGMII] = "sgmii",
2527 };
2528
2529
2530 if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2531 return -ENXIO;
2532 if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2533 return -ENXIO;
2534 if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2535 return -ENXIO;
2536 if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2537 return -ENXIO;
2538 if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2539 dev->max_mtu = 1500;
2540 if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2541 dev->rx_fifo_size = 2048;
2542 if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2543 dev->tx_fifo_size = 2048;
2544 if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2545 dev->rx_fifo_size_gige = dev->rx_fifo_size;
2546 if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2547 dev->tx_fifo_size_gige = dev->tx_fifo_size;
2548 if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2549 dev->phy_address = 0xffffffff;
2550 if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2551 dev->phy_map = 0xffffffff;
2552 if (emac_read_uint_prop(np, "gpcs-address", &dev->gpcs_address, 0))
2553 dev->gpcs_address = 0xffffffff;
2554 if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2555 return -ENXIO;
2556 if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2557 dev->tah_ph = 0;
2558 if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
2559 dev->tah_port = 0;
2560 if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2561 dev->mdio_ph = 0;
2562 if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
2563 dev->zmii_ph = 0;
2564 if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
2565 dev->zmii_port = 0xffffffff;
2566 if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
2567 dev->rgmii_ph = 0;
2568 if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
2569 dev->rgmii_port = 0xffffffff;
2570 if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2571 dev->fifo_entry_size = 16;
2572 if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2573 dev->mal_burst_size = 256;
2574
2575
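 /* Decode the phy-mode property: try the string form first, then the older numeric encoding */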
2576 dev->phy_mode = PHY_MODE_NA;
2577 pm = of_get_property(np, "phy-mode", &plen);
2578 if (pm != NULL) {
2579 int i;
2580 for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
2581 if (!strcasecmp(pm, phy_modes[i])) {
2582 dev->phy_mode = i;
2583 break;
2584 }
2585 }
2586
2587
2588 if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
2589 u32 nmode = *(const u32 *)pm;
2590 if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
2591 dev->phy_mode = nmode;
2592 }
2593
2594
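 /* Derive chip-specific feature flags from the compatible property */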
2595 if (of_device_is_compatible(np, "ibm,emac4sync")) {
2596 dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
2597 if (of_device_is_compatible(np, "ibm,emac-460ex") ||
2598 of_device_is_compatible(np, "ibm,emac-460gt"))
2599 dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX;
2600 if (of_device_is_compatible(np, "ibm,emac-405ex") ||
2601 of_device_is_compatible(np, "ibm,emac-405exr"))
2602 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2603 } else if (of_device_is_compatible(np, "ibm,emac4")) {
2604 dev->features |= EMAC_FTR_EMAC4;
2605 if (of_device_is_compatible(np, "ibm,emac-440gx"))
2606 dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
2607 } else {
2608 if (of_device_is_compatible(np, "ibm,emac-440ep") ||
2609 of_device_is_compatible(np, "ibm,emac-440gr"))
2610 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2611 if (of_device_is_compatible(np, "ibm,emac-405ez")) {
2612#ifdef CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL
2613 dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
2614#else
2615 printk(KERN_ERR "%s: Flow control not disabled!\n",
2616 np->full_name);
2617 return -ENXIO;
2618#endif
2619 }
2620
2621 }
2622
2623
2624 if (of_get_property(np, "has-inverted-stacr-oc", NULL))
2625 dev->features |= EMAC_FTR_STACR_OC_INVERT;
2626 if (of_get_property(np, "has-new-stacr-staopc", NULL))
2627 dev->features |= EMAC_FTR_HAS_NEW_STACR;
2628
2629
2630 if (of_device_is_compatible(np, "ibm,emac-axon"))
2631 dev->features |= EMAC_FTR_HAS_NEW_STACR |
2632 EMAC_FTR_STACR_OC_INVERT;
2633
2634
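 /* Enable TAH/ZMII/RGMII support when the node references one, failing if that support is not built in */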
2635 if (dev->tah_ph != 0) {
2636#ifdef CONFIG_IBM_NEW_EMAC_TAH
2637 dev->features |= EMAC_FTR_HAS_TAH;
2638#else
2639 printk(KERN_ERR "%s: TAH support not enabled !\n",
2640 np->full_name);
2641 return -ENXIO;
2642#endif
2643 }
2644
2645 if (dev->zmii_ph != 0) {
2646#ifdef CONFIG_IBM_NEW_EMAC_ZMII
2647 dev->features |= EMAC_FTR_HAS_ZMII;
2648#else
2649 printk(KERN_ERR "%s: ZMII support not enabled !\n",
2650 np->full_name);
2651 return -ENXIO;
2652#endif
2653 }
2654
2655 if (dev->rgmii_ph != 0) {
2656#ifdef CONFIG_IBM_NEW_EMAC_RGMII
2657 dev->features |= EMAC_FTR_HAS_RGMII;
2658#else
2659 printk(KERN_ERR "%s: RGMII support not enabled !\n",
2660 np->full_name);
2661 return -ENXIO;
2662#endif
2663 }
2664
2665
2666 p = of_get_property(np, "local-mac-address", NULL);
2667 if (p == NULL) {
2668 printk(KERN_ERR "%s: Can't find local-mac-address property\n",
2669 np->full_name);
2670 return -ENXIO;
2671 }
2672 memcpy(dev->ndev->dev_addr, p, 6);
2673
2674
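 /* The address hash table (XAHT) geometry differs between EMAC4 and EMAC4SYNC */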
2675 if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2676 dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
2677 dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
2678 } else {
2679 dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
2680 dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
2681 }
2682
2683 DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
2684 DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
2685 DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
2686 DBG(dev, "max_mtu : %d\n", dev->max_mtu);
2687 DBG(dev, "OPB freq : %d\n", dev->opb_bus_freq);
2688
2689 return 0;
2690}
2691
2692static const struct net_device_ops emac_netdev_ops = {
2693 .ndo_open = emac_open,
2694 .ndo_stop = emac_close,
2695 .ndo_get_stats = emac_stats,
2696 .ndo_set_multicast_list = emac_set_multicast_list,
2697 .ndo_do_ioctl = emac_ioctl,
2698 .ndo_tx_timeout = emac_tx_timeout,
2699 .ndo_validate_addr = eth_validate_addr,
2700 .ndo_set_mac_address = eth_mac_addr,
2701 .ndo_start_xmit = emac_start_xmit,
2702 .ndo_change_mtu = eth_change_mtu,
2703};
2704
2705static const struct net_device_ops emac_gige_netdev_ops = {
2706 .ndo_open = emac_open,
2707 .ndo_stop = emac_close,
2708 .ndo_get_stats = emac_stats,
2709 .ndo_set_multicast_list = emac_set_multicast_list,
2710 .ndo_do_ioctl = emac_ioctl,
2711 .ndo_tx_timeout = emac_tx_timeout,
2712 .ndo_validate_addr = eth_validate_addr,
2713 .ndo_set_mac_address = eth_mac_addr,
2714 .ndo_start_xmit = emac_start_xmit_sg,
2715 .ndo_change_mtu = emac_change_mtu,
2716};
2717
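/* Probe one EMAC instance from its OF device node */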
2718static int __devinit emac_probe(struct of_device *ofdev,
2719 const struct of_device_id *match)
2720{
2721 struct net_device *ndev;
2722 struct emac_instance *dev;
2723 struct device_node *np = ofdev->node;
2724 struct device_node **blist = NULL;
2725 int err, i;
2726
2727
2728
2729
2730
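 /* Skip unused or unavailable EMAC nodes */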
2731 if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
2732 return -ENODEV;
2733
2734
2735 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2736 if (emac_boot_list[i] == np)
2737 blist = &emac_boot_list[i];
2738
2739
2740 err = -ENOMEM;
2741 ndev = alloc_etherdev(sizeof(struct emac_instance));
2742 if (!ndev) {
2743 printk(KERN_ERR "%s: could not allocate ethernet device!\n",
2744 np->full_name);
2745 goto err_gone;
2746 }
2747 dev = netdev_priv(ndev);
2748 dev->ndev = ndev;
2749 dev->ofdev = ofdev;
2750 dev->blist = blist;
2751 SET_NETDEV_DEV(ndev, &ofdev->dev);
2752
2753
2754 mutex_init(&dev->mdio_lock);
2755 mutex_init(&dev->link_lock);
2756 spin_lock_init(&dev->lock);
2757 INIT_WORK(&dev->reset_work, emac_reset_work);
2758
2759
2760 err = emac_init_config(dev);
2761 if (err != 0)
2762 goto err_free;
2763
2764
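 /* Map interrupts: the EMAC IRQ is mandatory, the WOL IRQ is optional */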
2765 dev->emac_irq = irq_of_parse_and_map(np, 0);
2766 dev->wol_irq = irq_of_parse_and_map(np, 1);
2767 if (dev->emac_irq == NO_IRQ) {
2768 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
2769 goto err_free;
2770 }
2771 ndev->irq = dev->emac_irq;
2772
2773
2774 if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
2775 printk(KERN_ERR "%s: Can't get registers address\n",
2776 np->full_name);
2777 goto err_irq_unmap;
2778 }
2779
2780 dev->emacp = ioremap(dev->rsrc_regs.start,
2781 dev->rsrc_regs.end - dev->rsrc_regs.start + 1);
2782 if (dev->emacp == NULL) {
2783 printk(KERN_ERR "%s: Can't map device registers!\n",
2784 np->full_name);
2785 err = -ENOMEM;
2786 goto err_irq_unmap;
2787 }
2788
2789
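 /* Wait for the MAL and the optional ZMII/RGMII/TAH/MDIO dependencies */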
2790 err = emac_wait_deps(dev);
2791 if (err) {
2792 printk(KERN_ERR
2793 "%s: Timeout waiting for dependent devices\n",
2794 np->full_name);
2795
2796 goto err_reg_unmap;
2797 }
2798 dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
2799 if (dev->mdio_dev != NULL)
2800 dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
2801
2802
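 /* Register our channels with the MAL */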
2803 dev->commac.ops = &emac_commac_ops;
2804 dev->commac.dev = dev;
2805 dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2806 dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2807 err = mal_register_commac(dev->mal, &dev->commac);
2808 if (err) {
2809 printk(KERN_ERR "%s: failed to register with mal %s!\n",
2810 np->full_name, dev->mal_dev->node->full_name);
2811 goto err_rel_deps;
2812 }
2813 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2814 dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2815
2816
2817 dev->tx_desc =
2818 dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2819 dev->rx_desc =
2820 dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2821
2822 DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2823 DBG(dev, "rx_desc %p" NL, dev->rx_desc);
2824
2825
2826 memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2827 memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2828 memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
2829 memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
2830
2831
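 /* Attach to the ZMII/RGMII/TAH helper devices as dictated by the feature flags */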
2832 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2833 (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2834 goto err_unreg_commac;
2835
2836
2837 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2838 (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2839 goto err_detach_zmii;
2840
2841
2842 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2843 (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2844 goto err_detach_rgmii;
2845
2846
2847 dev->phy.speed = SPEED_100;
2848 dev->phy.duplex = DUPLEX_FULL;
2849 dev->phy.autoneg = AUTONEG_DISABLE;
2850 dev->phy.pause = dev->phy.asym_pause = 0;
2851 dev->stop_timeout = STOP_TIMEOUT_100;
2852 INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2853
2854
2855 err = emac_init_phy(dev);
2856 if (err != 0)
2857 goto err_detach_tah;
2858
2859 if (dev->tah_dev)
2860 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2861 ndev->watchdog_timeo = 5 * HZ;
2862 if (emac_phy_supports_gige(dev->phy_mode)) {
2863 ndev->netdev_ops = &emac_gige_netdev_ops;
2864 dev->commac.ops = &emac_commac_sg_ops;
2865 } else
2866 ndev->netdev_ops = &emac_netdev_ops;
2867 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2868
2869 netif_carrier_off(ndev);
2870 netif_stop_queue(ndev);
2871
2872 err = register_netdev(ndev);
2873 if (err) {
2874 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2875 np->full_name, err);
2876 goto err_detach_tah;
2877 }
2878
2879
2880
2881
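 /* Publish drvdata only once the device is fully initialized */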
2882 wmb();
2883 dev_set_drvdata(&ofdev->dev, dev);
2884
2885
2886 wake_up_all(&emac_probe_wait);
2887
2888
2889 printk(KERN_INFO "%s: EMAC-%d %s, MAC %pM\n",
2890 ndev->name, dev->cell_index, np->full_name, ndev->dev_addr);
2891
2892 if (dev->phy_mode == PHY_MODE_SGMII)
2893 printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
2894
2895 if (dev->phy.address >= 0)
2896 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2897 dev->phy.def->name, dev->phy.address);
2898
2899 emac_dbg_register(dev);
2900
2901
2902 return 0;
2903
2904
2905
2906 err_detach_tah:
2907 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2908 tah_detach(dev->tah_dev, dev->tah_port);
2909 err_detach_rgmii:
2910 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2911 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2912 err_detach_zmii:
2913 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2914 zmii_detach(dev->zmii_dev, dev->zmii_port);
2915 err_unreg_commac:
2916 mal_unregister_commac(dev->mal, &dev->commac);
2917 err_rel_deps:
2918 emac_put_deps(dev);
2919 err_reg_unmap:
2920 iounmap(dev->emacp);
2921 err_irq_unmap:
2922 if (dev->wol_irq != NO_IRQ)
2923 irq_dispose_mapping(dev->wol_irq);
2924 if (dev->emac_irq != NO_IRQ)
2925 irq_dispose_mapping(dev->emac_irq);
2926 err_free:
2927 free_netdev(ndev);
2928 err_gone:
2929
2930
2931
2932
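 /* If we were on the boot list, remove ourselves and wake up anyone waiting on us */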
2933 if (blist) {
2934 *blist = NULL;
2935 wake_up_all(&emac_probe_wait);
2936 }
2937 return err;
2938}
2939
2940static int __devexit emac_remove(struct of_device *ofdev)
2941{
2942 struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
2943
2944 DBG(dev, "remove" NL);
2945
2946 dev_set_drvdata(&ofdev->dev, NULL);
2947
2948 unregister_netdev(dev->ndev);
2949
2950 flush_scheduled_work();
2951
2952 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2953 tah_detach(dev->tah_dev, dev->tah_port);
2954 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2955 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2956 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2957 zmii_detach(dev->zmii_dev, dev->zmii_port);
2958
2959 mal_unregister_commac(dev->mal, &dev->commac);
2960 emac_put_deps(dev);
2961
2962 emac_dbg_unregister(dev);
2963 iounmap(dev->emacp);
2964
2965 if (dev->wol_irq != NO_IRQ)
2966 irq_dispose_mapping(dev->wol_irq);
2967 if (dev->emac_irq != NO_IRQ)
2968 irq_dispose_mapping(dev->emac_irq);
2969
2970 free_netdev(dev->ndev);
2971
2972 return 0;
2973}
2974
2975
2976static struct of_device_id emac_match[] =
2977{
2978 {
2979 .type = "network",
2980 .compatible = "ibm,emac",
2981 },
2982 {
2983 .type = "network",
2984 .compatible = "ibm,emac4",
2985 },
2986 {
2987 .type = "network",
2988 .compatible = "ibm,emac4sync",
2989 },
2990 {},
2991};
2992MODULE_DEVICE_TABLE(of, emac_match);
2993
2994static struct of_platform_driver emac_driver = {
2995 .name = "emac",
2996 .match_table = emac_match,
2997
2998 .probe = emac_probe,
2999 .remove = emac_remove,
3000};
3001
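/* Build the list of EMAC nodes, sorted by cell index; used to order probing and to resolve previous-EMAC dependencies */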
3002static void __init emac_make_bootlist(void)
3003{
3004 struct device_node *np = NULL;
3005 int j, max, i = 0, k;
3006 int cell_indices[EMAC_BOOT_LIST_SIZE];
3007
3008
3009 while ((np = of_find_all_nodes(np)) != NULL) {
3010 const u32 *idx;
3011
3012 if (of_match_node(emac_match, np) == NULL)
3013 continue;
3014 if (of_get_property(np, "unused", NULL))
3015 continue;
3016 idx = of_get_property(np, "cell-index", NULL);
3017 if (idx == NULL)
3018 continue;
3019 cell_indices[i] = *idx;
3020 emac_boot_list[i++] = of_node_get(np);
3021 if (i >= EMAC_BOOT_LIST_SIZE) {
3022 of_node_put(np);
3023 break;
3024 }
3025 }
3026 max = i;
3027
3028
3029 for (i = 0; max > 1 && (i < (max - 1)); i++)
3030 for (j = i; j < max; j++) {
3031 if (cell_indices[i] > cell_indices[j]) {
3032 np = emac_boot_list[i];
3033 emac_boot_list[i] = emac_boot_list[j];
3034 emac_boot_list[j] = np;
3035 k = cell_indices[i];
3036 cell_indices[i] = cell_indices[j];
3037 cell_indices[j] = k;
3038 }
3039 }
3040}
3041
3042static int __init emac_init(void)
3043{
3044 int rc;
3045
3046 printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
3047
3048
3049 emac_init_debug();
3050
3051
3052 emac_make_bootlist();
3053
3054
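 /* Register the helper drivers (MAL, ZMII, RGMII, TAH) before the EMAC driver itself */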
3055 rc = mal_init();
3056 if (rc)
3057 goto err;
3058 rc = zmii_init();
3059 if (rc)
3060 goto err_mal;
3061 rc = rgmii_init();
3062 if (rc)
3063 goto err_zmii;
3064 rc = tah_init();
3065 if (rc)
3066 goto err_rgmii;
3067 rc = of_register_platform_driver(&emac_driver);
3068 if (rc)
3069 goto err_tah;
3070
3071 return 0;
3072
3073 err_tah:
3074 tah_exit();
3075 err_rgmii:
3076 rgmii_exit();
3077 err_zmii:
3078 zmii_exit();
3079 err_mal:
3080 mal_exit();
3081 err:
3082 return rc;
3083}
3084
3085static void __exit emac_exit(void)
3086{
3087 int i;
3088
3089 of_unregister_platform_driver(&emac_driver);
3090
3091 tah_exit();
3092 rgmii_exit();
3093 zmii_exit();
3094 mal_exit();
3095 emac_fini_debug();
3096
3097
3098 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3099 if (emac_boot_list[i])
3100 of_node_put(emac_boot_list[i]);
3101}
3102
3103module_init(emac_init);
3104module_exit(emac_exit);
3105