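/*
 * PXA168 10/100 Ethernet driver ("pxa168-eth") for the Marvell PXA168
 * family of SoCs.
 */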
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/pxa168_eth.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/udp.h>
#include <linux/workqueue.h>
#include <linux/pgtable.h>

#include <asm/cacheflush.h>

#define DRIVER_NAME "pxa168-eth"
#define DRIVER_VERSION "0.3"
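/* Register offsets from the port's register base */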
#define PHY_ADDRESS 0x0000
#define SMI 0x0010
#define PORT_CONFIG 0x0400
#define PORT_CONFIG_EXT 0x0408
#define PORT_COMMAND 0x0410
#define PORT_STATUS 0x0418
#define HTPR 0x0428
#define MAC_ADDR_LOW 0x0430
#define MAC_ADDR_HIGH 0x0438
#define SDMA_CONFIG 0x0440
#define SDMA_CMD 0x0448
#define INT_CAUSE 0x0450
#define INT_W_CLEAR 0x0454
#define INT_MASK 0x0458
#define ETH_F_RX_DESC_0 0x0480
#define ETH_C_RX_DESC_0 0x04A0
#define ETH_C_TX_DESC_1 0x04E4

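/* Bits in the SMI register */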
#define SMI_BUSY (1 << 28)
#define SMI_R_VALID (1 << 27)
#define SMI_OP_W (0 << 26)
#define SMI_OP_R (1 << 26)

#define PHY_WAIT_ITERATIONS 10

#define PXA168_ETH_PHY_ADDR_DEFAULT 0

#define BUF_OWNED_BY_DMA (1 << 31)

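/* RX descriptor command/status bits */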
#define RX_EN_INT (1 << 23)
#define RX_FIRST_DESC (1 << 17)
#define RX_LAST_DESC (1 << 16)
#define RX_ERROR (1 << 15)

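/* TX descriptor command/status bits */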
#define TX_EN_INT (1 << 23)
#define TX_GEN_CRC (1 << 22)
#define TX_ZERO_PADDING (1 << 18)
#define TX_FIRST_DESC (1 << 17)
#define TX_LAST_DESC (1 << 16)
#define TX_ERROR (1 << 15)

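/* Bits in the SDMA Command (SDMA_CMD) register */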
#define SDMA_CMD_AT (1 << 31)
#define SDMA_CMD_TXDL (1 << 24)
#define SDMA_CMD_TXDH (1 << 23)
#define SDMA_CMD_AR (1 << 15)
#define SDMA_CMD_ERD (1 << 7)

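/* Bits in the Port Configuration (PORT_CONFIG) register */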
#define PCR_DUPLEX_FULL (1 << 15)
#define PCR_HS (1 << 12)
#define PCR_EN (1 << 7)
#define PCR_PM (1 << 0)

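/* Bits in the Port Configuration Extend (PORT_CONFIG_EXT) register */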
#define PCXR_2BSM (1 << 28)
#define PCXR_DSCP_EN (1 << 21)
#define PCXR_RMII_EN (1 << 20)
#define PCXR_AN_SPEED_DIS (1 << 19)
#define PCXR_SPEED_100 (1 << 18)
#define PCXR_MFL_1518 (0 << 14)
#define PCXR_MFL_1536 (1 << 14)
#define PCXR_MFL_2048 (2 << 14)
#define PCXR_MFL_64K (3 << 14)
#define PCXR_FLOWCTL_DIS (1 << 12)
#define PCXR_FLP (1 << 11)
#define PCXR_AN_FLOWCTL_DIS (1 << 10)
#define PCXR_AN_DUPLEX_DIS (1 << 9)
#define PCXR_PRIO_TX_OFF 3
#define PCXR_TX_HIGH_PRI (7 << PCXR_PRIO_TX_OFF)

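/* Bits in the SDMA Configuration (SDMA_CONFIG) register */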
#define SDCR_BSZ_OFF 12
#define SDCR_BSZ8 (3 << SDCR_BSZ_OFF)
#define SDCR_BSZ4 (2 << SDCR_BSZ_OFF)
#define SDCR_BSZ2 (1 << SDCR_BSZ_OFF)
#define SDCR_BSZ1 (0 << SDCR_BSZ_OFF)
#define SDCR_BLMR (1 << 6)
#define SDCR_BLMT (1 << 7)
#define SDCR_RIFB (1 << 9)
#define SDCR_RC_OFF 2
#define SDCR_RC_MAX_RETRANS (0xf << SDCR_RC_OFF)

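/* Bits in the Interrupt Cause (INT_CAUSE) and Interrupt Mask (INT_MASK) registers */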
#define ICR_RXBUF (1 << 0)
#define ICR_TXBUF_H (1 << 2)
#define ICR_TXBUF_L (1 << 3)
#define ICR_TXEND_H (1 << 6)
#define ICR_TXEND_L (1 << 7)
#define ICR_RXERR (1 << 8)
#define ICR_TXERR_H (1 << 10)
#define ICR_TXERR_L (1 << 11)
#define ICR_TX_UDR (1 << 13)
#define ICR_MII_CH (1 << 28)

#define ALL_INTS (ICR_TXBUF_H | ICR_TXBUF_L | ICR_TX_UDR |\
                  ICR_TXERR_H | ICR_TXERR_L |\
                  ICR_TXEND_H | ICR_TXEND_L |\
                  ICR_RXBUF | ICR_RXERR | ICR_MII_CH)

#define ETH_HW_IP_ALIGN 2

#define NUM_RX_DESCS 64
#define NUM_TX_DESCS 64

#define HASH_ADD 0
#define HASH_DELETE 1
#define HASH_ADDR_TABLE_SIZE 0x4000
#define HOP_NUMBER 12

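/* Port status flags */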
#define PORT_SPEED_100 (1 << 0)
#define FULL_DUPLEX (1 << 1)
#define FLOW_CONTROL_DISABLED (1 << 2)
#define LINK_UP (1 << 3)

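/* Flag bits in pep->work_todo */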
#define WORK_TX_DONE (1 << 1)

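/*
 * Extra headroom to reserve so that skb->data can be realigned to the
 * start of a cache line before the buffer is DMA-mapped (see rxq_refill()).
 */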
#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)

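/* RX and TX DMA descriptors, in the layout the hardware expects */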
struct rx_desc {
        u32 cmd_sts;
        u16 byte_cnt;
        u16 buf_size;
        u32 buf_ptr;
        u32 next_desc_ptr;
};

struct tx_desc {
        u32 cmd_sts;
        u16 reserved;
        u16 byte_cnt;
        u32 buf_ptr;
        u32 next_desc_ptr;
};

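/* Per-port driver private state */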
struct pxa168_eth_private {
        struct platform_device *pdev;
        int port_num;
        int phy_addr;
        int phy_speed;
        int phy_duplex;
        phy_interface_t phy_intf;

        int rx_resource_err;    /* RX ring resource error flag */

        /* Next available and first used descriptors on the RX ring */
        int rx_curr_desc_q, rx_used_desc_q;

        /* Next available and first used descriptors on the TX ring */
        int tx_curr_desc_q, tx_used_desc_q;

        struct rx_desc *p_rx_desc_area;
        dma_addr_t rx_desc_dma;
        int rx_desc_area_size;
        struct sk_buff **rx_skb;

        struct tx_desc *p_tx_desc_area;
        dma_addr_t tx_desc_dma;
        int tx_desc_area_size;
        struct sk_buff **tx_skb;

        struct work_struct tx_timeout_task;

        struct net_device *dev;
        struct napi_struct napi;
        u8 work_todo;
        int skb_size;

        /* Size of TX ring */
        int tx_ring_size;
        /* Number of TX descriptors in use */
        int tx_desc_count;
        /* Size of RX ring */
        int rx_ring_size;
        /* Number of RX descriptors in use */
        int rx_desc_count;

        /*
         * Used when the RX ring runs empty (e.g. no skbs could be
         * allocated); rxq_refill() arms this timer to retry later.
         */
        struct timer_list timeout;
        struct mii_bus *smi_bus;

        struct clk *clk;
        struct pxa168_eth_platform_data *pd;

        /* Ethernet controller register base */
        void __iomem *base;

        /* Hardware hash (address filter) table and its DMA handle */
        void *htpr;
        dma_addr_t htpr_dma;
};

struct addr_table_entry {
        __le32 lo;
        __le32 hi;
};

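/* Bit assignments in the low word of a hash-table entry */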
enum hash_table_entry {
        HASH_ENTRY_VALID = 1,
        SKIP = 2,
        HASH_ENTRY_RECEIVE_DISCARD = 4,
        HASH_ENTRY_RECEIVE_DISCARD_BIT = 2
};

static int pxa168_init_hw(struct pxa168_eth_private *pep);
static int pxa168_init_phy(struct net_device *dev);
static void eth_port_reset(struct net_device *dev);
static void eth_port_start(struct net_device *dev);
static int pxa168_eth_open(struct net_device *dev);
static int pxa168_eth_stop(struct net_device *dev);

static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
{
        return readl_relaxed(pep->base + offset);
}

static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
{
        writel_relaxed(data, pep->base + offset);
}

/*
 * Abort any RX and TX DMA in progress (SDMA_CMD_AR / SDMA_CMD_AT) and
 * wait for the abort to complete.
 */
static void abort_dma(struct pxa168_eth_private *pep)
{
        int delay;
        int max_retries = 40;

        do {
                wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT);
                udelay(100);

                delay = 10;
                while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT))
                       && delay-- > 0) {
                        udelay(10);
                }
        } while (max_retries-- > 0 && delay <= 0);

        if (max_retries <= 0)
                netdev_err(pep->dev, "%s: DMA stuck\n", __func__);
}

static void rxq_refill(struct net_device *dev)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);
        struct sk_buff *skb;
        struct rx_desc *p_used_rx_desc;
        int used_rx_desc;

        while (pep->rx_desc_count < pep->rx_ring_size) {
                int size;

                skb = netdev_alloc_skb(dev, pep->skb_size);
                if (!skb)
                        break;
                if (SKB_DMA_REALIGN)
                        skb_reserve(skb, SKB_DMA_REALIGN);
                pep->rx_desc_count++;

                /* Get the 'used' RX descriptor */
                used_rx_desc = pep->rx_used_desc_q;
                p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
                size = skb_end_pointer(skb) - skb->data;
                p_used_rx_desc->buf_ptr = dma_map_single(&pep->pdev->dev,
                                                         skb->data,
                                                         size,
                                                         DMA_FROM_DEVICE);
                p_used_rx_desc->buf_size = size;
                pep->rx_skb[used_rx_desc] = skb;

                /* Return the descriptor to DMA ownership */
                dma_wmb();
                p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
                dma_wmb();

                /* Move the used descriptor pointer to the next descriptor */
                pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;

                /* Any RX return cancels the RX resource error status */
                pep->rx_resource_err = 0;

                skb_reserve(skb, ETH_HW_IP_ALIGN);
        }

        /*
         * If the RX ring is still empty (no skb could be allocated),
         * set a timer to try allocating again at a later time.
         */
        if (pep->rx_desc_count == 0) {
                pep->timeout.expires = jiffies + (HZ / 10);
                add_timer(&pep->timeout);
        }
}

static inline void rxq_refill_timer_wrapper(struct timer_list *t)
{
        struct pxa168_eth_private *pep = from_timer(pep, t, timeout);
        napi_schedule(&pep->napi);
}

static inline u8 flip_8_bits(u8 x)
{
        return (((x) & 0x01) << 3) | (((x) & 0x02) << 1)
            | (((x) & 0x04) >> 1) | (((x) & 0x08) >> 3)
            | (((x) & 0x10) << 3) | (((x) & 0x20) << 1)
            | (((x) & 0x40) >> 1) | (((x) & 0x80) >> 3);
}

static void nibble_swap_every_byte(unsigned char *mac_addr)
{
        int i;

        for (i = 0; i < ETH_ALEN; i++) {
                mac_addr[i] = ((mac_addr[i] & 0x0f) << 4) |
                              ((mac_addr[i] & 0xf0) >> 4);
        }
}

static void inverse_every_nibble(unsigned char *mac_addr)
{
        int i;

        for (i = 0; i < ETH_ALEN; i++)
                mac_addr[i] = flip_8_bits(mac_addr[i]);
}

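/*
 * Compute the hash-table index for a MAC address.
 *
 * The address is nibble-swapped and bit-reversed per nibble, then folded
 * into an 11-bit index (masked with 0x7ff) into the hardware hash table.
 */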
static u32 hash_function(unsigned char *mac_addr_orig)
{
        u32 hash_result;
        u32 addr0;
        u32 addr1;
        u32 addr2;
        u32 addr3;
        unsigned char mac_addr[ETH_ALEN];

        /*
         * Make a copy of the MAC address, since we are going to
         * transform it in place.
         */
        memcpy(mac_addr, mac_addr_orig, ETH_ALEN);

        nibble_swap_every_byte(mac_addr);
        inverse_every_nibble(mac_addr);

        addr0 = (mac_addr[5] >> 2) & 0x3f;
        addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2);
        addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1;
        addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8);

        hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3);
        hash_result = hash_result & 0x07ff;
        return hash_result;
}
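/*
 * Add or delete a hash-table entry for @mac_addr.
 *
 * Starting at the address's hash index, linearly probe up to HOP_NUMBER
 * slots (wrapping at the end of the table) looking for a free slot or an
 * entry that already matches the address.  On add, write the entry (@rd
 * and @skip set the receive-discard and skip bits); on delete, clear the
 * matching entry.  Returns -ENOSPC if all HOP_NUMBER candidate slots are
 * occupied by other addresses.
 */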
static int add_del_hash_entry(struct pxa168_eth_private *pep,
                              unsigned char *mac_addr,
                              u32 rd, u32 skip, int del)
{
        struct addr_table_entry *entry, *start;
        u32 new_high;
        u32 new_low;
        u32 i;

        new_low = (((mac_addr[1] >> 4) & 0xf) << 15)
            | (((mac_addr[1] >> 0) & 0xf) << 11)
            | (((mac_addr[0] >> 4) & 0xf) << 7)
            | (((mac_addr[0] >> 0) & 0xf) << 3)
            | (((mac_addr[3] >> 4) & 0x1) << 31)
            | (((mac_addr[3] >> 0) & 0xf) << 27)
            | (((mac_addr[2] >> 4) & 0xf) << 23)
            | (((mac_addr[2] >> 0) & 0xf) << 19)
            | (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT)
            | HASH_ENTRY_VALID;

        new_high = (((mac_addr[5] >> 4) & 0xf) << 15)
            | (((mac_addr[5] >> 0) & 0xf) << 11)
            | (((mac_addr[4] >> 4) & 0xf) << 7)
            | (((mac_addr[4] >> 0) & 0xf) << 3)
            | (((mac_addr[3] >> 5) & 0x7) << 0);

        /*
         * Scan, starting from the hash index, for a free slot or an
         * already existing entry for this address.
         */
        start = pep->htpr;
        entry = start + hash_function(mac_addr);
        for (i = 0; i < HOP_NUMBER; i++) {
                if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) {
                        break;
                } else {
                        /* The same address is already in the table */
                        if (((le32_to_cpu(entry->lo) & 0xfffffff8) ==
                             (new_low & 0xfffffff8)) &&
                            (le32_to_cpu(entry->hi) == new_high)) {
                                break;
                        }
                }
                if (entry == start + 0x7ff)
                        entry = start;
                else
                        entry++;
        }

        if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) &&
            (le32_to_cpu(entry->hi) != new_high) && del)
                return 0;

        if (i == HOP_NUMBER) {
                if (!del) {
                        netdev_info(pep->dev,
                                    "%s: table section is full, need to "
                                    "move to 16kB implementation?\n",
                                    __FILE__);
                        return -ENOSPC;
                } else
                        return 0;
        }

        /*
         * Update the selected entry: clear it on delete, otherwise
         * write the new address.
         */
        if (del) {
                entry->hi = 0;
                entry->lo = 0;
        } else {
                entry->hi = cpu_to_le32(new_high);
                entry->lo = cpu_to_le32(new_low);
        }

        return 0;
}
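/*
 * Update the hardware hash table for a MAC address change: delete the
 * old address entry (if one is given) and add an entry for the new one.
 */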
static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
                                          unsigned char *oaddr,
                                          unsigned char *addr)
{
        /* Delete old entry */
        if (oaddr)
                add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE);

        /* Add new entry */
        add_del_hash_entry(pep, addr, 1, 0, HASH_ADD);
}

static int init_hash_table(struct pxa168_eth_private *pep)
{
        /*
         * The hardware reads the hash table from DRAM; its base address
         * is programmed into the HTPR register.  Allocate the table
         * coherently on first use, otherwise just clear the existing one.
         */
        if (!pep->htpr) {
                pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
                                               HASH_ADDR_TABLE_SIZE,
                                               &pep->htpr_dma, GFP_KERNEL);
                if (!pep->htpr)
                        return -ENOMEM;
        } else {
                memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
        }
        wrl(pep, HTPR, pep->htpr_dma);
        return 0;
}

static void pxa168_eth_set_rx_mode(struct net_device *dev)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);
        struct netdev_hw_addr *ha;
        u32 val;

        val = rdl(pep, PORT_CONFIG);
        if (dev->flags & IFF_PROMISC)
                val |= PCR_PM;
        else
                val &= ~PCR_PM;
        wrl(pep, PORT_CONFIG, val);

        /*
         * Remove the old list of MAC addresses and repopulate the hash
         * table with the device address and the current multicast list.
         */
        memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
        update_hash_table_mac_address(pep, NULL, dev->dev_addr);

        netdev_for_each_mc_addr(ha, dev)
                update_hash_table_mac_address(pep, NULL, ha->addr);
}

static void pxa168_eth_get_mac_address(struct net_device *dev,
                                       unsigned char *addr)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);
        unsigned int mac_h = rdl(pep, MAC_ADDR_HIGH);
        unsigned int mac_l = rdl(pep, MAC_ADDR_LOW);

        addr[0] = (mac_h >> 24) & 0xff;
        addr[1] = (mac_h >> 16) & 0xff;
        addr[2] = (mac_h >> 8) & 0xff;
        addr[3] = mac_h & 0xff;
        addr[4] = (mac_l >> 8) & 0xff;
        addr[5] = mac_l & 0xff;
}

static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
{
        struct sockaddr *sa = addr;
        struct pxa168_eth_private *pep = netdev_priv(dev);
        unsigned char oldMac[ETH_ALEN];
        u32 mac_h, mac_l;

        if (!is_valid_ether_addr(sa->sa_data))
                return -EADDRNOTAVAIL;
        memcpy(oldMac, dev->dev_addr, ETH_ALEN);
        memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);

        mac_h = dev->dev_addr[0] << 24;
        mac_h |= dev->dev_addr[1] << 16;
        mac_h |= dev->dev_addr[2] << 8;
        mac_h |= dev->dev_addr[3];
        mac_l = dev->dev_addr[4] << 8;
        mac_l |= dev->dev_addr[5];
        wrl(pep, MAC_ADDR_HIGH, mac_h);
        wrl(pep, MAC_ADDR_LOW, mac_l);

        netif_addr_lock_bh(dev);
        update_hash_table_mac_address(pep, oldMac, dev->dev_addr);
        netif_addr_unlock_bh(dev);
        return 0;
}

static void eth_port_start(struct net_device *dev)
{
        unsigned int val = 0;
        struct pxa168_eth_private *pep = netdev_priv(dev);
        int tx_curr_desc, rx_curr_desc;

        phy_start(dev->phydev);

        /* Assign the TX current descriptor address */
        tx_curr_desc = pep->tx_curr_desc_q;
        wrl(pep, ETH_C_TX_DESC_1,
            (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc)));

        /* Assign the RX current and first descriptor addresses */
        rx_curr_desc = pep->rx_curr_desc_q;
        wrl(pep, ETH_C_RX_DESC_0,
            (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));

        wrl(pep, ETH_F_RX_DESC_0,
            (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));

        /* Clear all interrupts */
        wrl(pep, INT_CAUSE, 0);

        /* Enable all interrupts for receive, transmit and error. */
        wrl(pep, INT_MASK, ALL_INTS);

        /* Enable port */
        val = rdl(pep, PORT_CONFIG);
        val |= PCR_EN;
        wrl(pep, PORT_CONFIG, val);

        /* Start RX DMA engine */
        val = rdl(pep, SDMA_CMD);
        val |= SDMA_CMD_ERD;
        wrl(pep, SDMA_CMD, val);
}

static void eth_port_reset(struct net_device *dev)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);
        unsigned int val = 0;

        /* Stop all interrupts for receive, transmit and error. */
        wrl(pep, INT_MASK, 0);

        /* Clear all interrupts */
        wrl(pep, INT_CAUSE, 0);

        /* Stop RX DMA */
        val = rdl(pep, SDMA_CMD);
        val &= ~SDMA_CMD_ERD;

        /*
         * Abort any transmit and receive operations and put DMA
         * in idle state.
         */
        abort_dma(pep);

        /* Disable port */
        val = rdl(pep, PORT_CONFIG);
        val &= ~PCR_EN;
        wrl(pep, PORT_CONFIG, val);

        phy_stop(dev->phydev);
}

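/*
 * Reclaim transmitted TX descriptors and free their skbs.  With @force
 * set, descriptors still owned by the DMA engine are reclaimed too
 * (used when tearing the queue down).  Returns the number of descriptors
 * released, or -1 if the first pending descriptor is still in flight.
 */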
static int txq_reclaim(struct net_device *dev, int force)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);
        struct tx_desc *desc;
        u32 cmd_sts;
        struct sk_buff *skb;
        int tx_index;
        dma_addr_t addr;
        int count;
        int released = 0;

        netif_tx_lock(dev);

        pep->work_todo &= ~WORK_TX_DONE;
        while (pep->tx_desc_count > 0) {
                tx_index = pep->tx_used_desc_q;
                desc = &pep->p_tx_desc_area[tx_index];
                cmd_sts = desc->cmd_sts;
                if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) {
                        if (released == 0)
                                released = -1;
                        goto txq_reclaim_end;
                }
                pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size;
                pep->tx_desc_count--;
                addr = desc->buf_ptr;
                count = desc->byte_cnt;
                skb = pep->tx_skb[tx_index];
                if (skb)
                        pep->tx_skb[tx_index] = NULL;

                if (cmd_sts & TX_ERROR) {
                        if (net_ratelimit())
                                netdev_err(dev, "Error in TX\n");
                        dev->stats.tx_errors++;
                }
                dma_unmap_single(&pep->pdev->dev, addr, count, DMA_TO_DEVICE);
                if (skb)
                        dev_kfree_skb_irq(skb);
                released++;
        }
txq_reclaim_end:
        netif_tx_unlock(dev);
        return released;
}

static void pxa168_eth_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);

        netdev_info(dev, "TX timeout desc_count %d\n", pep->tx_desc_count);

        schedule_work(&pep->tx_timeout_task);
}

static void pxa168_eth_tx_timeout_task(struct work_struct *work)
{
        struct pxa168_eth_private *pep = container_of(work,
                                                      struct pxa168_eth_private,
                                                      tx_timeout_task);
        struct net_device *dev = pep->dev;

        pxa168_eth_stop(dev);
        pxa168_eth_open(dev);
}

static int rxq_process(struct net_device *dev, int budget)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);
        struct net_device_stats *stats = &dev->stats;
        unsigned int received_packets = 0;
        struct sk_buff *skb;

        while (budget-- > 0) {
                int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
                struct rx_desc *rx_desc;
                unsigned int cmd_sts;

                /* Do not process RX frames while the ring is empty */
                if (pep->rx_resource_err)
                        break;
                rx_curr_desc = pep->rx_curr_desc_q;
                rx_used_desc = pep->rx_used_desc_q;
                rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
                cmd_sts = rx_desc->cmd_sts;
                dma_rmb();
                if (cmd_sts & (BUF_OWNED_BY_DMA))
                        break;
                skb = pep->rx_skb[rx_curr_desc];
                pep->rx_skb[rx_curr_desc] = NULL;

                rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size;
                pep->rx_curr_desc_q = rx_next_curr_desc;

                /* RX descriptors exhausted. */
                if (rx_next_curr_desc == rx_used_desc)
                        pep->rx_resource_err = 1;
                pep->rx_desc_count--;
                dma_unmap_single(&pep->pdev->dev, rx_desc->buf_ptr,
                                 rx_desc->buf_size,
                                 DMA_FROM_DEVICE);
                received_packets++;

                /* Update statistics. */
                stats->rx_packets++;
                stats->rx_bytes += rx_desc->byte_cnt;

                /*
                 * A frame is valid only if it was received as a single
                 * descriptor (first and last) and without errors.
                 */
                if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
                     (RX_FIRST_DESC | RX_LAST_DESC))
                    || (cmd_sts & RX_ERROR)) {

                        stats->rx_dropped++;
                        if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
                            (RX_FIRST_DESC | RX_LAST_DESC)) {
                                if (net_ratelimit())
                                        netdev_err(dev,
                                                   "Rx pkt on multiple desc\n");
                        }
                        if (cmd_sts & RX_ERROR)
                                stats->rx_errors++;
                        dev_kfree_skb_irq(skb);
                } else {
                        /*
                         * The -4 strips the CRC from the trailer of the
                         * received packet.
                         */
                        skb_put(skb, rx_desc->byte_cnt - 4);
                        skb->protocol = eth_type_trans(skb, dev);
                        netif_receive_skb(skb);
                }
        }
        /* Fill RX ring with skb's */
        rxq_refill(dev);
        return received_packets;
}

static int pxa168_eth_collect_events(struct pxa168_eth_private *pep,
                                     struct net_device *dev)
{
        u32 icr;
        int ret = 0;

        icr = rdl(pep, INT_CAUSE);
        if (icr == 0)
                return IRQ_NONE;

        wrl(pep, INT_CAUSE, ~icr);
        if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) {
                pep->work_todo |= WORK_TX_DONE;
                ret = 1;
        }
        if (icr & ICR_RXBUF)
                ret = 1;
        return ret;
}

static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *)dev_id;
        struct pxa168_eth_private *pep = netdev_priv(dev);

        if (unlikely(!pxa168_eth_collect_events(pep, dev)))
                return IRQ_NONE;
        /* Mask interrupts and defer the work to NAPI */
        wrl(pep, INT_MASK, 0);
        napi_schedule(&pep->napi);
        return IRQ_HANDLED;
}

static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep)
{
        int skb_size;

        /*
         * Reserve 2+14 bytes for an ethernet header (the hardware
         * automatically prepends 2 bytes of hardware padding to each
         * received packet), 16 bytes for up to four VLAN tags, and
         * 4 bytes for the trailing FCS -- 36 bytes total.
         */
        skb_size = pep->dev->mtu + 36;

        /*
         * Make sure that the skb size is a multiple of 8 bytes, as
         * the lower three bits of the receive descriptor's buffer
         * size field are ignored by the hardware.
         */
        pep->skb_size = (skb_size + 7) & ~7;

        /*
         * If NET_SKB_PAD is smaller than a cache line,
         * netdev_alloc_skb() will cause skb->data to be misaligned
         * to a cache line boundary.  If this is the case, include
         * some extra space to allow re-aligning the data area.
         */
        pep->skb_size += SKB_DMA_REALIGN;
}

static int set_port_config_ext(struct pxa168_eth_private *pep)
{
        int skb_size;

        pxa168_eth_recalc_skb_size(pep);
        if (pep->skb_size <= 1518)
                skb_size = PCXR_MFL_1518;
        else if (pep->skb_size <= 1536)
                skb_size = PCXR_MFL_1536;
        else if (pep->skb_size <= 2048)
                skb_size = PCXR_MFL_2048;
        else
                skb_size = PCXR_MFL_64K;

        /* Extended Port Configuration */
        wrl(pep, PORT_CONFIG_EXT,
            PCXR_AN_SPEED_DIS |          /* Disable HW AN */
            PCXR_AN_DUPLEX_DIS |
            PCXR_AN_FLOWCTL_DIS |
            PCXR_2BSM |                  /* Two byte prefix aligns IP hdr */
            PCXR_DSCP_EN |               /* Enable DSCP in IP */
            skb_size | PCXR_FLP |        /* Do not force link pass */
            PCXR_TX_HIGH_PRI);           /* Transmit on high-priority queue */

        return 0;
}

static void pxa168_eth_adjust_link(struct net_device *dev)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);
        struct phy_device *phy = dev->phydev;
        u32 cfg, cfg_o = rdl(pep, PORT_CONFIG);
        u32 cfgext, cfgext_o = rdl(pep, PORT_CONFIG_EXT);

        cfg = cfg_o & ~PCR_DUPLEX_FULL;
        cfgext = cfgext_o & ~(PCXR_SPEED_100 | PCXR_FLOWCTL_DIS | PCXR_RMII_EN);

        if (phy->interface == PHY_INTERFACE_MODE_RMII)
                cfgext |= PCXR_RMII_EN;
        if (phy->speed == SPEED_100)
                cfgext |= PCXR_SPEED_100;
        if (phy->duplex)
                cfg |= PCR_DUPLEX_FULL;
        if (!phy->pause)
                cfgext |= PCXR_FLOWCTL_DIS;

        /* Bail out if there has been no change. */
        if (cfg == cfg_o && cfgext == cfgext_o)
                return;

        wrl(pep, PORT_CONFIG, cfg);
        wrl(pep, PORT_CONFIG_EXT, cfgext);

        phy_print_status(phy);
}

static int pxa168_init_phy(struct net_device *dev)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);
        struct ethtool_link_ksettings cmd;
        struct phy_device *phy = NULL;
        int err;

        if (dev->phydev)
                return 0;

        phy = mdiobus_scan(pep->smi_bus, pep->phy_addr);
        if (IS_ERR(phy))
                return PTR_ERR(phy);

        err = phy_connect_direct(dev, phy, pxa168_eth_adjust_link,
                                 pep->phy_intf);
        if (err)
                return err;

        cmd.base.phy_address = pep->phy_addr;
        cmd.base.speed = pep->phy_speed;
        cmd.base.duplex = pep->phy_duplex;
        bitmap_copy(cmd.link_modes.advertising, PHY_BASIC_FEATURES,
                    __ETHTOOL_LINK_MODE_MASK_NBITS);
        cmd.base.autoneg = AUTONEG_ENABLE;

        if (cmd.base.speed != 0)
                cmd.base.autoneg = AUTONEG_DISABLE;

        return phy_ethtool_set_link_ksettings(dev, &cmd);
}

static int pxa168_init_hw(struct pxa168_eth_private *pep)
{
        int err = 0;

        /* Disable interrupts */
        wrl(pep, INT_MASK, 0);
        wrl(pep, INT_CAUSE, 0);
        /* Write to ICR to clear interrupts. */
        wrl(pep, INT_W_CLEAR, 0);
        /*
         * Abort any transmit and receive operations and put DMA
         * in idle state.
         */
        abort_dma(pep);
        /* Initialize the address hash table */
        err = init_hash_table(pep);
        if (err)
                return err;
        /* SDMA configuration */
        wrl(pep, SDMA_CONFIG, SDCR_BSZ8 |       /* Burst size = 32 bytes */
            SDCR_RIFB |                         /* RX interrupt on frame */
            SDCR_BLMT |                         /* Little endian transmit */
            SDCR_BLMR |                         /* Little endian receive */
            SDCR_RC_MAX_RETRANS);               /* Max retransmit count */
        /* Port configuration */
        wrl(pep, PORT_CONFIG, PCR_HS);          /* Hash size is 1/2kb */
        set_port_config_ext(pep);

        return err;
}

static int rxq_init(struct net_device *dev)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);
        struct rx_desc *p_rx_desc;
        int size = 0, i = 0;
        int rx_desc_num = pep->rx_ring_size;

        /* Allocate the RX skb ring */
        pep->rx_skb = kcalloc(rx_desc_num, sizeof(*pep->rx_skb), GFP_KERNEL);
        if (!pep->rx_skb)
                return -ENOMEM;

        /* Allocate the RX descriptor ring */
        pep->rx_desc_count = 0;
        size = pep->rx_ring_size * sizeof(struct rx_desc);
        pep->rx_desc_area_size = size;
        pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
                                                 &pep->rx_desc_dma,
                                                 GFP_KERNEL);
        if (!pep->p_rx_desc_area)
                goto out;

        /* Initialize the next_desc_ptr links in the RX descriptor ring */
        p_rx_desc = pep->p_rx_desc_area;
        for (i = 0; i < rx_desc_num; i++) {
                p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
                    ((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
        }

        pep->rx_curr_desc_q = 0;
        pep->rx_used_desc_q = 0;
        pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
        return 0;
out:
        kfree(pep->rx_skb);
        return -ENOMEM;
}

static void rxq_deinit(struct net_device *dev)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);
        int curr;

        /* Free preallocated skb's on the RX ring */
        for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) {
                if (pep->rx_skb[curr]) {
                        dev_kfree_skb(pep->rx_skb[curr]);
                        pep->rx_desc_count--;
                }
        }
        if (pep->rx_desc_count)
                netdev_err(dev, "Error in freeing Rx Ring. %d skb's still\n",
                           pep->rx_desc_count);
        /* Free the RX descriptor ring */
        if (pep->p_rx_desc_area)
                dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size,
                                  pep->p_rx_desc_area, pep->rx_desc_dma);
        kfree(pep->rx_skb);
}

static int txq_init(struct net_device *dev)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);
        struct tx_desc *p_tx_desc;
        int size = 0, i = 0;
        int tx_desc_num = pep->tx_ring_size;

        pep->tx_skb = kcalloc(tx_desc_num, sizeof(*pep->tx_skb), GFP_KERNEL);
        if (!pep->tx_skb)
                return -ENOMEM;

        /* Allocate the TX descriptor ring */
        pep->tx_desc_count = 0;
        size = pep->tx_ring_size * sizeof(struct tx_desc);
        pep->tx_desc_area_size = size;
        pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
                                                 &pep->tx_desc_dma,
                                                 GFP_KERNEL);
        if (!pep->p_tx_desc_area)
                goto out;
        /* Initialize the next_desc_ptr links in the TX descriptor ring */
        p_tx_desc = pep->p_tx_desc_area;
        for (i = 0; i < tx_desc_num; i++) {
                p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
                    ((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
        }
        pep->tx_curr_desc_q = 0;
        pep->tx_used_desc_q = 0;
        pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
        return 0;
out:
        kfree(pep->tx_skb);
        return -ENOMEM;
}

static void txq_deinit(struct net_device *dev)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);

        /* Free outstanding skb's on the TX ring */
        txq_reclaim(dev, 1);
        BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q);
        /* Free the TX descriptor ring */
        if (pep->p_tx_desc_area)
                dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size,
                                  pep->p_tx_desc_area, pep->tx_desc_dma);
        kfree(pep->tx_skb);
}

static int pxa168_eth_open(struct net_device *dev)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);
        int err;

        err = pxa168_init_phy(dev);
        if (err)
                return err;

        err = request_irq(dev->irq, pxa168_eth_int_handler, 0, dev->name, dev);
        if (err) {
                dev_err(&dev->dev, "can't assign irq\n");
                return -EAGAIN;
        }
        pep->rx_resource_err = 0;
        err = rxq_init(dev);
        if (err != 0)
                goto out_free_irq;
        err = txq_init(dev);
        if (err != 0)
                goto out_free_rx_skb;
        pep->rx_used_desc_q = 0;
        pep->rx_curr_desc_q = 0;

        /* Fill the RX ring with skb's */
        rxq_refill(dev);
        pep->rx_used_desc_q = 0;
        pep->rx_curr_desc_q = 0;
        netif_carrier_off(dev);
        napi_enable(&pep->napi);
        eth_port_start(dev);
        return 0;
out_free_rx_skb:
        rxq_deinit(dev);
out_free_irq:
        free_irq(dev->irq, dev);
        return err;
}

static int pxa168_eth_stop(struct net_device *dev)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);

        eth_port_reset(dev);

        /* Disable interrupts */
        wrl(pep, INT_MASK, 0);
        wrl(pep, INT_CAUSE, 0);
        /* Write to ICR to clear interrupts. */
        wrl(pep, INT_W_CLEAR, 0);
        napi_disable(&pep->napi);
        del_timer_sync(&pep->timeout);
        netif_carrier_off(dev);
        free_irq(dev->irq, dev);
        rxq_deinit(dev);
        txq_deinit(dev);

        return 0;
}

static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);

        dev->mtu = mtu;
        set_port_config_ext(pep);

        if (!netif_running(dev))
                return 0;

        /*
         * Stop and then re-open the interface; this reallocates the RX
         * skbs at the new MTU.  There is a possible danger that the open
         * will not succeed if memory is tight.
         */
        pxa168_eth_stop(dev);
        if (pxa168_eth_open(dev)) {
                dev_err(&dev->dev,
                        "fatal error on re-opening device after MTU change\n");
        }

        return 0;
}

static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep)
{
        int tx_desc_curr;

        tx_desc_curr = pep->tx_curr_desc_q;
        pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size;
        BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q);
        pep->tx_desc_count++;

        return tx_desc_curr;
}

static int pxa168_rx_poll(struct napi_struct *napi, int budget)
{
        struct pxa168_eth_private *pep =
            container_of(napi, struct pxa168_eth_private, napi);
        struct net_device *dev = pep->dev;
        int work_done = 0;

        /*
         * Reclaim TX descriptors on every poll: while NAPI runs,
         * interrupts are masked, so TX-done events would otherwise
         * be missed.
         */
        txq_reclaim(dev, 0);
        if (netif_queue_stopped(dev)
            && pep->tx_ring_size - pep->tx_desc_count > 1) {
                netif_wake_queue(dev);
        }
        work_done = rxq_process(dev, budget);
        if (work_done < budget) {
                napi_complete_done(napi, work_done);
                wrl(pep, INT_MASK, ALL_INTS);
        }

        return work_done;
}

static netdev_tx_t
pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);
        struct net_device_stats *stats = &dev->stats;
        struct tx_desc *desc;
        int tx_index;
        int length;

        tx_index = eth_alloc_tx_desc_index(pep);
        desc = &pep->p_tx_desc_area[tx_index];
        length = skb->len;
        pep->tx_skb[tx_index] = skb;
        desc->byte_cnt = length;
        desc->buf_ptr = dma_map_single(&pep->pdev->dev, skb->data, length,
                                       DMA_TO_DEVICE);

        skb_tx_timestamp(skb);

        /* Hand the descriptor to the DMA engine and kick off transmit */
        dma_wmb();
        desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
                        TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
        wmb();
        wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);

        stats->tx_bytes += length;
        stats->tx_packets++;
        netif_trans_update(dev);
        if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
                /* We handled the current skb, but now we are out of space. */
                netif_stop_queue(dev);
        }

        return NETDEV_TX_OK;
}

static int smi_wait_ready(struct pxa168_eth_private *pep)
{
        int i = 0;

        /* Wait for the SMI register to become available */
        for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) {
                if (i == PHY_WAIT_ITERATIONS)
                        return -ETIMEDOUT;
                msleep(10);
        }

        return 0;
}

static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum)
{
        struct pxa168_eth_private *pep = bus->priv;
        int i = 0;
        int val;

        if (smi_wait_ready(pep)) {
                netdev_warn(pep->dev, "pxa168_eth: SMI bus busy timeout\n");
                return -ETIMEDOUT;
        }
        wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R);
        /* Now wait for the read data to become valid */
        for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) {
                if (i == PHY_WAIT_ITERATIONS) {
                        netdev_warn(pep->dev,
                                    "pxa168_eth: SMI bus read not valid\n");
                        return -ENODEV;
                }
                msleep(10);
        }

        return val & 0xffff;
}

static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
                            u16 value)
{
        struct pxa168_eth_private *pep = bus->priv;

        if (smi_wait_ready(pep)) {
                netdev_warn(pep->dev, "pxa168_eth: SMI bus busy timeout\n");
                return -ETIMEDOUT;
        }

        wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) |
            SMI_OP_W | (value & 0xffff));

        if (smi_wait_ready(pep)) {
                netdev_err(pep->dev, "pxa168_eth: SMI bus busy timeout\n");
                return -ETIMEDOUT;
        }

        return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void pxa168_eth_netpoll(struct net_device *dev)
{
        disable_irq(dev->irq);
        pxa168_eth_int_handler(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif

static void pxa168_get_drvinfo(struct net_device *dev,
                               struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
        strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
        strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
        strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}

static const struct ethtool_ops pxa168_ethtool_ops = {
        .get_drvinfo = pxa168_get_drvinfo,
        .nway_reset = phy_ethtool_nway_reset,
        .get_link = ethtool_op_get_link,
        .get_ts_info = ethtool_op_get_ts_info,
        .get_link_ksettings = phy_ethtool_get_link_ksettings,
        .set_link_ksettings = phy_ethtool_set_link_ksettings,
};

static const struct net_device_ops pxa168_eth_netdev_ops = {
        .ndo_open = pxa168_eth_open,
        .ndo_stop = pxa168_eth_stop,
        .ndo_start_xmit = pxa168_eth_start_xmit,
        .ndo_set_rx_mode = pxa168_eth_set_rx_mode,
        .ndo_set_mac_address = pxa168_eth_set_mac_address,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_do_ioctl = phy_do_ioctl,
        .ndo_change_mtu = pxa168_eth_change_mtu,
        .ndo_tx_timeout = pxa168_eth_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = pxa168_eth_netpoll,
#endif
};

static int pxa168_eth_probe(struct platform_device *pdev)
{
        struct pxa168_eth_private *pep = NULL;
        struct net_device *dev = NULL;
        struct resource *res;
        struct clk *clk;
        struct device_node *np;
        const unsigned char *mac_addr = NULL;
        int err;

        printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n");

        clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk)) {
                dev_err(&pdev->dev, "Fast Ethernet failed to get clock\n");
                return -ENODEV;
        }
        clk_prepare_enable(clk);

        dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
        if (!dev) {
                err = -ENOMEM;
                goto err_clk;
        }

        platform_set_drvdata(pdev, dev);
        pep = netdev_priv(dev);
        pep->dev = dev;
        pep->clk = clk;

        pep->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(pep->base)) {
                err = PTR_ERR(pep->base);
                goto err_netdev;
        }

        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        BUG_ON(!res);
        dev->irq = res->start;
        dev->netdev_ops = &pxa168_eth_netdev_ops;
        dev->watchdog_timeo = 2 * HZ;
        dev->base_addr = 0;
        dev->ethtool_ops = &pxa168_ethtool_ops;

        /* MTU range: 68 - 9500 */
        dev->min_mtu = ETH_MIN_MTU;
        dev->max_mtu = 9500;

        INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);

        if (pdev->dev.of_node)
                mac_addr = of_get_mac_address(pdev->dev.of_node);

        if (!IS_ERR_OR_NULL(mac_addr)) {
                ether_addr_copy(dev->dev_addr, mac_addr);
        } else {
                /* Try reading the MAC address, if set by the bootloader */
                pxa168_eth_get_mac_address(dev, dev->dev_addr);
                if (!is_valid_ether_addr(dev->dev_addr)) {
                        dev_info(&pdev->dev, "Using random mac address\n");
                        eth_hw_addr_random(dev);
                }
        }

        pep->rx_ring_size = NUM_RX_DESCS;
        pep->tx_ring_size = NUM_TX_DESCS;

        pep->pd = dev_get_platdata(&pdev->dev);
        if (pep->pd) {
                if (pep->pd->rx_queue_size)
                        pep->rx_ring_size = pep->pd->rx_queue_size;

                if (pep->pd->tx_queue_size)
                        pep->tx_ring_size = pep->pd->tx_queue_size;

                pep->port_num = pep->pd->port_number;
                pep->phy_addr = pep->pd->phy_addr;
                pep->phy_speed = pep->pd->speed;
                pep->phy_duplex = pep->pd->duplex;
                pep->phy_intf = pep->pd->intf;

                if (pep->pd->init)
                        pep->pd->init();
        } else if (pdev->dev.of_node) {
                of_property_read_u32(pdev->dev.of_node, "port-id",
                                     &pep->port_num);

                np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
                if (!np) {
                        dev_err(&pdev->dev, "missing phy-handle\n");
                        err = -EINVAL;
                        goto err_netdev;
                }
                of_property_read_u32(np, "reg", &pep->phy_addr);
                of_node_put(np);
                err = of_get_phy_mode(pdev->dev.of_node, &pep->phy_intf);
                if (err && err != -ENODEV)
                        goto err_netdev;
        }

        /* Hardware supports only 3 ports */
        BUG_ON(pep->port_num > 2);
        netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);

        memset(&pep->timeout, 0, sizeof(struct timer_list));
        timer_setup(&pep->timeout, rxq_refill_timer_wrapper, 0);

        pep->smi_bus = mdiobus_alloc();
        if (!pep->smi_bus) {
                err = -ENOMEM;
                goto err_netdev;
        }
        pep->smi_bus->priv = pep;
        pep->smi_bus->name = "pxa168_eth smi";
        pep->smi_bus->read = pxa168_smi_read;
        pep->smi_bus->write = pxa168_smi_write;
        snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%s-%d",
                 pdev->name, pdev->id);
        pep->smi_bus->parent = &pdev->dev;
        pep->smi_bus->phy_mask = 0xffffffff;
        err = mdiobus_register(pep->smi_bus);
        if (err)
                goto err_free_mdio;

        pep->pdev = pdev;
        SET_NETDEV_DEV(dev, &pdev->dev);
        pxa168_init_hw(pep);
        err = register_netdev(dev);
        if (err)
                goto err_mdiobus;
        return 0;

err_mdiobus:
        mdiobus_unregister(pep->smi_bus);
err_free_mdio:
        mdiobus_free(pep->smi_bus);
err_netdev:
        free_netdev(dev);
err_clk:
        clk_disable_unprepare(clk);
        return err;
}

static int pxa168_eth_remove(struct platform_device *pdev)
{
        struct net_device *dev = platform_get_drvdata(pdev);
        struct pxa168_eth_private *pep = netdev_priv(dev);

        /*
         * Take the interface down before releasing the resources it
         * uses (hash table, clock, MDIO bus).
         */
        cancel_work_sync(&pep->tx_timeout_task);
        unregister_netdev(dev);

        if (pep->htpr) {
                dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,
                                  pep->htpr, pep->htpr_dma);
                pep->htpr = NULL;
        }
        if (dev->phydev)
                phy_disconnect(dev->phydev);

        clk_disable_unprepare(pep->clk);
        mdiobus_unregister(pep->smi_bus);
        mdiobus_free(pep->smi_bus);
        free_netdev(dev);
        return 0;
}

static void pxa168_eth_shutdown(struct platform_device *pdev)
{
        struct net_device *dev = platform_get_drvdata(pdev);

        eth_port_reset(dev);
}

#ifdef CONFIG_PM
static int pxa168_eth_resume(struct platform_device *pdev)
{
        return -ENOSYS;
}

static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state)
{
        return -ENOSYS;
}

#else
#define pxa168_eth_resume NULL
#define pxa168_eth_suspend NULL
#endif

static const struct of_device_id pxa168_eth_of_match[] = {
        { .compatible = "marvell,pxa168-eth" },
        { },
};
MODULE_DEVICE_TABLE(of, pxa168_eth_of_match);

static struct platform_driver pxa168_eth_driver = {
        .probe = pxa168_eth_probe,
        .remove = pxa168_eth_remove,
        .shutdown = pxa168_eth_shutdown,
        .resume = pxa168_eth_resume,
        .suspend = pxa168_eth_suspend,
        .driver = {
                .name = DRIVER_NAME,
                .of_match_table = of_match_ptr(pxa168_eth_of_match),
        },
};

module_platform_driver(pxa168_eth_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
MODULE_ALIAS("platform:pxa168_eth");