// SPDX-License-Identifier: GPL-2.0-only
/* Altera Triple-Speed Ethernet MAC driver
 *
 * Copyright (C) Altera Corporation. All rights reserved.
 */

20#include <linux/atomic.h>
21#include <linux/delay.h>
22#include <linux/etherdevice.h>
23#include <linux/if_vlan.h>
24#include <linux/init.h>
25#include <linux/interrupt.h>
26#include <linux/io.h>
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/mii.h>
30#include <linux/netdevice.h>
31#include <linux/of_device.h>
32#include <linux/of_mdio.h>
33#include <linux/of_net.h>
34#include <linux/of_platform.h>
35#include <linux/phy.h>
36#include <linux/platform_device.h>
37#include <linux/skbuff.h>
38#include <asm/cacheflush.h>
39
40#include "altera_utils.h"
41#include "altera_tse.h"
42#include "altera_sgdma.h"
43#include "altera_msgdma.h"
44
45static atomic_t instance_count = ATOMIC_INIT(~0);
46
47static int debug = -1;
48module_param(debug, int, 0644);
49MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
50
51static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
52 NETIF_MSG_LINK | NETIF_MSG_IFUP |
53 NETIF_MSG_IFDOWN);
54
55#define RX_DESCRIPTORS 64
56static int dma_rx_num = RX_DESCRIPTORS;
57module_param(dma_rx_num, int, 0644);
58MODULE_PARM_DESC(dma_rx_num, "Number of descriptors in the RX list");
59
60#define TX_DESCRIPTORS 64
61static int dma_tx_num = TX_DESCRIPTORS;
62module_param(dma_tx_num, int, 0644);
63MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list");
64
65
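/* A phy-addr of POLL_PHY (-1) means "probe the MDIO bus for the first PHY" */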
66#define POLL_PHY (-1)

/* Make sure the RX DMA buffer is larger than the largest frame the MAC can
 * receive (1518 bytes plus a VLAN tag) with some headroom for alignment,
 * so 2048 bytes is a safe size.
 */
73#define ALTERA_RXDMABUFFER_SIZE 2048

/* Wake the TX queue once more than a quarter of the descriptors in the
 * ring are free again.
 */
78#define TSE_TX_THRESH(x) (x->tx_ring_size / 4)
79
80#define TXQUEUESTOP_THRESHHOLD 2
81
82static const struct of_device_id altera_tse_ids[];
83
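/* Number of TX descriptors still available in the ring */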
84static inline u32 tse_tx_avail(struct altera_tse_private *priv)
85{
86 return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1;
87}
88
89
90
91static u16 sgmii_pcs_read(struct altera_tse_private *priv, int regnum)
92{
93 return csrrd32(priv->mac_dev,
94 tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
95}
96
97static void sgmii_pcs_write(struct altera_tse_private *priv, int regnum,
98 u16 value)
99{
100 csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
101}
102
103
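/* Check that the PCS scratch register reads back what was written */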
104static int sgmii_pcs_scratch_test(struct altera_tse_private *priv, u16 value)
105{
106 sgmii_pcs_write(priv, SGMII_PCS_SCRATCH, value);
107 return (sgmii_pcs_read(priv, SGMII_PCS_SCRATCH) == value);
108}
109
/* MDIO bus read/write accessors used by the PHY library */
112static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
113{
114 struct net_device *ndev = bus->priv;
115 struct altera_tse_private *priv = netdev_priv(ndev);
116
117
118 csrwr32((mii_id & 0x1f), priv->mac_dev,
119 tse_csroffs(mdio_phy1_addr));
120
121
122 return csrrd32(priv->mac_dev,
123 tse_csroffs(mdio_phy1) + regnum * 4) & 0xffff;
124}
125
126static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
127 u16 value)
128{
129 struct net_device *ndev = bus->priv;
130 struct altera_tse_private *priv = netdev_priv(ndev);
131
132
133 csrwr32((mii_id & 0x1f), priv->mac_dev,
134 tse_csroffs(mdio_phy1_addr));
135
136
137 csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy1) + regnum * 4);
138 return 0;
139}
140
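/* Create and register an MDIO bus for an "altr,tse-mdio" subnode, if any */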
141static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
142{
143 struct altera_tse_private *priv = netdev_priv(dev);
144 int ret;
145 struct device_node *mdio_node = NULL;
146 struct mii_bus *mdio = NULL;
147 struct device_node *child_node = NULL;
148
149 for_each_child_of_node(priv->device->of_node, child_node) {
150 if (of_device_is_compatible(child_node, "altr,tse-mdio")) {
151 mdio_node = child_node;
152 break;
153 }
154 }
155
156 if (mdio_node) {
157 netdev_dbg(dev, "FOUND MDIO subnode\n");
158 } else {
159 netdev_dbg(dev, "NO MDIO subnode\n");
160 return 0;
161 }
162
163 mdio = mdiobus_alloc();
164 if (mdio == NULL) {
165 netdev_err(dev, "Error allocating MDIO bus\n");
166 ret = -ENOMEM;
167 goto put_node;
168 }
169
170 mdio->name = ALTERA_TSE_RESOURCE_NAME;
171 mdio->read = &altera_tse_mdio_read;
172 mdio->write = &altera_tse_mdio_write;
173 snprintf(mdio->id, MII_BUS_ID_SIZE, "%s-%u", mdio->name, id);
174
175 mdio->priv = dev;
176 mdio->parent = priv->device;
177
178 ret = of_mdiobus_register(mdio, mdio_node);
179 if (ret != 0) {
180 netdev_err(dev, "Cannot register MDIO bus %s\n",
181 mdio->id);
182 goto out_free_mdio;
183 }
184 of_node_put(mdio_node);
185
186 if (netif_msg_drv(priv))
187 netdev_info(dev, "MDIO bus %s: created\n", mdio->id);
188
189 priv->mdio = mdio;
190 return 0;
191out_free_mdio:
192 mdiobus_free(mdio);
193 mdio = NULL;
194put_node:
195 of_node_put(mdio_node);
196 return ret;
197}
198
199static void altera_tse_mdio_destroy(struct net_device *dev)
200{
201 struct altera_tse_private *priv = netdev_priv(dev);
202
203 if (priv->mdio == NULL)
204 return;
205
206 if (netif_msg_drv(priv))
207 netdev_info(dev, "MDIO bus %s: removed\n",
208 priv->mdio->id);
209
210 mdiobus_unregister(priv->mdio);
211 mdiobus_free(priv->mdio);
212 priv->mdio = NULL;
213}
214
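/* Allocate and DMA-map a receive skb for one RX ring entry */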
215static int tse_init_rx_buffer(struct altera_tse_private *priv,
216 struct tse_buffer *rxbuffer, int len)
217{
218 rxbuffer->skb = netdev_alloc_skb_ip_align(priv->dev, len);
219 if (!rxbuffer->skb)
220 return -ENOMEM;
221
222 rxbuffer->dma_addr = dma_map_single(priv->device, rxbuffer->skb->data,
223 len,
224 DMA_FROM_DEVICE);
225
226 if (dma_mapping_error(priv->device, rxbuffer->dma_addr)) {
227 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
228 dev_kfree_skb_any(rxbuffer->skb);
229 return -EINVAL;
230 }
231 rxbuffer->dma_addr &= (dma_addr_t)~3;
232 rxbuffer->len = len;
233 return 0;
234}
235
236static void tse_free_rx_buffer(struct altera_tse_private *priv,
237 struct tse_buffer *rxbuffer)
238{
239 struct sk_buff *skb = rxbuffer->skb;
240 dma_addr_t dma_addr = rxbuffer->dma_addr;
241
242 if (skb != NULL) {
243 if (dma_addr)
244 dma_unmap_single(priv->device, dma_addr,
245 rxbuffer->len,
246 DMA_FROM_DEVICE);
247 dev_kfree_skb_any(skb);
248 rxbuffer->skb = NULL;
249 rxbuffer->dma_addr = 0;
250 }
251}

/* Unmap and free a transmit buffer (handles both mapped skbs and pages) */
255static void tse_free_tx_buffer(struct altera_tse_private *priv,
256 struct tse_buffer *buffer)
257{
258 if (buffer->dma_addr) {
259 if (buffer->mapped_as_page)
260 dma_unmap_page(priv->device, buffer->dma_addr,
261 buffer->len, DMA_TO_DEVICE);
262 else
263 dma_unmap_single(priv->device, buffer->dma_addr,
264 buffer->len, DMA_TO_DEVICE);
265 buffer->dma_addr = 0;
266 }
267 if (buffer->skb) {
268 dev_kfree_skb_any(buffer->skb);
269 buffer->skb = NULL;
270 }
271}
272
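/* Allocate the TX/RX rings and pre-fill the RX ring with mapped skbs */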
273static int alloc_init_skbufs(struct altera_tse_private *priv)
274{
275 unsigned int rx_descs = priv->rx_ring_size;
276 unsigned int tx_descs = priv->tx_ring_size;
277 int ret = -ENOMEM;
278 int i;
279
280
281 priv->rx_ring = kcalloc(rx_descs, sizeof(struct tse_buffer),
282 GFP_KERNEL);
283 if (!priv->rx_ring)
284 goto err_rx_ring;
285
286
287 priv->tx_ring = kcalloc(tx_descs, sizeof(struct tse_buffer),
288 GFP_KERNEL);
289 if (!priv->tx_ring)
290 goto err_tx_ring;
291
292 priv->tx_cons = 0;
293 priv->tx_prod = 0;
294
295
296 for (i = 0; i < rx_descs; i++) {
297 ret = tse_init_rx_buffer(priv, &priv->rx_ring[i],
298 priv->rx_dma_buf_sz);
299 if (ret)
300 goto err_init_rx_buffers;
301 }
302
303 priv->rx_cons = 0;
304 priv->rx_prod = 0;
305
306 return 0;
307err_init_rx_buffers:
308 while (--i >= 0)
309 tse_free_rx_buffer(priv, &priv->rx_ring[i]);
310 kfree(priv->tx_ring);
311err_tx_ring:
312 kfree(priv->rx_ring);
313err_rx_ring:
314 return ret;
315}
316
317static void free_skbufs(struct net_device *dev)
318{
319 struct altera_tse_private *priv = netdev_priv(dev);
320 unsigned int rx_descs = priv->rx_ring_size;
321 unsigned int tx_descs = priv->tx_ring_size;
322 int i;
323
324
325 for (i = 0; i < rx_descs; i++)
326 tse_free_rx_buffer(priv, &priv->rx_ring[i]);
327 for (i = 0; i < tx_descs; i++)
328 tse_free_tx_buffer(priv, &priv->tx_ring[i]);
329
330
331 kfree(priv->tx_ring);
332}
333
/* Refill the RX ring with freshly allocated, DMA-mapped buffers */
336static inline void tse_rx_refill(struct altera_tse_private *priv)
337{
338 unsigned int rxsize = priv->rx_ring_size;
339 unsigned int entry;
340 int ret;
341
342 for (; priv->rx_cons - priv->rx_prod > 0;
343 priv->rx_prod++) {
344 entry = priv->rx_prod % rxsize;
345 if (likely(priv->rx_ring[entry].skb == NULL)) {
346 ret = tse_init_rx_buffer(priv, &priv->rx_ring[entry],
347 priv->rx_dma_buf_sz);
348 if (unlikely(ret != 0))
349 break;
350 priv->dmaops->add_rx_desc(priv, &priv->rx_ring[entry]);
351 }
352 }
353}
354
/* Pull the VLAN tag out of a received frame and store it in the skb */
357static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
358{
359 struct ethhdr *eth_hdr;
360 u16 vid;
361 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
362 !__vlan_get_tag(skb, &vid)) {
363 eth_hdr = (struct ethhdr *)skb->data;
364 memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
365 skb_pull(skb, VLAN_HLEN);
366 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
367 }
368}
369
/* Receive packets: retrieve them from the ring and pass them to the stack */
372static int tse_rx(struct altera_tse_private *priv, int limit)
373{
374 unsigned int count = 0;
375 unsigned int next_entry;
376 struct sk_buff *skb;
377 unsigned int entry = priv->rx_cons % priv->rx_ring_size;
378 u32 rxstatus;
379 u16 pktlength;
380 u16 pktstatus;
381
	/* Check count < limit before calling get_rx_status, since
	 * get_rx_status consumes an entry from the response FIFO and the
	 * corresponding packet must then be processed.
	 */
387 while ((count < limit) &&
388 ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0)) {
389 pktstatus = rxstatus >> 16;
390 pktlength = rxstatus & 0xffff;
391
392 if ((pktstatus & 0xFF) || (pktlength == 0))
393 netdev_err(priv->dev,
394 "RCV pktstatus %08X pktlength %08X\n",
395 pktstatus, pktlength);
396
		/* The DMA transfer from the TSE starts with two extra bytes
		 * of padding for IP payload alignment, so trim them from the
		 * reported packet length.
		 */
401 pktlength -= 2;
402
403 count++;
404 next_entry = (++priv->rx_cons) % priv->rx_ring_size;
405
406 skb = priv->rx_ring[entry].skb;
407 if (unlikely(!skb)) {
408 netdev_err(priv->dev,
409 "%s: Inconsistent Rx descriptor chain\n",
410 __func__);
411 priv->dev->stats.rx_dropped++;
412 break;
413 }
414 priv->rx_ring[entry].skb = NULL;
415
416 skb_put(skb, pktlength);
417
418 dma_unmap_single(priv->device, priv->rx_ring[entry].dma_addr,
419 priv->rx_ring[entry].len, DMA_FROM_DEVICE);
420
421 if (netif_msg_pktdata(priv)) {
422 netdev_info(priv->dev, "frame received %d bytes\n",
423 pktlength);
424 print_hex_dump(KERN_ERR, "data: ", DUMP_PREFIX_OFFSET,
425 16, 1, skb->data, pktlength, true);
426 }
427
428 tse_rx_vlan(priv->dev, skb);
429
430 skb->protocol = eth_type_trans(skb, priv->dev);
431 skb_checksum_none_assert(skb);
432
433 napi_gro_receive(&priv->napi, skb);
434
435 priv->dev->stats.rx_packets++;
436 priv->dev->stats.rx_bytes += pktlength;
437
438 entry = next_entry;
439
440 tse_rx_refill(priv);
441 }
442
443 return count;
444}
445
/* Reclaim completed transmit buffers and wake the queue when space frees up */
448static int tse_tx_complete(struct altera_tse_private *priv)
449{
450 unsigned int txsize = priv->tx_ring_size;
451 u32 ready;
452 unsigned int entry;
453 struct tse_buffer *tx_buff;
454 int txcomplete = 0;
455
456 spin_lock(&priv->tx_lock);
457
458 ready = priv->dmaops->tx_completions(priv);
459
460
461 while (ready && (priv->tx_cons != priv->tx_prod)) {
462 entry = priv->tx_cons % txsize;
463 tx_buff = &priv->tx_ring[entry];
464
465 if (netif_msg_tx_done(priv))
466 netdev_dbg(priv->dev, "%s: curr %d, dirty %d\n",
467 __func__, priv->tx_prod, priv->tx_cons);
468
469 if (likely(tx_buff->skb))
470 priv->dev->stats.tx_packets++;
471
472 tse_free_tx_buffer(priv, tx_buff);
473 priv->tx_cons++;
474
475 txcomplete++;
476 ready--;
477 }
478
479 if (unlikely(netif_queue_stopped(priv->dev) &&
480 tse_tx_avail(priv) > TSE_TX_THRESH(priv))) {
481 if (netif_queue_stopped(priv->dev) &&
482 tse_tx_avail(priv) > TSE_TX_THRESH(priv)) {
483 if (netif_msg_tx_done(priv))
484 netdev_dbg(priv->dev, "%s: restart transmit\n",
485 __func__);
486 netif_wake_queue(priv->dev);
487 }
488 }
489
490 spin_unlock(&priv->tx_lock);
491 return txcomplete;
492}
493
/* NAPI poll: reclaim TX completions, receive up to budget packets, then
 * re-enable interrupts once the ring is drained.
 */
496static int tse_poll(struct napi_struct *napi, int budget)
497{
498 struct altera_tse_private *priv =
499 container_of(napi, struct altera_tse_private, napi);
500 int rxcomplete = 0;
501 unsigned long int flags;
502
503 tse_tx_complete(priv);
504
505 rxcomplete = tse_rx(priv, budget);
506
507 if (rxcomplete < budget) {
508
509 napi_complete_done(napi, rxcomplete);
510
511 netdev_dbg(priv->dev,
512 "NAPI Complete, did %d packets with budget %d\n",
513 rxcomplete, budget);
514
515 spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
516 priv->dmaops->enable_rxirq(priv);
517 priv->dmaops->enable_txirq(priv);
518 spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
519 }
520 return rxcomplete;
521}
522
/* DMA TX & RX interrupt handler: acknowledge the interrupts, mask them and
 * schedule NAPI.
 */
525static irqreturn_t altera_isr(int irq, void *dev_id)
526{
527 struct net_device *dev = dev_id;
528 struct altera_tse_private *priv;
529
530 if (unlikely(!dev)) {
531 pr_err("%s: invalid dev pointer\n", __func__);
532 return IRQ_NONE;
533 }
534 priv = netdev_priv(dev);
535
536 spin_lock(&priv->rxdma_irq_lock);
537
538 priv->dmaops->clear_rxirq(priv);
539 priv->dmaops->clear_txirq(priv);
540 spin_unlock(&priv->rxdma_irq_lock);
541
542 if (likely(napi_schedule_prep(&priv->napi))) {
543 spin_lock(&priv->rxdma_irq_lock);
544 priv->dmaops->disable_rxirq(priv);
545 priv->dmaops->disable_txirq(priv);
546 spin_unlock(&priv->rxdma_irq_lock);
547 __napi_schedule(&priv->napi);
548 }
549
550
551 return IRQ_HANDLED;
552}
553
/* Transmit a packet (called by the kernel). Scatter/gather is not supported,
 * so the skb is assumed to be a single physically contiguous fragment of
 * skb_headlen(skb) bytes starting at skb->data.
 */
561static netdev_tx_t tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
562{
563 struct altera_tse_private *priv = netdev_priv(dev);
564 unsigned int txsize = priv->tx_ring_size;
565 unsigned int entry;
566 struct tse_buffer *buffer = NULL;
567 int nfrags = skb_shinfo(skb)->nr_frags;
568 unsigned int nopaged_len = skb_headlen(skb);
569 netdev_tx_t ret = NETDEV_TX_OK;
570 dma_addr_t dma_addr;
571
572 spin_lock_bh(&priv->tx_lock);
573
574 if (unlikely(tse_tx_avail(priv) < nfrags + 1)) {
575 if (!netif_queue_stopped(dev)) {
576 netif_stop_queue(dev);
577
578 netdev_err(priv->dev,
579 "%s: Tx list full when queue awake\n",
580 __func__);
581 }
582 ret = NETDEV_TX_BUSY;
583 goto out;
584 }
585
586
587 entry = priv->tx_prod % txsize;
588 buffer = &priv->tx_ring[entry];
589
590 dma_addr = dma_map_single(priv->device, skb->data, nopaged_len,
591 DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, dma_addr)) {
		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
		/* Returning NETDEV_TX_OK means the skb was consumed, so it
		 * must be dropped and freed here.
		 */
		dev_kfree_skb_any(skb);
		ret = NETDEV_TX_OK;
		goto out;
	}
597
598 buffer->skb = skb;
599 buffer->dma_addr = dma_addr;
600 buffer->len = nopaged_len;
601
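	/* Hand the mapped buffer to the DMA engine for transmission */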
602 priv->dmaops->tx_buffer(priv, buffer);
603
604 skb_tx_timestamp(skb);
605
606 priv->tx_prod++;
607 dev->stats.tx_bytes += skb->len;
608
609 if (unlikely(tse_tx_avail(priv) <= TXQUEUESTOP_THRESHHOLD)) {
610 if (netif_msg_hw(priv))
611 netdev_dbg(priv->dev, "%s: stop transmitted packets\n",
612 __func__);
613 netif_stop_queue(dev);
614 }
615
616out:
617 spin_unlock_bh(&priv->tx_lock);
618
619 return ret;
620}
621
/* Called by the PHY library whenever the link state may have changed.
 * Translates the phydev speed/duplex/link fields into the MAC's
 * command_config register settings.
 */
628static void altera_tse_adjust_link(struct net_device *dev)
629{
630 struct altera_tse_private *priv = netdev_priv(dev);
631 struct phy_device *phydev = dev->phydev;
632 int new_state = 0;
633
634
635 spin_lock(&priv->mac_cfg_lock);
636 if (phydev->link) {
637
638 u32 cfg_reg = ioread32(&priv->mac_dev->command_config);
639
640
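		/* Update the MAC duplex setting if the PHY duplex changed */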
641 if (phydev->duplex != priv->oldduplex) {
642 new_state = 1;
643 if (!(phydev->duplex))
644 cfg_reg |= MAC_CMDCFG_HD_ENA;
645 else
646 cfg_reg &= ~MAC_CMDCFG_HD_ENA;
647
648 netdev_dbg(priv->dev, "%s: Link duplex = 0x%x\n",
649 dev->name, phydev->duplex);
650
651 priv->oldduplex = phydev->duplex;
652 }
653
654
655 if (phydev->speed != priv->oldspeed) {
656 new_state = 1;
657 switch (phydev->speed) {
658 case 1000:
659 cfg_reg |= MAC_CMDCFG_ETH_SPEED;
660 cfg_reg &= ~MAC_CMDCFG_ENA_10;
661 break;
662 case 100:
663 cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
664 cfg_reg &= ~MAC_CMDCFG_ENA_10;
665 break;
666 case 10:
667 cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
668 cfg_reg |= MAC_CMDCFG_ENA_10;
669 break;
670 default:
671 if (netif_msg_link(priv))
672 netdev_warn(dev, "Speed (%d) is not 10/100/1000!\n",
673 phydev->speed);
674 break;
675 }
676 priv->oldspeed = phydev->speed;
677 }
678 iowrite32(cfg_reg, &priv->mac_dev->command_config);
679
680 if (!priv->oldlink) {
681 new_state = 1;
682 priv->oldlink = 1;
683 }
684 } else if (priv->oldlink) {
685 new_state = 1;
686 priv->oldlink = 0;
687 priv->oldspeed = 0;
688 priv->oldduplex = -1;
689 }
690
691 if (new_state && netif_msg_link(priv))
692 phy_print_status(phydev);
693
694 spin_unlock(&priv->mac_cfg_lock);
695}
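
/* Attach to a PHY found on the driver's locally created MDIO bus */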
696static struct phy_device *connect_local_phy(struct net_device *dev)
697{
698 struct altera_tse_private *priv = netdev_priv(dev);
699 struct phy_device *phydev = NULL;
700 char phy_id_fmt[MII_BUS_ID_SIZE + 3];
701
702 if (priv->phy_addr != POLL_PHY) {
703 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
704 priv->mdio->id, priv->phy_addr);
705
706 netdev_dbg(dev, "trying to attach to %s\n", phy_id_fmt);
707
708 phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
709 priv->phy_iface);
710 if (IS_ERR(phydev)) {
711 netdev_err(dev, "Could not attach to PHY\n");
712 phydev = NULL;
713 }
714
715 } else {
716 int ret;
717 phydev = phy_find_first(priv->mdio);
718 if (phydev == NULL) {
719 netdev_err(dev, "No PHY found\n");
720 return phydev;
721 }
722
723 ret = phy_connect_direct(dev, phydev, &altera_tse_adjust_link,
724 priv->phy_iface);
725 if (ret != 0) {
726 netdev_err(dev, "Could not attach to PHY\n");
727 phydev = NULL;
728 }
729 }
730 return phydev;
731}
732
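/* Read the PHY configuration from the device tree and create the MDIO bus */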
733static int altera_tse_phy_get_addr_mdio_create(struct net_device *dev)
734{
735 struct altera_tse_private *priv = netdev_priv(dev);
736 struct device_node *np = priv->device->of_node;
737 int ret;
738
739 ret = of_get_phy_mode(np, &priv->phy_iface);

	/* No phy-mode property means no PHY to manage; nothing to do here */
742 if (ret)
743 return 0;

	/* Try to get the PHY address from the device tree; if the phy-addr
	 * property is absent, fall back to probing the MDIO bus (POLL_PHY).
	 */
749 if (of_property_read_u32(priv->device->of_node, "phy-addr",
750 &priv->phy_addr)) {
751 priv->phy_addr = POLL_PHY;
752 }
753
754 if (!((priv->phy_addr == POLL_PHY) ||
755 ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) {
756 netdev_err(dev, "invalid phy-addr specified %d\n",
757 priv->phy_addr);
758 return -ENODEV;
759 }
760
761
762 ret = altera_tse_mdio_create(dev,
763 atomic_add_return(1, &instance_count));
764
765 if (ret)
766 return -ENODEV;
767
768 return 0;
769}

/* Initialize the driver's PHY state and attach to the PHY, either via a
 * phy-handle, a fixed-link node, or the local MDIO bus.
 */
773static int init_phy(struct net_device *dev)
774{
775 struct altera_tse_private *priv = netdev_priv(dev);
776 struct phy_device *phydev;
777 struct device_node *phynode;
778 bool fixed_link = false;
779 int rc = 0;
780
781
782 if (!priv->phy_iface)
783 return 0;
784
785 priv->oldlink = 0;
786 priv->oldspeed = 0;
787 priv->oldduplex = -1;
788
789 phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0);
790
791 if (!phynode) {
792
793 if (of_phy_is_fixed_link(priv->device->of_node)) {
794 rc = of_phy_register_fixed_link(priv->device->of_node);
795 if (rc < 0) {
796 netdev_err(dev, "cannot register fixed PHY\n");
797 return rc;
798 }

			/* In the case of a fixed PHY, the DT node associated
			 * to the PHY is the Ethernet MAC DT node.
			 */
803 phynode = of_node_get(priv->device->of_node);
804 fixed_link = true;
805
806 netdev_dbg(dev, "fixed-link detected\n");
807 phydev = of_phy_connect(dev, phynode,
808 &altera_tse_adjust_link,
809 0, priv->phy_iface);
810 } else {
811 netdev_dbg(dev, "no phy-handle found\n");
812 if (!priv->mdio) {
813 netdev_err(dev, "No phy-handle nor local mdio specified\n");
814 return -ENODEV;
815 }
816 phydev = connect_local_phy(dev);
817 }
818 } else {
819 netdev_dbg(dev, "phy-handle found\n");
820 phydev = of_phy_connect(dev, phynode,
821 &altera_tse_adjust_link, 0, priv->phy_iface);
822 }
823 of_node_put(phynode);
824
825 if (!phydev) {
826 netdev_err(dev, "Could not find the PHY\n");
827 if (fixed_link)
828 of_phy_deregister_fixed_link(priv->device->of_node);
829 return -ENODEV;
830 }

	/* MII and RMII cannot run at gigabit speeds, so cap the PHY at
	 * 100 Mbps on those interfaces.
	 */
834 if ((priv->phy_iface == PHY_INTERFACE_MODE_MII) ||
835 (priv->phy_iface == PHY_INTERFACE_MODE_RMII))
836 phy_set_max_speed(phydev, SPEED_100);

	/* Broken hardware is sometimes missing the pull-up resistor on the
	 * MDIO line, which results in reads to non-existent devices returning
	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
	 * device.
	 */
844 if ((phydev->phy_id == 0) && !fixed_link) {
845 netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id);
846 phy_disconnect(phydev);
847 return -ENODEV;
848 }
849
850 netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n",
851 phydev->mdio.addr, phydev->phy_id, phydev->link);
852
853 return 0;
854}
855
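/* Program the primary MAC address into the MAC core registers */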
856static void tse_update_mac_addr(struct altera_tse_private *priv, const u8 *addr)
857{
858 u32 msb;
859 u32 lsb;
860
861 msb = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
862 lsb = ((addr[5] << 8) | addr[4]) & 0xffff;
863
864
865 csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0));
866 csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1));
867}

/* MAC software reset.
 * When a software reset is issued, the MAC completes the frame currently in
 * flight, disables the transmit and receive paths, flushes the receive FIFO
 * and resets the statistics counters.
 */
875static int reset_mac(struct altera_tse_private *priv)
876{
877 int counter;
878 u32 dat;
879
880 dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
881 dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
882 dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
883 csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
884
885 counter = 0;
886 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
887 if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config),
888 MAC_CMDCFG_SW_RESET))
889 break;
890 udelay(1);
891 }
892
893 if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
894 dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
895 dat &= ~MAC_CMDCFG_SW_RESET;
896 csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
897 return -1;
898 }
899 return 0;
900}

/* Initialize the MAC core registers: FIFO thresholds, MAC address, frame
 * length, IPG and command_config defaults.
 */
904static int init_mac(struct altera_tse_private *priv)
905{
906 unsigned int cmd = 0;
907 u32 frm_length;
908
909
910 csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
911 priv->mac_dev, tse_csroffs(rx_section_empty));
912
913 csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev,
914 tse_csroffs(rx_section_full));
915
916 csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev,
917 tse_csroffs(rx_almost_empty));
918
919 csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev,
920 tse_csroffs(rx_almost_full));
921
922
923 csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
924 priv->mac_dev, tse_csroffs(tx_section_empty));
925
926 csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev,
927 tse_csroffs(tx_section_full));
928
929 csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev,
930 tse_csroffs(tx_almost_empty));
931
932 csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev,
933 tse_csroffs(tx_almost_full));
934
935
936 tse_update_mac_addr(priv, priv->dev->dev_addr);
937
938
939 frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
940 csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length));
941
942 csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev,
943 tse_csroffs(tx_ipg_length));

	/* Enable RX SHIFT16 so received frames are prepended with two bytes
	 * of padding, aligning the IP header; keep TX unshifted and let the
	 * MAC append the frame CRC.
	 */
948 tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat),
949 ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
950
951 tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat),
952 ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
953 ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
954
955
956 cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config));
957 cmd &= ~MAC_CMDCFG_PAD_EN;
958 cmd &= ~MAC_CMDCFG_CRC_FWD;
959 cmd |= MAC_CMDCFG_RX_ERR_DISC;
960
961
962 cmd |= MAC_CMDCFG_CNTL_FRM_ENA;
963 cmd &= ~MAC_CMDCFG_TX_ENA;
964 cmd &= ~MAC_CMDCFG_RX_ENA;
965
966
967 cmd &= ~MAC_CMDCFG_HD_ENA;
968 cmd &= ~MAC_CMDCFG_ETH_SPEED;
969 cmd &= ~MAC_CMDCFG_ENA_10;
970
971 csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config));
972
973 csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev,
974 tse_csroffs(pause_quanta));
975
976 if (netif_msg_hw(priv))
977 dev_dbg(priv->device,
978 "MAC post-initialization: CMD_CONFIG = 0x%08x\n", cmd);
979
980 return 0;
981}
982
983
984
985static void tse_set_mac(struct altera_tse_private *priv, bool enable)
986{
987 u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config));
988
989 if (enable)
990 value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
991 else
992 value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
993
994 csrwr32(value, priv->mac_dev, tse_csroffs(command_config));
995}
996
997
998
999static int tse_change_mtu(struct net_device *dev, int new_mtu)
1000{
1001 if (netif_running(dev)) {
1002 netdev_err(dev, "must be stopped to change its MTU\n");
1003 return -EBUSY;
1004 }
1005
1006 dev->mtu = new_mtu;
1007 netdev_update_features(dev);
1008
1009 return 0;
1010}
1011
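/* Program the 64-entry multicast hash filter from the device's mc list */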
1012static void altera_tse_set_mcfilter(struct net_device *dev)
1013{
1014 struct altera_tse_private *priv = netdev_priv(dev);
1015 int i;
1016 struct netdev_hw_addr *ha;
1017
1018
1019 for (i = 0; i < 64; i++)
1020 csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
1021
1022 netdev_for_each_mc_addr(ha, dev) {
1023 unsigned int hash = 0;
1024 int mac_octet;
1025
1026 for (mac_octet = 5; mac_octet >= 0; mac_octet--) {
1027 unsigned char xor_bit = 0;
1028 unsigned char octet = ha->addr[mac_octet];
1029 unsigned int bitshift;
1030
1031 for (bitshift = 0; bitshift < 8; bitshift++)
1032 xor_bit ^= ((octet >> bitshift) & 0x01);
1033
1034 hash = (hash << 1) | xor_bit;
1035 }
1036 csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4);
1037 }
1038}
1039
1040
1041static void altera_tse_set_mcfilterall(struct net_device *dev)
1042{
1043 struct altera_tse_private *priv = netdev_priv(dev);
1044 int i;
1045
1046
1047 for (i = 0; i < 64; i++)
1048 csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
1049}

/* Set or clear the multicast filter for this adapter using the hardware
 * hash filter; promiscuous mode is still honoured.
 */
1053static void tse_set_rx_mode_hashfilter(struct net_device *dev)
1054{
1055 struct altera_tse_private *priv = netdev_priv(dev);
1056
1057 spin_lock(&priv->mac_cfg_lock);
1058
1059 if (dev->flags & IFF_PROMISC)
1060 tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
1061 MAC_CMDCFG_PROMIS_EN);
1062
1063 if (dev->flags & IFF_ALLMULTI)
1064 altera_tse_set_mcfilterall(dev);
1065 else
1066 altera_tse_set_mcfilter(dev);
1067
1068 spin_unlock(&priv->mac_cfg_lock);
1069}

/* Set or clear the multicast filter for this adapter. Without the hash
 * filter, any multicast or additional unicast address requires enabling
 * promiscuous mode.
 */
1073static void tse_set_rx_mode(struct net_device *dev)
1074{
1075 struct altera_tse_private *priv = netdev_priv(dev);
1076
1077 spin_lock(&priv->mac_cfg_lock);
1078
1079 if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
1080 !netdev_mc_empty(dev) || !netdev_uc_empty(dev))
1081 tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
1082 MAC_CMDCFG_PROMIS_EN);
1083 else
1084 tse_clear_bit(priv->mac_dev, tse_csroffs(command_config),
1085 MAC_CMDCFG_PROMIS_EN);
1086
1087 spin_unlock(&priv->mac_cfg_lock);
1088}

/* Initialise the SGMII PCS block, if the interface is configured for SGMII */
1092static int init_sgmii_pcs(struct net_device *dev)
1093{
1094 struct altera_tse_private *priv = netdev_priv(dev);
1095 int n;
1096 unsigned int tmp_reg = 0;
1097
1098 if (priv->phy_iface != PHY_INTERFACE_MODE_SGMII)
1099 return 0;

	/* The SGMII PCS is mapped into the MAC's mdio_phy0 register space and
	 * exposes a PHY-like register set. Use the scratch register to verify
	 * the block is actually reachable before configuring it.
	 */
1108 if (sgmii_pcs_scratch_test(priv, 0x0000) &&
1109 sgmii_pcs_scratch_test(priv, 0xffff) &&
1110 sgmii_pcs_scratch_test(priv, 0xa5a5) &&
1111 sgmii_pcs_scratch_test(priv, 0x5a5a)) {
1112 netdev_info(dev, "PCS PHY ID: 0x%04x%04x\n",
1113 sgmii_pcs_read(priv, MII_PHYSID1),
1114 sgmii_pcs_read(priv, MII_PHYSID2));
1115 } else {
1116 netdev_err(dev, "SGMII PCS Scratch memory test failed.\n");
1117 return -ENOMEM;
1118 }

	/* Set the SGMII link timer to approximately 1.6 ms, as recommended
	 * for the TSE MegaCore.
	 */
1123 sgmii_pcs_write(priv, SGMII_PCS_LINK_TIMER_0, 0x0D40);
1124 sgmii_pcs_write(priv, SGMII_PCS_LINK_TIMER_1, 0x03);

	/* Enable SGMII interface mode with SGMII auto-negotiation */
1127 sgmii_pcs_write(priv, SGMII_PCS_IF_MODE, 0x3);

	/* Enable auto-negotiation; the reset below restarts it */
1130 tmp_reg = sgmii_pcs_read(priv, MII_BMCR);
1131 tmp_reg |= (BMCR_SPEED1000 | BMCR_FULLDPLX | BMCR_ANENABLE);
1132 sgmii_pcs_write(priv, MII_BMCR, tmp_reg);

	/* Reset the PCS block and wait for the reset bit to self-clear */
1135 tmp_reg |= BMCR_RESET;
1136 sgmii_pcs_write(priv, MII_BMCR, tmp_reg);
1137 for (n = 0; n < SGMII_PCS_SW_RESET_TIMEOUT; n++) {
1138 if (!(sgmii_pcs_read(priv, MII_BMCR) & BMCR_RESET)) {
1139 netdev_info(dev, "SGMII PCS block initialised OK\n");
1140 return 0;
1141 }
1142 udelay(1);
1143 }
1144
1145
1146 netdev_err(dev, "SGMII PCS block reset failed.\n");
1147 return -ETIMEDOUT;
1148}
1149
1150
1151
1152static int tse_open(struct net_device *dev)
1153{
1154 struct altera_tse_private *priv = netdev_priv(dev);
1155 int ret = 0;
1156 int i;
1157 unsigned long int flags;
1158
1159
1160 ret = priv->dmaops->init_dma(priv);
1161 if (ret != 0) {
1162 netdev_err(dev, "Cannot initialize DMA\n");
1163 goto phy_error;
1164 }
1165
1166 if (netif_msg_ifup(priv))
1167 netdev_warn(dev, "device MAC address %pM\n",
1168 dev->dev_addr);
1169
1170 if ((priv->revision < 0xd00) || (priv->revision > 0xe00))
1171 netdev_warn(dev, "TSE revision %x\n", priv->revision);
1172
1173 spin_lock(&priv->mac_cfg_lock);
1174
1175 ret = init_sgmii_pcs(dev);
1176 if (ret) {
1177 netdev_err(dev,
1178 "Cannot init the SGMII PCS (error: %d)\n", ret);
1179 spin_unlock(&priv->mac_cfg_lock);
1180 goto phy_error;
1181 }
1182
1183 ret = reset_mac(priv);
	/* The software reset can time out when the MAC is not being clocked
	 * (e.g. no PHY clock yet); this is not fatal, so only log it.
	 */
1188 if (ret)
1189 netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);
1190
1191 ret = init_mac(priv);
1192 spin_unlock(&priv->mac_cfg_lock);
1193 if (ret) {
1194 netdev_err(dev, "Cannot init MAC core (error: %d)\n", ret);
1195 goto alloc_skbuf_error;
1196 }
1197
1198 priv->dmaops->reset_dma(priv);
1199
1200
1201 priv->rx_ring_size = dma_rx_num;
1202 priv->tx_ring_size = dma_tx_num;
1203 ret = alloc_init_skbufs(priv);
1204 if (ret) {
1205 netdev_err(dev, "DMA descriptors initialization failed\n");
1206 goto alloc_skbuf_error;
1207 }
1208
1209
1210
1211 ret = request_irq(priv->rx_irq, altera_isr, IRQF_SHARED,
1212 dev->name, dev);
1213 if (ret) {
1214 netdev_err(dev, "Unable to register RX interrupt %d\n",
1215 priv->rx_irq);
1216 goto init_error;
1217 }
1218
1219
1220 ret = request_irq(priv->tx_irq, altera_isr, IRQF_SHARED,
1221 dev->name, dev);
1222 if (ret) {
1223 netdev_err(dev, "Unable to register TX interrupt %d\n",
1224 priv->tx_irq);
1225 goto tx_request_irq_error;
1226 }
1227
1228
1229 spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
1230 priv->dmaops->enable_rxirq(priv);
1231 priv->dmaops->enable_txirq(priv);
1232
1233
1234 for (i = 0; i < priv->rx_ring_size; i++)
1235 priv->dmaops->add_rx_desc(priv, &priv->rx_ring[i]);
1236
1237 spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
1238
1239 if (dev->phydev)
1240 phy_start(dev->phydev);
1241
1242 napi_enable(&priv->napi);
1243 netif_start_queue(dev);
1244
1245 priv->dmaops->start_rxdma(priv);
1246
1247
1248 spin_lock(&priv->mac_cfg_lock);
1249 tse_set_mac(priv, true);
1250 spin_unlock(&priv->mac_cfg_lock);
1251
1252 return 0;
1253
1254tx_request_irq_error:
1255 free_irq(priv->rx_irq, dev);
1256init_error:
1257 free_skbufs(dev);
1258alloc_skbuf_error:
1259phy_error:
1260 return ret;
1261}
1262
1263
1264
1265static int tse_shutdown(struct net_device *dev)
1266{
1267 struct altera_tse_private *priv = netdev_priv(dev);
1268 int ret;
1269 unsigned long int flags;
1270
1271
1272 if (dev->phydev)
1273 phy_stop(dev->phydev);
1274
1275 netif_stop_queue(dev);
1276 napi_disable(&priv->napi);
1277
1278
1279 spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
1280 priv->dmaops->disable_rxirq(priv);
1281 priv->dmaops->disable_txirq(priv);
1282 spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
1283
1284
1285 free_irq(priv->rx_irq, dev);
1286 free_irq(priv->tx_irq, dev);
1287
1288
1289 spin_lock(&priv->mac_cfg_lock);
1290 spin_lock(&priv->tx_lock);
1291
1292 ret = reset_mac(priv);
	/* As in tse_open(), a reset timeout here is not fatal; only log it */
1297 if (ret)
1298 netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);
1299 priv->dmaops->reset_dma(priv);
1300 free_skbufs(dev);
1301
1302 spin_unlock(&priv->tx_lock);
1303 spin_unlock(&priv->mac_cfg_lock);
1304
1305 priv->dmaops->uninit_dma(priv);
1306
1307 return 0;
1308}
1309
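/* Not const: ndo_set_rx_mode is switched in the probe routine when the
 * hardware hash filter is available.
 */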
1310static struct net_device_ops altera_tse_netdev_ops = {
1311 .ndo_open = tse_open,
1312 .ndo_stop = tse_shutdown,
1313 .ndo_start_xmit = tse_start_xmit,
1314 .ndo_set_mac_address = eth_mac_addr,
1315 .ndo_set_rx_mode = tse_set_rx_mode,
1316 .ndo_change_mtu = tse_change_mtu,
1317 .ndo_validate_addr = eth_validate_addr,
1318};
1319
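/* Request, reserve and ioremap a named MMIO resource of the platform device */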
1320static int request_and_map(struct platform_device *pdev, const char *name,
1321 struct resource **res, void __iomem **ptr)
1322{
1323 struct resource *region;
1324 struct device *device = &pdev->dev;
1325
1326 *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
1327 if (*res == NULL) {
1328 dev_err(device, "resource %s not defined\n", name);
1329 return -ENODEV;
1330 }
1331
1332 region = devm_request_mem_region(device, (*res)->start,
1333 resource_size(*res), dev_name(device));
1334 if (region == NULL) {
1335 dev_err(device, "unable to request %s\n", name);
1336 return -EBUSY;
1337 }
1338
1339 *ptr = devm_ioremap(device, region->start,
1340 resource_size(region));
1341 if (*ptr == NULL) {
1342 dev_err(device, "ioremap of %s failed!", name);
1343 return -ENOMEM;
1344 }
1345
1346 return 0;
1347}
1348
1349
1350
1351static int altera_tse_probe(struct platform_device *pdev)
1352{
1353 struct net_device *ndev;
1354 int ret = -ENODEV;
1355 struct resource *control_port;
1356 struct resource *dma_res;
1357 struct altera_tse_private *priv;
1358 void __iomem *descmap;
1359 const struct of_device_id *of_id = NULL;
1360
1361 ndev = alloc_etherdev(sizeof(struct altera_tse_private));
1362 if (!ndev) {
1363 dev_err(&pdev->dev, "Could not allocate network device\n");
1364 return -ENODEV;
1365 }
1366
1367 SET_NETDEV_DEV(ndev, &pdev->dev);
1368
1369 priv = netdev_priv(ndev);
1370 priv->device = &pdev->dev;
1371 priv->dev = ndev;
1372 priv->msg_enable = netif_msg_init(debug, default_msg_level);
1373
1374 of_id = of_match_device(altera_tse_ids, &pdev->dev);
1375
1376 if (of_id)
1377 priv->dmaops = (struct altera_dmaops *)of_id->data;
1378
1379
1380 if (priv->dmaops &&
1381 priv->dmaops->altera_dtype == ALTERA_DTYPE_SGDMA) {
1382
1383 ret = request_and_map(pdev, "s1", &dma_res, &descmap);
1384 if (ret)
1385 goto err_free_netdev;
1386
1387
1388 priv->tx_dma_desc = descmap;
1389
1390
1391 priv->txdescmem = resource_size(dma_res)/2;
1392
1393 priv->txdescmem_busaddr = (dma_addr_t)dma_res->start;
1394
1395 priv->rx_dma_desc = (void __iomem *)((uintptr_t)(descmap +
1396 priv->txdescmem));
1397 priv->rxdescmem = resource_size(dma_res)/2;
1398 priv->rxdescmem_busaddr = dma_res->start;
1399 priv->rxdescmem_busaddr += priv->txdescmem;
1400
1401 if (upper_32_bits(priv->rxdescmem_busaddr)) {
1402 dev_dbg(priv->device,
1403 "SGDMA bus addresses greater than 32-bits\n");
1404 ret = -EINVAL;
1405 goto err_free_netdev;
1406 }
1407 if (upper_32_bits(priv->txdescmem_busaddr)) {
1408 dev_dbg(priv->device,
1409 "SGDMA bus addresses greater than 32-bits\n");
1410 ret = -EINVAL;
1411 goto err_free_netdev;
1412 }
1413 } else if (priv->dmaops &&
1414 priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) {
1415 ret = request_and_map(pdev, "rx_resp", &dma_res,
1416 &priv->rx_dma_resp);
1417 if (ret)
1418 goto err_free_netdev;
1419
1420 ret = request_and_map(pdev, "tx_desc", &dma_res,
1421 &priv->tx_dma_desc);
1422 if (ret)
1423 goto err_free_netdev;
1424
1425 priv->txdescmem = resource_size(dma_res);
1426 priv->txdescmem_busaddr = dma_res->start;
1427
1428 ret = request_and_map(pdev, "rx_desc", &dma_res,
1429 &priv->rx_dma_desc);
1430 if (ret)
1431 goto err_free_netdev;
1432
1433 priv->rxdescmem = resource_size(dma_res);
1434 priv->rxdescmem_busaddr = dma_res->start;
1435
1436 } else {
1437 ret = -ENODEV;
1438 goto err_free_netdev;
1439 }
1440
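	/* Use the DMA mask preferred by the DMA engine; fall back to 32 bits */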
1441 if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) {
1442 dma_set_coherent_mask(priv->device,
1443 DMA_BIT_MASK(priv->dmaops->dmamask));
1444 } else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) {
1445 dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
1446 } else {
1447 ret = -EIO;
1448 goto err_free_netdev;
1449 }
1450
1451
1452 ret = request_and_map(pdev, "control_port", &control_port,
1453 (void __iomem **)&priv->mac_dev);
1454 if (ret)
1455 goto err_free_netdev;
1456
1457
1458 ret = request_and_map(pdev, "rx_csr", &dma_res,
1459 &priv->rx_dma_csr);
1460 if (ret)
1461 goto err_free_netdev;
1462
1463
1464
1465 ret = request_and_map(pdev, "tx_csr", &dma_res,
1466 &priv->tx_dma_csr);
1467 if (ret)
1468 goto err_free_netdev;
1469
1470
1471
1472 priv->rx_irq = platform_get_irq_byname(pdev, "rx_irq");
1473 if (priv->rx_irq == -ENXIO) {
1474 dev_err(&pdev->dev, "cannot obtain Rx IRQ\n");
1475 ret = -ENXIO;
1476 goto err_free_netdev;
1477 }
1478
1479
1480 priv->tx_irq = platform_get_irq_byname(pdev, "tx_irq");
1481 if (priv->tx_irq == -ENXIO) {
1482 dev_err(&pdev->dev, "cannot obtain Tx IRQ\n");
1483 ret = -ENXIO;
1484 goto err_free_netdev;
1485 }
1486
1487
1488 if (of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
1489 &priv->rx_fifo_depth)) {
1490 dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n");
1491 ret = -ENXIO;
1492 goto err_free_netdev;
1493 }
1494
1495 if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
1496 &priv->tx_fifo_depth)) {
1497 dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
1498 ret = -ENXIO;
1499 goto err_free_netdev;
1500 }
1501
1502
1503 priv->hash_filter =
1504 of_property_read_bool(pdev->dev.of_node,
1505 "altr,has-hash-multicast-filter");

	/* Force the hash filter off for now: the hash filter receive path is
	 * not used until its multicast filtering issues are resolved.
	 */
1510 priv->hash_filter = 0;
1511
1512
1513 priv->added_unicast =
1514 of_property_read_bool(pdev->dev.of_node,
1515 "altr,has-supplementary-unicast");
1516
1517 priv->dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN;
1518
1519 priv->dev->max_mtu = ETH_DATA_LEN;

	/* The "max-frame-size" DT property is, by common usage, actually the
	 * maximum MTU, so read it straight into max_mtu if present.
	 */
1525 of_property_read_u32(pdev->dev.of_node, "max-frame-size",
1526 &priv->dev->max_mtu);
1527
1528
1529
1530
1531 priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE;
1532
1533
1534 ret = of_get_ethdev_address(pdev->dev.of_node, ndev);
1535 if (ret)
1536 eth_hw_addr_random(ndev);
1537
1538
1539 ret = altera_tse_phy_get_addr_mdio_create(ndev);
1540
1541 if (ret)
1542 goto err_free_netdev;
1543
1544
1545 ndev->mem_start = control_port->start;
1546 ndev->mem_end = control_port->end;
1547 ndev->netdev_ops = &altera_tse_netdev_ops;
1548 altera_tse_set_ethtool_ops(ndev);
1549
1550 altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
1551
1552 if (priv->hash_filter)
1553 altera_tse_netdev_ops.ndo_set_rx_mode =
1554 tse_set_rx_mode_hashfilter;

	/* Scatter/gather IO is not supported, so it is turned off */
1559 ndev->hw_features &= ~NETIF_F_SG;
1560 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;

	/* VLAN tag stripping and filtering are not offloaded by the hardware,
	 * but the driver strips the tag in software (tse_rx_vlan) so upper
	 * layers see an untagged frame.
	 */
1566 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
1567
1568
1569 netif_napi_add(ndev, &priv->napi, tse_poll, NAPI_POLL_WEIGHT);
1570
1571 spin_lock_init(&priv->mac_cfg_lock);
1572 spin_lock_init(&priv->tx_lock);
1573 spin_lock_init(&priv->rxdma_irq_lock);
1574
1575 netif_carrier_off(ndev);
1576 ret = register_netdev(ndev);
1577 if (ret) {
1578 dev_err(&pdev->dev, "failed to register TSE net device\n");
1579 goto err_register_netdev;
1580 }
1581
1582 platform_set_drvdata(pdev, ndev);
1583
1584 priv->revision = ioread32(&priv->mac_dev->megacore_revision);
1585
1586 if (netif_msg_probe(priv))
1587 dev_info(&pdev->dev, "Altera TSE MAC version %d.%d at 0x%08lx irq %d/%d\n",
1588 (priv->revision >> 8) & 0xff,
1589 priv->revision & 0xff,
1590 (unsigned long) control_port->start, priv->rx_irq,
1591 priv->tx_irq);
1592
1593 ret = init_phy(ndev);
1594 if (ret != 0) {
1595 netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret);
1596 goto err_init_phy;
1597 }
1598 return 0;
1599
1600err_init_phy:
1601 unregister_netdev(ndev);
1602err_register_netdev:
1603 netif_napi_del(&priv->napi);
1604 altera_tse_mdio_destroy(ndev);
1605err_free_netdev:
1606 free_netdev(ndev);
1607 return ret;
1608}
1609
1610
1611
1612static int altera_tse_remove(struct platform_device *pdev)
1613{
1614 struct net_device *ndev = platform_get_drvdata(pdev);
1615 struct altera_tse_private *priv = netdev_priv(ndev);
1616
1617 if (ndev->phydev) {
1618 phy_disconnect(ndev->phydev);
1619
1620 if (of_phy_is_fixed_link(priv->device->of_node))
1621 of_phy_deregister_fixed_link(priv->device->of_node);
1622 }
1623
1624 platform_set_drvdata(pdev, NULL);
1625 altera_tse_mdio_destroy(ndev);
1626 unregister_netdev(ndev);
1627 free_netdev(ndev);
1628
1629 return 0;
1630}
1631
1632static const struct altera_dmaops altera_dtype_sgdma = {
1633 .altera_dtype = ALTERA_DTYPE_SGDMA,
1634 .dmamask = 32,
1635 .reset_dma = sgdma_reset,
1636 .enable_txirq = sgdma_enable_txirq,
1637 .enable_rxirq = sgdma_enable_rxirq,
1638 .disable_txirq = sgdma_disable_txirq,
1639 .disable_rxirq = sgdma_disable_rxirq,
1640 .clear_txirq = sgdma_clear_txirq,
1641 .clear_rxirq = sgdma_clear_rxirq,
1642 .tx_buffer = sgdma_tx_buffer,
1643 .tx_completions = sgdma_tx_completions,
1644 .add_rx_desc = sgdma_add_rx_desc,
1645 .get_rx_status = sgdma_rx_status,
1646 .init_dma = sgdma_initialize,
1647 .uninit_dma = sgdma_uninitialize,
1648 .start_rxdma = sgdma_start_rxdma,
1649};
1650
1651static const struct altera_dmaops altera_dtype_msgdma = {
1652 .altera_dtype = ALTERA_DTYPE_MSGDMA,
1653 .dmamask = 64,
1654 .reset_dma = msgdma_reset,
1655 .enable_txirq = msgdma_enable_txirq,
1656 .enable_rxirq = msgdma_enable_rxirq,
1657 .disable_txirq = msgdma_disable_txirq,
1658 .disable_rxirq = msgdma_disable_rxirq,
1659 .clear_txirq = msgdma_clear_txirq,
1660 .clear_rxirq = msgdma_clear_rxirq,
1661 .tx_buffer = msgdma_tx_buffer,
1662 .tx_completions = msgdma_tx_completions,
1663 .add_rx_desc = msgdma_add_rx_desc,
1664 .get_rx_status = msgdma_rx_status,
1665 .init_dma = msgdma_initialize,
1666 .uninit_dma = msgdma_uninitialize,
1667 .start_rxdma = msgdma_start_rxdma,
1668};
1669
1670static const struct of_device_id altera_tse_ids[] = {
1671 { .compatible = "altr,tse-msgdma-1.0", .data = &altera_dtype_msgdma, },
1672 { .compatible = "altr,tse-1.0", .data = &altera_dtype_sgdma, },
1673 { .compatible = "ALTR,tse-1.0", .data = &altera_dtype_sgdma, },
1674 {},
1675};
1676MODULE_DEVICE_TABLE(of, altera_tse_ids);
1677
1678static struct platform_driver altera_tse_driver = {
1679 .probe = altera_tse_probe,
1680 .remove = altera_tse_remove,
1681 .suspend = NULL,
1682 .resume = NULL,
1683 .driver = {
1684 .name = ALTERA_TSE_RESOURCE_NAME,
1685 .of_match_table = altera_tse_ids,
1686 },
1687};
1688
1689module_platform_driver(altera_tse_driver);
1690
1691MODULE_AUTHOR("Altera Corporation");
1692MODULE_DESCRIPTION("Altera Triple Speed Ethernet MAC driver");
1693MODULE_LICENSE("GPL v2");
1694