linux/drivers/net/ethernet/aeroflex/greth.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC.
 *
 * 2005-2010 (c) Aeroflex Gaisler AB
 *
 * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs
 * available in the GRLIB VHDL IP core library.
 *
 * Full documentation of both cores can be found here:
 * https://www.gaisler.com/products/grlib/grip.pdf
 *
 * The Gigabit version supports scatter/gather DMA, any alignment of
 * buffers and checksum offloading.
 *
 * Contributors: Kristoffer Glembo
 *               Daniel Hellstrom
 *               Marko Isomaki
 */

#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/io.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/of_device.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/byteorder.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#endif

#include "greth.h"

#define GRETH_DEF_MSG_ENABLE      \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

static int greth_debug = -1;    /* -1 == use GRETH_DEF_MSG_ENABLE as value */
module_param(greth_debug, int, 0);
MODULE_PARM_DESC(greth_debug, "GRETH bitmapped debugging message enable value");

/* Accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
static int macaddr[6];
module_param_array(macaddr, int, NULL, 0);
MODULE_PARM_DESC(macaddr, "GRETH Ethernet MAC address");

static int greth_edcl = 1;
module_param(greth_edcl, int, 0);
MODULE_PARM_DESC(greth_edcl, "GRETH EDCL usage indicator. Set to 1 if EDCL is used.");
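
/* Example (illustrative values only): all three parameters can be given
 * at load time, e.g.:
 *
 *   modprobe greth greth_debug=0x3f greth_edcl=0 \
 *           macaddr=0x08,0x00,0x20,0x30,0x40,0x50
 */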

static int greth_open(struct net_device *dev);
static netdev_tx_t greth_start_xmit(struct sk_buff *skb,
           struct net_device *dev);
static netdev_tx_t greth_start_xmit_gbit(struct sk_buff *skb,
           struct net_device *dev);
static int greth_rx(struct net_device *dev, int limit);
static int greth_rx_gbit(struct net_device *dev, int limit);
static void greth_clean_tx(struct net_device *dev);
static void greth_clean_tx_gbit(struct net_device *dev);
static irqreturn_t greth_interrupt(int irq, void *dev_id);
static int greth_close(struct net_device *dev);
static int greth_set_mac_add(struct net_device *dev, void *p);
static void greth_set_multicast_list(struct net_device *dev);

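/* GRETH registers and buffer descriptors are big-endian. The __raw_
 * MMIO accessors perform no byte swapping of their own, so the swap is
 * done explicitly with cpu_to_be32()/be32_to_cpu() in the helpers
 * below. The wmb() calls in the enable helpers make sure descriptor
 * updates have reached memory before the MAC is told to start DMA.
 */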
#define GRETH_REGLOAD(a)            (be32_to_cpu(__raw_readl(&(a))))
#define GRETH_REGSAVE(a, v)         (__raw_writel(cpu_to_be32(v), &(a)))
#define GRETH_REGORIN(a, v)         (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) | (v))))
#define GRETH_REGANDIN(a, v)        (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) & (v))))

#define NEXT_TX(N)      (((N) + 1) & GRETH_TXBD_NUM_MASK)
#define SKIP_TX(N, C)   (((N) + (C)) & GRETH_TXBD_NUM_MASK)
#define NEXT_RX(N)      (((N) + 1) & GRETH_RXBD_NUM_MASK)

static void greth_print_rx_packet(void *addr, int len)
{
        print_hex_dump(KERN_DEBUG, "RX: ", DUMP_PREFIX_OFFSET, 16, 1,
                        addr, len, true);
}

static void greth_print_tx_packet(struct sk_buff *skb)
{
        int i;
        int length;

        if (skb_shinfo(skb)->nr_frags == 0)
                length = skb->len;
        else
                length = skb_headlen(skb);

        print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
                        skb->data, length, true);

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
                               skb_frag_address(&skb_shinfo(skb)->frags[i]),
                               skb_frag_size(&skb_shinfo(skb)->frags[i]), true);
        }
}

static inline void greth_enable_tx(struct greth_private *greth)
{
        wmb(); /* BDs must have been written to memory before enabling TX */
        GRETH_REGORIN(greth->regs->control, GRETH_TXEN);
}

static inline void greth_enable_tx_and_irq(struct greth_private *greth)
{
        wmb(); /* BDs must have been written to memory before enabling TX */
        GRETH_REGORIN(greth->regs->control, GRETH_TXEN | GRETH_TXI);
}

static inline void greth_disable_tx(struct greth_private *greth)
{
        GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN);
}

static inline void greth_enable_rx(struct greth_private *greth)
{
        wmb();
        GRETH_REGORIN(greth->regs->control, GRETH_RXEN);
}

static inline void greth_disable_rx(struct greth_private *greth)
{
        GRETH_REGANDIN(greth->regs->control, ~GRETH_RXEN);
}

static inline void greth_enable_irqs(struct greth_private *greth)
{
        GRETH_REGORIN(greth->regs->control, GRETH_RXI | GRETH_TXI);
}

static inline void greth_disable_irqs(struct greth_private *greth)
{
        GRETH_REGANDIN(greth->regs->control, ~(GRETH_RXI | GRETH_TXI));
}

static inline void greth_write_bd(u32 *bd, u32 val)
{
        __raw_writel(cpu_to_be32(val), bd);
}

static inline u32 greth_read_bd(u32 *bd)
{
        return be32_to_cpu(__raw_readl(bd));
}

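/* Free all RX/TX buffers and tear down their DMA mappings. Used both
 * when the interface is brought down and on ring-setup failure. For the
 * gigabit MAC the SKBs mapped into the descriptors are released; for
 * the 10/100 MAC the fixed bounce buffers are freed instead.
 */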
static void greth_clean_rings(struct greth_private *greth)
{
        int i;
        struct greth_bd *rx_bdp = greth->rx_bd_base;
        struct greth_bd *tx_bdp = greth->tx_bd_base;

        if (greth->gbit_mac) {
                /* Free and unmap RX buffers */
                for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
                        if (greth->rx_skbuff[i] != NULL) {
                                dev_kfree_skb(greth->rx_skbuff[i]);
                                dma_unmap_single(greth->dev,
                                                 greth_read_bd(&rx_bdp->addr),
                                                 MAX_FRAME_SIZE + NET_IP_ALIGN,
                                                 DMA_FROM_DEVICE);
                        }
                }

                /* TX buffers */
                while (greth->tx_free < GRETH_TXBD_NUM) {
                        struct sk_buff *skb = greth->tx_skbuff[greth->tx_last];
                        int nr_frags = skb_shinfo(skb)->nr_frags;

                        tx_bdp = greth->tx_bd_base + greth->tx_last;
                        greth->tx_last = NEXT_TX(greth->tx_last);

                        dma_unmap_single(greth->dev,
                                         greth_read_bd(&tx_bdp->addr),
                                         skb_headlen(skb),
                                         DMA_TO_DEVICE);

                        for (i = 0; i < nr_frags; i++) {
                                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                                tx_bdp = greth->tx_bd_base + greth->tx_last;

                                dma_unmap_page(greth->dev,
                                               greth_read_bd(&tx_bdp->addr),
                                               skb_frag_size(frag),
                                               DMA_TO_DEVICE);

                                greth->tx_last = NEXT_TX(greth->tx_last);
                        }
                        greth->tx_free += nr_frags + 1;
                        dev_kfree_skb(skb);
                }
        } else { /* 10/100 Mbps MAC */
                for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
                        kfree(greth->rx_bufs[i]);
                        dma_unmap_single(greth->dev,
                                         greth_read_bd(&rx_bdp->addr),
                                         MAX_FRAME_SIZE,
                                         DMA_FROM_DEVICE);
                }
                for (i = 0; i < GRETH_TXBD_NUM; i++, tx_bdp++) {
                        kfree(greth->tx_bufs[i]);
                        dma_unmap_single(greth->dev,
                                         greth_read_bd(&tx_bdp->addr),
                                         MAX_FRAME_SIZE,
                                         DMA_TO_DEVICE);
                }
        }
}

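/* Allocate the RX/TX rings. The gigabit MAC gets a freshly mapped skb
 * in every RX descriptor, while the 10/100 MAC maps one fixed kmalloc()
 * buffer per descriptor and copies frames to/from SKBs at run time. The
 * last RX descriptor is marked with GRETH_BD_WR so the MAC wraps back
 * to the ring base address.
 */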
static int greth_init_rings(struct greth_private *greth)
{
        struct sk_buff *skb;
        struct greth_bd *rx_bd, *tx_bd;
        u32 dma_addr;
        int i;

        rx_bd = greth->rx_bd_base;
        tx_bd = greth->tx_bd_base;

        /* Initialize descriptor rings and buffers */
        if (greth->gbit_mac) {
                for (i = 0; i < GRETH_RXBD_NUM; i++) {
                        skb = netdev_alloc_skb(greth->netdev, MAX_FRAME_SIZE + NET_IP_ALIGN);
                        if (skb == NULL) {
                                if (netif_msg_ifup(greth))
                                        dev_err(greth->dev, "Error allocating DMA ring.\n");
                                goto cleanup;
                        }
                        skb_reserve(skb, NET_IP_ALIGN);
                        dma_addr = dma_map_single(greth->dev,
                                                  skb->data,
                                                  MAX_FRAME_SIZE + NET_IP_ALIGN,
                                                  DMA_FROM_DEVICE);

                        if (dma_mapping_error(greth->dev, dma_addr)) {
                                if (netif_msg_ifup(greth))
                                        dev_err(greth->dev, "Could not create initial DMA mapping\n");
                                goto cleanup;
                        }
                        greth->rx_skbuff[i] = skb;
                        greth_write_bd(&rx_bd[i].addr, dma_addr);
                        greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
                }
        } else {
                /* 10/100 MAC uses a fixed set of buffers and copies to/from SKBs */
                for (i = 0; i < GRETH_RXBD_NUM; i++) {
                        greth->rx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);
                        if (greth->rx_bufs[i] == NULL) {
                                if (netif_msg_ifup(greth))
                                        dev_err(greth->dev, "Error allocating DMA ring.\n");
                                goto cleanup;
                        }

                        dma_addr = dma_map_single(greth->dev,
                                                  greth->rx_bufs[i],
                                                  MAX_FRAME_SIZE,
                                                  DMA_FROM_DEVICE);

                        if (dma_mapping_error(greth->dev, dma_addr)) {
                                if (netif_msg_ifup(greth))
                                        dev_err(greth->dev, "Could not create initial DMA mapping\n");
                                goto cleanup;
                        }
                        greth_write_bd(&rx_bd[i].addr, dma_addr);
                        greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
                }
                for (i = 0; i < GRETH_TXBD_NUM; i++) {
                        greth->tx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);
                        if (greth->tx_bufs[i] == NULL) {
                                if (netif_msg_ifup(greth))
                                        dev_err(greth->dev, "Error allocating DMA ring.\n");
                                goto cleanup;
                        }

                        dma_addr = dma_map_single(greth->dev,
                                                  greth->tx_bufs[i],
                                                  MAX_FRAME_SIZE,
                                                  DMA_TO_DEVICE);

                        if (dma_mapping_error(greth->dev, dma_addr)) {
                                if (netif_msg_ifup(greth))
                                        dev_err(greth->dev, "Could not create initial DMA mapping\n");
                                goto cleanup;
                        }
                        greth_write_bd(&tx_bd[i].addr, dma_addr);
                        greth_write_bd(&tx_bd[i].stat, 0);
                }
        }
        greth_write_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat,
                       greth_read_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat) | GRETH_BD_WR);

        /* Initialize pointers. */
        greth->rx_cur = 0;
        greth->tx_next = 0;
        greth->tx_last = 0;
        greth->tx_free = GRETH_TXBD_NUM;

        /* Initialize descriptor base address */
        GRETH_REGSAVE(greth->regs->tx_desc_p, greth->tx_bd_base_phys);
        GRETH_REGSAVE(greth->regs->rx_desc_p, greth->rx_bd_base_phys);

        return 0;

cleanup:
        greth_clean_rings(greth);
        return -ENOMEM;
}

static int greth_open(struct net_device *dev)
{
        struct greth_private *greth = netdev_priv(dev);
        int err;

        err = greth_init_rings(greth);
        if (err) {
                if (netif_msg_ifup(greth))
                        dev_err(&dev->dev, "Could not allocate memory for DMA rings\n");
                return err;
        }

        err = request_irq(greth->irq, greth_interrupt, 0, "eth", (void *) dev);
        if (err) {
                if (netif_msg_ifup(greth))
                        dev_err(&dev->dev, "Could not allocate interrupt %d\n", greth->irq);
                greth_clean_rings(greth);
                return err;
        }

        if (netif_msg_ifup(greth))
                dev_dbg(&dev->dev, "starting queue\n");
        netif_start_queue(dev);

        /* Clear all pending interrupt events */
        GRETH_REGSAVE(greth->regs->status, 0xFF);

        napi_enable(&greth->napi);

        greth_enable_irqs(greth);
        greth_enable_tx(greth);
        greth_enable_rx(greth);
        return 0;
}

static int greth_close(struct net_device *dev)
{
        struct greth_private *greth = netdev_priv(dev);

        napi_disable(&greth->napi);

        greth_disable_irqs(greth);
        greth_disable_tx(greth);
        greth_disable_rx(greth);

        netif_stop_queue(dev);

        free_irq(greth->irq, (void *) dev);

        greth_clean_rings(greth);

        return 0;
}

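/* Transmit path for the 10/100 MAC. The frame is copied into the
 * preallocated DMA buffer already attached to the next free descriptor,
 * so no mapping is set up per packet. If the ring is full the queue is
 * stopped and NETDEV_TX_BUSY is returned so the stack requeues the skb.
 */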
static netdev_tx_t
greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct greth_private *greth = netdev_priv(dev);
        struct greth_bd *bdp;
        int err = NETDEV_TX_OK;
        u32 status, dma_addr, ctrl;
        unsigned long flags;

        /* Clean TX Ring */
        greth_clean_tx(greth->netdev);

        if (unlikely(greth->tx_free <= 0)) {
                spin_lock_irqsave(&greth->devlock, flags); /* save from poll/irq */
                ctrl = GRETH_REGLOAD(greth->regs->control);
                /* Enable TX IRQ only if not already in poll() routine */
                if (ctrl & GRETH_RXI)
                        GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
                netif_stop_queue(dev);
                spin_unlock_irqrestore(&greth->devlock, flags);
                return NETDEV_TX_BUSY;
        }

        if (netif_msg_pktdata(greth))
                greth_print_tx_packet(skb);

        if (unlikely(skb->len > MAX_FRAME_SIZE)) {
                dev->stats.tx_errors++;
                goto out;
        }

        bdp = greth->tx_bd_base + greth->tx_next;
        dma_addr = greth_read_bd(&bdp->addr);

        memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);

        dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);

        status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
        greth->tx_bufs_length[greth->tx_next] = skb->len & GRETH_BD_LEN;

        /* Wrap around descriptor ring */
        if (greth->tx_next == GRETH_TXBD_NUM_MASK)
                status |= GRETH_BD_WR;

        greth->tx_next = NEXT_TX(greth->tx_next);
        greth->tx_free--;

        /* Write descriptor control word and enable transmission */
        greth_write_bd(&bdp->stat, status);
        spin_lock_irqsave(&greth->devlock, flags); /* save from poll/irq */
        greth_enable_tx(greth);
        spin_unlock_irqrestore(&greth->devlock, flags);

out:
        dev_kfree_skb(skb);
        return err;
}

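/* Number of free TX descriptors between tx_next (producer) and tx_last
 * (consumer). One slot is always left unused so that a completely full
 * ring can be told apart from an empty one.
 */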
static inline u16 greth_num_free_bds(u16 tx_last, u16 tx_next)
{
        if (tx_next < tx_last)
                return (tx_last - tx_next) - 1;
        else
                return GRETH_TXBD_NUM - (tx_next - tx_last) - 1;
}

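/* Transmit path for the gigabit MAC: scatter/gather. The linear part
 * and each page fragment get their own descriptor and DMA mapping. All
 * descriptors are written with GRETH_BD_EN except the first, which is
 * enabled last (after a wmb()) so the MAC cannot start DMA on a
 * half-written chain.
 */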
static netdev_tx_t
greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
{
        struct greth_private *greth = netdev_priv(dev);
        struct greth_bd *bdp;
        u32 status, dma_addr;
        int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
        unsigned long flags;
        u16 tx_last;

        nr_frags = skb_shinfo(skb)->nr_frags;
        tx_last = greth->tx_last;
        rmb(); /* tx_last is updated by the poll task */

        if (greth_num_free_bds(tx_last, greth->tx_next) < nr_frags + 1) {
                netif_stop_queue(dev);
                err = NETDEV_TX_BUSY;
                goto out;
        }

        if (netif_msg_pktdata(greth))
                greth_print_tx_packet(skb);

        if (unlikely(skb->len > MAX_FRAME_SIZE)) {
                dev->stats.tx_errors++;
                goto out;
        }

        /* Save skb pointer. */
        greth->tx_skbuff[greth->tx_next] = skb;

        /* Linear buf */
        if (nr_frags != 0)
                status = GRETH_TXBD_MORE;
        else
                status = GRETH_BD_IE;

        if (skb->ip_summed == CHECKSUM_PARTIAL)
                status |= GRETH_TXBD_CSALL;
        status |= skb_headlen(skb) & GRETH_BD_LEN;
        if (greth->tx_next == GRETH_TXBD_NUM_MASK)
                status |= GRETH_BD_WR;

        bdp = greth->tx_bd_base + greth->tx_next;
        greth_write_bd(&bdp->stat, status);
        dma_addr = dma_map_single(greth->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

        if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
                goto map_error;

        greth_write_bd(&bdp->addr, dma_addr);

        curr_tx = NEXT_TX(greth->tx_next);

        /* Frags */
        for (i = 0; i < nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                greth->tx_skbuff[curr_tx] = NULL;
                bdp = greth->tx_bd_base + curr_tx;

                status = GRETH_BD_EN;
                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        status |= GRETH_TXBD_CSALL;
                status |= skb_frag_size(frag) & GRETH_BD_LEN;

                /* Wrap around descriptor ring */
                if (curr_tx == GRETH_TXBD_NUM_MASK)
                        status |= GRETH_BD_WR;

                /* More fragments left */
                if (i < nr_frags - 1)
                        status |= GRETH_TXBD_MORE;
                else
                        status |= GRETH_BD_IE; /* enable IRQ on last fragment */

                greth_write_bd(&bdp->stat, status);

                dma_addr = skb_frag_dma_map(greth->dev, frag, 0, skb_frag_size(frag),
                                            DMA_TO_DEVICE);

                if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
                        goto frag_map_error;

                greth_write_bd(&bdp->addr, dma_addr);

                curr_tx = NEXT_TX(curr_tx);
        }

        wmb();

        /* Enable the descriptor chain by enabling the first descriptor */
        bdp = greth->tx_bd_base + greth->tx_next;
        greth_write_bd(&bdp->stat,
                       greth_read_bd(&bdp->stat) | GRETH_BD_EN);

        spin_lock_irqsave(&greth->devlock, flags); /* save from poll/irq */
        greth->tx_next = curr_tx;
        greth_enable_tx_and_irq(greth);
        spin_unlock_irqrestore(&greth->devlock, flags);

        return NETDEV_TX_OK;

frag_map_error:
        /* Unmap SKB mappings that succeeded and disable descriptor */
        for (i = 0; greth->tx_next + i != curr_tx; i++) {
                bdp = greth->tx_bd_base + greth->tx_next + i;
                dma_unmap_single(greth->dev,
                                 greth_read_bd(&bdp->addr),
                                 greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
                                 DMA_TO_DEVICE);
                greth_write_bd(&bdp->stat, 0);
        }
map_error:
        if (net_ratelimit())
                dev_warn(greth->dev, "Could not create TX DMA mapping\n");
        dev_kfree_skb(skb);
out:
        return err;
}

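/* Interrupt handler. TX/RX events are not serviced here; the matching
 * interrupts are masked and the work is deferred to the NAPI poll
 * routine. Both the status and the control register must be checked,
 * since the event bits are set even while the corresponding interrupt
 * is masked, which matters on shared IRQ lines.
 */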
static irqreturn_t greth_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct greth_private *greth;
        u32 status, ctrl;
        irqreturn_t retval = IRQ_NONE;

        greth = netdev_priv(dev);

        spin_lock(&greth->devlock);

        /* Get the interrupt events that caused us to be here. */
        status = GRETH_REGLOAD(greth->regs->status);

        /* Must see if interrupts are enabled also, INT_TX|INT_RX flags may be
         * set regardless of whether IRQ is enabled or not. Especially
         * important when shared IRQ.
         */
        ctrl = GRETH_REGLOAD(greth->regs->control);

        /* Handle rx and tx interrupts through poll */
        if (((status & (GRETH_INT_RE | GRETH_INT_RX)) && (ctrl & GRETH_RXI)) ||
            ((status & (GRETH_INT_TE | GRETH_INT_TX)) && (ctrl & GRETH_TXI))) {
                retval = IRQ_HANDLED;

                /* Disable interrupts and schedule poll() */
                greth_disable_irqs(greth);
                napi_schedule(&greth->napi);
        }

        spin_unlock(&greth->devlock);

        return retval;
}

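/* Reclaim completed TX descriptors for the 10/100 MAC, updating error
 * and byte counters, and wake the queue once at least one slot is free.
 * Called both from the xmit path and from the NAPI poll routine.
 */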
static void greth_clean_tx(struct net_device *dev)
{
        struct greth_private *greth;
        struct greth_bd *bdp;
        u32 stat;

        greth = netdev_priv(dev);

        while (1) {
                bdp = greth->tx_bd_base + greth->tx_last;
                GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
                mb();
                stat = greth_read_bd(&bdp->stat);

                if (unlikely(stat & GRETH_BD_EN))
                        break;

                if (greth->tx_free == GRETH_TXBD_NUM)
                        break;

                /* Check status for errors */
                if (unlikely(stat & GRETH_TXBD_STATUS)) {
                        dev->stats.tx_errors++;
                        if (stat & GRETH_TXBD_ERR_AL)
                                dev->stats.tx_aborted_errors++;
                        if (stat & GRETH_TXBD_ERR_UE)
                                dev->stats.tx_fifo_errors++;
                }
                dev->stats.tx_packets++;
                dev->stats.tx_bytes += greth->tx_bufs_length[greth->tx_last];
                greth->tx_last = NEXT_TX(greth->tx_last);
                greth->tx_free++;
        }

        if (greth->tx_free > 0)
                netif_wake_queue(dev);
}

static inline void greth_update_tx_stats(struct net_device *dev, u32 stat)
{
        /* Check status for errors */
        if (unlikely(stat & GRETH_TXBD_STATUS)) {
                dev->stats.tx_errors++;
                if (stat & GRETH_TXBD_ERR_AL)
                        dev->stats.tx_aborted_errors++;
                if (stat & GRETH_TXBD_ERR_UE)
                        dev->stats.tx_fifo_errors++;
                if (stat & GRETH_TXBD_ERR_LC)
                        dev->stats.tx_aborted_errors++;
        }
        dev->stats.tx_packets++;
}

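/* Reclaim completed TX frames for the gigabit MAC. A frame is only
 * cleaned once the descriptor of its last fragment has completed, since
 * the hardware finishes fragments in order. tx_last is published after
 * a wmb() so the xmit path, which pairs it with an rmb(), observes a
 * consistent ring state.
 */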
static void greth_clean_tx_gbit(struct net_device *dev)
{
        struct greth_private *greth;
        struct greth_bd *bdp, *bdp_last_frag;
        struct sk_buff *skb = NULL;
        u32 stat;
        int nr_frags, i;
        u16 tx_last;

        greth = netdev_priv(dev);
        tx_last = greth->tx_last;

        while (tx_last != greth->tx_next) {
                skb = greth->tx_skbuff[tx_last];

                nr_frags = skb_shinfo(skb)->nr_frags;

                /* We only clean fully completed SKBs */
                bdp_last_frag = greth->tx_bd_base + SKIP_TX(tx_last, nr_frags);

                GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
                mb();
                stat = greth_read_bd(&bdp_last_frag->stat);

                if (stat & GRETH_BD_EN)
                        break;

                greth->tx_skbuff[tx_last] = NULL;

                greth_update_tx_stats(dev, stat);
                dev->stats.tx_bytes += skb->len;

                bdp = greth->tx_bd_base + tx_last;

                tx_last = NEXT_TX(tx_last);

                dma_unmap_single(greth->dev,
                                 greth_read_bd(&bdp->addr),
                                 skb_headlen(skb),
                                 DMA_TO_DEVICE);

                for (i = 0; i < nr_frags; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                        bdp = greth->tx_bd_base + tx_last;

                        dma_unmap_page(greth->dev,
                                       greth_read_bd(&bdp->addr),
                                       skb_frag_size(frag),
                                       DMA_TO_DEVICE);

                        tx_last = NEXT_TX(tx_last);
                }
                dev_kfree_skb(skb);
        }
        if (skb) { /* skb is set only if the above while loop was entered */
                wmb();
                greth->tx_last = tx_last;

                if (netif_queue_stopped(dev) &&
                    (greth_num_free_bds(tx_last, greth->tx_next) >
                    (MAX_SKB_FRAGS + 1)))
                        netif_wake_queue(dev);
        }
}

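/* Receive path for the 10/100 MAC. Completed frames are copied out of
 * the fixed DMA buffer into a freshly allocated skb; the descriptor is
 * then re-armed in place with the same buffer.
 */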
static int greth_rx(struct net_device *dev, int limit)
{
        struct greth_private *greth;
        struct greth_bd *bdp;
        struct sk_buff *skb;
        int pkt_len;
        int bad, count;
        u32 status, dma_addr;
        unsigned long flags;

        greth = netdev_priv(dev);

        for (count = 0; count < limit; ++count) {
                bdp = greth->rx_bd_base + greth->rx_cur;
                GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
                mb();
                status = greth_read_bd(&bdp->stat);

                if (unlikely(status & GRETH_BD_EN))
                        break;

                dma_addr = greth_read_bd(&bdp->addr);
                bad = 0;

                /* Check status for errors. */
                if (unlikely(status & GRETH_RXBD_STATUS)) {
                        if (status & GRETH_RXBD_ERR_FT) {
                                dev->stats.rx_length_errors++;
                                bad = 1;
                        }
                        if (status & (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE)) {
                                dev->stats.rx_frame_errors++;
                                bad = 1;
                        }
                        if (status & GRETH_RXBD_ERR_CRC) {
                                dev->stats.rx_crc_errors++;
                                bad = 1;
                        }
                }
                if (unlikely(bad)) {
                        dev->stats.rx_errors++;
                } else {
                        pkt_len = status & GRETH_BD_LEN;

                        skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);

                        if (unlikely(skb == NULL)) {
                                if (net_ratelimit())
                                        dev_warn(&dev->dev, "low on memory - packet dropped\n");

                                dev->stats.rx_dropped++;
                        } else {
                                skb_reserve(skb, NET_IP_ALIGN);

                                dma_sync_single_for_cpu(greth->dev,
                                                        dma_addr,
                                                        pkt_len,
                                                        DMA_FROM_DEVICE);

                                if (netif_msg_pktdata(greth))
                                        greth_print_rx_packet(phys_to_virt(dma_addr), pkt_len);

                                skb_put_data(skb, phys_to_virt(dma_addr),
                                             pkt_len);

                                skb->protocol = eth_type_trans(skb, dev);
                                dev->stats.rx_bytes += pkt_len;
                                dev->stats.rx_packets++;
                                netif_receive_skb(skb);
                        }
                }

                status = GRETH_BD_EN | GRETH_BD_IE;
                if (greth->rx_cur == GRETH_RXBD_NUM_MASK)
                        status |= GRETH_BD_WR;

                wmb();
                greth_write_bd(&bdp->stat, status);

                dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);

                spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */
                greth_enable_rx(greth);
                spin_unlock_irqrestore(&greth->devlock, flags);

                greth->rx_cur = NEXT_RX(greth->rx_cur);
        }

        return count;
}

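/* Return 1 when the hardware fully validated the frame's checksums.
 * IP fragments are never considered checksummed, since the TCP/UDP
 * checksum can only be verified over a complete datagram.
 */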
static inline int hw_checksummed(u32 status)
{
        if (status & GRETH_RXBD_IP_FRAG)
                return 0;

        if ((status & GRETH_RXBD_IP) && (status & GRETH_RXBD_IP_CSERR))
                return 0;

        if ((status & GRETH_RXBD_UDP) && (status & GRETH_RXBD_UDP_CSERR))
                return 0;

        if ((status & GRETH_RXBD_TCP) && (status & GRETH_RXBD_TCP_CSERR))
                return 0;

        return 1;
}

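/* Receive path for the gigabit MAC: zero copy. The filled skb is handed
 * straight up the stack and replaced in the descriptor by a newly
 * allocated, newly mapped one. If either the allocation or the mapping
 * fails, the old skb stays in the ring and the frame is dropped.
 */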
static int greth_rx_gbit(struct net_device *dev, int limit)
{
        struct greth_private *greth;
        struct greth_bd *bdp;
        struct sk_buff *skb, *newskb;
        int pkt_len;
        int bad, count = 0;
        u32 status, dma_addr;
        unsigned long flags;

        greth = netdev_priv(dev);

        for (count = 0; count < limit; ++count) {
                bdp = greth->rx_bd_base + greth->rx_cur;
                skb = greth->rx_skbuff[greth->rx_cur];
                GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
                mb();
                status = greth_read_bd(&bdp->stat);
                bad = 0;

                if (status & GRETH_BD_EN)
                        break;

                /* Check status for errors. */
                if (unlikely(status & GRETH_RXBD_STATUS)) {
                        if (status & GRETH_RXBD_ERR_FT) {
                                dev->stats.rx_length_errors++;
                                bad = 1;
                        } else if (status &
                                   (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE | GRETH_RXBD_ERR_LE)) {
                                dev->stats.rx_frame_errors++;
                                bad = 1;
                        } else if (status & GRETH_RXBD_ERR_CRC) {
                                dev->stats.rx_crc_errors++;
                                bad = 1;
                        }
                }

                /* Allocate a new skb to replace the current one; not
                 * needed if the current skb can be reused
                 */
                if (!bad && (newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) {
                        skb_reserve(newskb, NET_IP_ALIGN);

                        dma_addr = dma_map_single(greth->dev,
                                                  newskb->data,
                                                  MAX_FRAME_SIZE + NET_IP_ALIGN,
                                                  DMA_FROM_DEVICE);

                        if (!dma_mapping_error(greth->dev, dma_addr)) {
                                /* Process the incoming frame. */
                                pkt_len = status & GRETH_BD_LEN;

                                dma_unmap_single(greth->dev,
                                                 greth_read_bd(&bdp->addr),
                                                 MAX_FRAME_SIZE + NET_IP_ALIGN,
                                                 DMA_FROM_DEVICE);

                                if (netif_msg_pktdata(greth))
                                        greth_print_rx_packet(phys_to_virt(greth_read_bd(&bdp->addr)), pkt_len);

                                skb_put(skb, pkt_len);

                                if (dev->features & NETIF_F_RXCSUM && hw_checksummed(status))
                                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                                else
                                        skb_checksum_none_assert(skb);

                                skb->protocol = eth_type_trans(skb, dev);
                                dev->stats.rx_packets++;
                                dev->stats.rx_bytes += pkt_len;
                                netif_receive_skb(skb);

                                greth->rx_skbuff[greth->rx_cur] = newskb;
                                greth_write_bd(&bdp->addr, dma_addr);
                        } else {
                                if (net_ratelimit())
                                        dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
                                dev_kfree_skb(newskb);
                                /* reusing current skb, so it is a drop */
                                dev->stats.rx_dropped++;
                        }
                } else if (bad) {
                        /* Bad frame transfer; the skb is reused */
                        dev->stats.rx_dropped++;
                } else {
                        /* Failed to allocate a new skb. The current, already
                         * filled skb is reused, as if the transfer had failed.
                         * One could argue that RX descriptor handling should
                         * be split into cleaning and refilling stages, like
                         * the TX side of the driver.
                         */
                        if (net_ratelimit())
                                dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
                        /* reusing current skb, so it is a drop */
                        dev->stats.rx_dropped++;
                }

                status = GRETH_BD_EN | GRETH_BD_IE;
                if (greth->rx_cur == GRETH_RXBD_NUM_MASK)
                        status |= GRETH_BD_WR;

                wmb();
                greth_write_bd(&bdp->stat, status);
                spin_lock_irqsave(&greth->devlock, flags);
                greth_enable_rx(greth);
                spin_unlock_irqrestore(&greth->devlock, flags);
                greth->rx_cur = NEXT_RX(greth->rx_cur);
        }

        return count;
}

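/* NAPI poll routine, shared by both MAC variants. After the budgeted
 * RX and TX work is done, interrupts are re-enabled and the status
 * register is checked once more under the lock; if new events slipped
 * in, the loop restarts instead of completing NAPI, closing the race
 * between "enable IRQ" and "stop polling".
 */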
static int greth_poll(struct napi_struct *napi, int budget)
{
        struct greth_private *greth;
        int work_done = 0;
        unsigned long flags;
        u32 mask, ctrl;

        greth = container_of(napi, struct greth_private, napi);

restart_txrx_poll:
        if (greth->gbit_mac) {
                greth_clean_tx_gbit(greth->netdev);
                work_done += greth_rx_gbit(greth->netdev, budget - work_done);
        } else {
                if (netif_queue_stopped(greth->netdev))
                        greth_clean_tx(greth->netdev);
                work_done += greth_rx(greth->netdev, budget - work_done);
        }

        if (work_done < budget) {
                spin_lock_irqsave(&greth->devlock, flags);

                ctrl = GRETH_REGLOAD(greth->regs->control);
                if ((greth->gbit_mac && (greth->tx_last != greth->tx_next)) ||
                    (!greth->gbit_mac && netif_queue_stopped(greth->netdev))) {
                        GRETH_REGSAVE(greth->regs->control,
                                        ctrl | GRETH_TXI | GRETH_RXI);
                        mask = GRETH_INT_RX | GRETH_INT_RE |
                               GRETH_INT_TX | GRETH_INT_TE;
                } else {
                        GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI);
                        mask = GRETH_INT_RX | GRETH_INT_RE;
                }

                if (GRETH_REGLOAD(greth->regs->status) & mask) {
                        GRETH_REGSAVE(greth->regs->control, ctrl);
                        spin_unlock_irqrestore(&greth->devlock, flags);
                        goto restart_txrx_poll;
                } else {
                        napi_complete_done(napi, work_done);
                        spin_unlock_irqrestore(&greth->devlock, flags);
                }
        }

        return work_done;
}

static int greth_set_mac_add(struct net_device *dev, void *p)
{
        struct sockaddr *addr = p;
        struct greth_private *greth;
        struct greth_regs *regs;

        greth = netdev_priv(dev);
        regs = greth->regs;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
        GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
        GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
                      dev->dev_addr[4] << 8 | dev->dev_addr[5]);

        return 0;
}

static u32 greth_hash_get_index(__u8 *addr)
{
        return (ether_crc(6, addr)) & 0x3F;
}

static void greth_set_hash_filter(struct net_device *dev)
{
        struct netdev_hw_addr *ha;
        struct greth_private *greth = netdev_priv(dev);
        struct greth_regs *regs = greth->regs;
        u32 mc_filter[2];
        unsigned int bitnr;

        mc_filter[0] = mc_filter[1] = 0;

        netdev_for_each_mc_addr(ha, dev) {
                bitnr = greth_hash_get_index(ha->addr);
                mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
        }

        GRETH_REGSAVE(regs->hash_msb, mc_filter[1]);
        GRETH_REGSAVE(regs->hash_lsb, mc_filter[0]);
}

static void greth_set_multicast_list(struct net_device *dev)
{
        int cfg;
        struct greth_private *greth = netdev_priv(dev);
        struct greth_regs *regs = greth->regs;

        cfg = GRETH_REGLOAD(regs->control);
        if (dev->flags & IFF_PROMISC)
                cfg |= GRETH_CTRL_PR;
        else
                cfg &= ~GRETH_CTRL_PR;

        if (greth->multicast) {
                if (dev->flags & IFF_ALLMULTI) {
                        GRETH_REGSAVE(regs->hash_msb, -1);
                        GRETH_REGSAVE(regs->hash_lsb, -1);
                        cfg |= GRETH_CTRL_MCEN;
                        GRETH_REGSAVE(regs->control, cfg);
                        return;
                }

                if (netdev_mc_empty(dev)) {
                        cfg &= ~GRETH_CTRL_MCEN;
                        GRETH_REGSAVE(regs->control, cfg);
                        return;
                }

                /* Setup multicast filter */
                greth_set_hash_filter(dev);
                cfg |= GRETH_CTRL_MCEN;
        }
        GRETH_REGSAVE(regs->control, cfg);
}

static u32 greth_get_msglevel(struct net_device *dev)
{
        struct greth_private *greth = netdev_priv(dev);

        return greth->msg_enable;
}

static void greth_set_msglevel(struct net_device *dev, u32 value)
{
        struct greth_private *greth = netdev_priv(dev);

        greth->msg_enable = value;
}

static int greth_get_regs_len(struct net_device *dev)
{
        return sizeof(struct greth_regs);
}

static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct greth_private *greth = netdev_priv(dev);

        strlcpy(info->driver, dev_driver_string(greth->dev),
                sizeof(info->driver));
        strlcpy(info->bus_info, greth->dev->bus->name, sizeof(info->bus_info));
}

static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
{
        int i;
        struct greth_private *greth = netdev_priv(dev);
        u32 __iomem *greth_regs = (u32 __iomem *) greth->regs;
        u32 *buff = p;

        for (i = 0; i < sizeof(struct greth_regs) / sizeof(u32); i++)
                buff[i] = greth_read_bd(&greth_regs[i]);
}

static const struct ethtool_ops greth_ethtool_ops = {
        .get_msglevel           = greth_get_msglevel,
        .set_msglevel           = greth_set_msglevel,
        .get_drvinfo            = greth_get_drvinfo,
        .get_regs_len           = greth_get_regs_len,
        .get_regs               = greth_get_regs,
        .get_link               = ethtool_op_get_link,
        .get_link_ksettings     = phy_ethtool_get_link_ksettings,
        .set_link_ksettings     = phy_ethtool_set_link_ksettings,
};

/* Not const: ndo_start_xmit and ndo_set_rx_mode are patched in at probe
 * time depending on the detected core features.
 */
static struct net_device_ops greth_netdev_ops = {
        .ndo_open               = greth_open,
        .ndo_stop               = greth_close,
        .ndo_start_xmit         = greth_start_xmit,
        .ndo_set_mac_address    = greth_set_mac_add,
        .ndo_validate_addr      = eth_validate_addr,
};

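/* Poll the MDIO busy flag for up to ~40 ms (4*HZ/100 jiffies).
 * Returns 1 when the bus is idle, 0 on timeout.
 */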
static inline int wait_for_mdio(struct greth_private *greth)
{
        unsigned long timeout = jiffies + 4*HZ/100;

        while (GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_BUSY) {
                if (time_after(jiffies, timeout))
                        return 0;
        }
        return 1;
}

static int greth_mdio_read(struct mii_bus *bus, int phy, int reg)
{
        struct greth_private *greth = bus->priv;
        int data;

        if (!wait_for_mdio(greth))
                return -EBUSY;

        /* PHY address in bits 15:11, register in bits 10:6, bit 1 = read */
        GRETH_REGSAVE(greth->regs->mdio, ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 2);

        if (!wait_for_mdio(greth))
                return -EBUSY;

        if (!(GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_NVALID)) {
                data = (GRETH_REGLOAD(greth->regs->mdio) >> 16) & 0xFFFF;
                return data;
        } else {
                return -1;
        }
}

static int greth_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
        struct greth_private *greth = bus->priv;

        if (!wait_for_mdio(greth))
                return -EBUSY;

        /* Data in bits 31:16, bit 0 = write */
        GRETH_REGSAVE(greth->regs->mdio,
                      ((val & 0xFFFF) << 16) | ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 1);

        if (!wait_for_mdio(greth))
                return -EBUSY;

        return 0;
}

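/* phylib adjust_link callback: mirror the PHY's negotiated speed and
 * duplex into the MAC control register and log link transitions.
 */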
static void greth_link_change(struct net_device *dev)
{
        struct greth_private *greth = netdev_priv(dev);
        struct phy_device *phydev = dev->phydev;
        unsigned long flags;
        int status_change = 0;
        u32 ctrl;

        spin_lock_irqsave(&greth->devlock, flags);

        if (phydev->link) {
                if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) {
                        ctrl = GRETH_REGLOAD(greth->regs->control) &
                               ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB);

                        if (phydev->duplex)
                                ctrl |= GRETH_CTRL_FD;

                        if (phydev->speed == SPEED_100)
                                ctrl |= GRETH_CTRL_SP;
                        else if (phydev->speed == SPEED_1000)
                                ctrl |= GRETH_CTRL_GB;

                        GRETH_REGSAVE(greth->regs->control, ctrl);
                        greth->speed = phydev->speed;
                        greth->duplex = phydev->duplex;
                        status_change = 1;
                }
        }

        if (phydev->link != greth->link) {
                if (!phydev->link) {
                        greth->speed = 0;
                        greth->duplex = -1;
                }
                greth->link = phydev->link;

                status_change = 1;
        }

        spin_unlock_irqrestore(&greth->devlock, flags);

        if (status_change) {
                if (phydev->link)
                        pr_debug("%s: link up (%d/%s)\n",
                                dev->name, phydev->speed,
                                DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
                else
                        pr_debug("%s: link down\n", dev->name);
        }
}

static int greth_mdio_probe(struct net_device *dev)
{
        struct greth_private *greth = netdev_priv(dev);
        struct phy_device *phy = NULL;
        int ret;

        /* Find the first PHY */
        phy = phy_find_first(greth->mdio);

        if (!phy) {
                if (netif_msg_probe(greth))
                        dev_err(&dev->dev, "no PHY found\n");
                return -ENXIO;
        }

        ret = phy_connect_direct(dev, phy, &greth_link_change,
                                 greth->gbit_mac ? PHY_INTERFACE_MODE_GMII : PHY_INTERFACE_MODE_MII);
        if (ret) {
                if (netif_msg_ifup(greth))
                        dev_err(&dev->dev, "could not attach to PHY\n");
                return ret;
        }

        if (greth->gbit_mac)
                phy_set_max_speed(phy, SPEED_1000);
        else
                phy_set_max_speed(phy, SPEED_100);

        linkmode_copy(phy->advertising, phy->supported);

        greth->link = 0;
        greth->speed = 0;
        greth->duplex = -1;

        return 0;
}

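/* Register the MDIO bus, attach to the first PHY found on it and start
 * it. When the EDCL debug link is in use, autonegotiation is kicked off
 * immediately and busy-waited on for up to 6 seconds, so the debug link
 * keeps working from the moment the driver loads.
 */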
static int greth_mdio_init(struct greth_private *greth)
{
        int ret;
        unsigned long timeout;
        struct net_device *ndev = greth->netdev;

        greth->mdio = mdiobus_alloc();
        if (!greth->mdio)
                return -ENOMEM;

        greth->mdio->name = "greth-mdio";
        snprintf(greth->mdio->id, MII_BUS_ID_SIZE, "%s-%d", greth->mdio->name, greth->irq);
        greth->mdio->read = greth_mdio_read;
        greth->mdio->write = greth_mdio_write;
        greth->mdio->priv = greth;

        ret = mdiobus_register(greth->mdio);
        if (ret)
                goto error;

        ret = greth_mdio_probe(greth->netdev);
        if (ret) {
                if (netif_msg_probe(greth))
                        dev_err(&greth->netdev->dev, "failed to probe MDIO bus\n");
                goto unreg_mdio;
        }

        phy_start(ndev->phydev);

        /* If Ethernet debug link is used make autoneg happen right away */
        if (greth->edcl && greth_edcl == 1) {
                phy_start_aneg(ndev->phydev);
                timeout = jiffies + 6*HZ;
                while (!phy_aneg_done(ndev->phydev) &&
                       time_before(jiffies, timeout)) {
                }
                phy_read_status(ndev->phydev);
                greth_link_change(greth->netdev);
        }

        return 0;

unreg_mdio:
        mdiobus_unregister(greth->mdio);
error:
        mdiobus_free(greth->mdio);
        return ret;
}

/* Initialize the GRETH MAC */
static int greth_of_probe(struct platform_device *ofdev)
{
        struct net_device *dev;
        struct greth_private *greth;
        struct greth_regs *regs;

        int i;
        int err;
        int tmp;
        unsigned long timeout;

        dev = alloc_etherdev(sizeof(struct greth_private));

        if (dev == NULL)
                return -ENOMEM;

        greth = netdev_priv(dev);
        greth->netdev = dev;
        greth->dev = &ofdev->dev;

        if (greth_debug > 0)
                greth->msg_enable = greth_debug;
        else
                greth->msg_enable = GRETH_DEF_MSG_ENABLE;

        spin_lock_init(&greth->devlock);

        greth->regs = of_ioremap(&ofdev->resource[0], 0,
                                 resource_size(&ofdev->resource[0]),
                                 "grlib-greth regs");

        if (greth->regs == NULL) {
                if (netif_msg_probe(greth))
                        dev_err(greth->dev, "ioremap failure.\n");
                err = -EIO;
                goto error1;
        }

        regs = greth->regs;
        greth->irq = ofdev->archdata.irqs[0];

        dev_set_drvdata(greth->dev, dev);
        SET_NETDEV_DEV(dev, greth->dev);

        if (netif_msg_probe(greth))
                dev_dbg(greth->dev, "resetting controller.\n");

        /* Reset the controller. */
        GRETH_REGSAVE(regs->control, GRETH_RESET);

        /* Wait for MAC to reset itself */
        timeout = jiffies + HZ/100;
        while (GRETH_REGLOAD(regs->control) & GRETH_RESET) {
                if (time_after(jiffies, timeout)) {
                        err = -EIO;
                        if (netif_msg_probe(greth))
                                dev_err(greth->dev, "timeout when waiting for reset.\n");
                        goto error2;
                }
        }

        /* Get default PHY address */
        greth->phyaddr = (GRETH_REGLOAD(regs->mdio) >> 11) & 0x1F;

        /* Check if we have GBIT capable MAC */
        tmp = GRETH_REGLOAD(regs->control);
        greth->gbit_mac = (tmp >> 27) & 1;

        /* Check for multicast capability */
        greth->multicast = (tmp >> 25) & 1;

        greth->edcl = (tmp >> 31) & 1;

        /* If we have EDCL we disable the EDCL speed-duplex FSM so
         * it doesn't interfere with the software
         */
        if (greth->edcl != 0)
                GRETH_REGORIN(regs->control, GRETH_CTRL_DISDUPLEX);

        /* Check if MAC can handle MDIO interrupts */
        greth->mdio_int_en = (tmp >> 26) & 1;

        err = greth_mdio_init(greth);
        if (err) {
                if (netif_msg_probe(greth))
                        dev_err(greth->dev, "failed to register MDIO bus\n");
                goto error2;
        }

        /* Allocate TX descriptor ring in coherent memory */
        greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024,
                                               &greth->tx_bd_base_phys,
                                               GFP_KERNEL);
        if (!greth->tx_bd_base) {
                err = -ENOMEM;
                goto error3;
        }

        /* Allocate RX descriptor ring in coherent memory */
        greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024,
                                               &greth->rx_bd_base_phys,
                                               GFP_KERNEL);
        if (!greth->rx_bd_base) {
                err = -ENOMEM;
                goto error4;
        }

        /* Get MAC address from: module param, OF property or ID prom */
        for (i = 0; i < 6; i++) {
                if (macaddr[i] != 0)
                        break;
        }
        if (i == 6) {
                u8 addr[ETH_ALEN];

                err = of_get_mac_address(ofdev->dev.of_node, addr);
                if (!err) {
                        for (i = 0; i < 6; i++)
                                macaddr[i] = (unsigned int) addr[i];
                } else {
#ifdef CONFIG_SPARC
                        for (i = 0; i < 6; i++)
                                macaddr[i] = (unsigned int) idprom->id_ethaddr[i];
#endif
                }
        }

        for (i = 0; i < 6; i++)
                dev->dev_addr[i] = macaddr[i];

        /* Advance the last octet so the next probed core gets its own address */
        macaddr[5]++;

        if (!is_valid_ether_addr(&dev->dev_addr[0])) {
                if (netif_msg_probe(greth))
                        dev_err(greth->dev, "no valid ethernet address, aborting.\n");
                err = -EINVAL;
                goto error5;
        }

        GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
        GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
                      dev->dev_addr[4] << 8 | dev->dev_addr[5]);

        /* Clear all pending interrupts except PHY irq */
        GRETH_REGSAVE(regs->status, 0xFF);

        if (greth->gbit_mac) {
                dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
                        NETIF_F_RXCSUM;
                dev->features = dev->hw_features | NETIF_F_HIGHDMA;
                greth_netdev_ops.ndo_start_xmit = greth_start_xmit_gbit;
        }

        if (greth->multicast) {
                greth_netdev_ops.ndo_set_rx_mode = greth_set_multicast_list;
                dev->flags |= IFF_MULTICAST;
        } else {
                dev->flags &= ~IFF_MULTICAST;
        }

        dev->netdev_ops = &greth_netdev_ops;
        dev->ethtool_ops = &greth_ethtool_ops;

        err = register_netdev(dev);
        if (err) {
                if (netif_msg_probe(greth))
                        dev_err(greth->dev, "netdevice registration failed.\n");
                goto error5;
        }

        /* setup NAPI */
        netif_napi_add(dev, &greth->napi, greth_poll, 64);

        return 0;

error5:
        dma_free_coherent(greth->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);
error4:
        dma_free_coherent(greth->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
error3:
        mdiobus_unregister(greth->mdio);
error2:
        of_iounmap(&ofdev->resource[0], greth->regs, resource_size(&ofdev->resource[0]));
error1:
        free_netdev(dev);
        return err;
}

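/* Tear down everything greth_of_probe() set up: descriptor rings, PHY,
 * MDIO bus, register mapping and finally the net_device itself.
 */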
static int greth_of_remove(struct platform_device *of_dev)
{
        struct net_device *ndev = platform_get_drvdata(of_dev);
        struct greth_private *greth = netdev_priv(ndev);

        /* Free descriptor areas */
        dma_free_coherent(&of_dev->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);

        dma_free_coherent(&of_dev->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);

        if (ndev->phydev)
                phy_stop(ndev->phydev);
        mdiobus_unregister(greth->mdio);

        unregister_netdev(ndev);

        of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0]));

        free_netdev(ndev);

        return 0;
}

static const struct of_device_id greth_of_match[] = {
        {
                .name = "GAISLER_ETHMAC",
        },
        {
                .name = "01_01d",
        },
        {},
};

MODULE_DEVICE_TABLE(of, greth_of_match);

static struct platform_driver greth_of_driver = {
        .driver = {
                .name = "grlib-greth",
                .of_match_table = greth_of_match,
        },
        .probe = greth_of_probe,
        .remove = greth_of_remove,
};

module_platform_driver(greth_of_driver);

MODULE_AUTHOR("Aeroflex Gaisler AB.");
MODULE_DESCRIPTION("Aeroflex Gaisler Ethernet MAC driver");
MODULE_LICENSE("GPL");