linux/drivers/net/ethernet/apm/xgene-v2/main.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Applied Micro X-Gene SoC Ethernet v2 Driver
 *
 * Copyright (c) 2017, Applied Micro Circuits Corporation
 * Author(s): Iyappan Subramanian <isubramanian@apm.com>
 *            Keyur Chudgar <kchudgar@apm.com>
 */

#include "main.h"

static const struct acpi_device_id xge_acpi_match[];

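/*
 * Gather platform resources: the ENET CSR region, the MAC address
 * provided by firmware (falling back to a random one), the
 * phy-connection-type (only RGMII is accepted) and the IRQ.
 */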
static int xge_get_resources(struct xge_pdata *pdata)
{
        struct platform_device *pdev;
        struct net_device *ndev;
        int phy_mode, ret = 0;
        struct resource *res;
        struct device *dev;

        pdev = pdata->pdev;
        dev = &pdev->dev;
        ndev = pdata->ndev;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(dev, "Resource enet_csr not defined\n");
                return -ENODEV;
        }

        pdata->resources.base_addr = devm_ioremap(dev, res->start,
                                                  resource_size(res));
        if (!pdata->resources.base_addr) {
                dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
                return -ENOMEM;
        }

        if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
                eth_hw_addr_random(ndev);

        memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

        phy_mode = device_get_phy_mode(dev);
        if (phy_mode < 0) {
                dev_err(dev, "Unable to get phy-connection-type\n");
                return phy_mode;
        }
        pdata->resources.phy_mode = phy_mode;

        if (pdata->resources.phy_mode != PHY_INTERFACE_MODE_RGMII) {
                dev_err(dev, "Incorrect phy-connection-type specified\n");
                return -ENODEV;
        }

        ret = platform_get_irq(pdev, 0);
        if (ret < 0)
                return ret;
        pdata->resources.irq = ret;

        return 0;
}

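/*
 * Post @nbuf fresh receive buffers at the RX ring tail. Each slot gets
 * a newly mapped skb; the next-descriptor address already held in m1
 * is preserved while the new buffer address is merged in. m0, which
 * carries the E (empty) bit handing the slot to hardware, is written
 * last, after a dma_wmb(), so the engine never sees a half-built
 * descriptor.
 */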
static int xge_refill_buffers(struct net_device *ndev, u32 nbuf)
{
        struct xge_pdata *pdata = netdev_priv(ndev);
        struct xge_desc_ring *ring = pdata->rx_ring;
        const u8 slots = XGENE_ENET_NUM_DESC - 1;
        struct device *dev = &pdata->pdev->dev;
        struct xge_raw_desc *raw_desc;
        u64 addr_lo, addr_hi;
        u8 tail = ring->tail;
        struct sk_buff *skb;
        dma_addr_t dma_addr;
        u16 len;
        int i;

        for (i = 0; i < nbuf; i++) {
                raw_desc = &ring->raw_desc[tail];

                len = XGENE_ENET_STD_MTU;
                skb = netdev_alloc_skb(ndev, len);
                if (unlikely(!skb))
                        return -ENOMEM;

                dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
                if (dma_mapping_error(dev, dma_addr)) {
                        netdev_err(ndev, "DMA mapping error\n");
                        dev_kfree_skb_any(skb);
                        return -EINVAL;
                }

                ring->pkt_info[tail].skb = skb;
                ring->pkt_info[tail].dma_addr = dma_addr;

                addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
                addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
                raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
                                           SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
                                           SET_BITS(PKT_ADDRH,
                                                    upper_32_bits(dma_addr)));

                dma_wmb();
                raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
                                           SET_BITS(E, 1));
                tail = (tail + 1) & slots;
        }

        ring->tail = tail;

        return 0;
}

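/* Reset the port and apply base configuration before rings are set up */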
static int xge_init_hw(struct net_device *ndev)
{
        struct xge_pdata *pdata = netdev_priv(ndev);
        int ret;

        ret = xge_port_reset(ndev);
        if (ret)
                return ret;

        xge_port_init(ndev);
        pdata->nbufs = NUM_BUFS;

        return 0;
}

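/*
 * IRQ handler: mask the interrupt and defer all work to NAPI; the
 * interrupt is re-enabled from xge_napi() once the budget is no
 * longer exhausted.
 */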
static irqreturn_t xge_irq(const int irq, void *data)
{
        struct xge_pdata *pdata = data;

        if (napi_schedule_prep(&pdata->napi)) {
                xge_intr_disable(pdata);
                __napi_schedule(&pdata->napi);
        }

        return IRQ_HANDLED;
}

static int xge_request_irq(struct net_device *ndev)
{
        struct xge_pdata *pdata = netdev_priv(ndev);
        int ret;

        snprintf(pdata->irq_name, IRQ_ID_SIZE, "%s", ndev->name);

        ret = request_irq(pdata->resources.irq, xge_irq, 0, pdata->irq_name,
                          pdata);
        if (ret)
                netdev_err(ndev, "Failed to request irq %s\n", pdata->irq_name);

        return ret;
}

static void xge_free_irq(struct net_device *ndev)
{
        struct xge_pdata *pdata = netdev_priv(ndev);

        free_irq(pdata->resources.irq, pdata);
}

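/*
 * A TX slot can be reused once the completion path has marked it empty
 * again (E set and PKT_SIZE reset to SLOT_EMPTY by xge_txc_poll()).
 */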
static bool is_tx_slot_available(struct xge_raw_desc *raw_desc)
{
        if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
            (GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)) == SLOT_EMPTY))
                return true;

        return false;
}

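/*
 * Transmit one packet: the linear skb data is copied into a coherent
 * bounce buffer (dma_alloc_coherent() provides the 64B alignment the
 * hardware appears to require), the descriptor is armed with address
 * and length, and DMATXCTRL kicks the DMA engine. Clearing the E bit
 * hands the slot to hardware, so m0 is written last, after a dma_wmb().
 */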
static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct xge_pdata *pdata = netdev_priv(ndev);
        struct device *dev = &pdata->pdev->dev;
        struct xge_desc_ring *tx_ring;
        struct xge_raw_desc *raw_desc;
        dma_addr_t dma_addr;
        u64 addr_lo, addr_hi;
        void *pkt_buf;
        u8 tail;
        u16 len;

        tx_ring = pdata->tx_ring;
        tail = tx_ring->tail;
        len = skb_headlen(skb);
        raw_desc = &tx_ring->raw_desc[tail];

        if (!is_tx_slot_available(raw_desc)) {
                netif_stop_queue(ndev);
                return NETDEV_TX_BUSY;
        }

        /* Packet buffers should be 64B aligned */
        pkt_buf = dma_alloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
                                     GFP_ATOMIC);
        if (unlikely(!pkt_buf)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
        memcpy(pkt_buf, skb->data, len);

        addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
        addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
        raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
                                   SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
                                   SET_BITS(PKT_ADDRH,
                                            upper_32_bits(dma_addr)));

        tx_ring->pkt_info[tail].skb = skb;
        tx_ring->pkt_info[tail].dma_addr = dma_addr;
        tx_ring->pkt_info[tail].pkt_buf = pkt_buf;

        dma_wmb();

        raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
                                   SET_BITS(PKT_SIZE, len) |
                                   SET_BITS(E, 0));
        skb_tx_timestamp(skb);
        xge_wr_csr(pdata, DMATXCTRL, 1);

        tx_ring->tail = (tail + 1) & (XGENE_ENET_NUM_DESC - 1);

        return NETDEV_TX_OK;
}

static bool is_tx_hw_done(struct xge_raw_desc *raw_desc)
{
        if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
            !GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)))
                return true;

        return false;
}

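/*
 * Reclaim completed TX descriptors: free the bounce buffer and skb,
 * update the stats, mark the slot empty again and acknowledge the
 * completion in DMATXSTATUS, then wake the queue if xmit had stopped
 * it on a full ring.
 */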
static void xge_txc_poll(struct net_device *ndev)
{
        struct xge_pdata *pdata = netdev_priv(ndev);
        struct device *dev = &pdata->pdev->dev;
        struct xge_desc_ring *tx_ring;
        struct xge_raw_desc *raw_desc;
        dma_addr_t dma_addr;
        struct sk_buff *skb;
        void *pkt_buf;
        u32 data;
        u8 head;

        tx_ring = pdata->tx_ring;
        head = tx_ring->head;

        data = xge_rd_csr(pdata, DMATXSTATUS);
        if (!GET_BITS(TXPKTCOUNT, data))
                return;

        while (1) {
                raw_desc = &tx_ring->raw_desc[head];

                if (!is_tx_hw_done(raw_desc))
                        break;

                dma_rmb();

                skb = tx_ring->pkt_info[head].skb;
                dma_addr = tx_ring->pkt_info[head].dma_addr;
                pkt_buf = tx_ring->pkt_info[head].pkt_buf;
                pdata->stats.tx_packets++;
                pdata->stats.tx_bytes += skb->len;
                dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
                dev_kfree_skb_any(skb);

                /* clear pktstart address and pktsize */
                raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) |
                                           SET_BITS(PKT_SIZE, SLOT_EMPTY));
                xge_wr_csr(pdata, DMATXSTATUS, 1);

                head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
        }

        if (netif_queue_stopped(ndev))
                netif_wake_queue(ndev);

        tx_ring->head = head;
}

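/*
 * Process up to @budget received packets: unmap each completed buffer,
 * drop it on a descriptor error (D bit) or hand it to GRO, then
 * immediately refill the slot. Returns the number of slots consumed.
 */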
static int xge_rx_poll(struct net_device *ndev, unsigned int budget)
{
        struct xge_pdata *pdata = netdev_priv(ndev);
        struct device *dev = &pdata->pdev->dev;
        struct xge_desc_ring *rx_ring;
        struct xge_raw_desc *raw_desc;
        struct sk_buff *skb;
        dma_addr_t dma_addr;
        int processed = 0;
        u8 head, rx_error;
        int i, ret;
        u32 data;
        u16 len;

        rx_ring = pdata->rx_ring;
        head = rx_ring->head;

        data = xge_rd_csr(pdata, DMARXSTATUS);
        if (!GET_BITS(RXPKTCOUNT, data))
                return 0;

        for (i = 0; i < budget; i++) {
                raw_desc = &rx_ring->raw_desc[head];

                if (GET_BITS(E, le64_to_cpu(raw_desc->m0)))
                        break;

                dma_rmb();

                skb = rx_ring->pkt_info[head].skb;
                rx_ring->pkt_info[head].skb = NULL;
                dma_addr = rx_ring->pkt_info[head].dma_addr;
                len = GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0));
                dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
                                 DMA_FROM_DEVICE);

                rx_error = GET_BITS(D, le64_to_cpu(raw_desc->m2));
                if (unlikely(rx_error)) {
                        pdata->stats.rx_errors++;
                        dev_kfree_skb_any(skb);
                        goto out;
                }

                skb_put(skb, len);
                skb->protocol = eth_type_trans(skb, ndev);

                pdata->stats.rx_packets++;
                pdata->stats.rx_bytes += len;
                napi_gro_receive(&pdata->napi, skb);
out:
                ret = xge_refill_buffers(ndev, 1);
                xge_wr_csr(pdata, DMARXSTATUS, 1);
                xge_wr_csr(pdata, DMARXCTRL, 1);

                if (ret)
                        break;

                head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
                processed++;
        }

        rx_ring->head = head;

        return processed;
}

static void xge_delete_desc_ring(struct net_device *ndev,
                                 struct xge_desc_ring *ring)
{
        struct xge_pdata *pdata = netdev_priv(ndev);
        struct device *dev = &pdata->pdev->dev;
        u16 size;

        if (!ring)
                return;

        size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
        if (ring->desc_addr)
                dma_free_coherent(dev, size, ring->desc_addr, ring->dma_addr);

        kfree(ring->pkt_info);
        kfree(ring);
}

static void xge_free_buffers(struct net_device *ndev)
{
        struct xge_pdata *pdata = netdev_priv(ndev);
        struct xge_desc_ring *ring = pdata->rx_ring;
        struct device *dev = &pdata->pdev->dev;
        struct sk_buff *skb;
        dma_addr_t dma_addr;
        int i;

        for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
                skb = ring->pkt_info[i].skb;
                dma_addr = ring->pkt_info[i].dma_addr;

                if (!skb)
                        continue;

                dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
        }
}

static void xge_delete_desc_rings(struct net_device *ndev)
{
        struct xge_pdata *pdata = netdev_priv(ndev);

        xge_txc_poll(ndev);
        xge_delete_desc_ring(ndev, pdata->tx_ring);

        xge_rx_poll(ndev, 64);
        xge_free_buffers(ndev);
        xge_delete_desc_ring(ndev, pdata->rx_ring);
}

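/*
 * Allocate one descriptor ring: descriptors live in coherent DMA
 * memory, per-slot bookkeeping (skb, dma_addr, pkt_buf) in a regular
 * kcalloc() array; xge_setup_desc() then initializes the descriptors.
 */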
static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev)
{
        struct xge_pdata *pdata = netdev_priv(ndev);
        struct device *dev = &pdata->pdev->dev;
        struct xge_desc_ring *ring;
        u16 size;

        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
        if (!ring)
                return NULL;

        ring->ndev = ndev;

        size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
        ring->desc_addr = dma_alloc_coherent(dev, size, &ring->dma_addr,
                                             GFP_KERNEL);
        if (!ring->desc_addr)
                goto err;

        ring->pkt_info = kcalloc(XGENE_ENET_NUM_DESC, sizeof(*ring->pkt_info),
                                 GFP_KERNEL);
        if (!ring->pkt_info)
                goto err;

        xge_setup_desc(ring);

        return ring;

err:
        xge_delete_desc_ring(ndev, ring);

        return NULL;
}

static int xge_create_desc_rings(struct net_device *ndev)
{
        struct xge_pdata *pdata = netdev_priv(ndev);
        struct xge_desc_ring *ring;
        int ret;

        /* create tx ring */
        ring = xge_create_desc_ring(ndev);
        if (!ring)
                goto err;

        pdata->tx_ring = ring;
        xge_update_tx_desc_addr(pdata);

        /* create rx ring */
        ring = xge_create_desc_ring(ndev);
        if (!ring)
                goto err;

        pdata->rx_ring = ring;
        xge_update_rx_desc_addr(pdata);

        ret = xge_refill_buffers(ndev, XGENE_ENET_NUM_DESC);
        if (ret)
                goto err;

        return 0;
err:
        xge_delete_desc_rings(ndev);

        return -ENOMEM;
}

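/*
 * ndo_open: create both rings, then enable NAPI, the IRQ, RX DMA, the
 * PHY and the MAC before opening the TX queue.
 */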
static int xge_open(struct net_device *ndev)
{
        struct xge_pdata *pdata = netdev_priv(ndev);
        int ret;

        ret = xge_create_desc_rings(ndev);
        if (ret)
                return ret;

        napi_enable(&pdata->napi);
        ret = xge_request_irq(ndev);
        if (ret) {
                napi_disable(&pdata->napi);
                xge_delete_desc_rings(ndev);
                return ret;
        }

        xge_intr_enable(pdata);
        xge_wr_csr(pdata, DMARXCTRL, 1);

        phy_start(ndev->phydev);
        xge_mac_enable(pdata);
        netif_start_queue(ndev);

        return 0;
}

static int xge_close(struct net_device *ndev)
{
        struct xge_pdata *pdata = netdev_priv(ndev);

        netif_stop_queue(ndev);
        xge_mac_disable(pdata);
        phy_stop(ndev->phydev);

        xge_intr_disable(pdata);
        xge_free_irq(ndev);
        napi_disable(&pdata->napi);
        xge_delete_desc_rings(ndev);

        return 0;
}

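/*
 * NAPI poll: reclaim TX completions first, then receive up to @budget
 * packets. When the budget is not exhausted, polling is completed and
 * interrupts are re-enabled.
 */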
static int xge_napi(struct napi_struct *napi, const int budget)
{
        struct net_device *ndev = napi->dev;
        struct xge_pdata *pdata;
        int processed;

        pdata = netdev_priv(ndev);

        xge_txc_poll(ndev);
        processed = xge_rx_poll(ndev, budget);

        if (processed < budget) {
                napi_complete_done(napi, processed);
                xge_intr_enable(pdata);
        }

        return processed;
}

static int xge_set_mac_addr(struct net_device *ndev, void *addr)
{
        struct xge_pdata *pdata = netdev_priv(ndev);
        int ret;

        ret = eth_mac_addr(ndev, addr);
        if (ret)
                return ret;

        xge_mac_set_station_addr(pdata);

        return 0;
}

static bool is_tx_pending(struct xge_raw_desc *raw_desc)
{
        if (!GET_BITS(E, le64_to_cpu(raw_desc->m0)))
                return true;

        return false;
}

static void xge_free_pending_skb(struct net_device *ndev)
{
        struct xge_pdata *pdata = netdev_priv(ndev);
        struct device *dev = &pdata->pdev->dev;
        struct xge_desc_ring *tx_ring;
        struct xge_raw_desc *raw_desc;
        dma_addr_t dma_addr;
        struct sk_buff *skb;
        void *pkt_buf;
        int i;

        tx_ring = pdata->tx_ring;

        for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
                raw_desc = &tx_ring->raw_desc[i];

                if (!is_tx_pending(raw_desc))
                        continue;

                skb = tx_ring->pkt_info[i].skb;
                dma_addr = tx_ring->pkt_info[i].dma_addr;
                pkt_buf = tx_ring->pkt_info[i].pkt_buf;
                dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
                dev_kfree_skb_any(skb);
        }
}

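/*
 * ndo_tx_timeout: quiesce NAPI and TX DMA, reclaim what completed,
 * drop packets still pending in the ring, then reinitialize the TX
 * ring and MAC and restart. rtnl_lock() serializes this against
 * open/close.
 */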
static void xge_timeout(struct net_device *ndev, unsigned int txqueue)
{
        struct xge_pdata *pdata = netdev_priv(ndev);

        rtnl_lock();

        if (!netif_running(ndev))
                goto out;

        netif_stop_queue(ndev);
        xge_intr_disable(pdata);
        napi_disable(&pdata->napi);

        xge_wr_csr(pdata, DMATXCTRL, 0);
        xge_txc_poll(ndev);
        xge_free_pending_skb(ndev);
        xge_wr_csr(pdata, DMATXSTATUS, ~0U);

        xge_setup_desc(pdata->tx_ring);
        xge_update_tx_desc_addr(pdata);
        xge_mac_init(pdata);

        napi_enable(&pdata->napi);
        xge_intr_enable(pdata);
        xge_mac_enable(pdata);
        netif_start_queue(ndev);

out:
        rtnl_unlock();
}

static void xge_get_stats64(struct net_device *ndev,
                            struct rtnl_link_stats64 *storage)
{
        struct xge_pdata *pdata = netdev_priv(ndev);
        struct xge_stats *stats = &pdata->stats;

        storage->tx_packets += stats->tx_packets;
        storage->tx_bytes += stats->tx_bytes;

        storage->rx_packets += stats->rx_packets;
        storage->rx_bytes += stats->rx_bytes;
        storage->rx_errors += stats->rx_errors;
}

static const struct net_device_ops xgene_ndev_ops = {
        .ndo_open = xge_open,
        .ndo_stop = xge_close,
        .ndo_start_xmit = xge_start_xmit,
        .ndo_set_mac_address = xge_set_mac_addr,
        .ndo_tx_timeout = xge_timeout,
        .ndo_get_stats64 = xge_get_stats64,
};

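/*
 * Probe: allocate the netdev, pick up platform resources, set a 64-bit
 * DMA mask, initialize the hardware and the MDIO bus, then register
 * the device; MDIO is torn down again if registration fails.
 */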
static int xge_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct net_device *ndev;
        struct xge_pdata *pdata;
        int ret;

        ndev = alloc_etherdev(sizeof(*pdata));
        if (!ndev)
                return -ENOMEM;

        pdata = netdev_priv(ndev);

        pdata->pdev = pdev;
        pdata->ndev = ndev;
        SET_NETDEV_DEV(ndev, dev);
        platform_set_drvdata(pdev, pdata);
        ndev->netdev_ops = &xgene_ndev_ops;

        ndev->features |= NETIF_F_GSO |
                          NETIF_F_GRO;

        ret = xge_get_resources(pdata);
        if (ret)
                goto err;

        ndev->hw_features = ndev->features;
        xge_set_ethtool_ops(ndev);

        ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret) {
                netdev_err(ndev, "No usable DMA configuration\n");
                goto err;
        }

        ret = xge_init_hw(ndev);
        if (ret)
                goto err;

        ret = xge_mdio_config(ndev);
        if (ret)
                goto err;

        netif_napi_add(ndev, &pdata->napi, xge_napi, NAPI_POLL_WEIGHT);

        ret = register_netdev(ndev);
        if (ret) {
                netdev_err(ndev, "Failed to register netdev\n");
                goto err_mdio_remove;
        }

        return 0;

err_mdio_remove:
        xge_mdio_remove(ndev);
err:
        free_netdev(ndev);

        return ret;
}

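/*
 * Remove: close the interface if it is up, unregister the netdev, then
 * detach MDIO and free the netdev.
 */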
static int xge_remove(struct platform_device *pdev)
{
        struct xge_pdata *pdata;
        struct net_device *ndev;

        pdata = platform_get_drvdata(pdev);
        ndev = pdata->ndev;

        rtnl_lock();
        if (netif_running(ndev))
                dev_close(ndev);
        rtnl_unlock();

        unregister_netdev(ndev);
        xge_mdio_remove(ndev);
        free_netdev(ndev);

        return 0;
}

static void xge_shutdown(struct platform_device *pdev)
{
        struct xge_pdata *pdata;

        pdata = platform_get_drvdata(pdev);
        if (!pdata)
                return;

        if (!pdata->ndev)
                return;

        xge_remove(pdev);
}

static const struct acpi_device_id xge_acpi_match[] = {
        { "APMC0D80" },
        { }
};
MODULE_DEVICE_TABLE(acpi, xge_acpi_match);

static struct platform_driver xge_driver = {
        .driver = {
                   .name = "xgene-enet-v2",
                   .acpi_match_table = ACPI_PTR(xge_acpi_match),
        },
        .probe = xge_probe,
        .remove = xge_remove,
        .shutdown = xge_shutdown,
};
module_platform_driver(xge_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet v2 driver");
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_LICENSE("GPL");