linux/drivers/net/ethernet/renesas/ravb_main.c
/* Renesas Ethernet AVB device driver
 *
 * Copyright (C) 2014-2015 Renesas Electronics Corporation
 * Copyright (C) 2015 Renesas Solutions Corp.
 * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
 *
 * Based on the SuperH Ethernet driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License version 2,
 * as published by the Free Software Foundation.
 */

#include <linux/cache.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "ravb.h"

#define RAVB_DEF_MSG_ENABLE \
                (NETIF_MSG_LINK   | \
                 NETIF_MSG_TIMER  | \
                 NETIF_MSG_RX_ERR | \
                 NETIF_MSG_TX_ERR)

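/* Poll a register until (reg & mask) == value; 10000 polls at 10 us
 * intervals give a worst-case timeout of about 100 ms.
 */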
int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
{
        int i;

        for (i = 0; i < 10000; i++) {
                if ((ravb_read(ndev, reg) & mask) == value)
                        return 0;
                udelay(10);
        }
        return -ETIMEDOUT;
}

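/* Request CONFIG mode via CCC.OPC and wait for CSR.OPS to confirm that the
 * mode change has taken effect.
 */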
static int ravb_config(struct net_device *ndev)
{
        int error;

        /* Set config mode */
        ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG,
                   CCC);
        /* Check if the operating mode was changed to CONFIG mode */
        error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
        if (error)
                netdev_err(ndev, "failed to switch device to config mode\n");

        return error;
}

static void ravb_set_duplex(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        u32 ecmr = ravb_read(ndev, ECMR);

        if (priv->duplex)       /* Full */
                ecmr |=  ECMR_DM;
        else                    /* Half */
                ecmr &= ~ECMR_DM;
        ravb_write(ndev, ecmr, ECMR);
}

static void ravb_set_rate(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);

        switch (priv->speed) {
        case 100:               /* 100BASE */
                ravb_write(ndev, GECMR_SPEED_100, GECMR);
                break;
        case 1000:              /* 1000BASE */
                ravb_write(ndev, GECMR_SPEED_1000, GECMR);
                break;
        default:
                break;
        }
}

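/* Reserve up to RAVB_ALIGN - 1 bytes so that skb->data ends up aligned to
 * RAVB_ALIGN before the buffer is handed to an RX descriptor.
 */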
static void ravb_set_buffer_align(struct sk_buff *skb)
{
        u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);

        if (reserve)
                skb_reserve(skb, RAVB_ALIGN - reserve);
}

/* Get MAC address from the MAC address registers
 *
 * The Ethernet AVB device doesn't have a ROM for the MAC address.
 * This function gets the MAC address that was set up by the bootloader.
 */
static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac)
{
        if (mac) {
                ether_addr_copy(ndev->dev_addr, mac);
        } else {
                ndev->dev_addr[0] = (ravb_read(ndev, MAHR) >> 24);
                ndev->dev_addr[1] = (ravb_read(ndev, MAHR) >> 16) & 0xFF;
                ndev->dev_addr[2] = (ravb_read(ndev, MAHR) >> 8) & 0xFF;
                ndev->dev_addr[3] = (ravb_read(ndev, MAHR) >> 0) & 0xFF;
                ndev->dev_addr[4] = (ravb_read(ndev, MALR) >> 8) & 0xFF;
                ndev->dev_addr[5] = (ravb_read(ndev, MALR) >> 0) & 0xFF;
        }
}

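/* The MDIO bus is bit-banged through the PIR register; the helpers below
 * are plugged into the common mdio-bitbang framework via bb_ops.
 */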
static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
{
        struct ravb_private *priv = container_of(ctrl, struct ravb_private,
                                                 mdiobb);
        u32 pir = ravb_read(priv->ndev, PIR);

        if (set)
                pir |=  mask;
        else
                pir &= ~mask;
        ravb_write(priv->ndev, pir, PIR);
}

/* MDC pin control */
static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
{
        ravb_mdio_ctrl(ctrl, PIR_MDC, level);
}

/* Data I/O pin control */
static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
{
        ravb_mdio_ctrl(ctrl, PIR_MMD, output);
}

/* Set data bit */
static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
{
        ravb_mdio_ctrl(ctrl, PIR_MDO, value);
}

/* Get data bit */
static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
{
        struct ravb_private *priv = container_of(ctrl, struct ravb_private,
                                                 mdiobb);

        return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
}

/* MDIO bus control struct */
static struct mdiobb_ops bb_ops = {
        .owner = THIS_MODULE,
        .set_mdc = ravb_set_mdc,
        .set_mdio_dir = ravb_set_mdio_dir,
        .set_mdio_data = ravb_set_mdio_data,
        .get_mdio_data = ravb_get_mdio_data,
};

/* Free skb's and DMA buffers for Ethernet AVB */
static void ravb_ring_free(struct net_device *ndev, int q)
{
        struct ravb_private *priv = netdev_priv(ndev);
        int ring_size;
        int i;

        /* Free RX skb ringbuffer */
        if (priv->rx_skb[q]) {
                for (i = 0; i < priv->num_rx_ring[q]; i++)
                        dev_kfree_skb(priv->rx_skb[q][i]);
        }
        kfree(priv->rx_skb[q]);
        priv->rx_skb[q] = NULL;

        /* Free TX skb ringbuffer */
        if (priv->tx_skb[q]) {
                for (i = 0; i < priv->num_tx_ring[q]; i++)
                        dev_kfree_skb(priv->tx_skb[q][i]);
        }
        kfree(priv->tx_skb[q]);
        priv->tx_skb[q] = NULL;

        /* Free aligned TX buffers */
        kfree(priv->tx_align[q]);
        priv->tx_align[q] = NULL;

        if (priv->rx_ring[q]) {
                ring_size = sizeof(struct ravb_ex_rx_desc) *
                            (priv->num_rx_ring[q] + 1);
                dma_free_coherent(NULL, ring_size, priv->rx_ring[q],
                                  priv->rx_desc_dma[q]);
                priv->rx_ring[q] = NULL;
        }

        if (priv->tx_ring[q]) {
                ring_size = sizeof(struct ravb_tx_desc) *
                            (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
                dma_free_coherent(NULL, ring_size, priv->tx_ring[q],
                                  priv->tx_desc_dma[q]);
                priv->tx_ring[q] = NULL;
        }
}

/* Format skb and descriptor buffer for Ethernet AVB */
static void ravb_ring_format(struct net_device *ndev, int q)
{
        struct ravb_private *priv = netdev_priv(ndev);
        struct ravb_ex_rx_desc *rx_desc;
        struct ravb_tx_desc *tx_desc;
        struct ravb_desc *desc;
        int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
        int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
                           NUM_TX_DESC;
        dma_addr_t dma_addr;
        int i;

        priv->cur_rx[q] = 0;
        priv->cur_tx[q] = 0;
        priv->dirty_rx[q] = 0;
        priv->dirty_tx[q] = 0;

        memset(priv->rx_ring[q], 0, rx_ring_size);
        /* Build RX ring buffer */
        for (i = 0; i < priv->num_rx_ring[q]; i++) {
                /* RX descriptor */
                rx_desc = &priv->rx_ring[q][i];
                /* The size of the buffer should be on a 16-byte boundary. */
                rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
                dma_addr = dma_map_single(&ndev->dev, priv->rx_skb[q][i]->data,
                                          ALIGN(PKT_BUF_SZ, 16),
                                          DMA_FROM_DEVICE);
                /* We just set the data size to 0 for a failed mapping which
                 * should prevent DMA from happening...
                 */
                if (dma_mapping_error(&ndev->dev, dma_addr))
                        rx_desc->ds_cc = cpu_to_le16(0);
                rx_desc->dptr = cpu_to_le32(dma_addr);
                rx_desc->die_dt = DT_FEMPTY;
        }
        rx_desc = &priv->rx_ring[q][i];
        rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
        rx_desc->die_dt = DT_LINKFIX; /* type */

        memset(priv->tx_ring[q], 0, tx_ring_size);
        /* Build TX ring buffer */
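        /* Each frame occupies NUM_TX_DESC (i.e. 2) descriptors: one for the
         * aligned copy of the frame head made in ravb_start_xmit() and one
         * for the rest of the frame, hence two DT_EEMPTY slots per entry.
         */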
        for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
             i++, tx_desc++) {
                tx_desc->die_dt = DT_EEMPTY;
                tx_desc++;
                tx_desc->die_dt = DT_EEMPTY;
        }
        tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
        tx_desc->die_dt = DT_LINKFIX; /* type */

        /* RX descriptor base address for this queue */
        desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
        desc->die_dt = DT_LINKFIX; /* type */
        desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);

        /* TX descriptor base address for this queue */
        desc = &priv->desc_bat[q];
        desc->die_dt = DT_LINKFIX; /* type */
        desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
}

/* Init skb and descriptor buffer for Ethernet AVB */
static int ravb_ring_init(struct net_device *ndev, int q)
{
        struct ravb_private *priv = netdev_priv(ndev);
        struct sk_buff *skb;
        int ring_size;
        int i;

        /* Allocate RX and TX skb rings */
        priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
                                  sizeof(*priv->rx_skb[q]), GFP_KERNEL);
        priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
                                  sizeof(*priv->tx_skb[q]), GFP_KERNEL);
        if (!priv->rx_skb[q] || !priv->tx_skb[q])
                goto error;

        for (i = 0; i < priv->num_rx_ring[q]; i++) {
                skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
                if (!skb)
                        goto error;
                ravb_set_buffer_align(skb);
                priv->rx_skb[q][i] = skb;
        }

        /* Allocate rings for the aligned buffers */
        priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
                                    DPTR_ALIGN - 1, GFP_KERNEL);
        if (!priv->tx_align[q])
                goto error;

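        /* Both rings get one extra descriptor that ravb_ring_format() turns
         * into a DT_LINKFIX descriptor pointing back at the ring base,
         * closing the ring.
         */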
        /* Allocate all RX descriptors. */
        ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
        priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size,
                                              &priv->rx_desc_dma[q],
                                              GFP_KERNEL);
        if (!priv->rx_ring[q])
                goto error;

        priv->dirty_rx[q] = 0;

        /* Allocate all TX descriptors. */
        ring_size = sizeof(struct ravb_tx_desc) *
                    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
        priv->tx_ring[q] = dma_alloc_coherent(NULL, ring_size,
                                              &priv->tx_desc_dma[q],
                                              GFP_KERNEL);
        if (!priv->tx_ring[q])
                goto error;

        return 0;

error:
        ravb_ring_free(ndev, q);

        return -ENOMEM;
}

/* E-MAC init function */
static void ravb_emac_init(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        u32 ecmr;

        /* Receive frame limit set register */
        ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);

        /* PAUSE prohibition */
        ecmr =  ravb_read(ndev, ECMR);
        ecmr &= ECMR_DM;
        ecmr |= ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
        ravb_write(ndev, ecmr, ECMR);

        ravb_set_rate(ndev);

        /* Set MAC address */
        ravb_write(ndev,
                   (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
                   (ndev->dev_addr[2] << 8)  | (ndev->dev_addr[3]), MAHR);
        ravb_write(ndev,
                   (ndev->dev_addr[4] << 8)  | (ndev->dev_addr[5]), MALR);

        ravb_write(ndev, 1, MPR);

        /* E-MAC status register clear */
        ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);

        /* E-MAC interrupt enable register */
        ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
}

/* Device init function for Ethernet AVB */
static int ravb_dmac_init(struct net_device *ndev)
{
        int error;

        /* Set CONFIG mode */
        error = ravb_config(ndev);
        if (error)
                return error;

        error = ravb_ring_init(ndev, RAVB_BE);
        if (error)
                return error;
        error = ravb_ring_init(ndev, RAVB_NC);
        if (error) {
                ravb_ring_free(ndev, RAVB_BE);
                return error;
        }

        /* Descriptor format */
        ravb_ring_format(ndev, RAVB_BE);
        ravb_ring_format(ndev, RAVB_NC);

#if defined(__LITTLE_ENDIAN)
        ravb_write(ndev, ravb_read(ndev, CCC) & ~CCC_BOC, CCC);
#else
        ravb_write(ndev, ravb_read(ndev, CCC) | CCC_BOC, CCC);
#endif

        /* Set AVB RX */
        ravb_write(ndev, RCR_EFFS | RCR_ENCF | RCR_ETS0 | 0x18000000, RCR);

        /* Set FIFO size */
        ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC);

        /* Timestamp enable */
        ravb_write(ndev, TCCR_TFEN, TCCR);

        /* Interrupt enable: */
        /* Frame receive */
        ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
        /* Receive FIFO full warning */
        ravb_write(ndev, RIC1_RFWE, RIC1);
        /* Receive FIFO full error, descriptor empty */
        ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
        /* Frame transmitted, timestamp FIFO updated */
        ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);

        /* Setting the control will start the AVB-DMAC process. */
        ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_OPERATION,
                   CCC);

        return 0;
}

/* Free TX skb function for AVB-IP */
static int ravb_tx_free(struct net_device *ndev, int q)
{
        struct ravb_private *priv = netdev_priv(ndev);
        struct net_device_stats *stats = &priv->stats[q];
        struct ravb_tx_desc *desc;
        int free_num = 0;
        int entry;
        u32 size;

        for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
                entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
                                             NUM_TX_DESC);
                desc = &priv->tx_ring[q][entry];
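                /* Stop at the first descriptor that the AVB-DMAC has not
                 * yet marked back as DT_FEMPTY, i.e. not yet transmitted.
                 */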
                if (desc->die_dt != DT_FEMPTY)
                        break;
                /* Descriptor type must be checked before all other reads */
                dma_rmb();
                size = le16_to_cpu(desc->ds_tagl) & TX_DS;
                /* Free the original skb. */
                if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
                        dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
                                         size, DMA_TO_DEVICE);
                        /* Last packet descriptor? */
                        if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
                                entry /= NUM_TX_DESC;
                                dev_kfree_skb_any(priv->tx_skb[q][entry]);
                                priv->tx_skb[q][entry] = NULL;
                                stats->tx_packets++;
                        }
                        free_num++;
                }
                stats->tx_bytes += size;
                desc->die_dt = DT_EEMPTY;
        }
        return free_num;
}

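/* Drain the TX timestamp FIFO: match each entry's tag (from TFA2) against
 * the skbs queued on ts_skb_list, deliver the timestamp read from TFA0/TFA1,
 * then release the FIFO entry by setting TCCR.TFR.
 */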
static void ravb_get_tx_tstamp(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        struct ravb_tstamp_skb *ts_skb, *ts_skb2;
        struct skb_shared_hwtstamps shhwtstamps;
        struct sk_buff *skb;
        struct timespec64 ts;
        u16 tag, tfa_tag;
        int count;
        u32 tfa2;

        count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8;
        while (count--) {
                tfa2 = ravb_read(ndev, TFA2);
                tfa_tag = (tfa2 & TFA2_TST) >> 16;
                ts.tv_nsec = (u64)ravb_read(ndev, TFA0);
                ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) |
                            ravb_read(ndev, TFA1);
                memset(&shhwtstamps, 0, sizeof(shhwtstamps));
                shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
                list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list,
                                         list) {
                        skb = ts_skb->skb;
                        tag = ts_skb->tag;
                        list_del(&ts_skb->list);
                        kfree(ts_skb);
                        if (tag == tfa_tag) {
                                skb_tstamp_tx(skb, &shhwtstamps);
                                break;
                        }
                }
                ravb_write(ndev, ravb_read(ndev, TCCR) | TCCR_TFR, TCCR);
        }
}

/* Packet receive function for Ethernet AVB */
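/* Returns true when the NAPI budget in *quota has been consumed, i.e. more
 * packets may still be pending on the ring.
 */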
static bool ravb_rx(struct net_device *ndev, int *quota, int q)
{
        struct ravb_private *priv = netdev_priv(ndev);
        int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
        int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
                        priv->cur_rx[q];
        struct net_device_stats *stats = &priv->stats[q];
        struct ravb_ex_rx_desc *desc;
        struct sk_buff *skb;
        dma_addr_t dma_addr;
        struct timespec64 ts;
        u8  desc_status;
        u16 pkt_len;
        int limit;

        boguscnt = min(boguscnt, *quota);
        limit = boguscnt;
        desc = &priv->rx_ring[q][entry];
        while (desc->die_dt != DT_FEMPTY) {
                /* Descriptor type must be checked before all other reads */
                dma_rmb();
                desc_status = desc->msc;
                pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;

                if (--boguscnt < 0)
                        break;

                /* We use 0-byte descriptors to mark the DMA mapping errors */
                if (!pkt_len)
                        continue;

                if (desc_status & MSC_MC)
                        stats->multicast++;

                if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF |
                                   MSC_CEEF)) {
                        stats->rx_errors++;
                        if (desc_status & MSC_CRC)
                                stats->rx_crc_errors++;
                        if (desc_status & MSC_RFE)
                                stats->rx_frame_errors++;
                        if (desc_status & (MSC_RTLF | MSC_RTSF))
                                stats->rx_length_errors++;
                        if (desc_status & MSC_CEEF)
                                stats->rx_missed_errors++;
                } else {
                        u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;

                        skb = priv->rx_skb[q][entry];
                        priv->rx_skb[q][entry] = NULL;
                        dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
                                         ALIGN(PKT_BUF_SZ, 16),
                                         DMA_FROM_DEVICE);
                        get_ts &= (q == RAVB_NC) ?
                                        RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
                                        ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
                        if (get_ts) {
                                struct skb_shared_hwtstamps *shhwtstamps;

                                shhwtstamps = skb_hwtstamps(skb);
                                memset(shhwtstamps, 0, sizeof(*shhwtstamps));
                                ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) <<
                                             32) | le32_to_cpu(desc->ts_sl);
                                ts.tv_nsec = le32_to_cpu(desc->ts_n);
                                shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
                        }
                        skb_put(skb, pkt_len);
                        skb->protocol = eth_type_trans(skb, ndev);
                        napi_gro_receive(&priv->napi[q], skb);
                        stats->rx_packets++;
                        stats->rx_bytes += pkt_len;
                }

                entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
                desc = &priv->rx_ring[q][entry];
        }

        /* Refill the RX ring buffers. */
        for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
                entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
                desc = &priv->rx_ring[q][entry];
                /* The size of the buffer should be on a 16-byte boundary. */
                desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));

                if (!priv->rx_skb[q][entry]) {
                        skb = netdev_alloc_skb(ndev,
                                               PKT_BUF_SZ + RAVB_ALIGN - 1);
                        if (!skb)
                                break;  /* Better luck next round. */
                        ravb_set_buffer_align(skb);
                        dma_addr = dma_map_single(&ndev->dev, skb->data,
                                                  le16_to_cpu(desc->ds_cc),
                                                  DMA_FROM_DEVICE);
                        skb_checksum_none_assert(skb);
                        /* We just set the data size to 0 for a failed mapping
                         * which should prevent DMA from happening...
                         */
                        if (dma_mapping_error(&ndev->dev, dma_addr))
                                desc->ds_cc = cpu_to_le16(0);
                        desc->dptr = cpu_to_le32(dma_addr);
                        priv->rx_skb[q][entry] = skb;
                }
                /* Descriptor type must be set after all the above writes */
                dma_wmb();
                desc->die_dt = DT_FEMPTY;
        }

        *quota -= limit - (++boguscnt);

        return boguscnt <= 0;
}

static void ravb_rcv_snd_disable(struct net_device *ndev)
{
        /* Disable TX and RX */
        ravb_write(ndev, ravb_read(ndev, ECMR) & ~(ECMR_RE | ECMR_TE), ECMR);
}

static void ravb_rcv_snd_enable(struct net_device *ndev)
{
        /* Enable TX and RX */
        ravb_write(ndev, ravb_read(ndev, ECMR) | ECMR_RE | ECMR_TE, ECMR);
}

/* Wait until the DMA process has finished */
static int ravb_stop_dma(struct net_device *ndev)
{
        int error;

        /* Wait for the hardware TX process to stop */
        error = ravb_wait(ndev, TCCR,
                          TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 0);
        if (error)
                return error;

        error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3,
                          0);
        if (error)
                return error;

        /* Stop the E-MAC's RX/TX processes. */
        ravb_rcv_snd_disable(ndev);

        /* Wait for the RX DMA process to stop */
        error = ravb_wait(ndev, CSR, CSR_RPO, 0);
        if (error)
                return error;

        /* Stop AVB-DMAC process */
        return ravb_config(ndev);
}

/* E-MAC interrupt handler */
static void ravb_emac_interrupt(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        u32 ecsr, psr;

        ecsr = ravb_read(ndev, ECSR);
        ravb_write(ndev, ecsr, ECSR);   /* clear interrupt */
        if (ecsr & ECSR_ICD)
                ndev->stats.tx_carrier_errors++;
        if (ecsr & ECSR_LCHNG) {
                /* Link changed */
                if (priv->no_avb_link)
                        return;
                psr = ravb_read(ndev, PSR);
                if (priv->avb_link_active_low)
                        psr ^= PSR_LMON;
                if (!(psr & PSR_LMON)) {
                        /* Disable RX and TX */
                        ravb_rcv_snd_disable(ndev);
                } else {
                        /* Enable RX and TX */
                        ravb_rcv_snd_enable(ndev);
                }
        }
}

/* Error interrupt handler */
static void ravb_error_interrupt(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        u32 eis, ris2;

        eis = ravb_read(ndev, EIS);
        ravb_write(ndev, ~EIS_QFS, EIS);
        if (eis & EIS_QFS) {
                ris2 = ravb_read(ndev, RIS2);
                ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2);

                /* Receive Descriptor Empty int */
                if (ris2 & RIS2_QFF0)
                        priv->stats[RAVB_BE].rx_over_errors++;

                /* Receive Descriptor Empty int */
                if (ris2 & RIS2_QFF1)
                        priv->stats[RAVB_NC].rx_over_errors++;

                /* Receive FIFO Overflow int */
                if (ris2 & RIS2_RFFF)
                        priv->rx_fifo_errors++;
        }
}

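/* The ISS register summarizes all AVB interrupt sources; each summary bit
 * is dispatched to its handler below, with RX/TX work deferred to NAPI.
 */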
static irqreturn_t ravb_interrupt(int irq, void *dev_id)
{
        struct net_device *ndev = dev_id;
        struct ravb_private *priv = netdev_priv(ndev);
        irqreturn_t result = IRQ_NONE;
        u32 iss;

        spin_lock(&priv->lock);
        /* Get interrupt status */
        iss = ravb_read(ndev, ISS);

        /* Received and transmitted interrupts */
        if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
                u32 ris0 = ravb_read(ndev, RIS0);
                u32 ric0 = ravb_read(ndev, RIC0);
                u32 tis  = ravb_read(ndev, TIS);
                u32 tic  = ravb_read(ndev, TIC);
                int q;

                /* Timestamp updated */
                if (tis & TIS_TFUF) {
                        ravb_write(ndev, ~TIS_TFUF, TIS);
                        ravb_get_tx_tstamp(ndev);
                        result = IRQ_HANDLED;
                }

                /* Network control and best effort queue RX/TX */
                for (q = RAVB_NC; q >= RAVB_BE; q--) {
                        if (((ris0 & ric0) & BIT(q)) ||
                            ((tis  & tic)  & BIT(q))) {
                                if (napi_schedule_prep(&priv->napi[q])) {
                                        /* Mask RX and TX interrupts */
                                        ravb_write(ndev, ric0 & ~BIT(q), RIC0);
                                        ravb_write(ndev, tic  & ~BIT(q), TIC);
                                        __napi_schedule(&priv->napi[q]);
                                } else {
                                        netdev_warn(ndev,
                                                    "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
                                                    ris0, ric0);
                                        netdev_warn(ndev,
                                                    "                    tx status 0x%08x, tx mask 0x%08x.\n",
                                                    tis, tic);
                                }
                                result = IRQ_HANDLED;
                        }
                }
        }

        /* E-MAC status summary */
        if (iss & ISS_MS) {
                ravb_emac_interrupt(ndev);
                result = IRQ_HANDLED;
        }

        /* Error status summary */
        if (iss & ISS_ES) {
                ravb_error_interrupt(ndev);
                result = IRQ_HANDLED;
        }

        if (iss & ISS_CGIS)
                result = ravb_ptp_interrupt(ndev);

        mmiowb();
        spin_unlock(&priv->lock);
        return result;
}

static int ravb_poll(struct napi_struct *napi, int budget)
{
        struct net_device *ndev = napi->dev;
        struct ravb_private *priv = netdev_priv(ndev);
        unsigned long flags;
        int q = napi - priv->napi;
        int mask = BIT(q);
        int quota = budget;
        u32 ris0, tis;

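        /* Keep servicing this queue until neither RX nor TX interrupt
         * status remains pending or the RX quota runs out.
         */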
        for (;;) {
                tis = ravb_read(ndev, TIS);
                ris0 = ravb_read(ndev, RIS0);
                if (!((ris0 & mask) || (tis & mask)))
                        break;

                /* Processing RX Descriptor Ring */
                if (ris0 & mask) {
                        /* Clear RX interrupt */
                        ravb_write(ndev, ~mask, RIS0);
                        if (ravb_rx(ndev, &quota, q))
                                goto out;
                }
                /* Processing TX Descriptor Ring */
                if (tis & mask) {
                        spin_lock_irqsave(&priv->lock, flags);
                        /* Clear TX interrupt */
                        ravb_write(ndev, ~mask, TIS);
                        ravb_tx_free(ndev, q);
                        netif_wake_subqueue(ndev, q);
                        mmiowb();
                        spin_unlock_irqrestore(&priv->lock, flags);
                }
        }

        napi_complete(napi);

        /* Re-enable RX/TX interrupts */
        spin_lock_irqsave(&priv->lock, flags);
        ravb_write(ndev, ravb_read(ndev, RIC0) | mask, RIC0);
        ravb_write(ndev, ravb_read(ndev, TIC)  | mask,  TIC);
        mmiowb();
        spin_unlock_irqrestore(&priv->lock, flags);

        /* Receive error message handling */
        priv->rx_over_errors =  priv->stats[RAVB_BE].rx_over_errors;
        priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
        if (priv->rx_over_errors != ndev->stats.rx_over_errors) {
                ndev->stats.rx_over_errors = priv->rx_over_errors;
                netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n");
        }
        if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) {
                ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
                netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n");
        }
out:
        return budget - quota;
}

/* PHY state control function */
static void ravb_adjust_link(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        struct phy_device *phydev = priv->phydev;
        bool new_state = false;

        if (phydev->link) {
                if (phydev->duplex != priv->duplex) {
                        new_state = true;
                        priv->duplex = phydev->duplex;
                        ravb_set_duplex(ndev);
                }

                if (phydev->speed != priv->speed) {
                        new_state = true;
                        priv->speed = phydev->speed;
                        ravb_set_rate(ndev);
                }
                if (!priv->link) {
                        ravb_write(ndev, ravb_read(ndev, ECMR) & ~ECMR_TXF,
                                   ECMR);
                        new_state = true;
                        priv->link = phydev->link;
                        if (priv->no_avb_link)
                                ravb_rcv_snd_enable(ndev);
                }
        } else if (priv->link) {
                new_state = true;
                priv->link = 0;
                priv->speed = 0;
                priv->duplex = -1;
                if (priv->no_avb_link)
                        ravb_rcv_snd_disable(ndev);
        }

        if (new_state && netif_msg_link(priv))
                phy_print_status(phydev);
}

/* PHY init function */
static int ravb_phy_init(struct net_device *ndev)
{
        struct device_node *np = ndev->dev.parent->of_node;
        struct ravb_private *priv = netdev_priv(ndev);
        struct phy_device *phydev;
        struct device_node *pn;

        priv->link = 0;
        priv->speed = 0;
        priv->duplex = -1;

        /* Try connecting to PHY */
        pn = of_parse_phandle(np, "phy-handle", 0);
        phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0,
                                priv->phy_interface);
        if (!phydev) {
                netdev_err(ndev, "failed to connect PHY\n");
                return -ENOENT;
        }

        netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
                    phydev->addr, phydev->irq, phydev->drv->name);

        priv->phydev = phydev;

        return 0;
}

/* PHY control start function */
static int ravb_phy_start(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        int error;

        error = ravb_phy_init(ndev);
        if (error)
                return error;

        phy_start(priv->phydev);

        return 0;
}

static int ravb_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
        struct ravb_private *priv = netdev_priv(ndev);
        int error = -ENODEV;
        unsigned long flags;

        if (priv->phydev) {
                spin_lock_irqsave(&priv->lock, flags);
                error = phy_ethtool_gset(priv->phydev, ecmd);
                spin_unlock_irqrestore(&priv->lock, flags);
        }

        return error;
}

static int ravb_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
        struct ravb_private *priv = netdev_priv(ndev);
        unsigned long flags;
        int error;

        if (!priv->phydev)
                return -ENODEV;

        spin_lock_irqsave(&priv->lock, flags);

        /* Disable TX and RX */
        ravb_rcv_snd_disable(ndev);

        error = phy_ethtool_sset(priv->phydev, ecmd);
        if (error)
                goto error_exit;

        if (ecmd->duplex == DUPLEX_FULL)
                priv->duplex = 1;
        else
                priv->duplex = 0;

        ravb_set_duplex(ndev);

error_exit:
        mdelay(1);

        /* Enable TX and RX */
        ravb_rcv_snd_enable(ndev);

        mmiowb();
        spin_unlock_irqrestore(&priv->lock, flags);

        return error;
}

static int ravb_nway_reset(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        int error = -ENODEV;
        unsigned long flags;

        if (priv->phydev) {
                spin_lock_irqsave(&priv->lock, flags);
                error = phy_start_aneg(priv->phydev);
                spin_unlock_irqrestore(&priv->lock, flags);
        }

        return error;
}

static u32 ravb_get_msglevel(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);

        return priv->msg_enable;
}

static void ravb_set_msglevel(struct net_device *ndev, u32 value)
{
        struct ravb_private *priv = netdev_priv(ndev);

        priv->msg_enable = value;
}

static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
        "rx_queue_0_current",
        "tx_queue_0_current",
        "rx_queue_0_dirty",
        "tx_queue_0_dirty",
        "rx_queue_0_packets",
        "tx_queue_0_packets",
        "rx_queue_0_bytes",
        "tx_queue_0_bytes",
        "rx_queue_0_mcast_packets",
        "rx_queue_0_errors",
        "rx_queue_0_crc_errors",
        "rx_queue_0_frame_errors",
        "rx_queue_0_length_errors",
        "rx_queue_0_missed_errors",
        "rx_queue_0_over_errors",

        "rx_queue_1_current",
        "tx_queue_1_current",
        "rx_queue_1_dirty",
        "tx_queue_1_dirty",
        "rx_queue_1_packets",
        "tx_queue_1_packets",
        "rx_queue_1_bytes",
        "tx_queue_1_bytes",
        "rx_queue_1_mcast_packets",
        "rx_queue_1_errors",
        "rx_queue_1_crc_errors",
        "rx_queue_1_frame_errors",
        "rx_queue_1_length_errors",
        "rx_queue_1_missed_errors",
        "rx_queue_1_over_errors",
};

#define RAVB_STATS_LEN  ARRAY_SIZE(ravb_gstrings_stats)

static int ravb_get_sset_count(struct net_device *netdev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return RAVB_STATS_LEN;
        default:
                return -EOPNOTSUPP;
        }
}

static void ravb_get_ethtool_stats(struct net_device *ndev,
                                   struct ethtool_stats *stats, u64 *data)
{
        struct ravb_private *priv = netdev_priv(ndev);
        int i = 0;
        int q;

        /* Device-specific stats */
        for (q = RAVB_BE; q < NUM_RX_QUEUE; q++) {
                struct net_device_stats *stats = &priv->stats[q];

                data[i++] = priv->cur_rx[q];
                data[i++] = priv->cur_tx[q];
                data[i++] = priv->dirty_rx[q];
                data[i++] = priv->dirty_tx[q];
                data[i++] = stats->rx_packets;
                data[i++] = stats->tx_packets;
                data[i++] = stats->rx_bytes;
                data[i++] = stats->tx_bytes;
                data[i++] = stats->multicast;
                data[i++] = stats->rx_errors;
                data[i++] = stats->rx_crc_errors;
                data[i++] = stats->rx_frame_errors;
                data[i++] = stats->rx_length_errors;
                data[i++] = stats->rx_missed_errors;
                data[i++] = stats->rx_over_errors;
        }
}

static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
        switch (stringset) {
        case ETH_SS_STATS:
                memcpy(data, *ravb_gstrings_stats, sizeof(ravb_gstrings_stats));
                break;
        }
}

static void ravb_get_ringparam(struct net_device *ndev,
                               struct ethtool_ringparam *ring)
{
        struct ravb_private *priv = netdev_priv(ndev);

        ring->rx_max_pending = BE_RX_RING_MAX;
        ring->tx_max_pending = BE_TX_RING_MAX;
        ring->rx_pending = priv->num_rx_ring[RAVB_BE];
        ring->tx_pending = priv->num_tx_ring[RAVB_BE];
}

static int ravb_set_ringparam(struct net_device *ndev,
                              struct ethtool_ringparam *ring)
{
        struct ravb_private *priv = netdev_priv(ndev);
        int error;

        if (ring->tx_pending > BE_TX_RING_MAX ||
            ring->rx_pending > BE_RX_RING_MAX ||
            ring->tx_pending < BE_TX_RING_MIN ||
            ring->rx_pending < BE_RX_RING_MIN)
                return -EINVAL;
        if (ring->rx_mini_pending || ring->rx_jumbo_pending)
                return -EINVAL;

        if (netif_running(ndev)) {
                netif_device_detach(ndev);
                /* Stop PTP Clock driver */
                ravb_ptp_stop(ndev);
                /* Wait for the DMA to stop */
                error = ravb_stop_dma(ndev);
                if (error) {
                        netdev_err(ndev,
                                   "cannot set ringparam! Are any AVB processes still running?\n");
                        return error;
                }
                synchronize_irq(ndev->irq);

                /* Free all the skb's in the RX queue and the DMA buffers. */
                ravb_ring_free(ndev, RAVB_BE);
                ravb_ring_free(ndev, RAVB_NC);
        }

        /* Set new parameters */
        priv->num_rx_ring[RAVB_BE] = ring->rx_pending;
        priv->num_tx_ring[RAVB_BE] = ring->tx_pending;

        if (netif_running(ndev)) {
                error = ravb_dmac_init(ndev);
                if (error) {
                        netdev_err(ndev,
                                   "%s: ravb_dmac_init() failed, error %d\n",
                                   __func__, error);
                        return error;
                }

                ravb_emac_init(ndev);

                /* Initialise PTP Clock driver */
                ravb_ptp_init(ndev, priv->pdev);

                netif_device_attach(ndev);
        }

        return 0;
}

static int ravb_get_ts_info(struct net_device *ndev,
                            struct ethtool_ts_info *info)
{
        struct ravb_private *priv = netdev_priv(ndev);

        info->so_timestamping =
                SOF_TIMESTAMPING_TX_SOFTWARE |
                SOF_TIMESTAMPING_RX_SOFTWARE |
                SOF_TIMESTAMPING_SOFTWARE |
                SOF_TIMESTAMPING_TX_HARDWARE |
                SOF_TIMESTAMPING_RX_HARDWARE |
                SOF_TIMESTAMPING_RAW_HARDWARE;
        info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
        info->rx_filters =
                (1 << HWTSTAMP_FILTER_NONE) |
                (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
                (1 << HWTSTAMP_FILTER_ALL);
        info->phc_index = ptp_clock_index(priv->ptp.clock);

        return 0;
}

static const struct ethtool_ops ravb_ethtool_ops = {
        .get_settings           = ravb_get_settings,
        .set_settings           = ravb_set_settings,
        .nway_reset             = ravb_nway_reset,
        .get_msglevel           = ravb_get_msglevel,
        .set_msglevel           = ravb_set_msglevel,
        .get_link               = ethtool_op_get_link,
        .get_strings            = ravb_get_strings,
        .get_ethtool_stats      = ravb_get_ethtool_stats,
        .get_sset_count         = ravb_get_sset_count,
        .get_ringparam          = ravb_get_ringparam,
        .set_ringparam          = ravb_set_ringparam,
        .get_ts_info            = ravb_get_ts_info,
};

/* Network device open function for Ethernet AVB */
static int ravb_open(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        int error;

        napi_enable(&priv->napi[RAVB_BE]);
        napi_enable(&priv->napi[RAVB_NC]);

        error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, ndev->name,
                            ndev);
        if (error) {
                netdev_err(ndev, "cannot request IRQ\n");
                goto out_napi_off;
        }

        /* Device init */
        error = ravb_dmac_init(ndev);
        if (error)
                goto out_free_irq;
        ravb_emac_init(ndev);

        /* Initialise PTP Clock driver */
        ravb_ptp_init(ndev, priv->pdev);

        netif_tx_start_all_queues(ndev);

        /* PHY control start */
        error = ravb_phy_start(ndev);
        if (error)
                goto out_ptp_stop;

        return 0;

out_ptp_stop:
        /* Stop PTP Clock driver */
        ravb_ptp_stop(ndev);
out_free_irq:
        free_irq(ndev->irq, ndev);
out_napi_off:
        napi_disable(&priv->napi[RAVB_NC]);
        napi_disable(&priv->napi[RAVB_BE]);
        return error;
}

/* Timeout function for Ethernet AVB */
static void ravb_tx_timeout(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);

        netif_err(priv, tx_err, ndev,
                  "transmit timed out, status %08x, resetting...\n",
                  ravb_read(ndev, ISS));

        /* Increment the tx_errors counter */
        ndev->stats.tx_errors++;

        schedule_work(&priv->work);
}

static void ravb_tx_timeout_work(struct work_struct *work)
{
        struct ravb_private *priv = container_of(work, struct ravb_private,
                                                 work);
        struct net_device *ndev = priv->ndev;

        netif_tx_stop_all_queues(ndev);

        /* Stop PTP Clock driver */
        ravb_ptp_stop(ndev);

        /* Wait for the DMA to stop */
        ravb_stop_dma(ndev);

        ravb_ring_free(ndev, RAVB_BE);
        ravb_ring_free(ndev, RAVB_NC);

        /* Device init */
        ravb_dmac_init(ndev);
        ravb_emac_init(ndev);

        /* Initialise PTP Clock driver */
        ravb_ptp_init(ndev, priv->pdev);

        netif_tx_start_all_queues(ndev);
}

/* Packet transmit function for Ethernet AVB */
static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        u16 q = skb_get_queue_mapping(skb);
        struct ravb_tstamp_skb *ts_skb;
        struct ravb_tx_desc *desc;
        unsigned long flags;
        u32 dma_addr;
        void *buffer;
        u32 entry;
        u32 len;

        spin_lock_irqsave(&priv->lock, flags);
        if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
            NUM_TX_DESC) {
                netif_err(priv, tx_queued, ndev,
                          "still transmitting with the full ring!\n");
                netif_stop_subqueue(ndev, q);
                spin_unlock_irqrestore(&priv->lock, flags);
                return NETDEV_TX_BUSY;
        }
        entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
        priv->tx_skb[q][entry / NUM_TX_DESC] = skb;

        if (skb_put_padto(skb, ETH_ZLEN))
                goto drop;

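        /* Split the frame over two descriptors: copy the unaligned head of
         * the skb into a DPTR_ALIGN-aligned bounce buffer from tx_align for
         * the first descriptor, then DMA-map the remaining, now aligned,
         * part of the frame in place for the second one.
         */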
        buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
                 entry / NUM_TX_DESC * DPTR_ALIGN;
        len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
        memcpy(buffer, skb->data, len);
        dma_addr = dma_map_single(&ndev->dev, buffer, len, DMA_TO_DEVICE);
        if (dma_mapping_error(&ndev->dev, dma_addr))
                goto drop;

        desc = &priv->tx_ring[q][entry];
        desc->ds_tagl = cpu_to_le16(len);
        desc->dptr = cpu_to_le32(dma_addr);

        buffer = skb->data + len;
        len = skb->len - len;
        dma_addr = dma_map_single(&ndev->dev, buffer, len, DMA_TO_DEVICE);
        if (dma_mapping_error(&ndev->dev, dma_addr))
                goto unmap;

        desc++;
        desc->ds_tagl = cpu_to_le16(len);
        desc->dptr = cpu_to_le32(dma_addr);

        /* TX timestamp required */
        if (q == RAVB_NC) {
                ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
                if (!ts_skb) {
                        desc--;
                        dma_unmap_single(&ndev->dev, dma_addr, len,
                                         DMA_TO_DEVICE);
                        goto unmap;
                }
                ts_skb->skb = skb;
                ts_skb->tag = priv->ts_skb_tag++;
                priv->ts_skb_tag &= 0x3ff;
                list_add_tail(&ts_skb->list, &priv->ts_skb_list);

                /* TAG and timestamp required flag */
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                skb_tx_timestamp(skb);
                desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
                desc->ds_tagl |= le16_to_cpu(ts_skb->tag << 12);
        }

        /* Descriptor type must be set after all the above writes */
        dma_wmb();
        desc->die_dt = DT_FEND;
        desc--;
        desc->die_dt = DT_FSTART;

        ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR);

        priv->cur_tx[q] += NUM_TX_DESC;
        if (priv->cur_tx[q] - priv->dirty_tx[q] >
            (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
                netif_stop_subqueue(ndev, q);

exit:
        mmiowb();
        spin_unlock_irqrestore(&priv->lock, flags);
        return NETDEV_TX_OK;

unmap:
        dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
                         le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
drop:
        dev_kfree_skb_any(skb);
        priv->tx_skb[q][entry / NUM_TX_DESC] = NULL;
        goto exit;
}

static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
                             void *accel_priv, select_queue_fallback_t fallback)
{
        /* If the skb needs a TX timestamp, it is handled in the network
         * control queue.
         */
        return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
                                                               RAVB_BE;
}

static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        struct net_device_stats *nstats, *stats0, *stats1;

        nstats = &ndev->stats;
        stats0 = &priv->stats[RAVB_BE];
        stats1 = &priv->stats[RAVB_NC];

        nstats->tx_dropped += ravb_read(ndev, TROCR);
        ravb_write(ndev, 0, TROCR);     /* (write clear) */
        nstats->collisions += ravb_read(ndev, CDCR);
        ravb_write(ndev, 0, CDCR);      /* (write clear) */
        nstats->tx_carrier_errors += ravb_read(ndev, LCCR);
        ravb_write(ndev, 0, LCCR);      /* (write clear) */

        nstats->tx_carrier_errors += ravb_read(ndev, CERCR);
        ravb_write(ndev, 0, CERCR);     /* (write clear) */
        nstats->tx_carrier_errors += ravb_read(ndev, CEECR);
        ravb_write(ndev, 0, CEECR);     /* (write clear) */

        nstats->rx_packets = stats0->rx_packets + stats1->rx_packets;
        nstats->tx_packets = stats0->tx_packets + stats1->tx_packets;
        nstats->rx_bytes = stats0->rx_bytes + stats1->rx_bytes;
        nstats->tx_bytes = stats0->tx_bytes + stats1->tx_bytes;
        nstats->multicast = stats0->multicast + stats1->multicast;
        nstats->rx_errors = stats0->rx_errors + stats1->rx_errors;
        nstats->rx_crc_errors = stats0->rx_crc_errors + stats1->rx_crc_errors;
        nstats->rx_frame_errors =
                stats0->rx_frame_errors + stats1->rx_frame_errors;
        nstats->rx_length_errors =
                stats0->rx_length_errors + stats1->rx_length_errors;
        nstats->rx_missed_errors =
                stats0->rx_missed_errors + stats1->rx_missed_errors;
        nstats->rx_over_errors =
                stats0->rx_over_errors + stats1->rx_over_errors;

        return nstats;
}

/* Update promiscuous bit */
static void ravb_set_rx_mode(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        unsigned long flags;
        u32 ecmr;

        spin_lock_irqsave(&priv->lock, flags);
        ecmr = ravb_read(ndev, ECMR);
        if (ndev->flags & IFF_PROMISC)
                ecmr |=  ECMR_PRM;
        else
                ecmr &= ~ECMR_PRM;
        ravb_write(ndev, ecmr, ECMR);
        mmiowb();
        spin_unlock_irqrestore(&priv->lock, flags);
}

/* Device close function for Ethernet AVB */
static int ravb_close(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        struct ravb_tstamp_skb *ts_skb, *ts_skb2;

        netif_tx_stop_all_queues(ndev);

        /* Disable interrupts by clearing the interrupt masks. */
        ravb_write(ndev, 0, RIC0);
        ravb_write(ndev, 0, RIC1);
        ravb_write(ndev, 0, RIC2);
        ravb_write(ndev, 0, TIC);

        /* Stop PTP Clock driver */
        ravb_ptp_stop(ndev);

        /* Set the config mode to stop the AVB-DMAC's processes */
        if (ravb_stop_dma(ndev) < 0)
                netdev_err(ndev,
                           "device will be stopped after h/w processes are done.\n");

        /* Clear the timestamp list */
        list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
                list_del(&ts_skb->list);
                kfree(ts_skb);
        }

        /* PHY disconnect */
        if (priv->phydev) {
                phy_stop(priv->phydev);
                phy_disconnect(priv->phydev);
                priv->phydev = NULL;
        }

        free_irq(ndev->irq, ndev);

        napi_disable(&priv->napi[RAVB_NC]);
        napi_disable(&priv->napi[RAVB_BE]);

        /* Free all the skb's in the RX queue and the DMA buffers. */
        ravb_ring_free(ndev, RAVB_BE);
        ravb_ring_free(ndev, RAVB_NC);

        return 0;
}

1483static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
1484{
1485        struct ravb_private *priv = netdev_priv(ndev);
1486        struct hwtstamp_config config;
1487
1488        config.flags = 0;
1489        config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
1490                                                HWTSTAMP_TX_OFF;
1491        if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_V2_L2_EVENT)
1492                config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
1493        else if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_ALL)
1494                config.rx_filter = HWTSTAMP_FILTER_ALL;
1495        else
1496                config.rx_filter = HWTSTAMP_FILTER_NONE;
1497
1498        return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
1499                -EFAULT : 0;
1500}
1501
1502/* Control hardware time stamping */
1503static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
1504{
1505        struct ravb_private *priv = netdev_priv(ndev);
1506        struct hwtstamp_config config;
1507        u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED;
1508        u32 tstamp_tx_ctrl;
1509
1510        if (copy_from_user(&config, req->ifr_data, sizeof(config)))
1511                return -EFAULT;
1512
1513        /* Reserved for future extensions */
1514        if (config.flags)
1515                return -EINVAL;
1516
1517        switch (config.tx_type) {
1518        case HWTSTAMP_TX_OFF:
1519                tstamp_tx_ctrl = 0;
1520                break;
1521        case HWTSTAMP_TX_ON:
1522                tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED;
1523                break;
1524        default:
1525                return -ERANGE;
1526        }
1527
1528        switch (config.rx_filter) {
1529        case HWTSTAMP_FILTER_NONE:
1530                tstamp_rx_ctrl = 0;
1531                break;
1532        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1533                tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
1534                break;
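    	/* Any other requested filter is widened to time stamping all
    	 * frames; the widened setting is reported back to user space by
    	 * the copy_to_user() below.
    	 */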
1535        default:
1536                config.rx_filter = HWTSTAMP_FILTER_ALL;
1537                tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL;
1538        }
1539
1540        priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
1541        priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
1542
1543        return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
1544                -EFAULT : 0;
1545}
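
    /* For reference, a minimal user-space sketch of enabling time stamping
     * through the SIOCSHWTSTAMP ioctl served by ravb_hwtstamp_set() above.
     * The function name is illustrative; sock is any open socket, e.g.
     * socket(AF_INET, SOCK_DGRAM, 0), and error handling is omitted:
     *
     *	#include <linux/net_tstamp.h>
     *	#include <linux/sockios.h>
     *	#include <net/if.h>
     *	#include <string.h>
     *	#include <sys/ioctl.h>
     *
     *	int enable_hwtstamp(int sock, const char *ifname)
     *	{
     *		struct hwtstamp_config cfg;
     *		struct ifreq ifr;
     *
     *		memset(&cfg, 0, sizeof(cfg));
     *		cfg.tx_type = HWTSTAMP_TX_ON;
     *		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
     *
     *		memset(&ifr, 0, sizeof(ifr));
     *		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
     *		ifr.ifr_data = (char *)&cfg;
     *
     *		return ioctl(sock, SIOCSHWTSTAMP, &ifr);
     *	}
     */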
1546
1547/* ioctl to device function */
1548static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
1549{
1550        struct ravb_private *priv = netdev_priv(ndev);
1551        struct phy_device *phydev = priv->phydev;
1552
1553        if (!netif_running(ndev))
1554                return -EINVAL;
1555
1556        if (!phydev)
1557                return -ENODEV;
1558
1559        switch (cmd) {
1560        case SIOCGHWTSTAMP:
1561                return ravb_hwtstamp_get(ndev, req);
1562        case SIOCSHWTSTAMP:
1563                return ravb_hwtstamp_set(ndev, req);
1564        }
1565
1566        return phy_mii_ioctl(phydev, req, cmd);
1567}
1568
1569static const struct net_device_ops ravb_netdev_ops = {
1570        .ndo_open               = ravb_open,
1571        .ndo_stop               = ravb_close,
1572        .ndo_start_xmit         = ravb_start_xmit,
1573        .ndo_select_queue       = ravb_select_queue,
1574        .ndo_get_stats          = ravb_get_stats,
1575        .ndo_set_rx_mode        = ravb_set_rx_mode,
1576        .ndo_tx_timeout         = ravb_tx_timeout,
1577        .ndo_do_ioctl           = ravb_do_ioctl,
1578        .ndo_validate_addr      = eth_validate_addr,
1579        .ndo_set_mac_address    = eth_mac_addr,
1580        .ndo_change_mtu         = eth_change_mtu,
1581};
1582
1583/* MDIO bus init function */
1584static int ravb_mdio_init(struct ravb_private *priv)
1585{
1586        struct platform_device *pdev = priv->pdev;
1587        struct device *dev = &pdev->dev;
1588        int error;
1589
1590        /* Bitbang init */
1591        priv->mdiobb.ops = &bb_ops;
1592
1593        /* MII controller setting */
1594        priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
1595        if (!priv->mii_bus)
1596                return -ENOMEM;
1597
1598        /* Hook up MII support for ethtool */
1599        priv->mii_bus->name = "ravb_mii";
1600        priv->mii_bus->parent = dev;
1601        snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
1602                 pdev->name, pdev->id);
1603
1604        /* Register MDIO bus */
1605        error = of_mdiobus_register(priv->mii_bus, dev->of_node);
1606        if (error)
1607                goto out_free_bus;
1608
1609        return 0;
1610
1611out_free_bus:
1612        free_mdio_bitbang(priv->mii_bus);
1613        return error;
1614}
1615
1616/* MDIO bus release function */
1617static int ravb_mdio_release(struct ravb_private *priv)
1618{
1619        /* Unregister mdio bus */
1620        mdiobus_unregister(priv->mii_bus);
1621
1622        /* Free bitbang info */
1623        free_mdio_bitbang(priv->mii_bus);
1624
1625        return 0;
1626}
1627
1628static int ravb_probe(struct platform_device *pdev)
1629{
1630        struct device_node *np = pdev->dev.of_node;
1631        struct ravb_private *priv;
1632        struct net_device *ndev;
1633        int error, irq, q;
1634        struct resource *res;
1635
1636        if (!np) {
1637                dev_err(&pdev->dev,
1638                        "this driver is required to be instantiated from device tree\n");
1639                return -EINVAL;
1640        }
1641
1642        /* Get base address */
1643        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1644        if (!res) {
1645                dev_err(&pdev->dev, "invalid resource\n");
1646                return -EINVAL;
1647        }
1648
1649        ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
1650                                  NUM_TX_QUEUE, NUM_RX_QUEUE);
1651        if (!ndev)
1652                return -ENOMEM;
1653
1654        pm_runtime_enable(&pdev->dev);
1655        pm_runtime_get_sync(&pdev->dev);
1656
1657        /* The Ether-specific entries in the device structure. */
1658        ndev->base_addr = res->start;
1659        ndev->dma = -1;
1660        irq = platform_get_irq(pdev, 0);
1661        if (irq < 0) {
1662                error = irq;
1663                goto out_release;
1664        }
1665        ndev->irq = irq;
1666
1667        SET_NETDEV_DEV(ndev, &pdev->dev);
1668
1669        priv = netdev_priv(ndev);
1670        priv->ndev = ndev;
1671        priv->pdev = pdev;
1672        priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
1673        priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
1674        priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
1675        priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
1676        priv->addr = devm_ioremap_resource(&pdev->dev, res);
1677        if (IS_ERR(priv->addr)) {
1678                error = PTR_ERR(priv->addr);
1679                goto out_release;
1680        }
1681
1682        spin_lock_init(&priv->lock);
1683        INIT_WORK(&priv->work, ravb_tx_timeout_work);
1684
1685        priv->phy_interface = of_get_phy_mode(np);
1686
1687        priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
1688        priv->avb_link_active_low =
1689                of_property_read_bool(np, "renesas,ether-link-active-low");
1690
1691        /* Install netdev and ethtool operations */
1692        ndev->netdev_ops = &ravb_netdev_ops;
1693        ndev->ethtool_ops = &ravb_ethtool_ops;
1694
1695        /* Set AVB config mode */
1696        ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG,
1697                   CCC);
1698
1699        /* Set CSEL value */
1700        ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_CSEL) | CCC_CSEL_HPB,
1701                   CCC);
1702
1703        /* Set GTI value */
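    	/* GTI.TIV is the gPTP timer increment: the clock period in
    	 * nanoseconds as a fixed-point value with 20 fractional bits.
    	 * The CSEL setting above selects the HPB clock, assumed here to
    	 * run at 130 MHz, giving a period of 1000 / 130 ~= 7.69 ns,
    	 * i.e. (1000 << 20) / 130.
    	 */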
1704        ravb_write(ndev, ((1000 << 20) / 130) & GTI_TIV, GTI);
1705
1706        /* Request GTI loading */
1707        ravb_write(ndev, ravb_read(ndev, GCCR) | GCCR_LTI, GCCR);
1708
1709        /* Allocate descriptor base address table */
1710        priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
1711        priv->desc_bat = dma_alloc_coherent(&pdev->dev, priv->desc_bat_size,
1712                                            &priv->desc_bat_dma, GFP_KERNEL);
1713        if (!priv->desc_bat) {
1714                dev_err(&ndev->dev,
1715                        "Cannot allocate desc base address table (size %d bytes)\n",
1716                        priv->desc_bat_size);
1717                error = -ENOMEM;
1718                goto out_release;
1719        }
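    	/* Mark every descriptor base address table entry as an "end of
    	 * set" (DT_EOS) descriptor; the entries for the queues actually
    	 * used are set to point at real descriptor lists when the
    	 * AVB-DMAC is initialized.
    	 */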
1720        for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
1721                priv->desc_bat[q].die_dt = DT_EOS;
1722        ravb_write(ndev, priv->desc_bat_dma, DBAT);
1723
1724        /* Initialise HW timestamp list */
1725        INIT_LIST_HEAD(&priv->ts_skb_list);
1726
1727        /* Debug message level */
1728        priv->msg_enable = RAVB_DEF_MSG_ENABLE;
1729
1730        /* Read and set MAC address */
1731        ravb_read_mac_address(ndev, of_get_mac_address(np));
1732        if (!is_valid_ether_addr(ndev->dev_addr)) {
1733                dev_warn(&pdev->dev,
1734                         "no valid MAC address supplied, using a random one\n");
1735                eth_hw_addr_random(ndev);
1736        }
1737
1738        /* MDIO bus init */
1739        error = ravb_mdio_init(priv);
1740        if (error) {
1741                dev_err(&ndev->dev, "failed to initialize MDIO\n");
1742                goto out_dma_free;
1743        }
1744
1745        netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
1746        netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
1747
1748        /* Network device register */
1749        error = register_netdev(ndev);
1750        if (error)
1751                goto out_napi_del;
1752
1753        /* Print device information */
1754        netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
1755                    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
1756
1757        platform_set_drvdata(pdev, ndev);
1758
1759        return 0;
1760
1761out_napi_del:
1762        netif_napi_del(&priv->napi[RAVB_NC]);
1763        netif_napi_del(&priv->napi[RAVB_BE]);
1764        ravb_mdio_release(priv);
1765out_dma_free:
1766        dma_free_coherent(&pdev->dev, priv->desc_bat_size, priv->desc_bat,
1767                          priv->desc_bat_dma);
1768out_release:
1769        if (ndev)
1770                free_netdev(ndev);
1771
1772        pm_runtime_put(&pdev->dev);
1773        pm_runtime_disable(&pdev->dev);
1774        return error;
1775}
1776
1777static int ravb_remove(struct platform_device *pdev)
1778{
1779        struct net_device *ndev = platform_get_drvdata(pdev);
1780        struct ravb_private *priv = netdev_priv(ndev);
1781
1782        unregister_netdev(ndev);
1783        netif_napi_del(&priv->napi[RAVB_NC]);
1784        netif_napi_del(&priv->napi[RAVB_BE]);
1785        ravb_mdio_release(priv);
1786        dma_free_coherent(&pdev->dev, priv->desc_bat_size, priv->desc_bat,
1787                          priv->desc_bat_dma);
1788        /* Set reset mode */
1789        ravb_write(ndev, CCC_OPC_RESET, CCC);
1790        pm_runtime_put_sync(&pdev->dev);
1791        pm_runtime_disable(&pdev->dev);
1792        free_netdev(ndev);
1793        platform_set_drvdata(pdev, NULL);
1794
1795        return 0;
1796}
1797
1798#ifdef CONFIG_PM
1799static int ravb_runtime_nop(struct device *dev)
1800{
1801        /* Runtime PM callback shared between ->runtime_suspend()
1802         * and ->runtime_resume(). Simply returns success.
1803         *
1804         * This driver re-initializes all registers after
1805         * pm_runtime_get_sync() anyway so there is no need
1806         * to save and restore registers here.
1807         */
1808        return 0;
1809}
1810
1811static const struct dev_pm_ops ravb_dev_pm_ops = {
1812        .runtime_suspend = ravb_runtime_nop,
1813        .runtime_resume = ravb_runtime_nop,
1814};
1815
1816#define RAVB_PM_OPS (&ravb_dev_pm_ops)
1817#else
1818#define RAVB_PM_OPS NULL
1819#endif
1820
1821static const struct of_device_id ravb_match_table[] = {
1822        { .compatible = "renesas,etheravb-r8a7790" },
1823        { .compatible = "renesas,etheravb-r8a7794" },
1824        { }
1825};
1826MODULE_DEVICE_TABLE(of, ravb_match_table);
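
    /* For illustration, a device tree node matching this table might look as
     * follows. This is a sketch only -- the interrupt specifier, clocks and
     * PHY details are placeholders; see the SoC .dtsi and
     * Documentation/devicetree/bindings/net/renesas,ravb.txt. Note that the
     * PHY is a child of the ethernet node, since ravb_mdio_init() registers
     * the MDIO bus with the ethernet node itself:
     *
     *	ethernet@e6800000 {
     *		compatible = "renesas,etheravb-r8a7794";
     *		reg = <0 0xe6800000 0 0x800>;
     *		interrupts = <...>;
     *		clocks = <...>;
     *		phy-mode = "mii";
     *		phy-handle = <&phy0>;
     *		#address-cells = <1>;
     *		#size-cells = <0>;
     *
     *		phy0: ethernet-phy@0 {
     *			reg = <0>;
     *		};
     *	};
     */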
1827
1828static struct platform_driver ravb_driver = {
1829        .probe          = ravb_probe,
1830        .remove         = ravb_remove,
1831        .driver = {
1832                .name   = "ravb",
1833                .pm     = RAVB_PM_OPS,
1834                .of_match_table = ravb_match_table,
1835        },
1836};
1837
1838module_platform_driver(ravb_driver);
1839
1840MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai");
1841MODULE_DESCRIPTION("Renesas Ethernet AVB driver");
1842MODULE_LICENSE("GPL v2");
1843