linux/drivers/net/ethernet/renesas/ravb_main.c
// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet AVB device driver
 *
 * Copyright (C) 2014-2019 Renesas Electronics Corporation
 * Copyright (C) 2015 Renesas Solutions Corp.
 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
 *
 * Based on the SuperH Ethernet driver
 */

#include <linux/cache.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sys_soc.h>
#include <linux/reset.h>
#include <linux/math64.h>

#include "ravb.h"

#define RAVB_DEF_MSG_ENABLE \
                (NETIF_MSG_LINK   | \
                 NETIF_MSG_TIMER  | \
                 NETIF_MSG_RX_ERR | \
                 NETIF_MSG_TX_ERR)

static const char *ravb_rx_irqs[NUM_RX_QUEUE] = {
        "ch0", /* RAVB_BE */
        "ch1", /* RAVB_NC */
};

static const char *ravb_tx_irqs[NUM_TX_QUEUE] = {
        "ch18", /* RAVB_BE */
        "ch19", /* RAVB_NC */
};

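/* Read-modify-write a register: the bits in 'clear' are cleared, then the
 * bits in 'set' are set, e.g. ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG)
 * rewrites only the operating mode field of CCC.
 */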
void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
                 u32 set)
{
        ravb_write(ndev, (ravb_read(ndev, reg) & ~clear) | set, reg);
}

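/* Busy-wait for the masked bits of a register to reach 'value', polling
 * every 10 us for at most 10000 iterations (~100 ms) before giving up
 * with -ETIMEDOUT.
 */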
int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
{
        int i;

        for (i = 0; i < 10000; i++) {
                if ((ravb_read(ndev, reg) & mask) == value)
                        return 0;
                udelay(10);
        }
        return -ETIMEDOUT;
}

static int ravb_config(struct net_device *ndev)
{
        int error;

        /* Set config mode */
        ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
        /* Check if the operating mode is changed to the config mode */
        error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
        if (error)
                netdev_err(ndev, "failed to switch device to config mode\n");

        return error;
}

static void ravb_set_rate_gbeth(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);

        switch (priv->speed) {
        case 10:                /* 10BASE */
                ravb_write(ndev, GBETH_GECMR_SPEED_10, GECMR);
                break;
        case 100:               /* 100BASE */
                ravb_write(ndev, GBETH_GECMR_SPEED_100, GECMR);
                break;
        case 1000:              /* 1000BASE */
                ravb_write(ndev, GBETH_GECMR_SPEED_1000, GECMR);
                break;
        }
}

static void ravb_set_rate_rcar(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);

        switch (priv->speed) {
        case 100:               /* 100BASE */
                ravb_write(ndev, GECMR_SPEED_100, GECMR);
                break;
        case 1000:              /* 1000BASE */
                ravb_write(ndev, GECMR_SPEED_1000, GECMR);
                break;
        }
}

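/* Advance the skb data pointer so that RX buffers handed to the DMAC start
 * on a RAVB_ALIGN boundary.
 */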
static void ravb_set_buffer_align(struct sk_buff *skb)
{
        u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);

        if (reserve)
                skb_reserve(skb, RAVB_ALIGN - reserve);
}

/* Get MAC address from the MAC address registers
 *
 * The Ethernet AVB device doesn't have a ROM for the MAC address.
 * This function gets the MAC address that was set by a bootloader.
 */
static void ravb_read_mac_address(struct device_node *np,
                                  struct net_device *ndev)
{
        int ret;

        ret = of_get_ethdev_address(np, ndev);
        if (ret) {
                u32 mahr = ravb_read(ndev, MAHR);
                u32 malr = ravb_read(ndev, MALR);
                u8 addr[ETH_ALEN];

                addr[0] = (mahr >> 24) & 0xFF;
                addr[1] = (mahr >> 16) & 0xFF;
                addr[2] = (mahr >>  8) & 0xFF;
                addr[3] = (mahr >>  0) & 0xFF;
                addr[4] = (malr >>  8) & 0xFF;
                addr[5] = (malr >>  0) & 0xFF;
                eth_hw_addr_set(ndev, addr);
        }
}

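/* MDIO bus bit-banging helper: set or clear the given PIR bit(s); used by
 * the MDC/MMD/MDO pin control callbacks below.
 */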
static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
{
        struct ravb_private *priv = container_of(ctrl, struct ravb_private,
                                                 mdiobb);

        ravb_modify(priv->ndev, PIR, mask, set ? mask : 0);
}

/* MDC pin control */
static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
{
        ravb_mdio_ctrl(ctrl, PIR_MDC, level);
}

/* Data I/O pin control */
static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
{
        ravb_mdio_ctrl(ctrl, PIR_MMD, output);
}

/* Set data bit */
static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
{
        ravb_mdio_ctrl(ctrl, PIR_MDO, value);
}

/* Get data bit */
static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
{
        struct ravb_private *priv = container_of(ctrl, struct ravb_private,
                                                 mdiobb);

        return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
}

/* MDIO bus control struct */
static const struct mdiobb_ops bb_ops = {
        .owner = THIS_MODULE,
        .set_mdc = ravb_set_mdc,
        .set_mdio_dir = ravb_set_mdio_dir,
        .set_mdio_data = ravb_set_mdio_data,
        .get_mdio_data = ravb_get_mdio_data,
};

/* Free TX skb function for AVB-IP */
static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
{
        struct ravb_private *priv = netdev_priv(ndev);
        struct net_device_stats *stats = &priv->stats[q];
        unsigned int num_tx_desc = priv->num_tx_desc;
        struct ravb_tx_desc *desc;
        unsigned int entry;
        int free_num = 0;
        u32 size;

        for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
                bool txed;

                entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
                                             num_tx_desc);
                desc = &priv->tx_ring[q][entry];
                txed = desc->die_dt == DT_FEMPTY;
                if (free_txed_only && !txed)
                        break;
                /* Descriptor type must be checked before all other reads */
                dma_rmb();
                size = le16_to_cpu(desc->ds_tagl) & TX_DS;
                /* Free the original skb. */
                if (priv->tx_skb[q][entry / num_tx_desc]) {
                        dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
                                         size, DMA_TO_DEVICE);
                        /* Last packet descriptor? */
                        if (entry % num_tx_desc == num_tx_desc - 1) {
                                entry /= num_tx_desc;
                                dev_kfree_skb_any(priv->tx_skb[q][entry]);
                                priv->tx_skb[q][entry] = NULL;
                                if (txed)
                                        stats->tx_packets++;
                        }
                        free_num++;
                }
                if (txed)
                        stats->tx_bytes += size;
                desc->die_dt = DT_EEMPTY;
        }
        return free_num;
}

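/* Unmap and free the GbEth RX descriptor ring; the RX skbs themselves are
 * freed separately in ravb_ring_free().
 */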
static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
{
        struct ravb_private *priv = netdev_priv(ndev);
        unsigned int ring_size;
        unsigned int i;

        if (!priv->gbeth_rx_ring)
                return;

        for (i = 0; i < priv->num_rx_ring[q]; i++) {
                struct ravb_rx_desc *desc = &priv->gbeth_rx_ring[i];

                if (!dma_mapping_error(ndev->dev.parent,
                                       le32_to_cpu(desc->dptr)))
                        dma_unmap_single(ndev->dev.parent,
                                         le32_to_cpu(desc->dptr),
                                         GBETH_RX_BUFF_MAX,
                                         DMA_FROM_DEVICE);
        }
        ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
        dma_free_coherent(ndev->dev.parent, ring_size, priv->gbeth_rx_ring,
                          priv->rx_desc_dma[q]);
        priv->gbeth_rx_ring = NULL;
}

static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
{
        struct ravb_private *priv = netdev_priv(ndev);
        unsigned int ring_size;
        unsigned int i;

        if (!priv->rx_ring[q])
                return;

        for (i = 0; i < priv->num_rx_ring[q]; i++) {
                struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];

                if (!dma_mapping_error(ndev->dev.parent,
                                       le32_to_cpu(desc->dptr)))
                        dma_unmap_single(ndev->dev.parent,
                                         le32_to_cpu(desc->dptr),
                                         RX_BUF_SZ,
                                         DMA_FROM_DEVICE);
        }
        ring_size = sizeof(struct ravb_ex_rx_desc) *
                    (priv->num_rx_ring[q] + 1);
        dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
                          priv->rx_desc_dma[q]);
        priv->rx_ring[q] = NULL;
}

/* Free skb's and DMA buffers for Ethernet AVB */
static void ravb_ring_free(struct net_device *ndev, int q)
{
        struct ravb_private *priv = netdev_priv(ndev);
        const struct ravb_hw_info *info = priv->info;
        unsigned int num_tx_desc = priv->num_tx_desc;
        unsigned int ring_size;
        unsigned int i;

        info->rx_ring_free(ndev, q);

        if (priv->tx_ring[q]) {
                ravb_tx_free(ndev, q, false);

                ring_size = sizeof(struct ravb_tx_desc) *
                            (priv->num_tx_ring[q] * num_tx_desc + 1);
                dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
                                  priv->tx_desc_dma[q]);
                priv->tx_ring[q] = NULL;
        }

        /* Free RX skb ringbuffer */
        if (priv->rx_skb[q]) {
                for (i = 0; i < priv->num_rx_ring[q]; i++)
                        dev_kfree_skb(priv->rx_skb[q][i]);
        }
        kfree(priv->rx_skb[q]);
        priv->rx_skb[q] = NULL;

        /* Free aligned TX buffers */
        kfree(priv->tx_align[q]);
        priv->tx_align[q] = NULL;

        /* Free TX skb ringbuffer.
         * SKBs are freed by ravb_tx_free() call above.
         */
        kfree(priv->tx_skb[q]);
        priv->tx_skb[q] = NULL;
}

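/* Fill the GbEth RX ring with DMA-mapped skb buffers and terminate it with
 * a link descriptor pointing back to the ring base.
 */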
static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
{
        struct ravb_private *priv = netdev_priv(ndev);
        struct ravb_rx_desc *rx_desc;
        unsigned int rx_ring_size;
        dma_addr_t dma_addr;
        unsigned int i;

        rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
        memset(priv->gbeth_rx_ring, 0, rx_ring_size);
        /* Build RX ring buffer */
        for (i = 0; i < priv->num_rx_ring[q]; i++) {
                /* RX descriptor */
                rx_desc = &priv->gbeth_rx_ring[i];
                rx_desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
                dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
                                          GBETH_RX_BUFF_MAX,
                                          DMA_FROM_DEVICE);
                /* We just set the data size to 0 for a failed mapping which
                 * should prevent DMA from happening...
                 */
                if (dma_mapping_error(ndev->dev.parent, dma_addr))
                        rx_desc->ds_cc = cpu_to_le16(0);
                rx_desc->dptr = cpu_to_le32(dma_addr);
                rx_desc->die_dt = DT_FEMPTY;
        }
        rx_desc = &priv->gbeth_rx_ring[i];
        rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
        rx_desc->die_dt = DT_LINKFIX; /* type */
}

static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
{
        struct ravb_private *priv = netdev_priv(ndev);
        struct ravb_ex_rx_desc *rx_desc;
        unsigned int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
        dma_addr_t dma_addr;
        unsigned int i;

        memset(priv->rx_ring[q], 0, rx_ring_size);
        /* Build RX ring buffer */
        for (i = 0; i < priv->num_rx_ring[q]; i++) {
                /* RX descriptor */
                rx_desc = &priv->rx_ring[q][i];
                rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
                dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
                                          RX_BUF_SZ,
                                          DMA_FROM_DEVICE);
                /* We just set the data size to 0 for a failed mapping which
                 * should prevent DMA from happening...
                 */
                if (dma_mapping_error(ndev->dev.parent, dma_addr))
                        rx_desc->ds_cc = cpu_to_le16(0);
                rx_desc->dptr = cpu_to_le32(dma_addr);
                rx_desc->die_dt = DT_FEMPTY;
        }
        rx_desc = &priv->rx_ring[q][i];
        rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
        rx_desc->die_dt = DT_LINKFIX; /* type */
}

/* Format skb and descriptor buffer for Ethernet AVB */
static void ravb_ring_format(struct net_device *ndev, int q)
{
        struct ravb_private *priv = netdev_priv(ndev);
        const struct ravb_hw_info *info = priv->info;
        unsigned int num_tx_desc = priv->num_tx_desc;
        struct ravb_tx_desc *tx_desc;
        struct ravb_desc *desc;
        unsigned int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
                                    num_tx_desc;
        unsigned int i;

        priv->cur_rx[q] = 0;
        priv->cur_tx[q] = 0;
        priv->dirty_rx[q] = 0;
        priv->dirty_tx[q] = 0;

        info->rx_ring_format(ndev, q);

        memset(priv->tx_ring[q], 0, tx_ring_size);
        /* Build TX ring buffer */
        for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
             i++, tx_desc++) {
                tx_desc->die_dt = DT_EEMPTY;
                if (num_tx_desc > 1) {
                        tx_desc++;
                        tx_desc->die_dt = DT_EEMPTY;
                }
        }
        tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
        tx_desc->die_dt = DT_LINKFIX; /* type */

        /* RX descriptor base address for best effort */
        desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
        desc->die_dt = DT_LINKFIX; /* type */
        desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);

        /* TX descriptor base address for best effort */
        desc = &priv->desc_bat[q];
        desc->die_dt = DT_LINKFIX; /* type */
        desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
}

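/* Allocate the GbEth RX descriptor ring; one extra descriptor is reserved
 * for the terminating link descriptor.
 */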
static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q)
{
        struct ravb_private *priv = netdev_priv(ndev);
        unsigned int ring_size;

        ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);

        priv->gbeth_rx_ring = dma_alloc_coherent(ndev->dev.parent, ring_size,
                                                 &priv->rx_desc_dma[q],
                                                 GFP_KERNEL);
        return priv->gbeth_rx_ring;
}

static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
{
        struct ravb_private *priv = netdev_priv(ndev);
        unsigned int ring_size;

        ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);

        priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
                                              &priv->rx_desc_dma[q],
                                              GFP_KERNEL);
        return priv->rx_ring[q];
}

/* Init skb and descriptor buffer for Ethernet AVB */
static int ravb_ring_init(struct net_device *ndev, int q)
{
        struct ravb_private *priv = netdev_priv(ndev);
        const struct ravb_hw_info *info = priv->info;
        unsigned int num_tx_desc = priv->num_tx_desc;
        unsigned int ring_size;
        struct sk_buff *skb;
        unsigned int i;

        /* Allocate RX and TX skb rings */
        priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
                                  sizeof(*priv->rx_skb[q]), GFP_KERNEL);
        priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
                                  sizeof(*priv->tx_skb[q]), GFP_KERNEL);
        if (!priv->rx_skb[q] || !priv->tx_skb[q])
                goto error;

        for (i = 0; i < priv->num_rx_ring[q]; i++) {
                skb = netdev_alloc_skb(ndev, info->max_rx_len);
                if (!skb)
                        goto error;
                ravb_set_buffer_align(skb);
                priv->rx_skb[q][i] = skb;
        }

        if (num_tx_desc > 1) {
                /* Allocate rings for the aligned buffers */
                priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
                                            DPTR_ALIGN - 1, GFP_KERNEL);
                if (!priv->tx_align[q])
                        goto error;
        }

        /* Allocate all RX descriptors. */
        if (!info->alloc_rx_desc(ndev, q))
                goto error;

        priv->dirty_rx[q] = 0;

        /* Allocate all TX descriptors. */
        ring_size = sizeof(struct ravb_tx_desc) *
                    (priv->num_tx_ring[q] * num_tx_desc + 1);
        priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
                                              &priv->tx_desc_dma[q],
                                              GFP_KERNEL);
        if (!priv->tx_ring[q])
                goto error;

        return 0;

error:
        ravb_ring_free(ndev, q);

        return -ENOMEM;
}

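/* E-MAC init for GbEth: program the frame length limit, the EMAC mode,
 * the line speed, the MAC address, and the E-MAC status/interrupt
 * registers.
 */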
static void ravb_emac_init_gbeth(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);

        /* Receive frame limit set register */
        ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR);

        /* EMAC Mode: PAUSE prohibition; Duplex; TX; RX; CRC Pass Through */
        ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? ECMR_DM : 0) |
                         ECMR_TE | ECMR_RE | ECMR_RCPT |
                         ECMR_TXF | ECMR_RXF, ECMR);

        ravb_set_rate_gbeth(ndev);

        /* Set MAC address */
        ravb_write(ndev,
                   (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
                   (ndev->dev_addr[2] << 8)  | (ndev->dev_addr[3]), MAHR);
        ravb_write(ndev, (ndev->dev_addr[4] << 8)  | (ndev->dev_addr[5]), MALR);

        /* E-MAC status register clear */
        ravb_write(ndev, ECSR_ICD | ECSR_LCHNG | ECSR_PFRI, ECSR);
        ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0);

        /* E-MAC interrupt enable register */
        ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);

        ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, CXR31_SEL_LINK0);
}

static void ravb_emac_init_rcar(struct net_device *ndev)
{
        /* Receive frame limit set register */
        ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);

        /* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
        ravb_write(ndev, ECMR_ZPF | ECMR_DM |
                   (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
                   ECMR_TE | ECMR_RE, ECMR);

        ravb_set_rate_rcar(ndev);

        /* Set MAC address */
        ravb_write(ndev,
                   (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
                   (ndev->dev_addr[2] << 8)  | (ndev->dev_addr[3]), MAHR);
        ravb_write(ndev,
                   (ndev->dev_addr[4] << 8)  | (ndev->dev_addr[5]), MALR);

        /* E-MAC status register clear */
        ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);

        /* E-MAC interrupt enable register */
        ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
}

/* E-MAC init function */
static void ravb_emac_init(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        const struct ravb_hw_info *info = priv->info;

        info->emac_init(ndev);
}

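/* DMAC init for GbEth: only the single best-effort queue exists here, so
 * just that ring is set up before the RX/TX registers and interrupt
 * enables are programmed.
 */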
static int ravb_dmac_init_gbeth(struct net_device *ndev)
{
        int error;

        error = ravb_ring_init(ndev, RAVB_BE);
        if (error)
                return error;

        /* Descriptor format */
        ravb_ring_format(ndev, RAVB_BE);

        /* Set DMAC RX */
        ravb_write(ndev, 0x60000000, RCR);

        /* Set Max Frame Length (RTC) */
        ravb_write(ndev, 0x7ffc0000 | GBETH_RX_BUFF_MAX, RTC);

        /* Set FIFO size */
        ravb_write(ndev, 0x00222200, TGC);

        ravb_write(ndev, 0, TCCR);

        /* Frame receive */
        ravb_write(ndev, RIC0_FRE0, RIC0);
        /* Disable FIFO full warning */
        ravb_write(ndev, 0x0, RIC1);
        /* Receive FIFO full error, descriptor empty */
        ravb_write(ndev, RIC2_QFE0 | RIC2_RFFE, RIC2);

        ravb_write(ndev, TIC_FTE0, TIC);

        return 0;
}

static int ravb_dmac_init_rcar(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        const struct ravb_hw_info *info = priv->info;
        int error;

        error = ravb_ring_init(ndev, RAVB_BE);
        if (error)
                return error;
        error = ravb_ring_init(ndev, RAVB_NC);
        if (error) {
                ravb_ring_free(ndev, RAVB_BE);
                return error;
        }

        /* Descriptor format */
        ravb_ring_format(ndev, RAVB_BE);
        ravb_ring_format(ndev, RAVB_NC);

        /* Set AVB RX */
        ravb_write(ndev,
                   RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);

        /* Set FIFO size */
        ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC);

        /* Timestamp enable */
        ravb_write(ndev, TCCR_TFEN, TCCR);

        /* Interrupt init: */
        if (info->multi_irqs) {
                /* Clear DIL.DPLx */
                ravb_write(ndev, 0, DIL);
                /* Set queue specific interrupt */
                ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE);
        }
        /* Frame receive */
        ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
        /* Disable FIFO full warning */
        ravb_write(ndev, 0, RIC1);
        /* Receive FIFO full error, descriptor empty */
        ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
        /* Frame transmitted, timestamp FIFO updated */
        ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);

        return 0;
}

/* Device init function for Ethernet AVB */
static int ravb_dmac_init(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        const struct ravb_hw_info *info = priv->info;
        int error;

        /* Set CONFIG mode */
        error = ravb_config(ndev);
        if (error)
                return error;

        error = info->dmac_init(ndev);
        if (error)
                return error;

        /* Setting the control will start the AVB-DMAC process. */
        ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION);

        return 0;
}

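/* Drain the TX timestamp FIFO and deliver each timestamp to the skb that
 * was queued with the matching tag; entries with stale tags are dropped.
 */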
static void ravb_get_tx_tstamp(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        struct ravb_tstamp_skb *ts_skb, *ts_skb2;
        struct skb_shared_hwtstamps shhwtstamps;
        struct sk_buff *skb;
        struct timespec64 ts;
        u16 tag, tfa_tag;
        int count;
        u32 tfa2;

        count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8;
        while (count--) {
                tfa2 = ravb_read(ndev, TFA2);
                tfa_tag = (tfa2 & TFA2_TST) >> 16;
                ts.tv_nsec = (u64)ravb_read(ndev, TFA0);
                ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) |
                            ravb_read(ndev, TFA1);
                memset(&shhwtstamps, 0, sizeof(shhwtstamps));
                shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
                list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list,
                                         list) {
                        skb = ts_skb->skb;
                        tag = ts_skb->tag;
                        list_del(&ts_skb->list);
                        kfree(ts_skb);
                        if (tag == tfa_tag) {
                                skb_tstamp_tx(skb, &shhwtstamps);
                                dev_consume_skb_any(skb);
                                break;
                        } else {
                                dev_kfree_skb_any(skb);
                        }
                }
                ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
        }
}

static void ravb_rx_csum(struct sk_buff *skb)
{
        u8 *hw_csum;

        /* The hardware checksum is contained in sizeof(__sum16) (2) bytes
         * appended to packet data
         */
        if (unlikely(skb->len < sizeof(__sum16)))
                return;
        hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
        skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
        skb->ip_summed = CHECKSUM_COMPLETE;
        skb_trim(skb, skb->len - sizeof(__sum16));
}

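/* Detach the skb for the given GbEth RX entry and unmap its DMA buffer */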
static struct sk_buff *ravb_get_skb_gbeth(struct net_device *ndev, int entry,
                                          struct ravb_rx_desc *desc)
{
        struct ravb_private *priv = netdev_priv(ndev);
        struct sk_buff *skb;

        skb = priv->rx_skb[RAVB_BE][entry];
        priv->rx_skb[RAVB_BE][entry] = NULL;
        dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
                         ALIGN(GBETH_RX_BUFF_MAX, 16), DMA_FROM_DEVICE);

        return skb;
}

/* Packet receive function for Gigabit Ethernet */
static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
{
        struct ravb_private *priv = netdev_priv(ndev);
        const struct ravb_hw_info *info = priv->info;
        struct net_device_stats *stats;
        struct ravb_rx_desc *desc;
        struct sk_buff *skb;
        dma_addr_t dma_addr;
        u8  desc_status;
        int boguscnt;
        u16 pkt_len;
        u8  die_dt;
        int entry;
        int limit;

        entry = priv->cur_rx[q] % priv->num_rx_ring[q];
        boguscnt = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
        stats = &priv->stats[q];

        boguscnt = min(boguscnt, *quota);
        limit = boguscnt;
        desc = &priv->gbeth_rx_ring[entry];
        while (desc->die_dt != DT_FEMPTY) {
                /* Descriptor type must be checked before all other reads */
                dma_rmb();
                desc_status = desc->msc;
                pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;

                if (--boguscnt < 0)
                        break;

                /* We use 0-byte descriptors to mark the DMA mapping errors */
                if (!pkt_len)
                        continue;

                if (desc_status & MSC_MC)
                        stats->multicast++;

                if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF | MSC_CEEF)) {
                        stats->rx_errors++;
                        if (desc_status & MSC_CRC)
                                stats->rx_crc_errors++;
                        if (desc_status & MSC_RFE)
                                stats->rx_frame_errors++;
                        if (desc_status & (MSC_RTLF | MSC_RTSF))
                                stats->rx_length_errors++;
                        if (desc_status & MSC_CEEF)
                                stats->rx_missed_errors++;
                } else {
                        die_dt = desc->die_dt & 0xF0;
                        switch (die_dt) {
                        case DT_FSINGLE:
                                skb = ravb_get_skb_gbeth(ndev, entry, desc);
                                skb_put(skb, pkt_len);
                                skb->protocol = eth_type_trans(skb, ndev);
                                napi_gro_receive(&priv->napi[q], skb);
                                stats->rx_packets++;
                                stats->rx_bytes += pkt_len;
                                break;
                        case DT_FSTART:
                                priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc);
                                skb_put(priv->rx_1st_skb, pkt_len);
                                break;
                        case DT_FMID:
                                skb = ravb_get_skb_gbeth(ndev, entry, desc);
                                skb_copy_to_linear_data_offset(priv->rx_1st_skb,
                                                               priv->rx_1st_skb->len,
                                                               skb->data,
                                                               pkt_len);
                                skb_put(priv->rx_1st_skb, pkt_len);
                                dev_kfree_skb(skb);
                                break;
                        case DT_FEND:
                                skb = ravb_get_skb_gbeth(ndev, entry, desc);
                                skb_copy_to_linear_data_offset(priv->rx_1st_skb,
                                                               priv->rx_1st_skb->len,
                                                               skb->data,
                                                               pkt_len);
                                skb_put(priv->rx_1st_skb, pkt_len);
                                dev_kfree_skb(skb);
                                priv->rx_1st_skb->protocol =
                                        eth_type_trans(priv->rx_1st_skb, ndev);
                                napi_gro_receive(&priv->napi[q],
                                                 priv->rx_1st_skb);
                                stats->rx_packets++;
                                stats->rx_bytes += priv->rx_1st_skb->len;
                                break;
                        }
                }

                entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
                desc = &priv->gbeth_rx_ring[entry];
        }

        /* Refill the RX ring buffers. */
        for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
                entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
                desc = &priv->gbeth_rx_ring[entry];
                desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);

                if (!priv->rx_skb[q][entry]) {
                        skb = netdev_alloc_skb(ndev, info->max_rx_len);
                        if (!skb)
                                break;
                        ravb_set_buffer_align(skb);
                        dma_addr = dma_map_single(ndev->dev.parent,
                                                  skb->data,
                                                  GBETH_RX_BUFF_MAX,
                                                  DMA_FROM_DEVICE);
                        skb_checksum_none_assert(skb);
                        /* We just set the data size to 0 for a failed mapping
                         * which should prevent DMA from happening...
                         */
                        if (dma_mapping_error(ndev->dev.parent, dma_addr))
                                desc->ds_cc = cpu_to_le16(0);
                        desc->dptr = cpu_to_le32(dma_addr);
                        priv->rx_skb[q][entry] = skb;
                }
                /* Descriptor type must be set after all the above writes */
                dma_wmb();
                desc->die_dt = DT_FEMPTY;
        }

        *quota -= limit - (++boguscnt);

        return boguscnt <= 0;
}

/* Packet receive function for Ethernet AVB */
static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
{
        struct ravb_private *priv = netdev_priv(ndev);
        const struct ravb_hw_info *info = priv->info;
        int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
        int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
                        priv->cur_rx[q];
        struct net_device_stats *stats = &priv->stats[q];
        struct ravb_ex_rx_desc *desc;
        struct sk_buff *skb;
        dma_addr_t dma_addr;
        struct timespec64 ts;
        u8  desc_status;
        u16 pkt_len;
        int limit;

        boguscnt = min(boguscnt, *quota);
        limit = boguscnt;
        desc = &priv->rx_ring[q][entry];
        while (desc->die_dt != DT_FEMPTY) {
                /* Descriptor type must be checked before all other reads */
                dma_rmb();
                desc_status = desc->msc;
                pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;

                if (--boguscnt < 0)
                        break;

                /* We use 0-byte descriptors to mark the DMA mapping errors */
                if (!pkt_len)
                        continue;

                if (desc_status & MSC_MC)
                        stats->multicast++;

                if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF |
                                   MSC_CEEF)) {
                        stats->rx_errors++;
                        if (desc_status & MSC_CRC)
                                stats->rx_crc_errors++;
                        if (desc_status & MSC_RFE)
                                stats->rx_frame_errors++;
                        if (desc_status & (MSC_RTLF | MSC_RTSF))
                                stats->rx_length_errors++;
                        if (desc_status & MSC_CEEF)
                                stats->rx_missed_errors++;
                } else {
                        u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;

                        skb = priv->rx_skb[q][entry];
                        priv->rx_skb[q][entry] = NULL;
                        dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
                                         RX_BUF_SZ,
                                         DMA_FROM_DEVICE);
                        get_ts &= (q == RAVB_NC) ?
                                        RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
                                        ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
                        if (get_ts) {
                                struct skb_shared_hwtstamps *shhwtstamps;

                                shhwtstamps = skb_hwtstamps(skb);
                                memset(shhwtstamps, 0, sizeof(*shhwtstamps));
                                ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) <<
                                             32) | le32_to_cpu(desc->ts_sl);
                                ts.tv_nsec = le32_to_cpu(desc->ts_n);
                                shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
                        }

                        skb_put(skb, pkt_len);
                        skb->protocol = eth_type_trans(skb, ndev);
                        if (ndev->features & NETIF_F_RXCSUM)
                                ravb_rx_csum(skb);
                        napi_gro_receive(&priv->napi[q], skb);
                        stats->rx_packets++;
                        stats->rx_bytes += pkt_len;
                }

                entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
                desc = &priv->rx_ring[q][entry];
        }

        /* Refill the RX ring buffers. */
        for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
                entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
                desc = &priv->rx_ring[q][entry];
                desc->ds_cc = cpu_to_le16(RX_BUF_SZ);

                if (!priv->rx_skb[q][entry]) {
                        skb = netdev_alloc_skb(ndev, info->max_rx_len);
                        if (!skb)
                                break;  /* Better luck next round. */
                        ravb_set_buffer_align(skb);
                        dma_addr = dma_map_single(ndev->dev.parent, skb->data,
                                                  le16_to_cpu(desc->ds_cc),
                                                  DMA_FROM_DEVICE);
                        skb_checksum_none_assert(skb);
                        /* We just set the data size to 0 for a failed mapping
                         * which should prevent DMA from happening...
                         */
                        if (dma_mapping_error(ndev->dev.parent, dma_addr))
                                desc->ds_cc = cpu_to_le16(0);
                        desc->dptr = cpu_to_le32(dma_addr);
                        priv->rx_skb[q][entry] = skb;
                }
                /* Descriptor type must be set after all the above writes */
                dma_wmb();
                desc->die_dt = DT_FEMPTY;
        }

        *quota -= limit - (++boguscnt);

        return boguscnt <= 0;
}

/* Packet receive function for Ethernet AVB */
static bool ravb_rx(struct net_device *ndev, int *quota, int q)
{
        struct ravb_private *priv = netdev_priv(ndev);
        const struct ravb_hw_info *info = priv->info;

        return info->receive(ndev, quota, q);
}

static void ravb_rcv_snd_disable(struct net_device *ndev)
{
        /* Disable TX and RX */
        ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
}

static void ravb_rcv_snd_enable(struct net_device *ndev)
{
        /* Enable TX and RX */
        ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
}

/* Wait until the DMA process has finished */
static int ravb_stop_dma(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        const struct ravb_hw_info *info = priv->info;
        int error;

        /* Wait for stopping the hardware TX process */
        error = ravb_wait(ndev, TCCR, info->tccr_mask, 0);

        if (error)
                return error;

        error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3,
                          0);
        if (error)
                return error;

        /* Stop the E-MAC's RX/TX processes. */
        ravb_rcv_snd_disable(ndev);

        /* Wait for stopping the RX DMA process */
        error = ravb_wait(ndev, CSR, CSR_RPO, 0);
        if (error)
                return error;

        /* Stop AVB-DMAC process */
        return ravb_config(ndev);
}

/* E-MAC interrupt handler */
static void ravb_emac_interrupt_unlocked(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        u32 ecsr, psr;

        ecsr = ravb_read(ndev, ECSR);
        ravb_write(ndev, ecsr, ECSR);   /* clear interrupt */

        if (ecsr & ECSR_MPD)
                pm_wakeup_event(&priv->pdev->dev, 0);
        if (ecsr & ECSR_ICD)
                ndev->stats.tx_carrier_errors++;
        if (ecsr & ECSR_LCHNG) {
                /* Link changed */
                if (priv->no_avb_link)
                        return;
                psr = ravb_read(ndev, PSR);
                if (priv->avb_link_active_low)
                        psr ^= PSR_LMON;
                if (!(psr & PSR_LMON)) {
                        /* Disable RX and TX */
                        ravb_rcv_snd_disable(ndev);
                } else {
                        /* Enable RX and TX */
                        ravb_rcv_snd_enable(ndev);
                }
        }
}

static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
{
        struct net_device *ndev = dev_id;
        struct ravb_private *priv = netdev_priv(ndev);

        spin_lock(&priv->lock);
        ravb_emac_interrupt_unlocked(ndev);
        spin_unlock(&priv->lock);
        return IRQ_HANDLED;
}

/* Error interrupt handler */
static void ravb_error_interrupt(struct net_device *ndev)
{
        struct ravb_private *priv = netdev_priv(ndev);
        u32 eis, ris2;

        eis = ravb_read(ndev, EIS);
        ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
        if (eis & EIS_QFS) {
                ris2 = ravb_read(ndev, RIS2);
                ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
                           RIS2);

                /* Receive Descriptor Empty int */
                if (ris2 & RIS2_QFF0)
                        priv->stats[RAVB_BE].rx_over_errors++;

                /* Receive Descriptor Empty int */
                if (ris2 & RIS2_QFF1)
                        priv->stats[RAVB_NC].rx_over_errors++;

                /* Receive FIFO Overflow int */
                if (ris2 & RIS2_RFFF)
                        priv->rx_fifo_errors++;
        }
}

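/* Handle an RX/TX interrupt on queue 'q': mask the queue's interrupts and
 * hand the work to NAPI; returns true if the interrupt was for this queue.
 */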
static bool ravb_queue_interrupt(struct net_device *ndev, int q)
{
        struct ravb_private *priv = netdev_priv(ndev);
        const struct ravb_hw_info *info = priv->info;
        u32 ris0 = ravb_read(ndev, RIS0);
        u32 ric0 = ravb_read(ndev, RIC0);
        u32 tis  = ravb_read(ndev, TIS);
        u32 tic  = ravb_read(ndev, TIC);

        if (((ris0 & ric0) & BIT(q)) || ((tis  & tic)  & BIT(q))) {
                if (napi_schedule_prep(&priv->napi[q])) {
                        /* Mask RX and TX interrupts */
                        if (!info->multi_irqs) {
                                ravb_write(ndev, ric0 & ~BIT(q), RIC0);
                                ravb_write(ndev, tic & ~BIT(q), TIC);
                        } else {
                                ravb_write(ndev, BIT(q), RID0);
                                ravb_write(ndev, BIT(q), TID);
                        }
                        __napi_schedule(&priv->napi[q]);
                } else {
                        netdev_warn(ndev,
                                    "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
                                    ris0, ric0);
                        netdev_warn(ndev,
                                    "                    tx status 0x%08x, tx mask 0x%08x.\n",
                                    tis, tic);
                }
                return true;
        }
        return false;
}

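/* Handle a timestamp FIFO update interrupt, if one is pending */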
static bool ravb_timestamp_interrupt(struct net_device *ndev)
{
        u32 tis = ravb_read(ndev, TIS);

        if (tis & TIS_TFUF) {
                ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
                ravb_get_tx_tstamp(ndev);
                return true;
        }
        return false;
}

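/* Common interrupt handler for platforms with a single combined IRQ */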
static irqreturn_t ravb_interrupt(int irq, void *dev_id)
{
        struct net_device *ndev = dev_id;
        struct ravb_private *priv = netdev_priv(ndev);
        const struct ravb_hw_info *info = priv->info;
        irqreturn_t result = IRQ_NONE;
        u32 iss;

        spin_lock(&priv->lock);
        /* Get interrupt status */
        iss = ravb_read(ndev, ISS);

        /* Received and transmitted interrupts */
        if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
                int q;

                /* Timestamp updated */
                if (ravb_timestamp_interrupt(ndev))
                        result = IRQ_HANDLED;

                /* Network control and best effort queue RX/TX */
                if (info->nc_queues) {
                        for (q = RAVB_NC; q >= RAVB_BE; q--) {
                                if (ravb_queue_interrupt(ndev, q))
                                        result = IRQ_HANDLED;
                        }
                } else {
                        if (ravb_queue_interrupt(ndev, RAVB_BE))
                                result = IRQ_HANDLED;
                }
        }

        /* E-MAC status summary */
        if (iss & ISS_MS) {
                ravb_emac_interrupt_unlocked(ndev);
                result = IRQ_HANDLED;
        }

        /* Error status summary */
        if (iss & ISS_ES) {
                ravb_error_interrupt(ndev);
                result = IRQ_HANDLED;
        }

        /* gPTP interrupt status summary */
        if (iss & ISS_CGIS) {
                ravb_ptp_interrupt(ndev);
                result = IRQ_HANDLED;
        }

        spin_unlock(&priv->lock);
        return result;
}

/* Timestamp/Error/gPTP interrupt handler */
static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
{
        struct net_device *ndev = dev_id;
        struct ravb_private *priv = netdev_priv(ndev);
        irqreturn_t result = IRQ_NONE;
        u32 iss;

        spin_lock(&priv->lock);
        /* Get interrupt status */
        iss = ravb_read(ndev, ISS);

        /* Timestamp updated */
        if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev))
                result = IRQ_HANDLED;

        /* Error status summary */
        if (iss & ISS_ES) {
                ravb_error_interrupt(ndev);
                result = IRQ_HANDLED;
        }

        /* gPTP interrupt status summary */
        if (iss & ISS_CGIS) {
                ravb_ptp_interrupt(ndev);
                result = IRQ_HANDLED;
        }

        spin_unlock(&priv->lock);
        return result;
}

static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
{
        struct net_device *ndev = dev_id;
        struct ravb_private *priv = netdev_priv(ndev);
        irqreturn_t result = IRQ_NONE;

        spin_lock(&priv->lock);

        /* Network control/Best effort queue RX/TX */
        if (ravb_queue_interrupt(ndev, q))
                result = IRQ_HANDLED;

        spin_unlock(&priv->lock);
        return result;
}

static irqreturn_t ravb_be_interrupt(int irq, void *dev_id)
{
        return ravb_dma_interrupt(irq, dev_id, RAVB_BE);
}

static irqreturn_t ravb_nc_interrupt(int irq, void *dev_id)
{
        return ravb_dma_interrupt(irq, dev_id, RAVB_NC);
}

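/* NAPI poll function: process received frames, reclaim transmitted
 * descriptors, and re-enable the RX/TX interrupts once the queue is idle.
 */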
static int ravb_poll(struct napi_struct *napi, int budget)
{
        struct net_device *ndev = napi->dev;
        struct ravb_private *priv = netdev_priv(ndev);
        const struct ravb_hw_info *info = priv->info;
        bool gptp = info->gptp || info->ccc_gac;
        struct ravb_rx_desc *desc;
        unsigned long flags;
        int q = napi - priv->napi;
        int mask = BIT(q);
        int quota = budget;
        unsigned int entry;

        if (!gptp) {
                entry = priv->cur_rx[q] % priv->num_rx_ring[q];
                desc = &priv->gbeth_rx_ring[entry];
        }
        /* Processing RX Descriptor Ring */
        /* Clear RX interrupt */
        ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
        if (gptp || desc->die_dt != DT_FEMPTY) {
                if (ravb_rx(ndev, &quota, q))
                        goto out;
        }

        /* Processing TX Descriptor Ring */
        spin_lock_irqsave(&priv->lock, flags);
        /* Clear TX interrupt */
        ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
        ravb_tx_free(ndev, q, true);
        netif_wake_subqueue(ndev, q);
        spin_unlock_irqrestore(&priv->lock, flags);

        napi_complete(napi);

        /* Re-enable RX/TX interrupts */
        spin_lock_irqsave(&priv->lock, flags);
        if (!info->multi_irqs) {
                ravb_modify(ndev, RIC0, mask, mask);
                ravb_modify(ndev, TIC,  mask, mask);
        } else {
                ravb_write(ndev, mask, RIE0);
                ravb_write(ndev, mask, TIE);
        }
        spin_unlock_irqrestore(&priv->lock, flags);

        /* Receive error message handling */
        priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
        if (info->nc_queues)
                priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
        if (priv->rx_over_errors != ndev->stats.rx_over_errors)
                ndev->stats.rx_over_errors = priv->rx_over_errors;
        if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
                ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
out:
        return budget - quota;
}

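/* Reflect the current duplex setting in the ECMR.DM bit (GbEth only) */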
1330static void ravb_set_duplex_gbeth(struct net_device *ndev)
1331{
1332        struct ravb_private *priv = netdev_priv(ndev);
1333
1334        ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex > 0 ? ECMR_DM : 0);
1335}
1336
1337/* PHY state control function */
1338static void ravb_adjust_link(struct net_device *ndev)
1339{
1340        struct ravb_private *priv = netdev_priv(ndev);
1341        const struct ravb_hw_info *info = priv->info;
1342        struct phy_device *phydev = ndev->phydev;
1343        bool new_state = false;
1344        unsigned long flags;
1345
1346        spin_lock_irqsave(&priv->lock, flags);
1347
1348        /* Disable TX and RX right over here, if E-MAC change is ignored */
1349        if (priv->no_avb_link)
1350                ravb_rcv_snd_disable(ndev);
1351
1352        if (phydev->link) {
1353                if (info->half_duplex && phydev->duplex != priv->duplex) {
1354                        new_state = true;
1355                        priv->duplex = phydev->duplex;
1356                        ravb_set_duplex_gbeth(ndev);
1357                }
1358
1359                if (phydev->speed != priv->speed) {
1360                        new_state = true;
1361                        priv->speed = phydev->speed;
1362                        info->set_rate(ndev);
1363                }
1364                if (!priv->link) {
1365                        ravb_modify(ndev, ECMR, ECMR_TXF, 0);
1366                        new_state = true;
1367                        priv->link = phydev->link;
1368                }
1369        } else if (priv->link) {
1370                new_state = true;
1371                priv->link = 0;
1372                priv->speed = 0;
1373                if (info->half_duplex)
1374                        priv->duplex = -1;
1375        }
1376
1377        /* Enable TX and RX right over here, if E-MAC change is ignored */
1378        if (priv->no_avb_link && phydev->link)
1379                ravb_rcv_snd_enable(ndev);
1380
1381        spin_unlock_irqrestore(&priv->lock, flags);
1382
1383        if (new_state && netif_msg_link(priv))
1384                phy_print_status(phydev);
1385}
1386
1387static const struct soc_device_attribute r8a7795es10[] = {
1388        { .soc_id = "r8a7795", .revision = "ES1.0", },
1389        { /* sentinel */ }
1390};
1391
1392/* PHY init function */
1393static int ravb_phy_init(struct net_device *ndev)
1394{
1395        struct device_node *np = ndev->dev.parent->of_node;
1396        struct ravb_private *priv = netdev_priv(ndev);
1397        const struct ravb_hw_info *info = priv->info;
1398        struct phy_device *phydev;
1399        struct device_node *pn;
1400        phy_interface_t iface;
1401        int err;
1402
1403        priv->link = 0;
1404        priv->speed = 0;
1405        priv->duplex = -1;
1406
1407        /* Try connecting to PHY */
1408        pn = of_parse_phandle(np, "phy-handle", 0);
1409        if (!pn) {
1410                /* In the case of a fixed PHY, the DT node associated
1411                 * to the PHY is the Ethernet MAC DT node.
1412                 */
1413                if (of_phy_is_fixed_link(np)) {
1414                        err = of_phy_register_fixed_link(np);
1415                        if (err)
1416                                return err;
1417                }
1418                pn = of_node_get(np);
1419        }
1420
1421        iface = priv->rgmii_override ? PHY_INTERFACE_MODE_RGMII
1422                                     : priv->phy_interface;
1423        phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, iface);
1424        of_node_put(pn);
1425        if (!phydev) {
1426                netdev_err(ndev, "failed to connect PHY\n");
1427                err = -ENOENT;
1428                goto err_deregister_fixed_link;
1429        }
1430
1431        /* This driver only supports 10/100Mbit speeds on R-Car H3 ES1.0
1432         * at this time.
1433         */
1434        if (soc_device_match(r8a7795es10)) {
1435                err = phy_set_max_speed(phydev, SPEED_100);
1436                if (err) {
1437                        netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n");
1438                        goto err_phy_disconnect;
1439                }
1440
1441                netdev_info(ndev, "limited PHY to 100Mbit/s\n");
1442        }
1443
1444        if (!info->half_duplex) {
1445                /* 10BASE, Pause and Asym Pause are not supported */
1446                phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
1447                phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
1448                phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
1449                phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
1450
1451                /* Half Duplex is not supported */
1452                phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
1453                phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
1454        }
1455
1456        phy_attached_info(phydev);
1457
1458        return 0;
1459
1460err_phy_disconnect:
1461        phy_disconnect(phydev);
1462err_deregister_fixed_link:
1463        if (of_phy_is_fixed_link(np))
1464                of_phy_deregister_fixed_link(np);
1465
1466        return err;
1467}
1468
1469/* PHY control start function */
1470static int ravb_phy_start(struct net_device *ndev)
1471{
1472        int error;
1473
1474        error = ravb_phy_init(ndev);
1475        if (error)
1476                return error;
1477
1478        phy_start(ndev->phydev);
1479
1480        return 0;
1481}
1482
1483static u32 ravb_get_msglevel(struct net_device *ndev)
1484{
1485        struct ravb_private *priv = netdev_priv(ndev);
1486
1487        return priv->msg_enable;
1488}
1489
1490static void ravb_set_msglevel(struct net_device *ndev, u32 value)
1491{
1492        struct ravb_private *priv = netdev_priv(ndev);
1493
1494        priv->msg_enable = value;
1495}
1496
1497static const char ravb_gstrings_stats_gbeth[][ETH_GSTRING_LEN] = {
1498        "rx_queue_0_current",
1499        "tx_queue_0_current",
1500        "rx_queue_0_dirty",
1501        "tx_queue_0_dirty",
1502        "rx_queue_0_packets",
1503        "tx_queue_0_packets",
1504        "rx_queue_0_bytes",
1505        "tx_queue_0_bytes",
1506        "rx_queue_0_mcast_packets",
1507        "rx_queue_0_errors",
1508        "rx_queue_0_crc_errors",
1509        "rx_queue_0_frame_errors",
1510        "rx_queue_0_length_errors",
1511        "rx_queue_0_csum_offload_errors",
1512        "rx_queue_0_over_errors",
1513};
1514
1515static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
1516        "rx_queue_0_current",
1517        "tx_queue_0_current",
1518        "rx_queue_0_dirty",
1519        "tx_queue_0_dirty",
1520        "rx_queue_0_packets",
1521        "tx_queue_0_packets",
1522        "rx_queue_0_bytes",
1523        "tx_queue_0_bytes",
1524        "rx_queue_0_mcast_packets",
1525        "rx_queue_0_errors",
1526        "rx_queue_0_crc_errors",
1527        "rx_queue_0_frame_errors",
1528        "rx_queue_0_length_errors",
1529        "rx_queue_0_missed_errors",
1530        "rx_queue_0_over_errors",
1531
1532        "rx_queue_1_current",
1533        "tx_queue_1_current",
1534        "rx_queue_1_dirty",
1535        "tx_queue_1_dirty",
1536        "rx_queue_1_packets",
1537        "tx_queue_1_packets",
1538        "rx_queue_1_bytes",
1539        "tx_queue_1_bytes",
1540        "rx_queue_1_mcast_packets",
1541        "rx_queue_1_errors",
1542        "rx_queue_1_crc_errors",
1543        "rx_queue_1_frame_errors",
1544        "rx_queue_1_length_errors",
1545        "rx_queue_1_missed_errors",
1546        "rx_queue_1_over_errors",
1547};
1548
1549static int ravb_get_sset_count(struct net_device *netdev, int sset)
1550{
1551        struct ravb_private *priv = netdev_priv(netdev);
1552        const struct ravb_hw_info *info = priv->info;
1553
1554        switch (sset) {
1555        case ETH_SS_STATS:
1556                return info->stats_len;
1557        default:
1558                return -EOPNOTSUPP;
1559        }
1560}
1561
1562static void ravb_get_ethtool_stats(struct net_device *ndev,
1563                                   struct ethtool_stats *estats, u64 *data)
1564{
1565        struct ravb_private *priv = netdev_priv(ndev);
1566        const struct ravb_hw_info *info = priv->info;
1567        int num_rx_q;
1568        int i = 0;
1569        int q;
1570
1571        num_rx_q = info->nc_queues ? NUM_RX_QUEUE : 1;
1572        /* Device-specific stats */
1573        for (q = RAVB_BE; q < num_rx_q; q++) {
1574                struct net_device_stats *stats = &priv->stats[q];
1575
1576                data[i++] = priv->cur_rx[q];
1577                data[i++] = priv->cur_tx[q];
1578                data[i++] = priv->dirty_rx[q];
1579                data[i++] = priv->dirty_tx[q];
1580                data[i++] = stats->rx_packets;
1581                data[i++] = stats->tx_packets;
1582                data[i++] = stats->rx_bytes;
1583                data[i++] = stats->tx_bytes;
1584                data[i++] = stats->multicast;
1585                data[i++] = stats->rx_errors;
1586                data[i++] = stats->rx_crc_errors;
1587                data[i++] = stats->rx_frame_errors;
1588                data[i++] = stats->rx_length_errors;
1589                data[i++] = stats->rx_missed_errors;
1590                data[i++] = stats->rx_over_errors;
1591        }
1592}
1593
1594static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1595{
1596        struct ravb_private *priv = netdev_priv(ndev);
1597        const struct ravb_hw_info *info = priv->info;
1598
1599        switch (stringset) {
1600        case ETH_SS_STATS:
1601                memcpy(data, info->gstrings_stats, info->gstrings_size);
1602                break;
1603        }
1604}
1605
1606static void ravb_get_ringparam(struct net_device *ndev,
1607                               struct ethtool_ringparam *ring,
1608                               struct kernel_ethtool_ringparam *kernel_ring,
1609                               struct netlink_ext_ack *extack)
1610{
1611        struct ravb_private *priv = netdev_priv(ndev);
1612
1613        ring->rx_max_pending = BE_RX_RING_MAX;
1614        ring->tx_max_pending = BE_TX_RING_MAX;
1615        ring->rx_pending = priv->num_rx_ring[RAVB_BE];
1616        ring->tx_pending = priv->num_tx_ring[RAVB_BE];
1617}
1618
1619static int ravb_set_ringparam(struct net_device *ndev,
1620                              struct ethtool_ringparam *ring,
1621                              struct kernel_ethtool_ringparam *kernel_ring,
1622                              struct netlink_ext_ack *extack)
1623{
1624        struct ravb_private *priv = netdev_priv(ndev);
1625        const struct ravb_hw_info *info = priv->info;
1626        int error;
1627
1628        if (ring->tx_pending > BE_TX_RING_MAX ||
1629            ring->rx_pending > BE_RX_RING_MAX ||
1630            ring->tx_pending < BE_TX_RING_MIN ||
1631            ring->rx_pending < BE_RX_RING_MIN)
1632                return -EINVAL;
1633        if (ring->rx_mini_pending || ring->rx_jumbo_pending)
1634                return -EINVAL;
1635
1636        if (netif_running(ndev)) {
1637                netif_device_detach(ndev);
1638                /* Stop PTP Clock driver */
1639                if (info->gptp)
1640                        ravb_ptp_stop(ndev);
1641                /* Wait for the DMA to stop */
1642                error = ravb_stop_dma(ndev);
1643                if (error) {
1644                        netdev_err(ndev,
1645                                   "cannot set ringparam! Are any AVB processes still running?\n");
1646                        return error;
1647                }
1648                synchronize_irq(ndev->irq);
1649
1650                /* Free all the skbs in the RX queue and the DMA buffers. */
1651                ravb_ring_free(ndev, RAVB_BE);
1652                if (info->nc_queues)
1653                        ravb_ring_free(ndev, RAVB_NC);
1654        }
1655
1656        /* Set new parameters */
1657        priv->num_rx_ring[RAVB_BE] = ring->rx_pending;
1658        priv->num_tx_ring[RAVB_BE] = ring->tx_pending;
1659
1660        if (netif_running(ndev)) {
1661                error = ravb_dmac_init(ndev);
1662                if (error) {
1663                        netdev_err(ndev,
1664                                   "%s: ravb_dmac_init() failed, error %d\n",
1665                                   __func__, error);
1666                        return error;
1667                }
1668
1669                ravb_emac_init(ndev);
1670
1671                /* Initialise PTP Clock driver */
1672                if (info->gptp)
1673                        ravb_ptp_init(ndev, priv->pdev);
1674
1675                netif_device_attach(ndev);
1676        }
1677
1678        return 0;
1679}
1680
1681static int ravb_get_ts_info(struct net_device *ndev,
1682                            struct ethtool_ts_info *info)
1683{
1684        struct ravb_private *priv = netdev_priv(ndev);
1685        const struct ravb_hw_info *hw_info = priv->info;
1686
1687        info->so_timestamping =
1688                SOF_TIMESTAMPING_TX_SOFTWARE |
1689                SOF_TIMESTAMPING_RX_SOFTWARE |
1690                SOF_TIMESTAMPING_SOFTWARE |
1691                SOF_TIMESTAMPING_TX_HARDWARE |
1692                SOF_TIMESTAMPING_RX_HARDWARE |
1693                SOF_TIMESTAMPING_RAW_HARDWARE;
1694        info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
1695        info->rx_filters =
1696                (1 << HWTSTAMP_FILTER_NONE) |
1697                (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
1698                (1 << HWTSTAMP_FILTER_ALL);
1699        if (hw_info->gptp || hw_info->ccc_gac)
1700                info->phc_index = ptp_clock_index(priv->ptp.clock);
1701
1702        return 0;
1703}
1704
1705static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1706{
1707        struct ravb_private *priv = netdev_priv(ndev);
1708
1709        wol->supported = WAKE_MAGIC;
1710        wol->wolopts = priv->wol_enabled ? WAKE_MAGIC : 0;
1711}
1712
1713static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1714{
1715        struct ravb_private *priv = netdev_priv(ndev);
1716        const struct ravb_hw_info *info = priv->info;
1717
1718        if (!info->magic_pkt || (wol->wolopts & ~WAKE_MAGIC))
1719                return -EOPNOTSUPP;
1720
1721        priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
1722
1723        device_set_wakeup_enable(&priv->pdev->dev, priv->wol_enabled);
1724
1725        return 0;
1726}
1727
1728static const struct ethtool_ops ravb_ethtool_ops = {
1729        .nway_reset             = phy_ethtool_nway_reset,
1730        .get_msglevel           = ravb_get_msglevel,
1731        .set_msglevel           = ravb_set_msglevel,
1732        .get_link               = ethtool_op_get_link,
1733        .get_strings            = ravb_get_strings,
1734        .get_ethtool_stats      = ravb_get_ethtool_stats,
1735        .get_sset_count         = ravb_get_sset_count,
1736        .get_ringparam          = ravb_get_ringparam,
1737        .set_ringparam          = ravb_set_ringparam,
1738        .get_ts_info            = ravb_get_ts_info,
1739        .get_link_ksettings     = phy_ethtool_get_link_ksettings,
1740        .set_link_ksettings     = phy_ethtool_set_link_ksettings,
1741        .get_wol                = ravb_get_wol,
1742        .set_wol                = ravb_set_wol,
1743};
1744
1745static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
1746                                struct net_device *ndev, struct device *dev,
1747                                const char *ch)
1748{
1749        char *name;
1750        int error;
1751
1752        name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
1753        if (!name)
1754                return -ENOMEM;
1755        error = request_irq(irq, handler, 0, name, ndev);
1756        if (error)
1757                netdev_err(ndev, "cannot request IRQ %s\n", name);
1758
1759        return error;
1760}
1761
1762/* Network device open function for Ethernet AVB */
1763static int ravb_open(struct net_device *ndev)
1764{
1765        struct ravb_private *priv = netdev_priv(ndev);
1766        const struct ravb_hw_info *info = priv->info;
1767        struct platform_device *pdev = priv->pdev;
1768        struct device *dev = &pdev->dev;
1769        int error;
1770
1771        napi_enable(&priv->napi[RAVB_BE]);
1772        if (info->nc_queues)
1773                napi_enable(&priv->napi[RAVB_NC]);
1774
1775        if (!info->multi_irqs) {
1776                error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED,
1777                                    ndev->name, ndev);
1778                if (error) {
1779                        netdev_err(ndev, "cannot request IRQ\n");
1780                        goto out_napi_off;
1781                }
1782        } else {
1783                error = ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev,
1784                                      dev, "ch22:multi");
1785                if (error)
1786                        goto out_napi_off;
1787                error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev,
1788                                      dev, "ch24:emac");
1789                if (error)
1790                        goto out_free_irq;
1791                error = ravb_hook_irq(priv->rx_irqs[RAVB_BE], ravb_be_interrupt,
1792                                      ndev, dev, "ch0:rx_be");
1793                if (error)
1794                        goto out_free_irq_emac;
1795                error = ravb_hook_irq(priv->tx_irqs[RAVB_BE], ravb_be_interrupt,
1796                                      ndev, dev, "ch18:tx_be");
1797                if (error)
1798                        goto out_free_irq_be_rx;
1799                error = ravb_hook_irq(priv->rx_irqs[RAVB_NC], ravb_nc_interrupt,
1800                                      ndev, dev, "ch1:rx_nc");
1801                if (error)
1802                        goto out_free_irq_be_tx;
1803                error = ravb_hook_irq(priv->tx_irqs[RAVB_NC], ravb_nc_interrupt,
1804                                      ndev, dev, "ch19:tx_nc");
1805                if (error)
1806                        goto out_free_irq_nc_rx;
1807        }
1808
1809        /* Device init */
1810        error = ravb_dmac_init(ndev);
1811        if (error)
1812                goto out_free_irq_nc_tx;
1813        ravb_emac_init(ndev);
1814
1815        /* Initialise PTP Clock driver */
1816        if (info->gptp)
1817                ravb_ptp_init(ndev, priv->pdev);
1818
1819        netif_tx_start_all_queues(ndev);
1820
1821        /* PHY control start */
1822        error = ravb_phy_start(ndev);
1823        if (error)
1824                goto out_ptp_stop;
1825
1826        return 0;
1827
1828out_ptp_stop:
1829        /* Stop PTP Clock driver */
1830        if (info->gptp)
1831                ravb_ptp_stop(ndev);
1832out_free_irq_nc_tx:
1833        if (!info->multi_irqs)
1834                goto out_free_irq;
1835        free_irq(priv->tx_irqs[RAVB_NC], ndev);
1836out_free_irq_nc_rx:
1837        free_irq(priv->rx_irqs[RAVB_NC], ndev);
1838out_free_irq_be_tx:
1839        free_irq(priv->tx_irqs[RAVB_BE], ndev);
1840out_free_irq_be_rx:
1841        free_irq(priv->rx_irqs[RAVB_BE], ndev);
1842out_free_irq_emac:
1843        free_irq(priv->emac_irq, ndev);
1844out_free_irq:
1845        free_irq(ndev->irq, ndev);
1846out_napi_off:
1847        if (info->nc_queues)
1848                napi_disable(&priv->napi[RAVB_NC]);
1849        napi_disable(&priv->napi[RAVB_BE]);
1850        return error;
1851}
1852
1853/* Timeout function for Ethernet AVB */
1854static void ravb_tx_timeout(struct net_device *ndev, unsigned int txqueue)
1855{
1856        struct ravb_private *priv = netdev_priv(ndev);
1857
1858        netif_err(priv, tx_err, ndev,
1859                  "transmit timed out, status %08x, resetting...\n",
1860                  ravb_read(ndev, ISS));
1861
1862        /* Bump the tx_errors counter */
1863        ndev->stats.tx_errors++;
1864
1865        schedule_work(&priv->work);
1866}
1867
1868static void ravb_tx_timeout_work(struct work_struct *work)
1869{
1870        struct ravb_private *priv = container_of(work, struct ravb_private,
1871                                                 work);
1872        const struct ravb_hw_info *info = priv->info;
1873        struct net_device *ndev = priv->ndev;
1874        int error;
1875
1876        netif_tx_stop_all_queues(ndev);
1877
1878        /* Stop PTP Clock driver */
1879        if (info->gptp)
1880                ravb_ptp_stop(ndev);
1881
1882        /* Wait for the DMA to stop */
1883        if (ravb_stop_dma(ndev)) {
1884                /* If ravb_stop_dma() fails, the hardware is still operating
1885                 * for TX and/or RX, so the functions below must not be
1886                 * called, because ravb_dmac_init() may fail as well.
1887                 * Also, do not retry ravb_stop_dma() again and again
1888                 * here, because that could wait forever. So, just
1889                 * re-enable TX and RX here and skip the following
1890                 * re-initialization procedure.
1891                 */
1892                ravb_rcv_snd_enable(ndev);
1893                goto out;
1894        }
1895
1896        ravb_ring_free(ndev, RAVB_BE);
1897        if (info->nc_queues)
1898                ravb_ring_free(ndev, RAVB_NC);
1899
1900        /* Device init */
1901        error = ravb_dmac_init(ndev);
1902        if (error) {
1903                /* If ravb_dmac_init() fails, the descriptors have been freed,
1904                 * so return here to avoid re-enabling TX and RX in
1905                 * ravb_emac_init().
1906                 */
1907                netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
1908                           __func__, error);
1909                return;
1910        }
1911        ravb_emac_init(ndev);
1912
1913out:
1914        /* Initialise PTP Clock driver */
1915        if (info->gptp)
1916                ravb_ptp_init(ndev, priv->pdev);
1917
1918        netif_tx_start_all_queues(ndev);
1919}
1920
1921/* Packet transmit function for Ethernet AVB */
1922static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1923{
1924        struct ravb_private *priv = netdev_priv(ndev);
1925        const struct ravb_hw_info *info = priv->info;
1926        unsigned int num_tx_desc = priv->num_tx_desc;
1927        u16 q = skb_get_queue_mapping(skb);
1928        struct ravb_tstamp_skb *ts_skb;
1929        struct ravb_tx_desc *desc;
1930        unsigned long flags;
1931        u32 dma_addr;
1932        void *buffer;
1933        u32 entry;
1934        u32 len;
1935
1936        spin_lock_irqsave(&priv->lock, flags);
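            /* cur_tx and dirty_tx are free-running counters, so their
             * difference is the number of TX descriptors currently in
             * flight; treat the ring as full once fewer than one
             * packet's worth (num_tx_desc) of free slots remain.
             */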
1937        if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
1938            num_tx_desc) {
1939                netif_err(priv, tx_queued, ndev,
1940                          "still transmitting with the full ring!\n");
1941                netif_stop_subqueue(ndev, q);
1942                spin_unlock_irqrestore(&priv->lock, flags);
1943                return NETDEV_TX_BUSY;
1944        }
1945
1946        if (skb_put_padto(skb, ETH_ZLEN))
1947                goto exit;
1948
1949        entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc);
1950        priv->tx_skb[q][entry / num_tx_desc] = skb;
1951
1952        if (num_tx_desc > 1) {
1953                buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
1954                         entry / num_tx_desc * DPTR_ALIGN;
1955                len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
1956
1957                /* Zero length DMA descriptors are problematic as they seem
1958                 * to terminate DMA transfers. Avoid them by simply using a
1959                 * length of DPTR_ALIGN (4) when skb data is aligned to
1960                 * DPTR_ALIGN.
1961                 *
1962                 * As skb is guaranteed to have at least ETH_ZLEN (60)
1963                 * bytes of data by the call to skb_put_padto() above this
1964                 * is safe with respect to both the length of the first DMA
1965                 * descriptor (len) overflowing the available data and the
1966                 * length of the second DMA descriptor (skb->len - len)
1967                 * being negative.
1968                 */
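                    /* Illustrative example (not from the original source):
                     * with DPTR_ALIGN == 4 and skb->data at an address of
                     * the form 4n + 2, len becomes 2, so the first
                     * descriptor sends 2 bytes from the aligned bounce
                     * buffer and the second sends the remaining
                     * skb->len - 2 bytes directly from skb->data + 2,
                     * which is then 4-byte aligned.
                     */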
1969                if (len == 0)
1970                        len = DPTR_ALIGN;
1971
1972                memcpy(buffer, skb->data, len);
1973                dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
1974                                          DMA_TO_DEVICE);
1975                if (dma_mapping_error(ndev->dev.parent, dma_addr))
1976                        goto drop;
1977
1978                desc = &priv->tx_ring[q][entry];
1979                desc->ds_tagl = cpu_to_le16(len);
1980                desc->dptr = cpu_to_le32(dma_addr);
1981
1982                buffer = skb->data + len;
1983                len = skb->len - len;
1984                dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
1985                                          DMA_TO_DEVICE);
1986                if (dma_mapping_error(ndev->dev.parent, dma_addr))
1987                        goto unmap;
1988
1989                desc++;
1990        } else {
1991                desc = &priv->tx_ring[q][entry];
1992                len = skb->len;
1993                dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
1994                                          DMA_TO_DEVICE);
1995                if (dma_mapping_error(ndev->dev.parent, dma_addr))
1996                        goto drop;
1997        }
1998        desc->ds_tagl = cpu_to_le16(len);
1999        desc->dptr = cpu_to_le32(dma_addr);
2000
2001        /* TX timestamp required */
2002        if (info->gptp || info->ccc_gac) {
2003                if (q == RAVB_NC) {
2004                        ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
2005                        if (!ts_skb) {
2006                                if (num_tx_desc > 1) {
2007                                        desc--;
2008                                        dma_unmap_single(ndev->dev.parent, dma_addr,
2009                                                         len, DMA_TO_DEVICE);
2010                                }
2011                                goto unmap;
2012                        }
2013                        ts_skb->skb = skb_get(skb);
2014                        ts_skb->tag = priv->ts_skb_tag++;
2015                        priv->ts_skb_tag &= 0x3ff;
2016                        list_add_tail(&ts_skb->list, &priv->ts_skb_list);
2017
2018                        /* TAG and timestamp required flag */
2019                        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
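                            /* The 10-bit tag is split between the two
                             * descriptor fields: the upper 6 bits go into
                             * tagh_tsr, the lower 4 bits into bits 15:12
                             * of ds_tagl.
                             */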
2020                        desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
2021                        desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
2022                }
2023
2024                skb_tx_timestamp(skb);
2025        }
2026        /* Descriptor type must be set after all the above writes */
2027        dma_wmb();
2028        if (num_tx_desc > 1) {
2029                desc->die_dt = DT_FEND;
2030                desc--;
2031                desc->die_dt = DT_FSTART;
2032        } else {
2033                desc->die_dt = DT_FSINGLE;
2034        }
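            /* Kick the DMA engine by setting the transmission start
             * request bit for this queue.
             */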
2035        ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
2036
2037        priv->cur_tx[q] += num_tx_desc;
2038        if (priv->cur_tx[q] - priv->dirty_tx[q] >
2039            (priv->num_tx_ring[q] - 1) * num_tx_desc &&
2040            !ravb_tx_free(ndev, q, true))
2041                netif_stop_subqueue(ndev, q);
2042
2043exit:
2044        spin_unlock_irqrestore(&priv->lock, flags);
2045        return NETDEV_TX_OK;
2046
2047unmap:
2048        dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
2049                         le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
2050drop:
2051        dev_kfree_skb_any(skb);
2052        priv->tx_skb[q][entry / num_tx_desc] = NULL;
2053        goto exit;
2054}
2055
2056static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
2057                             struct net_device *sb_dev)
2058{
2059        /* If skb needs TX timestamp, it is handled in network control queue */
2060        return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
2061                                                               RAVB_BE;
2062
2063}
2064
2065static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
2066{
2067        struct ravb_private *priv = netdev_priv(ndev);
2068        const struct ravb_hw_info *info = priv->info;
2069        struct net_device_stats *nstats, *stats0, *stats1;
2070
2071        nstats = &ndev->stats;
2072        stats0 = &priv->stats[RAVB_BE];
2073
2074        if (info->tx_counters) {
2075                nstats->tx_dropped += ravb_read(ndev, TROCR);
2076                ravb_write(ndev, 0, TROCR);     /* (write clear) */
2077        }
2078
2079        if (info->carrier_counters) {
2080                nstats->collisions += ravb_read(ndev, CXR41);
2081                ravb_write(ndev, 0, CXR41);     /* (write clear) */
2082                nstats->tx_carrier_errors += ravb_read(ndev, CXR42);
2083                ravb_write(ndev, 0, CXR42);     /* (write clear) */
2084        }
2085
2086        nstats->rx_packets = stats0->rx_packets;
2087        nstats->tx_packets = stats0->tx_packets;
2088        nstats->rx_bytes = stats0->rx_bytes;
2089        nstats->tx_bytes = stats0->tx_bytes;
2090        nstats->multicast = stats0->multicast;
2091        nstats->rx_errors = stats0->rx_errors;
2092        nstats->rx_crc_errors = stats0->rx_crc_errors;
2093        nstats->rx_frame_errors = stats0->rx_frame_errors;
2094        nstats->rx_length_errors = stats0->rx_length_errors;
2095        nstats->rx_missed_errors = stats0->rx_missed_errors;
2096        nstats->rx_over_errors = stats0->rx_over_errors;
2097        if (info->nc_queues) {
2098                stats1 = &priv->stats[RAVB_NC];
2099
2100                nstats->rx_packets += stats1->rx_packets;
2101                nstats->tx_packets += stats1->tx_packets;
2102                nstats->rx_bytes += stats1->rx_bytes;
2103                nstats->tx_bytes += stats1->tx_bytes;
2104                nstats->multicast += stats1->multicast;
2105                nstats->rx_errors += stats1->rx_errors;
2106                nstats->rx_crc_errors += stats1->rx_crc_errors;
2107                nstats->rx_frame_errors += stats1->rx_frame_errors;
2108                nstats->rx_length_errors += stats1->rx_length_errors;
2109                nstats->rx_missed_errors += stats1->rx_missed_errors;
2110                nstats->rx_over_errors += stats1->rx_over_errors;
2111        }
2112
2113        return nstats;
2114}
2115
2116/* Update promiscuous bit */
2117static void ravb_set_rx_mode(struct net_device *ndev)
2118{
2119        struct ravb_private *priv = netdev_priv(ndev);
2120        unsigned long flags;
2121
2122        spin_lock_irqsave(&priv->lock, flags);
2123        ravb_modify(ndev, ECMR, ECMR_PRM,
2124                    ndev->flags & IFF_PROMISC ? ECMR_PRM : 0);
2125        spin_unlock_irqrestore(&priv->lock, flags);
2126}
2127
2128/* Device close function for Ethernet AVB */
2129static int ravb_close(struct net_device *ndev)
2130{
2131        struct device_node *np = ndev->dev.parent->of_node;
2132        struct ravb_private *priv = netdev_priv(ndev);
2133        const struct ravb_hw_info *info = priv->info;
2134        struct ravb_tstamp_skb *ts_skb, *ts_skb2;
2135
2136        netif_tx_stop_all_queues(ndev);
2137
2138        /* Disable interrupts by clearing the interrupt masks. */
2139        ravb_write(ndev, 0, RIC0);
2140        ravb_write(ndev, 0, RIC2);
2141        ravb_write(ndev, 0, TIC);
2142
2143        /* Stop PTP Clock driver */
2144        if (info->gptp)
2145                ravb_ptp_stop(ndev);
2146
2147        /* Set the config mode to stop the AVB-DMAC's processes */
2148        if (ravb_stop_dma(ndev) < 0)
2149                netdev_err(ndev,
2150                           "device will be stopped after h/w processes are done.\n");
2151
2152        /* Clear the timestamp list */
2153        if (info->gptp || info->ccc_gac) {
2154                list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
2155                        list_del(&ts_skb->list);
2156                        kfree_skb(ts_skb->skb);
2157                        kfree(ts_skb);
2158                }
2159        }
2160
2161        /* PHY disconnect */
2162        if (ndev->phydev) {
2163                phy_stop(ndev->phydev);
2164                phy_disconnect(ndev->phydev);
2165                if (of_phy_is_fixed_link(np))
2166                        of_phy_deregister_fixed_link(np);
2167        }
2168
2169        if (info->multi_irqs) {
2170                free_irq(priv->tx_irqs[RAVB_NC], ndev);
2171                free_irq(priv->rx_irqs[RAVB_NC], ndev);
2172                free_irq(priv->tx_irqs[RAVB_BE], ndev);
2173                free_irq(priv->rx_irqs[RAVB_BE], ndev);
2174                free_irq(priv->emac_irq, ndev);
2175        }
2176        free_irq(ndev->irq, ndev);
2177
2178        if (info->nc_queues)
2179                napi_disable(&priv->napi[RAVB_NC]);
2180        napi_disable(&priv->napi[RAVB_BE]);
2181
2182        /* Free all the skbs in the RX queue and the DMA buffers. */
2183        ravb_ring_free(ndev, RAVB_BE);
2184        if (info->nc_queues)
2185                ravb_ring_free(ndev, RAVB_NC);
2186
2187        return 0;
2188}
2189
2190static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
2191{
2192        struct ravb_private *priv = netdev_priv(ndev);
2193        struct hwtstamp_config config;
2194
2195        config.flags = 0;
2196        config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
2197                                                HWTSTAMP_TX_OFF;
2198        switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) {
2199        case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT:
2200                config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
2201                break;
2202        case RAVB_RXTSTAMP_TYPE_ALL:
2203                config.rx_filter = HWTSTAMP_FILTER_ALL;
2204                break;
2205        default:
2206                config.rx_filter = HWTSTAMP_FILTER_NONE;
2207        }
2208
2209        return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
2210                -EFAULT : 0;
2211}
2212
2213/* Control hardware time stamping */
2214static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
2215{
2216        struct ravb_private *priv = netdev_priv(ndev);
2217        struct hwtstamp_config config;
2218        u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED;
2219        u32 tstamp_tx_ctrl;
2220
2221        if (copy_from_user(&config, req->ifr_data, sizeof(config)))
2222                return -EFAULT;
2223
2224        switch (config.tx_type) {
2225        case HWTSTAMP_TX_OFF:
2226                tstamp_tx_ctrl = 0;
2227                break;
2228        case HWTSTAMP_TX_ON:
2229                tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED;
2230                break;
2231        default:
2232                return -ERANGE;
2233        }
2234
2235        switch (config.rx_filter) {
2236        case HWTSTAMP_FILTER_NONE:
2237                tstamp_rx_ctrl = 0;
2238                break;
2239        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2240                tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
2241                break;
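            /* Any other requested filter is upgraded to
             * HWTSTAMP_FILTER_ALL; the hardware timestamping ABI allows
             * this as long as the effective value is reported back to
             * user space, which is done below.
             */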
2242        default:
2243                config.rx_filter = HWTSTAMP_FILTER_ALL;
2244                tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL;
2245        }
2246
2247        priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
2248        priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
2249
2250        return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
2251                -EFAULT : 0;
2252}
2253
2254/* ioctl to device function */
2255static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
2256{
2257        struct phy_device *phydev = ndev->phydev;
2258
2259        if (!netif_running(ndev))
2260                return -EINVAL;
2261
2262        if (!phydev)
2263                return -ENODEV;
2264
2265        switch (cmd) {
2266        case SIOCGHWTSTAMP:
2267                return ravb_hwtstamp_get(ndev, req);
2268        case SIOCSHWTSTAMP:
2269                return ravb_hwtstamp_set(ndev, req);
2270        }
2271
2272        return phy_mii_ioctl(phydev, req, cmd);
2273}
2274
2275static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
2276{
2277        struct ravb_private *priv = netdev_priv(ndev);
2278
2279        ndev->mtu = new_mtu;
2280
2281        if (netif_running(ndev)) {
2282                synchronize_irq(priv->emac_irq);
2283                ravb_emac_init(ndev);
2284        }
2285
2286        netdev_update_features(ndev);
2287
2288        return 0;
2289}
2290
2291static void ravb_set_rx_csum(struct net_device *ndev, bool enable)
2292{
2293        struct ravb_private *priv = netdev_priv(ndev);
2294        unsigned long flags;
2295
2296        spin_lock_irqsave(&priv->lock, flags);
2297
2298        /* Disable TX and RX */
2299        ravb_rcv_snd_disable(ndev);
2300
2301        /* Modify RX Checksum setting */
2302        ravb_modify(ndev, ECMR, ECMR_RCSC, enable ? ECMR_RCSC : 0);
2303
2304        /* Enable TX and RX */
2305        ravb_rcv_snd_enable(ndev);
2306
2307        spin_unlock_irqrestore(&priv->lock, flags);
2308}
2309
2310static int ravb_set_features_gbeth(struct net_device *ndev,
2311                                   netdev_features_t features)
2312{
2313        /* Placeholder */
2314        return 0;
2315}
2316
2317static int ravb_set_features_rcar(struct net_device *ndev,
2318                                  netdev_features_t features)
2319{
2320        netdev_features_t changed = ndev->features ^ features;
2321
2322        if (changed & NETIF_F_RXCSUM)
2323                ravb_set_rx_csum(ndev, features & NETIF_F_RXCSUM);
2324
2325        ndev->features = features;
2326
2327        return 0;
2328}
2329
2330static int ravb_set_features(struct net_device *ndev,
2331                             netdev_features_t features)
2332{
2333        struct ravb_private *priv = netdev_priv(ndev);
2334        const struct ravb_hw_info *info = priv->info;
2335
2336        return info->set_feature(ndev, features);
2337}
2338
2339static const struct net_device_ops ravb_netdev_ops = {
2340        .ndo_open               = ravb_open,
2341        .ndo_stop               = ravb_close,
2342        .ndo_start_xmit         = ravb_start_xmit,
2343        .ndo_select_queue       = ravb_select_queue,
2344        .ndo_get_stats          = ravb_get_stats,
2345        .ndo_set_rx_mode        = ravb_set_rx_mode,
2346        .ndo_tx_timeout         = ravb_tx_timeout,
2347        .ndo_eth_ioctl          = ravb_do_ioctl,
2348        .ndo_change_mtu         = ravb_change_mtu,
2349        .ndo_validate_addr      = eth_validate_addr,
2350        .ndo_set_mac_address    = eth_mac_addr,
2351        .ndo_set_features       = ravb_set_features,
2352};
2353
2354/* MDIO bus init function */
2355static int ravb_mdio_init(struct ravb_private *priv)
2356{
2357        struct platform_device *pdev = priv->pdev;
2358        struct device *dev = &pdev->dev;
2359        int error;
2360
2361        /* Bitbang init */
2362        priv->mdiobb.ops = &bb_ops;
2363
2364        /* MII controller setting */
2365        priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
2366        if (!priv->mii_bus)
2367                return -ENOMEM;
2368
2369        /* Hook up MII support for ethtool */
2370        priv->mii_bus->name = "ravb_mii";
2371        priv->mii_bus->parent = dev;
2372        snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2373                 pdev->name, pdev->id);
2374
2375        /* Register MDIO bus */
2376        error = of_mdiobus_register(priv->mii_bus, dev->of_node);
2377        if (error)
2378                goto out_free_bus;
2379
2380        return 0;
2381
2382out_free_bus:
2383        free_mdio_bitbang(priv->mii_bus);
2384        return error;
2385}
2386
2387/* MDIO bus release function */
2388static int ravb_mdio_release(struct ravb_private *priv)
2389{
2390        /* Unregister mdio bus */
2391        mdiobus_unregister(priv->mii_bus);
2392
2393        /* Free bitbang info */
2394        free_mdio_bitbang(priv->mii_bus);
2395
2396        return 0;
2397}
2398
2399static const struct ravb_hw_info ravb_gen3_hw_info = {
2400        .rx_ring_free = ravb_rx_ring_free_rcar,
2401        .rx_ring_format = ravb_rx_ring_format_rcar,
2402        .alloc_rx_desc = ravb_alloc_rx_desc_rcar,
2403        .receive = ravb_rx_rcar,
2404        .set_rate = ravb_set_rate_rcar,
2405        .set_feature = ravb_set_features_rcar,
2406        .dmac_init = ravb_dmac_init_rcar,
2407        .emac_init = ravb_emac_init_rcar,
2408        .gstrings_stats = ravb_gstrings_stats,
2409        .gstrings_size = sizeof(ravb_gstrings_stats),
2410        .net_hw_features = NETIF_F_RXCSUM,
2411        .net_features = NETIF_F_RXCSUM,
2412        .stats_len = ARRAY_SIZE(ravb_gstrings_stats),
2413        .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
2414        .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
2415        .rx_max_buf_size = SZ_2K,
2416        .internal_delay = 1,
2417        .tx_counters = 1,
2418        .multi_irqs = 1,
2419        .ccc_gac = 1,
2420        .nc_queues = 1,
2421        .magic_pkt = 1,
2422};
2423
2424static const struct ravb_hw_info ravb_gen2_hw_info = {
2425        .rx_ring_free = ravb_rx_ring_free_rcar,
2426        .rx_ring_format = ravb_rx_ring_format_rcar,
2427        .alloc_rx_desc = ravb_alloc_rx_desc_rcar,
2428        .receive = ravb_rx_rcar,
2429        .set_rate = ravb_set_rate_rcar,
2430        .set_feature = ravb_set_features_rcar,
2431        .dmac_init = ravb_dmac_init_rcar,
2432        .emac_init = ravb_emac_init_rcar,
2433        .gstrings_stats = ravb_gstrings_stats,
2434        .gstrings_size = sizeof(ravb_gstrings_stats),
2435        .net_hw_features = NETIF_F_RXCSUM,
2436        .net_features = NETIF_F_RXCSUM,
2437        .stats_len = ARRAY_SIZE(ravb_gstrings_stats),
2438        .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
2439        .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
2440        .rx_max_buf_size = SZ_2K,
2441        .aligned_tx = 1,
2442        .gptp = 1,
2443        .nc_queues = 1,
2444        .magic_pkt = 1,
2445};
2446
2447static const struct ravb_hw_info gbeth_hw_info = {
2448        .rx_ring_free = ravb_rx_ring_free_gbeth,
2449        .rx_ring_format = ravb_rx_ring_format_gbeth,
2450        .alloc_rx_desc = ravb_alloc_rx_desc_gbeth,
2451        .receive = ravb_rx_gbeth,
2452        .set_rate = ravb_set_rate_gbeth,
2453        .set_feature = ravb_set_features_gbeth,
2454        .dmac_init = ravb_dmac_init_gbeth,
2455        .emac_init = ravb_emac_init_gbeth,
2456        .gstrings_stats = ravb_gstrings_stats_gbeth,
2457        .gstrings_size = sizeof(ravb_gstrings_stats_gbeth),
2458        .stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth),
2459        .max_rx_len = ALIGN(GBETH_RX_BUFF_MAX, RAVB_ALIGN),
2460        .tccr_mask = TCCR_TSRQ0,
2461        .rx_max_buf_size = SZ_8K,
2462        .aligned_tx = 1,
2463        .tx_counters = 1,
2464        .carrier_counters = 1,
2465        .half_duplex = 1,
2466};
2467
2468static const struct of_device_id ravb_match_table[] = {
2469        { .compatible = "renesas,etheravb-r8a7790", .data = &ravb_gen2_hw_info },
2470        { .compatible = "renesas,etheravb-r8a7794", .data = &ravb_gen2_hw_info },
2471        { .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info },
2472        { .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info },
2473        { .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info },
2474        { .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info },
2475        { }
2476};
2477MODULE_DEVICE_TABLE(of, ravb_match_table);
2478
2479static int ravb_set_gti(struct net_device *ndev)
2480{
2481        struct ravb_private *priv = netdev_priv(ndev);
2482        struct device *dev = ndev->dev.parent;
2483        unsigned long rate;
2484        uint64_t inc;
2485
2486        rate = clk_get_rate(priv->clk);
2487        if (!rate)
2488                return -EINVAL;
2489
2490        inc = div64_ul(1000000000ULL << 20, rate);
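            /* inc is the gPTP timer increment per clock cycle in units
             * of 2^-20 ns (20 fractional bits). Illustrative example: a
             * 125 MHz clock gives inc = 8 ns/cycle * 2^20 = 0x800000.
             */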
2491
2492        if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
2493                dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
2494                        inc, GTI_TIV_MIN, GTI_TIV_MAX);
2495                return -EINVAL;
2496        }
2497
2498        ravb_write(ndev, inc, GTI);
2499
2500        return 0;
2501}
2502
2503static void ravb_set_config_mode(struct net_device *ndev)
2504{
2505        struct ravb_private *priv = netdev_priv(ndev);
2506        const struct ravb_hw_info *info = priv->info;
2507
2508        if (info->gptp) {
2509                ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
2510                /* Set CSEL value */
2511                ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
2512        } else if (info->ccc_gac) {
2513                ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG |
2514                            CCC_GAC | CCC_CSEL_HPB);
2515        } else {
2516                ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
2517        }
2518}
2519
2520/* Set tx and rx clock internal delay modes */
2521static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
2522{
2523        struct ravb_private *priv = netdev_priv(ndev);
2524        bool explicit_delay = false;
2525        u32 delay;
2526
2527        if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) {
2528                /* Valid values are 0 and 1800, according to DT bindings */
2529                priv->rxcidm = !!delay;
2530                explicit_delay = true;
2531        }
2532        if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) {
2533                /* Valid values are 0 and 2000, according to DT bindings */
2534                priv->txcidm = !!delay;
2535                explicit_delay = true;
2536        }
2537
2538        if (explicit_delay)
2539                return;
2540
2541        /* Fall back to legacy rgmii-*id behavior */
2542        if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
2543            priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) {
2544                priv->rxcidm = 1;
2545                priv->rgmii_override = 1;
2546        }
2547
2548        if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
2549            priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
2550                priv->txcidm = 1;
2551                priv->rgmii_override = 1;
2552        }
2553}
2554
2555static void ravb_set_delay_mode(struct net_device *ndev)
2556{
2557        struct ravb_private *priv = netdev_priv(ndev);
2558        u32 set = 0;
2559
2560        if (priv->rxcidm)
2561                set |= APSR_RDM;
2562        if (priv->txcidm)
2563                set |= APSR_TDM;
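            /* APSR_RDM and APSR_TDM enable the RX and TX clock internal
             * delay modes, respectively.
             */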
2564        ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set);
2565}
2566
2567static int ravb_probe(struct platform_device *pdev)
2568{
2569        struct device_node *np = pdev->dev.of_node;
2570        const struct ravb_hw_info *info;
2571        struct reset_control *rstc;
2572        struct ravb_private *priv;
2573        struct net_device *ndev;
2574        int error, irq, q;
2575        struct resource *res;
2576        int i;
2577
2578        if (!np) {
2579                dev_err(&pdev->dev,
2580                        "this driver must be instantiated from the device tree\n");
2581                return -EINVAL;
2582        }
2583
2584        rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
2585        if (IS_ERR(rstc))
2586                return dev_err_probe(&pdev->dev, PTR_ERR(rstc),
2587                                     "failed to get cpg reset\n");
2588
2589        ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
2590                                  NUM_TX_QUEUE, NUM_RX_QUEUE);
2591        if (!ndev)
2592                return -ENOMEM;
2593
2594        info = of_device_get_match_data(&pdev->dev);
2595
2596        ndev->features = info->net_features;
2597        ndev->hw_features = info->net_hw_features;
2598
2599        reset_control_deassert(rstc);
2600        pm_runtime_enable(&pdev->dev);
2601        pm_runtime_get_sync(&pdev->dev);
2602
2603        if (info->multi_irqs)
2604                irq = platform_get_irq_byname(pdev, "ch22");
2605        else
2606                irq = platform_get_irq(pdev, 0);
2607        if (irq < 0) {
2608                error = irq;
2609                goto out_release;
2610        }
2611        ndev->irq = irq;
2612
2613        SET_NETDEV_DEV(ndev, &pdev->dev);
2614
2615        priv = netdev_priv(ndev);
2616        priv->info = info;
2617        priv->rstc = rstc;
2618        priv->ndev = ndev;
2619        priv->pdev = pdev;
2620        priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
2621        priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
2622        if (info->nc_queues) {
2623                priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
2624                priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
2625        }
2626
2627        priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
2628        if (IS_ERR(priv->addr)) {
2629                error = PTR_ERR(priv->addr);
2630                goto out_release;
2631        }
2632
2633        /* The Ether-specific entries in the device structure. */
2634        ndev->base_addr = res->start;
2635
2636        spin_lock_init(&priv->lock);
2637        INIT_WORK(&priv->work, ravb_tx_timeout_work);
2638
2639        error = of_get_phy_mode(np, &priv->phy_interface);
2640        if (error && error != -ENODEV)
2641                goto out_release;
2642
2643        priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
2644        priv->avb_link_active_low =
2645                of_property_read_bool(np, "renesas,ether-link-active-low");
2646
2647        if (info->multi_irqs) {
2648                irq = platform_get_irq_byname(pdev, "ch24");
2649                if (irq < 0) {
2650                        error = irq;
2651                        goto out_release;
2652                }
2653                priv->emac_irq = irq;
2654                for (i = 0; i < NUM_RX_QUEUE; i++) {
2655                        irq = platform_get_irq_byname(pdev, ravb_rx_irqs[i]);
2656                        if (irq < 0) {
2657                                error = irq;
2658                                goto out_release;
2659                        }
2660                        priv->rx_irqs[i] = irq;
2661                }
2662                for (i = 0; i < NUM_TX_QUEUE; i++) {
2663                        irq = platform_get_irq_byname(pdev, ravb_tx_irqs[i]);
2664                        if (irq < 0) {
2665                                error = irq;
2666                                goto out_release;
2667                        }
2668                        priv->tx_irqs[i] = irq;
2669                }
2670        }
2671
2672        priv->clk = devm_clk_get(&pdev->dev, NULL);
2673        if (IS_ERR(priv->clk)) {
2674                error = PTR_ERR(priv->clk);
2675                goto out_release;
2676        }
2677
2678        priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk");
2679        if (IS_ERR(priv->refclk)) {
2680                error = PTR_ERR(priv->refclk);
2681                goto out_release;
2682        }
2683        clk_prepare_enable(priv->refclk);
2684
2685        ndev->max_mtu = info->rx_max_buf_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
2686        ndev->min_mtu = ETH_MIN_MTU;
2687
2688        /* FIXME: R-Car Gen2 has a 4-byte alignment restriction for the TX
2689         * buffer. Use two descriptors to handle such a situation: the first
2690         * sends the head of the packet from an aligned bounce buffer, and
2691         * the second sends the now-aligned remainder directly.
2692         */
2693        priv->num_tx_desc = info->aligned_tx ? 2 : 1;
2694
2695        /* Set function */
2696        ndev->netdev_ops = &ravb_netdev_ops;
2697        ndev->ethtool_ops = &ravb_ethtool_ops;
2698
2699        /* Set AVB config mode */
2700        ravb_set_config_mode(ndev);
2701
2702        if (info->gptp || info->ccc_gac) {
2703                /* Set GTI value */
2704                error = ravb_set_gti(ndev);
2705                if (error)
2706                        goto out_disable_refclk;
2707
2708                /* Request GTI loading */
2709                ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
2710        }
2711
2712        if (info->internal_delay) {
2713                ravb_parse_delay_mode(np, ndev);
2714                ravb_set_delay_mode(ndev);
2715        }
2716
2717        /* Allocate descriptor base address table */
2718        priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
2719        priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
2720                                            &priv->desc_bat_dma, GFP_KERNEL);
2721        if (!priv->desc_bat) {
2722                dev_err(&pdev->dev,
2723                        "Cannot allocate desc base address table (size %d bytes)\n",
2724                        priv->desc_bat_size);
2725                error = -ENOMEM;
2726                goto out_disable_refclk;
2727        }
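            /* Mark every descriptor base address table entry as "end of
             * set" until the per-queue descriptor rings are formatted.
             */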
2728        for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
2729                priv->desc_bat[q].die_dt = DT_EOS;
2730        ravb_write(ndev, priv->desc_bat_dma, DBAT);
2731
2732        /* Initialise HW timestamp list */
2733        INIT_LIST_HEAD(&priv->ts_skb_list);
2734
2735        /* Initialise PTP Clock driver */
2736        if (info->ccc_gac)
2737                ravb_ptp_init(ndev, pdev);
2738
2739        /* Debug message level */
2740        priv->msg_enable = RAVB_DEF_MSG_ENABLE;
2741
2742        /* Read and set MAC address */
2743        ravb_read_mac_address(np, ndev);
2744        if (!is_valid_ether_addr(ndev->dev_addr)) {
2745                dev_warn(&pdev->dev,
2746                         "no valid MAC address supplied, using a random one\n");
2747                eth_hw_addr_random(ndev);
2748        }
2749
2750        /* MDIO bus init */
2751        error = ravb_mdio_init(priv);
2752        if (error) {
2753                dev_err(&pdev->dev, "failed to initialize MDIO\n");
2754                goto out_dma_free;
2755        }
2756
2757        netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
2758        if (info->nc_queues)
2759                netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
2760
2761        /* Network device register */
2762        error = register_netdev(ndev);
2763        if (error)
2764                goto out_napi_del;
2765
2766        device_set_wakeup_capable(&pdev->dev, 1);
2767
2768        /* Print device information */
2769        netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
2770                    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
2771
2772        platform_set_drvdata(pdev, ndev);
2773
2774        return 0;
2775
2776out_napi_del:
2777        if (info->nc_queues)
2778                netif_napi_del(&priv->napi[RAVB_NC]);
2779
2780        netif_napi_del(&priv->napi[RAVB_BE]);
2781        ravb_mdio_release(priv);
2782out_dma_free:
2783        dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
2784                          priv->desc_bat_dma);
2785
2786        /* Stop PTP Clock driver */
2787        if (info->ccc_gac)
2788                ravb_ptp_stop(ndev);
2789out_disable_refclk:
2790        clk_disable_unprepare(priv->refclk);
2791out_release:
2792        free_netdev(ndev);
2793
2794        pm_runtime_put(&pdev->dev);
2795        pm_runtime_disable(&pdev->dev);
2796        reset_control_assert(rstc);
2797        return error;
2798}
2799
2800static int ravb_remove(struct platform_device *pdev)
2801{
2802        struct net_device *ndev = platform_get_drvdata(pdev);
2803        struct ravb_private *priv = netdev_priv(ndev);
2804        const struct ravb_hw_info *info = priv->info;
2805
2806        /* Stop PTP Clock driver */
2807        if (info->ccc_gac)
2808                ravb_ptp_stop(ndev);
2809
2810        clk_disable_unprepare(priv->refclk);
2811
2812        dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
2813                          priv->desc_bat_dma);
2814        /* Set reset mode */
2815        ravb_write(ndev, CCC_OPC_RESET, CCC);
2816        pm_runtime_put_sync(&pdev->dev);
2817        unregister_netdev(ndev);
2818        if (info->nc_queues)
2819                netif_napi_del(&priv->napi[RAVB_NC]);
2820        netif_napi_del(&priv->napi[RAVB_BE]);
2821        ravb_mdio_release(priv);
2822        pm_runtime_disable(&pdev->dev);
2823        reset_control_assert(priv->rstc);
2824        free_netdev(ndev);
2825        platform_set_drvdata(pdev, NULL);
2826
2827        return 0;
2828}
2829
2830static int ravb_wol_setup(struct net_device *ndev)
2831{
2832        struct ravb_private *priv = netdev_priv(ndev);
2833        const struct ravb_hw_info *info = priv->info;
2834
2835        /* Disable interrupts by clearing the interrupt masks. */
2836        ravb_write(ndev, 0, RIC0);
2837        ravb_write(ndev, 0, RIC2);
2838        ravb_write(ndev, 0, TIC);
2839
2840        /* Only allow ECI interrupts */
2841        synchronize_irq(priv->emac_irq);
2842        if (info->nc_queues)
2843                napi_disable(&priv->napi[RAVB_NC]);
2844        napi_disable(&priv->napi[RAVB_BE]);
2845        ravb_write(ndev, ECSIPR_MPDIP, ECSIPR);
2846
2847        /* Enable MagicPacket */
2848        ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
2849
2850        return enable_irq_wake(priv->emac_irq);
2851}
2852
2853static int ravb_wol_restore(struct net_device *ndev)
2854{
2855        struct ravb_private *priv = netdev_priv(ndev);
2856        const struct ravb_hw_info *info = priv->info;
2857        int ret;
2858
2859        if (info->nc_queues)
2860                napi_enable(&priv->napi[RAVB_NC]);
2861        napi_enable(&priv->napi[RAVB_BE]);
2862
2863        /* Disable MagicPacket */
2864        ravb_modify(ndev, ECMR, ECMR_MPDE, 0);
2865
2866        ret = ravb_close(ndev);
2867        if (ret < 0)
2868                return ret;
2869
2870        return disable_irq_wake(priv->emac_irq);
2871}
2872
2873static int __maybe_unused ravb_suspend(struct device *dev)
2874{
2875        struct net_device *ndev = dev_get_drvdata(dev);
2876        struct ravb_private *priv = netdev_priv(ndev);
2877        int ret;
2878
2879        if (!netif_running(ndev))
2880                return 0;
2881
2882        netif_device_detach(ndev);
2883
2884        if (priv->wol_enabled)
2885                ret = ravb_wol_setup(ndev);
2886        else
2887                ret = ravb_close(ndev);
2888
2889        return ret;
2890}
2891
2892static int __maybe_unused ravb_resume(struct device *dev)
2893{
2894        struct net_device *ndev = dev_get_drvdata(dev);
2895        struct ravb_private *priv = netdev_priv(ndev);
2896        const struct ravb_hw_info *info = priv->info;
2897        int ret = 0;
2898
2899        /* If WoL is enabled, set reset mode to rearm the WoL logic */
2900        if (priv->wol_enabled)
2901                ravb_write(ndev, CCC_OPC_RESET, CCC);
2902
2903        /* All registers have been reset to their default values. Restore
2904         * all registers that were set up at probe time and reopen the
2905         * device if it was running before the system was suspended.
2906         */
2907
2908        /* Set AVB config mode */
2909        ravb_set_config_mode(ndev);
2910
2911        if (info->gptp || info->ccc_gac) {
2912                /* Set GTI value */
2913                ret = ravb_set_gti(ndev);
2914                if (ret)
2915                        return ret;
2916
2917                /* Request GTI loading */
2918                ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
2919        }
2920
2921        if (info->internal_delay)
2922                ravb_set_delay_mode(ndev);
2923
2924        /* Restore descriptor base address table */
2925        ravb_write(ndev, priv->desc_bat_dma, DBAT);
2926
2927        if (netif_running(ndev)) {
2928                if (priv->wol_enabled) {
2929                        ret = ravb_wol_restore(ndev);
2930                        if (ret)
2931                                return ret;
2932                }
2933                ret = ravb_open(ndev);
2934                if (ret < 0)
2935                        return ret;
2936                netif_device_attach(ndev);
2937        }
2938
2939        return ret;
2940}
2941
2942static int __maybe_unused ravb_runtime_nop(struct device *dev)
2943{
2944        /* Runtime PM callback shared between ->runtime_suspend()
2945         * and ->runtime_resume(). Simply returns success.
2946         *
2947         * This driver re-initializes all registers after
2948         * pm_runtime_get_sync() anyway so there is no need
2949         * to save and restore registers here.
2950         */
2951        return 0;
2952}
2953
2954static const struct dev_pm_ops ravb_dev_pm_ops = {
2955        SET_SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume)
2956        SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL)
2957};
2958
2959static struct platform_driver ravb_driver = {
2960        .probe          = ravb_probe,
2961        .remove         = ravb_remove,
2962        .driver = {
2963                .name   = "ravb",
2964                .pm     = &ravb_dev_pm_ops,
2965                .of_match_table = ravb_match_table,
2966        },
2967};
2968
2969module_platform_driver(ravb_driver);
2970
2971MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai");
2972MODULE_DESCRIPTION("Renesas Ethernet AVB driver");
2973MODULE_LICENSE("GPL v2");
2974