linux/drivers/net/ethernet/ti/cpsw_new.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Texas Instruments Ethernet Switch Driver
   4 *
   5 * Copyright (C) 2019 Texas Instruments
   6 */
   7
   8#include <linux/io.h>
   9#include <linux/clk.h>
  10#include <linux/timer.h>
  11#include <linux/module.h>
  12#include <linux/irqreturn.h>
  13#include <linux/interrupt.h>
  14#include <linux/if_ether.h>
  15#include <linux/etherdevice.h>
  16#include <linux/net_tstamp.h>
  17#include <linux/phy.h>
  18#include <linux/phy/phy.h>
  19#include <linux/delay.h>
  20#include <linux/pinctrl/consumer.h>
  21#include <linux/pm_runtime.h>
  22#include <linux/gpio/consumer.h>
  23#include <linux/of.h>
  24#include <linux/of_mdio.h>
  25#include <linux/of_net.h>
  26#include <linux/of_device.h>
  27#include <linux/if_vlan.h>
  28#include <linux/kmemleak.h>
  29#include <linux/sys_soc.h>
  30
  31#include <net/switchdev.h>
  32#include <net/page_pool.h>
  33#include <net/pkt_cls.h>
  34#include <net/devlink.h>
  35
  36#include "cpsw.h"
  37#include "cpsw_ale.h"
  38#include "cpsw_priv.h"
  39#include "cpsw_sl.h"
  40#include "cpsw_switchdev.h"
  41#include "cpts.h"
  42#include "davinci_cpdma.h"
  43
  44#include <net/pkt_sched.h>
  45
  46static int debug_level;
  47static int ale_ageout = CPSW_ALE_AGEOUT_DEFAULT;
  48static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
  49static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
  50
  51struct cpsw_devlink {
  52        struct cpsw_common *cpsw;
  53};
  54
  55enum cpsw_devlink_param_id {
  56        CPSW_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
  57        CPSW_DL_PARAM_SWITCH_MODE,
  58        CPSW_DL_PARAM_ALE_BYPASS,
  59};
  60
  61/* struct cpsw_common is not needed, kept here for compatibility
  62 * reasons with the old driver
  63 */
  64static int cpsw_slave_index_priv(struct cpsw_common *cpsw,
  65                                 struct cpsw_priv *priv)
  66{
  67        if (priv->emac_port == HOST_PORT_NUM)
  68                return -1;
  69
  70        return priv->emac_port - 1;
  71}
  72
  73static bool cpsw_is_switch_en(struct cpsw_common *cpsw)
  74{
  75        return !cpsw->data.dual_emac;
  76}
  77
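/* Set or clear ALE port 0 unknown-unicast flooding to implement promiscuous
 * mode. Only relevant in dual-EMAC mode; in switch mode this is a no-op.
 */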
  78static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
  79{
  80        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
  81        bool enable_uni = false;
  82        int i;
  83
  84        if (cpsw_is_switch_en(cpsw))
  85                return;
  86
  87        /* Enabling promiscuous mode on one interface affects both
  88         * interfaces, as they share the same underlying hardware
  89         * resource.
  90         */
  91        for (i = 0; i < cpsw->data.slaves; i++)
  92                if (cpsw->slaves[i].ndev &&
  93                    (cpsw->slaves[i].ndev->flags & IFF_PROMISC))
  94                        enable_uni = true;
  95
  96        if (!enable && enable_uni) {
  97                enable = enable_uni;
  98                dev_dbg(cpsw->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
  99        }
 100
 101        if (enable) {
 102                /* Enable unknown unicast, reg/unreg mcast */
 103                cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
 104                                     ALE_P0_UNI_FLOOD, 1);
 105
 106                dev_dbg(cpsw->dev, "promiscuity enabled\n");
 107        } else {
 108                /* Disable unknown unicast */
 109                cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
 110                                     ALE_P0_UNI_FLOOD, 0);
 111                dev_dbg(cpsw->dev, "promiscuity disabled\n");
 112        }
 113}
 114
 115/**
 116 * cpsw_set_mc - add a multicast address to the ALE table, or delete it
 117 * from the table, depending on @add
 118 * @ndev: device to sync
 119 * @addr: address to be added or deleted
 120 * @vid: vlan id, if vid < 0 set/unset address for real device
 121 * @add: add address if the flag is set or remove otherwise
 122 */
 123static int cpsw_set_mc(struct net_device *ndev, const u8 *addr,
 124                       int vid, int add)
 125{
 126        struct cpsw_priv *priv = netdev_priv(ndev);
 127        struct cpsw_common *cpsw = priv->cpsw;
 128        int mask, flags, ret, slave_no;
 129
 130        slave_no = cpsw_slave_index(cpsw, priv);
 131        if (vid < 0)
 132                vid = cpsw->slaves[slave_no].port_vlan;
 133
 134        mask =  ALE_PORT_HOST;
 135        flags = vid ? ALE_VLAN : 0;
 136
 137        if (add)
 138                ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0);
 139        else
 140                ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);
 141
 142        return ret;
 143}
 144
 145static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
 146{
 147        struct addr_sync_ctx *sync_ctx = ctx;
 148        struct netdev_hw_addr *ha;
 149        int found = 0, ret = 0;
 150
 151        if (!vdev || !(vdev->flags & IFF_UP))
 152                return 0;
 153
 154        /* vlan address is relevant if its sync_cnt != 0 */
 155        netdev_for_each_mc_addr(ha, vdev) {
 156                if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
 157                        found = ha->sync_cnt;
 158                        break;
 159                }
 160        }
 161
 162        if (found)
 163                sync_ctx->consumed++;
 164
 165        if (sync_ctx->flush) {
 166                if (!found)
 167                        cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
 168                return 0;
 169        }
 170
 171        if (found)
 172                ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);
 173
 174        return ret;
 175}
 176
 177static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
 178{
 179        struct addr_sync_ctx sync_ctx;
 180        int ret;
 181
 182        sync_ctx.consumed = 0;
 183        sync_ctx.addr = addr;
 184        sync_ctx.ndev = ndev;
 185        sync_ctx.flush = 0;
 186
 187        ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
 188        if (sync_ctx.consumed < num && !ret)
 189                ret = cpsw_set_mc(ndev, addr, -1, 1);
 190
 191        return ret;
 192}
 193
 194static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num)
 195{
 196        struct addr_sync_ctx sync_ctx;
 197
 198        sync_ctx.consumed = 0;
 199        sync_ctx.addr = addr;
 200        sync_ctx.ndev = ndev;
 201        sync_ctx.flush = 1;
 202
 203        vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
 204        if (sync_ctx.consumed == num)
 205                cpsw_set_mc(ndev, addr, -1, 0);
 206
 207        return 0;
 208}
 209
 210static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
 211{
 212        struct addr_sync_ctx *sync_ctx = ctx;
 213        struct netdev_hw_addr *ha;
 214        int found = 0;
 215
 216        if (!vdev || !(vdev->flags & IFF_UP))
 217                return 0;
 218
 219        /* vlan address is relevant if its sync_cnt != 0 */
 220        netdev_for_each_mc_addr(ha, vdev) {
 221                if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
 222                        found = ha->sync_cnt;
 223                        break;
 224                }
 225        }
 226
 227        if (!found)
 228                return 0;
 229
 230        sync_ctx->consumed++;
 231        cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
 232        return 0;
 233}
 234
 235static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
 236{
 237        struct addr_sync_ctx sync_ctx;
 238
 239        sync_ctx.addr = addr;
 240        sync_ctx.ndev = ndev;
 241        sync_ctx.consumed = 0;
 242
 243        vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx);
 244        if (sync_ctx.consumed < num)
 245                cpsw_set_mc(ndev, addr, -1, 0);
 246
 247        return 0;
 248}
 249
 250static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
 251{
 252        struct cpsw_priv *priv = netdev_priv(ndev);
 253        struct cpsw_common *cpsw = priv->cpsw;
 254
 255        if (ndev->flags & IFF_PROMISC) {
 256                /* Enable promiscuous mode */
 257                cpsw_set_promiscious(ndev, true);
 258                cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, priv->emac_port);
 259                return;
 260        }
 261
 262        /* Disable promiscuous mode */
 263        cpsw_set_promiscious(ndev, false);
 264
 265        /* Restore allmulti on vlans if necessary */
 266        cpsw_ale_set_allmulti(cpsw->ale,
 267                              ndev->flags & IFF_ALLMULTI, priv->emac_port);
 268
 269        /* add/remove mcast address either for real netdev or for vlan */
 270        __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
 271                               cpsw_del_mc_addr);
 272}
 273
 274static unsigned int cpsw_rxbuf_total_len(unsigned int len)
 275{
 276        len += CPSW_HEADROOM;
 277        len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 278
 279        return SKB_DATA_ALIGN(len);
 280}
 281
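/* CPDMA RX completion handler. The token is the page-pool page backing the
 * completed descriptor. A replacement page is allocated and requeued to the
 * CPDMA channel, while the received data is either handed to an attached XDP
 * program or wrapped in an skb via build_skb() and passed to the stack.
 */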
 282static void cpsw_rx_handler(void *token, int len, int status)
 283{
 284        struct page *new_page, *page = token;
 285        void *pa = page_address(page);
 286        int headroom = CPSW_HEADROOM_NA;
 287        struct cpsw_meta_xdp *xmeta;
 288        struct cpsw_common *cpsw;
 289        struct net_device *ndev;
 290        int port, ch, pkt_size;
 291        struct cpsw_priv *priv;
 292        struct page_pool *pool;
 293        struct sk_buff *skb;
 294        struct xdp_buff xdp;
 295        int ret = 0;
 296        dma_addr_t dma;
 297
 298        xmeta = pa + CPSW_XMETA_OFFSET;
 299        cpsw = ndev_to_cpsw(xmeta->ndev);
 300        ndev = xmeta->ndev;
 301        pkt_size = cpsw->rx_packet_max;
 302        ch = xmeta->ch;
 303
 304        if (status >= 0) {
 305                port = CPDMA_RX_SOURCE_PORT(status);
 306                if (port)
 307                        ndev = cpsw->slaves[--port].ndev;
 308        }
 309
 310        priv = netdev_priv(ndev);
 311        pool = cpsw->page_pool[ch];
 312
 313        if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
 314                /* In dual emac mode check for all interfaces */
 315                if (cpsw->usage_count && status >= 0) {
 316                        /* The received packet is for an interface that is
 317                         * already down while the other interface is still up
 318                         * and running. Instead of freeing the page, which
 319                         * would reduce the number of RX descriptors in the
 320                         * DMA engine, requeue it back to cpdma.
 321                         */
 322                        new_page = page;
 323                        goto requeue;
 324                }
 325
 326                /* the interface is going down, pages are purged */
 327                page_pool_recycle_direct(pool, page);
 328                return;
 329        }
 330
 331        new_page = page_pool_dev_alloc_pages(pool);
 332        if (unlikely(!new_page)) {
 333                new_page = page;
 334                ndev->stats.rx_dropped++;
 335                goto requeue;
 336        }
 337
 338        if (priv->xdp_prog) {
 339                int size = len;
 340
 341                xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
 342                if (status & CPDMA_RX_VLAN_ENCAP) {
 343                        headroom += CPSW_RX_VLAN_ENCAP_HDR_SIZE;
 344                        size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
 345                }
 346
 347                xdp_prepare_buff(&xdp, pa, headroom, size, false);
 348
 349                ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port, &len);
 350                if (ret != CPSW_XDP_PASS)
 351                        goto requeue;
 352
 353                headroom = xdp.data - xdp.data_hard_start;
 354
 355                /* XDP prog can modify vlan tag, so can't use encap header */
 356                status &= ~CPDMA_RX_VLAN_ENCAP;
 357        }
 358
 359        /* pass skb to netstack if no XDP prog or returned XDP_PASS */
 360        skb = build_skb(pa, cpsw_rxbuf_total_len(pkt_size));
 361        if (!skb) {
 362                ndev->stats.rx_dropped++;
 363                page_pool_recycle_direct(pool, page);
 364                goto requeue;
 365        }
 366
 367        skb->offload_fwd_mark = priv->offload_fwd_mark;
 368        skb_reserve(skb, headroom);
 369        skb_put(skb, len);
 370        skb->dev = ndev;
 371        if (status & CPDMA_RX_VLAN_ENCAP)
 372                cpsw_rx_vlan_encap(skb);
 373        if (priv->rx_ts_enabled)
 374                cpts_rx_timestamp(cpsw->cpts, skb);
 375        skb->protocol = eth_type_trans(skb, ndev);
 376
 377        /* mark skb for recycling */
 378        skb_mark_for_recycle(skb);
 379        netif_receive_skb(skb);
 380
 381        ndev->stats.rx_bytes += len;
 382        ndev->stats.rx_packets++;
 383
 384requeue:
 385        xmeta = page_address(new_page) + CPSW_XMETA_OFFSET;
 386        xmeta->ndev = ndev;
 387        xmeta->ch = ch;
 388
 389        dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM_NA;
 390        ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
 391                                       pkt_size, 0);
 392        if (ret < 0) {
 393                WARN_ON(ret == -ENOMEM);
 394                page_pool_recycle_direct(pool, new_page);
 395        }
 396}
 397
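/* Install the ALE entries needed for a new VLAN: the VLAN itself, a unicast
 * entry for the port MAC address and a multicast entry for broadcast.
 * Already-installed entries are rolled back on failure.
 */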
 398static int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
 399                                   unsigned short vid)
 400{
 401        struct cpsw_common *cpsw = priv->cpsw;
 402        int unreg_mcast_mask = 0;
 403        int mcast_mask;
 404        u32 port_mask;
 405        int ret;
 406
 407        port_mask = (1 << priv->emac_port) | ALE_PORT_HOST;
 408
 409        mcast_mask = ALE_PORT_HOST;
 410        if (priv->ndev->flags & IFF_ALLMULTI)
 411                unreg_mcast_mask = mcast_mask;
 412
 413        ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
 414                                unreg_mcast_mask);
 415        if (ret != 0)
 416                return ret;
 417
 418        ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
 419                                 HOST_PORT_NUM, ALE_VLAN, vid);
 420        if (ret != 0)
 421                goto clean_vid;
 422
 423        ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
 424                                 mcast_mask, ALE_VLAN, vid, 0);
 425        if (ret != 0)
 426                goto clean_vlan_ucast;
 427        return 0;
 428
 429clean_vlan_ucast:
 430        cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
 431                           HOST_PORT_NUM, ALE_VLAN, vid);
 432clean_vid:
 433        cpsw_ale_del_vlan(cpsw->ale, vid, 0);
 434        return ret;
 435}
 436
 437static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
 438                                    __be16 proto, u16 vid)
 439{
 440        struct cpsw_priv *priv = netdev_priv(ndev);
 441        struct cpsw_common *cpsw = priv->cpsw;
 442        int ret, i;
 443
 444        if (cpsw_is_switch_en(cpsw)) {
 445                dev_dbg(cpsw->dev, ".ndo_vlan_rx_add_vid called in switch mode\n");
 446                return 0;
 447        }
 448
 449        if (vid == cpsw->data.default_vlan)
 450                return 0;
 451
 452        ret = pm_runtime_get_sync(cpsw->dev);
 453        if (ret < 0) {
 454                pm_runtime_put_noidle(cpsw->dev);
 455                return ret;
 456        }
 457
 458        /* In dual EMAC, reserved VLAN id should not be used for
 459         * creating VLAN interfaces as this can break the dual
 460         * EMAC port separation
 461         */
 462        for (i = 0; i < cpsw->data.slaves; i++) {
 463                if (cpsw->slaves[i].ndev &&
 464                    vid == cpsw->slaves[i].port_vlan) {
 465                        ret = -EINVAL;
 466                        goto err;
 467                }
 468        }
 469
 470        dev_dbg(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
 471        ret = cpsw_add_vlan_ale_entry(priv, vid);
 472err:
 473        pm_runtime_put(cpsw->dev);
 474        return ret;
 475}
 476
 477static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
 478{
 479        struct cpsw_priv *priv = arg;
 480
 481        if (!vdev || !vid)
 482                return 0;
 483
 484        cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid);
 485        return 0;
 486}
 487
 488/* restore resources after port reset */
 489static void cpsw_restore(struct cpsw_priv *priv)
 490{
 491        struct cpsw_common *cpsw = priv->cpsw;
 492
 493        /* restore vlan configurations */
 494        vlan_for_each(priv->ndev, cpsw_restore_vlans, priv);
 495
 496        /* restore MQPRIO offload */
 497        cpsw_mqprio_resume(&cpsw->slaves[priv->emac_port - 1], priv);
 498
 499        /* restore CBS offload */
 500        cpsw_cbs_resume(&cpsw->slaves[priv->emac_port - 1], priv);
 501}
 502
 503static void cpsw_init_stp_ale_entry(struct cpsw_common *cpsw)
 504{
 505        static const char stpa[] = {0x01, 0x80, 0xc2, 0x0, 0x0, 0x0};
 506
 507        cpsw_ale_add_mcast(cpsw->ale, stpa,
 508                           ALE_PORT_HOST, ALE_SUPER, 0,
 509                           ALE_MCAST_BLOCK_LEARN_FWD);
 510}
 511
 512static void cpsw_init_host_port_switch(struct cpsw_common *cpsw)
 513{
 514        int vlan = cpsw->data.default_vlan;
 515
 516        writel(CPSW_FIFO_NORMAL_MODE, &cpsw->host_port_regs->tx_in_ctl);
 517
 518        writel(vlan, &cpsw->host_port_regs->port_vlan);
 519
 520        cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
 521                          ALE_ALL_PORTS, ALE_ALL_PORTS,
 522                          ALE_PORT_1 | ALE_PORT_2);
 523
 524        cpsw_init_stp_ale_entry(cpsw);
 525
 526        cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 1);
 527        dev_dbg(cpsw->dev, "Set P0_UNI_FLOOD\n");
 528        cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 0);
 529}
 530
 531static void cpsw_init_host_port_dual_mac(struct cpsw_common *cpsw)
 532{
 533        int vlan = cpsw->data.default_vlan;
 534
 535        writel(CPSW_FIFO_DUAL_MAC_MODE, &cpsw->host_port_regs->tx_in_ctl);
 536
 537        cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 0);
 538        dev_dbg(cpsw->dev, "unset P0_UNI_FLOOD\n");
 539
 540        writel(vlan, &cpsw->host_port_regs->port_vlan);
 541
 542        cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
 543        /* learning makes no sense in dual_mac mode */
 544        cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 1);
 545}
 546
 547static void cpsw_init_host_port(struct cpsw_priv *priv)
 548{
 549        struct cpsw_common *cpsw = priv->cpsw;
 550        u32 control_reg;
 551
 552        /* soft reset the controller and initialize ale */
 553        soft_reset("cpsw", &cpsw->regs->soft_reset);
 554        cpsw_ale_start(cpsw->ale);
 555
 556        /* switch to vlan aware mode */
 557        cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
 558                             CPSW_ALE_VLAN_AWARE);
 559        control_reg = readl(&cpsw->regs->control);
 560        control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP;
 561        writel(control_reg, &cpsw->regs->control);
 562
 563        /* setup host port priority mapping */
 564        writel_relaxed(CPDMA_TX_PRIORITY_MAP,
 565                       &cpsw->host_port_regs->cpdma_tx_pri_map);
 566        writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);
 567
 568        /* disable priority elevation */
 569        writel_relaxed(0, &cpsw->regs->ptype);
 570
 571        /* enable statistics collection on all ports */
 572        writel_relaxed(0x7, &cpsw->regs->stat_port_en);
 573
 574        /* Enable internal fifo flow control */
 575        writel(0x7, &cpsw->regs->flow_control);
 576
 577        if (cpsw_is_switch_en(cpsw))
 578                cpsw_init_host_port_switch(cpsw);
 579        else
 580                cpsw_init_host_port_dual_mac(cpsw);
 581
 582        cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
 583                             ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
 584}
 585
 586static void cpsw_port_add_dual_emac_def_ale_entries(struct cpsw_priv *priv,
 587                                                    struct cpsw_slave *slave)
 588{
 589        u32 port_mask = 1 << priv->emac_port | ALE_PORT_HOST;
 590        struct cpsw_common *cpsw = priv->cpsw;
 591        u32 reg;
 592
 593        reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
 594               CPSW2_PORT_VLAN;
 595        slave_write(slave, slave->port_vlan, reg);
 596
 597        cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
 598                          port_mask, port_mask, 0);
 599        cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
 600                           ALE_PORT_HOST, ALE_VLAN, slave->port_vlan,
 601                           ALE_MCAST_FWD);
 602        cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
 603                           HOST_PORT_NUM, ALE_VLAN |
 604                           ALE_SECURE, slave->port_vlan);
 605        cpsw_ale_control_set(cpsw->ale, priv->emac_port,
 606                             ALE_PORT_DROP_UNKNOWN_VLAN, 1);
 607        /* learning makes no sense in dual_mac mode */
 608        cpsw_ale_control_set(cpsw->ale, priv->emac_port,
 609                             ALE_PORT_NOLEARN, 1);
 610}
 611
 612static void cpsw_port_add_switch_def_ale_entries(struct cpsw_priv *priv,
 613                                                 struct cpsw_slave *slave)
 614{
 615        u32 port_mask = 1 << priv->emac_port | ALE_PORT_HOST;
 616        struct cpsw_common *cpsw = priv->cpsw;
 617        u32 reg;
 618
 619        cpsw_ale_control_set(cpsw->ale, priv->emac_port,
 620                             ALE_PORT_DROP_UNKNOWN_VLAN, 0);
 621        cpsw_ale_control_set(cpsw->ale, priv->emac_port,
 622                             ALE_PORT_NOLEARN, 0);
 623        /* Disabling SA_UPDATE is required to make STP work; without this
 624         * setting, host MAC addresses will jump between ports.
 625         * As per the TRM, a MAC address can be defined as unicast supervisory
 626         * (super) by setting both (ALE_BLOCKED | ALE_SECURE), which should
 627         * prevent SA_UPDATE, but the HW seems to work incorrectly and setting
 628         * ALE_SECURE causes STP packets to be dropped due to the ingress filter:
 629         *      if (source address found) and (secure) and
 630         *         (receive port number != port_number)
 631         *         then discard the packet
 632         */
 633        cpsw_ale_control_set(cpsw->ale, priv->emac_port,
 634                             ALE_PORT_NO_SA_UPDATE, 1);
 635
 636        cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
 637                           port_mask, ALE_VLAN, slave->port_vlan,
 638                           ALE_MCAST_FWD_2);
 639        cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
 640                           HOST_PORT_NUM, ALE_VLAN, slave->port_vlan);
 641
 642        reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
 643               CPSW2_PORT_VLAN;
 644        slave_write(slave, slave->port_vlan, reg);
 645}
 646
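/* phylib adjust_link callback: translate the PHY's speed/duplex/pause state
 * into mac_control bits for the slave MAC and enable or disable forwarding
 * for this port in the ALE accordingly.
 */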
 647static void cpsw_adjust_link(struct net_device *ndev)
 648{
 649        struct cpsw_priv *priv = netdev_priv(ndev);
 650        struct cpsw_common *cpsw = priv->cpsw;
 651        struct cpsw_slave *slave;
 652        struct phy_device *phy;
 653        u32 mac_control = 0;
 654
 655        slave = &cpsw->slaves[priv->emac_port - 1];
 656        phy = slave->phy;
 657
 658        if (!phy)
 659                return;
 660
 661        if (phy->link) {
 662                mac_control = CPSW_SL_CTL_GMII_EN;
 663
 664                if (phy->speed == 1000)
 665                        mac_control |= CPSW_SL_CTL_GIG;
 666                if (phy->duplex)
 667                        mac_control |= CPSW_SL_CTL_FULLDUPLEX;
 668
 669                /* set speed_in input in case RMII mode is used in 100Mbps */
 670                if (phy->speed == 100)
 671                        mac_control |= CPSW_SL_CTL_IFCTL_A;
 672                /* in band mode only works in 10Mbps RGMII mode */
 673                else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
 674                        mac_control |= CPSW_SL_CTL_EXT_EN; /* In Band mode */
 675
 676                if (priv->rx_pause)
 677                        mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
 678
 679                if (priv->tx_pause)
 680                        mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
 681
 682                if (mac_control != slave->mac_control)
 683                        cpsw_sl_ctl_set(slave->mac_sl, mac_control);
 684
 685                /* enable forwarding */
 686                cpsw_ale_control_set(cpsw->ale, priv->emac_port,
 687                                     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
 688
 689                netif_tx_wake_all_queues(ndev);
 690
 691                if (priv->shp_cfg_speed &&
 692                    priv->shp_cfg_speed != slave->phy->speed &&
 693                    !cpsw_shp_is_off(priv))
 694                        dev_warn(priv->dev, "Speed was changed, CBS shaper speeds are changed!");
 695        } else {
 696                netif_tx_stop_all_queues(ndev);
 697
 698                mac_control = 0;
 699                /* disable forwarding */
 700                cpsw_ale_control_set(cpsw->ale, priv->emac_port,
 701                                     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
 702
 703                cpsw_sl_wait_for_idle(slave->mac_sl, 100);
 704
 705                cpsw_sl_ctl_reset(slave->mac_sl);
 706        }
 707
 708        if (mac_control != slave->mac_control)
 709                phy_print_status(phy);
 710
 711        slave->mac_control = mac_control;
 712
 713        if (phy->link && cpsw_need_resplit(cpsw))
 714                cpsw_split_res(cpsw);
 715}
 716
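/* Bring up one slave port: reset the MAC, program priority mappings, FIFO
 * block limits, RX max length and MAC address, install the default ALE
 * entries for the current mode, then connect and start the PHY.
 */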
 717static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
 718{
 719        struct cpsw_common *cpsw = priv->cpsw;
 720        struct phy_device *phy;
 721
 722        cpsw_sl_reset(slave->mac_sl, 100);
 723        cpsw_sl_ctl_reset(slave->mac_sl);
 724
 725        /* setup priority mapping */
 726        cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_PRI_MAP,
 727                          RX_PRIORITY_MAPPING);
 728
 729        switch (cpsw->version) {
 730        case CPSW_VERSION_1:
 731                slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
 732                /* Increase RX FIFO size to 5 for supporting full-duplex
 733                 * flow control mode
 734                 */
 735                slave_write(slave,
 736                            (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
 737                            CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
 738                break;
 739        case CPSW_VERSION_2:
 740        case CPSW_VERSION_3:
 741        case CPSW_VERSION_4:
 742                slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
 743                /* Increase RX FIFO size to 5 for supporting full-duplex
 744                 * flow control mode
 745                 */
 746                slave_write(slave,
 747                            (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
 748                            CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
 749                break;
 750        }
 751
 752        /* setup max packet size, and mac address */
 753        cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_MAXLEN,
 754                          cpsw->rx_packet_max);
 755        cpsw_set_slave_mac(slave, priv);
 756
 757        slave->mac_control = 0; /* no link yet */
 758
 759        if (cpsw_is_switch_en(cpsw))
 760                cpsw_port_add_switch_def_ale_entries(priv, slave);
 761        else
 762                cpsw_port_add_dual_emac_def_ale_entries(priv, slave);
 763
 764        if (!slave->data->phy_node)
 765                dev_err(priv->dev, "no phy found on slave %d\n",
 766                        slave->slave_num);
 767        phy = of_phy_connect(priv->ndev, slave->data->phy_node,
 768                             &cpsw_adjust_link, 0, slave->data->phy_if);
 769        if (!phy) {
 770                dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n",
 771                        slave->data->phy_node,
 772                        slave->slave_num);
 773                return;
 774        }
 775        slave->phy = phy;
 776
 777        phy_attached_info(slave->phy);
 778
 779        phy_start(slave->phy);
 780
 781        /* Configure GMII_SEL register */
 782        phy_set_mode_ext(slave->data->ifphy, PHY_MODE_ETHERNET,
 783                         slave->data->phy_if);
 784}
 785
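/* ndo_stop: stop the PHY and TX queues of this port and, when the last user
 * is going away, shut down the shared NAPI, CPTS, CPDMA and ALE resources.
 */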
 786static int cpsw_ndo_stop(struct net_device *ndev)
 787{
 788        struct cpsw_priv *priv = netdev_priv(ndev);
 789        struct cpsw_common *cpsw = priv->cpsw;
 790        struct cpsw_slave *slave;
 791
 792        cpsw_info(priv, ifdown, "shutting down ndev\n");
 793        slave = &cpsw->slaves[priv->emac_port - 1];
 794        if (slave->phy)
 795                phy_stop(slave->phy);
 796
 797        netif_tx_stop_all_queues(priv->ndev);
 798
 799        if (slave->phy) {
 800                phy_disconnect(slave->phy);
 801                slave->phy = NULL;
 802        }
 803
 804        __hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc);
 805
 806        if (cpsw->usage_count <= 1) {
 807                napi_disable(&cpsw->napi_rx);
 808                napi_disable(&cpsw->napi_tx);
 809                cpts_unregister(cpsw->cpts);
 810                cpsw_intr_disable(cpsw);
 811                cpdma_ctlr_stop(cpsw->dma);
 812                cpsw_ale_stop(cpsw->ale);
 813                cpsw_destroy_xdp_rxqs(cpsw);
 814        }
 815
 816        if (cpsw_need_resplit(cpsw))
 817                cpsw_split_res(cpsw);
 818
 819        cpsw->usage_count--;
 820        pm_runtime_put_sync(cpsw->dev);
 821        return 0;
 822}
 823
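/* ndo_open: the first opened port initializes the shared host port, XDP
 * rxqs, RX channels, CPTS and NAPI contexts; every open sets up its own
 * slave port and restores per-port VLAN, MQPRIO and CBS configuration.
 */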
 824static int cpsw_ndo_open(struct net_device *ndev)
 825{
 826        struct cpsw_priv *priv = netdev_priv(ndev);
 827        struct cpsw_common *cpsw = priv->cpsw;
 828        int ret;
 829
 830        dev_info(priv->dev, "starting ndev. mode: %s\n",
 831                 cpsw_is_switch_en(cpsw) ? "switch" : "dual_mac");
 832        ret = pm_runtime_get_sync(cpsw->dev);
 833        if (ret < 0) {
 834                pm_runtime_put_noidle(cpsw->dev);
 835                return ret;
 836        }
 837
 838        /* Notify the stack of the actual queue counts. */
 839        ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
 840        if (ret) {
 841                dev_err(priv->dev, "cannot set real number of tx queues\n");
 842                goto pm_cleanup;
 843        }
 844
 845        ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
 846        if (ret) {
 847                dev_err(priv->dev, "cannot set real number of rx queues\n");
 848                goto pm_cleanup;
 849        }
 850
 851        /* Initialize host and slave ports */
 852        if (!cpsw->usage_count)
 853                cpsw_init_host_port(priv);
 854        cpsw_slave_open(&cpsw->slaves[priv->emac_port - 1], priv);
 855
 856        /* initialize shared resources for every ndev */
 857        if (!cpsw->usage_count) {
 858                /* create rxqs for both interfaces in dual mac mode as they
 859                 * share one pool and must be destroyed together when unused.
 860                 */
 861                ret = cpsw_create_xdp_rxqs(cpsw);
 862                if (ret < 0)
 863                        goto err_cleanup;
 864
 865                ret = cpsw_fill_rx_channels(priv);
 866                if (ret < 0)
 867                        goto err_cleanup;
 868
 869                if (cpsw->cpts) {
 870                        if (cpts_register(cpsw->cpts))
 871                                dev_err(priv->dev, "error registering cpts device\n");
 872                        else
 873                                writel(0x10, &cpsw->wr_regs->misc_en);
 874                }
 875
 876                napi_enable(&cpsw->napi_rx);
 877                napi_enable(&cpsw->napi_tx);
 878
 879                if (cpsw->tx_irq_disabled) {
 880                        cpsw->tx_irq_disabled = false;
 881                        enable_irq(cpsw->irqs_table[1]);
 882                }
 883
 884                if (cpsw->rx_irq_disabled) {
 885                        cpsw->rx_irq_disabled = false;
 886                        enable_irq(cpsw->irqs_table[0]);
 887                }
 888        }
 889
 890        cpsw_restore(priv);
 891
 892        /* Enable Interrupt pacing if configured */
 893        if (cpsw->coal_intvl != 0) {
 894                struct ethtool_coalesce coal;
 895
 896                coal.rx_coalesce_usecs = cpsw->coal_intvl;
 897                cpsw_set_coalesce(ndev, &coal, NULL, NULL);
 898        }
 899
 900        cpdma_ctlr_start(cpsw->dma);
 901        cpsw_intr_enable(cpsw);
 902        cpsw->usage_count++;
 903
 904        return 0;
 905
 906err_cleanup:
 907        cpsw_ndo_stop(ndev);
 908
 909pm_cleanup:
 910        pm_runtime_put_sync(cpsw->dev);
 911        return ret;
 912}
 913
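/* ndo_start_xmit: pad short frames, optionally request a TX timestamp,
 * submit the skb to the CPDMA channel backing the selected queue and stop
 * the queue when the channel runs out of free descriptors.
 */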
 914static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
 915                                       struct net_device *ndev)
 916{
 917        struct cpsw_priv *priv = netdev_priv(ndev);
 918        struct cpsw_common *cpsw = priv->cpsw;
 919        struct cpts *cpts = cpsw->cpts;
 920        struct netdev_queue *txq;
 921        struct cpdma_chan *txch;
 922        int ret, q_idx;
 923
 924        if (skb_put_padto(skb, READ_ONCE(priv->tx_packet_min))) {
 925                cpsw_err(priv, tx_err, "packet pad failed\n");
 926                ndev->stats.tx_dropped++;
 927                return NET_XMIT_DROP;
 928        }
 929
 930        if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
 931            priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
 932                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 933
 934        q_idx = skb_get_queue_mapping(skb);
 935        if (q_idx >= cpsw->tx_ch_num)
 936                q_idx = q_idx % cpsw->tx_ch_num;
 937
 938        txch = cpsw->txv[q_idx].ch;
 939        txq = netdev_get_tx_queue(ndev, q_idx);
 940        skb_tx_timestamp(skb);
 941        ret = cpdma_chan_submit(txch, skb, skb->data, skb->len,
 942                                priv->emac_port);
 943        if (unlikely(ret != 0)) {
 944                cpsw_err(priv, tx_err, "desc submit failed\n");
 945                goto fail;
 946        }
 947
 948        /* If there are no free TX descriptors left then we need to
 949         * tell the kernel to stop sending us TX frames.
 950         */
 951        if (unlikely(!cpdma_check_free_tx_desc(txch))) {
 952                netif_tx_stop_queue(txq);
 953
 954                /* Barrier, so that stop_queue is visible to other CPUs */
 955                smp_mb__after_atomic();
 956
 957                if (cpdma_check_free_tx_desc(txch))
 958                        netif_tx_wake_queue(txq);
 959        }
 960
 961        return NETDEV_TX_OK;
 962fail:
 963        ndev->stats.tx_dropped++;
 964        netif_tx_stop_queue(txq);
 965
 966        /* Barrier, so that stop_queue is visible to other CPUs */
 967        smp_mb__after_atomic();
 968
 969        if (cpdma_check_free_tx_desc(txch))
 970                netif_tx_wake_queue(txq);
 971
 972        return NETDEV_TX_BUSY;
 973}
 974
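/* ndo_set_mac_address: replace the ALE host unicast entry for the port VLAN
 * and program the new address into the slave MAC registers.
 */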
 975static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
 976{
 977        struct sockaddr *addr = (struct sockaddr *)p;
 978        struct cpsw_priv *priv = netdev_priv(ndev);
 979        struct cpsw_common *cpsw = priv->cpsw;
 980        int ret, slave_no;
 981        int flags = 0;
 982        u16 vid = 0;
 983
 984        slave_no = cpsw_slave_index(cpsw, priv);
 985        if (!is_valid_ether_addr(addr->sa_data))
 986                return -EADDRNOTAVAIL;
 987
 988        ret = pm_runtime_get_sync(cpsw->dev);
 989        if (ret < 0) {
 990                pm_runtime_put_noidle(cpsw->dev);
 991                return ret;
 992        }
 993
 994        vid = cpsw->slaves[slave_no].port_vlan;
 995        flags = ALE_VLAN | ALE_SECURE;
 996
 997        cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
 998                           flags, vid);
 999        cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
1000                           flags, vid);
1001
1002        ether_addr_copy(priv->mac_addr, addr->sa_data);
1003        eth_hw_addr_set(ndev, priv->mac_addr);
1004        cpsw_set_slave_mac(&cpsw->slaves[slave_no], priv);
1005
1006        pm_runtime_put(cpsw->dev);
1007
1008        return 0;
1009}
1010
1011static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
1012                                     __be16 proto, u16 vid)
1013{
1014        struct cpsw_priv *priv = netdev_priv(ndev);
1015        struct cpsw_common *cpsw = priv->cpsw;
1016        int ret;
1017        int i;
1018
1019        if (cpsw_is_switch_en(cpsw)) {
1020                dev_dbg(cpsw->dev, "ndo del vlan is called in switch mode\n");
1021                return 0;
1022        }
1023
1024        if (vid == cpsw->data.default_vlan)
1025                return 0;
1026
1027        ret = pm_runtime_get_sync(cpsw->dev);
1028        if (ret < 0) {
1029                pm_runtime_put_noidle(cpsw->dev);
1030                return ret;
1031        }
1032
1033        /* reset the return code as pm_runtime_get_sync() can return
1034         * non zero values as well.
1035         */
1036        ret = 0;
1037        for (i = 0; i < cpsw->data.slaves; i++) {
1038                if (cpsw->slaves[i].ndev &&
1039                    vid == cpsw->slaves[i].port_vlan) {
1040                        ret = -EINVAL;
1041                        goto err;
1042                }
1043        }
1044
1045        dev_dbg(priv->dev, "removing vlanid %d from vlan filter\n", vid);
1046        ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
1047        if (ret)
1048                dev_err(priv->dev, "cpsw_ale_del_vlan() failed: ret %d\n", ret);
1049        ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
1050                                 HOST_PORT_NUM, ALE_VLAN, vid);
1051        if (ret)
1052                dev_err(priv->dev, "cpsw_ale_del_ucast() failed: ret %d\n",
1053                        ret);
1054        ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
1055                                 0, ALE_VLAN, vid);
1056        if (ret)
1057                dev_err(priv->dev, "cpsw_ale_del_mcast failed. ret %d\n",
1058                        ret);
1059        cpsw_ale_flush_multicast(cpsw->ale, ALE_PORT_HOST, vid);
1060        ret = 0;
1061err:
1062        pm_runtime_put(cpsw->dev);
1063        return ret;
1064}
1065
1066static int cpsw_ndo_get_phys_port_name(struct net_device *ndev, char *name,
1067                                       size_t len)
1068{
1069        struct cpsw_priv *priv = netdev_priv(ndev);
1070        int err;
1071
1072        err = snprintf(name, len, "p%d", priv->emac_port);
1073
1074        if (err >= len)
1075                return -EINVAL;
1076
1077        return 0;
1078}
1079
1080#ifdef CONFIG_NET_POLL_CONTROLLER
1081static void cpsw_ndo_poll_controller(struct net_device *ndev)
1082{
1083        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1084
1085        cpsw_intr_disable(cpsw);
1086        cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
1087        cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
1088        cpsw_intr_enable(cpsw);
1089}
1090#endif
1091
1092static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
1093                             struct xdp_frame **frames, u32 flags)
1094{
1095        struct cpsw_priv *priv = netdev_priv(ndev);
1096        struct xdp_frame *xdpf;
1097        int i, nxmit = 0;
1098
1099        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1100                return -EINVAL;
1101
1102        for (i = 0; i < n; i++) {
1103                xdpf = frames[i];
1104                if (xdpf->len < READ_ONCE(priv->tx_packet_min))
1105                        break;
1106
1107                if (cpsw_xdp_tx_frame(priv, xdpf, NULL, priv->emac_port))
1108                        break;
1109                nxmit++;
1110        }
1111
1112        return nxmit;
1113}
1114
1115static int cpsw_get_port_parent_id(struct net_device *ndev,
1116                                   struct netdev_phys_item_id *ppid)
1117{
1118        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1119
1120        ppid->id_len = sizeof(cpsw->base_mac);
1121        memcpy(&ppid->id, &cpsw->base_mac, ppid->id_len);
1122
1123        return 0;
1124}
1125
1126static const struct net_device_ops cpsw_netdev_ops = {
1127        .ndo_open               = cpsw_ndo_open,
1128        .ndo_stop               = cpsw_ndo_stop,
1129        .ndo_start_xmit         = cpsw_ndo_start_xmit,
1130        .ndo_set_mac_address    = cpsw_ndo_set_mac_address,
1131        .ndo_eth_ioctl          = cpsw_ndo_ioctl,
1132        .ndo_validate_addr      = eth_validate_addr,
1133        .ndo_tx_timeout         = cpsw_ndo_tx_timeout,
1134        .ndo_set_rx_mode        = cpsw_ndo_set_rx_mode,
1135        .ndo_set_tx_maxrate     = cpsw_ndo_set_tx_maxrate,
1136#ifdef CONFIG_NET_POLL_CONTROLLER
1137        .ndo_poll_controller    = cpsw_ndo_poll_controller,
1138#endif
1139        .ndo_vlan_rx_add_vid    = cpsw_ndo_vlan_rx_add_vid,
1140        .ndo_vlan_rx_kill_vid   = cpsw_ndo_vlan_rx_kill_vid,
1141        .ndo_setup_tc           = cpsw_ndo_setup_tc,
1142        .ndo_get_phys_port_name = cpsw_ndo_get_phys_port_name,
1143        .ndo_bpf                = cpsw_ndo_bpf,
1144        .ndo_xdp_xmit           = cpsw_ndo_xdp_xmit,
1145        .ndo_get_port_parent_id = cpsw_get_port_parent_id,
1146};
1147
1148static void cpsw_get_drvinfo(struct net_device *ndev,
1149                             struct ethtool_drvinfo *info)
1150{
1151        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1152        struct platform_device *pdev;
1153
1154        pdev = to_platform_device(cpsw->dev);
1155        strlcpy(info->driver, "cpsw-switch", sizeof(info->driver));
1156        strlcpy(info->version, "2.0", sizeof(info->version));
1157        strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
1158}
1159
1160static int cpsw_set_pauseparam(struct net_device *ndev,
1161                               struct ethtool_pauseparam *pause)
1162{
1163        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1164        struct cpsw_priv *priv = netdev_priv(ndev);
1165        int slave_no;
1166
1167        slave_no = cpsw_slave_index(cpsw, priv);
1168        if (!cpsw->slaves[slave_no].phy)
1169                return -EINVAL;
1170
1171        if (!phy_validate_pause(cpsw->slaves[slave_no].phy, pause))
1172                return -EINVAL;
1173
1174        priv->rx_pause = pause->rx_pause ? true : false;
1175        priv->tx_pause = pause->tx_pause ? true : false;
1176
1177        phy_set_asym_pause(cpsw->slaves[slave_no].phy,
1178                           priv->rx_pause, priv->tx_pause);
1179
1180        return 0;
1181}
1182
1183static int cpsw_set_channels(struct net_device *ndev,
1184                             struct ethtool_channels *chs)
1185{
1186        return cpsw_set_channels_common(ndev, chs, cpsw_rx_handler);
1187}
1188
1189static const struct ethtool_ops cpsw_ethtool_ops = {
1190        .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
1191        .get_drvinfo            = cpsw_get_drvinfo,
1192        .get_msglevel           = cpsw_get_msglevel,
1193        .set_msglevel           = cpsw_set_msglevel,
1194        .get_link               = ethtool_op_get_link,
1195        .get_ts_info            = cpsw_get_ts_info,
1196        .get_coalesce           = cpsw_get_coalesce,
1197        .set_coalesce           = cpsw_set_coalesce,
1198        .get_sset_count         = cpsw_get_sset_count,
1199        .get_strings            = cpsw_get_strings,
1200        .get_ethtool_stats      = cpsw_get_ethtool_stats,
1201        .get_pauseparam         = cpsw_get_pauseparam,
1202        .set_pauseparam         = cpsw_set_pauseparam,
1203        .get_wol                = cpsw_get_wol,
1204        .set_wol                = cpsw_set_wol,
1205        .get_regs_len           = cpsw_get_regs_len,
1206        .get_regs               = cpsw_get_regs,
1207        .begin                  = cpsw_ethtool_op_begin,
1208        .complete               = cpsw_ethtool_op_complete,
1209        .get_channels           = cpsw_get_channels,
1210        .set_channels           = cpsw_set_channels,
1211        .get_link_ksettings     = cpsw_get_link_ksettings,
1212        .set_link_ksettings     = cpsw_set_link_ksettings,
1213        .get_eee                = cpsw_get_eee,
1214        .set_eee                = cpsw_set_eee,
1215        .nway_reset             = cpsw_nway_reset,
1216        .get_ringparam          = cpsw_get_ringparam,
1217        .set_ringparam          = cpsw_set_ringparam,
1218};
1219
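/* Parse the "ethernet-ports" subnode: one child per slave port carrying the
 * port id ("reg"), a PHY handle or fixed-link, the phy-mode, the MAC address
 * and the reserved dual-EMAC port VLAN ("ti,dual-emac-pvid").
 */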
1220static int cpsw_probe_dt(struct cpsw_common *cpsw)
1221{
1222        struct device_node *node = cpsw->dev->of_node, *tmp_node, *port_np;
1223        struct cpsw_platform_data *data = &cpsw->data;
1224        struct device *dev = cpsw->dev;
1225        int ret;
1226        u32 prop;
1227
1228        if (!node)
1229                return -EINVAL;
1230
1231        tmp_node = of_get_child_by_name(node, "ethernet-ports");
1232        if (!tmp_node)
1233                return -ENOENT;
1234        data->slaves = of_get_child_count(tmp_node);
1235        if (data->slaves != CPSW_SLAVE_PORTS_NUM) {
1236                of_node_put(tmp_node);
1237                return -ENOENT;
1238        }
1239
1240        data->active_slave = 0;
1241        data->channels = CPSW_MAX_QUEUES;
1242        data->dual_emac = true;
1243        data->bd_ram_size = CPSW_BD_RAM_SIZE;
1244        data->mac_control = 0;
1245
1246        data->slave_data = devm_kcalloc(dev, CPSW_SLAVE_PORTS_NUM,
1247                                        sizeof(struct cpsw_slave_data),
1248                                        GFP_KERNEL);
1249        if (!data->slave_data) {
1250                of_node_put(tmp_node);
1251                return -ENOMEM;
1252        }
1253
1254        /* Populate all the child nodes here...
1255         */
1256        ret = devm_of_platform_populate(dev);
1257        /* We do not want to force this, as in some cases may not have child */
1258        /* We do not want to force this, as in some cases we may not have child nodes */
1259                dev_warn(dev, "Doesn't have any child node\n");
1260
1261        for_each_child_of_node(tmp_node, port_np) {
1262                struct cpsw_slave_data *slave_data;
1263                u32 port_id;
1264
1265                ret = of_property_read_u32(port_np, "reg", &port_id);
1266                if (ret < 0) {
1267                        dev_err(dev, "%pOF error reading port_id %d\n",
1268                                port_np, ret);
1269                        goto err_node_put;
1270                }
1271
1272                if (!port_id || port_id > CPSW_SLAVE_PORTS_NUM) {
1273                        dev_err(dev, "%pOF has invalid port_id %u\n",
1274                                port_np, port_id);
1275                        ret = -EINVAL;
1276                        goto err_node_put;
1277                }
1278
1279                slave_data = &data->slave_data[port_id - 1];
1280
1281                slave_data->disabled = !of_device_is_available(port_np);
1282                if (slave_data->disabled)
1283                        continue;
1284
1285                slave_data->slave_node = port_np;
1286                slave_data->ifphy = devm_of_phy_get(dev, port_np, NULL);
1287                if (IS_ERR(slave_data->ifphy)) {
1288                        ret = PTR_ERR(slave_data->ifphy);
1289                        dev_err(dev, "%pOF: Error retrieving port phy: %d\n",
1290                                port_np, ret);
1291                        goto err_node_put;
1292                }
1293
1294                if (of_phy_is_fixed_link(port_np)) {
1295                        ret = of_phy_register_fixed_link(port_np);
1296                        if (ret) {
1297                                if (ret != -EPROBE_DEFER)
1298                                        dev_err(dev, "%pOF failed to register fixed-link phy: %d\n",
1299                                                port_np, ret);
1300                                goto err_node_put;
1301                        }
1302                        slave_data->phy_node = of_node_get(port_np);
1303                } else {
1304                        slave_data->phy_node =
1305                                of_parse_phandle(port_np, "phy-handle", 0);
1306                }
1307
1308                if (!slave_data->phy_node) {
1309                        dev_err(dev, "%pOF no phy found\n", port_np);
1310                        ret = -ENODEV;
1311                        goto err_node_put;
1312                }
1313
1314                ret = of_get_phy_mode(port_np, &slave_data->phy_if);
1315                if (ret) {
1316                        dev_err(dev, "%pOF read phy-mode err %d\n",
1317                                port_np, ret);
1318                        goto err_node_put;
1319                }
1320
1321                ret = of_get_mac_address(port_np, slave_data->mac_addr);
1322                if (ret) {
1323                        ret = ti_cm_get_macid(dev, port_id - 1,
1324                                              slave_data->mac_addr);
1325                        if (ret)
1326                                goto err_node_put;
1327                }
1328
1329                if (of_property_read_u32(port_np, "ti,dual-emac-pvid",
1330                                         &prop)) {
1331                        dev_err(dev, "%pOF Missing \"ti,dual-emac-pvid\" in DT.\n",
1332                                port_np);
1333                        slave_data->dual_emac_res_vlan = port_id;
1334                        dev_err(dev, "%pOF Using %d as Reserved VLAN\n",
1335                                port_np, slave_data->dual_emac_res_vlan);
1336                } else {
1337                        slave_data->dual_emac_res_vlan = prop;
1338                }
1339        }
1340
1341        of_node_put(tmp_node);
1342        return 0;
1343
1344err_node_put:
1345        of_node_put(port_np);
1346        of_node_put(tmp_node);
1347        return ret;
1348}
1349
1350static void cpsw_remove_dt(struct cpsw_common *cpsw)
1351{
1352        struct cpsw_platform_data *data = &cpsw->data;
1353        int i = 0;
1354
1355        for (i = 0; i < cpsw->data.slaves; i++) {
1356                struct cpsw_slave_data *slave_data = &data->slave_data[i];
1357                struct device_node *port_np = slave_data->phy_node;
1358
1359                if (port_np) {
1360                        if (of_phy_is_fixed_link(port_np))
1361                                of_phy_deregister_fixed_link(port_np);
1362
1363                        of_node_put(port_np);
1364                }
1365        }
1366}
1367
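/* Allocate one net_device per enabled slave port and initialize its private
 * data, MAC address and ops. The shared RX/TX NAPI contexts are attached to
 * the first net_device that gets created.
 */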
1368static int cpsw_create_ports(struct cpsw_common *cpsw)
1369{
1370        struct cpsw_platform_data *data = &cpsw->data;
1371        struct net_device *ndev, *napi_ndev = NULL;
1372        struct device *dev = cpsw->dev;
1373        struct cpsw_priv *priv;
1374        int ret = 0, i = 0;
1375
1376        for (i = 0; i < cpsw->data.slaves; i++) {
1377                struct cpsw_slave_data *slave_data = &data->slave_data[i];
1378
1379                if (slave_data->disabled)
1380                        continue;
1381
1382                ndev = devm_alloc_etherdev_mqs(dev, sizeof(struct cpsw_priv),
1383                                               CPSW_MAX_QUEUES,
1384                                               CPSW_MAX_QUEUES);
1385                if (!ndev) {
1386                        dev_err(dev, "error allocating net_device\n");
1387                        return -ENOMEM;
1388                }
1389
1390                priv = netdev_priv(ndev);
1391                priv->cpsw = cpsw;
1392                priv->ndev = ndev;
1393                priv->dev  = dev;
1394                priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
1395                priv->emac_port = i + 1;
1396                priv->tx_packet_min = CPSW_MIN_PACKET_SIZE;
1397
1398                if (is_valid_ether_addr(slave_data->mac_addr)) {
1399                        ether_addr_copy(priv->mac_addr, slave_data->mac_addr);
1400                        dev_info(cpsw->dev, "Detected MACID = %pM\n",
1401                                 priv->mac_addr);
1402                } else {
1403                        eth_random_addr(slave_data->mac_addr);
1404                        dev_info(cpsw->dev, "Random MACID = %pM\n",
1405                                 priv->mac_addr);
1406                }
1407                eth_hw_addr_set(ndev, slave_data->mac_addr);
1408                ether_addr_copy(priv->mac_addr, slave_data->mac_addr);
1409
1410                cpsw->slaves[i].ndev = ndev;
1411
1412                ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
1413                                  NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL;
1414
1415                ndev->netdev_ops = &cpsw_netdev_ops;
1416                ndev->ethtool_ops = &cpsw_ethtool_ops;
1417                SET_NETDEV_DEV(ndev, dev);
1418
1419                if (!napi_ndev) {
1420                        /* CPSW Host port CPDMA interface is shared between
1421                         * ports, and there is only one TX IRQ and one RX IRQ
1422                         * available to serve all possible TX and RX channels,
1423                         * respectively.
1424                         */
1425                        netif_napi_add(ndev, &cpsw->napi_rx,
1426                                       cpsw->quirk_irq ?
1427                                       cpsw_rx_poll : cpsw_rx_mq_poll,
1428                                       CPSW_POLL_WEIGHT);
1429                        netif_tx_napi_add(ndev, &cpsw->napi_tx,
1430                                          cpsw->quirk_irq ?
1431                                          cpsw_tx_poll : cpsw_tx_mq_poll,
1432                                          CPSW_POLL_WEIGHT);
1433                }
1434
1435                napi_ndev = ndev;
1436        }
1437
1438        return ret;
1439}
1440
1441static void cpsw_unregister_ports(struct cpsw_common *cpsw)
1442{
1443        int i = 0;
1444
1445        for (i = 0; i < cpsw->data.slaves; i++) {
1446                if (!cpsw->slaves[i].ndev)
1447                        continue;
1448
1449                unregister_netdev(cpsw->slaves[i].ndev);
1450        }
1451}
1452
1453static int cpsw_register_ports(struct cpsw_common *cpsw)
1454{
1455        int ret = 0, i = 0;
1456
1457        for (i = 0; i < cpsw->data.slaves; i++) {
1458                if (!cpsw->slaves[i].ndev)
1459                        continue;
1460
1461                /* register the network device */
1462                ret = register_netdev(cpsw->slaves[i].ndev);
1463                if (ret) {
1464                        dev_err(cpsw->dev,
1465                                "cpsw: err registering net device%d\n", i);
1466                        cpsw->slaves[i].ndev = NULL;
1467                        break;
1468                }
1469        }
1470
1471        if (ret)
1472                cpsw_unregister_ports(cpsw);
1473        return ret;
1474}
1475
1476bool cpsw_port_dev_check(const struct net_device *ndev)
1477{
1478        if (ndev->netdev_ops == &cpsw_netdev_ops) {
1479                struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1480
1481                return !cpsw->data.dual_emac;
1482        }
1483
1484        return false;
1485}
1486
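/* Recompute priv->offload_fwd_mark (applied to received skbs) for all ports:
 * it is set only when ALE bypass is off and both ports are members of the
 * same bridge, i.e. when the hardware actually forwards between them.
 */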
1487static void cpsw_port_offload_fwd_mark_update(struct cpsw_common *cpsw)
1488{
1489        int set_val = 0;
1490        int i;
1491
1492        if (!cpsw->ale_bypass &&
1493            (cpsw->br_members == (ALE_PORT_1 | ALE_PORT_2)))
1494                set_val = 1;
1495
1496        dev_dbg(cpsw->dev, "set offload_fwd_mark %d\n", set_val);
1497
1498        for (i = 0; i < cpsw->data.slaves; i++) {
1499                struct net_device *sl_ndev = cpsw->slaves[i].ndev;
1500                struct cpsw_priv *priv = netdev_priv(sl_ndev);
1501
1502                priv->offload_fwd_mark = set_val;
1503        }
1504}
1505
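/* Called when a port is enslaved to a bridge. offload_fwd_mark ends up set
 * only once both switch ports are members of the same bridge, for example
 * (interface names are illustrative):
 *
 *   ip link add name br0 type bridge
 *   ip link set dev eth0 master br0
 *   ip link set dev eth1 master br0
 */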
1506static int cpsw_netdevice_port_link(struct net_device *ndev,
1507                                    struct net_device *br_ndev,
1508                                    struct netlink_ext_ack *extack)
1509{
1510        struct cpsw_priv *priv = netdev_priv(ndev);
1511        struct cpsw_common *cpsw = priv->cpsw;
1512        int err;
1513
1514        if (!cpsw->br_members) {
1515                cpsw->hw_bridge_dev = br_ndev;
1516        } else {
1517                /* Adding the port to a second bridge is not
1518                 * supported.
1519                 */
1520                if (cpsw->hw_bridge_dev != br_ndev)
1521                        return -EOPNOTSUPP;
1522        }
1523
1524        err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
1525                                            false, extack);
1526        if (err)
1527                return err;
1528
1529        cpsw->br_members |= BIT(priv->emac_port);
1530
1531        cpsw_port_offload_fwd_mark_update(cpsw);
1532
1533        return NOTIFY_DONE;
1534}
1535
1536static void cpsw_netdevice_port_unlink(struct net_device *ndev)
1537{
1538        struct cpsw_priv *priv = netdev_priv(ndev);
1539        struct cpsw_common *cpsw = priv->cpsw;
1540
1541        switchdev_bridge_port_unoffload(ndev, NULL, NULL, NULL);
1542
1543        cpsw->br_members &= ~BIT(priv->emac_port);
1544
1545        cpsw_port_offload_fwd_mark_update(cpsw);
1546
1547        if (!cpsw->br_members)
1548                cpsw->hw_bridge_dev = NULL;
1549}
1550
1551/* netdev notifier */
1552static int cpsw_netdevice_event(struct notifier_block *unused,
1553                                unsigned long event, void *ptr)
1554{
1555        struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
1556        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
1557        struct netdev_notifier_changeupper_info *info;
1558        int ret = NOTIFY_DONE;
1559
1560        if (!cpsw_port_dev_check(ndev))
1561                return NOTIFY_DONE;
1562
1563        switch (event) {
1564        case NETDEV_CHANGEUPPER:
1565                info = ptr;
1566
1567                if (netif_is_bridge_master(info->upper_dev)) {
1568                        if (info->linking)
1569                                ret = cpsw_netdevice_port_link(ndev,
1570                                                               info->upper_dev,
1571                                                               extack);
1572                        else
1573                                cpsw_netdevice_port_unlink(ndev);
1574                }
1575                break;
1576        default:
1577                return NOTIFY_DONE;
1578        }
1579
1580        return notifier_from_errno(ret);
1581}
1582
1583static struct notifier_block cpsw_netdevice_nb __read_mostly = {
1584        .notifier_call = cpsw_netdevice_event,
1585};
1586
1587static int cpsw_register_notifiers(struct cpsw_common *cpsw)
1588{
1589        int ret = 0;
1590
1591        ret = register_netdevice_notifier(&cpsw_netdevice_nb);
1592        if (ret) {
1593                dev_err(cpsw->dev, "can't register netdevice notifier\n");
1594                return ret;
1595        }
1596
1597        ret = cpsw_switchdev_register_notifiers(cpsw);
1598        if (ret)
1599                unregister_netdevice_notifier(&cpsw_netdevice_nb);
1600
1601        return ret;
1602}
1603
1604static void cpsw_unregister_notifiers(struct cpsw_common *cpsw)
1605{
1606        cpsw_switchdev_unregister_notifiers(cpsw);
1607        unregister_netdevice_notifier(&cpsw_netdevice_nb);
1608}
1609
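/* No devlink ops are implemented; devlink_alloc() still expects a valid ops
 * pointer, so an empty structure is passed and only the runtime parameters
 * defined below are exposed.
 */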
1610static const struct devlink_ops cpsw_devlink_ops = {
1611};
1612
1613static int cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
1614                                   struct devlink_param_gset_ctx *ctx)
1615{
1616        struct cpsw_devlink *dl_priv = devlink_priv(dl);
1617        struct cpsw_common *cpsw = dl_priv->cpsw;
1618
1619        dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
1620
1621        if (id != CPSW_DL_PARAM_SWITCH_MODE)
1622                return  -EOPNOTSUPP;
1623
1624        ctx->val.vbool = !cpsw->data.dual_emac;
1625
1626        return 0;
1627}
1628
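/* Switch between dual-EMAC and switch mode at runtime. If any port is up,
 * the ALE is put into bypass, its table is cleared, the host port is
 * reprogrammed for the new mode, per-port VLANs and minimum TX packet sizes
 * are updated, and bypass is then released.
 */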
1629static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
1630                                   struct devlink_param_gset_ctx *ctx)
1631{
1632        struct cpsw_devlink *dl_priv = devlink_priv(dl);
1633        struct cpsw_common *cpsw = dl_priv->cpsw;
1634        int vlan = cpsw->data.default_vlan;
1635        bool switch_en = ctx->val.vbool;
1636        bool if_running = false;
1637        int i;
1638
1639        dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
1640
1641        if (id != CPSW_DL_PARAM_SWITCH_MODE)
1642                return  -EOPNOTSUPP;
1643
1644        if (switch_en == !cpsw->data.dual_emac)
1645                return 0;
1646
1647        if (!switch_en && cpsw->br_members) {
1648                dev_err(cpsw->dev, "Remove ports from BR before disabling switch mode\n");
1649                return -EINVAL;
1650        }
1651
1652        rtnl_lock();
1653
1654        for (i = 0; i < cpsw->data.slaves; i++) {
1655                struct cpsw_slave *slave = &cpsw->slaves[i];
1656                struct net_device *sl_ndev = slave->ndev;
1657
1658                if (!sl_ndev || !netif_running(sl_ndev))
1659                        continue;
1660
1661                if_running = true;
1662        }
1663
1664        if (!if_running) {
1665                /* all ndevs are down */
1666                cpsw->data.dual_emac = !switch_en;
1667                for (i = 0; i < cpsw->data.slaves; i++) {
1668                        struct cpsw_slave *slave = &cpsw->slaves[i];
1669                        struct net_device *sl_ndev = slave->ndev;
1670
1671                        if (!sl_ndev)
1672                                continue;
1673
1674                        if (switch_en)
1675                                vlan = cpsw->data.default_vlan;
1676                        else
1677                                vlan = slave->data->dual_emac_res_vlan;
1678                        slave->port_vlan = vlan;
1679                }
1680                goto exit;
1681        }
1682
1683        if (switch_en) {
1684                dev_info(cpsw->dev, "Enable switch mode\n");
1685
1686                /* enable bypass - no forwarding; all traffic goes to Host */
1687                cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);
1688
1689                /* clean up ALE table */
1690                cpsw_ale_control_set(cpsw->ale, 0, ALE_CLEAR, 1);
1691                cpsw_ale_control_get(cpsw->ale, 0, ALE_AGEOUT);
1692
1693                cpsw_init_host_port_switch(cpsw);
1694
1695                for (i = 0; i < cpsw->data.slaves; i++) {
1696                        struct cpsw_slave *slave = &cpsw->slaves[i];
1697                        struct net_device *sl_ndev = slave->ndev;
1698                        struct cpsw_priv *priv;
1699
1700                        if (!sl_ndev)
1701                                continue;
1702
1703                        priv = netdev_priv(sl_ndev);
1704                        slave->port_vlan = vlan;
1705                        WRITE_ONCE(priv->tx_packet_min, CPSW_MIN_PACKET_SIZE_VLAN);
1706                        if (netif_running(sl_ndev))
1707                                cpsw_port_add_switch_def_ale_entries(priv,
1708                                                                     slave);
1709                }
1710
1711                cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 0);
1712                cpsw->data.dual_emac = false;
1713        } else {
1714                dev_info(cpsw->dev, "Disable switch mode\n");
1715
1716                /* enable bypass - no forwarding; all traffic goes to Host */
1717                cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);
1718
1719                cpsw_ale_control_set(cpsw->ale, 0, ALE_CLEAR, 1);
1720                cpsw_ale_control_get(cpsw->ale, 0, ALE_AGEOUT);
1721
1722                cpsw_init_host_port_dual_mac(cpsw);
1723
1724                for (i = 0; i < cpsw->data.slaves; i++) {
1725                        struct cpsw_slave *slave = &cpsw->slaves[i];
1726                        struct net_device *sl_ndev = slave->ndev;
1727                        struct cpsw_priv *priv;
1728
1729                        if (!sl_ndev)
1730                                continue;
1731
1732                        priv = netdev_priv(slave->ndev);
1733                        slave->port_vlan = slave->data->dual_emac_res_vlan;
1734                        WRITE_ONCE(priv->tx_packet_min, CPSW_MIN_PACKET_SIZE);
1735                        cpsw_port_add_dual_emac_def_ale_entries(priv, slave);
1736                }
1737
1738                cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 0);
1739                cpsw->data.dual_emac = true;
1740        }
1741exit:
1742        rtnl_unlock();
1743
1744        return 0;
1745}
1746
1747static int cpsw_dl_ale_ctrl_get(struct devlink *dl, u32 id,
1748                                struct devlink_param_gset_ctx *ctx)
1749{
1750        struct cpsw_devlink *dl_priv = devlink_priv(dl);
1751        struct cpsw_common *cpsw = dl_priv->cpsw;
1752
1753        dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
1754
1755        switch (id) {
1756        case CPSW_DL_PARAM_ALE_BYPASS:
1757                ctx->val.vbool = cpsw_ale_control_get(cpsw->ale, 0, ALE_BYPASS);
1758                break;
1759        default:
1760                return -EOPNOTSUPP;
1761        }
1762
1763        return 0;
1764}
1765
1766static int cpsw_dl_ale_ctrl_set(struct devlink *dl, u32 id,
1767                                struct devlink_param_gset_ctx *ctx)
1768{
1769        struct cpsw_devlink *dl_priv = devlink_priv(dl);
1770        struct cpsw_common *cpsw = dl_priv->cpsw;
1771        int ret = -EOPNOTSUPP;
1772
1773        dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
1774
1775        switch (id) {
1776        case CPSW_DL_PARAM_ALE_BYPASS:
1777                ret = cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS,
1778                                           ctx->val.vbool);
1779                if (!ret) {
1780                        cpsw->ale_bypass = ctx->val.vbool;
1781                        cpsw_port_offload_fwd_mark_update(cpsw);
1782                }
1783                break;
1784        default:
1785                return -EOPNOTSUPP;
1786        }
1787
1788        return 0;
1789}
1790
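/* The parameters below are runtime devlink parameters and can be changed
 * with the devlink tool, e.g. (the device handle is board specific and only
 * illustrative):
 *
 *   devlink dev param set platform/4a100000.switch \
 *           name switch_mode value true cmode runtime
 *   devlink dev param set platform/4a100000.switch \
 *           name ale_bypass value false cmode runtime
 */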
1791static const struct devlink_param cpsw_devlink_params[] = {
1792        DEVLINK_PARAM_DRIVER(CPSW_DL_PARAM_SWITCH_MODE,
1793                             "switch_mode", DEVLINK_PARAM_TYPE_BOOL,
1794                             BIT(DEVLINK_PARAM_CMODE_RUNTIME),
1795                             cpsw_dl_switch_mode_get, cpsw_dl_switch_mode_set,
1796                             NULL),
1797        DEVLINK_PARAM_DRIVER(CPSW_DL_PARAM_ALE_BYPASS,
1798                             "ale_bypass", DEVLINK_PARAM_TYPE_BOOL,
1799                             BIT(DEVLINK_PARAM_CMODE_RUNTIME),
1800                             cpsw_dl_ale_ctrl_get, cpsw_dl_ale_ctrl_set, NULL),
1801};
1802
1803static int cpsw_register_devlink(struct cpsw_common *cpsw)
1804{
1805        struct device *dev = cpsw->dev;
1806        struct cpsw_devlink *dl_priv;
1807        int ret = 0;
1808
1809        cpsw->devlink = devlink_alloc(&cpsw_devlink_ops, sizeof(*dl_priv), dev);
1810        if (!cpsw->devlink)
1811                return -ENOMEM;
1812
1813        dl_priv = devlink_priv(cpsw->devlink);
1814        dl_priv->cpsw = cpsw;
1815
1816        ret = devlink_params_register(cpsw->devlink, cpsw_devlink_params,
1817                                      ARRAY_SIZE(cpsw_devlink_params));
1818        if (ret) {
1819                dev_err(dev, "devlink params registration failed, ret:%d\n", ret);
1820                goto dl_unreg;
1821        }
1822
1823        devlink_register(cpsw->devlink);
1824        return ret;
1825
1826dl_unreg:
1827        devlink_free(cpsw->devlink);
1828        return ret;
1829}
1830
1831static void cpsw_unregister_devlink(struct cpsw_common *cpsw)
1832{
1833        devlink_unregister(cpsw->devlink);
1834        devlink_params_unregister(cpsw->devlink, cpsw_devlink_params,
1835                                  ARRAY_SIZE(cpsw_devlink_params));
1836        devlink_free(cpsw->devlink);
1837}
1838
1839static const struct of_device_id cpsw_of_mtable[] = {
1840        { .compatible = "ti,cpsw-switch"},
1841        { .compatible = "ti,am335x-cpsw-switch"},
1842        { .compatible = "ti,am4372-cpsw-switch"},
1843        { .compatible = "ti,dra7-cpsw-switch"},
1844        { /* sentinel */ },
1845};
1846MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
1847
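/* SoC revisions with an interrupt-related hardware quirk: cpsw_probe() sets
 * cpsw->quirk_irq for them, which makes the driver fall back to the
 * single-queue NAPI poll handlers.
 */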
1848static const struct soc_device_attribute cpsw_soc_devices[] = {
1849        { .family = "AM33xx", .revision = "ES1.0"},
1850        { /* sentinel */ }
1851};
1852
1853static int cpsw_probe(struct platform_device *pdev)
1854{
1855        const struct soc_device_attribute *soc;
1856        struct device *dev = &pdev->dev;
1857        struct cpsw_common *cpsw;
1858        struct resource *ss_res;
1859        struct gpio_descs *mode;
1860        void __iomem *ss_regs;
1861        int ret = 0, ch;
1862        struct clk *clk;
1863        int irq;
1864
1865        cpsw = devm_kzalloc(dev, sizeof(struct cpsw_common), GFP_KERNEL);
1866        if (!cpsw)
1867                return -ENOMEM;
1868
1869        cpsw_slave_index = cpsw_slave_index_priv;
1870
1871        cpsw->dev = dev;
1872
1873        cpsw->slaves = devm_kcalloc(dev,
1874                                    CPSW_SLAVE_PORTS_NUM,
1875                                    sizeof(struct cpsw_slave),
1876                                    GFP_KERNEL);
1877        if (!cpsw->slaves)
1878                return -ENOMEM;
1879
1880        mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
1881        if (IS_ERR(mode)) {
1882                ret = PTR_ERR(mode);
1883                dev_err(dev, "gpio request failed, ret %d\n", ret);
1884                return ret;
1885        }
1886
1887        clk = devm_clk_get(dev, "fck");
1888        if (IS_ERR(clk)) {
1889                ret = PTR_ERR(clk);
1890                dev_err(dev, "fck clock not found, ret %d\n", ret);
1891                return ret;
1892        }
1893        cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
1894
1895        ss_regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ss_res);
1896        if (IS_ERR(ss_regs)) {
1897                ret = PTR_ERR(ss_regs);
1898                return ret;
1899        }
1900        cpsw->regs = ss_regs;
1901
1902        irq = platform_get_irq_byname(pdev, "rx");
1903        if (irq < 0)
1904                return irq;
1905        cpsw->irqs_table[0] = irq;
1906
1907        irq = platform_get_irq_byname(pdev, "tx");
1908        if (irq < 0)
1909                return irq;
1910        cpsw->irqs_table[1] = irq;
1911
1912        irq = platform_get_irq_byname(pdev, "misc");
1913        if (irq <= 0)
1914                return irq;
1915        cpsw->misc_irq = irq;
1916
1917        platform_set_drvdata(pdev, cpsw);
1918        /* Enabling runtime PM here may be required for child devices. */
1919        pm_runtime_enable(dev);
1920
1921        /* Need to enable clocks with runtime PM api to access module
1922         * registers
1923         */
1924        ret = pm_runtime_get_sync(dev);
1925        if (ret < 0) {
1926                pm_runtime_put_noidle(dev);
1927                pm_runtime_disable(dev);
1928                return ret;
1929        }
1930
1931        ret = cpsw_probe_dt(cpsw);
1932        if (ret)
1933                goto clean_dt_ret;
1934
1935        soc = soc_device_match(cpsw_soc_devices);
1936        if (soc)
1937                cpsw->quirk_irq = true;
1938
1939        cpsw->rx_packet_max = rx_packet_max;
1940        cpsw->descs_pool_size = descs_pool_size;
1941        eth_random_addr(cpsw->base_mac);
1942
1943        ret = cpsw_init_common(cpsw, ss_regs, ale_ageout,
1944                               (u32 __force)ss_res->start + CPSW2_BD_OFFSET,
1945                               descs_pool_size);
1946        if (ret)
1947                goto clean_dt_ret;
1948
1949        cpsw->wr_regs = cpsw->version == CPSW_VERSION_1 ?
1950                        ss_regs + CPSW1_WR_OFFSET :
1951                        ss_regs + CPSW2_WR_OFFSET;
1952
1953        ch = cpsw->quirk_irq ? 0 : 7;
1954        cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
1955        if (IS_ERR(cpsw->txv[0].ch)) {
1956                dev_err(dev, "error initializing tx dma channel\n");
1957                ret = PTR_ERR(cpsw->txv[0].ch);
1958                goto clean_cpts;
1959        }
1960
1961        cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
1962        if (IS_ERR(cpsw->rxv[0].ch)) {
1963                dev_err(dev, "error initializing rx dma channel\n");
1964                ret = PTR_ERR(cpsw->rxv[0].ch);
1965                goto clean_cpts;
1966        }
1967        cpsw_split_res(cpsw);
1968
1969        /* setup netdevs */
1970        ret = cpsw_create_ports(cpsw);
1971        if (ret)
1972                goto clean_unregister_netdev;
1973
1974        /* Grab RX and TX IRQs. The RX_THRESHOLD IRQ is always kept
1975         * disabled with this driver, so it is not requested here; the
1976         * MISC IRQ is requested further below only when CPTS is used.
1977         *
1978         * If anyone wants to implement RX_THRESHOLD support, make sure
1979         * to first request the IRQ and append it to the irqs_table array.
1980         */
1981
1982        ret = devm_request_irq(dev, cpsw->irqs_table[0], cpsw_rx_interrupt,
1983                               0, dev_name(dev), cpsw);
1984        if (ret < 0) {
1985                dev_err(dev, "error attaching irq (%d)\n", ret);
1986                goto clean_unregister_netdev;
1987        }
1988
1989        ret = devm_request_irq(dev, cpsw->irqs_table[1], cpsw_tx_interrupt,
1990                               0, dev_name(dev), cpsw);
1991        if (ret < 0) {
1992                dev_err(dev, "error attaching irq (%d)\n", ret);
1993                goto clean_unregister_netdev;
1994        }
1995
1996        if (!cpsw->cpts)
1997                goto skip_cpts;
1998
1999        ret = devm_request_irq(dev, cpsw->misc_irq, cpsw_misc_interrupt,
2000                               0, dev_name(&pdev->dev), cpsw);
2001        if (ret < 0) {
2002                dev_err(dev, "error attaching misc irq (%d)\n", ret);
2003                goto clean_unregister_netdev;
2004        }
2005
2006        /* Enable the misc CPTS event_pend IRQ instead of polling for events */
2007        cpts_set_irqpoll(cpsw->cpts, false);
2008
2009skip_cpts:
2010        ret = cpsw_register_notifiers(cpsw);
2011        if (ret)
2012                goto clean_unregister_netdev;
2013
2014        ret = cpsw_register_devlink(cpsw);
2015        if (ret)
2016                goto clean_unregister_notifiers;
2017
2018        ret = cpsw_register_ports(cpsw);
2019        if (ret)
2020                goto clean_unregister_notifiers;
2021
2022        dev_notice(dev, "initialized (regs %pa, pool size %d) hw_ver:%08X %d.%d (%d)\n",
2023                   &ss_res->start, descs_pool_size,
2024                   cpsw->version, CPSW_MAJOR_VERSION(cpsw->version),
2025                   CPSW_MINOR_VERSION(cpsw->version),
2026                   CPSW_RTL_VERSION(cpsw->version));
2027
2028        pm_runtime_put(dev);
2029
2030        return 0;
2031
2032clean_unregister_notifiers:
2033        cpsw_unregister_notifiers(cpsw);
2034clean_unregister_netdev:
2035        cpsw_unregister_ports(cpsw);
2036clean_cpts:
2037        cpts_release(cpsw->cpts);
2038        cpdma_ctlr_destroy(cpsw->dma);
2039clean_dt_ret:
2040        cpsw_remove_dt(cpsw);
2041        pm_runtime_put_sync(dev);
2042        pm_runtime_disable(dev);
2043        return ret;
2044}
2045
2046static int cpsw_remove(struct platform_device *pdev)
2047{
2048        struct cpsw_common *cpsw = platform_get_drvdata(pdev);
2049        int ret;
2050
2051        ret = pm_runtime_get_sync(&pdev->dev);
2052        if (ret < 0) {
2053                pm_runtime_put_noidle(&pdev->dev);
2054                return ret;
2055        }
2056
2057        cpsw_unregister_notifiers(cpsw);
2058        cpsw_unregister_devlink(cpsw);
2059        cpsw_unregister_ports(cpsw);
2060
2061        cpts_release(cpsw->cpts);
2062        cpdma_ctlr_destroy(cpsw->dma);
2063        cpsw_remove_dt(cpsw);
2064        pm_runtime_put_sync(&pdev->dev);
2065        pm_runtime_disable(&pdev->dev);
2066        return 0;
2067}
2068
2069static int __maybe_unused cpsw_suspend(struct device *dev)
2070{
2071        struct cpsw_common *cpsw = dev_get_drvdata(dev);
2072        int i;
2073
2074        rtnl_lock();
2075
2076        for (i = 0; i < cpsw->data.slaves; i++) {
2077                struct net_device *ndev = cpsw->slaves[i].ndev;
2078
2079                if (!(ndev && netif_running(ndev)))
2080                        continue;
2081
2082                cpsw_ndo_stop(ndev);
2083        }
2084
2085        rtnl_unlock();
2086
2087        /* Select sleep pin state */
2088        pinctrl_pm_select_sleep_state(dev);
2089
2090        return 0;
2091}
2092
2093static int __maybe_unused cpsw_resume(struct device *dev)
2094{
2095        struct cpsw_common *cpsw = dev_get_drvdata(dev);
2096        int i;
2097
2098        /* Select default pin state */
2099        pinctrl_pm_select_default_state(dev);
2100
2101        /* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
2102        rtnl_lock();
2103
2104        for (i = 0; i < cpsw->data.slaves; i++) {
2105                struct net_device *ndev = cpsw->slaves[i].ndev;
2106
2107                if (!(ndev && netif_running(ndev)))
2108                        continue;
2109
2110                cpsw_ndo_open(ndev);
2111        }
2112
2113        rtnl_unlock();
2114
2115        return 0;
2116}
2117
2118static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
2119
2120static struct platform_driver cpsw_driver = {
2121        .driver = {
2122                .name    = "cpsw-switch",
2123                .pm      = &cpsw_pm_ops,
2124                .of_match_table = cpsw_of_mtable,
2125        },
2126        .probe = cpsw_probe,
2127        .remove = cpsw_remove,
2128};
2129
2130module_platform_driver(cpsw_driver);
2131
2132MODULE_LICENSE("GPL");
2133MODULE_DESCRIPTION("TI CPSW switchdev Ethernet driver");
2134