linux/drivers/net/ethernet/ti/cpsw_new.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Texas Instruments Ethernet Switch Driver
   4 *
   5 * Copyright (C) 2019 Texas Instruments
   6 */
   7
   8#include <linux/io.h>
   9#include <linux/clk.h>
  10#include <linux/timer.h>
  11#include <linux/module.h>
  12#include <linux/irqreturn.h>
  13#include <linux/interrupt.h>
  14#include <linux/if_ether.h>
  15#include <linux/etherdevice.h>
  16#include <linux/net_tstamp.h>
  17#include <linux/phy.h>
  18#include <linux/phy/phy.h>
  19#include <linux/delay.h>
  20#include <linux/pinctrl/consumer.h>
  21#include <linux/pm_runtime.h>
  22#include <linux/gpio/consumer.h>
  23#include <linux/of.h>
  24#include <linux/of_mdio.h>
  25#include <linux/of_net.h>
  26#include <linux/of_device.h>
  27#include <linux/if_vlan.h>
  28#include <linux/kmemleak.h>
  29#include <linux/sys_soc.h>
  30
  31#include <net/switchdev.h>
  32#include <net/page_pool.h>
  33#include <net/pkt_cls.h>
  34#include <net/devlink.h>
  35
  36#include "cpsw.h"
  37#include "cpsw_ale.h"
  38#include "cpsw_priv.h"
  39#include "cpsw_sl.h"
  40#include "cpsw_switchdev.h"
  41#include "cpts.h"
  42#include "davinci_cpdma.h"
  43
  44#include <net/pkt_sched.h>
  45
  46static int debug_level;
  47static int ale_ageout = CPSW_ALE_AGEOUT_DEFAULT;
  48static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
  49static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
  50
  51struct cpsw_devlink {
  52        struct cpsw_common *cpsw;
  53};
  54
  55enum cpsw_devlink_param_id {
  56        CPSW_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
  57        CPSW_DL_PARAM_SWITCH_MODE,
  58        CPSW_DL_PARAM_ALE_BYPASS,
  59};
  60
   61/* struct cpsw_common is not needed, kept here for compatibility
   62 * reasons with the old driver
   63 */
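     /* Slave (external) ports are numbered from 1 in priv->emac_port, while
      * port 0 (HOST_PORT_NUM) is the CPU port; the slaves[] array is 0-based,
      * hence the "- 1" below.
      */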
  64static int cpsw_slave_index_priv(struct cpsw_common *cpsw,
  65                                 struct cpsw_priv *priv)
  66{
  67        if (priv->emac_port == HOST_PORT_NUM)
  68                return -1;
  69
  70        return priv->emac_port - 1;
  71}
  72
  73static bool cpsw_is_switch_en(struct cpsw_common *cpsw)
  74{
  75        return !cpsw->data.dual_emac;
  76}
  77
  78static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
  79{
  80        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
  81        bool enable_uni = false;
  82        int i;
  83
  84        if (cpsw_is_switch_en(cpsw))
  85                return;
  86
   87        /* Enabling promiscuous mode for one interface affects
   88         * both interfaces, as they share the same hardware
   89         * resource.
   90         */
  91        for (i = 0; i < cpsw->data.slaves; i++)
  92                if (cpsw->slaves[i].ndev &&
  93                    (cpsw->slaves[i].ndev->flags & IFF_PROMISC))
  94                        enable_uni = true;
  95
  96        if (!enable && enable_uni) {
  97                enable = enable_uni;
  98                dev_dbg(cpsw->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
  99        }
 100
 101        if (enable) {
 102                /* Enable unknown unicast, reg/unreg mcast */
 103                cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
 104                                     ALE_P0_UNI_FLOOD, 1);
 105
 106                dev_dbg(cpsw->dev, "promiscuity enabled\n");
 107        } else {
 108                /* Disable unknown unicast */
 109                cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
 110                                     ALE_P0_UNI_FLOOD, 0);
 111                dev_dbg(cpsw->dev, "promiscuity disabled\n");
 112        }
 113}
 114
 115/**
  116 * cpsw_set_mc - add a multicast address to the ALE table or delete it
  117 * from it, depending on @add
 118 * @ndev: device to sync
 119 * @addr: address to be added or deleted
 120 * @vid: vlan id, if vid < 0 set/unset address for real device
 121 * @add: add address if the flag is set or remove otherwise
 122 */
 123static int cpsw_set_mc(struct net_device *ndev, const u8 *addr,
 124                       int vid, int add)
 125{
 126        struct cpsw_priv *priv = netdev_priv(ndev);
 127        struct cpsw_common *cpsw = priv->cpsw;
 128        int mask, flags, ret, slave_no;
 129
 130        slave_no = cpsw_slave_index(cpsw, priv);
 131        if (vid < 0)
 132                vid = cpsw->slaves[slave_no].port_vlan;
 133
 134        mask =  ALE_PORT_HOST;
 135        flags = vid ? ALE_VLAN : 0;
 136
 137        if (add)
 138                ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0);
 139        else
 140                ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);
 141
 142        return ret;
 143}
 144
 145static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
 146{
 147        struct addr_sync_ctx *sync_ctx = ctx;
 148        struct netdev_hw_addr *ha;
 149        int found = 0, ret = 0;
 150
 151        if (!vdev || !(vdev->flags & IFF_UP))
 152                return 0;
 153
 154        /* vlan address is relevant if its sync_cnt != 0 */
 155        netdev_for_each_mc_addr(ha, vdev) {
 156                if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
 157                        found = ha->sync_cnt;
 158                        break;
 159                }
 160        }
 161
 162        if (found)
 163                sync_ctx->consumed++;
 164
 165        if (sync_ctx->flush) {
 166                if (!found)
 167                        cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
 168                return 0;
 169        }
 170
 171        if (found)
 172                ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);
 173
 174        return ret;
 175}
 176
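     /* cpsw_add_mc_addr()/cpsw_del_mc_addr() are the sync/unsync callbacks
      * used by __hw_addr_ref_sync_dev(); @num carries the reference count of
      * the address. VLAN uppers are walked first and only references not
      * consumed by a VLAN device are programmed for the real device
      * (cpsw_set_mc() with vid < 0, i.e. the port VLAN).
      */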
 177static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
 178{
 179        struct addr_sync_ctx sync_ctx;
 180        int ret;
 181
 182        sync_ctx.consumed = 0;
 183        sync_ctx.addr = addr;
 184        sync_ctx.ndev = ndev;
 185        sync_ctx.flush = 0;
 186
 187        ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
 188        if (sync_ctx.consumed < num && !ret)
 189                ret = cpsw_set_mc(ndev, addr, -1, 1);
 190
 191        return ret;
 192}
 193
 194static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num)
 195{
 196        struct addr_sync_ctx sync_ctx;
 197
 198        sync_ctx.consumed = 0;
 199        sync_ctx.addr = addr;
 200        sync_ctx.ndev = ndev;
 201        sync_ctx.flush = 1;
 202
 203        vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
 204        if (sync_ctx.consumed == num)
 205                cpsw_set_mc(ndev, addr, -1, 0);
 206
 207        return 0;
 208}
 209
 210static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
 211{
 212        struct addr_sync_ctx *sync_ctx = ctx;
 213        struct netdev_hw_addr *ha;
 214        int found = 0;
 215
 216        if (!vdev || !(vdev->flags & IFF_UP))
 217                return 0;
 218
 219        /* vlan address is relevant if its sync_cnt != 0 */
 220        netdev_for_each_mc_addr(ha, vdev) {
 221                if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
 222                        found = ha->sync_cnt;
 223                        break;
 224                }
 225        }
 226
 227        if (!found)
 228                return 0;
 229
 230        sync_ctx->consumed++;
 231        cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
 232        return 0;
 233}
 234
 235static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
 236{
 237        struct addr_sync_ctx sync_ctx;
 238
 239        sync_ctx.addr = addr;
 240        sync_ctx.ndev = ndev;
 241        sync_ctx.consumed = 0;
 242
 243        vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx);
 244        if (sync_ctx.consumed < num)
 245                cpsw_set_mc(ndev, addr, -1, 0);
 246
 247        return 0;
 248}
 249
 250static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
 251{
 252        struct cpsw_priv *priv = netdev_priv(ndev);
 253        struct cpsw_common *cpsw = priv->cpsw;
 254
 255        if (ndev->flags & IFF_PROMISC) {
 256                /* Enable promiscuous mode */
 257                cpsw_set_promiscious(ndev, true);
 258                cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, priv->emac_port);
 259                return;
 260        }
 261
 262        /* Disable promiscuous mode */
 263        cpsw_set_promiscious(ndev, false);
 264
 265        /* Restore allmulti on vlans if necessary */
 266        cpsw_ale_set_allmulti(cpsw->ale,
 267                              ndev->flags & IFF_ALLMULTI, priv->emac_port);
 268
 269        /* add/remove mcast address either for real netdev or for vlan */
 270        __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
 271                               cpsw_del_mc_addr);
 272}
 273
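     /* Total size of one RX buffer as used with build_skb(): CPSW_HEADROOM in
      * front of the frame, the frame itself, and room for the
      * struct skb_shared_info that build_skb() places at the buffer end.
      */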
 274static unsigned int cpsw_rxbuf_total_len(unsigned int len)
 275{
 276        len += CPSW_HEADROOM;
 277        len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 278
 279        return SKB_DATA_ALIGN(len);
 280}
 281
 282static void cpsw_rx_handler(void *token, int len, int status)
 283{
 284        struct page *new_page, *page = token;
 285        void *pa = page_address(page);
 286        int headroom = CPSW_HEADROOM;
 287        struct cpsw_meta_xdp *xmeta;
 288        struct cpsw_common *cpsw;
 289        struct net_device *ndev;
 290        int port, ch, pkt_size;
 291        struct cpsw_priv *priv;
 292        struct page_pool *pool;
 293        struct sk_buff *skb;
 294        struct xdp_buff xdp;
 295        int ret = 0;
 296        dma_addr_t dma;
 297
 298        xmeta = pa + CPSW_XMETA_OFFSET;
 299        cpsw = ndev_to_cpsw(xmeta->ndev);
 300        ndev = xmeta->ndev;
 301        pkt_size = cpsw->rx_packet_max;
 302        ch = xmeta->ch;
 303
 304        if (status >= 0) {
 305                port = CPDMA_RX_SOURCE_PORT(status);
 306                if (port)
 307                        ndev = cpsw->slaves[--port].ndev;
 308        }
 309
 310        priv = netdev_priv(ndev);
 311        pool = cpsw->page_pool[ch];
 312
 313        if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
 314                /* In dual emac mode check for all interfaces */
 315                if (cpsw->usage_count && status >= 0) {
  316                        /* The packet was received for an interface which
  317                         * is already down while the other interface is
  318                         * still up and running. Instead of freeing it,
  319                         * which would reduce the number of rx descriptors
  320                         * in the DMA engine, requeue the page to cpdma.
  321                         */
 322                        new_page = page;
 323                        goto requeue;
 324                }
 325
 326                /* the interface is going down, pages are purged */
 327                page_pool_recycle_direct(pool, page);
 328                return;
 329        }
 330
 331        new_page = page_pool_dev_alloc_pages(pool);
 332        if (unlikely(!new_page)) {
 333                new_page = page;
 334                ndev->stats.rx_dropped++;
 335                goto requeue;
 336        }
 337
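             /* If an XDP program is attached, run it on the raw page before an
              * skb is built; the VLAN encapsulation word inserted by the
              * hardware is skipped so the program sees a plain Ethernet frame.
              */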
 338        if (priv->xdp_prog) {
  339                int size = len;
 340
 341                xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
 342                if (status & CPDMA_RX_VLAN_ENCAP) {
 343                        headroom += CPSW_RX_VLAN_ENCAP_HDR_SIZE;
 344                        size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
 345                }
 346
 347                xdp_prepare_buff(&xdp, pa, headroom, size, false);
 348
 349                ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port, &len);
 350                if (ret != CPSW_XDP_PASS)
 351                        goto requeue;
 352
 353                headroom = xdp.data - xdp.data_hard_start;
 354
 355                /* XDP prog can modify vlan tag, so can't use encap header */
 356                status &= ~CPDMA_RX_VLAN_ENCAP;
 357        }
 358
 359        /* pass skb to netstack if no XDP prog or returned XDP_PASS */
 360        skb = build_skb(pa, cpsw_rxbuf_total_len(pkt_size));
 361        if (!skb) {
 362                ndev->stats.rx_dropped++;
 363                page_pool_recycle_direct(pool, page);
 364                goto requeue;
 365        }
 366
 367        skb->offload_fwd_mark = priv->offload_fwd_mark;
 368        skb_reserve(skb, headroom);
 369        skb_put(skb, len);
 370        skb->dev = ndev;
 371        if (status & CPDMA_RX_VLAN_ENCAP)
 372                cpsw_rx_vlan_encap(skb);
 373        if (priv->rx_ts_enabled)
 374                cpts_rx_timestamp(cpsw->cpts, skb);
 375        skb->protocol = eth_type_trans(skb, ndev);
 376
 377        /* mark skb for recycling */
 378        skb_mark_for_recycle(skb);
 379        netif_receive_skb(skb);
 380
 381        ndev->stats.rx_bytes += len;
 382        ndev->stats.rx_packets++;
 383
 384requeue:
 385        xmeta = page_address(new_page) + CPSW_XMETA_OFFSET;
 386        xmeta->ndev = ndev;
 387        xmeta->ch = ch;
 388
 389        dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM;
 390        ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
 391                                       pkt_size, 0);
 392        if (ret < 0) {
 393                WARN_ON(ret == -ENOMEM);
 394                page_pool_recycle_direct(pool, new_page);
 395        }
 396}
 397
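     /* Install all ALE entries needed for one VLAN on a port: VLAN membership
      * for the slave and host ports, a unicast entry for the port MAC address
      * and a multicast entry for broadcast. Partially installed entries are
      * rolled back on failure.
      */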
 398static int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
 399                                   unsigned short vid)
 400{
 401        struct cpsw_common *cpsw = priv->cpsw;
 402        int unreg_mcast_mask = 0;
 403        int mcast_mask;
 404        u32 port_mask;
 405        int ret;
 406
 407        port_mask = (1 << priv->emac_port) | ALE_PORT_HOST;
 408
 409        mcast_mask = ALE_PORT_HOST;
 410        if (priv->ndev->flags & IFF_ALLMULTI)
 411                unreg_mcast_mask = mcast_mask;
 412
 413        ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
 414                                unreg_mcast_mask);
 415        if (ret != 0)
 416                return ret;
 417
 418        ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
 419                                 HOST_PORT_NUM, ALE_VLAN, vid);
 420        if (ret != 0)
 421                goto clean_vid;
 422
 423        ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
 424                                 mcast_mask, ALE_VLAN, vid, 0);
 425        if (ret != 0)
 426                goto clean_vlan_ucast;
 427        return 0;
 428
 429clean_vlan_ucast:
 430        cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
 431                           HOST_PORT_NUM, ALE_VLAN, vid);
 432clean_vid:
 433        cpsw_ale_del_vlan(cpsw->ale, vid, 0);
 434        return ret;
 435}
 436
 437static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
 438                                    __be16 proto, u16 vid)
 439{
 440        struct cpsw_priv *priv = netdev_priv(ndev);
 441        struct cpsw_common *cpsw = priv->cpsw;
 442        int ret, i;
 443
 444        if (cpsw_is_switch_en(cpsw)) {
 445                dev_dbg(cpsw->dev, ".ndo_vlan_rx_add_vid called in switch mode\n");
 446                return 0;
 447        }
 448
 449        if (vid == cpsw->data.default_vlan)
 450                return 0;
 451
 452        ret = pm_runtime_get_sync(cpsw->dev);
 453        if (ret < 0) {
 454                pm_runtime_put_noidle(cpsw->dev);
 455                return ret;
 456        }
 457
  458        /* In dual EMAC, the reserved VLAN id should not be used for
 459         * creating VLAN interfaces as this can break the dual
 460         * EMAC port separation
 461         */
 462        for (i = 0; i < cpsw->data.slaves; i++) {
 463                if (cpsw->slaves[i].ndev &&
 464                    vid == cpsw->slaves[i].port_vlan) {
 465                        ret = -EINVAL;
 466                        goto err;
 467                }
 468        }
 469
 470        dev_dbg(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
 471        ret = cpsw_add_vlan_ale_entry(priv, vid);
 472err:
 473        pm_runtime_put(cpsw->dev);
 474        return ret;
 475}
 476
 477static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
 478{
 479        struct cpsw_priv *priv = arg;
 480
 481        if (!vdev || !vid)
 482                return 0;
 483
 484        cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid);
 485        return 0;
 486}
 487
 488/* restore resources after port reset */
 489static void cpsw_restore(struct cpsw_priv *priv)
 490{
 491        struct cpsw_common *cpsw = priv->cpsw;
 492
 493        /* restore vlan configurations */
 494        vlan_for_each(priv->ndev, cpsw_restore_vlans, priv);
 495
 496        /* restore MQPRIO offload */
 497        cpsw_mqprio_resume(&cpsw->slaves[priv->emac_port - 1], priv);
 498
 499        /* restore CBS offload */
 500        cpsw_cbs_resume(&cpsw->slaves[priv->emac_port - 1], priv);
 501}
 502
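     /* 01:80:c2:00:00:00 is the IEEE 802.1D bridge group (STP) address; add a
      * supervisory ALE entry targeting the host port so that BPDUs always
      * reach the CPU.
      */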
 503static void cpsw_init_stp_ale_entry(struct cpsw_common *cpsw)
 504{
 505        static const char stpa[] = {0x01, 0x80, 0xc2, 0x0, 0x0, 0x0};
 506
 507        cpsw_ale_add_mcast(cpsw->ale, stpa,
 508                           ALE_PORT_HOST, ALE_SUPER, 0,
 509                           ALE_MCAST_BLOCK_LEARN_FWD);
 510}
 511
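     /* Host port setup differs per mode: in switch mode unknown unicast is
      * flooded to the host and address learning stays enabled, while in
      * dual-MAC mode (cpsw_init_host_port_dual_mac() below) flooding and
      * learning are disabled to keep the two ports separated.
      */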
 512static void cpsw_init_host_port_switch(struct cpsw_common *cpsw)
 513{
 514        int vlan = cpsw->data.default_vlan;
 515
 516        writel(CPSW_FIFO_NORMAL_MODE, &cpsw->host_port_regs->tx_in_ctl);
 517
 518        writel(vlan, &cpsw->host_port_regs->port_vlan);
 519
 520        cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
 521                          ALE_ALL_PORTS, ALE_ALL_PORTS,
 522                          ALE_PORT_1 | ALE_PORT_2);
 523
 524        cpsw_init_stp_ale_entry(cpsw);
 525
 526        cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 1);
 527        dev_dbg(cpsw->dev, "Set P0_UNI_FLOOD\n");
 528        cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 0);
 529}
 530
 531static void cpsw_init_host_port_dual_mac(struct cpsw_common *cpsw)
 532{
 533        int vlan = cpsw->data.default_vlan;
 534
 535        writel(CPSW_FIFO_DUAL_MAC_MODE, &cpsw->host_port_regs->tx_in_ctl);
 536
 537        cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 0);
 538        dev_dbg(cpsw->dev, "unset P0_UNI_FLOOD\n");
 539
 540        writel(vlan, &cpsw->host_port_regs->port_vlan);
 541
 542        cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
  543        /* learning makes no sense in dual_mac mode */
 544        cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 1);
 545}
 546
 547static void cpsw_init_host_port(struct cpsw_priv *priv)
 548{
 549        struct cpsw_common *cpsw = priv->cpsw;
 550        u32 control_reg;
 551
 552        /* soft reset the controller and initialize ale */
 553        soft_reset("cpsw", &cpsw->regs->soft_reset);
 554        cpsw_ale_start(cpsw->ale);
 555
  556        /* switch to vlan aware mode */
 557        cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
 558                             CPSW_ALE_VLAN_AWARE);
 559        control_reg = readl(&cpsw->regs->control);
 560        control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP;
 561        writel(control_reg, &cpsw->regs->control);
 562
 563        /* setup host port priority mapping */
 564        writel_relaxed(CPDMA_TX_PRIORITY_MAP,
 565                       &cpsw->host_port_regs->cpdma_tx_pri_map);
 566        writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);
 567
 568        /* disable priority elevation */
 569        writel_relaxed(0, &cpsw->regs->ptype);
 570
  571        /* enable statistics collection on all ports */
 572        writel_relaxed(0x7, &cpsw->regs->stat_port_en);
 573
 574        /* Enable internal fifo flow control */
 575        writel(0x7, &cpsw->regs->flow_control);
 576
 577        if (cpsw_is_switch_en(cpsw))
 578                cpsw_init_host_port_switch(cpsw);
 579        else
 580                cpsw_init_host_port_dual_mac(cpsw);
 581
 582        cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
 583                             ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
 584}
 585
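     /* In dual-MAC mode each external port gets its own reserved port VLAN
      * whose only members are that port and the host port; this is what keeps
      * the two EMACs separated while sharing one switch fabric.
      */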
 586static void cpsw_port_add_dual_emac_def_ale_entries(struct cpsw_priv *priv,
 587                                                    struct cpsw_slave *slave)
 588{
 589        u32 port_mask = 1 << priv->emac_port | ALE_PORT_HOST;
 590        struct cpsw_common *cpsw = priv->cpsw;
 591        u32 reg;
 592
 593        reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
 594               CPSW2_PORT_VLAN;
 595        slave_write(slave, slave->port_vlan, reg);
 596
 597        cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
 598                          port_mask, port_mask, 0);
 599        cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
 600                           ALE_PORT_HOST, ALE_VLAN, slave->port_vlan,
 601                           ALE_MCAST_FWD);
 602        cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
 603                           HOST_PORT_NUM, ALE_VLAN |
 604                           ALE_SECURE, slave->port_vlan);
 605        cpsw_ale_control_set(cpsw->ale, priv->emac_port,
 606                             ALE_PORT_DROP_UNKNOWN_VLAN, 1);
  607        /* learning makes no sense in dual_mac mode */
 608        cpsw_ale_control_set(cpsw->ale, priv->emac_port,
 609                             ALE_PORT_NOLEARN, 1);
 610}
 611
 612static void cpsw_port_add_switch_def_ale_entries(struct cpsw_priv *priv,
 613                                                 struct cpsw_slave *slave)
 614{
 615        u32 port_mask = 1 << priv->emac_port | ALE_PORT_HOST;
 616        struct cpsw_common *cpsw = priv->cpsw;
 617        u32 reg;
 618
 619        cpsw_ale_control_set(cpsw->ale, priv->emac_port,
 620                             ALE_PORT_DROP_UNKNOWN_VLAN, 0);
 621        cpsw_ale_control_set(cpsw->ale, priv->emac_port,
 622                             ALE_PORT_NOLEARN, 0);
  623        /* Disabling SA_UPDATE is required to make STP work; without this
  624         * setting, host MAC addresses jump between ports.
  625         * As per the TRM, a MAC address can be defined as unicast supervisory
  626         * (super) by setting both ALE_BLOCKED and ALE_SECURE, which should
  627         * prevent SA_UPDATE, but the HW seems to work incorrectly: setting
  628         * ALE_SECURE causes STP packets to be dropped by the ingress filter
  629         *      if (source address found) and (secure) and
  630         *         (receive port number != port_number)
  631         *         then discard the packet
  632         */
 633        cpsw_ale_control_set(cpsw->ale, priv->emac_port,
 634                             ALE_PORT_NO_SA_UPDATE, 1);
 635
 636        cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
 637                           port_mask, ALE_VLAN, slave->port_vlan,
 638                           ALE_MCAST_FWD_2);
 639        cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
 640                           HOST_PORT_NUM, ALE_VLAN, slave->port_vlan);
 641
 642        reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
 643               CPSW2_PORT_VLAN;
 644        slave_write(slave, slave->port_vlan, reg);
 645}
 646
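     /* PHY link change callback: translate the negotiated speed, duplex and
      * pause settings into CPSW_SL_CTL_* bits for the port MAC and open or
      * close the ALE forwarding state accordingly.
      */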
 647static void cpsw_adjust_link(struct net_device *ndev)
 648{
 649        struct cpsw_priv *priv = netdev_priv(ndev);
 650        struct cpsw_common *cpsw = priv->cpsw;
 651        struct cpsw_slave *slave;
 652        struct phy_device *phy;
 653        u32 mac_control = 0;
 654
 655        slave = &cpsw->slaves[priv->emac_port - 1];
 656        phy = slave->phy;
 657
 658        if (!phy)
 659                return;
 660
 661        if (phy->link) {
 662                mac_control = CPSW_SL_CTL_GMII_EN;
 663
 664                if (phy->speed == 1000)
 665                        mac_control |= CPSW_SL_CTL_GIG;
 666                if (phy->duplex)
 667                        mac_control |= CPSW_SL_CTL_FULLDUPLEX;
 668
  669                /* set speed_in input in case RMII mode is used at 100Mbps */
 670                if (phy->speed == 100)
 671                        mac_control |= CPSW_SL_CTL_IFCTL_A;
 672                /* in band mode only works in 10Mbps RGMII mode */
 673                else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
 674                        mac_control |= CPSW_SL_CTL_EXT_EN; /* In Band mode */
 675
 676                if (priv->rx_pause)
 677                        mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
 678
 679                if (priv->tx_pause)
 680                        mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
 681
 682                if (mac_control != slave->mac_control)
 683                        cpsw_sl_ctl_set(slave->mac_sl, mac_control);
 684
 685                /* enable forwarding */
 686                cpsw_ale_control_set(cpsw->ale, priv->emac_port,
 687                                     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
 688
 689                netif_tx_wake_all_queues(ndev);
 690
 691                if (priv->shp_cfg_speed &&
 692                    priv->shp_cfg_speed != slave->phy->speed &&
 693                    !cpsw_shp_is_off(priv))
 694                        dev_warn(priv->dev, "Speed was changed, CBS shaper speeds are changed!");
 695        } else {
 696                netif_tx_stop_all_queues(ndev);
 697
 698                mac_control = 0;
 699                /* disable forwarding */
 700                cpsw_ale_control_set(cpsw->ale, priv->emac_port,
 701                                     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
 702
 703                cpsw_sl_wait_for_idle(slave->mac_sl, 100);
 704
 705                cpsw_sl_ctl_reset(slave->mac_sl);
 706        }
 707
 708        if (mac_control != slave->mac_control)
 709                phy_print_status(phy);
 710
 711        slave->mac_control = mac_control;
 712
 713        if (phy->link && cpsw_need_resplit(cpsw))
 714                cpsw_split_res(cpsw);
 715}
 716
 717static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
 718{
 719        struct cpsw_common *cpsw = priv->cpsw;
 720        struct phy_device *phy;
 721
 722        cpsw_sl_reset(slave->mac_sl, 100);
 723        cpsw_sl_ctl_reset(slave->mac_sl);
 724
 725        /* setup priority mapping */
 726        cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_PRI_MAP,
 727                          RX_PRIORITY_MAPPING);
 728
 729        switch (cpsw->version) {
 730        case CPSW_VERSION_1:
 731                slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
  732                /* Increase RX FIFO size to 5 to support full-duplex
  733                 * flow control mode
  734                 */
 735                slave_write(slave,
 736                            (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
 737                            CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
 738                break;
 739        case CPSW_VERSION_2:
 740        case CPSW_VERSION_3:
 741        case CPSW_VERSION_4:
 742                slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
  743                /* Increase RX FIFO size to 5 to support full-duplex
  744                 * flow control mode
  745                 */
 746                slave_write(slave,
 747                            (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
 748                            CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
 749                break;
 750        }
 751
 752        /* setup max packet size, and mac address */
 753        cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_MAXLEN,
 754                          cpsw->rx_packet_max);
 755        cpsw_set_slave_mac(slave, priv);
 756
 757        slave->mac_control = 0; /* no link yet */
 758
 759        if (cpsw_is_switch_en(cpsw))
 760                cpsw_port_add_switch_def_ale_entries(priv, slave);
 761        else
 762                cpsw_port_add_dual_emac_def_ale_entries(priv, slave);
 763
 764        if (!slave->data->phy_node)
 765                dev_err(priv->dev, "no phy found on slave %d\n",
 766                        slave->slave_num);
 767        phy = of_phy_connect(priv->ndev, slave->data->phy_node,
 768                             &cpsw_adjust_link, 0, slave->data->phy_if);
 769        if (!phy) {
 770                dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n",
 771                        slave->data->phy_node,
 772                        slave->slave_num);
 773                return;
 774        }
 775        slave->phy = phy;
 776
 777        phy_attached_info(slave->phy);
 778
 779        phy_start(slave->phy);
 780
 781        /* Configure GMII_SEL register */
 782        phy_set_mode_ext(slave->data->ifphy, PHY_MODE_ETHERNET,
 783                         slave->data->phy_if);
 784}
 785
 786static int cpsw_ndo_stop(struct net_device *ndev)
 787{
 788        struct cpsw_priv *priv = netdev_priv(ndev);
 789        struct cpsw_common *cpsw = priv->cpsw;
 790        struct cpsw_slave *slave;
 791
 792        cpsw_info(priv, ifdown, "shutting down ndev\n");
 793        slave = &cpsw->slaves[priv->emac_port - 1];
 794        if (slave->phy)
 795                phy_stop(slave->phy);
 796
 797        netif_tx_stop_all_queues(priv->ndev);
 798
 799        if (slave->phy) {
 800                phy_disconnect(slave->phy);
 801                slave->phy = NULL;
 802        }
 803
 804        __hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc);
 805
 806        if (cpsw->usage_count <= 1) {
 807                napi_disable(&cpsw->napi_rx);
 808                napi_disable(&cpsw->napi_tx);
 809                cpts_unregister(cpsw->cpts);
 810                cpsw_intr_disable(cpsw);
 811                cpdma_ctlr_stop(cpsw->dma);
 812                cpsw_ale_stop(cpsw->ale);
 813                cpsw_destroy_xdp_rxqs(cpsw);
 814        }
 815
 816        if (cpsw_need_resplit(cpsw))
 817                cpsw_split_res(cpsw);
 818
 819        cpsw->usage_count--;
 820        pm_runtime_put_sync(cpsw->dev);
 821        return 0;
 822}
 823
 824static int cpsw_ndo_open(struct net_device *ndev)
 825{
 826        struct cpsw_priv *priv = netdev_priv(ndev);
 827        struct cpsw_common *cpsw = priv->cpsw;
 828        int ret;
 829
 830        dev_info(priv->dev, "starting ndev. mode: %s\n",
 831                 cpsw_is_switch_en(cpsw) ? "switch" : "dual_mac");
 832        ret = pm_runtime_get_sync(cpsw->dev);
 833        if (ret < 0) {
 834                pm_runtime_put_noidle(cpsw->dev);
 835                return ret;
 836        }
 837
 838        /* Notify the stack of the actual queue counts. */
 839        ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
 840        if (ret) {
 841                dev_err(priv->dev, "cannot set real number of tx queues\n");
 842                goto pm_cleanup;
 843        }
 844
 845        ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
 846        if (ret) {
 847                dev_err(priv->dev, "cannot set real number of rx queues\n");
 848                goto pm_cleanup;
 849        }
 850
 851        /* Initialize host and slave ports */
 852        if (!cpsw->usage_count)
 853                cpsw_init_host_port(priv);
 854        cpsw_slave_open(&cpsw->slaves[priv->emac_port - 1], priv);
 855
 856        /* initialize shared resources for every ndev */
 857        if (!cpsw->usage_count) {
  858                /* create rxqs for both interfaces in dual mac mode as they
  859                 * share one pool and must be destroyed together when idle.
  860                 */
 861                ret = cpsw_create_xdp_rxqs(cpsw);
 862                if (ret < 0)
 863                        goto err_cleanup;
 864
 865                ret = cpsw_fill_rx_channels(priv);
 866                if (ret < 0)
 867                        goto err_cleanup;
 868
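                     /* Register the CPTS (PTP) clock and enable the misc
                      * interrupt in the CPSW wrapper, which is what delivers
                      * CPTS events to the driver.
                      */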
 869                if (cpsw->cpts) {
 870                        if (cpts_register(cpsw->cpts))
 871                                dev_err(priv->dev, "error registering cpts device\n");
 872                        else
 873                                writel(0x10, &cpsw->wr_regs->misc_en);
 874                }
 875
 876                napi_enable(&cpsw->napi_rx);
 877                napi_enable(&cpsw->napi_tx);
 878
 879                if (cpsw->tx_irq_disabled) {
 880                        cpsw->tx_irq_disabled = false;
 881                        enable_irq(cpsw->irqs_table[1]);
 882                }
 883
 884                if (cpsw->rx_irq_disabled) {
 885                        cpsw->rx_irq_disabled = false;
 886                        enable_irq(cpsw->irqs_table[0]);
 887                }
 888        }
 889
 890        cpsw_restore(priv);
 891
 892        /* Enable Interrupt pacing if configured */
 893        if (cpsw->coal_intvl != 0) {
 894                struct ethtool_coalesce coal;
 895
 896                coal.rx_coalesce_usecs = cpsw->coal_intvl;
 897                cpsw_set_coalesce(ndev, &coal, NULL, NULL);
 898        }
 899
 900        cpdma_ctlr_start(cpsw->dma);
 901        cpsw_intr_enable(cpsw);
 902        cpsw->usage_count++;
 903
 904        return 0;
 905
 906err_cleanup:
 907        cpsw_ndo_stop(ndev);
 908
 909pm_cleanup:
 910        pm_runtime_put_sync(cpsw->dev);
 911        return ret;
 912}
 913
 914static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
 915                                       struct net_device *ndev)
 916{
 917        struct cpsw_priv *priv = netdev_priv(ndev);
 918        struct cpsw_common *cpsw = priv->cpsw;
 919        struct cpts *cpts = cpsw->cpts;
 920        struct netdev_queue *txq;
 921        struct cpdma_chan *txch;
 922        int ret, q_idx;
 923
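             /* Frames shorter than tx_packet_min are padded in software, as
              * the switch does not pad them. tx_packet_min can change at
              * runtime (it differs between dual-MAC and switch mode), hence
              * the READ_ONCE().
              */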
 924        if (skb_put_padto(skb, READ_ONCE(priv->tx_packet_min))) {
 925                cpsw_err(priv, tx_err, "packet pad failed\n");
 926                ndev->stats.tx_dropped++;
 927                return NET_XMIT_DROP;
 928        }
 929
 930        if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
 931            priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
 932                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 933
 934        q_idx = skb_get_queue_mapping(skb);
 935        if (q_idx >= cpsw->tx_ch_num)
 936                q_idx = q_idx % cpsw->tx_ch_num;
 937
 938        txch = cpsw->txv[q_idx].ch;
 939        txq = netdev_get_tx_queue(ndev, q_idx);
 940        skb_tx_timestamp(skb);
 941        ret = cpdma_chan_submit(txch, skb, skb->data, skb->len,
 942                                priv->emac_port);
 943        if (unlikely(ret != 0)) {
 944                cpsw_err(priv, tx_err, "desc submit failed\n");
 945                goto fail;
 946        }
 947
  948        /* If there are no free tx descriptors left then we need to
  949         * tell the kernel to stop sending us tx frames.
  950         */
 951        if (unlikely(!cpdma_check_free_tx_desc(txch))) {
 952                netif_tx_stop_queue(txq);
 953
  954                /* Barrier, so that stop_queue is visible to other CPUs */
 955                smp_mb__after_atomic();
 956
 957                if (cpdma_check_free_tx_desc(txch))
 958                        netif_tx_wake_queue(txq);
 959        }
 960
 961        return NETDEV_TX_OK;
 962fail:
 963        ndev->stats.tx_dropped++;
 964        netif_tx_stop_queue(txq);
 965
  966        /* Barrier, so that stop_queue is visible to other CPUs */
 967        smp_mb__after_atomic();
 968
 969        if (cpdma_check_free_tx_desc(txch))
 970                netif_tx_wake_queue(txq);
 971
 972        return NETDEV_TX_BUSY;
 973}
 974
 975static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
 976{
 977        struct sockaddr *addr = (struct sockaddr *)p;
 978        struct cpsw_priv *priv = netdev_priv(ndev);
 979        struct cpsw_common *cpsw = priv->cpsw;
 980        int ret, slave_no;
 981        int flags = 0;
 982        u16 vid = 0;
 983
 984        slave_no = cpsw_slave_index(cpsw, priv);
 985        if (!is_valid_ether_addr(addr->sa_data))
 986                return -EADDRNOTAVAIL;
 987
 988        ret = pm_runtime_get_sync(cpsw->dev);
 989        if (ret < 0) {
 990                pm_runtime_put_noidle(cpsw->dev);
 991                return ret;
 992        }
 993
 994        vid = cpsw->slaves[slave_no].port_vlan;
 995        flags = ALE_VLAN | ALE_SECURE;
 996
 997        cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
 998                           flags, vid);
 999        cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
1000                           flags, vid);
1001
1002        ether_addr_copy(priv->mac_addr, addr->sa_data);
1003        ether_addr_copy(ndev->dev_addr, priv->mac_addr);
1004        cpsw_set_slave_mac(&cpsw->slaves[slave_no], priv);
1005
1006        pm_runtime_put(cpsw->dev);
1007
1008        return 0;
1009}
1010
1011static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
1012                                     __be16 proto, u16 vid)
1013{
1014        struct cpsw_priv *priv = netdev_priv(ndev);
1015        struct cpsw_common *cpsw = priv->cpsw;
1016        int ret;
1017        int i;
1018
1019        if (cpsw_is_switch_en(cpsw)) {
1020                dev_dbg(cpsw->dev, "ndo del vlan is called in switch mode\n");
1021                return 0;
1022        }
1023
1024        if (vid == cpsw->data.default_vlan)
1025                return 0;
1026
1027        ret = pm_runtime_get_sync(cpsw->dev);
1028        if (ret < 0) {
1029                pm_runtime_put_noidle(cpsw->dev);
1030                return ret;
1031        }
1032
 1033        /* reset the return code as pm_runtime_get_sync() can return
 1034         * non-zero values on success as well.
 1035         */
1036        ret = 0;
1037        for (i = 0; i < cpsw->data.slaves; i++) {
1038                if (cpsw->slaves[i].ndev &&
1039                    vid == cpsw->slaves[i].port_vlan) {
1040                        ret = -EINVAL;
1041                        goto err;
1042                }
1043        }
1044
1045        dev_dbg(priv->dev, "removing vlanid %d from vlan filter\n", vid);
1046        ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
1047        if (ret)
1048                dev_err(priv->dev, "cpsw_ale_del_vlan() failed: ret %d\n", ret);
1049        ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
1050                                 HOST_PORT_NUM, ALE_VLAN, vid);
1051        if (ret)
1052                dev_err(priv->dev, "cpsw_ale_del_ucast() failed: ret %d\n",
1053                        ret);
1054        ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
1055                                 0, ALE_VLAN, vid);
1056        if (ret)
1057                dev_err(priv->dev, "cpsw_ale_del_mcast failed. ret %d\n",
1058                        ret);
1059        cpsw_ale_flush_multicast(cpsw->ale, ALE_PORT_HOST, vid);
1060        ret = 0;
1061err:
1062        pm_runtime_put(cpsw->dev);
1063        return ret;
1064}
1065
1066static int cpsw_ndo_get_phys_port_name(struct net_device *ndev, char *name,
1067                                       size_t len)
1068{
1069        struct cpsw_priv *priv = netdev_priv(ndev);
1070        int err;
1071
1072        err = snprintf(name, len, "p%d", priv->emac_port);
1073
1074        if (err >= len)
1075                return -EINVAL;
1076
1077        return 0;
1078}
1079
1080#ifdef CONFIG_NET_POLL_CONTROLLER
1081static void cpsw_ndo_poll_controller(struct net_device *ndev)
1082{
1083        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1084
1085        cpsw_intr_disable(cpsw);
1086        cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
1087        cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
1088        cpsw_intr_enable(cpsw);
1089}
1090#endif
1091
1092static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
1093                             struct xdp_frame **frames, u32 flags)
1094{
1095        struct cpsw_priv *priv = netdev_priv(ndev);
1096        struct xdp_frame *xdpf;
1097        int i, nxmit = 0;
1098
1099        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1100                return -EINVAL;
1101
1102        for (i = 0; i < n; i++) {
1103                xdpf = frames[i];
1104                if (xdpf->len < READ_ONCE(priv->tx_packet_min))
1105                        break;
1106
1107                if (cpsw_xdp_tx_frame(priv, xdpf, NULL, priv->emac_port))
1108                        break;
1109                nxmit++;
1110        }
1111
1112        return nxmit;
1113}
1114
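     /* Both ports report the switch base MAC address as their parent ID so
      * that upper layers (e.g. the bridge) can tell they belong to the same
      * hardware switch.
      */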
1115static int cpsw_get_port_parent_id(struct net_device *ndev,
1116                                   struct netdev_phys_item_id *ppid)
1117{
1118        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1119
1120        ppid->id_len = sizeof(cpsw->base_mac);
1121        memcpy(&ppid->id, &cpsw->base_mac, ppid->id_len);
1122
1123        return 0;
1124}
1125
1126static const struct net_device_ops cpsw_netdev_ops = {
1127        .ndo_open               = cpsw_ndo_open,
1128        .ndo_stop               = cpsw_ndo_stop,
1129        .ndo_start_xmit         = cpsw_ndo_start_xmit,
1130        .ndo_set_mac_address    = cpsw_ndo_set_mac_address,
1131        .ndo_eth_ioctl          = cpsw_ndo_ioctl,
1132        .ndo_validate_addr      = eth_validate_addr,
1133        .ndo_tx_timeout         = cpsw_ndo_tx_timeout,
1134        .ndo_set_rx_mode        = cpsw_ndo_set_rx_mode,
1135        .ndo_set_tx_maxrate     = cpsw_ndo_set_tx_maxrate,
1136#ifdef CONFIG_NET_POLL_CONTROLLER
1137        .ndo_poll_controller    = cpsw_ndo_poll_controller,
1138#endif
1139        .ndo_vlan_rx_add_vid    = cpsw_ndo_vlan_rx_add_vid,
1140        .ndo_vlan_rx_kill_vid   = cpsw_ndo_vlan_rx_kill_vid,
1141        .ndo_setup_tc           = cpsw_ndo_setup_tc,
1142        .ndo_get_phys_port_name = cpsw_ndo_get_phys_port_name,
1143        .ndo_bpf                = cpsw_ndo_bpf,
1144        .ndo_xdp_xmit           = cpsw_ndo_xdp_xmit,
1145        .ndo_get_port_parent_id = cpsw_get_port_parent_id,
1146};
1147
1148static void cpsw_get_drvinfo(struct net_device *ndev,
1149                             struct ethtool_drvinfo *info)
1150{
1151        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1152        struct platform_device *pdev;
1153
1154        pdev = to_platform_device(cpsw->dev);
1155        strlcpy(info->driver, "cpsw-switch", sizeof(info->driver));
1156        strlcpy(info->version, "2.0", sizeof(info->version));
1157        strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
1158}
1159
1160static int cpsw_set_pauseparam(struct net_device *ndev,
1161                               struct ethtool_pauseparam *pause)
1162{
1163        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1164        struct cpsw_priv *priv = netdev_priv(ndev);
1165        int slave_no;
1166
1167        slave_no = cpsw_slave_index(cpsw, priv);
1168        if (!cpsw->slaves[slave_no].phy)
1169                return -EINVAL;
1170
1171        if (!phy_validate_pause(cpsw->slaves[slave_no].phy, pause))
1172                return -EINVAL;
1173
1174        priv->rx_pause = pause->rx_pause ? true : false;
1175        priv->tx_pause = pause->tx_pause ? true : false;
1176
1177        phy_set_asym_pause(cpsw->slaves[slave_no].phy,
1178                           priv->rx_pause, priv->tx_pause);
1179
1180        return 0;
1181}
1182
1183static int cpsw_set_channels(struct net_device *ndev,
1184                             struct ethtool_channels *chs)
1185{
1186        return cpsw_set_channels_common(ndev, chs, cpsw_rx_handler);
1187}
1188
1189static const struct ethtool_ops cpsw_ethtool_ops = {
1190        .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
1191        .get_drvinfo            = cpsw_get_drvinfo,
1192        .get_msglevel           = cpsw_get_msglevel,
1193        .set_msglevel           = cpsw_set_msglevel,
1194        .get_link               = ethtool_op_get_link,
1195        .get_ts_info            = cpsw_get_ts_info,
1196        .get_coalesce           = cpsw_get_coalesce,
1197        .set_coalesce           = cpsw_set_coalesce,
1198        .get_sset_count         = cpsw_get_sset_count,
1199        .get_strings            = cpsw_get_strings,
1200        .get_ethtool_stats      = cpsw_get_ethtool_stats,
1201        .get_pauseparam         = cpsw_get_pauseparam,
1202        .set_pauseparam         = cpsw_set_pauseparam,
1203        .get_wol                = cpsw_get_wol,
1204        .set_wol                = cpsw_set_wol,
1205        .get_regs_len           = cpsw_get_regs_len,
1206        .get_regs               = cpsw_get_regs,
1207        .begin                  = cpsw_ethtool_op_begin,
1208        .complete               = cpsw_ethtool_op_complete,
1209        .get_channels           = cpsw_get_channels,
1210        .set_channels           = cpsw_set_channels,
1211        .get_link_ksettings     = cpsw_get_link_ksettings,
1212        .set_link_ksettings     = cpsw_set_link_ksettings,
1213        .get_eee                = cpsw_get_eee,
1214        .set_eee                = cpsw_set_eee,
1215        .nway_reset             = cpsw_nway_reset,
1216        .get_ringparam          = cpsw_get_ringparam,
1217        .set_ringparam          = cpsw_set_ringparam,
1218};
1219
1220static int cpsw_probe_dt(struct cpsw_common *cpsw)
1221{
1222        struct device_node *node = cpsw->dev->of_node, *tmp_node, *port_np;
1223        struct cpsw_platform_data *data = &cpsw->data;
1224        struct device *dev = cpsw->dev;
1225        int ret;
1226        u32 prop;
1227
1228        if (!node)
1229                return -EINVAL;
1230
1231        tmp_node = of_get_child_by_name(node, "ethernet-ports");
1232        if (!tmp_node)
1233                return -ENOENT;
1234        data->slaves = of_get_child_count(tmp_node);
1235        if (data->slaves != CPSW_SLAVE_PORTS_NUM) {
1236                of_node_put(tmp_node);
1237                return -ENOENT;
1238        }
1239
1240        data->active_slave = 0;
1241        data->channels = CPSW_MAX_QUEUES;
1242        data->dual_emac = true;
1243        data->bd_ram_size = CPSW_BD_RAM_SIZE;
1244        data->mac_control = 0;
1245
1246        data->slave_data = devm_kcalloc(dev, CPSW_SLAVE_PORTS_NUM,
1247                                        sizeof(struct cpsw_slave_data),
1248                                        GFP_KERNEL);
1249        if (!data->slave_data)
1250                return -ENOMEM;
1251
1252        /* Populate all the child nodes here...
1253         */
1254        ret = devm_of_platform_populate(dev);
 1255        /* We do not want to force this, as in some cases there may be no child nodes */
1256        if (ret)
1257                dev_warn(dev, "Doesn't have any child node\n");
1258
1259        for_each_child_of_node(tmp_node, port_np) {
1260                struct cpsw_slave_data *slave_data;
1261                u32 port_id;
1262
1263                ret = of_property_read_u32(port_np, "reg", &port_id);
1264                if (ret < 0) {
1265                        dev_err(dev, "%pOF error reading port_id %d\n",
1266                                port_np, ret);
1267                        goto err_node_put;
1268                }
1269
1270                if (!port_id || port_id > CPSW_SLAVE_PORTS_NUM) {
1271                        dev_err(dev, "%pOF has invalid port_id %u\n",
1272                                port_np, port_id);
1273                        ret = -EINVAL;
1274                        goto err_node_put;
1275                }
1276
1277                slave_data = &data->slave_data[port_id - 1];
1278
1279                slave_data->disabled = !of_device_is_available(port_np);
1280                if (slave_data->disabled)
1281                        continue;
1282
1283                slave_data->slave_node = port_np;
1284                slave_data->ifphy = devm_of_phy_get(dev, port_np, NULL);
1285                if (IS_ERR(slave_data->ifphy)) {
1286                        ret = PTR_ERR(slave_data->ifphy);
1287                        dev_err(dev, "%pOF: Error retrieving port phy: %d\n",
1288                                port_np, ret);
1289                        goto err_node_put;
1290                }
1291
1292                if (of_phy_is_fixed_link(port_np)) {
1293                        ret = of_phy_register_fixed_link(port_np);
1294                        if (ret) {
1295                                if (ret != -EPROBE_DEFER)
1296                                        dev_err(dev, "%pOF failed to register fixed-link phy: %d\n",
1297                                                port_np, ret);
1298                                goto err_node_put;
1299                        }
1300                        slave_data->phy_node = of_node_get(port_np);
1301                } else {
1302                        slave_data->phy_node =
1303                                of_parse_phandle(port_np, "phy-handle", 0);
1304                }
1305
1306                if (!slave_data->phy_node) {
1307                        dev_err(dev, "%pOF no phy found\n", port_np);
1308                        ret = -ENODEV;
1309                        goto err_node_put;
1310                }
1311
1312                ret = of_get_phy_mode(port_np, &slave_data->phy_if);
1313                if (ret) {
1314                        dev_err(dev, "%pOF read phy-mode err %d\n",
1315                                port_np, ret);
1316                        goto err_node_put;
1317                }
1318
1319                ret = of_get_mac_address(port_np, slave_data->mac_addr);
1320                if (ret) {
1321                        ret = ti_cm_get_macid(dev, port_id - 1,
1322                                              slave_data->mac_addr);
1323                        if (ret)
1324                                goto err_node_put;
1325                }
1326
1327                if (of_property_read_u32(port_np, "ti,dual-emac-pvid",
1328                                         &prop)) {
1329                        dev_err(dev, "%pOF Missing dual_emac_res_vlan in DT.\n",
1330                                port_np);
1331                        slave_data->dual_emac_res_vlan = port_id;
1332                        dev_err(dev, "%pOF Using %d as Reserved VLAN\n",
1333                                port_np, slave_data->dual_emac_res_vlan);
1334                } else {
1335                        slave_data->dual_emac_res_vlan = prop;
1336                }
1337        }
1338
1339        of_node_put(tmp_node);
1340        return 0;
1341
1342err_node_put:
1343        of_node_put(port_np);
1344        return ret;
1345}
1346
1347static void cpsw_remove_dt(struct cpsw_common *cpsw)
1348{
1349        struct cpsw_platform_data *data = &cpsw->data;
1350        int i = 0;
1351
1352        for (i = 0; i < cpsw->data.slaves; i++) {
1353                struct cpsw_slave_data *slave_data = &data->slave_data[i];
1354                struct device_node *port_np = slave_data->phy_node;
1355
1356                if (port_np) {
1357                        if (of_phy_is_fixed_link(port_np))
1358                                of_phy_deregister_fixed_link(port_np);
1359
1360                        of_node_put(port_np);
1361                }
1362        }
1363}
1364
1365static int cpsw_create_ports(struct cpsw_common *cpsw)
1366{
1367        struct cpsw_platform_data *data = &cpsw->data;
1368        struct net_device *ndev, *napi_ndev = NULL;
1369        struct device *dev = cpsw->dev;
1370        struct cpsw_priv *priv;
1371        int ret = 0, i = 0;
1372
1373        for (i = 0; i < cpsw->data.slaves; i++) {
1374                struct cpsw_slave_data *slave_data = &data->slave_data[i];
1375
1376                if (slave_data->disabled)
1377                        continue;
1378
1379                ndev = devm_alloc_etherdev_mqs(dev, sizeof(struct cpsw_priv),
1380                                               CPSW_MAX_QUEUES,
1381                                               CPSW_MAX_QUEUES);
1382                if (!ndev) {
1383                        dev_err(dev, "error allocating net_device\n");
1384                        return -ENOMEM;
1385                }
1386
1387                priv = netdev_priv(ndev);
1388                priv->cpsw = cpsw;
1389                priv->ndev = ndev;
1390                priv->dev  = dev;
1391                priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
1392                priv->emac_port = i + 1;
1393                priv->tx_packet_min = CPSW_MIN_PACKET_SIZE;
1394
1395                if (is_valid_ether_addr(slave_data->mac_addr)) {
1396                        ether_addr_copy(priv->mac_addr, slave_data->mac_addr);
1397                        dev_info(cpsw->dev, "Detected MACID = %pM\n",
1398                                 priv->mac_addr);
1399                } else {
1400                        eth_random_addr(slave_data->mac_addr);
1401                        dev_info(cpsw->dev, "Random MACID = %pM\n",
1402                                 priv->mac_addr);
1403                }
1404                ether_addr_copy(ndev->dev_addr, slave_data->mac_addr);
1405                ether_addr_copy(priv->mac_addr, slave_data->mac_addr);
1406
1407                cpsw->slaves[i].ndev = ndev;
1408
1409                ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
1410                                  NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL;
1411
1412                ndev->netdev_ops = &cpsw_netdev_ops;
1413                ndev->ethtool_ops = &cpsw_ethtool_ops;
1414                SET_NETDEV_DEV(ndev, dev);
1415
1416                if (!napi_ndev) {
 1417                        /* The CPSW host port CPDMA interface is shared
 1418                         * between ports and there is only one TX IRQ and
 1419                         * one RX IRQ available for all possible TX and RX
 1420                         * channels respectively.
 1421                         */
1422                        netif_napi_add(ndev, &cpsw->napi_rx,
1423                                       cpsw->quirk_irq ?
1424                                       cpsw_rx_poll : cpsw_rx_mq_poll,
1425                                       CPSW_POLL_WEIGHT);
1426                        netif_tx_napi_add(ndev, &cpsw->napi_tx,
1427                                          cpsw->quirk_irq ?
1428                                          cpsw_tx_poll : cpsw_tx_mq_poll,
1429                                          CPSW_POLL_WEIGHT);
1430                }
1431
1432                napi_ndev = ndev;
1433        }
1434
1435        return ret;
1436}
1437
1438static void cpsw_unregister_ports(struct cpsw_common *cpsw)
1439{
1440        int i = 0;
1441
1442        for (i = 0; i < cpsw->data.slaves; i++) {
1443                if (!cpsw->slaves[i].ndev)
1444                        continue;
1445
1446                unregister_netdev(cpsw->slaves[i].ndev);
1447        }
1448}
1449
1450static int cpsw_register_ports(struct cpsw_common *cpsw)
1451{
1452        int ret = 0, i = 0;
1453
1454        for (i = 0; i < cpsw->data.slaves; i++) {
1455                if (!cpsw->slaves[i].ndev)
1456                        continue;
1457
1458                /* register the network device */
1459                ret = register_netdev(cpsw->slaves[i].ndev);
1460                if (ret) {
1461                        dev_err(cpsw->dev,
1462                                "cpsw: err registering net device%d\n", i);
1463                        cpsw->slaves[i].ndev = NULL;
1464                        break;
1465                }
1466        }
1467
1468        if (ret)
1469                cpsw_unregister_ports(cpsw);
1470        return ret;
1471}
1472
1473bool cpsw_port_dev_check(const struct net_device *ndev)
1474{
1475        if (ndev->netdev_ops == &cpsw_netdev_ops) {
1476                struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1477
1478                return !cpsw->data.dual_emac;
1479        }
1480
1481        return false;
1482}
1483
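     /* offload_fwd_mark is set on received skbs when both ports are members
      * of the same bridge and the ALE is not bypassed; it tells the software
      * bridge that the packet was already forwarded in hardware and must not
      * be forwarded again.
      */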
1484static void cpsw_port_offload_fwd_mark_update(struct cpsw_common *cpsw)
1485{
1486        int set_val = 0;
1487        int i;
1488
1489        if (!cpsw->ale_bypass &&
1490            (cpsw->br_members == (ALE_PORT_1 | ALE_PORT_2)))
1491                set_val = 1;
1492
1493        dev_dbg(cpsw->dev, "set offload_fwd_mark %d\n", set_val);
1494
1495        for (i = 0; i < cpsw->data.slaves; i++) {
1496                struct net_device *sl_ndev = cpsw->slaves[i].ndev;
1497                struct cpsw_priv *priv = netdev_priv(sl_ndev);
1498
1499                priv->offload_fwd_mark = set_val;
1500        }
1501}
1502
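    /* Called via the netdevice notifier when a CPSW port joins a bridge.
     * Only a single hardware bridge is supported: the first bridge a port
     * joins becomes hw_bridge_dev, and joining any other bridge is refused
     * with -EOPNOTSUPP.
     */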
1503static int cpsw_netdevice_port_link(struct net_device *ndev,
1504                                    struct net_device *br_ndev,
1505                                    struct netlink_ext_ack *extack)
1506{
1507        struct cpsw_priv *priv = netdev_priv(ndev);
1508        struct cpsw_common *cpsw = priv->cpsw;
1509        int err;
1510
1511        if (!cpsw->br_members) {
1512                cpsw->hw_bridge_dev = br_ndev;
1513        } else {
1514                /* Adding the port to a second bridge is not
1515                 * supported.
1516                 */
1517                if (cpsw->hw_bridge_dev != br_ndev)
1518                        return -EOPNOTSUPP;
1519        }
1520
1521        err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
1522                                            false, extack);
1523        if (err)
1524                return err;
1525
1526        cpsw->br_members |= BIT(priv->emac_port);
1527
1528        cpsw_port_offload_fwd_mark_update(cpsw);
1529
1530        return NOTIFY_DONE;
1531}
1532
1533static void cpsw_netdevice_port_unlink(struct net_device *ndev)
1534{
1535        struct cpsw_priv *priv = netdev_priv(ndev);
1536        struct cpsw_common *cpsw = priv->cpsw;
1537
1538        switchdev_bridge_port_unoffload(ndev, NULL, NULL, NULL);
1539
1540        cpsw->br_members &= ~BIT(priv->emac_port);
1541
1542        cpsw_port_offload_fwd_mark_update(cpsw);
1543
1544        if (!cpsw->br_members)
1545                cpsw->hw_bridge_dev = NULL;
1546}
1547
1548/* netdev notifier */
1549static int cpsw_netdevice_event(struct notifier_block *unused,
1550                                unsigned long event, void *ptr)
1551{
1552        struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
1553        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
1554        struct netdev_notifier_changeupper_info *info;
1555        int ret = NOTIFY_DONE;
1556
1557        if (!cpsw_port_dev_check(ndev))
1558                return NOTIFY_DONE;
1559
1560        switch (event) {
1561        case NETDEV_CHANGEUPPER:
1562                info = ptr;
1563
1564                if (netif_is_bridge_master(info->upper_dev)) {
1565                        if (info->linking)
1566                                ret = cpsw_netdevice_port_link(ndev,
1567                                                               info->upper_dev,
1568                                                               extack);
1569                        else
1570                                cpsw_netdevice_port_unlink(ndev);
1571                }
1572                break;
1573        default:
1574                return NOTIFY_DONE;
1575        }
1576
1577        return notifier_from_errno(ret);
1578}
1579
1580static struct notifier_block cpsw_netdevice_nb __read_mostly = {
1581        .notifier_call = cpsw_netdevice_event,
1582};
1583
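    /* Register the netdevice notifier (used to track bridge join/leave on
     * the CPSW ports) together with the switchdev notifiers; unwind the
     * netdevice notifier if switchdev registration fails.
     */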
1584static int cpsw_register_notifiers(struct cpsw_common *cpsw)
1585{
1586        int ret = 0;
1587
1588        ret = register_netdevice_notifier(&cpsw_netdevice_nb);
1589        if (ret) {
1590                dev_err(cpsw->dev, "can't register netdevice notifier\n");
1591                return ret;
1592        }
1593
1594        ret = cpsw_switchdev_register_notifiers(cpsw);
1595        if (ret)
1596                unregister_netdevice_notifier(&cpsw_netdevice_nb);
1597
1598        return ret;
1599}
1600
1601static void cpsw_unregister_notifiers(struct cpsw_common *cpsw)
1602{
1603        cpsw_switchdev_unregister_notifiers(cpsw);
1604        unregister_netdevice_notifier(&cpsw_netdevice_nb);
1605}
1606
1607static const struct devlink_ops cpsw_devlink_ops = {
1608};
1609
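    /* "switch_mode" is a driver-specific devlink runtime parameter that
     * selects between dual-EMAC mode (two independent ports) and switch
     * mode (ports bridged in hardware).  Example usage from user space
     * (the devlink handle is illustrative and platform specific):
     *
     *   devlink dev param set platform/4a100000.switch \
     *           name switch_mode value true cmode runtime
     */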
1610static int cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
1611                                   struct devlink_param_gset_ctx *ctx)
1612{
1613        struct cpsw_devlink *dl_priv = devlink_priv(dl);
1614        struct cpsw_common *cpsw = dl_priv->cpsw;
1615
1616        dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
1617
1618        if (id != CPSW_DL_PARAM_SWITCH_MODE)
1619                return -EOPNOTSUPP;
1620
1621        ctx->val.vbool = !cpsw->data.dual_emac;
1622
1623        return 0;
1624}
1625
1626static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
1627                                   struct devlink_param_gset_ctx *ctx)
1628{
1629        struct cpsw_devlink *dl_priv = devlink_priv(dl);
1630        struct cpsw_common *cpsw = dl_priv->cpsw;
1631        int vlan = cpsw->data.default_vlan;
1632        bool switch_en = ctx->val.vbool;
1633        bool if_running = false;
1634        int i;
1635
1636        dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
1637
1638        if (id != CPSW_DL_PARAM_SWITCH_MODE)
1639                return -EOPNOTSUPP;
1640
1641        if (switch_en == !cpsw->data.dual_emac)
1642                return 0;
1643
1644        if (!switch_en && cpsw->br_members) {
1645                dev_err(cpsw->dev, "Remove ports from the bridge before disabling switch mode\n");
1646                return -EINVAL;
1647        }
1648
1649        rtnl_lock();
1650
1651        for (i = 0; i < cpsw->data.slaves; i++) {
1652                struct cpsw_slave *slave = &cpsw->slaves[i];
1653                struct net_device *sl_ndev = slave->ndev;
1654
1655                if (!sl_ndev || !netif_running(sl_ndev))
1656                        continue;
1657
1658                if_running = true;
1659        }
1660
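            /* If none of the ports are up, only the cached dual_emac and
             * per-port VLAN settings need to change; the ALE and host port
             * are reprogrammed when an interface is opened.
             */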
1661        if (!if_running) {
1662                /* all ndevs are down */
1663                cpsw->data.dual_emac = !switch_en;
1664                for (i = 0; i < cpsw->data.slaves; i++) {
1665                        struct cpsw_slave *slave = &cpsw->slaves[i];
1666                        struct net_device *sl_ndev = slave->ndev;
1667
1668                        if (!sl_ndev)
1669                                continue;
1670
1671                        if (switch_en)
1672                                vlan = cpsw->data.default_vlan;
1673                        else
1674                                vlan = slave->data->dual_emac_res_vlan;
1675                        slave->port_vlan = vlan;
1676                }
1677                goto exit;
1678        }
1679
1680        if (switch_en) {
1681                dev_info(cpsw->dev, "Enable switch mode\n");
1682
1683                /* enable bypass - no forwarding; all traffic goes to Host */
1684                cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);
1685
1686                /* clean up ALE table */
1687                cpsw_ale_control_set(cpsw->ale, 0, ALE_CLEAR, 1);
1688                cpsw_ale_control_get(cpsw->ale, 0, ALE_AGEOUT);
1689
1690                cpsw_init_host_port_switch(cpsw);
1691
1692                for (i = 0; i < cpsw->data.slaves; i++) {
1693                        struct cpsw_slave *slave = &cpsw->slaves[i];
1694                        struct net_device *sl_ndev = slave->ndev;
1695                        struct cpsw_priv *priv;
1696
1697                        if (!sl_ndev)
1698                                continue;
1699
1700                        priv = netdev_priv(sl_ndev);
1701                        slave->port_vlan = vlan;
1702                        WRITE_ONCE(priv->tx_packet_min, CPSW_MIN_PACKET_SIZE_VLAN);
1703                        if (netif_running(sl_ndev))
1704                                cpsw_port_add_switch_def_ale_entries(priv,
1705                                                                     slave);
1706                }
1707
1708                cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 0);
1709                cpsw->data.dual_emac = false;
1710        } else {
1711                dev_info(cpsw->dev, "Disable switch mode\n");
1712
1713                /* enable bypass - no forwarding; all traffic goes to Host */
1714                cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);
1715
1716                cpsw_ale_control_set(cpsw->ale, 0, ALE_CLEAR, 1);
1717                cpsw_ale_control_get(cpsw->ale, 0, ALE_AGEOUT);
1718
1719                cpsw_init_host_port_dual_mac(cpsw);
1720
1721                for (i = 0; i < cpsw->data.slaves; i++) {
1722                        struct cpsw_slave *slave = &cpsw->slaves[i];
1723                        struct net_device *sl_ndev = slave->ndev;
1724                        struct cpsw_priv *priv;
1725
1726                        if (!sl_ndev)
1727                                continue;
1728
1729                        priv = netdev_priv(slave->ndev);
1730                        slave->port_vlan = slave->data->dual_emac_res_vlan;
1731                        WRITE_ONCE(priv->tx_packet_min, CPSW_MIN_PACKET_SIZE);
1732                        cpsw_port_add_dual_emac_def_ale_entries(priv, slave);
1733                }
1734
1735                cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 0);
1736                cpsw->data.dual_emac = true;
1737        }
1738exit:
1739        rtnl_unlock();
1740
1741        return 0;
1742}
1743
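    /* "ale_bypass" puts the ALE into bypass mode so that no switching is
     * done and all ingress traffic is delivered to the host port only.
     * Example usage (the devlink handle is illustrative):
     *
     *   devlink dev param set platform/4a100000.switch \
     *           name ale_bypass value true cmode runtime
     */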
1744static int cpsw_dl_ale_ctrl_get(struct devlink *dl, u32 id,
1745                                struct devlink_param_gset_ctx *ctx)
1746{
1747        struct cpsw_devlink *dl_priv = devlink_priv(dl);
1748        struct cpsw_common *cpsw = dl_priv->cpsw;
1749
1750        dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
1751
1752        switch (id) {
1753        case CPSW_DL_PARAM_ALE_BYPASS:
1754                ctx->val.vbool = cpsw_ale_control_get(cpsw->ale, 0, ALE_BYPASS);
1755                break;
1756        default:
1757                return -EOPNOTSUPP;
1758        }
1759
1760        return 0;
1761}
1762
1763static int cpsw_dl_ale_ctrl_set(struct devlink *dl, u32 id,
1764                                struct devlink_param_gset_ctx *ctx)
1765{
1766        struct cpsw_devlink *dl_priv = devlink_priv(dl);
1767        struct cpsw_common *cpsw = dl_priv->cpsw;
1768        int ret = -EOPNOTSUPP;
1769
1770        dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
1771
1772        switch (id) {
1773        case CPSW_DL_PARAM_ALE_BYPASS:
1774                ret = cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS,
1775                                           ctx->val.vbool);
1776                if (!ret) {
1777                        cpsw->ale_bypass = ctx->val.vbool;
1778                        cpsw_port_offload_fwd_mark_update(cpsw);
1779                }
1780                break;
1781        default:
1782                return -EOPNOTSUPP;
1783        }
1784
1785        return 0;
1786}
1787
1788static const struct devlink_param cpsw_devlink_params[] = {
1789        DEVLINK_PARAM_DRIVER(CPSW_DL_PARAM_SWITCH_MODE,
1790                             "switch_mode", DEVLINK_PARAM_TYPE_BOOL,
1791                             BIT(DEVLINK_PARAM_CMODE_RUNTIME),
1792                             cpsw_dl_switch_mode_get, cpsw_dl_switch_mode_set,
1793                             NULL),
1794        DEVLINK_PARAM_DRIVER(CPSW_DL_PARAM_ALE_BYPASS,
1795                             "ale_bypass", DEVLINK_PARAM_TYPE_BOOL,
1796                             BIT(DEVLINK_PARAM_CMODE_RUNTIME),
1797                             cpsw_dl_ale_ctrl_get, cpsw_dl_ale_ctrl_set, NULL),
1798};
1799
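    /* Allocate and register the devlink instance and the driver-specific
     * runtime parameters declared above.  They can then be inspected with,
     * for example (illustrative handle):
     *
     *   devlink dev param show platform/4a100000.switch name switch_mode
     */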
1800static int cpsw_register_devlink(struct cpsw_common *cpsw)
1801{
1802        struct device *dev = cpsw->dev;
1803        struct cpsw_devlink *dl_priv;
1804        int ret = 0;
1805
1806        cpsw->devlink = devlink_alloc(&cpsw_devlink_ops, sizeof(*dl_priv), dev);
1807        if (!cpsw->devlink)
1808                return -ENOMEM;
1809
1810        dl_priv = devlink_priv(cpsw->devlink);
1811        dl_priv->cpsw = cpsw;
1812
1813        ret = devlink_register(cpsw->devlink);
1814        if (ret) {
1815                dev_err(dev, "DL reg fail ret:%d\n", ret);
1816                goto dl_free;
1817        }
1818
1819        ret = devlink_params_register(cpsw->devlink, cpsw_devlink_params,
1820                                      ARRAY_SIZE(cpsw_devlink_params));
1821        if (ret) {
1822                dev_err(dev, "DL params reg fail ret:%d\n", ret);
1823                goto dl_unreg;
1824        }
1825
1826        devlink_params_publish(cpsw->devlink);
1827        return ret;
1828
1829dl_unreg:
1830        devlink_unregister(cpsw->devlink);
1831dl_free:
1832        devlink_free(cpsw->devlink);
1833        return ret;
1834}
1835
1836static void cpsw_unregister_devlink(struct cpsw_common *cpsw)
1837{
1838        devlink_params_unpublish(cpsw->devlink);
1839        devlink_params_unregister(cpsw->devlink, cpsw_devlink_params,
1840                                  ARRAY_SIZE(cpsw_devlink_params));
1841        devlink_unregister(cpsw->devlink);
1842        devlink_free(cpsw->devlink);
1843}
1844
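    /* This switchdev driver binds only to the dedicated "*-cpsw-switch"
     * compatibles; boards that use the legacy "ti,cpsw" bindings keep
     * using the original cpsw driver.
     */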
1845static const struct of_device_id cpsw_of_mtable[] = {
1846        { .compatible = "ti,cpsw-switch"},
1847        { .compatible = "ti,am335x-cpsw-switch"},
1848        { .compatible = "ti,am4372-cpsw-switch"},
1849        { .compatible = "ti,dra7-cpsw-switch"},
1850        { /* sentinel */ },
1851};
1852MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
1853
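    /* On AM33xx ES1.0 the interrupt quirk handling is enabled: with
     * quirk_irq set, the driver uses TX CPDMA channel 0 instead of 7 and
     * the single-queue cpsw_rx_poll()/cpsw_tx_poll() NAPI handlers instead
     * of the multiqueue variants (see the quirk_irq checks in
     * cpsw_create_ports() and cpsw_probe()).
     */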
1854static const struct soc_device_attribute cpsw_soc_devices[] = {
1855        { .family = "AM33xx", .revision = "ES1.0"},
1856        { /* sentinel */ }
1857};
1858
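    /* Probe sequence: map registers and IRQs, parse the DT, initialise the
     * common CPSW resources (ALE, CPTS, CPDMA), create one TX and one RX
     * CPDMA channel, create the per-port net_devices, request the RX/TX
     * (and, with CPTS, the misc) IRQs, then register the notifiers, the
     * devlink instance and finally the net_devices.
     */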
1859static int cpsw_probe(struct platform_device *pdev)
1860{
1861        const struct soc_device_attribute *soc;
1862        struct device *dev = &pdev->dev;
1863        struct cpsw_common *cpsw;
1864        struct resource *ss_res;
1865        struct gpio_descs *mode;
1866        void __iomem *ss_regs;
1867        int ret = 0, ch;
1868        struct clk *clk;
1869        int irq;
1870
1871        cpsw = devm_kzalloc(dev, sizeof(struct cpsw_common), GFP_KERNEL);
1872        if (!cpsw)
1873                return -ENOMEM;
1874
1875        cpsw_slave_index = cpsw_slave_index_priv;
1876
1877        cpsw->dev = dev;
1878
1879        cpsw->slaves = devm_kcalloc(dev,
1880                                    CPSW_SLAVE_PORTS_NUM,
1881                                    sizeof(struct cpsw_slave),
1882                                    GFP_KERNEL);
1883        if (!cpsw->slaves)
1884                return -ENOMEM;
1885
1886        mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
1887        if (IS_ERR(mode)) {
1888                ret = PTR_ERR(mode);
1889                dev_err(dev, "gpio request failed, ret %d\n", ret);
1890                return ret;
1891        }
1892
1893        clk = devm_clk_get(dev, "fck");
1894        if (IS_ERR(clk)) {
1895                ret = PTR_ERR(clk);
1896                dev_err(dev, "fck is not found %d\n", ret);
1897                return ret;
1898        }
1899        cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
1900
1901        ss_regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ss_res);
1902        if (IS_ERR(ss_regs)) {
1903                ret = PTR_ERR(ss_regs);
1904                return ret;
1905        }
1906        cpsw->regs = ss_regs;
1907
1908        irq = platform_get_irq_byname(pdev, "rx");
1909        if (irq < 0)
1910                return irq;
1911        cpsw->irqs_table[0] = irq;
1912
1913        irq = platform_get_irq_byname(pdev, "tx");
1914        if (irq < 0)
1915                return irq;
1916        cpsw->irqs_table[1] = irq;
1917
1918        irq = platform_get_irq_byname(pdev, "misc");
1919        if (irq <= 0)
1920                return irq;
1921        cpsw->misc_irq = irq;
1922
1923        platform_set_drvdata(pdev, cpsw);
1924        /* Enabling runtime PM here may be required for child devices. */
1925        pm_runtime_enable(dev);
1926
1927        /* Clocks need to be enabled via the runtime PM API before the
1928         * module registers can be accessed.
1929         */
1930        ret = pm_runtime_get_sync(dev);
1931        if (ret < 0) {
1932                pm_runtime_put_noidle(dev);
1933                pm_runtime_disable(dev);
1934                return ret;
1935        }
1936
1937        ret = cpsw_probe_dt(cpsw);
1938        if (ret)
1939                goto clean_dt_ret;
1940
1941        soc = soc_device_match(cpsw_soc_devices);
1942        if (soc)
1943                cpsw->quirk_irq = true;
1944
1945        cpsw->rx_packet_max = rx_packet_max;
1946        cpsw->descs_pool_size = descs_pool_size;
1947        eth_random_addr(cpsw->base_mac);
1948
1949        ret = cpsw_init_common(cpsw, ss_regs, ale_ageout,
1950                               (u32 __force)ss_res->start + CPSW2_BD_OFFSET,
1951                               descs_pool_size);
1952        if (ret)
1953                goto clean_dt_ret;
1954
1955        cpsw->wr_regs = cpsw->version == CPSW_VERSION_1 ?
1956                        ss_regs + CPSW1_WR_OFFSET :
1957                        ss_regs + CPSW2_WR_OFFSET;
1958
1959        ch = cpsw->quirk_irq ? 0 : 7;
1960        cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
1961        if (IS_ERR(cpsw->txv[0].ch)) {
1962                dev_err(dev, "error initializing tx dma channel\n");
1963                ret = PTR_ERR(cpsw->txv[0].ch);
1964                goto clean_cpts;
1965        }
1966
1967        cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
1968        if (IS_ERR(cpsw->rxv[0].ch)) {
1969                dev_err(dev, "error initializing rx dma channel\n");
1970                ret = PTR_ERR(cpsw->rxv[0].ch);
1971                goto clean_cpts;
1972        }
1973        cpsw_split_res(cpsw);
1974
1975        /* setup netdevs */
1976        ret = cpsw_create_ports(cpsw);
1977        if (ret)
1978                goto clean_unregister_netdev;
1979
1980        /* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
1981         * MISC IRQs which are always kept disabled with this driver so
1982         * we will not request them.
1983         *
1984         * If anyone wants to implement support for those, make sure to
1985         * first request and append them to irqs_table array.
1986         */
1987
1988        ret = devm_request_irq(dev, cpsw->irqs_table[0], cpsw_rx_interrupt,
1989                               0, dev_name(dev), cpsw);
1990        if (ret < 0) {
1991                dev_err(dev, "error attaching irq (%d)\n", ret);
1992                goto clean_unregister_netdev;
1993        }
1994
1995        ret = devm_request_irq(dev, cpsw->irqs_table[1], cpsw_tx_interrupt,
1996                               0, dev_name(dev), cpsw);
1997        if (ret < 0) {
1998                dev_err(dev, "error attaching irq (%d)\n", ret);
1999                goto clean_unregister_netdev;
2000        }
2001
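        /* The misc IRQ is only used to deliver CPTS events to the driver;
         * skip requesting it when CPTS is not available.
         */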
2002        if (!cpsw->cpts)
2003                goto skip_cpts;
2004
2005        ret = devm_request_irq(dev, cpsw->misc_irq, cpsw_misc_interrupt,
2006                               0, dev_name(&pdev->dev), cpsw);
2007        if (ret < 0) {
2008                dev_err(dev, "error attaching misc irq (%d)\n", ret);
2009                goto clean_unregister_netdev;
2010        }
2011
2012        /* Enable misc CPTS evnt_pend IRQ */
2013        cpts_set_irqpoll(cpsw->cpts, false);
2014
2015skip_cpts:
2016        ret = cpsw_register_notifiers(cpsw);
2017        if (ret)
2018                goto clean_unregister_netdev;
2019
2020        ret = cpsw_register_devlink(cpsw);
2021        if (ret)
2022                goto clean_unregister_notifiers;
2023
2024        ret = cpsw_register_ports(cpsw);
2025        if (ret)
2026                goto clean_unregister_notifiers;
2027
2028        dev_notice(dev, "initialized (regs %pa, pool size %d) hw_ver:%08X %d.%d (%d)\n",
2029                   &ss_res->start, descs_pool_size,
2030                   cpsw->version, CPSW_MAJOR_VERSION(cpsw->version),
2031                   CPSW_MINOR_VERSION(cpsw->version),
2032                   CPSW_RTL_VERSION(cpsw->version));
2033
2034        pm_runtime_put(dev);
2035
2036        return 0;
2037
2038clean_unregister_notifiers:
2039        cpsw_unregister_notifiers(cpsw);
2040clean_unregister_netdev:
2041        cpsw_unregister_ports(cpsw);
2042clean_cpts:
2043        cpts_release(cpsw->cpts);
2044        cpdma_ctlr_destroy(cpsw->dma);
2045clean_dt_ret:
2046        cpsw_remove_dt(cpsw);
2047        pm_runtime_put_sync(dev);
2048        pm_runtime_disable(dev);
2049        return ret;
2050}
2051
2052static int cpsw_remove(struct platform_device *pdev)
2053{
2054        struct cpsw_common *cpsw = platform_get_drvdata(pdev);
2055        int ret;
2056
2057        ret = pm_runtime_get_sync(&pdev->dev);
2058        if (ret < 0) {
2059                pm_runtime_put_noidle(&pdev->dev);
2060                return ret;
2061        }
2062
2063        cpsw_unregister_notifiers(cpsw);
2064        cpsw_unregister_devlink(cpsw);
2065        cpsw_unregister_ports(cpsw);
2066
2067        cpts_release(cpsw->cpts);
2068        cpdma_ctlr_destroy(cpsw->dma);
2069        cpsw_remove_dt(cpsw);
2070        pm_runtime_put_sync(&pdev->dev);
2071        pm_runtime_disable(&pdev->dev);
2072        return 0;
2073}
2074
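    /* System suspend: take all running ports down under RTNL so that the
     * hardware is quiesced, then move the pins to their sleep state.
     */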
2075static int __maybe_unused cpsw_suspend(struct device *dev)
2076{
2077        struct cpsw_common *cpsw = dev_get_drvdata(dev);
2078        int i;
2079
2080        rtnl_lock();
2081
2082        for (i = 0; i < cpsw->data.slaves; i++) {
2083                struct net_device *ndev = cpsw->slaves[i].ndev;
2084
2085                if (!(ndev && netif_running(ndev)))
2086                        continue;
2087
2088                cpsw_ndo_stop(ndev);
2089        }
2090
2091        rtnl_unlock();
2092
2093        /* Select sleep pin state */
2094        pinctrl_pm_select_sleep_state(dev);
2095
2096        return 0;
2097}
2098
2099static int __maybe_unused cpsw_resume(struct device *dev)
2100{
2101        struct cpsw_common *cpsw = dev_get_drvdata(dev);
2102        int i;
2103
2104        /* Select default pin state */
2105        pinctrl_pm_select_default_state(dev);
2106
2107        /* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
2108        rtnl_lock();
2109
2110        for (i = 0; i < cpsw->data.slaves; i++) {
2111                struct net_device *ndev = cpsw->slaves[i].ndev;
2112
2113                if (!(ndev && netif_running(ndev)))
2114                        continue;
2115
2116                cpsw_ndo_open(ndev);
2117        }
2118
2119        rtnl_unlock();
2120
2121        return 0;
2122}
2123
2124static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
2125
2126static struct platform_driver cpsw_driver = {
2127        .driver = {
2128                .name    = "cpsw-switch",
2129                .pm      = &cpsw_pm_ops,
2130                .of_match_table = cpsw_of_mtable,
2131        },
2132        .probe = cpsw_probe,
2133        .remove = cpsw_remove,
2134};
2135
2136module_platform_driver(cpsw_driver);
2137
2138MODULE_LICENSE("GPL");
2139MODULE_DESCRIPTION("TI CPSW switchdev Ethernet driver");
2140