/* linux/drivers/net/ethernet/microchip/lan966x/lan966x_main.c */
   1// SPDX-License-Identifier: GPL-2.0+
   2
   3#include <linux/module.h>
   4#include <linux/if_bridge.h>
   5#include <linux/if_vlan.h>
   6#include <linux/iopoll.h>
   7#include <linux/of_platform.h>
   8#include <linux/of_net.h>
   9#include <linux/packing.h>
  10#include <linux/phy/phy.h>
  11#include <linux/reset.h>
  12
  13#include "lan966x_main.h"
  14
/* Status words read from QS_XTR_RD: bit 7 marks a status (non-data) word
 * and bits 24-26 encode the condition.
 */
#define XTR_EOF_0                       0x00000080U
#define XTR_EOF_1                       0x01000080U
#define XTR_EOF_2                       0x02000080U
#define XTR_EOF_3                       0x03000080U
#define XTR_PRUNED                      0x04000080U
#define XTR_ABORT                       0x05000080U
#define XTR_ESCAPE                      0x06000080U
#define XTR_NOT_READY                   0x07000080U
/* Number of valid bytes carried by the data word following an EOF status */
#define XTR_VALID_BYTES(x)              (4 - (((x) >> 24) & 3))

/* Poll interval and timeout used with the read*_poll_timeout() helpers */
#define READL_SLEEP_US                  10
#define READL_TIMEOUT_US                100000000

/* The switch registers are spread over two memory resources */
#define IO_RANGES 2
  29
/* Device-tree match table; exported so the module autoloads on this
 * compatible string.
 */
static const struct of_device_id lan966x_match[] = {
	{ .compatible = "microchip,lan966x-switch" },
	{ }
};
MODULE_DEVICE_TABLE(of, lan966x_match);
  35
/* Describes where a register target lives: which of the IO_RANGES memory
 * resources it belongs to and its offset inside that resource.
 */
struct lan966x_main_io_resource {
	enum lan966x_target id;		/* register target (TARGET_*) */
	phys_addr_t offset;		/* offset within the resource */
	int range;			/* index into the memory resources */
};
  41
/* Target-to-offset map; range 0 is the CPU resource, range 1 the switch
 * core resource. The trailing comments give the absolute addresses.
 */
static const struct lan966x_main_io_resource lan966x_main_iomap[] =  {
	{ TARGET_CPU,                   0xc0000, 0 }, /* 0xe00c0000 */
	{ TARGET_ORG,                         0, 1 }, /* 0xe2000000 */
	{ TARGET_GCB,                    0x4000, 1 }, /* 0xe2004000 */
	{ TARGET_QS,                     0x8000, 1 }, /* 0xe2008000 */
	{ TARGET_CHIP_TOP,              0x10000, 1 }, /* 0xe2010000 */
	{ TARGET_REW,                   0x14000, 1 }, /* 0xe2014000 */
	{ TARGET_SYS,                   0x28000, 1 }, /* 0xe2028000 */
	{ TARGET_DEV,                   0x34000, 1 }, /* 0xe2034000 */
	{ TARGET_DEV +  1,              0x38000, 1 }, /* 0xe2038000 */
	{ TARGET_DEV +  2,              0x3c000, 1 }, /* 0xe203c000 */
	{ TARGET_DEV +  3,              0x40000, 1 }, /* 0xe2040000 */
	{ TARGET_DEV +  4,              0x44000, 1 }, /* 0xe2044000 */
	{ TARGET_DEV +  5,              0x48000, 1 }, /* 0xe2048000 */
	{ TARGET_DEV +  6,              0x4c000, 1 }, /* 0xe204c000 */
	{ TARGET_DEV +  7,              0x50000, 1 }, /* 0xe2050000 */
	{ TARGET_QSYS,                 0x100000, 1 }, /* 0xe2100000 */
	{ TARGET_AFI,                  0x120000, 1 }, /* 0xe2120000 */
	{ TARGET_ANA,                  0x140000, 1 }, /* 0xe2140000 */
};
  62
  63static int lan966x_create_targets(struct platform_device *pdev,
  64                                  struct lan966x *lan966x)
  65{
  66        struct resource *iores[IO_RANGES];
  67        void __iomem *begin[IO_RANGES];
  68        int idx;
  69
  70        /* Initially map the entire range and after that update each target to
  71         * point inside the region at the correct offset. It is possible that
  72         * other devices access the same region so don't add any checks about
  73         * this.
  74         */
  75        for (idx = 0; idx < IO_RANGES; idx++) {
  76                iores[idx] = platform_get_resource(pdev, IORESOURCE_MEM,
  77                                                   idx);
  78                if (!iores[idx]) {
  79                        dev_err(&pdev->dev, "Invalid resource\n");
  80                        return -EINVAL;
  81                }
  82
  83                begin[idx] = devm_ioremap(&pdev->dev,
  84                                          iores[idx]->start,
  85                                          resource_size(iores[idx]));
  86                if (!begin[idx]) {
  87                        dev_err(&pdev->dev, "Unable to get registers: %s\n",
  88                                iores[idx]->name);
  89                        return -ENOMEM;
  90                }
  91        }
  92
  93        for (idx = 0; idx < ARRAY_SIZE(lan966x_main_iomap); idx++) {
  94                const struct lan966x_main_io_resource *iomap =
  95                        &lan966x_main_iomap[idx];
  96
  97                lan966x->regs[iomap->id] = begin[iomap->range] + iomap->offset;
  98        }
  99
 100        return 0;
 101}
 102
 103static int lan966x_port_set_mac_address(struct net_device *dev, void *p)
 104{
 105        struct lan966x_port *port = netdev_priv(dev);
 106        struct lan966x *lan966x = port->lan966x;
 107        const struct sockaddr *addr = p;
 108        int ret;
 109
 110        /* Learn the new net device MAC address in the mac table. */
 111        ret = lan966x_mac_cpu_learn(lan966x, addr->sa_data, HOST_PVID);
 112        if (ret)
 113                return ret;
 114
 115        /* Then forget the previous one. */
 116        ret = lan966x_mac_cpu_forget(lan966x, dev->dev_addr, HOST_PVID);
 117        if (ret)
 118                return ret;
 119
 120        eth_hw_addr_set(dev, addr->sa_data);
 121        return ret;
 122}
 123
 124static int lan966x_port_get_phys_port_name(struct net_device *dev,
 125                                           char *buf, size_t len)
 126{
 127        struct lan966x_port *port = netdev_priv(dev);
 128        int ret;
 129
 130        ret = snprintf(buf, len, "p%d", port->chip_port);
 131        if (ret >= len)
 132                return -EINVAL;
 133
 134        return 0;
 135}
 136
/* ndo_open: enable reception and MAC learning on the port, then attach
 * the PHY via phylink and bring the link up.
 */
static int lan966x_port_open(struct net_device *dev)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;
	int err;

	/* Enable receiving frames on the port, and activate auto-learning of
	 * MAC addresses.
	 */
	lan_rmw(ANA_PORT_CFG_LEARNAUTO_SET(1) |
		ANA_PORT_CFG_RECV_ENA_SET(1) |
		ANA_PORT_CFG_PORTID_VAL_SET(port->chip_port),
		ANA_PORT_CFG_LEARNAUTO |
		ANA_PORT_CFG_RECV_ENA |
		ANA_PORT_CFG_PORTID_VAL,
		lan966x, ANA_PORT_CFG(port->chip_port));

	err = phylink_fwnode_phy_connect(port->phylink, port->fwnode, 0);
	if (err) {
		netdev_err(dev, "Could not attach to PHY\n");
		return err;
	}

	phylink_start(port->phylink);

	return 0;
}
 164
/* ndo_stop: take the port down and detach the PHY, undoing
 * lan966x_port_open().
 */
static int lan966x_port_stop(struct net_device *dev)
{
	struct lan966x_port *port = netdev_priv(dev);

	lan966x_port_config_down(port);
	phylink_stop(port->phylink);
	phylink_disconnect_phy(port->phylink);

	return 0;
}
 175
/* readx_poll_timeout_atomic() helper: read the injection status register */
static int lan966x_port_inj_status(struct lan966x *lan966x)
{
	return lan_rd(lan966x, QS_INJ_STATUS);
}
 180
/* Busy-wait until injection group @grp reports FIFO room.
 * Returns 0 when ready or a negative errno on timeout.
 */
static int lan966x_port_inj_ready(struct lan966x *lan966x, u8 grp)
{
	u32 val;

	return readx_poll_timeout_atomic(lan966x_port_inj_status, lan966x, val,
					 QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp),
					 READL_SLEEP_US, READL_TIMEOUT_US);
}
 189
/* Manually inject one frame, preceded by its IFH, into injection group 0.
 *
 * Returns NETDEV_TX_OK when the frame has been handed to the hardware
 * (the skb is consumed), or NETDEV_TX_BUSY when the injection FIFO is not
 * ready, in which case the stack will retransmit the skb.
 */
static int lan966x_port_ifh_xmit(struct sk_buff *skb,
				 __be32 *ifh,
				 struct net_device *dev)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;
	u32 i, count, last;
	u8 grp = 0;
	u32 val;
	int err;

	/* Bail out early if the FIFO has no room or the watermark is hit */
	val = lan_rd(lan966x, QS_INJ_STATUS);
	if (!(QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp)) ||
	    (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)))
		return NETDEV_TX_BUSY;

	/* Write start of frame */
	lan_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
	       QS_INJ_CTRL_SOF_SET(1),
	       lan966x, QS_INJ_CTRL(grp));

	/* Write IFH header */
	for (i = 0; i < IFH_LEN; ++i) {
		/* Wait until the fifo is ready */
		err = lan966x_port_inj_ready(lan966x, grp);
		if (err)
			return NETDEV_TX_BUSY;

		lan_wr((__force u32)ifh[i], lan966x, QS_INJ_WR(grp));
	}

	/* Write frame, one 32-bit word at a time */
	count = DIV_ROUND_UP(skb->len, 4);
	last = skb->len % 4;	/* valid bytes in the final word (0 == all 4) */
	for (i = 0; i < count; ++i) {
		/* Wait until the fifo is ready */
		err = lan966x_port_inj_ready(lan966x, grp);
		if (err)
			return NETDEV_TX_BUSY;

		lan_wr(((u32 *)skb->data)[i], lan966x, QS_INJ_WR(grp));
	}

	/* Add padding up to the minimum frame size */
	while (i < (LAN966X_BUFFER_MIN_SZ / 4)) {
		/* Wait until the fifo is ready */
		err = lan966x_port_inj_ready(lan966x, grp);
		if (err)
			return NETDEV_TX_BUSY;

		lan_wr(0, lan966x, QS_INJ_WR(grp));
		++i;
	}

	/* Indicate EOF and valid bytes in the last word */
	lan_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
	       QS_INJ_CTRL_VLD_BYTES_SET(skb->len < LAN966X_BUFFER_MIN_SZ ?
				     0 : last) |
	       QS_INJ_CTRL_EOF_SET(1),
	       lan966x, QS_INJ_CTRL(grp));

	/* Add dummy CRC */
	lan_wr(0, lan966x, QS_INJ_WR(grp));
	skb_tx_timestamp(skb);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	dev_consume_skb_any(skb);
	return NETDEV_TX_OK;
}
 261
/* Set the BYPASS field of the injection frame header: skip the analyzer
 * and use the destination ports encoded in the IFH itself.
 */
static void lan966x_ifh_set_bypass(void *ifh, u64 bypass)
{
	packing(ifh, &bypass, IFH_POS_BYPASS + IFH_WID_BYPASS - 1,
		IFH_POS_BYPASS, IFH_LEN * 4, PACK, 0);
}
 267
 268static void lan966x_ifh_set_port(void *ifh, u64 bypass)
 269{
 270        packing(ifh, &bypass, IFH_POS_DSTS + IFH_WID_DSTS - 1,
 271                IFH_POS_DSTS, IFH_LEN * 4, PACK, 0);
 272}
 273
 274static void lan966x_ifh_set_qos_class(void *ifh, u64 bypass)
 275{
 276        packing(ifh, &bypass, IFH_POS_QOS_CLASS + IFH_WID_QOS_CLASS - 1,
 277                IFH_POS_QOS_CLASS, IFH_LEN * 4, PACK, 0);
 278}
 279
 280static void lan966x_ifh_set_ipv(void *ifh, u64 bypass)
 281{
 282        packing(ifh, &bypass, IFH_POS_IPV + IFH_WID_IPV - 1,
 283                IFH_POS_IPV, IFH_LEN * 4, PACK, 0);
 284}
 285
/* Set the TCI field (VLAN tag information) of the injection frame header */
static void lan966x_ifh_set_vid(void *ifh, u64 vid)
{
	packing(ifh, &vid, IFH_POS_TCI + IFH_WID_TCI - 1,
		IFH_POS_TCI, IFH_LEN * 4, PACK, 0);
}
 291
 292static int lan966x_port_xmit(struct sk_buff *skb, struct net_device *dev)
 293{
 294        struct lan966x_port *port = netdev_priv(dev);
 295        __be32 ifh[IFH_LEN];
 296
 297        memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
 298
 299        lan966x_ifh_set_bypass(ifh, 1);
 300        lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
 301        lan966x_ifh_set_qos_class(ifh, skb->priority >= 7 ? 0x7 : skb->priority);
 302        lan966x_ifh_set_ipv(ifh, skb->priority >= 7 ? 0x7 : skb->priority);
 303        lan966x_ifh_set_vid(ifh, skb_vlan_tag_get(skb));
 304
 305        return lan966x_port_ifh_xmit(skb, ifh, dev);
 306}
 307
 308static int lan966x_port_change_mtu(struct net_device *dev, int new_mtu)
 309{
 310        struct lan966x_port *port = netdev_priv(dev);
 311        struct lan966x *lan966x = port->lan966x;
 312
 313        lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(new_mtu),
 314               lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
 315        dev->mtu = new_mtu;
 316
 317        return 0;
 318}
 319
/* __dev_mc_sync() "unsync" callback: remove a multicast address from the
 * MAC table.
 */
static int lan966x_mc_unsync(struct net_device *dev, const unsigned char *addr)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;

	return lan966x_mac_forget(lan966x, addr, HOST_PVID, ENTRYTYPE_LOCKED);
}
 327
/* __dev_mc_sync() "sync" callback: direct a multicast address to the CPU
 * by learning it in the MAC table.
 */
static int lan966x_mc_sync(struct net_device *dev, const unsigned char *addr)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;

	return lan966x_mac_cpu_learn(lan966x, addr, HOST_PVID);
}
 335
/* ndo_set_rx_mode: synchronize the device multicast list with the MAC
 * table via the sync/unsync callbacks above.
 */
static void lan966x_port_set_rx_mode(struct net_device *dev)
{
	__dev_mc_sync(dev, lan966x_mc_sync, lan966x_mc_unsync);
}
 340
 341static int lan966x_port_get_parent_id(struct net_device *dev,
 342                                      struct netdev_phys_item_id *ppid)
 343{
 344        struct lan966x_port *port = netdev_priv(dev);
 345        struct lan966x *lan966x = port->lan966x;
 346
 347        ppid->id_len = sizeof(lan966x->base_mac);
 348        memcpy(&ppid->id, &lan966x->base_mac, ppid->id_len);
 349
 350        return 0;
 351}
 352
/* Netdev operations shared by all lan966x front ports */
static const struct net_device_ops lan966x_port_netdev_ops = {
	.ndo_open                       = lan966x_port_open,
	.ndo_stop                       = lan966x_port_stop,
	.ndo_start_xmit                 = lan966x_port_xmit,
	.ndo_change_mtu                 = lan966x_port_change_mtu,
	.ndo_set_rx_mode                = lan966x_port_set_rx_mode,
	.ndo_get_phys_port_name         = lan966x_port_get_phys_port_name,
	.ndo_get_stats64                = lan966x_stats_get,
	.ndo_set_mac_address            = lan966x_port_set_mac_address,
	.ndo_get_port_parent_id         = lan966x_port_get_parent_id,
};
 364
/* Return true when @dev is a lan966x front port, identified by its ops */
bool lan966x_netdevice_check(const struct net_device *dev)
{
	return dev->netdev_ops == &lan966x_port_netdev_ops;
}
 369
/* read_poll_timeout() helper: read the next extraction word of group @grp */
static int lan966x_port_xtr_status(struct lan966x *lan966x, u8 grp)
{
	return lan_rd(lan966x, QS_XTR_RD(grp));
}
 374
/* Poll extraction group @grp until it returns something other than
 * XTR_NOT_READY. Returns 0 when data became available, negative errno on
 * timeout.
 */
static int lan966x_port_xtr_ready(struct lan966x *lan966x, u8 grp)
{
	u32 val;

	return read_poll_timeout(lan966x_port_xtr_status, val,
				 val != XTR_NOT_READY,
				 READL_SLEEP_US, READL_TIMEOUT_US, false,
				 lan966x, grp);
}
 384
/* Read one 32-bit word of extracted frame data from group @grp into @rval.
 *
 * Returns the number of valid bytes stored in @rval: 4 for a full data
 * word, 1-4 for the word carrying end-of-frame, or a negative error on
 * abort/timeout.
 */
static int lan966x_rx_frame_word(struct lan966x *lan966x, u8 grp, u32 *rval)
{
	u32 bytes_valid;
	u32 val;
	int err;

	val = lan_rd(lan966x, QS_XTR_RD(grp));
	if (val == XTR_NOT_READY) {
		/* No data yet; wait for the FIFO to fill */
		err = lan966x_port_xtr_ready(lan966x, grp);
		if (err)
			return -EIO;
	}

	switch (val) {
	case XTR_ABORT:
		return -EIO;
	case XTR_EOF_0:
	case XTR_EOF_1:
	case XTR_EOF_2:
	case XTR_EOF_3:
	case XTR_PRUNED:
		/* End of frame: the status encodes how many bytes of the
		 * following data word are valid.
		 */
		bytes_valid = XTR_VALID_BYTES(val);
		val = lan_rd(lan966x, QS_XTR_RD(grp));
		if (val == XTR_ESCAPE)
			*rval = lan_rd(lan966x, QS_XTR_RD(grp));
		else
			*rval = val;

		return bytes_valid;
	case XTR_ESCAPE:
		/* The next word is frame data that happens to match a
		 * status code.
		 */
		*rval = lan_rd(lan966x, QS_XTR_RD(grp));

		return 4;
	default:
		*rval = val;

		return 4;
	}
}
 424
/* Extract the SRCPORT field (ingress chip port) from an extraction frame
 * header.
 */
static void lan966x_ifh_get_src_port(void *ifh, u64 *src_port)
{
	packing(ifh, src_port, IFH_POS_SRCPORT + IFH_WID_SRCPORT - 1,
		IFH_POS_SRCPORT, IFH_LEN * 4, UNPACK, 0);
}
 430
/* Extract the LEN field (frame length) from an extraction frame header */
static void lan966x_ifh_get_len(void *ifh, u64 *len)
{
	packing(ifh, len, IFH_POS_LEN + IFH_WID_LEN - 1,
		IFH_POS_LEN, IFH_LEN * 4, UNPACK, 0);
}
 436
 437static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args)
 438{
 439        struct lan966x *lan966x = args;
 440        int i, grp = 0, err = 0;
 441
 442        if (!(lan_rd(lan966x, QS_XTR_DATA_PRESENT) & BIT(grp)))
 443                return IRQ_NONE;
 444
 445        do {
 446                struct net_device *dev;
 447                struct sk_buff *skb;
 448                int sz = 0, buf_len;
 449                u64 src_port, len;
 450                u32 ifh[IFH_LEN];
 451                u32 *buf;
 452                u32 val;
 453
 454                for (i = 0; i < IFH_LEN; i++) {
 455                        err = lan966x_rx_frame_word(lan966x, grp, &ifh[i]);
 456                        if (err != 4)
 457                                goto recover;
 458                }
 459
 460                err = 0;
 461
 462                lan966x_ifh_get_src_port(ifh, &src_port);
 463                lan966x_ifh_get_len(ifh, &len);
 464
 465                WARN_ON(src_port >= lan966x->num_phys_ports);
 466
 467                dev = lan966x->ports[src_port]->dev;
 468                skb = netdev_alloc_skb(dev, len);
 469                if (unlikely(!skb)) {
 470                        netdev_err(dev, "Unable to allocate sk_buff\n");
 471                        err = -ENOMEM;
 472                        break;
 473                }
 474                buf_len = len - ETH_FCS_LEN;
 475                buf = (u32 *)skb_put(skb, buf_len);
 476
 477                len = 0;
 478                do {
 479                        sz = lan966x_rx_frame_word(lan966x, grp, &val);
 480                        if (sz < 0) {
 481                                kfree_skb(skb);
 482                                goto recover;
 483                        }
 484
 485                        *buf++ = val;
 486                        len += sz;
 487                } while (len < buf_len);
 488
 489                /* Read the FCS */
 490                sz = lan966x_rx_frame_word(lan966x, grp, &val);
 491                if (sz < 0) {
 492                        kfree_skb(skb);
 493                        goto recover;
 494                }
 495
 496                /* Update the statistics if part of the FCS was read before */
 497                len -= ETH_FCS_LEN - sz;
 498
 499                if (unlikely(dev->features & NETIF_F_RXFCS)) {
 500                        buf = (u32 *)skb_put(skb, ETH_FCS_LEN);
 501                        *buf = val;
 502                }
 503
 504                skb->protocol = eth_type_trans(skb, dev);
 505
 506                if (lan966x->bridge_mask & BIT(src_port))
 507                        skb->offload_fwd_mark = 1;
 508
 509                netif_rx_ni(skb);
 510                dev->stats.rx_bytes += len;
 511                dev->stats.rx_packets++;
 512
 513recover:
 514                if (sz < 0 || err)
 515                        lan_rd(lan966x, QS_XTR_RD(grp));
 516
 517        } while (lan_rd(lan966x, QS_XTR_DATA_PRESENT) & BIT(grp));
 518
 519        return IRQ_HANDLED;
 520}
 521
/* IRQ handler for the analyzer: delegate entirely to the MAC table
 * interrupt handling.
 */
static irqreturn_t lan966x_ana_irq_handler(int irq, void *args)
{
	struct lan966x *lan966x = args;

	return lan966x_mac_irq_handler(lan966x);
}
 528
 529static void lan966x_cleanup_ports(struct lan966x *lan966x)
 530{
 531        struct lan966x_port *port;
 532        int p;
 533
 534        for (p = 0; p < lan966x->num_phys_ports; p++) {
 535                port = lan966x->ports[p];
 536                if (!port)
 537                        continue;
 538
 539                if (port->dev)
 540                        unregister_netdev(port->dev);
 541
 542                if (port->phylink) {
 543                        rtnl_lock();
 544                        lan966x_port_stop(port->dev);
 545                        rtnl_unlock();
 546                        phylink_destroy(port->phylink);
 547                        port->phylink = NULL;
 548                }
 549
 550                if (port->fwnode)
 551                        fwnode_handle_put(port->fwnode);
 552        }
 553
 554        disable_irq(lan966x->xtr_irq);
 555        lan966x->xtr_irq = -ENXIO;
 556
 557        if (lan966x->ana_irq) {
 558                disable_irq(lan966x->ana_irq);
 559                lan966x->ana_irq = -ENXIO;
 560        }
 561}
 562
/* Allocate, configure and register the net_device for chip port @p.
 *
 * Error paths leave port->dev/port->phylink either valid or NULL so that
 * lan966x_cleanup_ports() can safely undo whatever was set up.
 */
static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
			      phy_interface_t phy_mode,
			      struct fwnode_handle *portnp)
{
	struct lan966x_port *port;
	struct phylink *phylink;
	struct net_device *dev;
	int err;

	if (p >= lan966x->num_phys_ports)
		return -EINVAL;

	/* 8 TX queues, 1 RX queue; device-managed, freed with the device */
	dev = devm_alloc_etherdev_mqs(lan966x->dev,
				      sizeof(struct lan966x_port), 8, 1);
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, lan966x->dev);
	port = netdev_priv(dev);
	port->dev = dev;
	port->lan966x = lan966x;
	port->chip_port = p;
	lan966x->ports[p] = port;

	dev->max_mtu = ETH_MAX_MTU;

	dev->netdev_ops = &lan966x_port_netdev_ops;
	dev->ethtool_ops = &lan966x_ethtool_ops;
	dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_STAG_TX;
	dev->needed_headroom = IFH_LEN * sizeof(u32);

	/* Derive the port MAC address from the switch base MAC */
	eth_hw_addr_gen(dev, lan966x->base_mac, p + 1);

	/* Let frames for this address reach the CPU */
	lan966x_mac_learn(lan966x, PGID_CPU, dev->dev_addr, HOST_PVID,
			  ENTRYTYPE_LOCKED);

	port->phylink_config.dev = &port->dev->dev;
	port->phylink_config.type = PHYLINK_NETDEV;
	port->phylink_pcs.poll = true;
	port->phylink_pcs.ops = &lan966x_phylink_pcs_ops;

	port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
		MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD;

	__set_bit(PHY_INTERFACE_MODE_MII,
		  port->phylink_config.supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_GMII,
		  port->phylink_config.supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_SGMII,
		  port->phylink_config.supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_QSGMII,
		  port->phylink_config.supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_1000BASEX,
		  port->phylink_config.supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_2500BASEX,
		  port->phylink_config.supported_interfaces);

	phylink = phylink_create(&port->phylink_config,
				 portnp,
				 phy_mode,
				 &lan966x_phylink_mac_ops);
	if (IS_ERR(phylink)) {
		port->dev = NULL;
		return PTR_ERR(phylink);
	}

	port->phylink = phylink;
	phylink_set_pcs(phylink, &port->phylink_pcs);

	err = register_netdev(dev);
	if (err) {
		dev_err(lan966x->dev, "register_netdev failed\n");
		return err;
	}

	/* Start VLAN unaware, classified to the host default VID */
	lan966x_vlan_port_set_vlan_aware(port, 0);
	lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
	lan966x_vlan_port_apply(port);

	return 0;
}
 645
/* One-time switch core initialization: MAC and VLAN tables, CPU
 * extraction/injection queues, flooding PGIDs and the analyzer interrupt.
 */
static void lan966x_init(struct lan966x *lan966x)
{
	u32 p, i;

	/* MAC table initialization */
	lan966x_mac_init(lan966x);

	lan966x_vlan_init(lan966x);

	/* Flush queues */
	lan_wr(lan_rd(lan966x, QS_XTR_FLUSH) |
	       GENMASK(1, 0),
	       lan966x, QS_XTR_FLUSH);

	/* Allow to drain */
	mdelay(1);

	/* All Queues normal */
	lan_wr(lan_rd(lan966x, QS_XTR_FLUSH) &
	       ~(GENMASK(1, 0)),
	       lan966x, QS_XTR_FLUSH);

	/* Set MAC age time to default value, the entry is aged after
	 * 2 * AGE_PERIOD
	 */
	lan_wr(ANA_AUTOAGE_AGE_PERIOD_SET(BR_DEFAULT_AGEING_TIME / 2 / HZ),
	       lan966x, ANA_AUTOAGE);

	/* Disable learning for frames discarded by VLAN ingress filtering */
	lan_rmw(ANA_ADVLEARN_VLAN_CHK_SET(1),
		ANA_ADVLEARN_VLAN_CHK,
		lan966x, ANA_ADVLEARN);

	/* Setup frame ageing - "2 sec" - The unit is 6.5 us on lan966x */
	lan_wr(SYS_FRM_AGING_AGE_TX_ENA_SET(1) |
	       (20000000 / 65),
	       lan966x,  SYS_FRM_AGING);

	/* Map the 8 CPU extraction queues to CPU port */
	lan_wr(0, lan966x, QSYS_CPU_GROUP_MAP);

	/* Do byte-swap and expect status after last data word
	 * Extraction: Mode: manual extraction) | Byte_swap
	 */
	lan_wr(QS_XTR_GRP_CFG_MODE_SET(1) |
	       QS_XTR_GRP_CFG_BYTE_SWAP_SET(1),
	       lan966x, QS_XTR_GRP_CFG(0));

	/* Injection: Mode: manual injection | Byte_swap */
	lan_wr(QS_INJ_GRP_CFG_MODE_SET(1) |
	       QS_INJ_GRP_CFG_BYTE_SWAP_SET(1),
	       lan966x, QS_INJ_GRP_CFG(0));

	/* No gap between frame words on injection */
	lan_rmw(QS_INJ_CTRL_GAP_SIZE_SET(0),
		QS_INJ_CTRL_GAP_SIZE,
		lan966x, QS_INJ_CTRL(0));

	/* Enable IFH insertion/parsing on CPU ports */
	lan_wr(SYS_PORT_MODE_INCL_INJ_HDR_SET(1) |
	       SYS_PORT_MODE_INCL_XTR_HDR_SET(1),
	       lan966x, SYS_PORT_MODE(CPU_PORT));

	/* Setup flooding PGIDs */
	lan_wr(ANA_FLOODING_IPMC_FLD_MC4_DATA_SET(PGID_MCIPV4) |
	       ANA_FLOODING_IPMC_FLD_MC4_CTRL_SET(PGID_MC) |
	       ANA_FLOODING_IPMC_FLD_MC6_DATA_SET(PGID_MC) |
	       ANA_FLOODING_IPMC_FLD_MC6_CTRL_SET(PGID_MC),
	       lan966x, ANA_FLOODING_IPMC);

	/* There are 8 priorities */
	for (i = 0; i < 8; ++i)
		lan_rmw(ANA_FLOODING_FLD_MULTICAST_SET(PGID_MC) |
			ANA_FLOODING_FLD_UNICAST_SET(PGID_UC) |
			ANA_FLOODING_FLD_BROADCAST_SET(PGID_BC),
			ANA_FLOODING_FLD_MULTICAST |
			ANA_FLOODING_FLD_UNICAST |
			ANA_FLOODING_FLD_BROADCAST,
			lan966x, ANA_FLOODING(i));

	for (i = 0; i < PGID_ENTRIES; ++i)
		/* Set all the entries to obey VLAN_VLAN */
		lan_rmw(ANA_PGID_CFG_OBEY_VLAN_SET(1),
			ANA_PGID_CFG_OBEY_VLAN,
			lan966x, ANA_PGID_CFG(i));

	for (p = 0; p < lan966x->num_phys_ports; p++) {
		/* Disable bridging by default */
		lan_rmw(ANA_PGID_PGID_SET(0x0),
			ANA_PGID_PGID,
			lan966x, ANA_PGID(p + PGID_SRC));

		/* Do not forward BPDU frames to the front ports and copy them
		 * to CPU
		 */
		lan_wr(0xffff, lan966x, ANA_CPU_FWD_BPDU_CFG(p));
	}

	/* Set source buffer size for each priority and each port to 1500 bytes */
	for (i = 0; i <= QSYS_Q_RSRV; ++i) {
		lan_wr(1500 / 64, lan966x, QSYS_RES_CFG(i));
		lan_wr(1500 / 64, lan966x, QSYS_RES_CFG(512 + i));
	}

	/* Enable switching to/from cpu port */
	lan_wr(QSYS_SW_PORT_MODE_PORT_ENA_SET(1) |
	       QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(1) |
	       QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(1),
	       lan966x,  QSYS_SW_PORT_MODE(CPU_PORT));

	/* Configure and enable the CPU port */
	lan_rmw(ANA_PGID_PGID_SET(0),
		ANA_PGID_PGID,
		lan966x, ANA_PGID(CPU_PORT));
	lan_rmw(ANA_PGID_PGID_SET(BIT(CPU_PORT)),
		ANA_PGID_PGID,
		lan966x, ANA_PGID(PGID_CPU));

	/* Multicast to all other ports */
	lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
		ANA_PGID_PGID,
		lan966x, ANA_PGID(PGID_MC));

	/* This will be controlled by mrouter ports */
	lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
		ANA_PGID_PGID,
		lan966x, ANA_PGID(PGID_MCIPV4));

	/* Unicast to all other ports */
	lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
		ANA_PGID_PGID,
		lan966x, ANA_PGID(PGID_UC));

	/* Broadcast to the CPU port and to other ports */
	lan_rmw(ANA_PGID_PGID_SET(BIT(CPU_PORT) | GENMASK(lan966x->num_phys_ports - 1, 0)),
		ANA_PGID_PGID,
		lan966x, ANA_PGID(PGID_BC));

	/* Do not rewrite frames extracted to the CPU */
	lan_wr(REW_PORT_CFG_NO_REWRITE_SET(1),
	       lan966x, REW_PORT_CFG(CPU_PORT));

	/* Enable the analyzer interrupt (serviced by the MAC handler) */
	lan_rmw(ANA_ANAINTR_INTR_ENA_SET(1),
		ANA_ANAINTR_INTR_ENA,
		lan966x, ANA_ANAINTR);
}
 790
/* readx_poll_timeout() helper: read the RAM initialization status register */
static int lan966x_ram_init(struct lan966x *lan966x)
{
	return lan_rd(lan966x, SYS_RAM_INIT);
}
 795
 796static int lan966x_reset_switch(struct lan966x *lan966x)
 797{
 798        struct reset_control *switch_reset, *phy_reset;
 799        int val = 0;
 800        int ret;
 801
 802        switch_reset = devm_reset_control_get_shared(lan966x->dev, "switch");
 803        if (IS_ERR(switch_reset))
 804                return dev_err_probe(lan966x->dev, PTR_ERR(switch_reset),
 805                                     "Could not obtain switch reset");
 806
 807        phy_reset = devm_reset_control_get_shared(lan966x->dev, "phy");
 808        if (IS_ERR(phy_reset))
 809                return dev_err_probe(lan966x->dev, PTR_ERR(phy_reset),
 810                                     "Could not obtain phy reset\n");
 811
 812        reset_control_reset(switch_reset);
 813        reset_control_reset(phy_reset);
 814
 815        lan_wr(SYS_RESET_CFG_CORE_ENA_SET(0), lan966x, SYS_RESET_CFG);
 816        lan_wr(SYS_RAM_INIT_RAM_INIT_SET(1), lan966x, SYS_RAM_INIT);
 817        ret = readx_poll_timeout(lan966x_ram_init, lan966x,
 818                                 val, (val & BIT(1)) == 0, READL_SLEEP_US,
 819                                 READL_TIMEOUT_US);
 820        if (ret)
 821                return ret;
 822
 823        lan_wr(SYS_RESET_CFG_CORE_ENA_SET(1), lan966x, SYS_RESET_CFG);
 824
 825        return 0;
 826}
 827
 828static int lan966x_probe(struct platform_device *pdev)
 829{
 830        struct fwnode_handle *ports, *portnp;
 831        struct lan966x *lan966x;
 832        u8 mac_addr[ETH_ALEN];
 833        int err, i;
 834
 835        lan966x = devm_kzalloc(&pdev->dev, sizeof(*lan966x), GFP_KERNEL);
 836        if (!lan966x)
 837                return -ENOMEM;
 838
 839        platform_set_drvdata(pdev, lan966x);
 840        lan966x->dev = &pdev->dev;
 841
 842        if (!device_get_mac_address(&pdev->dev, mac_addr)) {
 843                ether_addr_copy(lan966x->base_mac, mac_addr);
 844        } else {
 845                pr_info("MAC addr was not set, use random MAC\n");
 846                eth_random_addr(lan966x->base_mac);
 847                lan966x->base_mac[5] &= 0xf0;
 848        }
 849
 850        ports = device_get_named_child_node(&pdev->dev, "ethernet-ports");
 851        if (!ports)
 852                return dev_err_probe(&pdev->dev, -ENODEV,
 853                                     "no ethernet-ports child found\n");
 854
 855        err = lan966x_create_targets(pdev, lan966x);
 856        if (err)
 857                return dev_err_probe(&pdev->dev, err,
 858                                     "Failed to create targets");
 859
 860        err = lan966x_reset_switch(lan966x);
 861        if (err)
 862                return dev_err_probe(&pdev->dev, err, "Reset failed");
 863
 864        i = 0;
 865        fwnode_for_each_available_child_node(ports, portnp)
 866                ++i;
 867
 868        lan966x->num_phys_ports = i;
 869        lan966x->ports = devm_kcalloc(&pdev->dev, lan966x->num_phys_ports,
 870                                      sizeof(struct lan966x_port *),
 871                                      GFP_KERNEL);
 872        if (!lan966x->ports)
 873                return -ENOMEM;
 874
 875        /* There QS system has 32KB of memory */
 876        lan966x->shared_queue_sz = LAN966X_BUFFER_MEMORY;
 877
 878        /* set irq */
 879        lan966x->xtr_irq = platform_get_irq_byname(pdev, "xtr");
 880        if (lan966x->xtr_irq <= 0)
 881                return -EINVAL;
 882
 883        err = devm_request_threaded_irq(&pdev->dev, lan966x->xtr_irq, NULL,
 884                                        lan966x_xtr_irq_handler, IRQF_ONESHOT,
 885                                        "frame extraction", lan966x);
 886        if (err) {
 887                pr_err("Unable to use xtr irq");
 888                return -ENODEV;
 889        }
 890
 891        lan966x->ana_irq = platform_get_irq_byname(pdev, "ana");
 892        if (lan966x->ana_irq) {
 893                err = devm_request_threaded_irq(&pdev->dev, lan966x->ana_irq, NULL,
 894                                                lan966x_ana_irq_handler, IRQF_ONESHOT,
 895                                                "ana irq", lan966x);
 896                if (err)
 897                        return dev_err_probe(&pdev->dev, err, "Unable to use ana irq");
 898        }
 899
 900        /* init switch */
 901        lan966x_init(lan966x);
 902        lan966x_stats_init(lan966x);
 903
 904        /* go over the child nodes */
 905        fwnode_for_each_available_child_node(ports, portnp) {
 906                phy_interface_t phy_mode;
 907                struct phy *serdes;
 908                u32 p;
 909
 910                if (fwnode_property_read_u32(portnp, "reg", &p))
 911                        continue;
 912
 913                phy_mode = fwnode_get_phy_mode(portnp);
 914                err = lan966x_probe_port(lan966x, p, phy_mode, portnp);
 915                if (err)
 916                        goto cleanup_ports;
 917
 918                /* Read needed configuration */
 919                lan966x->ports[p]->config.portmode = phy_mode;
 920                lan966x->ports[p]->fwnode = fwnode_handle_get(portnp);
 921
 922                serdes = devm_of_phy_get(lan966x->dev, to_of_node(portnp), NULL);
 923                if (!IS_ERR(serdes))
 924                        lan966x->ports[p]->serdes = serdes;
 925
 926                lan966x_port_init(lan966x->ports[p]);
 927        }
 928
 929        lan966x_mdb_init(lan966x);
 930        err = lan966x_fdb_init(lan966x);
 931        if (err)
 932                goto cleanup_ports;
 933
 934        return 0;
 935
 936cleanup_ports:
 937        fwnode_handle_put(portnp);
 938
 939        lan966x_cleanup_ports(lan966x);
 940
 941        cancel_delayed_work_sync(&lan966x->stats_work);
 942        destroy_workqueue(lan966x->stats_queue);
 943        mutex_destroy(&lan966x->stats_lock);
 944
 945        return err;
 946}
 947
/* Platform driver remove: tear down everything probe set up — ports
 * first, then the stats machinery, then the switching tables.
 * Always returns 0.
 */
static int lan966x_remove(struct platform_device *pdev)
{
	struct lan966x *lan966x = platform_get_drvdata(pdev);

	lan966x_cleanup_ports(lan966x);

	/* Stop the periodic stats work before destroying its queue/lock */
	cancel_delayed_work_sync(&lan966x->stats_work);
	destroy_workqueue(lan966x->stats_queue);
	mutex_destroy(&lan966x->stats_lock);

	/* Flush learned MAC entries, then release MDB and FDB state */
	lan966x_mac_purge_entries(lan966x);
	lan966x_mdb_deinit(lan966x);
	lan966x_fdb_deinit(lan966x);

	return 0;
}
 964
/* Platform driver descriptor; bound to devices matching lan966x_match */
static struct platform_driver lan966x_driver = {
	.probe = lan966x_probe,
	.remove = lan966x_remove,
	.driver = {
		.name = "lan966x-switch",
		.of_match_table = lan966x_match,
	},
};
 973
 974static int __init lan966x_switch_driver_init(void)
 975{
 976        int ret;
 977
 978        lan966x_register_notifier_blocks();
 979
 980        ret = platform_driver_register(&lan966x_driver);
 981        if (ret)
 982                goto err;
 983
 984        return 0;
 985
 986err:
 987        lan966x_unregister_notifier_blocks();
 988        return ret;
 989}
 990
/* Module exit: unregister the platform driver first (removing bound
 * devices), then the notifier blocks — reverse of init order.
 */
static void __exit lan966x_switch_driver_exit(void)
{
	platform_driver_unregister(&lan966x_driver);
	lan966x_unregister_notifier_blocks();
}
 996
module_init(lan966x_switch_driver_init);
module_exit(lan966x_switch_driver_exit);

/* Module metadata */
MODULE_DESCRIPTION("Microchip LAN966X switch driver");
MODULE_AUTHOR("Horatiu Vultur <horatiu.vultur@microchip.com>");
MODULE_LICENSE("Dual MIT/GPL");
1003