linux/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 CGX driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "cgx.h"
#include "rvu.h"
#include "lmac_common.h"

#define DRV_NAME        "Marvell-CGX/RPM"
#define DRV_STRING      "Marvell CGX/RPM Driver"

static LIST_HEAD(cgx_list);

/* Convert firmware speed encoding to user format (Mbps) */
static const u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX] = {
        [CGX_LINK_NONE] = 0,
        [CGX_LINK_10M] = 10,
        [CGX_LINK_100M] = 100,
        [CGX_LINK_1G] = 1000,
        [CGX_LINK_2HG] = 2500,
        [CGX_LINK_5G] = 5000,
        [CGX_LINK_10G] = 10000,
        [CGX_LINK_20G] = 20000,
        [CGX_LINK_25G] = 25000,
        [CGX_LINK_40G] = 40000,
        [CGX_LINK_50G] = 50000,
        [CGX_LINK_80G] = 80000,
        [CGX_LINK_100G] = 100000,
};

/* Convert firmware lmac type encoding to string */
static const char *cgx_lmactype_string[LMAC_MODE_MAX] = {
        [LMAC_MODE_SGMII] = "SGMII",
        [LMAC_MODE_XAUI] = "XAUI",
        [LMAC_MODE_RXAUI] = "RXAUI",
        [LMAC_MODE_10G_R] = "10G_R",
        [LMAC_MODE_40G_R] = "40G_R",
        [LMAC_MODE_QSGMII] = "QSGMII",
        [LMAC_MODE_25G_R] = "25G_R",
        [LMAC_MODE_50G_R] = "50G_R",
        [LMAC_MODE_100G_R] = "100G_R",
        [LMAC_MODE_USXGMII] = "USXGMII",
};

/* CGX PHY management internal APIs */
static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);

/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) },
        { 0, }  /* end of table */
};

MODULE_DEVICE_TABLE(pci, cgx_id_table);

static bool is_dev_rpm(void *cgxd)
{
        struct cgx *cgx = cgxd;

        return (cgx->pdev->device == PCI_DEVID_CN10K_RPM);
}

bool is_lmac_valid(struct cgx *cgx, int lmac_id)
{
        if (!cgx || lmac_id < 0 || lmac_id >= MAX_LMAC_PER_CGX)
                return false;
        return test_bit(lmac_id, &cgx->lmac_bmap);
}

/* Helper to get the sequential index of an enabled LMAC
 * within a CGX's lmac_bmap
 */
static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id)
{
        int tmp, id = 0;

        for_each_set_bit(tmp, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
                if (tmp == lmac_id)
                        break;
                id++;
        }

        return id;
}

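/* Worked example (illustrative): with non-contiguous serdes lanes a CGX
 * may have lmac_bmap = 0b1011, i.e. LMACs 0, 1 and 3 enabled. Then:
 *
 *      get_sequence_id_of_lmac(cgx, 0) -> 0
 *      get_sequence_id_of_lmac(cgx, 1) -> 1
 *      get_sequence_id_of_lmac(cgx, 3) -> 2
 *
 * This sequential id is what partitions shared resources such as the
 * DMAC CAM evenly across the enabled LMACs (see mac_to_index_bmap below).
 */
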
struct mac_ops *get_mac_ops(void *cgxd)
{
        if (!cgxd)
                return cgxd;

        return ((struct cgx *)cgxd)->mac_ops;
}

void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
{
        writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
               offset);
}

u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
{
        return readq(cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
                     offset);
}

struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
{
        if (!cgx || lmac_id >= MAX_LMAC_PER_CGX)
                return NULL;

        return cgx->lmac_idmap[lmac_id];
}

int cgx_get_cgxcnt_max(void)
{
        struct cgx *cgx_dev;
        int idmax = -ENODEV;

        list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
                if (cgx_dev->cgx_id > idmax)
                        idmax = cgx_dev->cgx_id;

        if (idmax < 0)
                return 0;

        return idmax + 1;
}

int cgx_get_lmac_cnt(void *cgxd)
{
        struct cgx *cgx = cgxd;

        if (!cgx)
                return -ENODEV;

        return cgx->lmac_count;
}

void *cgx_get_pdata(int cgx_id)
{
        struct cgx *cgx_dev;

        list_for_each_entry(cgx_dev, &cgx_list, cgx_list) {
                if (cgx_dev->cgx_id == cgx_id)
                        return cgx_dev;
        }
        return NULL;
}

void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);

        cgx_write(cgx_dev, lmac_id, offset, val);
}

u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);

        return cgx_read(cgx_dev, lmac_id, offset);
}

int cgx_get_cgxid(void *cgxd)
{
        struct cgx *cgx = cgxd;

        if (!cgx)
                return -EINVAL;

        return cgx->cgx_id;
}

u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        u64 cfg;

        cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_CFG);

        return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT;
}

/* Ensure the required lock for the event queue (where asynchronous events
 * are posted) is acquired before calling this API. Otherwise an asynchronous
 * event (with the latest link status) can reach the destination before this
 * function returns and make the link status appear wrong.
 */
int cgx_get_link_info(void *cgxd, int lmac_id,
                      struct cgx_link_user_info *linfo)
{
        struct lmac *lmac = lmac_pdata(lmac_id, cgxd);

        if (!lmac)
                return -ENODEV;

        *linfo = lmac->link_info;
        return 0;
}

static u64 mac2u64(u8 *mac_addr)
{
        u64 mac = 0;
        int index;

        for (index = ETH_ALEN - 1; index >= 0; index--)
                mac |= ((u64)*mac_addr++) << (8 * index);
        return mac;
}

static void cfg2mac(u64 cfg, u8 *mac_addr)
{
        int i, index = 0;

        for (i = ETH_ALEN - 1; i >= 0; i--, index++)
                mac_addr[i] = (cfg >> (8 * index)) & 0xFF;
}

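/* Worked example (illustrative): mac2u64() packs the address most
 * significant byte first and cfg2mac() is its inverse. For 00:11:22:33:44:55:
 *
 *      u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *      u64 cfg = mac2u64(mac);         // 0x0000001122334455
 *
 *      u8 out[ETH_ALEN];
 *      cfg2mac(cfg, out);              // out[] matches mac[] again
 */
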
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
        struct mac_ops *mac_ops;
        int index, id;
        u64 cfg;

        /* access mac_ops to know csr_offset */
        mac_ops = cgx_dev->mac_ops;

        cfg = mac2u64(mac_addr);

        id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

        index = id * lmac->mac_to_index_bmap.max;

        cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)),
                  cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));

        cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
        cfg |= (CGX_DMAC_CTL0_CAM_ENABLE | CGX_DMAC_BCAST_MODE |
                CGX_DMAC_MCAST_MODE);
        cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

        return 0;
}

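/* Index layout (worked example, assuming MAX_DMAC_ENTRIES_PER_CGX is 32
 * and 4 enabled LMACs): each LMAC gets mac_to_index_bmap.max = 32 / 4 = 8
 * CAM entries, and the sequential id from get_sequence_id_of_lmac()
 * selects the slice:
 *
 *      LMAC seq id 0 -> CAM entries  0..7
 *      LMAC seq id 1 -> CAM entries  8..15
 *      LMAC seq id 2 -> CAM entries 16..23
 *      LMAC seq id 3 -> CAM entries 24..31
 *
 * Entry 0 of each slice holds the interface MAC address programmed by
 * cgx_lmac_addr_set(); the remaining entries are handed out by
 * cgx_lmac_addr_add() below.
 */
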
u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id)
{
        struct mac_ops *mac_ops;
        struct cgx *cgx = cgxd;

        if (!cgxd || !is_lmac_valid(cgxd, lmac_id))
                return 0;

        /* Get mac_ops to know csr offset */
        mac_ops = cgx->mac_ops;

        return cgx_read(cgxd, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
}

u64 cgx_read_dmac_entry(void *cgxd, int index)
{
        struct mac_ops *mac_ops;
        struct cgx *cgx;

        if (!cgxd)
                return 0;

        cgx = cgxd;
        mac_ops = cgx->mac_ops;
        return cgx_read(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 8)));
}

int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
        struct mac_ops *mac_ops;
        int index, idx;
        u64 cfg = 0;
        int id;

        if (!lmac)
                return -ENODEV;

        mac_ops = cgx_dev->mac_ops;
        /* Get available index where entry is to be installed */
        idx = rvu_alloc_rsrc(&lmac->mac_to_index_bmap);
        if (idx < 0)
                return idx;

        id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

        index = id * lmac->mac_to_index_bmap.max + idx;

        cfg = mac2u64(mac_addr);
        cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
        cfg |= ((u64)lmac_id << 49);
        cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);

        cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
        cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_CAM_ACCEPT);

        if (is_multicast_ether_addr(mac_addr)) {
                cfg &= ~GENMASK_ULL(2, 1);
                cfg |= CGX_DMAC_MCAST_MODE_CAM;
                lmac->mcast_filters_count++;
        } else if (!lmac->mcast_filters_count) {
                cfg |= CGX_DMAC_MCAST_MODE;
        }

        cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

        return idx;
}

int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
        struct mac_ops *mac_ops;
        u8 index = 0, id;
        u64 cfg;

        if (!lmac)
                return -ENODEV;

        mac_ops = cgx_dev->mac_ops;
        /* Restore index 0 to its default init value as done during
         * cgx_lmac_init
         */
        set_bit(0, lmac->mac_to_index_bmap.bmap);

        id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

        index = id * lmac->mac_to_index_bmap.max + index;
        cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);

        /* Reset CGXX_CMRX_RX_DMAC_CTL0 register to default state */
        cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
        cfg &= ~CGX_DMAC_CAM_ACCEPT;
        cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
        cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

        return 0;
}

/* Allows the caller to change the MAC address associated with an index
 * in the DMAC filter table, including index 0, which is reserved for
 * the interface MAC address.
 */
int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        struct mac_ops *mac_ops;
        struct lmac *lmac;
        u64 cfg;
        int id;

        lmac = lmac_pdata(lmac_id, cgx_dev);
        if (!lmac)
                return -ENODEV;

        mac_ops = cgx_dev->mac_ops;
        /* Validate the index */
        if (index >= lmac->mac_to_index_bmap.max)
                return -EINVAL;

        /* ensure index is already set */
        if (!test_bit(index, lmac->mac_to_index_bmap.bmap))
                return -EINVAL;

        id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

        index = id * lmac->mac_to_index_bmap.max + index;

        cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
        cfg &= ~CGX_RX_DMAC_ADR_MASK;
        cfg |= mac2u64(mac_addr);

        cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
        return 0;
}

int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
        struct mac_ops *mac_ops;
        u8 mac[ETH_ALEN];
        u64 cfg;
        int id;

        if (!lmac)
                return -ENODEV;

        mac_ops = cgx_dev->mac_ops;
        /* Validate the index */
        if (index >= lmac->mac_to_index_bmap.max)
                return -EINVAL;

        /* Skip deletion for reserved index i.e. index 0 */
        if (index == 0)
                return 0;

        rvu_free_rsrc(&lmac->mac_to_index_bmap, index);

        id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

        index = id * lmac->mac_to_index_bmap.max + index;

        /* Read MAC address to check whether it is ucast or mcast */
        cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));

        cfg2mac(cfg, mac);
        if (is_multicast_ether_addr(mac))
                lmac->mcast_filters_count--;

        if (!lmac->mcast_filters_count) {
                cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
                cfg &= ~GENMASK_ULL(2, 1);
                cfg |= CGX_DMAC_MCAST_MODE;
                cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
        }

        cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);

        return 0;
}

int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);

        if (lmac)
                return lmac->mac_to_index_bmap.max;

        return 0;
}

u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
        struct mac_ops *mac_ops;
        int index;
        u64 cfg;
        int id;

        /* Guard against an invalid LMAC before dereferencing it;
         * 0 doubles as "no address programmed".
         */
        if (!lmac)
                return 0;

        mac_ops = cgx_dev->mac_ops;

        id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

        index = id * lmac->mac_to_index_bmap.max;

        cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8);
        return cfg & CGX_RX_DMAC_ADR_MASK;
}

int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
{
        struct cgx *cgx = cgxd;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F));
        return 0;
}

static u8 cgx_get_lmac_type(void *cgxd, int lmac_id)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
        return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
}

/* Configure CGX LMAC in internal loopback mode */
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        u8 lmac_type;
        u64 cfg;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac_id);
        if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) {
                cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
                if (enable)
                        cfg |= CGXX_GMP_PCS_MRX_CTL_LBK;
                else
                        cfg &= ~CGXX_GMP_PCS_MRX_CTL_LBK;
                cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL, cfg);
        } else {
                cfg = cgx_read(cgx, lmac_id, CGXX_SPUX_CONTROL1);
                if (enable)
                        cfg |= CGXX_SPUX_CONTROL1_LBK;
                else
                        cfg &= ~CGXX_SPUX_CONTROL1_LBK;
                cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1, cfg);
        }
        return 0;
}

void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
        struct cgx *cgx = cgx_get_pdata(cgx_id);
        struct lmac *lmac = lmac_pdata(lmac_id, cgx);
        struct mac_ops *mac_ops;
        u16 max_dmac;
        int index, i;
        u64 cfg = 0;
        int id;

        /* Read max_dmac only after the NULL checks; dereferencing lmac
         * before them would fault for invalid CGX/LMAC ids.
         */
        if (!cgx || !lmac)
                return;

        max_dmac = lmac->mac_to_index_bmap.max;
        id = get_sequence_id_of_lmac(cgx, lmac_id);

        mac_ops = cgx->mac_ops;
        if (enable) {
                /* Enable promiscuous mode on LMAC */
                cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
                cfg &= ~CGX_DMAC_CAM_ACCEPT;
                cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
                cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

                for (i = 0; i < max_dmac; i++) {
                        index = id * max_dmac + i;
                        cfg = cgx_read(cgx, 0,
                                       (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
                        cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
                        cgx_write(cgx, 0,
                                  (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8), cfg);
                }
        } else {
                /* Disable promiscuous mode */
                cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
                cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
                cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
                for (i = 0; i < max_dmac; i++) {
                        index = id * max_dmac + i;
                        cfg = cgx_read(cgx, 0,
                                       (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
                        if ((cfg & CGX_RX_DMAC_ADR_MASK) != 0) {
                                cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
                                cgx_write(cgx, 0,
                                          (CGXX_CMRX_RX_DMAC_CAM0 +
                                           index * 0x8),
                                          cfg);
                        }
                }
        }
}

/* Enable or disable forwarding received pause frames to Tx block */
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (!cgx)
                return;

        if (enable) {
                cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
                cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
                cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

                cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
                cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
                cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
        } else {
                cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
                cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
                cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

                cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
                cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
                cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
        }
}

int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
{
        struct cgx *cgx = cgxd;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;
        *rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
        return 0;
}

int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
{
        struct cgx *cgx = cgxd;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;
        *tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8));
        return 0;
}

u64 cgx_features_get(void *cgxd)
{
        return ((struct cgx *)cgxd)->hw_features;
}

static int cgx_set_fec_stats_count(struct cgx_link_user_info *linfo)
{
        if (!linfo->fec)
                return 0;

        switch (linfo->lmac_type_id) {
        case LMAC_MODE_SGMII:
        case LMAC_MODE_XAUI:
        case LMAC_MODE_RXAUI:
        case LMAC_MODE_QSGMII:
                return 0;
        case LMAC_MODE_10G_R:
        case LMAC_MODE_25G_R:
        case LMAC_MODE_100G_R:
        case LMAC_MODE_USXGMII:
                return 1;
        case LMAC_MODE_40G_R:
                return 4;
        case LMAC_MODE_50G_R:
                if (linfo->fec == OTX2_FEC_BASER)
                        return 2;
                else
                        return 1;
        default:
                return 0;
        }
}

int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
{
        int stats, fec_stats_count = 0;
        int corr_reg, uncorr_reg;
        struct cgx *cgx = cgxd;

        if (!cgx || lmac_id >= cgx->lmac_count)
                return -ENODEV;
        fec_stats_count =
                cgx_set_fec_stats_count(&cgx->lmac_idmap[lmac_id]->link_info);
        if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
                corr_reg = CGXX_SPUX_LNX_FEC_CORR_BLOCKS;
                uncorr_reg = CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS;
        } else {
                corr_reg = CGXX_SPUX_RSFEC_CORR;
                uncorr_reg = CGXX_SPUX_RSFEC_UNCORR;
        }
        for (stats = 0; stats < fec_stats_count; stats++) {
                rsp->fec_corr_blks +=
                        cgx_read(cgx, lmac_id, corr_reg + (stats * 8));
                rsp->fec_uncorr_blks +=
                        cgx_read(cgx, lmac_id, uncorr_reg + (stats * 8));
        }
        return 0;
}

int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
        if (enable)
                cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN;
        else
                cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN);
        cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
        return 0;
}

int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        u64 cfg, last;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
        last = cfg;
        if (enable)
                cfg |= DATA_PKT_TX_EN;
        else
                cfg &= ~DATA_PKT_TX_EN;

        if (cfg != last)
                cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
        return !!(last & DATA_PKT_TX_EN);
}

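/* Usage sketch (illustrative): unlike cgx_lmac_rx_tx_enable(), this helper
 * returns the previous TX enable state, so a caller can quiesce
 * transmission and restore the earlier state afterwards:
 *
 *      int prev = cgx_lmac_tx_enable(cgxd, lmac_id, false);
 *      // ... reconfigure hardware with TX quiesced ...
 *      cgx_lmac_tx_enable(cgxd, lmac_id, prev);
 */
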
static int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
                                         u8 *tx_pause, u8 *rx_pause)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (is_dev_rpm(cgx))
                return 0;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
        *rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);

        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
        *tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
        return 0;
}

static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
                                     u8 tx_pause, u8 rx_pause)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (is_dev_rpm(cgx))
                return 0;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
        cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
        cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
        cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
        cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
        cfg |= tx_pause ? CGX_SMUX_TX_CTL_L2P_BP_CONV : 0x0;
        cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);

        cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
        if (tx_pause) {
                cfg &= ~CGX_CMR_RX_OVR_BP_EN(lmac_id);
        } else {
                cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
                cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
        }
        cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
        return 0;
}

static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (!is_lmac_valid(cgx, lmac_id))
                return;
        if (enable) {
                /* Enable receive pause frames */
                cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
                cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
                cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

                cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
                cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
                cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

                /* Enable pause frames transmission */
                cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
                cfg |= CGX_SMUX_TX_CTL_L2P_BP_CONV;
                cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);

                /* Set pause time and interval */
                cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME,
                          DEFAULT_PAUSE_TIME);
                cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL);
                cfg &= ~0xFFFFULL;
                cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL,
                          cfg | (DEFAULT_PAUSE_TIME / 2));

                cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_TIME,
                          DEFAULT_PAUSE_TIME);

                cfg = cgx_read(cgx, lmac_id,
                               CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL);
                cfg &= ~0xFFFFULL;
                cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
                          cfg | (DEFAULT_PAUSE_TIME / 2));
        } else {
                /* ALL pause frames received are completely ignored */
                cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
                cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
                cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

                cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
                cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
                cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

                /* Disable pause frames transmission */
                cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
                cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
                cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
        }
}

void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (!cgx)
                return;

        if (is_dev_rpm(cgx))
                return;

        if (enable) {
                /* Enable inbound PTP timestamping */
                cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
                cfg |= CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
                cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

                cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
                cfg |= CGX_SMUX_RX_FRM_CTL_PTP_MODE;
                cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
        } else {
                /* Disable inbound PTP stamping */
                cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
                cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
                cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

                cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
                cfg &= ~CGX_SMUX_RX_FRM_CTL_PTP_MODE;
                cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
        }
}

/* CGX Firmware interface low level support */
int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
        struct cgx *cgx = lmac->cgx;
        struct device *dev;
        int err = 0;
        u64 cmd;

        /* Ensure no other command is in progress */
        err = mutex_lock_interruptible(&lmac->cmd_lock);
        if (err)
                return err;

        /* Ensure command register is free */
        cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG);
        if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) {
                err = -EBUSY;
                goto unlock;
        }

        /* Update ownership in command request */
        req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req);

        /* Mark this lmac as pending, before we start */
        lmac->cmd_pend = true;

        /* Start command in hardware */
        cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req);

        /* Ensure command is completed without errors */
        if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
                                msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
                dev = &cgx->pdev->dev;
                dev_err(dev, "cgx port %d:%d cmd timeout\n",
                        cgx->cgx_id, lmac->lmac_id);
                err = -EIO;
                goto unlock;
        }

        /* we have a valid command response */
        smp_rmb(); /* Ensure the latest updates are visible */
        *resp = lmac->resp;

unlock:
        mutex_unlock(&lmac->cmd_lock);

        return err;
}

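/* Usage sketch (illustrative): callers build a request with FIELD_SET()
 * and go through cgx_fwi_cmd_generic(), which resolves the lmac and
 * validates the response status, e.g. as cgx_fwi_read_version() does:
 *
 *      u64 req = 0, resp;
 *
 *      req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
 *      err = cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
 *
 * Ownership of the command register flows via CMDREG_OWN: a command may
 * only be issued while the register is owned by non-secure software
 * (CGX_CMD_OWN_NS); it is handed to firmware for the duration of the call.
 */
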
int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id)
{
        struct lmac *lmac;
        int err;

        lmac = lmac_pdata(lmac_id, cgx);
        if (!lmac)
                return -ENODEV;

        err = cgx_fwi_cmd_send(req, resp, lmac);

        /* Check for valid response */
        if (!err) {
                if (FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL)
                        return -EIO;
                else
                        return 0;
        }

        return err;
}

static int cgx_link_usertable_index_map(int speed)
{
        switch (speed) {
        case SPEED_10:
                return CGX_LINK_10M;
        case SPEED_100:
                return CGX_LINK_100M;
        case SPEED_1000:
                return CGX_LINK_1G;
        case SPEED_2500:
                return CGX_LINK_2HG;
        case SPEED_5000:
                return CGX_LINK_5G;
        case SPEED_10000:
                return CGX_LINK_10G;
        case SPEED_20000:
                return CGX_LINK_20G;
        case SPEED_25000:
                return CGX_LINK_25G;
        case SPEED_40000:
                return CGX_LINK_40G;
        case SPEED_50000:
                return CGX_LINK_50G;
        case 80000: /* no SPEED_80000 macro in ethtool for 80G */
                return CGX_LINK_80G;
        case SPEED_100000:
                return CGX_LINK_100G;
        case SPEED_UNKNOWN:
                return CGX_LINK_NONE;
        }
        return CGX_LINK_NONE;
}

static void set_mod_args(struct cgx_set_link_mode_args *args,
                         u32 speed, u8 duplex, u8 autoneg, u64 mode)
{
        /* Fill default values in case the user did not pass
         * valid parameters
         */
        if (args->duplex == DUPLEX_UNKNOWN)
                args->duplex = duplex;
        if (args->speed == SPEED_UNKNOWN)
                args->speed = speed;
        if (args->an == AUTONEG_UNKNOWN)
                args->an = autoneg;
        args->mode = mode;
        args->ports = 0;
}

static void otx2_map_ethtool_link_modes(u64 bitmask,
                                        struct cgx_set_link_mode_args *args)
{
        switch (bitmask) {
        case ETHTOOL_LINK_MODE_10baseT_Half_BIT:
                set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII));
                break;
        case ETHTOOL_LINK_MODE_10baseT_Full_BIT:
                set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII));
                break;
        case ETHTOOL_LINK_MODE_100baseT_Half_BIT:
                set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII));
                break;
        case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
                set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII));
                break;
        case ETHTOOL_LINK_MODE_1000baseT_Half_BIT:
                set_mod_args(args, 1000, 1, 1, BIT_ULL(CGX_MODE_SGMII));
                break;
        case ETHTOOL_LINK_MODE_1000baseT_Full_BIT:
                set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_SGMII));
                break;
        case ETHTOOL_LINK_MODE_1000baseX_Full_BIT:
                set_mod_args(args, 1000, 0, 0, BIT_ULL(CGX_MODE_1000_BASEX));
                break;
        case ETHTOOL_LINK_MODE_10000baseT_Full_BIT:
                set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_QSGMII));
                break;
        case ETHTOOL_LINK_MODE_10000baseSR_Full_BIT:
                set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2C));
                break;
        case ETHTOOL_LINK_MODE_10000baseLR_Full_BIT:
                set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2M));
                break;
        case ETHTOOL_LINK_MODE_10000baseKR_Full_BIT:
                set_mod_args(args, 10000, 0, 1, BIT_ULL(CGX_MODE_10G_KR));
                break;
        case ETHTOOL_LINK_MODE_25000baseSR_Full_BIT:
                set_mod_args(args, 25000, 0, 0, BIT_ULL(CGX_MODE_25G_C2C));
                break;
        case ETHTOOL_LINK_MODE_25000baseCR_Full_BIT:
                set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_CR));
                break;
        case ETHTOOL_LINK_MODE_25000baseKR_Full_BIT:
                set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_KR));
                break;
        case ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT:
                set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2C));
                break;
        case ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT:
                set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2M));
                break;
        case ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT:
                set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_CR4));
                break;
        case ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT:
                set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_KR4));
                break;
        case ETHTOOL_LINK_MODE_50000baseSR_Full_BIT:
                set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2C));
                break;
        case ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT:
                set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2M));
                break;
        case ETHTOOL_LINK_MODE_50000baseCR_Full_BIT:
                set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_CR));
                break;
        case ETHTOOL_LINK_MODE_50000baseKR_Full_BIT:
                set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_KR));
                break;
        case ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT:
                set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2C));
                break;
        case ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT:
                set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2M));
                break;
        case ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT:
                set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_CR4));
                break;
        case ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT:
                set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_KR4));
                break;
        default:
                set_mod_args(args, 0, 1, 0, BIT_ULL(CGX_MODE_MAX));
                break;
        }
}

static inline void link_status_user_format(u64 lstat,
                                           struct cgx_link_user_info *linfo,
                                           struct cgx *cgx, u8 lmac_id)
{
        const char *lmac_string;

        linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
        linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
        linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
        linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat);
        linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat);
        linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
        lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
        strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
}

/* Hardware event handlers */
static inline void cgx_link_change_handler(u64 lstat,
                                           struct lmac *lmac)
{
        struct cgx_link_user_info *linfo;
        struct cgx *cgx = lmac->cgx;
        struct cgx_link_event event;
        struct device *dev;
        int err_type;

        dev = &cgx->pdev->dev;

        link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id);
        err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);

        event.cgx_id = cgx->cgx_id;
        event.lmac_id = lmac->lmac_id;

        /* update the local copy of link status */
        lmac->link_info = event.link_uinfo;
        linfo = &lmac->link_info;

        if (err_type == CGX_ERR_SPEED_CHANGE_INVALID)
                return;

        /* Ensure callback doesn't get unregistered until we finish it */
        spin_lock(&lmac->event_cb_lock);

        if (!lmac->event_cb.notify_link_chg) {
                dev_dbg(dev, "cgx port %d:%d Link change handler null",
                        cgx->cgx_id, lmac->lmac_id);
                if (err_type != CGX_ERR_NONE) {
                        dev_err(dev, "cgx port %d:%d Link error %d\n",
                                cgx->cgx_id, lmac->lmac_id, err_type);
                }
                dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
                         cgx->cgx_id, lmac->lmac_id,
                         linfo->link_up ? "UP" : "DOWN", linfo->speed);
                goto err;
        }

        if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
                dev_err(dev, "event notification failure\n");
err:
        spin_unlock(&lmac->event_cb_lock);
}

static inline bool cgx_cmdresp_is_linkevent(u64 event)
{
        u8 id;

        id = FIELD_GET(EVTREG_ID, event);

        return id == CGX_CMD_LINK_BRING_UP ||
               id == CGX_CMD_LINK_BRING_DOWN ||
               id == CGX_CMD_MODE_CHANGE;
}

static inline bool cgx_event_is_linkevent(u64 event)
{
        return FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE;
}

static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
        u64 event, offset, clear_bit;
        struct lmac *lmac = data;
        struct cgx *cgx;

        cgx = lmac->cgx;

        /* Clear SW_INT for RPM and CMR_INT for CGX */
        offset = cgx->mac_ops->int_register;
        clear_bit = cgx->mac_ops->int_ena_bit;

        event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);

        if (!FIELD_GET(EVTREG_ACK, event))
                return IRQ_NONE;

        switch (FIELD_GET(EVTREG_EVT_TYPE, event)) {
        case CGX_EVT_CMD_RESP:
                /* Copy the response. Since only one command is active at a
                 * time, there is no way a response can get overwritten
                 */
                lmac->resp = event;
                /* Ensure response is updated before thread context starts */
                smp_wmb();
                /* There won't be separate events for link change initiated from
                 * software; hence report the command responses as events
                 */
                if (cgx_cmdresp_is_linkevent(event))
                        cgx_link_change_handler(event, lmac);

                /* Release thread waiting for completion */
                lmac->cmd_pend = false;
                wake_up_interruptible(&lmac->wq_cmd_cmplt);
                break;
        case CGX_EVT_ASYNC:
                if (cgx_event_is_linkevent(event))
                        cgx_link_change_handler(event, lmac);
                break;
        }

        /* Any new event or command response will be posted by firmware
         * only after the current status is acked.
         * Ack the interrupt register as well.
         */
        cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0);
        cgx_write(lmac->cgx, lmac->lmac_id, offset, clear_bit);

        return IRQ_HANDLED;
}

/* APIs for PHY management using CGX firmware interface */

/* callback registration for hardware events like link change */
int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
{
        struct cgx *cgx = cgxd;
        struct lmac *lmac;

        lmac = lmac_pdata(lmac_id, cgx);
        if (!lmac)
                return -ENODEV;

        lmac->event_cb = *cb;

        return 0;
}

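/* Usage sketch (illustrative): a consumer such as the RVU AF registers a
 * handler per LMAC roughly as below. The callback and data names here are
 * assumptions for the example, not part of this file:
 *
 *      static int my_notify_link_chg(struct cgx_link_event *event,
 *                                    void *data)
 *      {
 *              // consume event->link_uinfo
 *              return 0;
 *      }
 *
 *      struct cgx_event_cb cb = {
 *              .notify_link_chg = my_notify_link_chg,
 *              .data = my_ctx,
 *      };
 *      cgx_lmac_evh_register(&cb, cgxd, lmac_id);
 */
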
int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
{
        struct lmac *lmac;
        unsigned long flags;
        struct cgx *cgx = cgxd;

        lmac = lmac_pdata(lmac_id, cgx);
        if (!lmac)
                return -ENODEV;

        spin_lock_irqsave(&lmac->event_cb_lock, flags);
        lmac->event_cb.notify_link_chg = NULL;
        lmac->event_cb.data = NULL;
        spin_unlock_irqrestore(&lmac->event_cb_lock, flags);

        return 0;
}

int cgx_get_fwdata_base(u64 *base)
{
        u64 req = 0, resp;
        struct cgx *cgx;
        int first_lmac;
        int err;

        cgx = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list);
        if (!cgx)
                return -ENXIO;

        first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
        req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
        err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac);
        if (!err)
                *base = FIELD_GET(RESP_FWD_BASE, resp);

        return err;
}

int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
                      int cgx_id, int lmac_id)
{
        struct cgx *cgx = cgxd;
        u64 req = 0, resp;

        if (!cgx)
                return -ENODEV;

        if (args.mode)
                otx2_map_ethtool_link_modes(args.mode, &args);
        if (!args.speed && args.duplex && !args.an)
                return -EINVAL;

        req = FIELD_SET(CMDREG_ID, CGX_CMD_MODE_CHANGE, req);
        req = FIELD_SET(CMDMODECHANGE_SPEED,
                        cgx_link_usertable_index_map(args.speed), req);
        req = FIELD_SET(CMDMODECHANGE_DUPLEX, args.duplex, req);
        req = FIELD_SET(CMDMODECHANGE_AN, args.an, req);
        req = FIELD_SET(CMDMODECHANGE_PORT, args.ports, req);
        req = FIELD_SET(CMDMODECHANGE_FLAGS, args.mode, req);

        return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}

int cgx_set_fec(u64 fec, int cgx_id, int lmac_id)
{
        u64 req = 0, resp;
        struct cgx *cgx;
        int err = 0;

        cgx = cgx_get_pdata(cgx_id);
        if (!cgx)
                return -ENXIO;

        req = FIELD_SET(CMDREG_ID, CGX_CMD_SET_FEC, req);
        req = FIELD_SET(CMDSETFEC, fec, req);
        err = cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
        if (err)
                return err;

        cgx->lmac_idmap[lmac_id]->link_info.fec =
                        FIELD_GET(RESP_LINKSTAT_FEC, resp);
        return cgx->lmac_idmap[lmac_id]->link_info.fec;
}

int cgx_get_phy_fec_stats(void *cgxd, int lmac_id)
{
        struct cgx *cgx = cgxd;
        u64 req = 0, resp;

        if (!cgx)
                return -ENODEV;

        req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_PHY_FEC_STATS, req);
        return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}

static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
{
        u64 req = 0;
        u64 resp;

        if (enable)
                req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req);
        else
                req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);

        return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}

static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
{
        int first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
        u64 req = 0;

        req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
        return cgx_fwi_cmd_generic(req, resp, cgx, first_lmac);
}

static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
{
        struct device *dev = &cgx->pdev->dev;
        int major_ver, minor_ver;
        u64 resp;
        int err;

        if (!cgx->lmac_count)
                return 0;

        err = cgx_fwi_read_version(&resp, cgx);
        if (err)
                return err;

        major_ver = FIELD_GET(RESP_MAJOR_VER, resp);
        minor_ver = FIELD_GET(RESP_MINOR_VER, resp);
        dev_dbg(dev, "Firmware command interface version = %d.%d\n",
                major_ver, minor_ver);
        if (major_ver != CGX_FIRMWARE_MAJOR_VER)
                return -EIO;
        else
                return 0;
}

static void cgx_lmac_linkup_work(struct work_struct *work)
{
        struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work);
        struct device *dev = &cgx->pdev->dev;
        int i, err;

        /* Do Link up for all the enabled lmacs */
        for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
                err = cgx_fwi_link_change(cgx, i, true);
                if (err)
                        dev_info(dev, "cgx port %d:%d Link up command failed\n",
                                 cgx->cgx_id, i);
        }
}

int cgx_lmac_linkup_start(void *cgxd)
{
        struct cgx *cgx = cgxd;

        if (!cgx)
                return -ENODEV;

        queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work);

        return 0;
}

static void cgx_lmac_get_fifolen(struct cgx *cgx)
{
        u64 cfg;

        cfg = cgx_read(cgx, 0, CGX_CONST);
        cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
}

static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac,
                                   int cnt, bool req_free)
{
        struct mac_ops *mac_ops = cgx->mac_ops;
        u64 offset, ena_bit;
        unsigned int irq;
        int err;

        irq = pci_irq_vector(cgx->pdev, mac_ops->lmac_fwi +
                             cnt * mac_ops->irq_offset);
        offset = mac_ops->int_set_reg;
        ena_bit = mac_ops->int_ena_bit;

        if (req_free) {
                free_irq(irq, lmac);
                return 0;
        }

        err = request_irq(irq, cgx_fwi_event_handler, 0, lmac->name, lmac);
        if (err)
                return err;

        /* Enable interrupt */
        cgx_write(cgx, lmac->lmac_id, offset, ena_bit);
        return 0;
}

int cgx_get_nr_lmacs(void *cgxd)
{
        struct cgx *cgx = cgxd;

        return cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7ULL;
}

u8 cgx_get_lmacid(void *cgxd, u8 lmac_index)
{
        struct cgx *cgx = cgxd;

        return cgx->lmac_idmap[lmac_index]->lmac_id;
}

unsigned long cgx_get_lmac_bmap(void *cgxd)
{
        struct cgx *cgx = cgxd;

        return cgx->lmac_bmap;
}

static int cgx_lmac_init(struct cgx *cgx)
{
        struct lmac *lmac;
        u64 lmac_list;
        int i, err;

        cgx_lmac_get_fifolen(cgx);

        cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
        /* lmac_list specifies which lmacs are enabled
         * when bit n is set to 1, LMAC[n] is enabled
         */
        if (cgx->mac_ops->non_contiguous_serdes_lane)
                lmac_list = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL;

        if (cgx->lmac_count > MAX_LMAC_PER_CGX)
                cgx->lmac_count = MAX_LMAC_PER_CGX;

        for (i = 0; i < cgx->lmac_count; i++) {
                lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL);
                if (!lmac)
                        return -ENOMEM;
                lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
                if (!lmac->name) {
                        err = -ENOMEM;
                        goto err_lmac_free;
                }
                sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
                if (cgx->mac_ops->non_contiguous_serdes_lane) {
                        lmac->lmac_id = __ffs64(lmac_list);
                        lmac_list &= ~BIT_ULL(lmac->lmac_id);
                } else {
                        lmac->lmac_id = i;
                }

                lmac->cgx = cgx;
                lmac->mac_to_index_bmap.max =
                                MAX_DMAC_ENTRIES_PER_CGX / cgx->lmac_count;
                err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
                if (err)
                        goto err_name_free;

                /* Reserve first entry for default MAC address */
                set_bit(0, lmac->mac_to_index_bmap.bmap);

                init_waitqueue_head(&lmac->wq_cmd_cmplt);
                mutex_init(&lmac->cmd_lock);
                spin_lock_init(&lmac->event_cb_lock);
                err = cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, false);
                if (err)
                        goto err_bitmap_free;

                /* Add reference */
                cgx->lmac_idmap[lmac->lmac_id] = lmac;
                set_bit(lmac->lmac_id, &cgx->lmac_bmap);
                cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
        }

        return cgx_lmac_verify_fwi_version(cgx);

err_bitmap_free:
        rvu_free_bitmap(&lmac->mac_to_index_bmap);
err_name_free:
        kfree(lmac->name);
err_lmac_free:
        kfree(lmac);
        return err;
}

static int cgx_lmac_exit(struct cgx *cgx)
{
        struct lmac *lmac;
        int i;

        if (cgx->cgx_cmd_workq) {
                flush_workqueue(cgx->cgx_cmd_workq);
                destroy_workqueue(cgx->cgx_cmd_workq);
                cgx->cgx_cmd_workq = NULL;
        }

        /* Free all lmac related resources */
        for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
                lmac = cgx->lmac_idmap[i];
                if (!lmac)
                        continue;
                cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false);
                cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true);
                kfree(lmac->mac_to_index_bmap.bmap);
                kfree(lmac->name);
                kfree(lmac);
        }

        return 0;
}

static void cgx_populate_features(struct cgx *cgx)
{
        if (is_dev_rpm(cgx))
                cgx->hw_features = (RVU_MAC_RPM | RVU_LMAC_FEAT_FC);
        else
                cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
}

static struct mac_ops cgx_mac_ops = {
        .name           =       "cgx",
        .csr_offset     =       0,
        .lmac_offset    =       18,
        .int_register   =       CGXX_CMRX_INT,
        .int_set_reg    =       CGXX_CMRX_INT_ENA_W1S,
        .irq_offset     =       9,
        .int_ena_bit    =       FW_CGX_INT,
        .lmac_fwi       =       CGX_LMAC_FWI,
        .non_contiguous_serdes_lane = false,
        .rx_stats_cnt   =       9,
        .tx_stats_cnt   =       18,
        .get_nr_lmacs   =       cgx_get_nr_lmacs,
        .get_lmac_type  =       cgx_get_lmac_type,
        .mac_lmac_intl_lbk =    cgx_lmac_internal_loopback,
        .mac_get_rx_stats  =    cgx_get_rx_stats,
        .mac_get_tx_stats  =    cgx_get_tx_stats,
        .mac_enadis_rx_pause_fwding =   cgx_lmac_enadis_rx_pause_fwding,
        .mac_get_pause_frm_status =     cgx_lmac_get_pause_frm_status,
        .mac_enadis_pause_frm =         cgx_lmac_enadis_pause_frm,
        .mac_pause_frm_config =         cgx_lmac_pause_frm_config,
};

static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct device *dev = &pdev->dev;
        struct cgx *cgx;
        int err, nvec;

        cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL);
        if (!cgx)
                return -ENOMEM;
        cgx->pdev = pdev;

        pci_set_drvdata(pdev, cgx);

        /* Use mac_ops to get MAC specific features */
        if (pdev->device == PCI_DEVID_CN10K_RPM)
                cgx->mac_ops = rpm_get_mac_ops();
        else
                cgx->mac_ops = &cgx_mac_ops;

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(dev, "Failed to enable PCI device\n");
                pci_set_drvdata(pdev, NULL);
                return err;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                dev_err(dev, "PCI request regions failed 0x%x\n", err);
                goto err_disable_device;
        }

        /* MAP configuration registers */
        cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
        if (!cgx->reg_base) {
                dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n");
                err = -ENOMEM;
                goto err_release_regions;
        }

        nvec = pci_msix_vec_count(cgx->pdev);
        err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
        if (err < 0 || err != nvec) {
                dev_err(dev, "Request for %d msix vectors failed, err %d\n",
                        nvec, err);
                goto err_release_regions;
        }

        cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
                & CGX_ID_MASK;

        /* init wq for processing linkup requests */
        INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
        cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
        if (!cgx->cgx_cmd_workq) {
                dev_err(dev, "alloc workqueue failed for cgx cmd\n");
                err = -ENOMEM;
                goto err_free_irq_vectors;
        }

        list_add(&cgx->cgx_list, &cgx_list);

        cgx_populate_features(cgx);

        mutex_init(&cgx->lock);

        err = cgx_lmac_init(cgx);
        if (err)
                goto err_release_lmac;

        return 0;

err_release_lmac:
        cgx_lmac_exit(cgx);
        list_del(&cgx->cgx_list);
err_free_irq_vectors:
        pci_free_irq_vectors(pdev);
err_release_regions:
        pci_release_regions(pdev);
err_disable_device:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}

static void cgx_remove(struct pci_dev *pdev)
{
        struct cgx *cgx = pci_get_drvdata(pdev);

        if (cgx) {
                cgx_lmac_exit(cgx);
                list_del(&cgx->cgx_list);
        }
        pci_free_irq_vectors(pdev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}

struct pci_driver cgx_driver = {
        .name = DRV_NAME,
        .id_table = cgx_id_table,
        .probe = cgx_probe,
        .remove = cgx_remove,
};