linux/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 CGX driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "cgx.h"

#define DRV_NAME        "octeontx2-cgx"
#define DRV_STRING      "Marvell OcteonTX2 CGX/MAC Driver"

/**
 * struct lmac - CGX's per LMAC context
 * @wq_cmd_cmplt:       waitq to keep the process blocked until cmd completion
 * @cmd_lock:           Lock to serialize the command interface
 * @resp:               command response
 * @link_info:          link related information
 * @event_cb:           callback for link change events
 * @event_cb_lock:      lock for serializing callback with unregister
 * @cmd_pend:           flag set before a new command is started and
 *                      cleared once the command response is received
 * @cgx:                parent cgx port
 * @lmac_id:            lmac port id
 * @name:               lmac port name
 */
struct lmac {
        wait_queue_head_t wq_cmd_cmplt;
        struct mutex cmd_lock;
        u64 resp;
        struct cgx_link_user_info link_info;
        struct cgx_event_cb event_cb;
        spinlock_t event_cb_lock;
        bool cmd_pend;
        struct cgx *cgx;
        u8 lmac_id;
        char *name;
};

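/**
 * struct cgx - per CGX port context
 * @reg_base:           mapped base of this CGX's CSR region
 * @pdev:               underlying PCI device
 * @cgx_id:             id of this CGX block on the system
 * @lmac_count:         number of LMACs enabled on this CGX
 * @lmac_idmap:         per LMAC context, indexed by LMAC id
 * @cgx_cmd_work:       deferred work for bringing up LMAC links
 * @cgx_cmd_workq:      workqueue that runs @cgx_cmd_work
 * @cgx_list:           node in the global list of probed CGX devices
 */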
struct cgx {
        void __iomem            *reg_base;
        struct pci_dev          *pdev;
        u8                      cgx_id;
        u8                      lmac_count;
        struct lmac             *lmac_idmap[MAX_LMAC_PER_CGX];
        struct work_struct      cgx_cmd_work;
        struct workqueue_struct *cgx_cmd_workq;
        struct list_head        cgx_list;
};

static LIST_HEAD(cgx_list);

/* Convert firmware speed encoding to user format (Mbps) */
static u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX];

/* Convert firmware lmac type encoding to string */
static char *cgx_lmactype_string[LMAC_MODE_MAX];

/* CGX PHY management internal APIs */
static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);

/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
        { 0, }  /* end of table */
};

MODULE_DEVICE_TABLE(pci, cgx_id_table);

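/* Per-LMAC CSR accessors: each LMAC's registers occupy their own block
 * within the CGX BAR, selected by placing the LMAC id at bit 18 of the
 * register offset.
 */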
static void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
{
        writeq(val, cgx->reg_base + (lmac << 18) + offset);
}

static u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
{
        return readq(cgx->reg_base + (lmac << 18) + offset);
}

static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
{
        if (!cgx || lmac_id >= MAX_LMAC_PER_CGX)
                return NULL;

        return cgx->lmac_idmap[lmac_id];
}

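/* Returns the size needed for per-CGX tables, i.e. the highest CGX id
 * probed so far plus one; 0 if no CGX devices are present yet.
 */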
int cgx_get_cgxcnt_max(void)
{
        struct cgx *cgx_dev;
        int idmax = -ENODEV;

        list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
                if (cgx_dev->cgx_id > idmax)
                        idmax = cgx_dev->cgx_id;

        if (idmax < 0)
                return 0;

        return idmax + 1;
}

int cgx_get_lmac_cnt(void *cgxd)
{
        struct cgx *cgx = cgxd;

        if (!cgx)
                return -ENODEV;

        return cgx->lmac_count;
}

void *cgx_get_pdata(int cgx_id)
{
        struct cgx *cgx_dev;

        list_for_each_entry(cgx_dev, &cgx_list, cgx_list) {
                if (cgx_dev->cgx_id == cgx_id)
                        return cgx_dev;
        }
        return NULL;
}

int cgx_get_cgxid(void *cgxd)
{
        struct cgx *cgx = cgxd;

        if (!cgx)
                return -EINVAL;

        return cgx->cgx_id;
}

/* The caller must acquire the lock that protects the event queue (where
 * asynchronous events are posted) before calling this API. Otherwise an
 * asynchronous event (carrying the latest link status) can reach the
 * destination before this function returns and make the link status
 * appear wrong.
 */
int cgx_get_link_info(void *cgxd, int lmac_id,
                      struct cgx_link_user_info *linfo)
{
        struct lmac *lmac = lmac_pdata(lmac_id, cgxd);

        if (!lmac)
                return -ENODEV;

        *linfo = lmac->link_info;
        return 0;
}

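/* Pack a 6-byte MAC address into the low 48 bits of a u64, first octet
 * in the most significant byte, for programming the DMAC CAM.
 */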
static u64 mac2u64(u8 *mac_addr)
{
        u64 mac = 0;
        int index;

        for (index = ETH_ALEN - 1; index >= 0; index--)
                mac |= ((u64)*mac_addr++) << (8 * index);
        return mac;
}

int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        u64 cfg;

        /* Program the MAC address into this LMAC's DMAC CAM entry and
         * mark the entry valid; the owning LMAC id is encoded in the
         * upper bits of the entry.
         */
        cfg = mac2u64(mac_addr);

        cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)),
                  cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));

        cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
        cfg |= CGX_DMAC_CTL0_CAM_ENABLE;
        cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

        return 0;
}

u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        u64 cfg;

        cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8);
        return cfg & CGX_RX_DMAC_ADR_MASK;
}

int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
{
        struct cgx *cgx = cgxd;

        if (!cgx || lmac_id >= cgx->lmac_count)
                return -ENODEV;

        cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F));
        return 0;
}

static inline u8 cgx_get_lmac_type(struct cgx *cgx, int lmac_id)
{
        u64 cfg;

        cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
        return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
}

/* Configure CGX LMAC in internal loopback mode */
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        u8 lmac_type;
        u64 cfg;

        if (!cgx || lmac_id >= cgx->lmac_count)
                return -ENODEV;

        lmac_type = cgx_get_lmac_type(cgx, lmac_id);
        if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) {
                cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
                if (enable)
                        cfg |= CGXX_GMP_PCS_MRX_CTL_LBK;
                else
                        cfg &= ~CGXX_GMP_PCS_MRX_CTL_LBK;
                cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL, cfg);
        } else {
                cfg = cgx_read(cgx, lmac_id, CGXX_SPUX_CONTROL1);
                if (enable)
                        cfg |= CGXX_SPUX_CONTROL1_LBK;
                else
                        cfg &= ~CGXX_SPUX_CONTROL1_LBK;
                cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1, cfg);
        }
        return 0;
}

void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
        struct cgx *cgx = cgx_get_pdata(cgx_id);
        u64 cfg = 0;

        if (!cgx)
                return;

        if (enable) {
                /* Enable promiscuous mode on LMAC */
                cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
                cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE);
                cfg |= CGX_DMAC_BCAST_MODE;
                cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

                cfg = cgx_read(cgx, 0,
                               (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
                cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
                cgx_write(cgx, 0,
                          (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
        } else {
                /* Disable promiscuous mode */
                cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
                cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
                cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
                cfg = cgx_read(cgx, 0,
                               (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
                cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
                cgx_write(cgx, 0,
                          (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
        }
}

/* Enable or disable forwarding received pause frames to Tx block */
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (!cgx)
                return;

        if (enable) {
                cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
                cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
                cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

                cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
                cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
                cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
        } else {
                cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
                cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
                cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

                cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
                cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
                cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
        }
}

int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
{
        struct cgx *cgx = cgxd;

        if (!cgx || lmac_id >= cgx->lmac_count)
                return -ENODEV;
        *rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
        return 0;
}

int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
{
        struct cgx *cgx = cgxd;

        if (!cgx || lmac_id >= cgx->lmac_count)
                return -ENODEV;
        *tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8));
        return 0;
}

int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (!cgx || lmac_id >= cgx->lmac_count)
                return -ENODEV;

        cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
        if (enable)
                cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN;
        else
                cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN);
        cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
        return 0;
}

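/* Toggle just DATA_PKT_TX_EN. On success this returns the *previous*
 * TX enable state (0 or 1) rather than 0, so callers can restore it
 * later; -ENODEV is returned for an invalid cgx/lmac.
 */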
int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        u64 cfg, last;

        if (!cgx || lmac_id >= cgx->lmac_count)
                return -ENODEV;

        cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
        last = cfg;
        if (enable)
                cfg |= DATA_PKT_TX_EN;
        else
                cfg &= ~DATA_PKT_TX_EN;

        if (cfg != last)
                cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
        return !!(last & DATA_PKT_TX_EN);
}

int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
                           u8 *tx_pause, u8 *rx_pause)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (!cgx || lmac_id >= cgx->lmac_count)
                return -ENODEV;

        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
        *rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);

        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
        *tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
        return 0;
}

int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
                           u8 tx_pause, u8 rx_pause)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (!cgx || lmac_id >= cgx->lmac_count)
                return -ENODEV;

        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
        cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
        cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
        cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
        cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
        cfg |= tx_pause ? CGX_SMUX_TX_CTL_L2P_BP_CONV : 0x0;
        cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);

        cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
        if (tx_pause) {
                cfg &= ~CGX_CMR_RX_OVR_BP_EN(lmac_id);
        } else {
                cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
                cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
        }
        cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
        return 0;
}

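/* Program default 802.3x pause settings for an LMAC; called with
 * enable=true at LMAC init and enable=false at teardown.
 */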
static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable)
{
        u64 cfg;

        if (!cgx || lmac_id >= cgx->lmac_count)
                return;
        if (enable) {
                /* Enable receive pause frames */
                cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
                cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
                cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

                cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
                cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
                cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

                /* Enable pause frames transmission */
                cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
                cfg |= CGX_SMUX_TX_CTL_L2P_BP_CONV;
                cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);

                /* Set pause time and interval */
                cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME,
                          DEFAULT_PAUSE_TIME);
                cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL);
                cfg &= ~0xFFFFULL;
                cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL,
                          cfg | (DEFAULT_PAUSE_TIME / 2));

                cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_TIME,
                          DEFAULT_PAUSE_TIME);

                cfg = cgx_read(cgx, lmac_id,
                               CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL);
                cfg &= ~0xFFFFULL;
                cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
                          cfg | (DEFAULT_PAUSE_TIME / 2));
        } else {
                /* ALL pause frames received are completely ignored */
                cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
                cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
                cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

                cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
                cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
                cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

                /* Disable pause frames transmission */
                cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
                cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
                cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
        }
}

/* CGX Firmware interface low level support */
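/* Command handshake with firmware over the per-LMAC command register:
 * the register must be owned by the non-secure side (CGX_CMD_OWN_NS)
 * before a request is written with ownership flipped to firmware; the
 * interrupt handler then fills lmac->resp and wakes the waiter. Only
 * one command per LMAC may be in flight, enforced by cmd_lock.
 */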
static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
        struct cgx *cgx = lmac->cgx;
        struct device *dev;
        int err = 0;
        u64 cmd;

        /* Ensure no other command is in progress */
        err = mutex_lock_interruptible(&lmac->cmd_lock);
        if (err)
                return err;

        /* Ensure command register is free */
        cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG);
        if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) {
                err = -EBUSY;
                goto unlock;
        }

        /* Update ownership in command request */
        req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req);

        /* Mark this lmac as pending, before we start */
        lmac->cmd_pend = true;

        /* Start command in hardware */
        cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req);

        /* Ensure command is completed without errors */
        if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
                                msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
                dev = &cgx->pdev->dev;
                dev_err(dev, "cgx port %d:%d cmd timeout\n",
                        cgx->cgx_id, lmac->lmac_id);
                err = -EIO;
                goto unlock;
        }

        /* We have a valid command response; the read barrier pairs with
         * the smp_wmb() in cgx_fwi_event_handler() so the response
         * written there is visible here.
         */
        smp_rmb();
        *resp = lmac->resp;

unlock:
        mutex_unlock(&lmac->cmd_lock);

        return err;
}

static inline int cgx_fwi_cmd_generic(u64 req, u64 *resp,
                                      struct cgx *cgx, int lmac_id)
{
        struct lmac *lmac;
        int err;

        lmac = lmac_pdata(lmac_id, cgx);
        if (!lmac)
                return -ENODEV;

        err = cgx_fwi_cmd_send(req, resp, lmac);

        /* Check for valid response */
        if (!err) {
                if (FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL)
                        return -EIO;
                return 0;
        }

        return err;
}

static inline void cgx_link_usertable_init(void)
{
        cgx_speed_mbps[CGX_LINK_NONE] = 0;
        cgx_speed_mbps[CGX_LINK_10M] = 10;
        cgx_speed_mbps[CGX_LINK_100M] = 100;
        cgx_speed_mbps[CGX_LINK_1G] = 1000;
        cgx_speed_mbps[CGX_LINK_2HG] = 2500;
        cgx_speed_mbps[CGX_LINK_5G] = 5000;
        cgx_speed_mbps[CGX_LINK_10G] = 10000;
        cgx_speed_mbps[CGX_LINK_20G] = 20000;
        cgx_speed_mbps[CGX_LINK_25G] = 25000;
        cgx_speed_mbps[CGX_LINK_40G] = 40000;
        cgx_speed_mbps[CGX_LINK_50G] = 50000;
        cgx_speed_mbps[CGX_LINK_100G] = 100000;

        cgx_lmactype_string[LMAC_MODE_SGMII] = "SGMII";
        cgx_lmactype_string[LMAC_MODE_XAUI] = "XAUI";
        cgx_lmactype_string[LMAC_MODE_RXAUI] = "RXAUI";
        cgx_lmactype_string[LMAC_MODE_10G_R] = "10G_R";
        cgx_lmactype_string[LMAC_MODE_40G_R] = "40G_R";
        cgx_lmactype_string[LMAC_MODE_QSGMII] = "QSGMII";
        cgx_lmactype_string[LMAC_MODE_25G_R] = "25G_R";
        cgx_lmactype_string[LMAC_MODE_50G_R] = "50G_R";
        cgx_lmactype_string[LMAC_MODE_100G_R] = "100G_R";
        cgx_lmactype_string[LMAC_MODE_USXGMII] = "USXGMII";
}

static inline void link_status_user_format(u64 lstat,
                                           struct cgx_link_user_info *linfo,
                                           struct cgx *cgx, u8 lmac_id)
{
        char *lmac_string;

        linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
        linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
        linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
        linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
        lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
        strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
        /* linfo may live on the caller's stack; guarantee the type
         * string is NUL terminated even if the source fills the buffer.
         */
        linfo->lmac_type[LMACTYPE_STR_LEN - 1] = '\0';
}

/* Hardware event handlers */
static inline void cgx_link_change_handler(u64 lstat,
                                           struct lmac *lmac)
{
        struct cgx_link_user_info *linfo;
        struct cgx *cgx = lmac->cgx;
        struct cgx_link_event event;
        struct device *dev;
        int err_type;

        dev = &cgx->pdev->dev;

        link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id);
        err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);

        event.cgx_id = cgx->cgx_id;
        event.lmac_id = lmac->lmac_id;

        /* update the local copy of link status */
        lmac->link_info = event.link_uinfo;
        linfo = &lmac->link_info;

        /* Ensure callback doesn't get unregistered until we finish it */
        spin_lock(&lmac->event_cb_lock);

        if (!lmac->event_cb.notify_link_chg) {
                dev_dbg(dev, "cgx port %d:%d Link change handler null",
                        cgx->cgx_id, lmac->lmac_id);
                if (err_type != CGX_ERR_NONE) {
                        dev_err(dev, "cgx port %d:%d Link error %d\n",
                                cgx->cgx_id, lmac->lmac_id, err_type);
                }
                dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
                         cgx->cgx_id, lmac->lmac_id,
                         linfo->link_up ? "UP" : "DOWN", linfo->speed);
                goto err;
        }

        if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
                dev_err(dev, "event notification failure\n");
err:
        spin_unlock(&lmac->event_cb_lock);
}

static inline bool cgx_cmdresp_is_linkevent(u64 event)
{
        u8 id;

        id = FIELD_GET(EVTREG_ID, event);

        return id == CGX_CMD_LINK_BRING_UP || id == CGX_CMD_LINK_BRING_DOWN;
}

static inline bool cgx_event_is_linkevent(u64 event)
{
        return FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE;
}

static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
        struct lmac *lmac = data;
        struct cgx *cgx;
        u64 event;

        cgx = lmac->cgx;

        event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);

        if (!FIELD_GET(EVTREG_ACK, event))
                return IRQ_NONE;

        switch (FIELD_GET(EVTREG_EVT_TYPE, event)) {
        case CGX_EVT_CMD_RESP:
                /* Copy the response. Since only one command is active at a
                 * time, there is no way a response can get overwritten
                 */
                lmac->resp = event;
                /* Ensure response is updated before waking the waiting
                 * thread; pairs with the smp_rmb() in cgx_fwi_cmd_send()
                 */
                smp_wmb();

                /* There won't be separate events for link change initiated
                 * from software; hence report the command responses as events
                 */
                if (cgx_cmdresp_is_linkevent(event))
                        cgx_link_change_handler(event, lmac);

                /* Release the thread waiting for completion. Use wake_up()
                 * since the waiter in cgx_fwi_cmd_send() sleeps in
                 * wait_event_timeout(), i.e. in uninterruptible state,
                 * which wake_up_interruptible() would not wake.
                 */
                lmac->cmd_pend = false;
                wake_up(&lmac->wq_cmd_cmplt);
                break;
        case CGX_EVT_ASYNC:
                if (cgx_event_is_linkevent(event))
                        cgx_link_change_handler(event, lmac);
                break;
        }

        /* Any new event or command response will be posted by firmware
         * only after the current status is acked.
         * Ack the interrupt register as well.
         */
        cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0);
        cgx_write(lmac->cgx, lmac->lmac_id, CGXX_CMRX_INT, FW_CGX_INT);

        return IRQ_HANDLED;
}

/* APIs for PHY management using CGX firmware interface */

/* callback registration for hardware events like link change */
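/* A minimal usage sketch (hypothetical caller; names are illustrative):
 *
 *	static int my_link_notify(struct cgx_link_event *event, void *data)
 *	{
 *		// react to event->link_uinfo; return 0 on success
 *		return 0;
 *	}
 *
 *	struct cgx_event_cb cb = {
 *		.notify_link_chg = my_link_notify,
 *		.data = my_ctx,
 *	};
 *	cgx_lmac_evh_register(&cb, cgxd, lmac_id);
 */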
int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
{
        struct cgx *cgx = cgxd;
        struct lmac *lmac;

        lmac = lmac_pdata(lmac_id, cgx);
        if (!lmac)
                return -ENODEV;

        lmac->event_cb = *cb;

        return 0;
}

int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
{
        struct lmac *lmac;
        unsigned long flags;
        struct cgx *cgx = cgxd;

        lmac = lmac_pdata(lmac_id, cgx);
        if (!lmac)
                return -ENODEV;

        spin_lock_irqsave(&lmac->event_cb_lock, flags);
        lmac->event_cb.notify_link_chg = NULL;
        lmac->event_cb.data = NULL;
        spin_unlock_irqrestore(&lmac->event_cb_lock, flags);

        return 0;
}

int cgx_get_fwdata_base(u64 *base)
{
        u64 req = 0, resp;
        struct cgx *cgx;
        int err;

        cgx = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list);
        if (!cgx)
                return -ENXIO;

        req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
        err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
        if (!err)
                *base = FIELD_GET(RESP_FWD_BASE, resp);

        return err;
}

static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
{
        u64 req = 0;
        u64 resp;

        if (enable)
                req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req);
        else
                req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);

        return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}

static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
{
        u64 req = 0;

        req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
        return cgx_fwi_cmd_generic(req, resp, cgx, 0);
}

static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
{
        struct device *dev = &cgx->pdev->dev;
        int major_ver, minor_ver;
        u64 resp;
        int err;

        if (!cgx->lmac_count)
                return 0;

        err = cgx_fwi_read_version(&resp, cgx);
        if (err)
                return err;

        major_ver = FIELD_GET(RESP_MAJOR_VER, resp);
        minor_ver = FIELD_GET(RESP_MINOR_VER, resp);
        dev_dbg(dev, "Firmware command interface version = %d.%d\n",
                major_ver, minor_ver);
        if (major_ver != CGX_FIRMWARE_MAJOR_VER ||
            minor_ver != CGX_FIRMWARE_MINOR_VER)
                return -EIO;

        return 0;
}

static void cgx_lmac_linkup_work(struct work_struct *work)
{
        struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work);
        struct device *dev = &cgx->pdev->dev;
        int i, err;

        /* Do Link up for all the lmacs */
        for (i = 0; i < cgx->lmac_count; i++) {
                err = cgx_fwi_link_change(cgx, i, true);
                if (err)
                        dev_info(dev, "cgx port %d:%d Link up command failed\n",
                                 cgx->cgx_id, i);
        }
}

int cgx_lmac_linkup_start(void *cgxd)
{
        struct cgx *cgx = cgxd;

        if (!cgx)
                return -ENODEV;

        queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work);

        return 0;
}

static int cgx_lmac_init(struct cgx *cgx)
{
        struct lmac *lmac;
        int i, err;

        cgx->lmac_count = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7;
        if (cgx->lmac_count > MAX_LMAC_PER_CGX)
                cgx->lmac_count = MAX_LMAC_PER_CGX;

        for (i = 0; i < cgx->lmac_count; i++) {
                lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL);
                if (!lmac)
                        return -ENOMEM;
                lmac->name = kmalloc(sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
                if (!lmac->name) {
                        err = -ENOMEM;
                        goto err_lmac_free;
                }
                sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
                lmac->lmac_id = i;
                lmac->cgx = cgx;
                init_waitqueue_head(&lmac->wq_cmd_cmplt);
                mutex_init(&lmac->cmd_lock);
                spin_lock_init(&lmac->event_cb_lock);
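                /* Each LMAC owns a fixed block of MSI-X vectors; the
                 * firmware-interface vector for LMAC i is assumed to sit
                 * at a stride of 9 from CGX_LMAC_FWI.
                 */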
                err = request_irq(pci_irq_vector(cgx->pdev,
                                                 CGX_LMAC_FWI + i * 9),
                                  cgx_fwi_event_handler, 0, lmac->name, lmac);
                if (err)
                        goto err_name_free;

                /* Enable interrupt */
                cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S,
                          FW_CGX_INT);

                /* Add reference */
                cgx->lmac_idmap[i] = lmac;
                cgx_lmac_pause_frm_config(cgx, i, true);
        }

        return cgx_lmac_verify_fwi_version(cgx);

err_name_free:
        kfree(lmac->name);
err_lmac_free:
        kfree(lmac);
        return err;
}

static int cgx_lmac_exit(struct cgx *cgx)
{
        struct lmac *lmac;
        int i;

        if (cgx->cgx_cmd_workq) {
                flush_workqueue(cgx->cgx_cmd_workq);
                destroy_workqueue(cgx->cgx_cmd_workq);
                cgx->cgx_cmd_workq = NULL;
        }

        /* Free all lmac related resources */
        for (i = 0; i < cgx->lmac_count; i++) {
                cgx_lmac_pause_frm_config(cgx, i, false);
                lmac = cgx->lmac_idmap[i];
                if (!lmac)
                        continue;
                free_irq(pci_irq_vector(cgx->pdev, CGX_LMAC_FWI + i * 9), lmac);
                kfree(lmac->name);
                kfree(lmac);
        }

        return 0;
}

static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct device *dev = &pdev->dev;
        struct cgx *cgx;
        int err, nvec;

        cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL);
        if (!cgx)
                return -ENOMEM;
        cgx->pdev = pdev;

        pci_set_drvdata(pdev, cgx);

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(dev, "Failed to enable PCI device\n");
                pci_set_drvdata(pdev, NULL);
                return err;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                dev_err(dev, "PCI request regions failed 0x%x\n", err);
                goto err_disable_device;
        }

        /* Map configuration registers */
        cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
        if (!cgx->reg_base) {
                dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n");
                err = -ENOMEM;
                goto err_release_regions;
        }

        nvec = CGX_NVEC;
        err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
        if (err < 0 || err != nvec) {
                dev_err(dev, "Request for %d msix vectors failed, err %d\n",
                        nvec, err);
                goto err_release_regions;
        }

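        /* Derive this CGX instance's id from its CSR BAR base address */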
        cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
                & CGX_ID_MASK;

        /* init wq for processing linkup requests */
        INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
        cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
        if (!cgx->cgx_cmd_workq) {
                dev_err(dev, "alloc workqueue failed for cgx cmd");
                err = -ENOMEM;
                goto err_free_irq_vectors;
        }

        list_add(&cgx->cgx_list, &cgx_list);

        cgx_link_usertable_init();

        err = cgx_lmac_init(cgx);
        if (err)
                goto err_release_lmac;

        return 0;

err_release_lmac:
        cgx_lmac_exit(cgx);
        list_del(&cgx->cgx_list);
err_free_irq_vectors:
        pci_free_irq_vectors(pdev);
err_release_regions:
        pci_release_regions(pdev);
err_disable_device:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}

static void cgx_remove(struct pci_dev *pdev)
{
        struct cgx *cgx = pci_get_drvdata(pdev);

        cgx_lmac_exit(cgx);
        list_del(&cgx->cgx_list);
        pci_free_irq_vectors(pdev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}

struct pci_driver cgx_driver = {
        .name = DRV_NAME,
        .id_table = cgx_id_table,
        .probe = cgx_probe,
        .remove = cgx_remove,
};