linux/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu.h"
#include "cgx.h"
#include "lmac_common.h"
#include "rvu_reg.h"
#include "rvu_trace.h"

struct cgx_evq_entry {
        struct list_head evq_node;
        struct cgx_link_event link_event;
};

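/* Generate otx2_mbox_alloc_msg_<name>() helpers for every AF -> PF "up"
 * mailbox message listed in MBOX_UP_CGX_MESSAGES. Each helper allocates a
 * request of the right size on the AF->PF up mailbox and fills in the
 * message header.
 */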
#define M(_name, _id, _fn_name, _req_type, _rsp_type)                   \
static struct _req_type __maybe_unused                                  \
*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)           \
{                                                                       \
        struct _req_type *req;                                          \
                                                                        \
        req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(              \
                &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
                sizeof(struct _rsp_type));                              \
        if (!req)                                                       \
                return NULL;                                            \
        req->hdr.sig = OTX2_MBOX_REQ_SIG;                               \
        req->hdr.id = _id;                                              \
        trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req));             \
        return req;                                                     \
}

MBOX_UP_CGX_MESSAGES
#undef M

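/* Return true if the CGX/RPM MAC mapped to @pf supports @feature */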
bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
{
        u8 cgx_id, lmac_id;
        void *cgxd;

        if (!is_pf_cgxmapped(rvu, pf))
                return false;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
        cgxd = rvu_cgx_pdata(cgx_id, rvu);

        return (cgx_features_get(cgxd) & feature);
}

/* Returns bitmap of mapped PFs */
static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
        return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}

int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
{
        unsigned long pfmap;

        pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id);

        /* Assumes only one pf mapped to a cgx lmac port */
        if (!pfmap)
                return -ENODEV;
        else
                return find_first_bit(&pfmap, 16);
}

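/* Pack cgx_id into the upper nibble and lmac_id into the lower nibble */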
static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
{
        return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}

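/* Return the cgx device handle for a CGX instance id, or NULL if invalid */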
void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
{
        if (cgx_id >= rvu->cgx_cnt_max)
                return NULL;

        return rvu->cgx_idmap[cgx_id];
}

/* Return the first enabled CGX instance; if none are enabled, return NULL */
void *rvu_first_cgx_pdata(struct rvu *rvu)
{
        int first_enabled_cgx = 0;
        void *cgxd = NULL;

        for (; first_enabled_cgx < rvu->cgx_cnt_max; first_enabled_cgx++) {
                cgxd = rvu_cgx_pdata(first_enabled_cgx, rvu);
                if (cgxd)
                        break;
        }

        return cgxd;
}

/* Based on P2X connectivity find mapped NIX block for a PF */
static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
                                  int cgx_id, int lmac_id)
{
        struct rvu_pfvf *pfvf = &rvu->pf[pf];
        u8 p2x;

        p2x = cgx_lmac_get_p2x(cgx_id, lmac_id);
        /* Firmware sets P2X_SELECT as either NIX0 or NIX1 */
        pfvf->nix_blkaddr = BLKADDR_NIX0;
        if (p2x == CMR_P2X_SEL_NIX1)
                pfvf->nix_blkaddr = BLKADDR_NIX1;
}

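/* Build the PF <-> CGX/LMAC forward and reverse maps; for every mapped LMAC
 * also allocate a pkind and record which NIX block the LMAC feeds into.
 */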
static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
        struct npc_pkind *pkind = &rvu->hw->pkind;
        int cgx_cnt_max = rvu->cgx_cnt_max;
        int pf = PF_CGXMAP_BASE;
        unsigned long lmac_bmap;
        int size, free_pkind;
        int cgx, lmac, iter;
        int numvfs, hwvfs;

        if (!cgx_cnt_max)
                return 0;

        if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
                return -EINVAL;

        /* Alloc map table
         * An additional entry is required since PF id starts from 1 and
         * hence entry at offset 0 is invalid.
         */
        size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
        rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
        if (!rvu->pf2cgxlmac_map)
                return -ENOMEM;

        /* Initialize all entries with an invalid cgx and lmac id */
        memset(rvu->pf2cgxlmac_map, 0xFF, size);

        /* Reverse map table */
        rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
                                  cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
                                  GFP_KERNEL);
        if (!rvu->cgxlmac2pf_map)
                return -ENOMEM;

        rvu->cgx_mapped_pfs = 0;
        for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
                if (!rvu_cgx_pdata(cgx, rvu))
                        continue;
                lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
                for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
                        lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
                                              iter);
                        rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
                        rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
                        free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
                        pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
                        rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
                        rvu->cgx_mapped_pfs++;
                        rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
                        rvu->cgx_mapped_vfs += numvfs;
                        pf++;
                }
        }
        return 0;
}

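/* Read the current link state of a CGX LMAC, queue it on the event list and
 * kick the event worker so the mapped PF gets notified.
 */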
static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
{
        struct cgx_evq_entry *qentry;
        unsigned long flags;
        int err;

        qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
        if (!qentry)
                return -ENOMEM;

        /* Lock the event queue before we read the local link status */
        spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
        err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
                                &qentry->link_event.link_uinfo);
        qentry->link_event.cgx_id = cgx_id;
        qentry->link_event.lmac_id = lmac_id;
        if (err) {
                kfree(qentry);
                goto skip_add;
        }
        list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
skip_add:
        spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);

        /* start worker to process the events */
        queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

        return 0;
}

/* This is called from interrupt context and is expected to be atomic */
static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
{
        struct cgx_evq_entry *qentry;
        struct rvu *rvu = data;

        /* post event to the event queue */
        qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
        if (!qentry)
                return -ENOMEM;
        qentry->link_event = *event;
        spin_lock(&rvu->cgx_evq_lock);
        list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
        spin_unlock(&rvu->cgx_evq_lock);

        /* start worker to process the events */
        queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

        return 0;
}

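/* Notify every PF mapped to this CGX LMAC about a link state change. PFs
 * that have not enabled link notifications only get a kernel log message.
 */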
static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
{
        struct cgx_link_user_info *linfo;
        struct cgx_link_info_msg *msg;
        unsigned long pfmap;
        int err, pfid;

        linfo = &event->link_uinfo;
        pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);

        do {
                pfid = find_first_bit(&pfmap, 16);
                clear_bit(pfid, &pfmap);

                /* check if notification is enabled */
                if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
                        dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
                                 event->cgx_id, event->lmac_id,
                                 linfo->link_up ? "UP" : "DOWN");
                        continue;
                }

                /* Send mbox message to PF */
                msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
                if (!msg)
                        continue;
                msg->link_info = *linfo;
                otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
                err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
                if (err)
                        dev_warn(rvu->dev, "notification to pf %d failed\n",
                                 pfid);
        } while (pfmap);
}

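/* Workqueue handler: drain the link event queue and notify the mapped PFs */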
static void cgx_evhandler_task(struct work_struct *work)
{
        struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
        struct cgx_evq_entry *qentry;
        struct cgx_link_event *event;
        unsigned long flags;

        do {
                /* Dequeue an event */
                spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
                qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
                                                  struct cgx_evq_entry,
                                                  evq_node);
                if (qentry)
                        list_del(&qentry->evq_node);
                spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
                if (!qentry)
                        break; /* nothing more to process */

                event = &qentry->link_event;

                /* process event */
                cgx_notify_pfs(event, rvu);
                kfree(qentry);
        } while (1);
}

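/* Set up the event queue and workqueue, then register a link change callback
 * for every LMAC on every probed CGX instance.
 */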
static int cgx_lmac_event_handler_init(struct rvu *rvu)
{
        unsigned long lmac_bmap;
        struct cgx_event_cb cb;
        int cgx, lmac, err;
        void *cgxd;

        spin_lock_init(&rvu->cgx_evq_lock);
        INIT_LIST_HEAD(&rvu->cgx_evq_head);
        INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
        rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
        if (!rvu->cgx_evh_wq) {
                dev_err(rvu->dev, "alloc workqueue failed");
                return -ENOMEM;
        }

        cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
        cb.data = rvu;

        for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
                cgxd = rvu_cgx_pdata(cgx, rvu);
                if (!cgxd)
                        continue;
                lmac_bmap = cgx_get_lmac_bmap(cgxd);
                for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX) {
                        err = cgx_lmac_evh_register(&cb, cgxd, lmac);
                        if (err)
                                dev_err(rvu->dev,
                                        "%d:%d handler register failed\n",
                                        cgx, lmac);
                }
        }

        return 0;
}

static void rvu_cgx_wq_destroy(struct rvu *rvu)
{
        if (rvu->cgx_evh_wq) {
                flush_workqueue(rvu->cgx_evh_wq);
                destroy_workqueue(rvu->cgx_evh_wq);
                rvu->cgx_evh_wq = NULL;
        }
}

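/* Discover CGX instances, map their LMACs to RVU PFs, register for link
 * events and kick off link bring-up on every port.
 */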
int rvu_cgx_init(struct rvu *rvu)
{
        int cgx, err;
        void *cgxd;

        /* CGX port ids start from 0 and are not necessarily contiguous.
         * Hence we allocate resources based on the maximum port id value.
         */
        rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
        if (!rvu->cgx_cnt_max) {
                dev_info(rvu->dev, "No CGX devices found!\n");
                return -ENODEV;
        }

        rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
                                      sizeof(void *), GFP_KERNEL);
        if (!rvu->cgx_idmap)
                return -ENOMEM;

        /* Initialize the cgxdata table */
        for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
                rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);

        /* Map CGX LMAC interfaces to RVU PFs */
        err = rvu_map_cgx_lmac_pf(rvu);
        if (err)
                return err;

        /* Register for CGX events */
        err = cgx_lmac_event_handler_init(rvu);
        if (err)
                return err;

        mutex_init(&rvu->cgx_cfg_lock);

        /* Ensure event handler registration is completed, before
         * we turn on the links
         */
        mb();

        /* Do link up for all CGX ports */
        for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
                cgxd = rvu_cgx_pdata(cgx, rvu);
                if (!cgxd)
                        continue;
                err = cgx_lmac_linkup_start(cgxd);
                if (err)
                        dev_err(rvu->dev,
                                "Link up process failed to start on cgx %d\n",
                                cgx);
        }

        return 0;
}

int rvu_cgx_exit(struct rvu *rvu)
{
        unsigned long lmac_bmap;
        int cgx, lmac;
        void *cgxd;

        for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
                cgxd = rvu_cgx_pdata(cgx, rvu);
                if (!cgxd)
                        continue;
                lmac_bmap = cgx_get_lmac_bmap(cgxd);
                for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX)
                        cgx_lmac_evh_unregister(cgxd, lmac);
        }

        /* Ensure event handler unregister is completed */
        mb();

        rvu_cgx_wq_destroy(rvu);
        return 0;
}

/* Most of the CGX configuration is restricted to the mapped PF only;
 * VFs of the mapped PF and other PFs are not allowed. This function checks
 * whether the given pcifunc is permitted to do the configuration or not.
 */
static bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
{
        if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
            !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
                return false;
        return true;
}

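/* Enable or disable forwarding of received pause frames from the PF's
 * CGX LMAC to the NIX block.
 */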
void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
{
        struct mac_ops *mac_ops;
        u8 cgx_id, lmac_id;
        void *cgxd;

        if (!is_pf_cgxmapped(rvu, pf))
                return;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
        cgxd = rvu_cgx_pdata(cgx_id, rvu);

        mac_ops = get_mac_ops(cgxd);
        /* Set / clear CTL_BCK to control pause frame forwarding to NIX */
        if (enable)
                mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
        else
                mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
}

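/* Start or stop RX/TX on the CGX LMAC mapped to the PF of @pcifunc */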
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
        int pf = rvu_get_pf(pcifunc);
        u8 cgx_id, lmac_id;

        if (!is_cgx_config_permitted(rvu, pcifunc))
                return LMAC_AF_ERR_PERM_DENIED;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

        cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start);

        return 0;
}

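/* Remove all DMAC filter entries installed for the PF's CGX LMAC */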
void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
{
        int pf = rvu_get_pf(pcifunc);
        int i = 0, lmac_count = 0;
        u8 max_dmac_filters;
        u8 cgx_id, lmac_id;
        void *cgx_dev;

        if (!is_cgx_config_permitted(rvu, pcifunc))
                return;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
        cgx_dev = cgx_get_pdata(cgx_id);
        lmac_count = cgx_get_lmac_cnt(cgx_dev);
        max_dmac_filters = MAX_DMAC_ENTRIES_PER_CGX / lmac_count;

        for (i = 0; i < max_dmac_filters; i++)
                cgx_lmac_addr_del(cgx_id, lmac_id, i);

        /* cgx_lmac_addr_del() does not clear the entry at index 0,
         * so it needs to be done explicitly
         */
        cgx_lmac_addr_reset(cgx_id, lmac_id);
}

int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
                                    struct msg_rsp *rsp)
{
        rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
        return 0;
}

int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
                                   struct msg_rsp *rsp)
{
        rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
        return 0;
}

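/* Common handler for the CGX_STATS and RPM_STATS mailbox messages. Copies
 * per-LMAC RX/TX MAC counters into @rsp, which is either a cgx_stats_rsp or
 * an rpm_stats_rsp depending on the underlying MAC type.
 */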
static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req,
                              void *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        struct mac_ops *mac_ops;
        int stat = 0, err = 0;
        u64 tx_stat, rx_stat;
        u8 cgx_idx, lmac;
        void *cgxd;

        if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
                return LMAC_AF_ERR_PERM_DENIED;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
        cgxd = rvu_cgx_pdata(cgx_idx, rvu);
        mac_ops = get_mac_ops(cgxd);

        /* Rx stats */
        while (stat < mac_ops->rx_stats_cnt) {
                err = mac_ops->mac_get_rx_stats(cgxd, lmac, stat, &rx_stat);
                if (err)
                        return err;
                if (mac_ops->rx_stats_cnt == RPM_RX_STATS_COUNT)
                        ((struct rpm_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
                else
                        ((struct cgx_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
                stat++;
        }

        /* Tx stats */
        stat = 0;
        while (stat < mac_ops->tx_stats_cnt) {
                err = mac_ops->mac_get_tx_stats(cgxd, lmac, stat, &tx_stat);
                if (err)
                        return err;
                if (mac_ops->tx_stats_cnt == RPM_TX_STATS_COUNT)
                        ((struct rpm_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
                else
                        ((struct cgx_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
                stat++;
        }
        return 0;
}

int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
                               struct cgx_stats_rsp *rsp)
{
        return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}

int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req,
                               struct rpm_stats_rsp *rsp)
{
        return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}

int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
                                   struct msg_req *req,
                                   struct cgx_fec_stats_rsp *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        u8 cgx_idx, lmac;
        void *cgxd;

        if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
                return LMAC_AF_ERR_PERM_DENIED;
        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);

        cgxd = rvu_cgx_pdata(cgx_idx, rvu);
        return cgx_get_fec_stats(cgxd, lmac, rsp);
}

int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
                                      struct cgx_mac_addr_set_or_get *req,
                                      struct cgx_mac_addr_set_or_get *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        u8 cgx_id, lmac_id;

        if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
                return -EPERM;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

        cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);

        return 0;
}

int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
                                      struct cgx_mac_addr_add_req *req,
                                      struct cgx_mac_addr_add_rsp *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        u8 cgx_id, lmac_id;
        int rc = 0;

        if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
                return -EPERM;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
        rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr);
        if (rc >= 0) {
                rsp->index = rc;
                return 0;
        }

        return rc;
}

int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
                                      struct cgx_mac_addr_del_req *req,
                                      struct msg_rsp *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        u8 cgx_id, lmac_id;

        if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
                return -EPERM;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
        return cgx_lmac_addr_del(cgx_id, lmac_id, req->index);
}

int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
                                             struct msg_req *req,
                                             struct cgx_max_dmac_entries_get_rsp
                                             *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        u8 cgx_id, lmac_id;

        /* If the msg is received from PFs which are not mapped to CGX LMACs,
         * or from a VF, then no entries are allocated for DMAC filters at the
         * CGX level. So return zero.
         */
        if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) {
                rsp->max_dmac_filters = 0;
                return 0;
        }

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
        rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id);
        return 0;
}

int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
                                      struct cgx_mac_addr_set_or_get *req,
                                      struct cgx_mac_addr_set_or_get *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        u8 cgx_id, lmac_id;
        int rc = 0, i;
        u64 cfg;

        if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
                return -EPERM;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

        rsp->hdr.rc = rc;
        cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
        /* copy 48 bit mac address to rsp->mac_addr */
        for (i = 0; i < ETH_ALEN; i++)
                rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8;
        return 0;
}

int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
                                        struct msg_rsp *rsp)
{
        u16 pcifunc = req->hdr.pcifunc;
        int pf = rvu_get_pf(pcifunc);
        u8 cgx_id, lmac_id;

        if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
                return -EPERM;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

        cgx_lmac_promisc_config(cgx_id, lmac_id, true);
        return 0;
}

int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
                                         struct msg_rsp *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        u8 cgx_id, lmac_id;

        if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
                return -EPERM;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

        cgx_lmac_promisc_config(cgx_id, lmac_id, false);
        return 0;
}

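/* Enable or disable PTP timestamping of received packets on the PF's LMAC
 * and update the NPC parser, since timestamped packets have their data
 * shifted by 8 bytes.
 */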
static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
        int pf = rvu_get_pf(pcifunc);
        u8 cgx_id, lmac_id;
        void *cgxd;

        if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
                return 0;

        /* This msg is expected only from PFs that are mapped to CGX LMACs;
         * if received from any other PF/VF, simply ACK, nothing to do.
         */
        if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
            !is_pf_cgxmapped(rvu, pf))
                return -ENODEV;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
        cgxd = rvu_cgx_pdata(cgx_id, rvu);

        cgx_lmac_ptp_config(cgxd, lmac_id, enable);
        /* If PTP is enabled then inform NPC that packets to be
         * parsed by this PF will have their data shifted by 8 bytes
         * and if PTP is disabled then no shift is required
         */
        if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
                return -EINVAL;

        return 0;
}

int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
                                       struct msg_rsp *rsp)
{
        return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
}

int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
                                        struct msg_rsp *rsp)
{
        return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, false);
}

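/* Enable or disable link change notifications for a PF. When enabling,
 * the current link state is sent to the PF right away.
 */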
static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
        int pf = rvu_get_pf(pcifunc);
        u8 cgx_id, lmac_id;

        if (!is_cgx_config_permitted(rvu, pcifunc))
                return -EPERM;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

        if (en) {
                set_bit(pf, &rvu->pf_notify_bmap);
                /* Send the current link status to PF */
                rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
        } else {
                clear_bit(pf, &rvu->pf_notify_bmap);
        }

        return 0;
}

int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
                                          struct msg_rsp *rsp)
{
        rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
        return 0;
}

int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
                                         struct msg_rsp *rsp)
{
        rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
        return 0;
}

int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
                                      struct cgx_link_info_msg *rsp)
{
        u8 cgx_id, lmac_id;
        int pf, err;

        pf = rvu_get_pf(req->hdr.pcifunc);

        if (!is_pf_cgxmapped(rvu, pf))
                return -ENODEV;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

        err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
                                &rsp->link_info);
        return err;
}

int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
                                      struct msg_req *req,
                                      struct cgx_features_info_msg *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        u8 cgx_idx, lmac;
        void *cgxd;

        if (!is_pf_cgxmapped(rvu, pf))
                return 0;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
        cgxd = rvu_cgx_pdata(cgx_idx, rvu);
        rsp->lmac_features = cgx_features_get(cgxd);

        return 0;
}

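/* Return the FIFO length advertised by the first enabled MAC (CGX or RPM) */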
u32 rvu_cgx_get_fifolen(struct rvu *rvu)
{
        struct mac_ops *mac_ops;
        u32 fifo_len;

        mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
        fifo_len = mac_ops ? mac_ops->fifo_len : 0;

        return fifo_len;
}

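/* Enable or disable internal (MAC level) loopback on the PF's LMAC */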
static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
        int pf = rvu_get_pf(pcifunc);
        struct mac_ops *mac_ops;
        u8 cgx_id, lmac_id;

        if (!is_cgx_config_permitted(rvu, pcifunc))
                return -EPERM;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
        mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));

        return mac_ops->mac_lmac_intl_lbk(rvu_cgx_pdata(cgx_id, rvu),
                                          lmac_id, en);
}

int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
                                       struct msg_rsp *rsp)
{
        rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
        return 0;
}

int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
                                        struct msg_rsp *rsp)
{
        rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
        return 0;
}

int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
                                       struct cgx_pause_frm_cfg *req,
                                       struct cgx_pause_frm_cfg *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        struct mac_ops *mac_ops;
        u8 cgx_id, lmac_id;
        void *cgxd;

        if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
                return 0;

        /* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
         * if received from any other PF/VF, simply ACK, nothing to do.
         */
        if (!is_pf_cgxmapped(rvu, pf))
                return -ENODEV;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
        cgxd = rvu_cgx_pdata(cgx_id, rvu);
        mac_ops = get_mac_ops(cgxd);

        if (req->set)
                mac_ops->mac_enadis_pause_frm(cgxd, lmac_id,
                                              req->tx_pause, req->rx_pause);
        else
                mac_ops->mac_get_pause_frm_status(cgxd, lmac_id,
                                                  &rsp->tx_pause,
                                                  &rsp->rx_pause);
        return 0;
}

int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
                                           struct msg_rsp *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        u8 cgx_id, lmac_id;

        if (!is_pf_cgxmapped(rvu, pf))
                return LMAC_AF_ERR_PF_NOT_MAPPED;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
        return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id);
}

/* Finds the cumulative sum of NIX rx/tx counters from the LFs of a PF and
 * those from its VFs as well, i.e. NIX rx/tx counters at the CGX port level.
 */
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
                           int index, int rxtxflag, u64 *stat)
{
        struct rvu_block *block;
        int blkaddr;
        u16 pcifunc;
        int pf, lf;

        *stat = 0;

        if (!cgxd || !rvu)
                return -EINVAL;

        pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
        if (pf < 0)
                return pf;

        /* Assumes the LFs of a PF and all of its VFs belong to the same
         * NIX block
         */
        pcifunc = pf << RVU_PFVF_PF_SHIFT;
        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
        if (blkaddr < 0)
                return 0;
        block = &rvu->hw->block[blkaddr];

        for (lf = 0; lf < block->lf.max; lf++) {
                /* Check if a lf is attached to this PF or one of its VFs */
                if (!((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) == (pcifunc &
                         ~RVU_PFVF_FUNC_MASK)))
                        continue;
                if (rxtxflag == NIX_STATS_RX)
                        *stat += rvu_read64(rvu, blkaddr,
                                            NIX_AF_LFX_RX_STATX(lf, index));
                else
                        *stat += rvu_read64(rvu, blkaddr,
                                            NIX_AF_LFX_TX_STATX(lf, index));
        }

        return 0;
}

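/* Reference count NIXLF start/stop requests from a PF and its VFs; RX/TX on
 * the CGX LMAC is enabled when the first user starts and disabled when the
 * last user stops.
 */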
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
{
        struct rvu_pfvf *parent_pf, *pfvf;
        int cgx_users, err = 0;

        if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
                return 0;

        parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
        pfvf = rvu_get_pfvf(rvu, pcifunc);

        mutex_lock(&rvu->cgx_cfg_lock);

        if (start && pfvf->cgx_in_use)
                goto exit;  /* CGX is already started hence nothing to do */
        if (!start && !pfvf->cgx_in_use)
                goto exit; /* CGX is already stopped hence nothing to do */

        if (start) {
                cgx_users = parent_pf->cgx_users;
                parent_pf->cgx_users++;
        } else {
                parent_pf->cgx_users--;
                cgx_users = parent_pf->cgx_users;
        }

        /* Start CGX when the first of all the NIXLFs is started.
         * Stop CGX when the last of all the NIXLFs is stopped.
         */
        if (!cgx_users) {
                err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK,
                                          start);
                if (err) {
                        dev_err(rvu->dev, "Unable to %s CGX\n",
                                start ? "start" : "stop");
                        /* Revert the usage count in case of error */
                        parent_pf->cgx_users = start ? parent_pf->cgx_users - 1
                                               : parent_pf->cgx_users + 1;
                        goto exit;
                }
        }
        pfvf->cgx_in_use = start;
exit:
        mutex_unlock(&rvu->cgx_cfg_lock);
        return err;
}

int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
                                       struct fec_mode *req,
                                       struct fec_mode *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        u8 cgx_id, lmac_id;

        if (!is_pf_cgxmapped(rvu, pf))
                return -EPERM;

        if (req->fec == OTX2_FEC_OFF)
                req->fec = OTX2_FEC_NONE;
        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
        rsp->fec = cgx_set_fec(req->fec, cgx_id, lmac_id);
        return 0;
}

int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
                                           struct cgx_fw_data *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        u8 cgx_id, lmac_id;

        if (!rvu->fwdata)
                return -ENXIO;

        if (!is_pf_cgxmapped(rvu, pf))
                return -EPERM;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

        memcpy(&rsp->fwdata, &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
               sizeof(struct cgx_lmac_fwdata_s));
        return 0;
}

int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
                                       struct cgx_set_link_mode_req *req,
                                       struct cgx_set_link_mode_rsp *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        u8 cgx_idx, lmac;
        void *cgxd;

        if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
                return -EPERM;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
        cgxd = rvu_cgx_pdata(cgx_idx, rvu);
        rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
        return 0;
}

int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req,
                                        struct msg_rsp *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        u8 cgx_id, lmac_id;

        if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
                return LMAC_AF_ERR_PERM_DENIED;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
        return cgx_lmac_addr_reset(cgx_id, lmac_id);
}

int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
                                         struct cgx_mac_addr_update_req *req,
                                         struct msg_rsp *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        u8 cgx_id, lmac_id;

        if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
                return LMAC_AF_ERR_PERM_DENIED;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
        return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
}