linux/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
/* SPDX-License-Identifier: GPL-2.0 */
/*  Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef RVU_H
#define RVU_H

#include <linux/pci.h>
#include "rvu_struct.h"
#include "common.h"
#include "mbox.h"

/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_AF              0xA065

/* Subsystem Device ID */
#define PCI_SUBSYS_DEVID_96XX                  0xB200

/* PCI BAR nos */
#define PCI_AF_REG_BAR_NUM                      0
#define PCI_PF_REG_BAR_NUM                      2
#define PCI_MBOX_BAR_NUM                        4

#define NAME_SIZE                               32

/* PF_FUNC */
#define RVU_PFVF_PF_SHIFT       10
#define RVU_PFVF_PF_MASK        0x3F
#define RVU_PFVF_FUNC_SHIFT     0
#define RVU_PFVF_FUNC_MASK      0x3FF

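/* Usage sketch (editor's illustration, not part of the upstream driver):
 * a 16-bit pcifunc packs the PF index in bits 15:10 and the VF/function
 * index in bits 9:0, so the two fields are recovered with
 *
 *      int pf   = (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
 *      int func = (pcifunc >> RVU_PFVF_FUNC_SHIFT) & RVU_PFVF_FUNC_MASK;
 *
 * A func value of 0 refers to the PF itself; its VFs are numbered from 1.
 */
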
#ifdef CONFIG_DEBUG_FS
struct dump_ctx {
        int     lf;
        int     id;
        bool    all;
};

struct rvu_debugfs {
        struct dentry *root;
        struct dentry *cgx_root;
        struct dentry *cgx;
        struct dentry *lmac;
        struct dentry *npa;
        struct dentry *nix;
        struct dentry *npc;
        struct dump_ctx npa_aura_ctx;
        struct dump_ctx npa_pool_ctx;
        struct dump_ctx nix_cq_ctx;
        struct dump_ctx nix_rq_ctx;
        struct dump_ctx nix_sq_ctx;
        int npa_qsize_id;
        int nix_qsize_id;
};
#endif

struct rvu_work {
        struct  work_struct work;
        struct  rvu *rvu;
        int num_msgs;
        int up_num_msgs;
};

struct rsrc_bmap {
        unsigned long *bmap;    /* Pointer to resource bitmap */
        u16  max;               /* Max resource id or count */
};

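/* Usage sketch (editor's illustration, not part of the upstream driver):
 * a struct rsrc_bmap tracks free vs. in-use IDs for one resource type.
 * Assuming the allocator helpers declared later in this header return a
 * valid ID (or 0) on success and a negative error code otherwise:
 *
 *      struct rsrc_bmap rsrc = { .max = 64 };
 *      int id;
 *
 *      if (!rvu_alloc_bitmap(&rsrc)) {
 *              id = rvu_alloc_rsrc(&rsrc);
 *              if (id >= 0)
 *                      rvu_free_rsrc(&rsrc, id);
 *      }
 */
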
struct rvu_block {
        struct rsrc_bmap        lf;
        struct admin_queue      *aq; /* NIX/NPA AQ */
        u16  *fn_map; /* LF to pcifunc mapping */
        bool multislot;
        bool implemented;
        u8   addr;  /* RVU_BLOCK_ADDR_E */
        u8   type;  /* RVU_BLOCK_TYPE_E */
        u8   lfshift;
        u64  lookup_reg;
        u64  pf_lfcnt_reg;
        u64  vf_lfcnt_reg;
        u64  lfcfg_reg;
        u64  msixcfg_reg;
        u64  lfreset_reg;
        unsigned char name[NAME_SIZE];
};

struct nix_mcast {
        struct qmem     *mce_ctx;
        struct qmem     *mcast_buf;
        int             replay_pkind;
        int             next_free_mce;
        struct mutex    mce_lock; /* Serialize MCE updates */
};

struct nix_mce_list {
        struct hlist_head       head;
        int                     count;
        int                     max;
};

struct npc_mcam {
        struct rsrc_bmap counters;
        struct mutex    lock;   /* MCAM entries and counters update lock */
        unsigned long   *bmap;          /* bitmap, 0 => bmap_entries */
        unsigned long   *bmap_reverse;  /* Reverse bitmap, bmap_entries => 0 */
        u16     bmap_entries;   /* Number of unreserved MCAM entries */
        u16     bmap_fcnt;      /* MCAM entries free count */
        u16     *entry2pfvf_map;
        u16     *entry2cntr_map;
        u16     *cntr2pfvf_map;
        u16     *cntr_refcnt;
        u8      keysize;        /* MCAM keysize 112/224/448 bits */
        u8      banks;          /* Number of MCAM banks */
        u8      banks_per_entry; /* Number of keywords in key */
        u16     banksize;       /* Number of MCAM entries in each bank */
        u16     total_entries;  /* Total number of MCAM entries */
        u16     nixlf_offset;   /* Offset of nixlf rsvd unicast entries */
        u16     pf_offset;      /* Offset of PF's rsvd bcast, promisc entries */
        u16     lprio_count;
        u16     lprio_start;
        u16     hprio_count;
        u16     hprio_end;
        u16     rx_miss_act_cntr; /* Counter for RX MISS action */
};

/* Structure for per RVU func info, i.e. PF/VF */
struct rvu_pfvf {
        bool            npalf; /* Only one NPALF per RVU_FUNC */
        bool            nixlf; /* Only one NIXLF per RVU_FUNC */
        u16             sso;
        u16             ssow;
        u16             cptlfs;
        u16             timlfs;
        u8              cgx_lmac;

        /* Block LF's MSIX vector info */
        struct rsrc_bmap msix;      /* Bitmap for MSIX vector alloc */
#define MSIX_BLKLF(blkaddr, lf) (((blkaddr) << 8) | ((lf) & 0xFF))
        u16              *msix_lfmap; /* Vector to block LF mapping */

        /* NPA contexts */
        struct qmem     *aura_ctx;
        struct qmem     *pool_ctx;
        struct qmem     *npa_qints_ctx;
        unsigned long   *aura_bmap;
        unsigned long   *pool_bmap;

        /* NIX contexts */
        struct qmem     *rq_ctx;
        struct qmem     *sq_ctx;
        struct qmem     *cq_ctx;
        struct qmem     *rss_ctx;
        struct qmem     *cq_ints_ctx;
        struct qmem     *nix_qints_ctx;
        unsigned long   *sq_bmap;
        unsigned long   *rq_bmap;
        unsigned long   *cq_bmap;

        u16             rx_chan_base;
        u16             tx_chan_base;
        u8              rx_chan_cnt; /* Total number of RX channels */
        u8              tx_chan_cnt; /* Total number of TX channels */
        u16             maxlen;
        u16             minlen;

        u8              mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */

        /* Broadcast pkt replication info */
        u16                     bcast_mce_idx;
        struct nix_mce_list     bcast_mce_list;

        /* VLAN offload */
        struct mcam_entry entry;
        int rxvlan_index;
        bool rxvlan;

        bool    cgx_in_use; /* Is this PF/VF using CGX? */
        int     cgx_users;  /* Number of CGX users - used only by PFs */
};

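/* Usage sketch (editor's illustration, not part of the upstream driver):
 * msix_lfmap[] is indexed by MSIX vector number and records which block LF
 * owns that vector, encoded with MSIX_BLKLF() above, e.g.
 *
 *      pfvf->msix_lfmap[vec] = MSIX_BLKLF(blkaddr, lf);
 *
 * which places the RVU block address in bits 15:8 and the LF number in
 * bits 7:0 of the entry.
 */
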
struct nix_txsch {
        struct rsrc_bmap schq;
        u8   lvl;
#define NIX_TXSCHQ_FREE               BIT_ULL(1)
#define NIX_TXSCHQ_CFG_DONE           BIT_ULL(0)
#define TXSCH_MAP_FUNC(__pfvf_map)    ((__pfvf_map) & 0xFFFF)
#define TXSCH_MAP_FLAGS(__pfvf_map)   ((__pfvf_map) >> 16)
#define TXSCH_MAP(__func, __flags)    (((__func) & 0xFFFF) | ((__flags) << 16))
#define TXSCH_SET_FLAG(__pfvf_map, flag)    ((__pfvf_map) | ((flag) << 16))
        u32  *pfvf_map;
};

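/* Usage sketch (editor's illustration, not part of the upstream driver):
 * each pfvf_map[] entry packs the owning pcifunc in the low 16 bits and
 * scheduler-queue flags in the high 16 bits:
 *
 *      pfvf_map[schq] = TXSCH_MAP(pcifunc, NIX_TXSCHQ_CFG_DONE);
 *      owner = TXSCH_MAP_FUNC(pfvf_map[schq]);
 *      flags = TXSCH_MAP_FLAGS(pfvf_map[schq]);
 */
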
struct nix_mark_format {
        u8 total;
        u8 in_use;
        u32 *cfg;
};

struct npc_pkind {
        struct rsrc_bmap rsrc;
        u32     *pfchan_map;
};

struct nix_flowkey {
#define NIX_FLOW_KEY_ALG_MAX 32
        u32 flowkey[NIX_FLOW_KEY_ALG_MAX];
        int in_use;
};

struct nix_lso {
        u8 total;
        u8 in_use;
};

struct nix_hw {
        struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */
        struct nix_mcast mcast;
        struct nix_flowkey flowkey;
        struct nix_mark_format mark_format;
        struct nix_lso lso;
};

/* RVU block's capabilities or functionality,
 * which vary by silicon version/SKU.
 */
struct hw_cap {
        /* Transmit side supported functionality */
        u8      nix_tx_aggr_lvl; /* Tx link's traffic aggregation level */
        u16     nix_txsch_per_cgx_lmac; /* Max Q's transmitting to CGX LMAC */
        u16     nix_txsch_per_lbk_lmac; /* Max Q's transmitting to LBK LMAC */
        u16     nix_txsch_per_sdp_lmac; /* Max Q's transmitting to SDP LMAC */
        bool    nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */
        bool    nix_shaping;             /* Is shaping and coloring supported */
        bool    nix_tx_link_bp;          /* Can link backpressure TL queues? */
        bool    nix_rx_multicast;        /* Rx packet replication support */
};

struct rvu_hwinfo {
        u8      total_pfs;   /* Max RVU PFs HW supports */
        u16     total_vfs;   /* Max RVU VFs HW supports */
        u16     max_vfs_per_pf; /* Max VFs that can be attached to a PF */
        u8      cgx;
        u8      lmac_per_cgx;
        u8      cgx_links;
        u8      lbk_links;
        u8      sdp_links;
        u8      npc_kpus;          /* Number of parser units */

        struct hw_cap    cap;
        struct rvu_block block[BLK_COUNT]; /* Block info */
        struct nix_hw    *nix0;
        struct npc_pkind pkind;
        struct npc_mcam  mcam;
};

struct mbox_wq_info {
        struct otx2_mbox mbox;
        struct rvu_work *mbox_wrk;

        struct otx2_mbox mbox_up;
        struct rvu_work *mbox_wrk_up;

        struct workqueue_struct *mbox_wq;
};

struct rvu {
        void __iomem            *afreg_base;
        void __iomem            *pfreg_base;
        struct pci_dev          *pdev;
        struct device           *dev;
        struct rvu_hwinfo       *hw;
        struct rvu_pfvf         *pf;
        struct rvu_pfvf         *hwvf;
        struct mutex            rsrc_lock; /* Serialize resource alloc/free */
        int                     vfs; /* Number of VFs attached to RVU */

        /* Mbox */
        struct mbox_wq_info     afpf_wq_info;
        struct mbox_wq_info     afvf_wq_info;

        /* PF FLR */
        struct rvu_work         *flr_wrk;
        struct workqueue_struct *flr_wq;
        struct mutex            flr_lock; /* Serialize FLRs */

        /* MSI-X */
        u16                     num_vec;
        char                    *irq_name;
        bool                    *irq_allocated;
        dma_addr_t              msix_base_iova;

        /* CGX */
#define PF_CGXMAP_BASE          1 /* PF 0 is reserved for RVU PF */
        u8                      cgx_mapped_pfs;
        u8                      cgx_cnt_max;     /* CGX port count max */
        u8                      *pf2cgxlmac_map; /* pf to cgx_lmac map */
        u16                     *cgxlmac2pf_map; /* bitmap of mapped pfs for
                                                  * every cgx lmac port
                                                  */
        unsigned long           pf_notify_bmap; /* Flags for PF notification */
        void                    **cgx_idmap; /* cgx id to cgx data map table */
        struct                  work_struct cgx_evh_work;
        struct                  workqueue_struct *cgx_evh_wq;
        spinlock_t              cgx_evq_lock; /* cgx event queue lock */
        struct list_head        cgx_evq_head; /* cgx event queue head */
        struct mutex            cgx_cfg_lock; /* serialize cgx configuration */

        char mkex_pfl_name[MKEX_NAME_LEN]; /* Configured MKEX profile name */

#ifdef CONFIG_DEBUG_FS
        struct rvu_debugfs      rvu_dbg;
#endif
};

static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
{
        writeq(val, rvu->afreg_base + ((block << 28) | offset));
}

static inline u64 rvu_read64(struct rvu *rvu, u64 block, u64 offset)
{
        return readq(rvu->afreg_base + ((block << 28) | offset));
}

static inline void rvupf_write64(struct rvu *rvu, u64 offset, u64 val)
{
        writeq(val, rvu->pfreg_base + offset);
}

static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
{
        return readq(rvu->pfreg_base + offset);
}

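/* Usage sketch (editor's illustration, not part of the upstream driver):
 * AF register space is addressed per RVU block: the block address selects
 * a window at (block << 28) within BAR0 and the register offset is added
 * to it. A read-modify-write of some register then looks like
 *
 *      u64 cfg = rvu_read64(rvu, blkaddr, offset);
 *
 *      rvu_write64(rvu, blkaddr, offset, cfg | BIT_ULL(0));
 */
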
/* Silicon revisions */
static inline bool is_rvu_96xx_A0(struct rvu *rvu)
{
        struct pci_dev *pdev = rvu->pdev;

        return (pdev->revision == 0x00) &&
                (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX);
}

static inline bool is_rvu_96xx_B0(struct rvu *rvu)
{
        struct pci_dev *pdev = rvu->pdev;

        return ((pdev->revision == 0x00) || (pdev->revision == 0x01)) &&
                (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX);
}

/* Function Prototypes
 * RVU
 */
static inline int is_afvf(u16 pcifunc)
{
        return !(pcifunc & ~RVU_PFVF_FUNC_MASK);
}

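/* Editor's note (illustration, not part of the upstream driver): the AF's
 * own VFs carry no PF bits in their pcifunc, so is_afvf(pcifunc) simply
 * checks that no bits outside the FUNC field are set; such functions are
 * typically backed by loopback (LBK) channels rather than CGX LMACs.
 */
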
int rvu_alloc_bitmap(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc);
void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
int rvu_rsrc_free_count(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc);
bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc);
int rvu_get_pf(u16 pcifunc);
struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc);
void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf);
bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr);
bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype);
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot);
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);

/* RVU HW reg validation */
enum regmap_block {
        TXSCHQ_HWREGMAP = 0,
        MAX_HWREGMAP,
};

bool rvu_check_valid_reg(int regmap, int regblk, u64 reg);

/* NPA/NIX AQ APIs */
int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
                 int qsize, int inst_size, int res_size);
void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq);

/* CGX APIs */
static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf)
{
        return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs);
}

static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
{
        *cgx_id = (map >> 4) & 0xF;
        *lmac_id = (map & 0xF);
}
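
/* Usage sketch (editor's illustration, not part of the upstream driver):
 * pf2cgxlmac_map[] stores one byte per PF with the CGX id in the upper
 * nibble and the LMAC id in the lower nibble:
 *
 *      u8 cgx_id, lmac_id;
 *
 *      rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
 */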

#define M(_name, _id, fn_name, req, rsp)                                \
int rvu_mbox_handler_ ## fn_name(struct rvu *, struct req *, struct rsp *);
MBOX_MESSAGES
#undef M
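
/* Editor's note (illustration, not part of the upstream driver): each entry
 * of the MBOX_MESSAGES list expands to a handler prototype. For a
 * hypothetical entry M(FOO, 0x123, foo, foo_req, foo_rsp) the line above
 * generates:
 *
 *      int rvu_mbox_handler_foo(struct rvu *, struct foo_req *, struct foo_rsp *);
 */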

int rvu_cgx_init(struct rvu *rvu);
int rvu_cgx_exit(struct rvu *rvu);
void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu);
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start);
void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable);
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start);
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, int index,
                           int rxtxflag, u64 *stat);

/* NPA APIs */
int rvu_npa_init(struct rvu *rvu);
void rvu_npa_freemem(struct rvu *rvu);
void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf);
int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
                        struct npa_aq_enq_rsp *rsp);

/* NIX APIs */
bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc);
int rvu_nix_init(struct rvu *rvu);
int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
                                int blkaddr, u32 cfg);
void rvu_nix_freemem(struct rvu *rvu);
int rvu_get_nixlf_count(struct rvu *rvu);
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf);

/* NPC APIs */
int rvu_npc_init(struct rvu *rvu);
void rvu_npc_freemem(struct rvu *rvu);
int rvu_npc_get_pkind(struct rvu *rvu, u16 pf);
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf);
void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
                                 int nixlf, u64 chan, u8 *mac_addr);
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
                                   int nixlf, u64 chan, bool allmulti);
void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
                                       int nixlf, u64 chan);
void rvu_npc_disable_bcast_entry(struct rvu *rvu, u16 pcifunc);
int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
                                    int group, int alg_idx, int mcam_index);
void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
                                       int blkaddr, int *alloc_cnt,
                                       int *enable_cnt);
void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
                                         int blkaddr, int *alloc_cnt,
                                         int *enable_cnt);

#ifdef CONFIG_DEBUG_FS
void rvu_dbg_init(struct rvu *rvu);
void rvu_dbg_exit(struct rvu *rvu);
#else
static inline void rvu_dbg_init(struct rvu *rvu) {}
static inline void rvu_dbg_exit(struct rvu *rvu) {}
#endif
#endif /* RVU_H */