linux/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#ifndef OTX2_COMMON_H
#define OTX2_COMMON_H

#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/soc/marvell/octeontx2/asm.h>
#include <net/pkt_cls.h>
#include <net/devlink.h>

#include <mbox.h>
#include <npc.h>
#include "otx2_reg.h"
#include "otx2_txrx.h"
#include "otx2_devlink.h"
#include <rvu_trace.h>

/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_PF              0xA063
#define PCI_DEVID_OCTEONTX2_RVU_VF              0xA064
#define PCI_DEVID_OCTEONTX2_RVU_AFVF            0xA0F8

#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF          0xB200

/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM                     2
#define PCI_MBOX_BAR_NUM                        4

#define NAME_SIZE                               32

enum aura_mapped_qtypes {
        AURA_NIX_RQ,
        AURA_NIX_SQ,
};

/* NIX LF interrupts range */
#define NIX_LF_QINT_VEC_START                   0x00
#define NIX_LF_CINT_VEC_START                   0x40
#define NIX_LF_GINT_VEC                         0x80
#define NIX_LF_ERR_VEC                          0x81
#define NIX_LF_POISON_VEC                       0x82

/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
#define SEND_CQ_SKID    2000

struct otx2_lmt_info {
        u64 lmt_addr;
        u16 lmt_id;
};
/* RSS configuration */
struct otx2_rss_ctx {
        u8  ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
};

struct otx2_rss_info {
        u8 enable;
        u32 flowkey_cfg;
        u16 rss_size;
#define RSS_HASH_KEY_SIZE       44   /* 352 bit key */
        u8  key[RSS_HASH_KEY_SIZE];
        struct otx2_rss_ctx     *rss_ctx[MAX_RSS_GROUPS];
};

/* NIX (or NPC) RX errors */
enum otx2_errlvl {
        NPC_ERRLVL_RE,
        NPC_ERRLVL_LID_LA,
        NPC_ERRLVL_LID_LB,
        NPC_ERRLVL_LID_LC,
        NPC_ERRLVL_LID_LD,
        NPC_ERRLVL_LID_LE,
        NPC_ERRLVL_LID_LF,
        NPC_ERRLVL_LID_LG,
        NPC_ERRLVL_LID_LH,
        NPC_ERRLVL_NIX = 0x0F,
};

enum otx2_errcodes_re {
        /* NPC_ERRLVL_RE errcodes */
        ERRCODE_FCS = 0x7,
        ERRCODE_FCS_RCV = 0x8,
        ERRCODE_UNDERSIZE = 0x10,
        ERRCODE_OVERSIZE = 0x11,
        ERRCODE_OL2_LEN_MISMATCH = 0x12,
        /* NPC_ERRLVL_NIX errcodes */
        ERRCODE_OL3_LEN = 0x10,
        ERRCODE_OL4_LEN = 0x11,
        ERRCODE_OL4_CSUM = 0x12,
        ERRCODE_IL3_LEN = 0x20,
        ERRCODE_IL4_LEN = 0x21,
        ERRCODE_IL4_CSUM = 0x22,
};

/* NIX TX stats */
enum nix_stat_lf_tx {
        TX_UCAST        = 0x0,
        TX_BCAST        = 0x1,
        TX_MCAST        = 0x2,
        TX_DROP         = 0x3,
        TX_OCTS         = 0x4,
        TX_STATS_ENUM_LAST,
};

/* NIX RX stats */
enum nix_stat_lf_rx {
        RX_OCTS         = 0x0,
        RX_UCAST        = 0x1,
        RX_BCAST        = 0x2,
        RX_MCAST        = 0x3,
        RX_DROP         = 0x4,
        RX_DROP_OCTS    = 0x5,
        RX_FCS          = 0x6,
        RX_ERR          = 0x7,
        RX_DRP_BCAST    = 0x8,
        RX_DRP_MCAST    = 0x9,
        RX_DRP_L3BCAST  = 0xa,
        RX_DRP_L3MCAST  = 0xb,
        RX_STATS_ENUM_LAST,
};

struct otx2_dev_stats {
        u64 rx_bytes;
        u64 rx_frames;
        u64 rx_ucast_frames;
        u64 rx_bcast_frames;
        u64 rx_mcast_frames;
        u64 rx_drops;

        u64 tx_bytes;
        u64 tx_frames;
        u64 tx_ucast_frames;
        u64 tx_bcast_frames;
        u64 tx_mcast_frames;
        u64 tx_drops;
};

/* Driver counted stats */
struct otx2_drv_stats {
        atomic_t rx_fcs_errs;
        atomic_t rx_oversize_errs;
        atomic_t rx_undersize_errs;
        atomic_t rx_csum_errs;
        atomic_t rx_len_errs;
        atomic_t rx_other_errs;
};

struct mbox {
        struct otx2_mbox        mbox;
        struct work_struct      mbox_wrk;
        struct otx2_mbox        mbox_up;
        struct work_struct      mbox_up_wrk;
        struct otx2_nic         *pfvf;
        void                    *bbuf_base; /* Bounce buffer for mbox memory */
        struct mutex            lock;   /* serialize mailbox access */
        int                     num_msgs; /* mbox number of messages */
        int                     up_num_msgs; /* mbox_up number of messages */
};

struct otx2_hw {
        struct pci_dev          *pdev;
        struct otx2_rss_info    rss_info;
        u16                     rx_queues;
        u16                     tx_queues;
        u16                     max_queues;
        u16                     pool_cnt;
        u16                     rqpool_cnt;
        u16                     sqpool_cnt;

        /* NPA */
        u32                     stack_pg_ptrs;  /* No of ptrs per stack page */
        u32                     stack_pg_bytes; /* Size of stack page */
        u16                     sqb_size;

        /* NIX */
        u16             txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
        u16                     matchall_ipolicer;
        u32                     dwrr_mtu;

        /* HW settings, coalescing etc */
        u16                     rx_chan_base;
        u16                     tx_chan_base;
        u16                     cq_qcount_wait;
        u16                     cq_ecount_wait;
        u16                     rq_skid;
        u8                      cq_time_wait;

        /* Segmentation */
        u8                      lso_tsov4_idx;
        u8                      lso_tsov6_idx;
        u8                      lso_udpv4_idx;
        u8                      lso_udpv6_idx;

        /* RSS */
        u8                      flowkey_alg_idx;

        /* MSI-X */
        u8                      cint_cnt; /* CQ interrupt count */
        u16                     npa_msixoff; /* Offset of NPA vectors */
        u16                     nix_msixoff; /* Offset of NIX vectors */
        char                    *irq_name;
        cpumask_var_t           *affinity_mask;

        /* Stats */
        struct otx2_dev_stats   dev_stats;
        struct otx2_drv_stats   drv_stats;
        u64                     cgx_rx_stats[CGX_RX_STATS_COUNT];
        u64                     cgx_tx_stats[CGX_TX_STATS_COUNT];
        u64                     cgx_fec_corr_blks;
        u64                     cgx_fec_uncorr_blks;
        u8                      cgx_links;  /* No. of CGX links present in HW */
        u8                      lbk_links;  /* No. of LBK links present in HW */
        u8                      tx_link;    /* Transmit channel link number */
#define HW_TSO                  0
#define CN10K_MBOX              1
#define CN10K_LMTST             2
        unsigned long           cap_flag;

#define LMT_LINE_SIZE           128
#define LMT_BURST_SIZE          32 /* 32 LMTST lines for burst SQE flush */
        u64                     *lmt_base;
        struct otx2_lmt_info    __percpu *lmt_info;
};
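
/* Usage sketch (illustrative): capability bits in cap_flag are queried with
 * the standard bitops, e.g.:
 *
 *        if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag))
 *                // take the CN10K LMTST-based path
 *
 * The bits are set once per device in otx2_setup_dev_hw_settings() below.
 */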

enum vfperm {
        OTX2_RESET_VF_PERM,
        OTX2_TRUSTED_VF,
};

struct otx2_vf_config {
        struct otx2_nic *pf;
        struct delayed_work link_event_work;
        bool intf_down; /* true if the VF interface is down */
        u8 mac[ETH_ALEN];
        u16 vlan;
        int tx_vtag_idx;
        bool trusted;
};

struct flr_work {
        struct work_struct work;
        struct otx2_nic *pf;
};

struct refill_work {
        struct delayed_work pool_refill_work;
        struct otx2_nic *pf;
};

struct otx2_ptp {
        struct ptp_clock_info ptp_info;
        struct ptp_clock *ptp_clock;
        struct otx2_nic *nic;

        struct cyclecounter cycle_counter;
        struct timecounter time_counter;
};

#define OTX2_HW_TIMESTAMP_LEN   8

struct otx2_mac_table {
        u8 addr[ETH_ALEN];
        u16 mcam_entry;
        bool inuse;
};

struct otx2_flow_config {
        u16                     *flow_ent;
        u16                     *def_ent;
        u16                     nr_flows;
#define OTX2_DEFAULT_FLOWCOUNT          16
#define OTX2_MAX_UNICAST_FLOWS          8
#define OTX2_MAX_VLAN_FLOWS             1
#define OTX2_MAX_TC_FLOWS       OTX2_DEFAULT_FLOWCOUNT
#define OTX2_MCAM_COUNT         (OTX2_DEFAULT_FLOWCOUNT + \
                                 OTX2_MAX_UNICAST_FLOWS + \
                                 OTX2_MAX_VLAN_FLOWS)
        u16                     unicast_offset;
        u16                     rx_vlan_offset;
        u16                     vf_vlan_offset;
#define OTX2_PER_VF_VLAN_FLOWS  2 /* Rx + Tx per VF */
#define OTX2_VF_VLAN_RX_INDEX   0
#define OTX2_VF_VLAN_TX_INDEX   1
        u16                     max_flows;
        u8                      dmacflt_max_flows;
        u8                      *bmap_to_dmacindex;
        unsigned long           dmacflt_bmap;
        struct list_head        flow_list;
};
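
/* Worked example: with the defaults above, OTX2_MCAM_COUNT evaluates to
 * 16 (ntuple) + 8 (unicast) + 1 (RX VLAN) = 25 MCAM entries.
 */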

struct otx2_tc_info {
        /* hash table to store TC offloaded flows */
        struct rhashtable               flow_table;
        struct rhashtable_params        flow_ht_params;
        unsigned long                   *tc_entries_bitmap;
};

struct dev_hw_ops {
        int     (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura);
        void    (*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
                             int size, int qidx);
        void    (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
        void    (*aura_freeptr)(void *dev, int aura, u64 buf);
};

struct otx2_nic {
        void __iomem            *reg_base;
        struct net_device       *netdev;
        struct dev_hw_ops       *hw_ops;
        void                    *iommu_domain;
        u16                     max_frs;
        u16                     rbsize; /* Receive buffer size */

#define OTX2_FLAG_RX_TSTAMP_ENABLED             BIT_ULL(0)
#define OTX2_FLAG_TX_TSTAMP_ENABLED             BIT_ULL(1)
#define OTX2_FLAG_INTF_DOWN                     BIT_ULL(2)
#define OTX2_FLAG_MCAM_ENTRIES_ALLOC            BIT_ULL(3)
#define OTX2_FLAG_NTUPLE_SUPPORT                BIT_ULL(4)
#define OTX2_FLAG_UCAST_FLTR_SUPPORT            BIT_ULL(5)
#define OTX2_FLAG_RX_VLAN_SUPPORT               BIT_ULL(6)
#define OTX2_FLAG_VF_VLAN_SUPPORT               BIT_ULL(7)
#define OTX2_FLAG_PF_SHUTDOWN                   BIT_ULL(8)
#define OTX2_FLAG_RX_PAUSE_ENABLED              BIT_ULL(9)
#define OTX2_FLAG_TX_PAUSE_ENABLED              BIT_ULL(10)
#define OTX2_FLAG_TC_FLOWER_SUPPORT             BIT_ULL(11)
#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED    BIT_ULL(12)
#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED   BIT_ULL(13)
#define OTX2_FLAG_DMACFLTR_SUPPORT              BIT_ULL(14)
        u64                     flags;

        struct otx2_qset        qset;
        struct otx2_hw          hw;
        struct pci_dev          *pdev;
        struct device           *dev;

        /* Mbox */
        struct mbox             mbox;
        struct mbox             *mbox_pfvf;
        struct workqueue_struct *mbox_wq;
        struct workqueue_struct *mbox_pfvf_wq;

        u8                      total_vfs;
        u16                     pcifunc; /* RVU PF_FUNC */
        u16                     bpid[NIX_MAX_BPID_CHAN];
        struct otx2_vf_config   *vf_configs;
        struct cgx_link_user_info linfo;

        /* NPC MCAM */
        struct otx2_flow_config *flow_cfg;
        struct otx2_mac_table   *mac_table;
        struct otx2_tc_info     tc_info;

        u64                     reset_count;
        struct work_struct      reset_task;
        struct workqueue_struct *flr_wq;
        struct flr_work         *flr_wrk;
        struct refill_work      *refill_wrk;
        struct workqueue_struct *otx2_wq;
        struct work_struct      rx_mode_work;

        /* Ethtool stuff */
        u32                     msg_enable;

        /* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
        int                     nix_blkaddr;
        /* LMTST Lines info */
        struct qmem             *dync_lmt;
        u16                     tot_lmt_lines;
        u16                     npa_lmt_lines;
        u32                     nix_lmt_size;

        struct otx2_ptp         *ptp;
        struct hwtstamp_config  tstamp;

        unsigned long           rq_bmap;

        /* Devlink */
        struct otx2_devlink     *dl;
};

static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
{
        return pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF;
}

static inline bool is_96xx_A0(struct pci_dev *pdev)
{
        return (pdev->revision == 0x00) &&
                (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}

static inline bool is_96xx_B0(struct pci_dev *pdev)
{
        return (pdev->revision == 0x01) &&
                (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}

/* REVID for PCIe devices.
 * Bits 0..1: minor pass, bits 3..2: major pass,
 * bits 7..4: midr id
 */
#define PCI_REVISION_ID_96XX            0x00
#define PCI_REVISION_ID_95XX            0x10
#define PCI_REVISION_ID_95XXN           0x20
#define PCI_REVISION_ID_98XX            0x30
#define PCI_REVISION_ID_95XXMM          0x40
#define PCI_REVISION_ID_95XXO           0xE0
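
/* Worked example: pdev->revision 0x11 decodes per the layout above as
 * midr id 0x10 (PCI_REVISION_ID_95XX), major pass 0 (bits 3..2) and
 * minor pass 1 (bits 1..0). is_dev_otx2() below keys off the midr
 * nibble alone.
 */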

static inline bool is_dev_otx2(struct pci_dev *pdev)
{
        u8 midr = pdev->revision & 0xF0;

        return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX ||
                midr == PCI_REVISION_ID_95XXN || midr == PCI_REVISION_ID_98XX ||
                midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
}

static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
        struct otx2_hw *hw = &pfvf->hw;

        pfvf->hw.cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
        pfvf->hw.cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
        pfvf->hw.cq_qcount_wait = CQ_QCOUNT_DEFAULT;

        __set_bit(HW_TSO, &hw->cap_flag);

        if (is_96xx_A0(pfvf->pdev)) {
                __clear_bit(HW_TSO, &hw->cap_flag);

                /* Time based irq coalescing is not supported */
                pfvf->hw.cq_qcount_wait = 0x0;

                /* Due to a HW issue, this early silicon requires a minimum
                 * of 600 unused CQEs to avoid CQ overflow.
                 */
                pfvf->hw.rq_skid = 600;
                pfvf->qset.rqe_cnt = Q_COUNT(Q_SIZE_1K);
        }
        if (is_96xx_B0(pfvf->pdev))
                __clear_bit(HW_TSO, &hw->cap_flag);

        if (!is_dev_otx2(pfvf->pdev)) {
                __set_bit(CN10K_MBOX, &hw->cap_flag);
                __set_bit(CN10K_LMTST, &hw->cap_flag);
        }
}

/* Register read/write APIs */
static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
{
        u64 blkaddr;

        switch ((offset >> RVU_FUNC_BLKADDR_SHIFT) & RVU_FUNC_BLKADDR_MASK) {
        case BLKTYPE_NIX:
                blkaddr = nic->nix_blkaddr;
                break;
        case BLKTYPE_NPA:
                blkaddr = BLKADDR_NPA;
                break;
        default:
                blkaddr = BLKADDR_RVUM;
                break;
        }

        offset &= ~(RVU_FUNC_BLKADDR_MASK << RVU_FUNC_BLKADDR_SHIFT);
        offset |= (blkaddr << RVU_FUNC_BLKADDR_SHIFT);

        return nic->reg_base + offset;
}
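
/* Usage note (illustrative): callers pass block-relative offsets, e.g.
 * otx2_write64(nic, NIX_LF_CQ_OP_INT, val). The block type encoded in
 * the offset is replaced with the attached block's address, so NIX
 * accesses are steered to BLKADDR_NIX0 or BLKADDR_NIX1 as appropriate.
 */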

static inline void otx2_write64(struct otx2_nic *nic, u64 offset, u64 val)
{
        void __iomem *addr = otx2_get_regaddr(nic, offset);

        writeq(val, addr);
}

static inline u64 otx2_read64(struct otx2_nic *nic, u64 offset)
{
        void __iomem *addr = otx2_get_regaddr(nic, offset);

        return readq(addr);
}

/* Mbox bounce buffer APIs */
static inline int otx2_mbox_bbuf_init(struct mbox *mbox, struct pci_dev *pdev)
{
        struct otx2_mbox *otx2_mbox;
        struct otx2_mbox_dev *mdev;

        mbox->bbuf_base = devm_kmalloc(&pdev->dev, MBOX_SIZE, GFP_KERNEL);
        if (!mbox->bbuf_base)
                return -ENOMEM;

        /* Overwrite mbox mbase to point to bounce buffer, so that PF/VF
         * prepare all mbox messages in bounce buffer instead of directly
         * in hw mbox memory.
         */
        otx2_mbox = &mbox->mbox;
        mdev = &otx2_mbox->dev[0];
        mdev->mbase = mbox->bbuf_base;

        otx2_mbox = &mbox->mbox_up;
        mdev = &otx2_mbox->dev[0];
        mdev->mbase = mbox->bbuf_base;
        return 0;
}

static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
{
        u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
        void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
        struct otx2_mbox_dev *mdev = &mbox->dev[devid];
        struct mbox_hdr *hdr;
        u64 msg_size;

        if (mdev->mbase == hw_mbase)
                return;

        hdr = hw_mbase + mbox->rx_start;
        msg_size = hdr->msg_size;

        if (msg_size > mbox->rx_size - msgs_offset)
                msg_size = mbox->rx_size - msgs_offset;

        /* Copy mbox messages from mbox memory to bounce buffer */
        memcpy(mdev->mbase + mbox->rx_start,
               hw_mbase + mbox->rx_start, msg_size + msgs_offset);
}
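
/* Flow sketch (illustrative): with the bounce buffer installed, requests
 * are prepared in bbuf_base and copied into hw mbox memory on send, while
 * the receive path calls otx2_sync_mbox_bbuf() to pull the rx region into
 * the bounce buffer before any message headers are parsed.
 */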

/* In the absence of an API for 128-bit I/O memory accesses on arm64,
 * implement the required operations in place.
 */
#if defined(CONFIG_ARM64)
static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr)
{
        __asm__ volatile("stp %x[x0], %x[x1], [%x[p1],#0]!"
                         ::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr));
}

static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
{
        u64 result;

        __asm__ volatile(".cpu   generic+lse\n"
                         "ldadd %x[i], %x[r], [%[b]]"
                         : [r]"=r"(result), "+m"(*ptr)
                         : [i]"r"(incr), [b]"r"(ptr)
                         : "memory");
        return result;
}

#else
#define otx2_write128(lo, hi, addr)             writeq((hi) | (lo), addr)
#define otx2_atomic64_add(incr, ptr)            ({ *ptr += incr; })
#endif
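
/* Semantics note (illustrative): the LSE LDADD above returns the old value
 * at *ptr. For RVU "atomic op" registers this read-modify-write is how work
 * is requested from HW: otx2_aura_allocptr() below issues an LDADD to
 * NPA_LF_AURA_OP_ALLOCX(0) and the returned value is the buffer pointer
 * handed out by hardware.
 */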

static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
                                        u64 *ptrs, u64 num_ptrs)
{
        struct otx2_lmt_info *lmt_info;
        u64 size = 0, count_eot = 0;
        u64 tar_addr, val = 0;

        lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, smp_processor_id());
        tar_addr = (__force u64)otx2_get_regaddr(pfvf, NPA_LF_AURA_BATCH_FREE0);
        /* LMT ID is the same as the AURA ID */
        val = (lmt_info->lmt_id & 0x7FF) | BIT_ULL(63);
        /* Set if [127:64] of the last 128-bit word has a valid pointer */
        count_eot = (num_ptrs % 2) ? 0ULL : 1ULL;
        /* Set the AURA ID to free the pointers to */
        ptrs[0] = (count_eot << 32) | (aura & 0xFFFFF);
        /* The target address for the LMTST flush tells HW how many 128-bit
         * words are valid starting from NPA_LF_AURA_BATCH_FREE0.
         *
         * tar_addr[6:4] is the LMTST size-1 in units of 128b.
         */
        if (num_ptrs > 2) {
                size = (sizeof(u64) * num_ptrs) / 16;
                if (!count_eot)
                        size++;
                tar_addr |= ((size - 1) & 0x7) << 4;
        }
        memcpy((u64 *)lmt_info->lmt_addr, ptrs, sizeof(u64) * num_ptrs);
        /* Perform LMTST flush */
        cn10k_lmt_flush(val, tar_addr);
}
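
/* Worked example: freeing four buffers means num_ptrs = 5 u64s including
 * the ptrs[0] header, spanning three 128-bit words. size = (8 * 5) / 16 = 2
 * and count_eot = 0 (odd count, so [127:64] of the last word is unused),
 * hence size is bumped to 3 and tar_addr[6:4] is set to size - 1 = 2.
 */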

static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
{
        struct otx2_nic *pfvf = dev;
        u64 ptrs[2];

        ptrs[1] = buf;
        /* Free only one buffer at a time during init and teardown */
        __cn10k_aura_freeptr(pfvf, aura, ptrs, 2);
}

/* Alloc pointer from pool/aura */
static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
{
        u64 *ptr = (u64 *)otx2_get_regaddr(pfvf,
                           NPA_LF_AURA_OP_ALLOCX(0));
        u64 incr = (u64)aura | BIT_ULL(63);

        return otx2_atomic64_add(incr, ptr);
}

/* Free pointer to a pool/aura */
static inline void otx2_aura_freeptr(void *dev, int aura, u64 buf)
{
        struct otx2_nic *pfvf = dev;
        void __iomem *addr = otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0);

        otx2_write128(buf, (u64)aura | BIT_ULL(63), addr);
}

static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
{
        if (type == AURA_NIX_SQ)
                return pfvf->hw.rqpool_cnt + idx;

        /* AURA_NIX_RQ */
        return idx;
}
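
/* Worked example: NPA pools are laid out with all RQ pools first, then the
 * SQ pools. With hw.rqpool_cnt = 8, RQ 3 maps to pool 3 while SQ 3 maps to
 * pool 8 + 3 = 11.
 */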

/* Mbox APIs */
static inline int otx2_sync_mbox_msg(struct mbox *mbox)
{
        int err;

        if (!otx2_mbox_nonempty(&mbox->mbox, 0))
                return 0;
        otx2_mbox_msg_send(&mbox->mbox, 0);
        err = otx2_mbox_wait_for_rsp(&mbox->mbox, 0);
        if (err)
                return err;

        return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}

static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid)
{
        int err;

        if (!otx2_mbox_nonempty(&mbox->mbox_up, devid))
                return 0;
        otx2_mbox_msg_send(&mbox->mbox_up, devid);
        err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid);
        if (err)
                return err;

        return otx2_mbox_check_rsp_msgs(&mbox->mbox_up, devid);
}

/* Use this API to send mbox msgs in atomic context
 * where sleeping is not allowed
 */
static inline int otx2_sync_mbox_msg_busy_poll(struct mbox *mbox)
{
        int err;

        if (!otx2_mbox_nonempty(&mbox->mbox, 0))
                return 0;
        otx2_mbox_msg_send(&mbox->mbox, 0);
        err = otx2_mbox_busy_poll_for_rsp(&mbox->mbox, 0);
        if (err)
                return err;

        return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}

#define M(_name, _id, _fn_name, _req_type, _rsp_type)                   \
static struct _req_type __maybe_unused                                  \
*otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox)                    \
{                                                                       \
        struct _req_type *req;                                          \
                                                                        \
        req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(              \
                &mbox->mbox, 0, sizeof(struct _req_type),               \
                sizeof(struct _rsp_type));                              \
        if (!req)                                                       \
                return NULL;                                            \
        req->hdr.sig = OTX2_MBOX_REQ_SIG;                               \
        req->hdr.id = _id;                                              \
        trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req));       \
        return req;                                                     \
}

MBOX_MESSAGES
#undef M
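
/* Usage sketch (illustrative, error handling trimmed): each M() entry in
 * MBOX_MESSAGES expands to an otx2_mbox_alloc_msg_<name>() helper above,
 * which pairs with one of the otx2_sync_mbox_msg() variants:
 *
 *        mutex_lock(&pfvf->mbox.lock);
 *        req = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox);
 *        if (req)
 *                err = otx2_sync_mbox_msg(&pfvf->mbox);
 *        mutex_unlock(&pfvf->mbox.lock);
 */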

#define M(_name, _id, _fn_name, _req_type, _rsp_type)                   \
int                                                                     \
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf,                \
                                struct _req_type *req,                  \
                                struct _rsp_type *rsp);                 \

MBOX_UP_CGX_MESSAGES
#undef M

/* Time to wait before the watchdog kicks in */
#define OTX2_TX_TIMEOUT         (100 * HZ)

#define RVU_PFVF_PF_SHIFT       10
#define RVU_PFVF_PF_MASK        0x3F
#define RVU_PFVF_FUNC_SHIFT     0
#define RVU_PFVF_FUNC_MASK      0x3FF

static inline bool is_otx2_vf(u16 pcifunc)
{
        return !!(pcifunc & RVU_PFVF_FUNC_MASK);
}

static inline int rvu_get_pf(u16 pcifunc)
{
        return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}
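
/* Worked example: pcifunc packs the PF number in bits 15..10 and the
 * function in bits 9..0, where func 0 is the PF itself and func N is
 * VF N - 1. So pcifunc 0x801 = (2 << 10) | 1 is PF2's VF0: is_otx2_vf()
 * returns true and rvu_get_pf() returns 2.
 */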

static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
                                           struct page *page,
                                           size_t offset, size_t size,
                                           enum dma_data_direction dir)
{
        dma_addr_t iova;

        iova = dma_map_page_attrs(pfvf->dev, page,
                                  offset, size, dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (unlikely(dma_mapping_error(pfvf->dev, iova)))
                return (dma_addr_t)NULL;
        return iova;
}

static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
                                       dma_addr_t addr, size_t size,
                                       enum dma_data_direction dir)
{
        dma_unmap_page_attrs(pfvf->dev, addr, size,
                             dir, DMA_ATTR_SKIP_CPU_SYNC);
}

/* MSI-X APIs */
void otx2_free_cints(struct otx2_nic *pfvf, int n);
void otx2_set_cints_affinity(struct otx2_nic *pfvf);
int otx2_set_mac_address(struct net_device *netdev, void *p);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu);
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
int otx2_config_pause_frm(struct otx2_nic *pfvf);
void otx2_setup_segmentation(struct otx2_nic *pfvf);

/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
int otx2_detach_resources(struct mbox *mbox);
int otx2_config_npa(struct otx2_nic *pfvf);
int otx2_sq_aura_pool_init(struct otx2_nic *pfvf);
int otx2_rq_aura_pool_init(struct otx2_nic *pfvf);
void otx2_aura_pool_free(struct otx2_nic *pfvf);
void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type);
void otx2_sq_free_sqbs(struct otx2_nic *pfvf);
int otx2_config_nix(struct otx2_nic *pfvf);
int otx2_config_nix_queues(struct otx2_nic *pfvf);
int otx2_txschq_config(struct otx2_nic *pfvf, int lvl);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
int otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
                      dma_addr_t *dma);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
                      dma_addr_t *dma);

/* RSS configuration APIs */
int otx2_rss_init(struct otx2_nic *pfvf);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf);
void otx2_set_rss_key(struct otx2_nic *pfvf);
int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id);

/* Mbox handlers */
void mbox_handler_msix_offset(struct otx2_nic *pfvf,
                              struct msix_offset_rsp *rsp);
void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
                               struct npa_lf_alloc_rsp *rsp);
void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
                               struct nix_lf_alloc_rsp *rsp);
void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
                                  struct nix_txsch_alloc_rsp *rsp);
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
                            struct cgx_stats_rsp *rsp);
void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf,
                                struct cgx_fec_stats_rsp *rsp);
void otx2_set_fec_stats_count(struct otx2_nic *pfvf);
void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
                                struct nix_bp_cfg_rsp *rsp);

/* Device stats APIs */
void otx2_get_dev_stats(struct otx2_nic *pfvf);
void otx2_get_stats64(struct net_device *netdev,
                      struct rtnl_link_stats64 *stats);
void otx2_update_lmac_stats(struct otx2_nic *pfvf);
void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf);
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
void otx2_set_ethtool_ops(struct net_device *netdev);
void otx2vf_set_ethtool_ops(struct net_device *netdev);

int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
                             int tx_queues, int rx_queues);
/* MCAM filter related APIs */
int otx2_mcam_flow_init(struct otx2_nic *pf);
int otx2vf_mcam_flow_init(struct otx2_nic *pfvf);
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count);
void otx2_mcam_flow_del(struct otx2_nic *pf);
int otx2_destroy_ntuple_flows(struct otx2_nic *pf);
int otx2_destroy_mcam_flows(struct otx2_nic *pfvf);
int otx2_get_flow(struct otx2_nic *pfvf,
                  struct ethtool_rxnfc *nfc, u32 location);
int otx2_get_all_flows(struct otx2_nic *pfvf,
                       struct ethtool_rxnfc *nfc, u32 *rule_locs);
int otx2_add_flow(struct otx2_nic *pfvf,
                  struct ethtool_rxnfc *nfc);
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location);
int otx2_get_maxflows(struct otx2_flow_config *flow_cfg);
void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id);
int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
/* tc support */
int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
                  void *type_data);
int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic);
/* CGX/RPM DMAC filters support */
int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos);
void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf);
void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);
#endif /* OTX2_COMMON_H */