/* linux/drivers/net/ethernet/marvell/octeontx2/af/common.h */
   1/* SPDX-License-Identifier: GPL-2.0 */
   2/* Marvell RVU Admin Function driver
   3 *
   4 * Copyright (C) 2018 Marvell.
   5 */
   6
   7#ifndef COMMON_H
   8#define COMMON_H
   9
  10#include "rvu_struct.h"
  11
#define OTX2_ALIGN                      128  /* Align to cacheline */

/* HW queue-size encodings: entry count quadruples with each step,
 * i.e. count = 16 << (2 * encoding). See Q_COUNT()/Q_SIZE() below.
 */
#define Q_SIZE_16               0ULL /* 16 entries */
#define Q_SIZE_64               1ULL /* 64 entries */
#define Q_SIZE_256              2ULL
#define Q_SIZE_1K               3ULL
#define Q_SIZE_4K               4ULL
#define Q_SIZE_16K              5ULL
#define Q_SIZE_64K              6ULL
#define Q_SIZE_256K             7ULL
#define Q_SIZE_1M               8ULL /* Million entries */
#define Q_SIZE_MIN              Q_SIZE_16
#define Q_SIZE_MAX              Q_SIZE_1M
  25
  26#define Q_COUNT(x)              (16ULL << (2 * x))
  27#define Q_SIZE(x, n)            ((ilog2(x) - (n)) / 2)
  28
/* Admin queue info */

/* Since we intend to add only one instruction at a time,
 * keep queue size to its minimum.
 */
#define AQ_SIZE                 Q_SIZE_16
/* HW head & tail pointer mask */
#define AQ_PTR_MASK             0xFFFFF
  37
/* Descriptor for a DMA-coherent, cacheline-aligned HW queue buffer.
 * Allocated/freed by qmem_alloc()/qmem_free() below.
 */
struct qmem {
	void            *base;     /* CPU virtual address, aligned to OTX2_ALIGN */
	dma_addr_t      iova;      /* Device (DMA) address, aligned to OTX2_ALIGN */
	int             alloc_sz;  /* Total bytes allocated, incl. alignment padding */
	u16             entry_sz;  /* Size of one queue entry in bytes */
	u8              align;     /* Bytes the base/iova were advanced to align them */
	u32             qsize;     /* Number of entries requested at allocation */
};
  46
  47static inline int qmem_alloc(struct device *dev, struct qmem **q,
  48                             int qsize, int entry_sz)
  49{
  50        struct qmem *qmem;
  51        int aligned_addr;
  52
  53        if (!qsize)
  54                return -EINVAL;
  55
  56        *q = devm_kzalloc(dev, sizeof(*qmem), GFP_KERNEL);
  57        if (!*q)
  58                return -ENOMEM;
  59        qmem = *q;
  60
  61        qmem->entry_sz = entry_sz;
  62        qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN;
  63        qmem->base = dma_alloc_attrs(dev, qmem->alloc_sz, &qmem->iova,
  64                                     GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS);
  65        if (!qmem->base)
  66                return -ENOMEM;
  67
  68        qmem->qsize = qsize;
  69
  70        aligned_addr = ALIGN((u64)qmem->iova, OTX2_ALIGN);
  71        qmem->align = (aligned_addr - qmem->iova);
  72        qmem->base += qmem->align;
  73        qmem->iova += qmem->align;
  74        return 0;
  75}
  76
  77static inline void qmem_free(struct device *dev, struct qmem *qmem)
  78{
  79        if (!qmem)
  80                return;
  81
  82        if (qmem->base)
  83                dma_free_attrs(dev, qmem->alloc_sz,
  84                               qmem->base - qmem->align,
  85                               qmem->iova - qmem->align,
  86                               DMA_ATTR_FORCE_CONTIGUOUS);
  87        devm_kfree(dev, qmem);
  88}
  89
/* Admin queue: instruction and result queue memory plus the lock that
 * serializes enqueues.
 */
struct admin_queue {
	struct qmem     *inst; /* Instruction queue memory */
	struct qmem     *res;  /* Result/response queue memory */
	spinlock_t      lock; /* Serialize inst enqueue from PFs */
};
  95
/* NPA aura count encodings. For these values the aura count is
 * NPA_AURA_COUNT(x) = 1 << (x + 6), i.e. 128 at NPA_AURA_SZ_128.
 * NOTE(review): NPA_AURA_SZ_0 does not fit that formula (it would give
 * 64, not 0) — presumably it means "no auras"; confirm against HW spec.
 */
enum npa_aura_sz {
	NPA_AURA_SZ_0,
	NPA_AURA_SZ_128,
	NPA_AURA_SZ_256,
	NPA_AURA_SZ_512,
	NPA_AURA_SZ_1K,
	NPA_AURA_SZ_2K,
	NPA_AURA_SZ_4K,
	NPA_AURA_SZ_8K,
	NPA_AURA_SZ_16K,
	NPA_AURA_SZ_32K,
	NPA_AURA_SZ_64K,
	NPA_AURA_SZ_128K,
	NPA_AURA_SZ_256K,
	NPA_AURA_SZ_512K,
	NPA_AURA_SZ_1M,
	NPA_AURA_SZ_MAX,
};

/* Number of auras for an npa_aura_sz encoding */
#define NPA_AURA_COUNT(x)       (1ULL << ((x) + 6))
 117
/* NPA AQ result structure for init/read/write of aura HW contexts.
 * Layout matches what the NPA admin queue expects: result word followed
 * by the aura context and a same-shaped field mask — TODO confirm the
 * mask semantics against the NPA AQ spec.
 */
struct npa_aq_aura_res {
	struct	npa_aq_res_s	res;
	struct	npa_aura_s	aura_ctx;
	struct	npa_aura_s	ctx_mask;
};
 124
/* NPA AQ result structure for init/read/write of pool HW contexts.
 * Same shape as npa_aq_aura_res but carrying a pool context instead.
 */
struct npa_aq_pool_res {
	struct	npa_aq_res_s	res;
	struct	npa_pool_s	pool_ctx;
	struct	npa_pool_s	ctx_mask;
};
 131
/* NIX transmit scheduler hierarchy levels. Note SMQ and MDQ share the
 * same encoding (0x0); NIX_TXSCH_LVL_CNT counts the distinct levels.
 */
enum nix_scheduler {
	NIX_TXSCH_LVL_SMQ = 0x0,
	NIX_TXSCH_LVL_MDQ = 0x0, /* same HW level as SMQ */
	NIX_TXSCH_LVL_TL4 = 0x1,
	NIX_TXSCH_LVL_TL3 = 0x2,
	NIX_TXSCH_LVL_TL2 = 0x3,
	NIX_TXSCH_LVL_TL1 = 0x4,
	NIX_TXSCH_LVL_CNT = 0x5, /* number of distinct levels */
};
 142
/* TX scheduler round-robin quantum/priority defaults (24-bit quantum) */
#define TXSCH_RR_QTM_MAX                ((1 << 24) - 1)
#define TXSCH_TL1_DFLT_RR_QTM           TXSCH_RR_QTM_MAX
#define TXSCH_TL1_DFLT_RR_PRIO          (0x1ull)
#define CN10K_MAX_DWRR_WEIGHT          16384 /* Weight is 14bit on CN10K */

/* Min/Max packet sizes, excluding FCS */
#define NIC_HW_MIN_FRS                  40
#define NIC_HW_MAX_FRS                  9212
#define SDP_HW_MAX_FRS                  65535
#define CN10K_LMAC_LINK_MAX_FRS         16380 /* 16k - FCS */
#define CN10K_LBK_LINK_MAX_FRS          65535 /* 64k */
 154
/* NIX RX action operation */
#define NIX_RX_ACTIONOP_DROP            (0x0ull)
#define NIX_RX_ACTIONOP_UCAST           (0x1ull)
#define NIX_RX_ACTIONOP_UCAST_IPSEC     (0x2ull)
#define NIX_RX_ACTIONOP_MCAST           (0x3ull)
#define NIX_RX_ACTIONOP_RSS             (0x4ull)
/* Use the RX action set in the default unicast entry */
#define NIX_RX_ACTION_DEFAULT           (0xfull)

/* NIX TX action operation */
#define NIX_TX_ACTIONOP_DROP            (0x0ull)
#define NIX_TX_ACTIONOP_UCAST_DEFAULT   (0x1ull)
#define NIX_TX_ACTIONOP_UCAST_CHAN      (0x2ull)
#define NIX_TX_ACTIONOP_MCAST           (0x3ull)
/* NOTE(review): 0x4 is skipped here — presumably reserved; confirm HW spec */
#define NIX_TX_ACTIONOP_DROP_VIOL       (0x5ull)

/* NPC MCAM key size encodings (X1/X2/X4) */
#define NPC_MCAM_KEY_X1                 0
#define NPC_MCAM_KEY_X2                 1
#define NPC_MCAM_KEY_X4                 2

/* NIX interface encoding: bit 0 = direction (0 = RX, 1 = TX),
 * upper bits = NIX block index 'a'.
 */
#define NIX_INTFX_RX(a)                 (0x0ull | (a) << 1)
#define NIX_INTFX_TX(a)                 (0x1ull | (a) << 1)

/* Default interfaces are NIX0_RX and NIX0_TX */
#define NIX_INTF_RX                     NIX_INTFX_RX(0)
#define NIX_INTF_TX                     NIX_INTFX_TX(0)

#define NIX_INTF_TYPE_CGX               0
#define NIX_INTF_TYPE_LBK               1
#define NIX_INTF_TYPE_SDP               2

#define MAX_LMAC_PKIND                  12
/* Link numbering: CGX links first (4 per CGX, from the 4*(a) stride),
 * LBK links starting at 12. Parameters are presumably (a) = CGX/LBK id,
 * (b) = LMAC id, (c) = channel — confirm against the HW channel map.
 */
#define NIX_LINK_CGX_LMAC(a, b)         (0 + 4 * (a) + (b))
#define NIX_LINK_LBK(a)                 (12 + (a))
#define NIX_CHAN_CGX_LMAC_CHX(a, b, c)  (0x800 + 0x100 * (a) + 0x10 * (b) + (c))
#define NIX_CHAN_LBK_CHX(a, b)          (0 + 0x100 * (a) + (b))
#define NIX_CHAN_SDP_CH_START          (0x700ull)
#define NIX_CHAN_SDP_CHX(a)            (NIX_CHAN_SDP_CH_START + (a))
#define NIX_CHAN_SDP_NUM_CHANS          256
 194
/* The mask is to extract lower 10-bits of channel number
 * which CPT will pass to X2P.
 */
#define NIX_CHAN_CPT_X2P_MASK          (0x3ffull)

/* NIX LSO format indices.
 * As of now TSO is the only user, so indices are statically assigned.
 */
#define NIX_LSO_FORMAT_IDX_TSOV4        0
#define NIX_LSO_FORMAT_IDX_TSOV6        1

/* RSS info */
#define MAX_RSS_GROUPS                  8
/* Group 0 has to be used in default pkt forwarding MCAM entries
 * reserved for NIXLFs. Groups 1-7 can be used for RSS for ntuple
 * filters.
 */
#define DEFAULT_RSS_CONTEXT_GROUP       0
#define MAX_RSS_INDIR_TBL_SIZE          256 /* 1 << Max adder bits */
 214
/* NDC info */
/* NDC instance indices, one per cached block.
 * NOTE(review): 0x3 is skipped — presumably reserved; confirm HW spec.
 */
enum ndc_idx_e {
	NIX0_RX = 0x0,
	NIX0_TX = 0x1,
	NPA0_U  = 0x2,
	NIX1_RX = 0x4,
	NIX1_TX = 0x5,
};
 223
/* NDC caching mode: cache contexts or bypass the cache */
enum ndc_ctype_e {
	CACHING = 0x0,
	BYPASS = 0x1,
};
 228
/* NDC port count and transaction-direction selectors */
#define NDC_MAX_PORT 6
#define NDC_READ_TRANS 0
#define NDC_WRITE_TRANS 1
 232
 233#endif /* COMMON_H */
 234