linux/drivers/net/ethernet/marvell/octeontx2/af/common.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef COMMON_H
#define COMMON_H

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/log2.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include "rvu_struct.h"

#define OTX2_ALIGN                      128  /* Align to cacheline */

#define Q_SIZE_16               0ULL /* 16 entries */
#define Q_SIZE_64               1ULL /* 64 entries */
#define Q_SIZE_256              2ULL
#define Q_SIZE_1K               3ULL
#define Q_SIZE_4K               4ULL
#define Q_SIZE_16K              5ULL
#define Q_SIZE_64K              6ULL
#define Q_SIZE_256K             7ULL
#define Q_SIZE_1M               8ULL /* Million entries */
#define Q_SIZE_MIN              Q_SIZE_16
#define Q_SIZE_MAX              Q_SIZE_1M

/* Convert a Q_SIZE_* code to an entry count and back */
#define Q_COUNT(x)              (16ULL << (2 * (x)))
#define Q_SIZE(x, n)            ((ilog2(x) - (n)) / 2)
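/* Worked example (illustrative): each Q_SIZE_* step quadruples the entry
 * count, starting at 16, so
 *
 *	Q_COUNT(Q_SIZE_16)  = 16ULL << 0  = 16
 *	Q_COUNT(Q_SIZE_256) = 16ULL << 4  = 256
 *	Q_COUNT(Q_SIZE_1M)  = 16ULL << 16 = 1048576
 *
 * and Q_SIZE() inverts this given the count and ilog2 of the minimum (16):
 *
 *	Q_SIZE(256, 4) = (ilog2(256) - 4) / 2 = (8 - 4) / 2 = Q_SIZE_256
 *
 * The admin queue below uses the smallest encoding, i.e.
 * Q_COUNT(AQ_SIZE) == 16 slots.
 */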

/* Admin queue info */

/* Since we intend to add only one instruction at a time,
 * keep the queue size to its minimum.
 */
#define AQ_SIZE                 Q_SIZE_16
/* HW head & tail pointer mask */
#define AQ_PTR_MASK             0xFFFFF

struct qmem {
        void            *base;     /* Cacheline aligned virtual address */
        dma_addr_t      iova;      /* Cacheline aligned DMA address */
        int             alloc_sz;  /* Total bytes allocated, incl. padding */
        u16             entry_sz;  /* Size of each queue entry */
        u8              align;     /* Bytes skipped to reach alignment */
        u32             qsize;     /* Number of entries */
};

static inline int qmem_alloc(struct device *dev, struct qmem **q,
                             int qsize, int entry_sz)
{
        struct qmem *qmem;
        dma_addr_t aligned_addr;

        if (!qsize)
                return -EINVAL;

        *q = devm_kzalloc(dev, sizeof(*qmem), GFP_KERNEL);
        if (!*q)
                return -ENOMEM;
        qmem = *q;

        qmem->entry_sz = entry_sz;
        /* Over-allocate by one cacheline so the base can be aligned below */
        qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN;
        qmem->base = dma_alloc_coherent(dev, qmem->alloc_sz,
                                        &qmem->iova, GFP_KERNEL);
        if (!qmem->base)
                return -ENOMEM;

        qmem->qsize = qsize;

        /* Advance base/iova to the next cacheline boundary and remember the
         * offset so qmem_free() can hand the original addresses back.
         */
        aligned_addr = ALIGN((u64)qmem->iova, OTX2_ALIGN);
        qmem->align = (aligned_addr - qmem->iova);
        qmem->base += qmem->align;
        qmem->iova += qmem->align;
        return 0;
}

static inline void qmem_free(struct device *dev, struct qmem *qmem)
{
        if (!qmem)
                return;

        if (qmem->base)
                dma_free_coherent(dev, qmem->alloc_sz,
                                  qmem->base - qmem->align,
                                  qmem->iova - qmem->align);
        devm_kfree(dev, qmem);
}

struct admin_queue {
        struct qmem     *inst;
        struct qmem     *res;
        spinlock_t      lock; /* Serialize inst enqueue from PFs */
};
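/* Usage sketch (editorial, not the driver's actual helper): a caller would
 * pair qmem_alloc()/qmem_free() per queue and size the admin queue with
 * Q_COUNT(AQ_SIZE).  The name "example_aq_init" and the inst_size/res_size
 * parameters below are illustrative only.
 *
 *	static int example_aq_init(struct device *dev, struct admin_queue *aq,
 *				   int inst_size, int res_size)
 *	{
 *		int qsize = Q_COUNT(AQ_SIZE);	// 16 slots
 *		int err;
 *
 *		err = qmem_alloc(dev, &aq->inst, qsize, inst_size);
 *		if (err)
 *			return err;
 *
 *		err = qmem_alloc(dev, &aq->res, qsize, res_size);
 *		if (err) {
 *			qmem_free(dev, aq->inst);
 *			return err;
 *		}
 *
 *		spin_lock_init(&aq->lock);
 *		return 0;
 *	}
 */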

/* NPA aura count */
enum npa_aura_sz {
        NPA_AURA_SZ_0,
        NPA_AURA_SZ_128,
        NPA_AURA_SZ_256,
        NPA_AURA_SZ_512,
        NPA_AURA_SZ_1K,
        NPA_AURA_SZ_2K,
        NPA_AURA_SZ_4K,
        NPA_AURA_SZ_8K,
        NPA_AURA_SZ_16K,
        NPA_AURA_SZ_32K,
        NPA_AURA_SZ_64K,
        NPA_AURA_SZ_128K,
        NPA_AURA_SZ_256K,
        NPA_AURA_SZ_512K,
        NPA_AURA_SZ_1M,
        NPA_AURA_SZ_MAX,
};

#define NPA_AURA_COUNT(x)       (1ULL << ((x) + 6))
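/* Worked example (illustrative): each enum step doubles the aura count,
 * starting at 128 for NPA_AURA_SZ_128, so
 *
 *	NPA_AURA_COUNT(NPA_AURA_SZ_128) = 1ULL << (1 + 6)  = 128
 *	NPA_AURA_COUNT(NPA_AURA_SZ_1M)  = 1ULL << (14 + 6) = 1048576
 */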

/* NPA AQ result structure for init/read/write of aura HW contexts */
struct npa_aq_aura_res {
        struct  npa_aq_res_s    res;
        struct  npa_aura_s      aura_ctx;
        struct  npa_aura_s      ctx_mask;
};

/* NPA AQ result structure for init/read/write of pool HW contexts */
struct npa_aq_pool_res {
        struct  npa_aq_res_s    res;
        struct  npa_pool_s      pool_ctx;
        struct  npa_pool_s      ctx_mask;
};

/* NIX Transmit schedulers */
enum nix_scheduler {
        NIX_TXSCH_LVL_SMQ = 0x0,
        NIX_TXSCH_LVL_MDQ = 0x0,
        NIX_TXSCH_LVL_TL4 = 0x1,
        NIX_TXSCH_LVL_TL3 = 0x2,
        NIX_TXSCH_LVL_TL2 = 0x3,
        NIX_TXSCH_LVL_TL1 = 0x4,
        NIX_TXSCH_LVL_CNT = 0x5,
};
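/* Note (editorial): SMQ and MDQ share level 0 because they name the same leaf
 * scheduler level that send queues attach to; TL1 is the topmost level feeding
 * the link, so traffic flows SMQ/MDQ -> TL4 -> TL3 -> TL2 -> TL1.
 */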

#define TXSCH_RR_QTM_MAX                ((1 << 24) - 1)
#define TXSCH_TL1_DFLT_RR_QTM           TXSCH_RR_QTM_MAX
#define TXSCH_TL1_DFLT_RR_PRIO          (0x1ull)
#define MAX_SCHED_WEIGHT                0xFF
#define DFLT_RR_WEIGHT                  71
#define DFLT_RR_QTM     ((DFLT_RR_WEIGHT * TXSCH_RR_QTM_MAX) \
                         / MAX_SCHED_WEIGHT)
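/* Worked value (illustrative): DFLT_RR_QTM scales the default weight of 71
 * onto the 24-bit quantum range, i.e.
 *
 *	(71 * 0xFFFFFF) / 0xFF = 71 * 0x010101 = 0x474747 (4671303)
 *
 * which is roughly 71/255 (~28%) of TXSCH_RR_QTM_MAX.
 */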

/* Min/Max packet sizes, excluding FCS */
#define NIC_HW_MIN_FRS                  40
#define NIC_HW_MAX_FRS                  9212
#define SDP_HW_MAX_FRS                  65535
#define CN10K_LMAC_LINK_MAX_FRS         16380 /* 16k - FCS */
#define CN10K_LBK_LINK_MAX_FRS          65535 /* 64k */

/* NIX RX action operation */
#define NIX_RX_ACTIONOP_DROP            (0x0ull)
#define NIX_RX_ACTIONOP_UCAST           (0x1ull)
#define NIX_RX_ACTIONOP_UCAST_IPSEC     (0x2ull)
#define NIX_RX_ACTIONOP_MCAST           (0x3ull)
#define NIX_RX_ACTIONOP_RSS             (0x4ull)
/* Use the RX action set in the default unicast entry */
#define NIX_RX_ACTION_DEFAULT           (0xfull)

/* NIX TX action operation */
#define NIX_TX_ACTIONOP_DROP            (0x0ull)
#define NIX_TX_ACTIONOP_UCAST_DEFAULT   (0x1ull)
#define NIX_TX_ACTIONOP_UCAST_CHAN      (0x2ull)
#define NIX_TX_ACTIONOP_MCAST           (0x3ull)
#define NIX_TX_ACTIONOP_DROP_VIOL       (0x5ull)

#define NPC_MCAM_KEY_X1                 0
#define NPC_MCAM_KEY_X2                 1
#define NPC_MCAM_KEY_X4                 2

#define NIX_INTFX_RX(a)                 (0x0ull | (a) << 1)
#define NIX_INTFX_TX(a)                 (0x1ull | (a) << 1)

/* Default interfaces are NIX0_RX and NIX0_TX */
#define NIX_INTF_RX                     NIX_INTFX_RX(0)
#define NIX_INTF_TX                     NIX_INTFX_TX(0)
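/* Worked example (illustrative): bit 0 selects RX (0) or TX (1) and the upper
 * bits select the NIX block, e.g.
 *
 *	NIX_INTFX_RX(0) = 0, NIX_INTFX_TX(0) = 1
 *	NIX_INTFX_RX(1) = 2, NIX_INTFX_TX(1) = 3
 */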

#define NIX_INTF_TYPE_CGX               0
#define NIX_INTF_TYPE_LBK               1

#define MAX_LMAC_PKIND                  12
#define NIX_LINK_CGX_LMAC(a, b)         (0 + 4 * (a) + (b))
#define NIX_LINK_LBK(a)                 (12 + (a))
#define NIX_CHAN_CGX_LMAC_CHX(a, b, c)  (0x800 + 0x100 * (a) + 0x10 * (b) + (c))
#define NIX_CHAN_LBK_CHX(a, b)          (0 + 0x100 * (a) + (b))
#define NIX_CHAN_SDP_CH_START           (0x700ull)

#define SDP_CHANNELS                    256
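/* Worked example (illustrative): link and channel numbers derive from the CGX
 * index (a), LMAC index (b) and channel within the LMAC (c), e.g.
 *
 *	NIX_LINK_CGX_LMAC(1, 2)        = 4 * 1 + 2            = 6
 *	NIX_CHAN_CGX_LMAC_CHX(1, 2, 0) = 0x800 + 0x100 + 0x20 = 0x920
 *	NIX_CHAN_LBK_CHX(0, 0)         = 0
 */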

/* NIX LSO format indices.
 * As of now TSO is the only user, so indices are statically assigned.
 */
#define NIX_LSO_FORMAT_IDX_TSOV4        0
#define NIX_LSO_FORMAT_IDX_TSOV6        1

/* RSS info */
#define MAX_RSS_GROUPS                  8
/* Group 0 has to be used in default pkt forwarding MCAM entries
 * reserved for NIXLFs. Groups 1-7 can be used for RSS for ntuple
 * filters.
 */
#define DEFAULT_RSS_CONTEXT_GROUP       0
#define MAX_RSS_INDIR_TBL_SIZE          256 /* 1 << max addr bits */

/* NDC info */
enum ndc_idx_e {
        NIX0_RX = 0x0,
        NIX0_TX = 0x1,
        NPA0_U  = 0x2,
        NIX1_RX = 0x4,
        NIX1_TX = 0x5,
};

enum ndc_ctype_e {
        CACHING = 0x0,
        BYPASS = 0x1,
};

#define NDC_MAX_PORT 6
#define NDC_READ_TRANS 0
#define NDC_WRITE_TRANS 1

#endif /* COMMON_H */