dpdk/drivers/common/mlx5/windows/mlx5_win_defs.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) Mellanox Technologies, Ltd. 2001-2020.
 */

#ifndef __MLX5_WIN_DEFS_H__
#define __MLX5_WIN_DEFS_H__

enum {
	MLX5_CQE_OWNER_MASK	= 1,
	MLX5_CQE_REQ		= 0,
	MLX5_CQE_RESP_WR_IMM	= 1,
	MLX5_CQE_RESP_SEND	= 2,
	MLX5_CQE_RESP_SEND_IMM	= 3,
	MLX5_CQE_RESP_SEND_INV	= 4,
	MLX5_CQE_RESIZE_CQ	= 5,
	MLX5_CQE_NO_PACKET	= 6,
	MLX5_CQE_REQ_ERR	= 13,
	MLX5_CQE_RESP_ERR	= 14,
	MLX5_CQE_INVALID	= 15,
};

enum {
	MLX5_OPCODE_NOP			= 0x00,
	MLX5_OPCODE_SEND_INVAL		= 0x01,
	MLX5_OPCODE_RDMA_WRITE		= 0x08,
	MLX5_OPCODE_RDMA_WRITE_IMM	= 0x09,
	MLX5_OPCODE_SEND		= 0x0a,
	MLX5_OPCODE_SEND_IMM		= 0x0b,
	MLX5_OPCODE_TSO			= 0x0e,
	MLX5_OPCODE_RDMA_READ		= 0x10,
	MLX5_OPCODE_ATOMIC_CS		= 0x11,
	MLX5_OPCODE_ATOMIC_FA		= 0x12,
	MLX5_OPCODE_ATOMIC_MASKED_CS	= 0x14,
	MLX5_OPCODE_ATOMIC_MASKED_FA	= 0x15,
	MLX5_OPCODE_FMR			= 0x19,
	MLX5_OPCODE_LOCAL_INVAL		= 0x1b,
	MLX5_OPCODE_CONFIG_CMD		= 0x1f,
	MLX5_OPCODE_UMR			= 0x25,
	MLX5_OPCODE_TAG_MATCHING	= 0x28
};

enum mlx5dv_cq_init_attr_mask {
	MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE	= 1 << 0,
	MLX5DV_CQ_INIT_ATTR_MASK_FLAGS		= 1 << 1,
	MLX5DV_CQ_INIT_ATTR_MASK_CQE_SIZE	= 1 << 2,
};

enum mlx5dv_cqe_comp_res_format {
	MLX5DV_CQE_RES_FORMAT_HASH		= 1 << 0,
	MLX5DV_CQE_RES_FORMAT_CSUM		= 1 << 1,
	MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX	= 1 << 2,
};

enum ibv_access_flags {
	IBV_ACCESS_LOCAL_WRITE		= 1,
	IBV_ACCESS_REMOTE_WRITE		= 1 << 1,
	IBV_ACCESS_REMOTE_READ		= 1 << 2,
	IBV_ACCESS_REMOTE_ATOMIC	= 1 << 3,
	IBV_ACCESS_MW_BIND		= 1 << 4,
	IBV_ACCESS_ZERO_BASED		= 1 << 5,
	IBV_ACCESS_ON_DEMAND		= 1 << 6,
};

enum mlx5_ib_uapi_devx_create_event_channel_flags {
	MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA = 1 << 0,
};

#define MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA \
	MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA

enum {
	MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR		= 0x01,
	MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR		= 0x02,
	MLX5_CQE_SYNDROME_LOCAL_PROT_ERR		= 0x04,
	MLX5_CQE_SYNDROME_WR_FLUSH_ERR			= 0x05,
	MLX5_CQE_SYNDROME_MW_BIND_ERR			= 0x06,
	MLX5_CQE_SYNDROME_BAD_RESP_ERR			= 0x10,
	MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR		= 0x11,
	MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR		= 0x12,
	MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR		= 0x13,
	MLX5_CQE_SYNDROME_REMOTE_OP_ERR			= 0x14,
	MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR	= 0x15,
	MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR		= 0x16,
	MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR		= 0x22,
};

enum {
	MLX5_ETH_WQE_L3_CSUM = (1 << 6),
	MLX5_ETH_WQE_L4_CSUM = (1 << 7),
};

enum {
	MLX5_WQE_CTRL_CQ_UPDATE	= 2 << 2,
	MLX5_WQE_CTRL_SOLICITED	= 1 << 1,
	MLX5_WQE_CTRL_FENCE	= 4 << 5,
	MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE = 1 << 5,
};

enum {
	MLX5_SEND_WQE_BB	= 64,
	MLX5_SEND_WQE_SHIFT	= 6,
};
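
/*
 * A send WQE basic block (BB) is 64 bytes, i.e. 1 << MLX5_SEND_WQE_SHIFT,
 * so a WQE index is typically converted to a byte offset by shifting left
 * by MLX5_SEND_WQE_SHIFT.
 */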

/*
 * RX hash fields select which fields of an incoming packet participate
 * in the RX hash. Each flag represents one packet field; when a flag is
 * set, the corresponding field is included in the RX hash calculation.
 * Note: the IPV4 and IPV6 flags cannot be enabled together on the same
 * QP, and the TCP and UDP flags cannot be enabled together on the same
 * QP.
 */
enum ibv_rx_hash_fields {
	IBV_RX_HASH_SRC_IPV4		= 1 << 0,
	IBV_RX_HASH_DST_IPV4		= 1 << 1,
	IBV_RX_HASH_SRC_IPV6		= 1 << 2,
	IBV_RX_HASH_DST_IPV6		= 1 << 3,
	IBV_RX_HASH_SRC_PORT_TCP	= 1 << 4,
	IBV_RX_HASH_DST_PORT_TCP	= 1 << 5,
	IBV_RX_HASH_SRC_PORT_UDP	= 1 << 6,
	IBV_RX_HASH_DST_PORT_UDP	= 1 << 7,
	IBV_RX_HASH_IPSEC_SPI		= 1 << 8,
	IBV_RX_HASH_INNER		= (1 << 31),
};
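
/*
 * Illustrative example only (this macro is not part of the upstream
 * header): a hash-fields mask for IPv4/UDP RSS built from the flags
 * above. It respects the constraint that IPV4 and IPV6 flags, and TCP
 * and UDP flags, are not combined on the same QP.
 */
#define MLX5_WIN_EXAMPLE_RSS_IPV4_UDP \
	(IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 | \
	 IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP)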

enum {
	MLX5_RCV_DBR	= 0,
	MLX5_SND_DBR	= 1,
};

#ifndef MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2
#define MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 0x0
#endif
#ifndef MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL
#define MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL 0x1
#endif
#ifndef MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2
#define MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 0x2
#endif
#ifndef MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL
#define MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL 0x3
#endif

enum ibv_flow_flags {
	IBV_FLOW_ATTR_FLAGS_ALLOW_LOOP_BACK = 1 << 0,
	IBV_FLOW_ATTR_FLAGS_DONT_TRAP = 1 << 1,
	IBV_FLOW_ATTR_FLAGS_EGRESS = 1 << 2,
};

enum ibv_flow_attr_type {
	/* Steering according to rule specifications. */
	IBV_FLOW_ATTR_NORMAL		= 0x0,
	/*
	 * Default unicast and multicast rule -
	 * receive all Eth traffic which isn't steered to any QP.
	 */
	IBV_FLOW_ATTR_ALL_DEFAULT	= 0x1,
	/*
	 * Default multicast rule -
	 * receive all Eth multicast traffic which isn't steered to any QP.
	 */
	IBV_FLOW_ATTR_MC_DEFAULT	= 0x2,
	/* Sniffer rule - receive all port traffic. */
	IBV_FLOW_ATTR_SNIFFER		= 0x3,
};

enum mlx5dv_flow_table_type {
	MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX	= 0x0,
	MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX	= 0x1,
	MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB	= 0x2,
	MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX	= 0x3,
};

#define MLX5DV_FLOW_TABLE_TYPE_NIC_RX	MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX
#define MLX5DV_FLOW_TABLE_TYPE_NIC_TX	MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX
#define MLX5DV_FLOW_TABLE_TYPE_FDB	MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_RDMA_RX	MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX

struct mlx5dv_flow_match_parameters {
	size_t match_sz;
	uint64_t match_buf[]; /* Device spec format */
};

struct mlx5dv_flow_matcher_attr {
	enum ibv_flow_attr_type type;
	uint32_t flags; /* From enum ibv_flow_flags. */
	uint16_t priority;
	uint8_t match_criteria_enable; /* Device spec format. */
	struct mlx5dv_flow_match_parameters *match_mask;
	uint64_t comp_mask; /* Use mlx5dv_flow_matcher_attr_mask. */
	enum mlx5dv_flow_table_type ft_type;
};
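
/*
 * Illustrative sketch only (this helper is not part of the upstream
 * header): mlx5dv_flow_match_parameters ends in a flexible array member,
 * and match_sz holds the size in bytes of match_buf[], so an allocation
 * for it needs the struct header plus match_sz bytes. The helper name is
 * hypothetical.
 */
static inline size_t
mlx5_win_flow_match_params_size(size_t match_sz)
{
	/* Header carrying match_sz, followed by match_sz bytes of match_buf[]. */
	return sizeof(struct mlx5dv_flow_match_parameters) + match_sz;
}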

/* Windows-specific mlx5_matcher. */
struct mlx5_matcher {
	void *ctx;
	struct mlx5dv_flow_matcher_attr attr;
	uint64_t match_buf[];
};

/*
 * Windows mlx5_action. This struct is the equivalent of the rdma-core
 * struct mlx5dv_dr_action.
 */
struct mlx5_action {
	int type;
	struct {
		uint32_t id;
	} dest_tir;
};

struct mlx5_err_cqe {
	uint8_t		rsvd0[32];
	uint32_t	srqn;
	uint8_t		rsvd1[18];
	uint8_t		vendor_err_synd;
	uint8_t		syndrome;
	uint32_t	s_wqe_opcode_qpn;
	uint16_t	wqe_counter;
	uint8_t		signature;
	uint8_t		op_own;
};

struct mlx5_wqe_srq_next_seg {
	uint8_t			rsvd0[2];
	rte_be16_t		next_wqe_index;
	uint8_t			signature;
	uint8_t			rsvd1[11];
};

enum ibv_wq_state {
	IBV_WQS_RESET,
	IBV_WQS_RDY,
	IBV_WQS_ERR,
	IBV_WQS_UNKNOWN
};

struct mlx5_wqe_data_seg {
	rte_be32_t		byte_count;
	rte_be32_t		lkey;
	rte_be64_t		addr;
};

#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP	(1 << 4)
#define IBV_DEVICE_RAW_IP_CSUM			(1 << 26)
#define IBV_RAW_PACKET_CAP_CVLAN_STRIPPING	(1 << 0)
#define IBV_RAW_PACKET_CAP_SCATTER_FCS		(1 << 1)
#define IBV_QPT_RAW_PACKET			8

enum {
	MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT	= 0x0,
	MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE	= 0x1,
	MLX5_FLOW_CONTEXT_DEST_TYPE_TIR		= 0x2,
	MLX5_FLOW_CONTEXT_DEST_TYPE_QP		= 0x3,
};
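
/*
 * Illustrative sketch only (this helper is not part of the upstream
 * header): filling the Windows mlx5_action defined above for a DevX TIR
 * destination. The helper name is hypothetical; the driver code that
 * consumes struct mlx5_action may set these fields differently.
 */
static inline void
mlx5_win_action_set_dest_tir(struct mlx5_action *action, uint32_t tir_id)
{
	/* Destination type taken from the MLX5_FLOW_CONTEXT_DEST_TYPE_* enum. */
	action->type = MLX5_FLOW_CONTEXT_DEST_TYPE_TIR;
	/* DevX TIR object id to which matching packets are directed. */
	action->dest_tir.id = tir_id;
}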

enum {
	MLX5_MATCH_OUTER_HEADERS	= 1 << 0,
	MLX5_MATCH_MISC_PARAMETERS	= 1 << 1,
	MLX5_MATCH_INNER_HEADERS	= 1 << 2,
};
#endif /* __MLX5_WIN_DEFS_H__ */