linux/include/rdma/ib_verbs.h
/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/socket.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/netdevice.h>

#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <uapi/rdma/ib_user_verbs.h>

#define IB_FW_VERSION_NAME_MAX  ETHTOOL_FWVERS_LEN

extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;

union ib_gid {
        u8      raw[16];
        struct {
                __be64  subnet_prefix;
                __be64  interface_id;
        } global;
};

extern union ib_gid zgid;

enum ib_gid_type {
        /* If link layer is Ethernet, this is RoCE V1 */
        IB_GID_TYPE_IB        = 0,
        IB_GID_TYPE_ROCE      = 0,
        IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
        IB_GID_TYPE_SIZE
};

#define ROCE_V2_UDP_DPORT      4791
struct ib_gid_attr {
        enum ib_gid_type        gid_type;
        struct net_device       *ndev;
};

enum rdma_node_type {
        /* IB values map to NodeInfo:NodeType. */
        RDMA_NODE_IB_CA         = 1,
        RDMA_NODE_IB_SWITCH,
        RDMA_NODE_IB_ROUTER,
        RDMA_NODE_RNIC,
        RDMA_NODE_USNIC,
        RDMA_NODE_USNIC_UDP,
};

enum {
        /* set the locally administered indication */
        IB_SA_WELL_KNOWN_GUID   = BIT_ULL(57) | 2,
};

enum rdma_transport_type {
        RDMA_TRANSPORT_IB,
        RDMA_TRANSPORT_IWARP,
        RDMA_TRANSPORT_USNIC,
        RDMA_TRANSPORT_USNIC_UDP
};

enum rdma_protocol_type {
        RDMA_PROTOCOL_IB,
        RDMA_PROTOCOL_IBOE,
        RDMA_PROTOCOL_IWARP,
        RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type);

enum rdma_network_type {
        RDMA_NETWORK_IB,
        RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
        RDMA_NETWORK_IPV4,
        RDMA_NETWORK_IPV6
};

static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
{
        if (network_type == RDMA_NETWORK_IPV4 ||
            network_type == RDMA_NETWORK_IPV6)
                return IB_GID_TYPE_ROCE_UDP_ENCAP;

        /* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
        return IB_GID_TYPE_IB;
}

static inline enum rdma_network_type ib_gid_to_network_type(enum ib_gid_type gid_type,
                                                            union ib_gid *gid)
{
        if (gid_type == IB_GID_TYPE_IB)
                return RDMA_NETWORK_IB;

        if (ipv6_addr_v4mapped((struct in6_addr *)gid))
                return RDMA_NETWORK_IPV4;
        else
                return RDMA_NETWORK_IPV6;
}
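
/*
 * Example (illustrative sketch, not part of the upstream header): a
 * consumer classifying a RoCEv2 GID with the helper above.  The wrapper
 * name is hypothetical; ib_gid_to_network_type() is defined just above.
 *
 *	static bool gid_is_rocev2_ipv4(union ib_gid *gid)
 *	{
 *		return ib_gid_to_network_type(IB_GID_TYPE_ROCE_UDP_ENCAP,
 *					      gid) == RDMA_NETWORK_IPV4;
 *	}
 *
 * A RoCEv2 GID that carries an IPv4 address is a v4-mapped IPv6 address,
 * which is exactly what ipv6_addr_v4mapped() tests for.
 */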

enum rdma_link_layer {
        IB_LINK_LAYER_UNSPECIFIED,
        IB_LINK_LAYER_INFINIBAND,
        IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
        IB_DEVICE_RESIZE_MAX_WR                 = (1 << 0),
        IB_DEVICE_BAD_PKEY_CNTR                 = (1 << 1),
        IB_DEVICE_BAD_QKEY_CNTR                 = (1 << 2),
        IB_DEVICE_RAW_MULTI                     = (1 << 3),
        IB_DEVICE_AUTO_PATH_MIG                 = (1 << 4),
        IB_DEVICE_CHANGE_PHY_PORT               = (1 << 5),
        IB_DEVICE_UD_AV_PORT_ENFORCE            = (1 << 6),
        IB_DEVICE_CURR_QP_STATE_MOD             = (1 << 7),
        IB_DEVICE_SHUTDOWN_PORT                 = (1 << 8),
        /* Not in use, former INIT_TYPE         = (1 << 9),*/
        IB_DEVICE_PORT_ACTIVE_EVENT             = (1 << 10),
        IB_DEVICE_SYS_IMAGE_GUID                = (1 << 11),
        IB_DEVICE_RC_RNR_NAK_GEN                = (1 << 12),
        IB_DEVICE_SRQ_RESIZE                    = (1 << 13),
        IB_DEVICE_N_NOTIFY_CQ                   = (1 << 14),

        /*
         * This device supports a per-device lkey or stag that can be
         * used without performing a memory registration for the local
         * memory.  Note that ULPs should never check this flag, but
         * instead use the local_dma_lkey flag in the ib_pd structure,
         * which will always contain a usable lkey.
         */
        IB_DEVICE_LOCAL_DMA_LKEY                = (1 << 15),
        /* Reserved, old SEND_W_INV             = (1 << 16),*/
        IB_DEVICE_MEM_WINDOW                    = (1 << 17),
        /*
         * Devices should set IB_DEVICE_UD_IP_SUM if they support
         * insertion of UDP and TCP checksum on outgoing UD IPoIB
         * messages and can verify the validity of checksum for
         * incoming messages.  Setting this flag implies that the
         * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
         */
        IB_DEVICE_UD_IP_CSUM                    = (1 << 18),
        IB_DEVICE_UD_TSO                        = (1 << 19),
        IB_DEVICE_XRC                           = (1 << 20),

        /*
         * This device supports the IB "base memory management extension",
         * which includes support for fast registrations (IB_WR_REG_MR,
         * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
         * also be set by any iWarp device which must support FRs to comply
         * with the iWarp verbs spec.  iWarp devices also support the
         * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
         * stag.
         */
        IB_DEVICE_MEM_MGT_EXTENSIONS            = (1 << 21),
        IB_DEVICE_BLOCK_MULTICAST_LOOPBACK      = (1 << 22),
        IB_DEVICE_MEM_WINDOW_TYPE_2A            = (1 << 23),
        IB_DEVICE_MEM_WINDOW_TYPE_2B            = (1 << 24),
        IB_DEVICE_RC_IP_CSUM                    = (1 << 25),
        /* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
        IB_DEVICE_RAW_IP_CSUM                   = (1 << 26),
        /*
         * Devices should set IB_DEVICE_CROSS_CHANNEL if they
         * support execution of WQEs that involve synchronization
         * of I/O operations with single completion queue managed
         * by hardware.
         */
        IB_DEVICE_CROSS_CHANNEL                 = (1 << 27),
        IB_DEVICE_MANAGED_FLOW_STEERING         = (1 << 29),
        IB_DEVICE_SIGNATURE_HANDOVER            = (1 << 30),
        IB_DEVICE_ON_DEMAND_PAGING              = (1ULL << 31),
        IB_DEVICE_SG_GAPS_REG                   = (1ULL << 32),
        IB_DEVICE_VIRTUAL_FUNCTION              = (1ULL << 33),
        /* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
        IB_DEVICE_RAW_SCATTER_FCS               = (1ULL << 34),
        IB_DEVICE_RDMA_NETDEV_OPA_VNIC          = (1ULL << 35),
        /* The device supports padding incoming writes to cacheline. */
        IB_DEVICE_PCI_WRITE_END_PADDING         = (1ULL << 36),
};

enum ib_signature_prot_cap {
        IB_PROT_T10DIF_TYPE_1 = 1,
        IB_PROT_T10DIF_TYPE_2 = 1 << 1,
        IB_PROT_T10DIF_TYPE_3 = 1 << 2,
};

enum ib_signature_guard_cap {
        IB_GUARD_T10DIF_CRC     = 1,
        IB_GUARD_T10DIF_CSUM    = 1 << 1,
};

enum ib_atomic_cap {
        IB_ATOMIC_NONE,
        IB_ATOMIC_HCA,
        IB_ATOMIC_GLOB
};

enum ib_odp_general_cap_bits {
        IB_ODP_SUPPORT          = 1 << 0,
        IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
};

enum ib_odp_transport_cap_bits {
        IB_ODP_SUPPORT_SEND     = 1 << 0,
        IB_ODP_SUPPORT_RECV     = 1 << 1,
        IB_ODP_SUPPORT_WRITE    = 1 << 2,
        IB_ODP_SUPPORT_READ     = 1 << 3,
        IB_ODP_SUPPORT_ATOMIC   = 1 << 4,
};

struct ib_odp_caps {
        uint64_t general_caps;
        struct {
                uint32_t  rc_odp_caps;
                uint32_t  uc_odp_caps;
                uint32_t  ud_odp_caps;
        } per_transport_caps;
};

struct ib_rss_caps {
        /* Corresponding bit will be set if qp type from
         * 'enum ib_qp_type' is supported, e.g.
         * supported_qpts |= 1 << IB_QPT_UD
         */
        u32 supported_qpts;
        u32 max_rwq_indirection_tables;
        u32 max_rwq_indirection_table_size;
};

enum ib_tm_cap_flags {
        /* Support tag matching on RC transport */
        IB_TM_CAP_RC                = 1 << 0,
};

struct ib_tm_caps {
        /* Max size of RNDV header */
        u32 max_rndv_hdr_size;
        /* Max number of entries in tag matching list */
        u32 max_num_tags;
        /* From enum ib_tm_cap_flags */
        u32 flags;
        /* Max number of outstanding list operations */
        u32 max_ops;
        /* Max number of SGE in tag matching entry */
        u32 max_sge;
};

enum ib_cq_creation_flags {
        IB_CQ_FLAGS_TIMESTAMP_COMPLETION   = 1 << 0,
        IB_CQ_FLAGS_IGNORE_OVERRUN         = 1 << 1,
};

struct ib_cq_init_attr {
        unsigned int    cqe;
        int             comp_vector;
        u32             flags;
};

enum ib_cq_attr_mask {
        IB_CQ_MODERATE = 1 << 0,
};

struct ib_cq_caps {
        u16     max_cq_moderation_count;
        u16     max_cq_moderation_period;
};

struct ib_device_attr {
        u64                     fw_ver;
        __be64                  sys_image_guid;
        u64                     max_mr_size;
        u64                     page_size_cap;
        u32                     vendor_id;
        u32                     vendor_part_id;
        u32                     hw_ver;
        int                     max_qp;
        int                     max_qp_wr;
        u64                     device_cap_flags;
        int                     max_sge;
        int                     max_sge_rd;
        int                     max_cq;
        int                     max_cqe;
        int                     max_mr;
        int                     max_pd;
        int                     max_qp_rd_atom;
        int                     max_ee_rd_atom;
        int                     max_res_rd_atom;
        int                     max_qp_init_rd_atom;
        int                     max_ee_init_rd_atom;
        enum ib_atomic_cap      atomic_cap;
        enum ib_atomic_cap      masked_atomic_cap;
        int                     max_ee;
        int                     max_rdd;
        int                     max_mw;
        int                     max_raw_ipv6_qp;
        int                     max_raw_ethy_qp;
        int                     max_mcast_grp;
        int                     max_mcast_qp_attach;
        int                     max_total_mcast_qp_attach;
        int                     max_ah;
        int                     max_fmr;
        int                     max_map_per_fmr;
        int                     max_srq;
        int                     max_srq_wr;
        int                     max_srq_sge;
        unsigned int            max_fast_reg_page_list_len;
        u16                     max_pkeys;
        u8                      local_ca_ack_delay;
        int                     sig_prot_cap;
        int                     sig_guard_cap;
        struct ib_odp_caps      odp_caps;
        uint64_t                timestamp_mask;
        uint64_t                hca_core_clock; /* in kHz */
        struct ib_rss_caps      rss_caps;
        u32                     max_wq_type_rq;
        u32                     raw_packet_caps; /* Use ib_raw_packet_caps enum */
        struct ib_tm_caps       tm_caps;
        struct ib_cq_caps       cq_caps;
};

enum ib_mtu {
        IB_MTU_256  = 1,
        IB_MTU_512  = 2,
        IB_MTU_1024 = 3,
        IB_MTU_2048 = 4,
        IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
        switch (mtu) {
        case IB_MTU_256:  return  256;
        case IB_MTU_512:  return  512;
        case IB_MTU_1024: return 1024;
        case IB_MTU_2048: return 2048;
        case IB_MTU_4096: return 4096;
        default:          return -1;
        }
}

static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
        if (mtu >= 4096)
                return IB_MTU_4096;
        else if (mtu >= 2048)
                return IB_MTU_2048;
        else if (mtu >= 1024)
                return IB_MTU_1024;
        else if (mtu >= 512)
                return IB_MTU_512;
        else
                return IB_MTU_256;
}
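
/*
 * Example (illustrative, not part of the upstream header): round-tripping
 * a byte count through the MTU helpers above.  ib_mtu_int_to_enum()
 * rounds down to the nearest valid IB MTU, so:
 *
 *	enum ib_mtu mtu = ib_mtu_int_to_enum(3000);
 *	int bytes = ib_mtu_enum_to_int(mtu);
 *
 * leaves mtu == IB_MTU_2048 and bytes == 2048.
 */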

enum ib_port_state {
        IB_PORT_NOP             = 0,
        IB_PORT_DOWN            = 1,
        IB_PORT_INIT            = 2,
        IB_PORT_ARMED           = 3,
        IB_PORT_ACTIVE          = 4,
        IB_PORT_ACTIVE_DEFER    = 5
};

enum ib_port_cap_flags {
        IB_PORT_SM                              = 1 <<  1,
        IB_PORT_NOTICE_SUP                      = 1 <<  2,
        IB_PORT_TRAP_SUP                        = 1 <<  3,
        IB_PORT_OPT_IPD_SUP                     = 1 <<  4,
        IB_PORT_AUTO_MIGR_SUP                   = 1 <<  5,
        IB_PORT_SL_MAP_SUP                      = 1 <<  6,
        IB_PORT_MKEY_NVRAM                      = 1 <<  7,
        IB_PORT_PKEY_NVRAM                      = 1 <<  8,
        IB_PORT_LED_INFO_SUP                    = 1 <<  9,
        IB_PORT_SM_DISABLED                     = 1 << 10,
        IB_PORT_SYS_IMAGE_GUID_SUP              = 1 << 11,
        IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP       = 1 << 12,
        IB_PORT_EXTENDED_SPEEDS_SUP             = 1 << 14,
        IB_PORT_CM_SUP                          = 1 << 16,
        IB_PORT_SNMP_TUNNEL_SUP                 = 1 << 17,
        IB_PORT_REINIT_SUP                      = 1 << 18,
        IB_PORT_DEVICE_MGMT_SUP                 = 1 << 19,
        IB_PORT_VENDOR_CLASS_SUP                = 1 << 20,
        IB_PORT_DR_NOTICE_SUP                   = 1 << 21,
        IB_PORT_CAP_MASK_NOTICE_SUP             = 1 << 22,
        IB_PORT_BOOT_MGMT_SUP                   = 1 << 23,
        IB_PORT_LINK_LATENCY_SUP                = 1 << 24,
        IB_PORT_CLIENT_REG_SUP                  = 1 << 25,
        IB_PORT_IP_BASED_GIDS                   = 1 << 26,
};

enum ib_port_width {
        IB_WIDTH_1X     = 1,
        IB_WIDTH_4X     = 2,
        IB_WIDTH_8X     = 4,
        IB_WIDTH_12X    = 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
        switch (width) {
        case IB_WIDTH_1X:  return  1;
        case IB_WIDTH_4X:  return  4;
        case IB_WIDTH_8X:  return  8;
        case IB_WIDTH_12X: return 12;
        default:          return -1;
        }
}

enum ib_port_speed {
        IB_SPEED_SDR    = 1,
        IB_SPEED_DDR    = 2,
        IB_SPEED_QDR    = 4,
        IB_SPEED_FDR10  = 8,
        IB_SPEED_FDR    = 16,
        IB_SPEED_EDR    = 32,
        IB_SPEED_HDR    = 64
};

/**
 * struct rdma_hw_stats
 * @timestamp - Used by the core code to track when the last update was
 * @lifespan - Used by the core code to determine how old the counters
 *   should be before being updated again.  Stored in jiffies, defaults
 *   to 10 milliseconds, drivers can override the default by specifying
 *   their own value during their allocation routine.
 * @names - Array of pointers to static names used for the counters in
 *   directory.
 * @num_counters - How many hardware counters there are.  If names is
 *   shorter than this number, a kernel oops will result.  Driver authors
 *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@names) < num_counters)
 *   in their code to prevent this.
 * @value - Array of u64 counters that are accessed by the sysfs code and
 *   filled in by the driver's get_stats routine
 */
struct rdma_hw_stats {
        unsigned long   timestamp;
        unsigned long   lifespan;
        const char * const *names;
        int             num_counters;
        u64             value[];
};

#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
/**
 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
 *   for drivers.
 * @names - Array of static const char *
 * @num_counters - How many elements in array
 * @lifespan - How many milliseconds between updates
 */
static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
                const char * const *names, int num_counters,
                unsigned long lifespan)
{
        struct rdma_hw_stats *stats;

        stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
                        GFP_KERNEL);
        if (!stats)
                return NULL;
        stats->names = names;
        stats->num_counters = num_counters;
        stats->lifespan = msecs_to_jiffies(lifespan);

        return stats;
}
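
/*
 * Example (illustrative sketch): a driver allocating its counters with the
 * helper above.  The names array and wrapper function are hypothetical.
 * Passing ARRAY_SIZE(names) as num_counters keeps the names array and the
 * counter count in sync, which is what the BUILD_BUG_ON advice in the
 * struct rdma_hw_stats documentation is guarding against.
 *
 *	static const char * const hypothetical_counter_names[] = {
 *		"rx_pkts",
 *		"tx_pkts",
 *	};
 *
 *	static struct rdma_hw_stats *hypothetical_alloc_hw_stats(void)
 *	{
 *		return rdma_alloc_hw_stats_struct(hypothetical_counter_names,
 *				ARRAY_SIZE(hypothetical_counter_names),
 *				RDMA_HW_STATS_DEFAULT_LIFESPAN);
 *	}
 */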


/* Define bits for the various functionality this port needs to have
 * supported by the core.
 */
/* Management                           0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD            0x00000001
#define RDMA_CORE_CAP_IB_SMI            0x00000002
#define RDMA_CORE_CAP_IB_CM             0x00000004
#define RDMA_CORE_CAP_IW_CM             0x00000008
#define RDMA_CORE_CAP_IB_SA             0x00000010
#define RDMA_CORE_CAP_OPA_MAD           0x00000020

/* Address format                       0x000FF000 */
#define RDMA_CORE_CAP_AF_IB             0x00001000
#define RDMA_CORE_CAP_ETH_AH            0x00002000
#define RDMA_CORE_CAP_OPA_AH            0x00004000

/* Protocol                             0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB           0x00100000
#define RDMA_CORE_CAP_PROT_ROCE         0x00200000
#define RDMA_CORE_CAP_PROT_IWARP        0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
#define RDMA_CORE_CAP_PROT_RAW_PACKET   0x01000000
#define RDMA_CORE_CAP_PROT_USNIC        0x02000000

#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
                                        | RDMA_CORE_CAP_IB_MAD \
                                        | RDMA_CORE_CAP_IB_SMI \
                                        | RDMA_CORE_CAP_IB_CM  \
                                        | RDMA_CORE_CAP_IB_SA  \
                                        | RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
                                        | RDMA_CORE_CAP_IB_MAD  \
                                        | RDMA_CORE_CAP_IB_CM   \
                                        | RDMA_CORE_CAP_AF_IB   \
                                        | RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP                       \
                                        (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
                                        | RDMA_CORE_CAP_IB_MAD  \
                                        | RDMA_CORE_CAP_IB_CM   \
                                        | RDMA_CORE_CAP_AF_IB   \
                                        | RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
                                        | RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
                                        | RDMA_CORE_CAP_OPA_MAD)

#define RDMA_CORE_PORT_RAW_PACKET       (RDMA_CORE_CAP_PROT_RAW_PACKET)

#define RDMA_CORE_PORT_USNIC            (RDMA_CORE_CAP_PROT_USNIC)

struct ib_port_attr {
        u64                     subnet_prefix;
        enum ib_port_state      state;
        enum ib_mtu             max_mtu;
        enum ib_mtu             active_mtu;
        int                     gid_tbl_len;
        u32                     port_cap_flags;
        u32                     max_msg_sz;
        u32                     bad_pkey_cntr;
        u32                     qkey_viol_cntr;
        u16                     pkey_tbl_len;
        u32                     sm_lid;
        u32                     lid;
        u8                      lmc;
        u8                      max_vl_num;
        u8                      sm_sl;
        u8                      subnet_timeout;
        u8                      init_type_reply;
        u8                      active_width;
        u8                      active_speed;
        u8                      phys_state;
        bool                    grh_required;
};

enum ib_device_modify_flags {
        IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
        IB_DEVICE_MODIFY_NODE_DESC      = 1 << 1
};

#define IB_DEVICE_NODE_DESC_MAX 64

struct ib_device_modify {
        u64     sys_image_guid;
        char    node_desc[IB_DEVICE_NODE_DESC_MAX];
};

enum ib_port_modify_flags {
        IB_PORT_SHUTDOWN                = 1,
        IB_PORT_INIT_TYPE               = (1<<2),
        IB_PORT_RESET_QKEY_CNTR         = (1<<3),
        IB_PORT_OPA_MASK_CHG            = (1<<4)
};

struct ib_port_modify {
        u32     set_port_cap_mask;
        u32     clr_port_cap_mask;
        u8      init_type;
};

enum ib_event_type {
        IB_EVENT_CQ_ERR,
        IB_EVENT_QP_FATAL,
        IB_EVENT_QP_REQ_ERR,
        IB_EVENT_QP_ACCESS_ERR,
        IB_EVENT_COMM_EST,
        IB_EVENT_SQ_DRAINED,
        IB_EVENT_PATH_MIG,
        IB_EVENT_PATH_MIG_ERR,
        IB_EVENT_DEVICE_FATAL,
        IB_EVENT_PORT_ACTIVE,
        IB_EVENT_PORT_ERR,
        IB_EVENT_LID_CHANGE,
        IB_EVENT_PKEY_CHANGE,
        IB_EVENT_SM_CHANGE,
        IB_EVENT_SRQ_ERR,
        IB_EVENT_SRQ_LIMIT_REACHED,
        IB_EVENT_QP_LAST_WQE_REACHED,
        IB_EVENT_CLIENT_REREGISTER,
        IB_EVENT_GID_CHANGE,
        IB_EVENT_WQ_FATAL,
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event);

struct ib_event {
        struct ib_device        *device;
        union {
                struct ib_cq    *cq;
                struct ib_qp    *qp;
                struct ib_srq   *srq;
                struct ib_wq    *wq;
                u8              port_num;
        } element;
        enum ib_event_type      event;
};

struct ib_event_handler {
        struct ib_device *device;
        void            (*handler)(struct ib_event_handler *, struct ib_event *);
        struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)          \
        do {                                                    \
                (_ptr)->device  = _device;                      \
                (_ptr)->handler = _handler;                     \
                INIT_LIST_HEAD(&(_ptr)->list);                  \
        } while (0)
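
/*
 * Example (illustrative sketch): initializing and registering an async
 * event handler.  The handler function below is hypothetical; registration
 * is done with ib_register_event_handler(), declared later in this header.
 *
 *	static void hypothetical_event_handler(struct ib_event_handler *handler,
 *					       struct ib_event *event)
 *	{
 *		if (event->event == IB_EVENT_PORT_ACTIVE)
 *			pr_info("port %u is active\n", event->element.port_num);
 *	}
 *
 *	struct ib_event_handler ev_handler;
 *
 *	INIT_IB_EVENT_HANDLER(&ev_handler, device, hypothetical_event_handler);
 *	ib_register_event_handler(&ev_handler);
 */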

struct ib_global_route {
        union ib_gid    dgid;
        u32             flow_label;
        u8              sgid_index;
        u8              hop_limit;
        u8              traffic_class;
};

struct ib_grh {
        __be32          version_tclass_flow;
        __be16          paylen;
        u8              next_hdr;
        u8              hop_limit;
        union ib_gid    sgid;
        union ib_gid    dgid;
};

union rdma_network_hdr {
        struct ib_grh ibgrh;
        struct {
                /* The IB spec states that if it's IPv4, the IP header
                 * is located in the last 20 bytes of the GRH.
                 */
                u8              reserved[20];
                struct iphdr    roce4grh;
        };
};

#define IB_QPN_MASK             0xFFFFFF

enum {
        IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE       cpu_to_be16(0xFFFF)
#define IB_MULTICAST_LID_BASE   cpu_to_be16(0xC000)

enum ib_ah_flags {
        IB_AH_GRH       = 1
};

enum ib_rate {
        IB_RATE_PORT_CURRENT = 0,
        IB_RATE_2_5_GBPS = 2,
        IB_RATE_5_GBPS   = 5,
        IB_RATE_10_GBPS  = 3,
        IB_RATE_20_GBPS  = 6,
        IB_RATE_30_GBPS  = 4,
        IB_RATE_40_GBPS  = 7,
        IB_RATE_60_GBPS  = 8,
        IB_RATE_80_GBPS  = 9,
        IB_RATE_120_GBPS = 10,
        IB_RATE_14_GBPS  = 11,
        IB_RATE_56_GBPS  = 12,
        IB_RATE_112_GBPS = 13,
        IB_RATE_168_GBPS = 14,
        IB_RATE_25_GBPS  = 15,
        IB_RATE_100_GBPS = 16,
        IB_RATE_200_GBPS = 17,
        IB_RATE_300_GBPS = 18
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);


/**
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG:       memory region that is used for
 *                            normal registration
 * @IB_MR_TYPE_SIGNATURE:     memory region that is used for
 *                            signature operations (data-integrity
 *                            capable regions)
 * @IB_MR_TYPE_SG_GAPS:       memory region that is capable of
 *                            registering arbitrary sg lists (without
 *                            the normal mr constraints - see
 *                            ib_map_mr_sg)
 */
enum ib_mr_type {
        IB_MR_TYPE_MEM_REG,
        IB_MR_TYPE_SIGNATURE,
        IB_MR_TYPE_SG_GAPS,
};

/**
 * Signature types
 * IB_SIG_TYPE_NONE: Unprotected.
 * IB_SIG_TYPE_T10_DIF: Type T10-DIF
 */
enum ib_signature_type {
        IB_SIG_TYPE_NONE,
        IB_SIG_TYPE_T10_DIF,
};

/**
 * Signature T10-DIF block-guard types
 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
 * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
 */
enum ib_t10_dif_bg_type {
        IB_T10DIF_CRC,
        IB_T10DIF_CSUM
};

/**
 * struct ib_t10_dif_domain - Parameters specific for T10-DIF
 *     domain.
 * @bg_type: T10-DIF block guard type (CRC|CSUM)
 * @pi_interval: protection information interval.
 * @bg: seed of guard computation.
 * @app_tag: application tag of guard block
 * @ref_tag: initial guard block reference tag.
 * @ref_remap: Indicates whether the reftag increments each block
 * @app_escape: Indicates to skip block check if apptag=0xffff
 * @ref_escape: Indicates to skip block check if reftag=0xffffffff
 * @apptag_check_mask: check bitmask of application tag.
 */
struct ib_t10_dif_domain {
        enum ib_t10_dif_bg_type bg_type;
        u16                     pi_interval;
        u16                     bg;
        u16                     app_tag;
        u32                     ref_tag;
        bool                    ref_remap;
        bool                    app_escape;
        bool                    ref_escape;
        u16                     apptag_check_mask;
};

/**
 * struct ib_sig_domain - Parameters for signature domain
 * @sig_type: specific signature type
 * @sig: union of all signature domain attributes that may
 *     be used to set domain layout.
 */
struct ib_sig_domain {
        enum ib_signature_type sig_type;
        union {
                struct ib_t10_dif_domain dif;
        } sig;
};

/**
 * struct ib_sig_attrs - Parameters for signature handover operation
 * @check_mask: bitmask for signature byte check (8 bytes)
 * @mem: memory domain layout descriptor.
 * @wire: wire domain layout descriptor.
 */
struct ib_sig_attrs {
        u8                      check_mask;
        struct ib_sig_domain    mem;
        struct ib_sig_domain    wire;
};

enum ib_sig_err_type {
        IB_SIG_BAD_GUARD,
        IB_SIG_BAD_REFTAG,
        IB_SIG_BAD_APPTAG,
};

/**
 * struct ib_sig_err - signature error descriptor
 */
struct ib_sig_err {
        enum ib_sig_err_type    err_type;
        u32                     expected;
        u32                     actual;
        u64                     sig_err_offset;
        u32                     key;
};

enum ib_mr_status_check {
        IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR check status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
        u32                 fail_status;
        struct ib_sig_err   sig_err;
};

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

enum rdma_ah_attr_type {
        RDMA_AH_ATTR_TYPE_IB,
        RDMA_AH_ATTR_TYPE_ROCE,
        RDMA_AH_ATTR_TYPE_OPA,
};

struct ib_ah_attr {
        u16                     dlid;
        u8                      src_path_bits;
};

struct roce_ah_attr {
        u8                      dmac[ETH_ALEN];
};

struct opa_ah_attr {
        u32                     dlid;
        u8                      src_path_bits;
        bool                    make_grd;
};

struct rdma_ah_attr {
        struct ib_global_route  grh;
        u8                      sl;
        u8                      static_rate;
        u8                      port_num;
        u8                      ah_flags;
        enum rdma_ah_attr_type  type;
        union {
                struct ib_ah_attr ib;
                struct roce_ah_attr roce;
                struct opa_ah_attr opa;
        };
};

enum ib_wc_status {
        IB_WC_SUCCESS,
        IB_WC_LOC_LEN_ERR,
        IB_WC_LOC_QP_OP_ERR,
        IB_WC_LOC_EEC_OP_ERR,
        IB_WC_LOC_PROT_ERR,
        IB_WC_WR_FLUSH_ERR,
        IB_WC_MW_BIND_ERR,
        IB_WC_BAD_RESP_ERR,
        IB_WC_LOC_ACCESS_ERR,
        IB_WC_REM_INV_REQ_ERR,
        IB_WC_REM_ACCESS_ERR,
        IB_WC_REM_OP_ERR,
        IB_WC_RETRY_EXC_ERR,
        IB_WC_RNR_RETRY_EXC_ERR,
        IB_WC_LOC_RDD_VIOL_ERR,
        IB_WC_REM_INV_RD_REQ_ERR,
        IB_WC_REM_ABORT_ERR,
        IB_WC_INV_EECN_ERR,
        IB_WC_INV_EEC_STATE_ERR,
        IB_WC_FATAL_ERR,
        IB_WC_RESP_TIMEOUT_ERR,
        IB_WC_GENERAL_ERR
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
        IB_WC_SEND,
        IB_WC_RDMA_WRITE,
        IB_WC_RDMA_READ,
        IB_WC_COMP_SWAP,
        IB_WC_FETCH_ADD,
        IB_WC_LSO,
        IB_WC_LOCAL_INV,
        IB_WC_REG_MR,
        IB_WC_MASKED_COMP_SWAP,
        IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
        IB_WC_RECV                      = 1 << 7,
        IB_WC_RECV_RDMA_WITH_IMM
};
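
/*
 * Example: per the comment above, a completion consumer can detect any
 * receive completion with one mask test instead of enumerating receive
 * opcodes (handle_recv() here is hypothetical):
 *
 *	if (wc->opcode & IB_WC_RECV)
 *		handle_recv(wc);
 */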

enum ib_wc_flags {
        IB_WC_GRH               = 1,
        IB_WC_WITH_IMM          = (1<<1),
        IB_WC_WITH_INVALIDATE   = (1<<2),
        IB_WC_IP_CSUM_OK        = (1<<3),
        IB_WC_WITH_SMAC         = (1<<4),
        IB_WC_WITH_VLAN         = (1<<5),
        IB_WC_WITH_NETWORK_HDR_TYPE     = (1<<6),
};

struct ib_wc {
        union {
                u64             wr_id;
                struct ib_cqe   *wr_cqe;
        };
        enum ib_wc_status       status;
        enum ib_wc_opcode       opcode;
        u32                     vendor_err;
        u32                     byte_len;
        struct ib_qp           *qp;
        union {
                __be32          imm_data;
                u32             invalidate_rkey;
        } ex;
        u32                     src_qp;
        int                     wc_flags;
        u16                     pkey_index;
        u32                     slid;
        u8                      sl;
        u8                      dlid_path_bits;
        u8                      port_num;       /* valid only for DR SMPs on switches */
        u8                      smac[ETH_ALEN];
        u16                     vlan_id;
        u8                      network_hdr_type;
};

enum ib_cq_notify_flags {
        IB_CQ_SOLICITED                 = 1 << 0,
        IB_CQ_NEXT_COMP                 = 1 << 1,
        IB_CQ_SOLICITED_MASK            = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
        IB_CQ_REPORT_MISSED_EVENTS      = 1 << 2,
};

enum ib_srq_type {
        IB_SRQT_BASIC,
        IB_SRQT_XRC,
        IB_SRQT_TM,
};

static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
{
        return srq_type == IB_SRQT_XRC ||
               srq_type == IB_SRQT_TM;
}
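
/*
 * Example (illustrative sketch): filling struct ib_srq_init_attr for a
 * basic SRQ.  Only XRC and tag-matching SRQs use ext.cq, as encoded by
 * ib_srq_has_cq() above.  ib_create_srq() is declared later in this
 * header; the limits below are arbitrary.
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.srq_type	= IB_SRQT_BASIC,
 *		.attr = {
 *			.max_wr		= 256,
 *			.max_sge	= 1,
 *		},
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &srq_attr);
 */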

enum ib_srq_attr_mask {
        IB_SRQ_MAX_WR   = 1 << 0,
        IB_SRQ_LIMIT    = 1 << 1,
};

struct ib_srq_attr {
        u32     max_wr;
        u32     max_sge;
        u32     srq_limit;
};

struct ib_srq_init_attr {
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *srq_context;
        struct ib_srq_attr      attr;
        enum ib_srq_type        srq_type;

        struct {
                struct ib_cq   *cq;
                union {
                        struct {
                                struct ib_xrcd *xrcd;
                        } xrc;

                        struct {
                                u32             max_num_tags;
                        } tag_matching;
                };
        } ext;
};

struct ib_qp_cap {
        u32     max_send_wr;
        u32     max_recv_wr;
        u32     max_send_sge;
        u32     max_recv_sge;
        u32     max_inline_data;

        /*
         * Maximum number of rdma_rw_ctx structures in flight at a time.
         * ib_create_qp() will calculate the right number of needed WRs
         * and MRs based on this.
         */
        u32     max_rdma_ctxs;
};

enum ib_sig_type {
        IB_SIGNAL_ALL_WR,
        IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
        /*
         * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
         * here (and in that order) since the MAD layer uses them as
         * indices into a 2-entry table.
         */
        IB_QPT_SMI,
        IB_QPT_GSI,

        IB_QPT_RC,
        IB_QPT_UC,
        IB_QPT_UD,
        IB_QPT_RAW_IPV6,
        IB_QPT_RAW_ETHERTYPE,
        IB_QPT_RAW_PACKET = 8,
        IB_QPT_XRC_INI = 9,
        IB_QPT_XRC_TGT,
        IB_QPT_MAX,
        /* Reserve a range for qp types internal to the low level driver.
         * These qp types will not be visible at the IB core layer, so the
         * IB_QPT_MAX usages should not be affected in the core layer
         */
        IB_QPT_RESERVED1 = 0x1000,
        IB_QPT_RESERVED2,
        IB_QPT_RESERVED3,
        IB_QPT_RESERVED4,
        IB_QPT_RESERVED5,
        IB_QPT_RESERVED6,
        IB_QPT_RESERVED7,
        IB_QPT_RESERVED8,
        IB_QPT_RESERVED9,
        IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
        IB_QP_CREATE_IPOIB_UD_LSO               = 1 << 0,
        IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK   = 1 << 1,
        IB_QP_CREATE_CROSS_CHANNEL              = 1 << 2,
        IB_QP_CREATE_MANAGED_SEND               = 1 << 3,
        IB_QP_CREATE_MANAGED_RECV               = 1 << 4,
        IB_QP_CREATE_NETIF_QP                   = 1 << 5,
        IB_QP_CREATE_SIGNATURE_EN               = 1 << 6,
        /* FREE                                 = 1 << 7, */
        IB_QP_CREATE_SCATTER_FCS                = 1 << 8,
        IB_QP_CREATE_CVLAN_STRIPPING            = 1 << 9,
        IB_QP_CREATE_SOURCE_QPN                 = 1 << 10,
        IB_QP_CREATE_PCI_WRITE_END_PADDING      = 1 << 11,
        /* reserve bits 26-31 for low level drivers' internal use */
        IB_QP_CREATE_RESERVED_START             = 1 << 26,
        IB_QP_CREATE_RESERVED_END               = 1 << 31,
};

/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
 * callback to destroy the passed in QP.
 */

struct ib_qp_init_attr {
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *qp_context;
        struct ib_cq           *send_cq;
        struct ib_cq           *recv_cq;
        struct ib_srq          *srq;
        struct ib_xrcd         *xrcd;     /* XRC TGT QPs only */
        struct ib_qp_cap        cap;
        enum ib_sig_type        sq_sig_type;
        enum ib_qp_type         qp_type;
        enum ib_qp_create_flags create_flags;

        /*
         * Only needed for special QP types, or when using the RW API.
         */
        u8                      port_num;
        struct ib_rwq_ind_table *rwq_ind_tbl;
        u32                     source_qpn;
};

struct ib_qp_open_attr {
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *qp_context;
        u32                     qp_num;
        enum ib_qp_type         qp_type;
};

enum ib_rnr_timeout {
        IB_RNR_TIMER_655_36 =  0,
        IB_RNR_TIMER_000_01 =  1,
        IB_RNR_TIMER_000_02 =  2,
        IB_RNR_TIMER_000_03 =  3,
        IB_RNR_TIMER_000_04 =  4,
        IB_RNR_TIMER_000_06 =  5,
        IB_RNR_TIMER_000_08 =  6,
        IB_RNR_TIMER_000_12 =  7,
        IB_RNR_TIMER_000_16 =  8,
        IB_RNR_TIMER_000_24 =  9,
        IB_RNR_TIMER_000_32 = 10,
        IB_RNR_TIMER_000_48 = 11,
        IB_RNR_TIMER_000_64 = 12,
        IB_RNR_TIMER_000_96 = 13,
        IB_RNR_TIMER_001_28 = 14,
        IB_RNR_TIMER_001_92 = 15,
        IB_RNR_TIMER_002_56 = 16,
        IB_RNR_TIMER_003_84 = 17,
        IB_RNR_TIMER_005_12 = 18,
        IB_RNR_TIMER_007_68 = 19,
        IB_RNR_TIMER_010_24 = 20,
        IB_RNR_TIMER_015_36 = 21,
        IB_RNR_TIMER_020_48 = 22,
        IB_RNR_TIMER_030_72 = 23,
        IB_RNR_TIMER_040_96 = 24,
        IB_RNR_TIMER_061_44 = 25,
        IB_RNR_TIMER_081_92 = 26,
        IB_RNR_TIMER_122_88 = 27,
        IB_RNR_TIMER_163_84 = 28,
        IB_RNR_TIMER_245_76 = 29,
        IB_RNR_TIMER_327_68 = 30,
        IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
        IB_QP_STATE                     = 1,
        IB_QP_CUR_STATE                 = (1<<1),
        IB_QP_EN_SQD_ASYNC_NOTIFY       = (1<<2),
        IB_QP_ACCESS_FLAGS              = (1<<3),
        IB_QP_PKEY_INDEX                = (1<<4),
        IB_QP_PORT                      = (1<<5),
        IB_QP_QKEY                      = (1<<6),
        IB_QP_AV                        = (1<<7),
        IB_QP_PATH_MTU                  = (1<<8),
        IB_QP_TIMEOUT                   = (1<<9),
        IB_QP_RETRY_CNT                 = (1<<10),
        IB_QP_RNR_RETRY                 = (1<<11),
        IB_QP_RQ_PSN                    = (1<<12),
        IB_QP_MAX_QP_RD_ATOMIC          = (1<<13),
        IB_QP_ALT_PATH                  = (1<<14),
        IB_QP_MIN_RNR_TIMER             = (1<<15),
        IB_QP_SQ_PSN                    = (1<<16),
        IB_QP_MAX_DEST_RD_ATOMIC        = (1<<17),
        IB_QP_PATH_MIG_STATE            = (1<<18),
        IB_QP_CAP                       = (1<<19),
        IB_QP_DEST_QPN                  = (1<<20),
        IB_QP_RESERVED1                 = (1<<21),
        IB_QP_RESERVED2                 = (1<<22),
        IB_QP_RESERVED3                 = (1<<23),
        IB_QP_RESERVED4                 = (1<<24),
        IB_QP_RATE_LIMIT                = (1<<25),
};

enum ib_qp_state {
        IB_QPS_RESET,
        IB_QPS_INIT,
        IB_QPS_RTR,
        IB_QPS_RTS,
        IB_QPS_SQD,
        IB_QPS_SQE,
        IB_QPS_ERR
};

enum ib_mig_state {
        IB_MIG_MIGRATED,
        IB_MIG_REARM,
        IB_MIG_ARMED
};

enum ib_mw_type {
        IB_MW_TYPE_1 = 1,
        IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
        enum ib_qp_state        qp_state;
        enum ib_qp_state        cur_qp_state;
        enum ib_mtu             path_mtu;
        enum ib_mig_state       path_mig_state;
        u32                     qkey;
        u32                     rq_psn;
        u32                     sq_psn;
        u32                     dest_qp_num;
        int                     qp_access_flags;
        struct ib_qp_cap        cap;
        struct rdma_ah_attr     ah_attr;
        struct rdma_ah_attr     alt_ah_attr;
        u16                     pkey_index;
        u16                     alt_pkey_index;
        u8                      en_sqd_async_notify;
        u8                      sq_draining;
        u8                      max_rd_atomic;
        u8                      max_dest_rd_atomic;
        u8                      min_rnr_timer;
        u8                      port_num;
        u8                      timeout;
        u8                      retry_cnt;
        u8                      rnr_retry;
        u8                      alt_port_num;
        u8                      alt_timeout;
        u32                     rate_limit;
};

enum ib_wr_opcode {
        IB_WR_RDMA_WRITE,
        IB_WR_RDMA_WRITE_WITH_IMM,
        IB_WR_SEND,
        IB_WR_SEND_WITH_IMM,
        IB_WR_RDMA_READ,
        IB_WR_ATOMIC_CMP_AND_SWP,
        IB_WR_ATOMIC_FETCH_AND_ADD,
        IB_WR_LSO,
        IB_WR_SEND_WITH_INV,
        IB_WR_RDMA_READ_WITH_INV,
        IB_WR_LOCAL_INV,
        IB_WR_REG_MR,
        IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
        IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
        IB_WR_REG_SIG_MR,
        /* reserve values for low level drivers' internal use.
         * These values will not be used at all in the ib core layer.
         */
        IB_WR_RESERVED1 = 0xf0,
        IB_WR_RESERVED2,
        IB_WR_RESERVED3,
        IB_WR_RESERVED4,
        IB_WR_RESERVED5,
        IB_WR_RESERVED6,
        IB_WR_RESERVED7,
        IB_WR_RESERVED8,
        IB_WR_RESERVED9,
        IB_WR_RESERVED10,
};

enum ib_send_flags {
        IB_SEND_FENCE           = 1,
        IB_SEND_SIGNALED        = (1<<1),
        IB_SEND_SOLICITED       = (1<<2),
        IB_SEND_INLINE          = (1<<3),
        IB_SEND_IP_CSUM         = (1<<4),

        /* reserve bits 26-31 for low level drivers' internal use */
        IB_SEND_RESERVED_START  = (1 << 26),
        IB_SEND_RESERVED_END    = (1 << 31),
};

struct ib_sge {
        u64     addr;
        u32     length;
        u32     lkey;
};

struct ib_cqe {
        void (*done)(struct ib_cq *cq, struct ib_wc *wc);
};
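
/*
 * Example (illustrative sketch): the wr_cqe completion pattern.  Embedding
 * a struct ib_cqe in a request context lets the done callback recover the
 * context with container_of() instead of a wr_id lookup.  The request
 * structure and callback names below are hypothetical.
 *
 *	struct hypothetical_req {
 *		struct ib_cqe	cqe;
 *	};
 *
 *	static void hypothetical_done(struct ib_cq *cq, struct ib_wc *wc)
 *	{
 *		struct hypothetical_req *req =
 *			container_of(wc->wr_cqe, struct hypothetical_req, cqe);
 *	}
 *
 * Before posting, the consumer sets req->cqe.done = hypothetical_done and
 * points the work request's wr_cqe at &req->cqe.
 */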

struct ib_send_wr {
        struct ib_send_wr      *next;
        union {
                u64             wr_id;
                struct ib_cqe   *wr_cqe;
        };
        struct ib_sge          *sg_list;
        int                     num_sge;
        enum ib_wr_opcode       opcode;
        int                     send_flags;
        union {
                __be32          imm_data;
                u32             invalidate_rkey;
        } ex;
};

struct ib_rdma_wr {
        struct ib_send_wr       wr;
        u64                     remote_addr;
        u32                     rkey;
};

static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
{
        return container_of(wr, struct ib_rdma_wr, wr);
}
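
/*
 * Example (illustrative sketch): building and posting an RDMA WRITE.  The
 * qp, sge, remote_addr and rkey are assumed to come from established
 * connection state; ib_post_send() is declared later in this header.
 *
 *	struct ib_rdma_wr wr = {
 *		.wr = {
 *			.opcode		= IB_WR_RDMA_WRITE,
 *			.send_flags	= IB_SEND_SIGNALED,
 *			.sg_list	= &sge,
 *			.num_sge	= 1,
 *		},
 *		.remote_addr	= remote_addr,
 *		.rkey		= rkey,
 *	};
 *	struct ib_send_wr *bad_wr;
 *	int ret = ib_post_send(qp, &wr.wr, &bad_wr);
 */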

struct ib_atomic_wr {
        struct ib_send_wr       wr;
        u64                     remote_addr;
        u64                     compare_add;
        u64                     swap;
        u64                     compare_add_mask;
        u64                     swap_mask;
        u32                     rkey;
};

static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr)
{
        return container_of(wr, struct ib_atomic_wr, wr);
}

struct ib_ud_wr {
        struct ib_send_wr       wr;
        struct ib_ah            *ah;
        void                    *header;
        int                     hlen;
        int                     mss;
        u32                     remote_qpn;
        u32                     remote_qkey;
        u16                     pkey_index; /* valid for GSI only */
        u8                      port_num;   /* valid for DR SMPs on switch only */
};

static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr)
{
        return container_of(wr, struct ib_ud_wr, wr);
}

struct ib_reg_wr {
        struct ib_send_wr       wr;
        struct ib_mr            *mr;
        u32                     key;
        int                     access;
};

static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr)
{
        return container_of(wr, struct ib_reg_wr, wr);
}

struct ib_sig_handover_wr {
        struct ib_send_wr       wr;
        struct ib_sig_attrs    *sig_attrs;
        struct ib_mr           *sig_mr;
        int                     access_flags;
        struct ib_sge          *prot;
};

static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr)
{
        return container_of(wr, struct ib_sig_handover_wr, wr);
}

struct ib_recv_wr {
        struct ib_recv_wr      *next;
        union {
                u64             wr_id;
                struct ib_cqe   *wr_cqe;
        };
        struct ib_sge          *sg_list;
        int                     num_sge;
};
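
/*
 * Example (illustrative sketch): posting a single receive buffer.  The
 * DMA address and length are assumed to come from a prior mapping, and
 * the lkey from the PD; ib_post_recv() is declared later in this header.
 *
 *	struct ib_sge sge = {
 *		.addr	= dma_addr,
 *		.length	= buf_len,
 *		.lkey	= pd->local_dma_lkey,
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id		= (uintptr_t)buf,
 *		.sg_list	= &sge,
 *		.num_sge	= 1,
 *	};
 *	struct ib_recv_wr *bad_wr;
 *	int ret = ib_post_recv(qp, &wr, &bad_wr);
 */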

enum ib_access_flags {
        IB_ACCESS_LOCAL_WRITE   = 1,
        IB_ACCESS_REMOTE_WRITE  = (1<<1),
        IB_ACCESS_REMOTE_READ   = (1<<2),
        IB_ACCESS_REMOTE_ATOMIC = (1<<3),
        IB_ACCESS_MW_BIND       = (1<<4),
        IB_ZERO_BASED           = (1<<5),
        IB_ACCESS_ON_DEMAND     = (1<<6),
        IB_ACCESS_HUGETLB       = (1<<7),
};

/*
 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
 * are hidden here instead of a uapi header!
 */
enum ib_mr_rereg_flags {
        IB_MR_REREG_TRANS       = 1,
        IB_MR_REREG_PD          = (1<<1),
        IB_MR_REREG_ACCESS      = (1<<2),
        IB_MR_REREG_SUPPORTED   = ((IB_MR_REREG_ACCESS << 1) - 1)
};

struct ib_fmr_attr {
        int     max_pages;
        int     max_maps;
        u8      page_shift;
};

struct ib_umem;

enum rdma_remove_reason {
        /* Userspace requested uobject deletion. Call could fail */
        RDMA_REMOVE_DESTROY,
        /* Context deletion. This call should delete the actual object itself */
        RDMA_REMOVE_CLOSE,
        /* Driver is being hot-unplugged. This call should delete the actual object itself */
        RDMA_REMOVE_DRIVER_REMOVE,
        /* Context is being cleaned-up, but commit was just completed */
        RDMA_REMOVE_DURING_CLEANUP,
};

struct ib_rdmacg_object {
#ifdef CONFIG_CGROUP_RDMA
        struct rdma_cgroup      *cg;            /* owner rdma cgroup */
#endif
};

struct ib_ucontext {
        struct ib_device       *device;
        struct ib_uverbs_file  *ufile;
        int                     closing;

        /* locking the uobjects_list */
        struct mutex            uobjects_lock;
        struct list_head        uobjects;
        /* protects cleanup process from other actions */
        struct rw_semaphore     cleanup_rwsem;
        enum rdma_remove_reason cleanup_reason;

        struct pid             *tgid;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        struct rb_root_cached   umem_tree;
        /*
         * Protects .umem_tree, as well as odp_mrs_count and
         * mmu notifiers registration.
         */
1478        struct rw_semaphore     umem_rwsem;
1479        void (*invalidate_range)(struct ib_umem *umem,
1480                                 unsigned long start, unsigned long end);
1481
1482        struct mmu_notifier     mn;
1483        atomic_t                notifier_count;
1484        /* A list of umems that don't have private mmu notifier counters yet. */
1485        struct list_head        no_private_counters;
1486        int                     odp_mrs_count;
1487#endif
1488
1489        struct ib_rdmacg_object cg_obj;
1490};
1491
1492struct ib_uobject {
1493        u64                     user_handle;    /* handle given to us by userspace */
1494        struct ib_ucontext     *context;        /* associated user context */
1495        void                   *object;         /* containing object */
1496        struct list_head        list;           /* link to context's list */
1497        struct ib_rdmacg_object cg_obj;         /* rdmacg object */
1498        int                     id;             /* index into kernel idr */
1499        struct kref             ref;
1500        atomic_t                usecnt;         /* protects exclusive access */
1501        struct rcu_head         rcu;            /* kfree_rcu() overhead */
1502
1503        const struct uverbs_obj_type *type;
1504};
1505
1506struct ib_uobject_file {
1507        struct ib_uobject       uobj;
1508        /* ufile contains the lock between context release and file close */
1509        struct ib_uverbs_file   *ufile;
1510};
1511
1512struct ib_udata {
1513        const void __user *inbuf;
1514        void __user *outbuf;
1515        size_t       inlen;
1516        size_t       outlen;
1517};
1518
1519struct ib_pd {
1520        u32                     local_dma_lkey;
1521        u32                     flags;
1522        struct ib_device       *device;
1523        struct ib_uobject      *uobject;
1524        atomic_t                usecnt; /* count all resources */
1525
1526        u32                     unsafe_global_rkey;
1527
1528        /*
1529         * Implementation details of the RDMA core, don't use in drivers:
1530         */
1531        struct ib_mr           *__internal_mr;
1532};
1533
1534struct ib_xrcd {
1535        struct ib_device       *device;
1536        atomic_t                usecnt; /* count all exposed resources */
1537        struct inode           *inode;
1538
1539        struct mutex            tgt_qp_mutex;
1540        struct list_head        tgt_qp_list;
1541};
1542
1543struct ib_ah {
1544        struct ib_device        *device;
1545        struct ib_pd            *pd;
1546        struct ib_uobject       *uobject;
1547        enum rdma_ah_attr_type  type;
1548};
1549
1550typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1551
1552enum ib_poll_context {
1553        IB_POLL_DIRECT,         /* caller context, no hw completions */
1554        IB_POLL_SOFTIRQ,        /* poll from softirq context */
1555        IB_POLL_WORKQUEUE,      /* poll from workqueue */
1556};
1557
1558struct ib_cq {
1559        struct ib_device       *device;
1560        struct ib_uobject      *uobject;
1561        ib_comp_handler         comp_handler;
1562        void                  (*event_handler)(struct ib_event *, void *);
1563        void                   *cq_context;
1564        int                     cqe;
1565        atomic_t                usecnt; /* count number of work queues */
1566        enum ib_poll_context    poll_ctx;
1567        struct ib_wc            *wc;
1568        union {
1569                struct irq_poll         iop;
1570                struct work_struct      work;
1571        };
1572};
1573
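/*
 * Editor's illustrative sketch (not part of the original header): picking a
 * completion context when allocating a CQ.  This assumes the ib_alloc_cq()
 * helper declared later in this file; the wrapper name is hypothetical.
 */
static inline struct ib_cq *example_alloc_cq(struct ib_device *dev,
                                             void *priv, int nr_cqe)
{
        /*
         * IB_POLL_SOFTIRQ drives completions through irq_poll from softirq
         * context; use IB_POLL_WORKQUEUE if the handlers may sleep, or
         * IB_POLL_DIRECT to poll explicitly from the caller's context.
         */
        return ib_alloc_cq(dev, priv, nr_cqe, 0 /* comp_vector */,
                           IB_POLL_SOFTIRQ);
}
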
1574struct ib_srq {
1575        struct ib_device       *device;
1576        struct ib_pd           *pd;
1577        struct ib_uobject      *uobject;
1578        void                  (*event_handler)(struct ib_event *, void *);
1579        void                   *srq_context;
1580        enum ib_srq_type        srq_type;
1581        atomic_t                usecnt;
1582
1583        struct {
1584                struct ib_cq   *cq;
1585                union {
1586                        struct {
1587                                struct ib_xrcd *xrcd;
1588                                u32             srq_num;
1589                        } xrc;
1590                };
1591        } ext;
1592};
1593
1594enum ib_raw_packet_caps {
1595        /* Stripping the CVLAN from an incoming packet and reporting it in
1596         * the matching work completion is supported.
1597         */
1598        IB_RAW_PACKET_CAP_CVLAN_STRIPPING       = (1 << 0),
1599        /* Scattering the FCS field of an incoming packet to host memory
1600         * is supported. */
1601        IB_RAW_PACKET_CAP_SCATTER_FCS           = (1 << 1),
1602        /* Checksum offloads are supported (for both send and receive). */
1603        IB_RAW_PACKET_CAP_IP_CSUM               = (1 << 2),
1604        /* When a packet is received on an RQ with no receive WQEs posted,
1605         * packet processing is delayed instead of the packet being
1606         * dropped immediately. */
1607        IB_RAW_PACKET_CAP_DELAY_DROP            = (1 << 3),
1608};
1609
1610enum ib_wq_type {
1611        IB_WQT_RQ
1612};
1613
1614enum ib_wq_state {
1615        IB_WQS_RESET,
1616        IB_WQS_RDY,
1617        IB_WQS_ERR
1618};
1619
1620struct ib_wq {
1621        struct ib_device       *device;
1622        struct ib_uobject      *uobject;
1623        void                   *wq_context;
1624        void                  (*event_handler)(struct ib_event *, void *);
1625        struct ib_pd           *pd;
1626        struct ib_cq           *cq;
1627        u32                     wq_num;
1628        enum ib_wq_state        state;
1629        enum ib_wq_type         wq_type;
1630        atomic_t                usecnt;
1631};
1632
1633enum ib_wq_flags {
1634        IB_WQ_FLAGS_CVLAN_STRIPPING     = 1 << 0,
1635        IB_WQ_FLAGS_SCATTER_FCS         = 1 << 1,
1636        IB_WQ_FLAGS_DELAY_DROP          = 1 << 2,
1637        IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3,
1638};
1639
1640struct ib_wq_init_attr {
1641        void                   *wq_context;
1642        enum ib_wq_type         wq_type;
1643        u32                     max_wr;
1644        u32                     max_sge;
1645        struct ib_cq           *cq;
1646        void                  (*event_handler)(struct ib_event *, void *);
1647        u32                     create_flags; /* Use enum ib_wq_flags */
1648};
1649
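/*
 * Editor's illustrative sketch (not part of the original header): creating a
 * receive WQ and enabling CVLAN stripping only when the device reports the
 * matching raw-packet capability.  This assumes ib_create_wq() and the
 * ib_device_attr::raw_packet_caps field, both declared later in this file;
 * the wrapper name is hypothetical.
 */
static inline struct ib_wq *example_create_rq(struct ib_pd *pd,
                                              struct ib_cq *cq,
                                              u32 max_wr, u32 max_sge)
{
        struct ib_wq_init_attr attr = {
                .wq_type = IB_WQT_RQ,
                .max_wr  = max_wr,
                .max_sge = max_sge,
                .cq      = cq,
        };

        if (pd->device->attrs.raw_packet_caps &
            IB_RAW_PACKET_CAP_CVLAN_STRIPPING)
                attr.create_flags |= IB_WQ_FLAGS_CVLAN_STRIPPING;

        return ib_create_wq(pd, &attr);
}
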
1650enum ib_wq_attr_mask {
1651        IB_WQ_STATE             = 1 << 0,
1652        IB_WQ_CUR_STATE         = 1 << 1,
1653        IB_WQ_FLAGS             = 1 << 2,
1654};
1655
1656struct ib_wq_attr {
1657        enum ib_wq_state        wq_state;
1658        enum ib_wq_state        curr_wq_state;
1659        u32                     flags; /* Use enum ib_wq_flags */
1660        u32                     flags_mask; /* Use enum ib_wq_flags */
1661};
1662
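/*
 * Editor's illustrative sketch (not part of the original header): moving a WQ
 * from RESET to ready with ib_modify_wq(), declared later in this file.  The
 * wrapper name is hypothetical.
 */
static inline int example_wq_to_ready(struct ib_wq *wq)
{
        struct ib_wq_attr wq_attr = {
                .wq_state      = IB_WQS_RDY,
                .curr_wq_state = IB_WQS_RESET,
        };

        return ib_modify_wq(wq, &wq_attr, IB_WQ_STATE | IB_WQ_CUR_STATE);
}
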
1663struct ib_rwq_ind_table {
1664        struct ib_device       *device;
1665        struct ib_uobject      *uobject;
1666        atomic_t                usecnt;
1667        u32                     ind_tbl_num;
1668        u32                     log_ind_tbl_size;
1669        struct ib_wq          **ind_tbl;
1670};
1671
1672struct ib_rwq_ind_table_init_attr {
1673        u32             log_ind_tbl_size;
1674        /* Each entry is a pointer to Receive Work Queue */
1675        struct ib_wq    **ind_tbl;
1676};
1677
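/*
 * Editor's illustrative sketch (not part of the original header): building an
 * RSS indirection table over a caller-provided array of 1 << log_size receive
 * WQs.  This assumes ib_create_rwq_ind_table(), declared later in this file;
 * the wrapper name is hypothetical.
 */
static inline struct ib_rwq_ind_table *
example_create_rss_table(struct ib_device *device, struct ib_wq **wqs,
                         u32 log_size)
{
        struct ib_rwq_ind_table_init_attr init_attr = {
                .log_ind_tbl_size = log_size,
                .ind_tbl          = wqs,        /* 1 << log_size entries */
        };

        return ib_create_rwq_ind_table(device, &init_attr);
}
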
1678enum port_pkey_state {
1679        IB_PORT_PKEY_NOT_VALID = 0,
1680        IB_PORT_PKEY_VALID = 1,
1681        IB_PORT_PKEY_LISTED = 2,
1682};
1683
1684struct ib_qp_security;
1685
1686struct ib_port_pkey {
1687        enum port_pkey_state    state;
1688        u16                     pkey_index;
1689        u8                      port_num;
1690        struct list_head        qp_list;
1691        struct list_head        to_error_list;
1692        struct ib_qp_security  *sec;
1693};
1694
1695struct ib_ports_pkeys {
1696        struct ib_port_pkey     main;
1697        struct ib_port_pkey     alt;
1698};
1699
1700struct ib_qp_security {
1701        struct ib_qp           *qp;
1702        struct ib_device       *dev;
1703        /* Hold this mutex when changing port and pkey settings. */
1704        struct mutex            mutex;
1705        struct ib_ports_pkeys  *ports_pkeys;
1706        /* A list of all open shared QP handles.  Required to enforce security
1707         * properly for all users of a shared QP.
1708         */
1709        struct list_head        shared_qp_list;
1710        void                   *security;
1711        bool                    destroying;
1712        atomic_t                error_list_count;
1713        struct completion       error_complete;
1714        int                     error_comps_pending;
1715};
1716
1717/*
1718 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
1719 * @max_read_sge:  Maximum SGE elements per RDMA READ request.
1720 */
1721struct ib_qp {
1722        struct ib_device       *device;
1723        struct ib_pd           *pd;
1724        struct ib_cq           *send_cq;
1725        struct ib_cq           *recv_cq;
1726        spinlock_t              mr_lock;
1727        int                     mrs_used;
1728        struct list_head        rdma_mrs;
1729        struct list_head        sig_mrs;
1730        struct ib_srq          *srq;
1731        struct ib_xrcd         *xrcd; /* XRC TGT QPs only */
1732        struct list_head        xrcd_list;
1733
1734        /* count times opened, mcast attaches, flow attaches */
1735        atomic_t                usecnt;
1736        struct list_head        open_list;
1737        struct ib_qp           *real_qp;
1738        struct ib_uobject      *uobject;
1739        void                  (*event_handler)(struct ib_event *, void *);
1740        void                   *qp_context;
1741        u32                     qp_num;
1742        u32                     max_write_sge;
1743        u32                     max_read_sge;
1744        enum ib_qp_type         qp_type;
1745        struct ib_rwq_ind_table *rwq_ind_tbl;
1746        struct ib_qp_security  *qp_sec;
1747        u8                      port;
1748};
1749
1750struct ib_mr {
1751        struct ib_device  *device;
1752        struct ib_pd      *pd;
1753        u32                lkey;
1754        u32                rkey;
1755        u64                iova;
1756        u64                length;
1757        unsigned int       page_size;
1758        bool               need_inval;
1759        union {
1760                struct ib_uobject       *uobject;       /* user */
1761                struct list_head        qp_entry;       /* FR */
1762        };
1763};
1764
1765struct ib_mw {
1766        struct ib_device        *device;
1767        struct ib_pd            *pd;
1768        struct ib_uobject       *uobject;
1769        u32                     rkey;
1770        enum ib_mw_type         type;
1771};
1772
1773struct ib_fmr {
1774        struct ib_device        *device;
1775        struct ib_pd            *pd;
1776        struct list_head        list;
1777        u32                     lkey;
1778        u32                     rkey;
1779};
1780
1781/* Supported steering options */
1782enum ib_flow_attr_type {
1783        /* steering according to rule specifications */
1784        IB_FLOW_ATTR_NORMAL             = 0x0,
1785        /* default unicast and multicast rule -
1786         * receive all Eth traffic which isn't steered to any QP
1787         */
1788        IB_FLOW_ATTR_ALL_DEFAULT        = 0x1,
1789        /* default multicast rule -
1790         * receive all Eth multicast traffic which isn't steered to any QP
1791         */
1792        IB_FLOW_ATTR_MC_DEFAULT         = 0x2,
1793        /* sniffer rule - receive all port traffic */
1794        IB_FLOW_ATTR_SNIFFER            = 0x3
1795};
1796
1797/* Supported steering header types */
1798enum ib_flow_spec_type {
1799        /* L2 headers */
1800        IB_FLOW_SPEC_ETH                = 0x20,
1801        IB_FLOW_SPEC_IB                 = 0x22,
1802        /* L3 header */
1803        IB_FLOW_SPEC_IPV4               = 0x30,
1804        IB_FLOW_SPEC_IPV6               = 0x31,
1805        /* L4 headers */
1806        IB_FLOW_SPEC_TCP                = 0x40,
1807        IB_FLOW_SPEC_UDP                = 0x41,
1808        IB_FLOW_SPEC_VXLAN_TUNNEL       = 0x50,
1809        IB_FLOW_SPEC_INNER              = 0x100,
1810        /* Actions */
1811        IB_FLOW_SPEC_ACTION_TAG         = 0x1000,
1812        IB_FLOW_SPEC_ACTION_DROP        = 0x1001,
1813};
1814#define IB_FLOW_SPEC_LAYER_MASK 0xF0
1815#define IB_FLOW_SPEC_SUPPORT_LAYERS 8
1816
1817/* Flow steering rule priority is set according to its domain.
1818 * Lower domain value means higher priority.
1819 */
1820enum ib_flow_domain {
1821        IB_FLOW_DOMAIN_USER,
1822        IB_FLOW_DOMAIN_ETHTOOL,
1823        IB_FLOW_DOMAIN_RFS,
1824        IB_FLOW_DOMAIN_NIC,
1825        IB_FLOW_DOMAIN_NUM /* Must be last */
1826};
1827
1828enum ib_flow_flags {
1829        IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
1830        IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 2  /* Must be last */
1831};
1832
1833struct ib_flow_eth_filter {
1834        u8      dst_mac[6];
1835        u8      src_mac[6];
1836        __be16  ether_type;
1837        __be16  vlan_tag;
1838        /* Must be last */
1839        u8      real_sz[0];
1840};
1841
1842struct ib_flow_spec_eth {
1843        u32                       type;
1844        u16                       size;
1845        struct ib_flow_eth_filter val;
1846        struct ib_flow_eth_filter mask;
1847};
1848
1849struct ib_flow_ib_filter {
1850        __be16 dlid;
1851        __u8   sl;
1852        /* Must be last */
1853        u8      real_sz[0];
1854};
1855
1856struct ib_flow_spec_ib {
1857        u32                      type;
1858        u16                      size;
1859        struct ib_flow_ib_filter val;
1860        struct ib_flow_ib_filter mask;
1861};
1862
1863/* IPv4 header flags */
1864enum ib_ipv4_flags {
1865        IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
1866        IB_IPV4_MORE_FRAG = 0x4  /* All fragmented packets except the
1867                                    last have this flag set */
1868};
1869
1870struct ib_flow_ipv4_filter {
1871        __be32  src_ip;
1872        __be32  dst_ip;
1873        u8      proto;
1874        u8      tos;
1875        u8      ttl;
1876        u8      flags;
1877        /* Must be last */
1878        u8      real_sz[0];
1879};
1880
1881struct ib_flow_spec_ipv4 {
1882        u32                        type;
1883        u16                        size;
1884        struct ib_flow_ipv4_filter val;
1885        struct ib_flow_ipv4_filter mask;
1886};
1887
1888struct ib_flow_ipv6_filter {
1889        u8      src_ip[16];
1890        u8      dst_ip[16];
1891        __be32  flow_label;
1892        u8      next_hdr;
1893        u8      traffic_class;
1894        u8      hop_limit;
1895        /* Must be last */
1896        u8      real_sz[0];
1897};
1898
1899struct ib_flow_spec_ipv6 {
1900        u32                        type;
1901        u16                        size;
1902        struct ib_flow_ipv6_filter val;
1903        struct ib_flow_ipv6_filter mask;
1904};
1905
1906struct ib_flow_tcp_udp_filter {
1907        __be16  dst_port;
1908        __be16  src_port;
1909        /* Must be last */
1910        u8      real_sz[0];
1911};
1912
1913struct ib_flow_spec_tcp_udp {
1914        u32                           type;
1915        u16                           size;
1916        struct ib_flow_tcp_udp_filter val;
1917        struct ib_flow_tcp_udp_filter mask;
1918};
1919
1920struct ib_flow_tunnel_filter {
1921        __be32  tunnel_id;
1922        u8      real_sz[0];
1923};
1924
1925/* ib_flow_spec_tunnel describes a VXLAN tunnel;
1926 * the tunnel_id field of val holds the VNI value.
1927 */
1928struct ib_flow_spec_tunnel {
1929        u32                           type;
1930        u16                           size;
1931        struct ib_flow_tunnel_filter  val;
1932        struct ib_flow_tunnel_filter  mask;
1933};
1934
1935struct ib_flow_spec_action_tag {
1936        enum ib_flow_spec_type        type;
1937        u16                           size;
1938        u32                           tag_id;
1939};
1940
1941struct ib_flow_spec_action_drop {
1942        enum ib_flow_spec_type        type;
1943        u16                           size;
1944};
1945
1946union ib_flow_spec {
1947        struct {
1948                u32                     type;
1949                u16                     size;
1950        };
1951        struct ib_flow_spec_eth         eth;
1952        struct ib_flow_spec_ib          ib;
1953        struct ib_flow_spec_ipv4        ipv4;
1954        struct ib_flow_spec_tcp_udp     tcp_udp;
1955        struct ib_flow_spec_ipv6        ipv6;
1956        struct ib_flow_spec_tunnel      tunnel;
1957        struct ib_flow_spec_action_tag  flow_tag;
1958        struct ib_flow_spec_action_drop drop;
1959};
1960
1961struct ib_flow_attr {
1962        enum ib_flow_attr_type type;
1963        u16          size;
1964        u16          priority;
1965        u32          flags;
1966        u8           num_of_specs;
1967        u8           port;
1968        /* Optional layers follow, according to the user's request:
1969         * struct ib_flow_spec_xxx
1970         * struct ib_flow_spec_yyy
1971         */
1972};
1973
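/*
 * Editor's illustrative sketch (not part of the original header): steering
 * all IPv4 Ethernet traffic arriving on @port to @qp.  The specs follow the
 * attribute in memory, as described by the comment in ib_flow_attr.  This
 * assumes ib_create_flow(), declared later in this file; the wrapper name is
 * hypothetical.
 */
static inline struct ib_flow *example_steer_ipv4(struct ib_qp *qp, u8 port)
{
        struct {
                struct ib_flow_attr     attr;
                struct ib_flow_spec_eth eth;
        } flow = {
                .attr = {
                        .type         = IB_FLOW_ATTR_NORMAL,
                        .size         = sizeof(flow.attr) + sizeof(flow.eth),
                        .num_of_specs = 1,
                        .port         = port,
                },
                .eth = {
                        .type = IB_FLOW_SPEC_ETH,
                        .size = sizeof(struct ib_flow_spec_eth),
                        .val  = { .ether_type = cpu_to_be16(ETH_P_IP) },
                        .mask = { .ether_type = cpu_to_be16(0xffff) },
                },
        };

        return ib_create_flow(qp, &flow.attr, IB_FLOW_DOMAIN_USER);
}
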
1974struct ib_flow {
1975        struct ib_qp            *qp;
1976        struct ib_uobject       *uobject;
1977};
1978
1979struct ib_mad_hdr;
1980struct ib_grh;
1981
1982enum ib_process_mad_flags {
1983        IB_MAD_IGNORE_MKEY      = 1,
1984        IB_MAD_IGNORE_BKEY      = 2,
1985        IB_MAD_IGNORE_ALL       = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
1986};
1987
1988enum ib_mad_result {
1989        IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
1990        IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
1991        IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
1992        IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
1993};
1994
1995struct ib_port_cache {
1996        u64                   subnet_prefix;
1997        struct ib_pkey_cache  *pkey;
1998        struct ib_gid_table   *gid;
1999        u8                     lmc;
2000        enum ib_port_state     port_state;
2001};
2002
2003struct ib_cache {
2004        rwlock_t                lock;
2005        struct ib_event_handler event_handler;
2006        struct ib_port_cache   *ports;
2007};
2008
2009struct iw_cm_verbs;
2010
2011struct ib_port_immutable {
2012        int                           pkey_tbl_len;
2013        int                           gid_tbl_len;
2014        u32                           core_cap_flags;
2015        u32                           max_mad_size;
2016};
2017
2018/* rdma netdev type - specifies protocol type */
2019enum rdma_netdev_t {
2020        RDMA_NETDEV_OPA_VNIC,
2021        RDMA_NETDEV_IPOIB,
2022};
2023
2024/**
2025 * struct rdma_netdev - rdma netdev
2026 * For cases where netstack interfacing is required.
2027 */
2028struct rdma_netdev {
2029        void              *clnt_priv;
2030        struct ib_device  *hca;
2031        u8                 port_num;
2032
2033        /* cleanup function must be specified */
2034        void (*free_rdma_netdev)(struct net_device *netdev);
2035
2036        /* control functions */
2037        void (*set_id)(struct net_device *netdev, int id);
2038        /* send packet */
2039        int (*send)(struct net_device *dev, struct sk_buff *skb,
2040                    struct ib_ah *address, u32 dqpn);
2041        /* multicast */
2042        int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
2043                            union ib_gid *gid, u16 mlid,
2044                            int set_qkey, u32 qkey);
2045        int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
2046                            union ib_gid *gid, u16 mlid);
2047};
2048
2049struct ib_port_pkey_list {
2050        /* Lock to hold while modifying the list. */
2051        spinlock_t                    list_lock;
2052        struct list_head              pkey_list;
2053};
2054
2055struct ib_device {
2056        /* Do not access @dma_device directly from ULPs or from HW drivers. */
2057        struct device                *dma_device;
2058
2059        char                          name[IB_DEVICE_NAME_MAX];
2060
2061        struct list_head              event_handler_list;
2062        spinlock_t                    event_handler_lock;
2063
2064        spinlock_t                    client_data_lock;
2065        struct list_head              core_list;
2066        /* Access to the client_data_list is protected by the client_data_lock
2067         * spinlock and the lists_rwsem read-write semaphore */
2068        struct list_head              client_data_list;
2069
2070        struct ib_cache               cache;
2071        /**
2072         * port_immutable is indexed by port number
2073         */
2074        struct ib_port_immutable     *port_immutable;
2075
2076        int                           num_comp_vectors;
2077
2078        struct ib_port_pkey_list     *port_pkey_list;
2079
2080        struct iw_cm_verbs           *iwcm;
2081
2082        /**
2083         * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
2084         *   driver initialized data.  The struct is kfree()'ed by the sysfs
2085         *   core when the device is removed.  A lifespan of -1 in the return
2086         *   struct tells the core to set a default lifespan.
2087         */
2088        struct rdma_hw_stats      *(*alloc_hw_stats)(struct ib_device *device,
2089                                                     u8 port_num);
2090        /**
2091         * get_hw_stats - Fill in the counter value(s) in the stats struct.
2092         * @index - The index in the value array we wish to have updated, or
2093         *   num_counters if we want all stats updated
2094         * Return codes -
2095         *   < 0 - Error, no counters updated
2096         *   index - Updated the single counter pointed to by index
2097         *   num_counters - Updated all counters (will reset the timestamp
2098         *     and prevent further calls for lifespan milliseconds)
2099         * Drivers are allowed to update all counters in lieu of just the
2100         *   one given in index at their option.
2101         */
2102        int                        (*get_hw_stats)(struct ib_device *device,
2103                                                   struct rdma_hw_stats *stats,
2104                                                   u8 port, int index);
2105        int                        (*query_device)(struct ib_device *device,
2106                                                   struct ib_device_attr *device_attr,
2107                                                   struct ib_udata *udata);
2108        int                        (*query_port)(struct ib_device *device,
2109                                                 u8 port_num,
2110                                                 struct ib_port_attr *port_attr);
2111        enum rdma_link_layer       (*get_link_layer)(struct ib_device *device,
2112                                                     u8 port_num);
2113        /* When calling get_netdev, the HW vendor's driver should return the
2114         * net device of device @device at port @port_num or NULL if such
2115         * a net device doesn't exist. The vendor driver should call dev_hold
2116         * on this net device. The HW vendor's device driver must guarantee
2117         * that this function returns NULL before the net device reaches
2118         * NETDEV_UNREGISTER_FINAL state.
2119         */
2120        struct net_device         *(*get_netdev)(struct ib_device *device,
2121                                                 u8 port_num);
2122        int                        (*query_gid)(struct ib_device *device,
2123                                                u8 port_num, int index,
2124                                                union ib_gid *gid);
2125        /* When calling add_gid, the HW vendor's driver should
2126         * add the gid of device @device at gid index @index of
2127         * port @port_num to be @gid. Meta-info of that gid (for example,
2128         * the network device related to this gid) is available
2129         * at @attr. @context allows the HW vendor driver to store extra
2130         * information together with a GID entry. The HW vendor may allocate
2131         * memory to contain this information and store it in @context when a
2132         * new GID entry is written. Params are consistent until the next
2133         * call of add_gid or delete_gid. The function should return 0 on
2134         * success or error otherwise. The function could be called
2135         * concurrently for different ports. This function is only called
2136         * when roce_gid_table is used.
2137         */
2138        int                        (*add_gid)(struct ib_device *device,
2139                                              u8 port_num,
2140                                              unsigned int index,
2141                                              const union ib_gid *gid,
2142                                              const struct ib_gid_attr *attr,
2143                                              void **context);
2144        /* When calling del_gid, the HW vendor's driver should delete the
2145         * gid of device @device at gid index @index of port @port_num.
2146         * Upon the deletion of a GID entry, the HW vendor must free any
2147         * allocated memory. The caller will clear @context afterwards.
2148         * This function is only called when roce_gid_table is used.
2149         */
2150        int                        (*del_gid)(struct ib_device *device,
2151                                              u8 port_num,
2152                                              unsigned int index,
2153                                              void **context);
2154        int                        (*query_pkey)(struct ib_device *device,
2155                                                 u8 port_num, u16 index, u16 *pkey);
2156        int                        (*modify_device)(struct ib_device *device,
2157                                                    int device_modify_mask,
2158                                                    struct ib_device_modify *device_modify);
2159        int                        (*modify_port)(struct ib_device *device,
2160                                                  u8 port_num, int port_modify_mask,
2161                                                  struct ib_port_modify *port_modify);
2162        struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
2163                                                     struct ib_udata *udata);
2164        int                        (*dealloc_ucontext)(struct ib_ucontext *context);
2165        int                        (*mmap)(struct ib_ucontext *context,
2166                                           struct vm_area_struct *vma);
2167        struct ib_pd *             (*alloc_pd)(struct ib_device *device,
2168                                               struct ib_ucontext *context,
2169                                               struct ib_udata *udata);
2170        int                        (*dealloc_pd)(struct ib_pd *pd);
2171        struct ib_ah *             (*create_ah)(struct ib_pd *pd,
2172                                                struct rdma_ah_attr *ah_attr,
2173                                                struct ib_udata *udata);
2174        int                        (*modify_ah)(struct ib_ah *ah,
2175                                                struct rdma_ah_attr *ah_attr);
2176        int                        (*query_ah)(struct ib_ah *ah,
2177                                               struct rdma_ah_attr *ah_attr);
2178        int                        (*destroy_ah)(struct ib_ah *ah);
2179        struct ib_srq *            (*create_srq)(struct ib_pd *pd,
2180                                                 struct ib_srq_init_attr *srq_init_attr,
2181                                                 struct ib_udata *udata);
2182        int                        (*modify_srq)(struct ib_srq *srq,
2183                                                 struct ib_srq_attr *srq_attr,
2184                                                 enum ib_srq_attr_mask srq_attr_mask,
2185                                                 struct ib_udata *udata);
2186        int                        (*query_srq)(struct ib_srq *srq,
2187                                                struct ib_srq_attr *srq_attr);
2188        int                        (*destroy_srq)(struct ib_srq *srq);
2189        int                        (*post_srq_recv)(struct ib_srq *srq,
2190                                                    struct ib_recv_wr *recv_wr,
2191                                                    struct ib_recv_wr **bad_recv_wr);
2192        struct ib_qp *             (*create_qp)(struct ib_pd *pd,
2193                                                struct ib_qp_init_attr *qp_init_attr,
2194                                                struct ib_udata *udata);
2195        int                        (*modify_qp)(struct ib_qp *qp,
2196                                                struct ib_qp_attr *qp_attr,
2197                                                int qp_attr_mask,
2198                                                struct ib_udata *udata);
2199        int                        (*query_qp)(struct ib_qp *qp,
2200                                               struct ib_qp_attr *qp_attr,
2201                                               int qp_attr_mask,
2202                                               struct ib_qp_init_attr *qp_init_attr);
2203        int                        (*destroy_qp)(struct ib_qp *qp);
2204        int                        (*post_send)(struct ib_qp *qp,
2205                                                struct ib_send_wr *send_wr,
2206                                                struct ib_send_wr **bad_send_wr);
2207        int                        (*post_recv)(struct ib_qp *qp,
2208                                                struct ib_recv_wr *recv_wr,
2209                                                struct ib_recv_wr **bad_recv_wr);
2210        struct ib_cq *             (*create_cq)(struct ib_device *device,
2211                                                const struct ib_cq_init_attr *attr,
2212                                                struct ib_ucontext *context,
2213                                                struct ib_udata *udata);
2214        int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
2215                                                u16 cq_period);
2216        int                        (*destroy_cq)(struct ib_cq *cq);
2217        int                        (*resize_cq)(struct ib_cq *cq, int cqe,
2218                                                struct ib_udata *udata);
2219        int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
2220                                              struct ib_wc *wc);
2221        int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2222        int                        (*req_notify_cq)(struct ib_cq *cq,
2223                                                    enum ib_cq_notify_flags flags);
2224        int                        (*req_ncomp_notif)(struct ib_cq *cq,
2225                                                      int wc_cnt);
2226        struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
2227                                                 int mr_access_flags);
2228        struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
2229                                                  u64 start, u64 length,
2230                                                  u64 virt_addr,
2231                                                  int mr_access_flags,
2232                                                  struct ib_udata *udata);
2233        int                        (*rereg_user_mr)(struct ib_mr *mr,
2234                                                    int flags,
2235                                                    u64 start, u64 length,
2236                                                    u64 virt_addr,
2237                                                    int mr_access_flags,
2238                                                    struct ib_pd *pd,
2239                                                    struct ib_udata *udata);
2240        int                        (*dereg_mr)(struct ib_mr *mr);
2241        struct ib_mr *             (*alloc_mr)(struct ib_pd *pd,
2242                                               enum ib_mr_type mr_type,
2243                                               u32 max_num_sg);
2244        int                        (*map_mr_sg)(struct ib_mr *mr,
2245                                                struct scatterlist *sg,
2246                                                int sg_nents,
2247                                                unsigned int *sg_offset);
2248        struct ib_mw *             (*alloc_mw)(struct ib_pd *pd,
2249                                               enum ib_mw_type type,
2250                                               struct ib_udata *udata);
2251        int                        (*dealloc_mw)(struct ib_mw *mw);
2252        struct ib_fmr *            (*alloc_fmr)(struct ib_pd *pd,
2253                                                int mr_access_flags,
2254                                                struct ib_fmr_attr *fmr_attr);
2255        int                        (*map_phys_fmr)(struct ib_fmr *fmr,
2256                                                   u64 *page_list, int list_len,
2257                                                   u64 iova);
2258        int                        (*unmap_fmr)(struct list_head *fmr_list);
2259        int                        (*dealloc_fmr)(struct ib_fmr *fmr);
2260        int                        (*attach_mcast)(struct ib_qp *qp,
2261                                                   union ib_gid *gid,
2262                                                   u16 lid);
2263        int                        (*detach_mcast)(struct ib_qp *qp,
2264                                                   union ib_gid *gid,
2265                                                   u16 lid);
2266        int                        (*process_mad)(struct ib_device *device,
2267                                                  int process_mad_flags,
2268                                                  u8 port_num,
2269                                                  const struct ib_wc *in_wc,
2270                                                  const struct ib_grh *in_grh,
2271                                                  const struct ib_mad_hdr *in_mad,
2272                                                  size_t in_mad_size,
2273                                                  struct ib_mad_hdr *out_mad,
2274                                                  size_t *out_mad_size,
2275                                                  u16 *out_mad_pkey_index);
2276        struct ib_xrcd *           (*alloc_xrcd)(struct ib_device *device,
2277                                                 struct ib_ucontext *ucontext,
2278                                                 struct ib_udata *udata);
2279        int                        (*dealloc_xrcd)(struct ib_xrcd *xrcd);
2280        struct ib_flow *           (*create_flow)(struct ib_qp *qp,
2281                                                  struct ib_flow_attr
2282                                                  *flow_attr,
2283                                                  int domain);
2284        int                        (*destroy_flow)(struct ib_flow *flow_id);
2285        int                        (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2286                                                      struct ib_mr_status *mr_status);
2287        void                       (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
2288        void                       (*drain_rq)(struct ib_qp *qp);
2289        void                       (*drain_sq)(struct ib_qp *qp);
2290        int                        (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
2291                                                        int state);
2292        int                        (*get_vf_config)(struct ib_device *device, int vf, u8 port,
2293                                                   struct ifla_vf_info *ivf);
2294        int                        (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
2295                                                   struct ifla_vf_stats *stats);
2296        int                        (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
2297                                                  int type);
2298        struct ib_wq *             (*create_wq)(struct ib_pd *pd,
2299                                                struct ib_wq_init_attr *init_attr,
2300                                                struct ib_udata *udata);
2301        int                        (*destroy_wq)(struct ib_wq *wq);
2302        int                        (*modify_wq)(struct ib_wq *wq,
2303                                                struct ib_wq_attr *attr,
2304                                                u32 wq_attr_mask,
2305                                                struct ib_udata *udata);
2306        struct ib_rwq_ind_table *  (*create_rwq_ind_table)(struct ib_device *device,
2307                                                           struct ib_rwq_ind_table_init_attr *init_attr,
2308                                                           struct ib_udata *udata);
2309        int                        (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2310        /**
2311         * rdma netdev operation
2312         *
2313         * A driver implementing alloc_rdma_netdev must return
2314         * ERR_PTR(-EOPNOTSUPP) if it doesn't support the specified rdma netdev type.
2315         */
2316        struct net_device *(*alloc_rdma_netdev)(
2317                                        struct ib_device *device,
2318                                        u8 port_num,
2319                                        enum rdma_netdev_t type,
2320                                        const char *name,
2321                                        unsigned char name_assign_type,
2322                                        void (*setup)(struct net_device *));
2323
2324        struct module               *owner;
2325        struct device                dev;
2326        struct kobject               *ports_parent;
2327        struct list_head             port_list;
2328
2329        enum {
2330                IB_DEV_UNINITIALIZED,
2331                IB_DEV_REGISTERED,
2332                IB_DEV_UNREGISTERED
2333        }                            reg_state;
2334
2335        int                          uverbs_abi_ver;
2336        u64                          uverbs_cmd_mask;
2337        u64                          uverbs_ex_cmd_mask;
2338
2339        char                         node_desc[IB_DEVICE_NODE_DESC_MAX];
2340        __be64                       node_guid;
2341        u32                          local_dma_lkey;
2342        u16                          is_switch:1;
2343        u8                           node_type;
2344        u8                           phys_port_cnt;
2345        struct ib_device_attr        attrs;
2346        struct attribute_group       *hw_stats_ag;
2347        struct rdma_hw_stats         *hw_stats;
2348
2349#ifdef CONFIG_CGROUP_RDMA
2350        struct rdmacg_device         cg_device;
2351#endif
2352
2353        u32                          index;
2354
2355        /**
2356         * The following mandatory functions are used only at device
2357         * registration.  Keep functions such as these at the end of this
2358         * structure to avoid cache line misses when accessing struct ib_device
2359         * in fast paths.
2360         */
2361        int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
2362        void (*get_dev_fw_str)(struct ib_device *, char *str);
2363        const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
2364                                                     int comp_vector);
2365
2366        struct uverbs_root_spec         *specs_root;
2367};
2368
2369struct ib_client {
2370        char  *name;
2371        void (*add)   (struct ib_device *);
2372        void (*remove)(struct ib_device *, void *client_data);
2373
2374        /* Returns the net_dev belonging to this ib_client and matching the
2375         * given parameters.
2376         * @dev:         An RDMA device that the net_dev uses for communication.
2377         * @port:        A physical port number on the RDMA device.
2378         * @pkey:        P_Key that the net_dev uses if applicable.
2379         * @gid:         A GID that the net_dev uses to communicate.
2380         * @addr:        An IP address the net_dev is configured with.
2381         * @client_data: The device's client data set by ib_set_client_data().
2382         *
2383         * An ib_client that implements a net_dev on top of RDMA devices
2384         * (such as IP over IB) should implement this callback, allowing the
2385         * rdma_cm module to find the right net_dev for a given request.
2386         *
2387         * The caller is responsible for calling dev_put on the returned
2388         * netdev. */
2389        struct net_device *(*get_net_dev_by_params)(
2390                        struct ib_device *dev,
2391                        u8 port,
2392                        u16 pkey,
2393                        const union ib_gid *gid,
2394                        const struct sockaddr *addr,
2395                        void *client_data);
2396        struct list_head list;
2397};
2398
2399struct ib_device *ib_alloc_device(size_t size);
2400void ib_dealloc_device(struct ib_device *device);
2401
2402void ib_get_device_fw_str(struct ib_device *device, char *str);
2403
2404int ib_register_device(struct ib_device *device,
2405                       int (*port_callback)(struct ib_device *,
2406                                            u8, struct kobject *));
2407void ib_unregister_device(struct ib_device *device);
2408
2409int ib_register_client   (struct ib_client *client);
2410void ib_unregister_client(struct ib_client *client);
2411
2412void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
2413void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
2414                         void *data);
2415
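/*
 * Editor's illustrative sketch (not part of the original header): a minimal
 * client that keeps per-device state via ib_set_client_data().  All names
 * are hypothetical; a real client registers with ib_register_client() from
 * module init and unregisters on exit.
 */
static struct ib_client example_client;

static void example_add_one(struct ib_device *device)
{
        /* Allocate per-device state here; NULL is used for brevity. */
        ib_set_client_data(device, &example_client, NULL);
}

static void example_remove_one(struct ib_device *device, void *client_data)
{
        /* @client_data is whatever example_add_one() stored; free it here. */
}

static struct ib_client example_client = {
        .name   = "example",
        .add    = example_add_one,
        .remove = example_remove_one,
};
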
2416static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2417{
2418        return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2419}
2420
2421static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2422{
2423        return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
2424}
2425
2426static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2427                                       size_t offset,
2428                                       size_t len)
2429{
2430        const void __user *p = udata->inbuf + offset;
2431        bool ret;
2432        u8 *buf;
2433
2434        if (len > USHRT_MAX)
2435                return false;
2436
2437        buf = memdup_user(p, len);
2438        if (IS_ERR(buf))
2439                return false;
2440
2441        ret = !memchr_inv(buf, 0, len);
2442        kfree(buf);
2443        return ret;
2444}
2445
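/*
 * Editor's illustrative sketch (not part of the original header): typical use
 * of the udata helpers above in a driver verb.  The command and response
 * layouts are hypothetical.
 */
static inline int example_handle_udata(struct ib_udata *udata)
{
        struct { __u32 flags; __u32 reserved; } cmd;
        struct { __u32 handle; } resp = {};
        int ret;

        if (udata->inlen < sizeof(cmd))
                return -EINVAL;
        ret = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
        if (ret)
                return ret;
        /* Reject commands whose trailing (unknown to us) bytes are set. */
        if (!ib_is_udata_cleared(udata, sizeof(cmd),
                                 udata->inlen - sizeof(cmd)))
                return -EOPNOTSUPP;
        return ib_copy_to_udata(udata, &resp, sizeof(resp));
}
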
2446/**
2447 * ib_modify_qp_is_ok - Check that the supplied attribute mask
2448 * contains all required attributes and no attributes not allowed for
2449 * the given QP state transition.
2450 * @cur_state: Current QP state
2451 * @next_state: Next QP state
2452 * @type: QP type
2453 * @mask: Mask of supplied QP attributes
2454 * @ll: link layer of port
2455 *
2456 * This function is a helper function that a low-level driver's
2457 * modify_qp method can use to validate the consumer's input.  It
2458 * checks that cur_state and next_state are valid QP states, that a
2459 * transition from cur_state to next_state is allowed by the IB spec,
2460 * and that the attribute mask supplied is allowed for the transition.
2461 */
2462int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
2463                       enum ib_qp_type type, enum ib_qp_attr_mask mask,
2464                       enum rdma_link_layer ll);
2465
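/*
 * Editor's illustrative sketch (not part of the original header): how a
 * driver's modify_qp method might use ib_modify_qp_is_ok() to validate a
 * requested transition.  rdma_port_get_link_layer() is declared just below;
 * the wrapper name is hypothetical.
 */
static inline int example_validate_transition(struct ib_qp *qp,
                                              enum ib_qp_state cur_state,
                                              enum ib_qp_state new_state,
                                              int attr_mask, u8 port_num)
{
        enum rdma_link_layer ll =
                rdma_port_get_link_layer(qp->device, port_num);

        return ib_modify_qp_is_ok(cur_state, new_state, qp->qp_type,
                                  attr_mask, ll) ? 0 : -EINVAL;
}
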
2466void ib_register_event_handler(struct ib_event_handler *event_handler);
2467void ib_unregister_event_handler(struct ib_event_handler *event_handler);
2468void ib_dispatch_event(struct ib_event *event);
2469
2470int ib_query_port(struct ib_device *device,
2471                  u8 port_num, struct ib_port_attr *port_attr);
2472
2473enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
2474                                               u8 port_num);
2475
2476/**
2477 * rdma_cap_ib_switch - Check if the device is IB switch
2478 * @device: Device to check
2479 *
2480 * The device driver is responsible for setting the is_switch bit
2481 * in the ib_device structure at init time.
2482 *
2483 * Return: true if the device is IB switch.
2484 */
2485static inline bool rdma_cap_ib_switch(const struct ib_device *device)
2486{
2487        return device->is_switch;
2488}
2489
2490/**
2491 * rdma_start_port - Return the first valid port number for the device
2492 * specified
2493 *
2494 * @device: Device to be checked
2495 *
2496 * Return: the first valid port number
2497 */
2498static inline u8 rdma_start_port(const struct ib_device *device)
2499{
2500        return rdma_cap_ib_switch(device) ? 0 : 1;
2501}
2502
2503/**
2504 * rdma_end_port - Return the last valid port number for the device
2505 * specified
2506 *
2507 * @device: Device to be checked
2508 *
2509 * Return: the last valid port number
2510 */
2511static inline u8 rdma_end_port(const struct ib_device *device)
2512{
2513        return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
2514}
2515
2516static inline int rdma_is_port_valid(const struct ib_device *device,
2517                                     unsigned int port)
2518{
2519        return (port >= rdma_start_port(device) &&
2520                port <= rdma_end_port(device));
2521}
2522
2523static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
2524{
2525        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
2526}
2527
2528static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
2529{
2530        return device->port_immutable[port_num].core_cap_flags &
2531                (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
2532}
2533
2534static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
2535{
2536        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
2537}
2538
2539static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
2540{
2541        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
2542}
2543
2544static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
2545{
2546        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
2547}
2548
2549static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
2550{
2551        return rdma_protocol_ib(device, port_num) ||
2552                rdma_protocol_roce(device, port_num);
2553}
2554
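/*
 * Editor's illustrative sketch (not part of the original header): walking a
 * device's valid port range with rdma_start_port()/rdma_end_port() and using
 * the protocol predicates above.  The function name is hypothetical.
 */
static inline bool example_has_ib_or_roce_port(struct ib_device *device)
{
        u8 port_num;

        for (port_num = rdma_start_port(device);
             port_num <= rdma_end_port(device); port_num++)
                if (rdma_ib_or_roce(device, port_num))
                        return true;

        return false;
}
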
2555static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
2556{
2557        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_RAW_PACKET;
2558}
2559
2560static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
2561{
2562        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_USNIC;
2563}
2564
2565/**
2566 * rdma_cap_ib_mad - Check if the port of a device supports InfiniBand
2567 * Management Datagrams.
2568 * @device: Device to check
2569 * @port_num: Port number to check
2570 *
2571 * Management Datagrams (MAD) are a required part of the InfiniBand
2572 * specification and are supported on all InfiniBand devices.  A slightly
2573 * extended version is also supported on OPA interfaces.
2574 *
2575 * Return: true if the port supports sending/receiving of MAD packets.
2576 */
2577static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
2578{
2579        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
2580}
2581
2582/**
2583 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
2584 * Management Datagrams.
2585 * @device: Device to check
2586 * @port_num: Port number to check
2587 *
2588 * Intel OmniPath devices extend and/or replace the InfiniBand Management
2589 * datagrams with their own versions.  These OPA MADs share many but not all of
2590 * the characteristics of InfiniBand MADs.
2591 *
2592 * OPA MADs differ in the following ways:
2593 *
2594 *    1) MADs are variable size up to 2K
2595 *       IBTA defined MADs remain fixed at 256 bytes
2596 *    2) OPA SMPs must carry valid PKeys
2597 *    3) OPA SMP packets are a different format
2598 *
2599 * Return: true if the port supports OPA MAD packet formats.
2600 */
2601static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
2602{
2603        return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
2604                == RDMA_CORE_CAP_OPA_MAD;
2605}
2606
2607/**
2608 * rdma_cap_ib_smi - Check if the port of a device provides an InfiniBand
2609 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
2610 * @device: Device to check
2611 * @port_num: Port number to check
2612 *
2613 * Each InfiniBand node is required to provide a Subnet Management Agent
2614 * that the subnet manager can access.  Prior to the fabric being fully
2615 * configured by the subnet manager, the SMA is accessed via a well known
2616 * interface called the Subnet Management Interface (SMI).  This interface
2617 * uses directed route packets to communicate with the SM to get around the
2618 * chicken and egg problem of the SM needing to know what's on the fabric
2619 * in order to configure the fabric, and needing to configure the fabric in
2620 * order to send packets to the devices on the fabric.  These directed
2621 * route packets do not need the fabric fully configured in order to reach
2622 * their destination.  The SMI is the only method allowed to send
2623 * directed route packets on an InfiniBand fabric.
2624 *
2625 * Return: true if the port provides an SMI.
2626 */
2627static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
2628{
2629        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
2630}
2631
2632/**
2633 * rdma_cap_ib_cm - Check if the port of a device supports the InfiniBand
2634 * Communication Manager.
2635 * @device: Device to check
2636 * @port_num: Port number to check
2637 *
2638 * The InfiniBand Communication Manager is one of many pre-defined General
2639 * Service Agents (GSA) that are accessed via the General Service
2640 * Interface (GSI).  Its role is to facilitate establishment of connections
2641 * between nodes as well as other management related tasks for established
2642 * connections.
2643 *
2644 * Return: true if the port supports an IB CM (this does not guarantee that
2645 * a CM is actually running however).
2646 */
2647static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
2648{
2649        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
2650}
2651
2652/**
2653 * rdma_cap_iw_cm - Check if the port of a device supports the iWARP
2654 * Communication Manager.
2655 * @device: Device to check
2656 * @port_num: Port number to check
2657 *
2658 * Similar to above, but specific to iWARP connections, which have a
2659 * different management protocol than InfiniBand.
2660 *
2661 * Return: true if the port supports an iWARP CM (this does not guarantee that
2662 * a CM is actually running however).
2663 */
2664static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
2665{
2666        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
2667}
2668
2669/**
2670 * rdma_cap_ib_sa - Check if the port of a device supports InfiniBand
2671 * Subnet Administration.
2672 * @device: Device to check
2673 * @port_num: Port number to check
2674 *
2675 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
2676 * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
2677 * fabrics, devices should resolve routes to other hosts by contacting the
2678 * SA to query the proper route.
2679 *
2680 * Return: true if the port should act as a client to the fabric Subnet
2681 * Administration interface.  This does not imply that the SA service is
2682 * running locally.
2683 */
2684static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
2685{
2686        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
2687}
2688
2689/**
2690 * rdma_cap_ib_mcast - Check if the port of a device supports InfiniBand
2691 * multicast.
2692 * @device: Device to check
2693 * @port_num: Port number to check
2694 *
2695 * InfiniBand multicast registration is more complex than normal IPv4 or
2696 * IPv6 multicast registration.  Each Host Channel Adapter must register
2697 * with the Subnet Manager when it wishes to join a multicast group.  It
2698 * should do so only once regardless of how many queue pairs it subscribes
2699 * to this group.  And it should leave the group only after all queue pairs
2700 * attached to the group have been detached.
2701 *
2702 * Return: true if the port must undertake the additional administrative
2703 * overhead of registering/unregistering with the SM and tracking of the
2704 * total number of queue pairs attached to the multicast group.
2705 */
2706static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
2707{
2708        return rdma_cap_ib_sa(device, port_num);
2709}
2710
2711/**
2712 * rdma_cap_af_ib - Check if the port of a device supports
2713 * native InfiniBand addressing.
2714 * @device: Device to check
2715 * @port_num: Port number to check
2716 *
2717 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
2718 * GID.  RoCE uses a different mechanism, but still generates a GID via
2719 * a prescribed mechanism and port specific data.
2720 *
2721 * Return: true if the port uses a GID address to identify devices on the
2722 * network.
2723 */
2724static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
2725{
2726        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
2727}
2728
2729/**
2730 * rdma_cap_eth_ah - Check if the port of a device supports
2731 * Ethernet Address Handles.
2732 * @device: Device to check
2733 * @port_num: Port number to check
2734 *
2735 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
2736 * to fabricate GIDs over Ethernet/IP specific addresses native to the
2737 * port.  Normally, packet headers are generated by the sending host
2738 * adapter, but when sending connectionless datagrams, we must manually
2739 * inject the proper headers for the fabric we are communicating over.
2740 *
2741 * Return: true if we are running as a RoCE port and must force the
2742 * addition of a Global Route Header built from our Ethernet Address
2743 * Handle into our header list for connectionless packets.
2744 */
2745static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
2746{
2747        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
2748}
2749
2750/**
2751 * rdma_cap_opa_ah - Check if the port of device supports
2752 * OPA Address handles
2753 * @device: Device to check
2754 * @port_num: Port number to check
2755 *
2756 * Return: true if we are running on an OPA device which supports
2757 * the extended OPA addressing.
2758 */
2759static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
2760{
2761        return (device->port_immutable[port_num].core_cap_flags &
2762                RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
2763}
2764
2765/**
2766 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
2767 *
2768 * @device: Device
2769 * @port_num: Port number
2770 *
2771 * This MAD size includes the MAD headers and MAD payload.  No other headers
2772 * are included.
2773 *
2774 * Return the max MAD size required by the port.  Will return 0 if the port
2775 * does not support MADs.
2776 */
2777static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
2778{
2779        return device->port_immutable[port_num].max_mad_size;
2780}
2781
2782/**
2783 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
2784 * @device: Device to check
2785 * @port_num: Port number to check
2786 *
2787 * RoCE GID table mechanism manages the various GIDs for a device.
2788 *
2789 * NOTE: if allocating the port's GID table has failed, this call will still
2790 * return true, but any RoCE GID table API will fail.
2791 *
2792 * Return: true if the port uses RoCE GID table mechanism in order to manage
2793 * its GIDs.
2794 */
2795static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
2796                                           u8 port_num)
2797{
2798        return rdma_protocol_roce(device, port_num) &&
2799                device->add_gid && device->del_gid;
2800}
2801
2802/*
2803 * Check if the device supports READ W/ INVALIDATE.
2804 */
2805static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
2806{
2807        /*
2808         * iWARP drivers must support READ W/ INVALIDATE.  No other protocol
2809         * has support for it yet.
2810         */
2811        return rdma_protocol_iwarp(dev, port_num);
2812}
2813
2814int ib_query_gid(struct ib_device *device,
2815                 u8 port_num, int index, union ib_gid *gid,
2816                 struct ib_gid_attr *attr);
2817
2818int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
2819                         int state);
2820int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
2821                     struct ifla_vf_info *info);
2822int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
2823                    struct ifla_vf_stats *stats);
2824int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
2825                   int type);
2826
2827int ib_query_pkey(struct ib_device *device,
2828                  u8 port_num, u16 index, u16 *pkey);
2829
2830int ib_modify_device(struct ib_device *device,
2831                     int device_modify_mask,
2832                     struct ib_device_modify *device_modify);
2833
2834int ib_modify_port(struct ib_device *device,
2835                   u8 port_num, int port_modify_mask,
2836                   struct ib_port_modify *port_modify);
2837
2838int ib_find_gid(struct ib_device *device, union ib_gid *gid,
2839                enum ib_gid_type gid_type, struct net_device *ndev,
2840                u8 *port_num, u16 *index);
2841
2842int ib_find_pkey(struct ib_device *device,
2843                 u8 port_num, u16 pkey, u16 *index);
2844
2845enum ib_pd_flags {
2846        /*
2847         * Create a memory registration for all memory in the system and place
2848         * the rkey for it into pd->unsafe_global_rkey.  This can be used by
2849         * ULPs to avoid the overhead of dynamic MRs.
2850         *
2851         * This flag is generally considered unsafe and must only be used in
2852 * extremely trusted environments.  Every use of it will log a warning
2853         * in the kernel log.
2854         */
2855        IB_PD_UNSAFE_GLOBAL_RKEY        = 0x01,
2856};
2857
2858struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
2859                const char *caller);
2860#define ib_alloc_pd(device, flags) \
2861        __ib_alloc_pd((device), (flags), __func__)
2862void ib_dealloc_pd(struct ib_pd *pd);
2863
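/*
 * Usage sketch (illustrative, not part of this header): a ULP typically
 * allocates one PD per device and frees it on teardown.  "dev" is assumed
 * to be a valid ib_device, e.g. from an ib_client add callback.
 *
 *        struct ib_pd *pd = ib_alloc_pd(dev, 0);
 *
 *        if (IS_ERR(pd))
 *                return PTR_ERR(pd);
 *        ...
 *        ib_dealloc_pd(pd);
 */
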
2864/**
2865 * rdma_create_ah - Creates an address handle for the given address vector.
2866 * @pd: The protection domain associated with the address handle.
2867 * @ah_attr: The attributes of the address vector.
2868 *
2869 * The address handle is used to reference a local or global destination
2870 * in all UD QP post sends.
2871 */
2872struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr);
2873
2874/**
2875 * rdma_create_user_ah - Creates an address handle for the given address vector.
2876 * It resolves the destination MAC address for an ah_attr of RoCE type.
2877 * @pd: The protection domain associated with the address handle.
2878 * @ah_attr: The attributes of the address vector.
2879 * @udata: pointer to the user's input/output buffer information needed
2880 *         by the provider driver.
2881 *
2882 * It returns the created address handle on success or an ERR_PTR on error.
2883 * The address handle is used to reference a local or global destination
2884 * in all UD QP post sends.
2885 */
2886struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
2887                                  struct rdma_ah_attr *ah_attr,
2888                                  struct ib_udata *udata);
2889/**
2890 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from a GRH or IPv4
2891 *   header.
2892 * @hdr: the L3 header to parse
2893 * @net_type: type of header to parse
2894 * @sgid: place to store source gid
2895 * @dgid: place to store destination gid
2896 */
2897int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
2898                              enum rdma_network_type net_type,
2899                              union ib_gid *sgid, union ib_gid *dgid);
2900
2901/**
2902 * ib_get_rdma_header_version - Get the header version
2903 * @hdr: the L3 header to parse
2904 */
2905int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
2906
2907/**
2908 * ib_init_ah_from_wc - Initializes address handle attributes from a
2909 *   work completion.
2910 * @device: Device on which the received message arrived.
2911 * @port_num: Port on which the received message arrived.
2912 * @wc: Work completion associated with the received message.
2913 * @grh: References the received global route header.  This parameter is
2914 *   ignored unless the work completion indicates that the GRH is valid.
2915 * @ah_attr: Returned attributes that can be used when creating an address
2916 *   handle for replying to the message.
2917 */
2918int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
2919                       const struct ib_wc *wc, const struct ib_grh *grh,
2920                       struct rdma_ah_attr *ah_attr);
2921
2922/**
2923 * ib_create_ah_from_wc - Creates an address handle associated with the
2924 *   sender of the specified work completion.
2925 * @pd: The protection domain associated with the address handle.
2926 * @wc: Work completion information associated with a received message.
2927 * @grh: References the received global route header.  This parameter is
2928 *   ignored unless the work completion indicates that the GRH is valid.
2929 * @port_num: The outbound port number to associate with the address.
2930 *
2931 * The address handle is used to reference a local or global destination
2932 * in all UD QP post sends.
2933 */
2934struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
2935                                   const struct ib_grh *grh, u8 port_num);
2936
2937/**
2938 * rdma_modify_ah - Modifies the address vector associated with an address
2939 *   handle.
2940 * @ah: The address handle to modify.
2941 * @ah_attr: The new address vector attributes to associate with the
2942 *   address handle.
2943 */
2944int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2945
2946/**
2947 * rdma_query_ah - Queries the address vector associated with an address
2948 *   handle.
2949 * @ah: The address handle to query.
2950 * @ah_attr: The address vector attributes associated with the address
2951 *   handle.
2952 */
2953int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2954
2955/**
2956 * rdma_destroy_ah - Destroys an address handle.
2957 * @ah: The address handle to destroy.
2958 */
2959int rdma_destroy_ah(struct ib_ah *ah);
2960
2961/**
2962 * ib_create_srq - Creates a SRQ associated with the specified protection
2963 *   domain.
2964 * @pd: The protection domain associated with the SRQ.
2965 * @srq_init_attr: A list of initial attributes required to create the
2966 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
2967 *   the actual capabilities of the created SRQ.
2968 *
2969 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
2970 * requested size of the SRQ, and set to the actual values allocated
2971 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
2972 * will always be at least as large as the requested values.
2973 */
2974struct ib_srq *ib_create_srq(struct ib_pd *pd,
2975                             struct ib_srq_init_attr *srq_init_attr);
2976
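/*
 * Usage sketch (illustrative): creating a basic SRQ with room for 256
 * receives of one scatter/gather entry each.  "pd" and the sizes are
 * assumptions for the example.
 *
 *        struct ib_srq_init_attr srq_init_attr = {
 *                .attr = { .max_wr = 256, .max_sge = 1 },
 *                .srq_type = IB_SRQT_BASIC,
 *        };
 *        struct ib_srq *srq = ib_create_srq(pd, &srq_init_attr);
 *
 *        if (IS_ERR(srq))
 *                return PTR_ERR(srq);
 */
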
2977/**
2978 * ib_modify_srq - Modifies the attributes for the specified SRQ.
2979 * @srq: The SRQ to modify.
2980 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
2981 *   the current values of selected SRQ attributes are returned.
2982 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
2983 *   are being modified.
2984 *
2985 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
2986 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
2987 * the number of receives queued drops below the limit.
2988 */
2989int ib_modify_srq(struct ib_srq *srq,
2990                  struct ib_srq_attr *srq_attr,
2991                  enum ib_srq_attr_mask srq_attr_mask);
2992
2993/**
2994 * ib_query_srq - Returns the attribute list and current values for the
2995 *   specified SRQ.
2996 * @srq: The SRQ to query.
2997 * @srq_attr: The attributes of the specified SRQ.
2998 */
2999int ib_query_srq(struct ib_srq *srq,
3000                 struct ib_srq_attr *srq_attr);
3001
3002/**
3003 * ib_destroy_srq - Destroys the specified SRQ.
3004 * @srq: The SRQ to destroy.
3005 */
3006int ib_destroy_srq(struct ib_srq *srq);
3007
3008/**
3009 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
3010 * @srq: The SRQ to post the work request on.
3011 * @recv_wr: A list of work requests to post on the receive queue.
3012 * @bad_recv_wr: On an immediate failure, this parameter will reference
3013 *   the work request that failed to be posted on the SRQ.
3014 */
3015static inline int ib_post_srq_recv(struct ib_srq *srq,
3016                                   struct ib_recv_wr *recv_wr,
3017                                   struct ib_recv_wr **bad_recv_wr)
3018{
3019        return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
3020}
3021
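/*
 * Usage sketch (illustrative): posting a single receive buffer to an SRQ.
 * "srq", "pd", "dma_addr" and "buf_len" are assumptions; the buffer is
 * expected to have been mapped with ib_dma_map_single() beforehand.
 *
 *        struct ib_sge sge = {
 *                .addr   = dma_addr,
 *                .length = buf_len,
 *                .lkey   = pd->local_dma_lkey,
 *        };
 *        struct ib_recv_wr wr = { .sg_list = &sge, .num_sge = 1 };
 *        struct ib_recv_wr *bad_wr;
 *        int ret = ib_post_srq_recv(srq, &wr, &bad_wr);
 */
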
3022/**
3023 * ib_create_qp - Creates a QP associated with the specified protection
3024 *   domain.
3025 * @pd: The protection domain associated with the QP.
3026 * @qp_init_attr: A list of initial attributes required to create the
3027 *   QP.  If QP creation succeeds, then the attributes are updated to
3028 *   the actual capabilities of the created QP.
3029 */
3030struct ib_qp *ib_create_qp(struct ib_pd *pd,
3031                           struct ib_qp_init_attr *qp_init_attr);
3032
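/*
 * Usage sketch (illustrative): creating an RC QP that shares one CQ for
 * sends and receives.  "pd", "cq" and the queue sizes are assumptions for
 * the example.
 *
 *        struct ib_qp_init_attr qp_init_attr = {
 *                .send_cq = cq,
 *                .recv_cq = cq,
 *                .qp_type = IB_QPT_RC,
 *                .cap = {
 *                        .max_send_wr  = 64,
 *                        .max_recv_wr  = 64,
 *                        .max_send_sge = 1,
 *                        .max_recv_sge = 1,
 *                },
 *        };
 *        struct ib_qp *qp = ib_create_qp(pd, &qp_init_attr);
 *
 *        if (IS_ERR(qp))
 *                return PTR_ERR(qp);
 */
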
3033/**
3034 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
3035 * @qp: The QP to modify.
3036 * @attr: On input, specifies the QP attributes to modify.  On output,
3037 *   the current values of selected QP attributes are returned.
3038 * @attr_mask: A bit-mask used to specify which attributes of the QP
3039 *   are being modified.
3040 * @udata: pointer to the user's input/output buffer information.
3041 *
3042 * It returns 0 on success and an appropriate error code on error.
3043 */
3044int ib_modify_qp_with_udata(struct ib_qp *qp,
3045                            struct ib_qp_attr *attr,
3046                            int attr_mask,
3047                            struct ib_udata *udata);
3048
3049/**
3050 * ib_modify_qp - Modifies the attributes for the specified QP and then
3051 *   transitions the QP to the given state.
3052 * @qp: The QP to modify.
3053 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
3054 *   the current values of selected QP attributes are returned.
3055 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
3056 *   are being modified.
3057 */
3058int ib_modify_qp(struct ib_qp *qp,
3059                 struct ib_qp_attr *qp_attr,
3060                 int qp_attr_mask);
3061
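/*
 * Usage sketch (illustrative): moving a freshly created QP to the INIT
 * state.  The pkey index, port number and access flags are assumptions;
 * CM-based ULPs normally let the RDMA CM drive these transitions instead.
 *
 *        struct ib_qp_attr attr = {
 *                .qp_state        = IB_QPS_INIT,
 *                .pkey_index      = 0,
 *                .port_num        = 1,
 *                .qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *        };
 *        int ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *                               IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */
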
3062/**
3063 * ib_query_qp - Returns the attribute list and current values for the
3064 *   specified QP.
3065 * @qp: The QP to query.
3066 * @qp_attr: The attributes of the specified QP.
3067 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
3068 * @qp_init_attr: Additional attributes of the selected QP.
3069 *
3070 * The qp_attr_mask may be used to limit the query to gathering only the
3071 * selected attributes.
3072 */
3073int ib_query_qp(struct ib_qp *qp,
3074                struct ib_qp_attr *qp_attr,
3075                int qp_attr_mask,
3076                struct ib_qp_init_attr *qp_init_attr);
3077
3078/**
3079 * ib_destroy_qp - Destroys the specified QP.
3080 * @qp: The QP to destroy.
3081 */
3082int ib_destroy_qp(struct ib_qp *qp);
3083
3084/**
3085 * ib_open_qp - Obtain a reference to an existing sharable QP.
3086 * @xrcd: XRC domain
3087 * @qp_open_attr: Attributes identifying the QP to open.
3088 *
3089 * Returns a reference to a sharable QP.
3090 */
3091struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3092                         struct ib_qp_open_attr *qp_open_attr);
3093
3094/**
3095 * ib_close_qp - Release an external reference to a QP.
3096 * @qp: The QP handle to release
3097 *
3098 * The opened QP handle is released by the caller.  The underlying
3099 * shared QP is not destroyed until all internal references are released.
3100 */
3101int ib_close_qp(struct ib_qp *qp);
3102
3103/**
3104 * ib_post_send - Posts a list of work requests to the send queue of
3105 *   the specified QP.
3106 * @qp: The QP to post the work request on.
3107 * @send_wr: A list of work requests to post on the send queue.
3108 * @bad_send_wr: On an immediate failure, this parameter will reference
3109 *   the work request that failed to be posted on the QP.
3110 *
3111 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
3112 * error is returned, the QP state shall not be affected,
3113 * ib_post_send() will return an immediate error after queueing any
3114 * earlier work requests in the list.
3115 */
3116static inline int ib_post_send(struct ib_qp *qp,
3117                               struct ib_send_wr *send_wr,
3118                               struct ib_send_wr **bad_send_wr)
3119{
3120        return qp->device->post_send(qp, send_wr, bad_send_wr);
3121}
3122
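/*
 * Usage sketch (illustrative): posting one signaled SEND of a previously
 * DMA-mapped buffer.  "qp", "pd", "dma_addr" and "len" are assumptions for
 * the example.
 *
 *        struct ib_sge sge = {
 *                .addr   = dma_addr,
 *                .length = len,
 *                .lkey   = pd->local_dma_lkey,
 *        };
 *        struct ib_send_wr wr = {
 *                .opcode     = IB_WR_SEND,
 *                .send_flags = IB_SEND_SIGNALED,
 *                .sg_list    = &sge,
 *                .num_sge    = 1,
 *        };
 *        struct ib_send_wr *bad_wr;
 *        int ret = ib_post_send(qp, &wr, &bad_wr);
 */
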
3123/**
3124 * ib_post_recv - Posts a list of work requests to the receive queue of
3125 *   the specified QP.
3126 * @qp: The QP to post the work request on.
3127 * @recv_wr: A list of work requests to post on the receive queue.
3128 * @bad_recv_wr: On an immediate failure, this parameter will reference
3129 *   the work request that failed to be posted on the QP.
3130 */
3131static inline int ib_post_recv(struct ib_qp *qp,
3132                               struct ib_recv_wr *recv_wr,
3133                               struct ib_recv_wr **bad_recv_wr)
3134{
3135        return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
3136}
3137
3138struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
3139                int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx);
3140void ib_free_cq(struct ib_cq *cq);
3141int ib_process_cq_direct(struct ib_cq *cq, int budget);
3142
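/*
 * Usage sketch (illustrative): ib_alloc_cq() is the preferred interface
 * for kernel consumers; completions are dispatched to the ib_cqe "done"
 * callback of each work request.  "dev", "ctx" and the sizes below are
 * assumptions for the example.
 *
 *        struct ib_cq *cq = ib_alloc_cq(dev, ctx, 128, 0, IB_POLL_SOFTIRQ);
 *
 *        if (IS_ERR(cq))
 *                return PTR_ERR(cq);
 *        ...
 *        ib_free_cq(cq);
 */
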
3143/**
3144 * ib_create_cq - Creates a CQ on the specified device.
3145 * @device: The device on which to create the CQ.
3146 * @comp_handler: A user-specified callback that is invoked when a
3147 *   completion event occurs on the CQ.
3148 * @event_handler: A user-specified callback that is invoked when an
3149 *   asynchronous event not associated with a completion occurs on the CQ.
3150 * @cq_context: Context associated with the CQ returned to the user via
3151 *   the associated completion and event handlers.
3152 * @cq_attr: The attributes with which the CQ should be created.
3153 *
3154 * Users can examine the cq structure to determine the actual CQ size.
3155 */
3156struct ib_cq *ib_create_cq(struct ib_device *device,
3157                           ib_comp_handler comp_handler,
3158                           void (*event_handler)(struct ib_event *, void *),
3159                           void *cq_context,
3160                           const struct ib_cq_init_attr *cq_attr);
3161
3162/**
3163 * ib_resize_cq - Modifies the capacity of the CQ.
3164 * @cq: The CQ to resize.
3165 * @cqe: The minimum size of the CQ.
3166 *
3167 * Users can examine the cq structure to determine the actual CQ size.
3168 */
3169int ib_resize_cq(struct ib_cq *cq, int cqe);
3170
3171/**
3172 * rdma_set_cq_moderation - Modifies moderation params of the CQ
3173 * @cq: The CQ to modify.
3174 * @cq_count: number of CQEs that will trigger an event
3175 * @cq_period: max period of time in usec before triggering an event
3176 *
3177 */
3178int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
3179
3180/**
3181 * ib_destroy_cq - Destroys the specified CQ.
3182 * @cq: The CQ to destroy.
3183 */
3184int ib_destroy_cq(struct ib_cq *cq);
3185
3186/**
3187 * ib_poll_cq - poll a CQ for completion(s)
3188 * @cq:the CQ being polled
3189 * @num_entries:maximum number of completions to return
3190 * @wc:array of at least @num_entries &struct ib_wc where completions
3191 *   will be returned
3192 *
3193 * Poll a CQ for (possibly multiple) completions.  If the return value
3194 * is < 0, an error occurred.  If the return value is >= 0, it is the
3195 * number of completions returned.  If the return value is
3196 * non-negative and < num_entries, then the CQ was emptied.
3197 */
3198static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
3199                             struct ib_wc *wc)
3200{
3201        return cq->device->poll_cq(cq, num_entries, wc);
3202}
3203
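/*
 * Usage sketch (illustrative): draining up to 16 completions at a time
 * from a directly polled CQ.  "cq" and the "handle_wc" consumer helper
 * are assumptions for the example.
 *
 *        struct ib_wc wc[16];
 *        int i, n;
 *
 *        while ((n = ib_poll_cq(cq, 16, wc)) > 0)
 *                for (i = 0; i < n; i++)
 *                        handle_wc(&wc[i]);
 */
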
3204/**
3205 * ib_peek_cq - Returns the number of unreaped completions currently
3206 *   on the specified CQ.
3207 * @cq: The CQ to peek.
3208 * @wc_cnt: A minimum number of unreaped completions to check for.
3209 *
3210 * If the number of unreaped completions is greater than or equal to wc_cnt,
3211 * this function returns wc_cnt, otherwise, it returns the actual number of
3212 * unreaped completions.
3213 */
3214int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
3215
3216/**
3217 * ib_req_notify_cq - Request completion notification on a CQ.
3218 * @cq: The CQ to generate an event for.
3219 * @flags:
3220 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
3221 *   to request an event on the next solicited event or next work
3222 *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
3223 *   may also be |ed in to request a hint about missed events, as
3224 *   described below.
3225 *
3226 * Return Value:
3227 *    < 0 means an error occurred while requesting notification
3228 *   == 0 means notification was requested successfully, and if
3229 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
3230 *        were missed and it is safe to wait for another event.  In
3231 *        this case it is guaranteed that any work completions added
3232 *        to the CQ since the last CQ poll will trigger a completion
3233 *        notification event.
3234 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
3235 *        in.  It means that the consumer must poll the CQ again to
3236 *        make sure it is empty to avoid missing an event because of a
3237 *        race between requesting notification and an entry being
3238 *        added to the CQ.  This return value means it is possible
3239 *        (but not guaranteed) that a work completion has been added
3240 *        to the CQ since the last poll without triggering a
3241 *        completion notification event.
3242 */
3243static inline int ib_req_notify_cq(struct ib_cq *cq,
3244                                   enum ib_cq_notify_flags flags)
3245{
3246        return cq->device->req_notify_cq(cq, flags);
3247}
3248
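/*
 * Usage sketch (illustrative): the canonical race-free poll/arm loop built
 * on the IB_CQ_REPORT_MISSED_EVENTS hint described above.  "cq" and the
 * "handle_wc" consumer helper are assumptions for the example.
 *
 *        do {
 *                struct ib_wc wc;
 *
 *                while (ib_poll_cq(cq, 1, &wc) > 0)
 *                        handle_wc(&wc);
 *        } while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *                                  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */
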
3249/**
3250 * ib_req_ncomp_notif - Request completion notification when there are
3251 *   at least the specified number of unreaped completions on the CQ.
3252 * @cq: The CQ to generate an event for.
3253 * @wc_cnt: The number of unreaped completions that should be on the
3254 *   CQ before an event is generated.
3255 */
3256static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
3257{
3258        return cq->device->req_ncomp_notif ?
3259                cq->device->req_ncomp_notif(cq, wc_cnt) :
3260                -ENOSYS;
3261}
3262
3263/**
3264 * ib_dma_mapping_error - check a DMA addr for error
3265 * @dev: The device for which the dma_addr was created
3266 * @dma_addr: The DMA address to check
3267 */
3268static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
3269{
3270        return dma_mapping_error(dev->dma_device, dma_addr);
3271}
3272
3273/**
3274 * ib_dma_map_single - Map a kernel virtual address to DMA address
3275 * @dev: The device for which the dma_addr is to be created
3276 * @cpu_addr: The kernel virtual address
3277 * @size: The size of the region in bytes
3278 * @direction: The direction of the DMA
3279 */
3280static inline u64 ib_dma_map_single(struct ib_device *dev,
3281                                    void *cpu_addr, size_t size,
3282                                    enum dma_data_direction direction)
3283{
3284        return dma_map_single(dev->dma_device, cpu_addr, size, direction);
3285}
3286
3287/**
3288 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
3289 * @dev: The device for which the DMA address was created
3290 * @addr: The DMA address
3291 * @size: The size of the region in bytes
3292 * @direction: The direction of the DMA
3293 */
3294static inline void ib_dma_unmap_single(struct ib_device *dev,
3295                                       u64 addr, size_t size,
3296                                       enum dma_data_direction direction)
3297{
3298        dma_unmap_single(dev->dma_device, addr, size, direction);
3299}
3300
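/*
 * Usage sketch (illustrative): mapping a kernel buffer for a send, with
 * the mandatory error check.  "dev", "buf" and "len" are assumptions for
 * the example.
 *
 *        u64 dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *        if (ib_dma_mapping_error(dev, dma_addr))
 *                return -ENOMEM;
 *        ...
 *        ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
 */
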
3301/**
3302 * ib_dma_map_page - Map a physical page to DMA address
3303 * @dev: The device for which the dma_addr is to be created
3304 * @page: The page to be mapped
3305 * @offset: The offset within the page
3306 * @size: The size of the region in bytes
3307 * @direction: The direction of the DMA
3308 */
3309static inline u64 ib_dma_map_page(struct ib_device *dev,
3310                                  struct page *page,
3311                                  unsigned long offset,
3312                                  size_t size,
3313                                  enum dma_data_direction direction)
3314{
3315        return dma_map_page(dev->dma_device, page, offset, size, direction);
3316}
3317
3318/**
3319 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
3320 * @dev: The device for which the DMA address was created
3321 * @addr: The DMA address
3322 * @size: The size of the region in bytes
3323 * @direction: The direction of the DMA
3324 */
3325static inline void ib_dma_unmap_page(struct ib_device *dev,
3326                                     u64 addr, size_t size,
3327                                     enum dma_data_direction direction)
3328{
3329        dma_unmap_page(dev->dma_device, addr, size, direction);
3330}
3331
3332/**
3333 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
3334 * @dev: The device for which the DMA addresses are to be created
3335 * @sg: The array of scatter/gather entries
3336 * @nents: The number of scatter/gather entries
3337 * @direction: The direction of the DMA
3338 */
3339static inline int ib_dma_map_sg(struct ib_device *dev,
3340                                struct scatterlist *sg, int nents,
3341                                enum dma_data_direction direction)
3342{
3343        return dma_map_sg(dev->dma_device, sg, nents, direction);
3344}
3345
3346/**
3347 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
3348 * @dev: The device for which the DMA addresses were created
3349 * @sg: The array of scatter/gather entries
3350 * @nents: The number of scatter/gather entries
3351 * @direction: The direction of the DMA
3352 */
3353static inline void ib_dma_unmap_sg(struct ib_device *dev,
3354                                   struct scatterlist *sg, int nents,
3355                                   enum dma_data_direction direction)
3356{
3357        dma_unmap_sg(dev->dma_device, sg, nents, direction);
3358}
3359
3360static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
3361                                      struct scatterlist *sg, int nents,
3362                                      enum dma_data_direction direction,
3363                                      unsigned long dma_attrs)
3364{
3365        return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
3366                                dma_attrs);
3367}
3368
3369static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
3370                                         struct scatterlist *sg, int nents,
3371                                         enum dma_data_direction direction,
3372                                         unsigned long dma_attrs)
3373{
3374        dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
3375}

3376/**
3377 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
3378 * @dev: The device for which the DMA addresses were created
3379 * @sg: The scatter/gather entry
3380 *
3381 * Note: this function is obsolete. To do: change all occurrences of
3382 * ib_sg_dma_address() into sg_dma_address().
3383 */
3384static inline u64 ib_sg_dma_address(struct ib_device *dev,
3385                                    struct scatterlist *sg)
3386{
3387        return sg_dma_address(sg);
3388}
3389
3390/**
3391 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
3392 * @dev: The device for which the DMA addresses were created
3393 * @sg: The scatter/gather entry
3394 *
3395 * Note: this function is obsolete. To do: change all occurrences of
3396 * ib_sg_dma_len() into sg_dma_len().
3397 */
3398static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
3399                                         struct scatterlist *sg)
3400{
3401        return sg_dma_len(sg);
3402}
3403
3404/**
3405 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
3406 * @dev: The device for which the DMA address was created
3407 * @addr: The DMA address
3408 * @size: The size of the region in bytes
3409 * @dir: The direction of the DMA
3410 */
3411static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
3412                                              u64 addr,
3413                                              size_t size,
3414                                              enum dma_data_direction dir)
3415{
3416        dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
3417}
3418
3419/**
3420 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
3421 * @dev: The device for which the DMA address was created
3422 * @addr: The DMA address
3423 * @size: The size of the region in bytes
3424 * @dir: The direction of the DMA
3425 */
3426static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
3427                                                 u64 addr,
3428                                                 size_t size,
3429                                                 enum dma_data_direction dir)
3430{
3431        dma_sync_single_for_device(dev->dma_device, addr, size, dir);
3432}
3433
3434/**
3435 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
3436 * @dev: The device for which the DMA address is requested
3437 * @size: The size of the region to allocate in bytes
3438 * @dma_handle: A pointer for returning the DMA address of the region
3439 * @flag: memory allocator flags
3440 */
3441static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
3442                                           size_t size,
3443                                           dma_addr_t *dma_handle,
3444                                           gfp_t flag)
3445{
3446        return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
3447}
3448
3449/**
3450 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
3451 * @dev: The device for which the DMA addresses were allocated
3452 * @size: The size of the region
3453 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
3454 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
3455 */
3456static inline void ib_dma_free_coherent(struct ib_device *dev,
3457                                        size_t size, void *cpu_addr,
3458                                        dma_addr_t dma_handle)
3459{
3460        dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
3461}
3462
3463/**
3464 * ib_dereg_mr - Deregisters a memory region and removes it from the
3465 *   HCA translation table.
3466 * @mr: The memory region to deregister.
3467 *
3468 * This function can fail, if the memory region has memory windows bound to it.
3469 */
3470int ib_dereg_mr(struct ib_mr *mr);
3471
3472struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
3473                          enum ib_mr_type mr_type,
3474                          u32 max_num_sg);
3475
3476/**
3477 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
3478 *   R_Key and L_Key.
3479 * @mr: struct ib_mr pointer to be updated.
3480 * @newkey: new key to be used.
3481 */
3482static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
3483{
3484        mr->lkey = (mr->lkey & 0xffffff00) | newkey;
3485        mr->rkey = (mr->rkey & 0xffffff00) | newkey;
3486}
3487
3488/**
3489 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
3490 * for calculating a new rkey for type 2 memory windows.
3491 * @rkey: the rkey to increment.
3492 */
3493static inline u32 ib_inc_rkey(u32 rkey)
3494{
3495        const u32 mask = 0x000000ff;
3496        return ((rkey + 1) & mask) | (rkey & ~mask);
3497}
3498
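/*
 * Usage sketch (illustrative): ULPs commonly bump the key portion before
 * reusing a fast-reg MR so that stale accesses through the old rkey are
 * fenced off.  "mr" is assumed to be a fast-reg MR from ib_alloc_mr();
 * the truncation of the incremented rkey to its low key byte is
 * intentional.
 *
 *        ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
 */
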
3499/**
3500 * ib_alloc_fmr - Allocates an unmapped fast memory region.
3501 * @pd: The protection domain associated with the unmapped region.
3502 * @mr_access_flags: Specifies the memory access rights.
3503 * @fmr_attr: Attributes of the unmapped region.
3504 *
3505 * A fast memory region must be mapped before it can be used as part of
3506 * a work request.
3507 */
3508struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
3509                            int mr_access_flags,
3510                            struct ib_fmr_attr *fmr_attr);
3511
3512/**
3513 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
3514 * @fmr: The fast memory region to associate with the pages.
3515 * @page_list: An array of physical pages to map to the fast memory region.
3516 * @list_len: The number of pages in page_list.
3517 * @iova: The I/O virtual address to use with the mapped region.
3518 */
3519static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
3520                                  u64 *page_list, int list_len,
3521                                  u64 iova)
3522{
3523        return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
3524}
3525
3526/**
3527 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
3528 * @fmr_list: A linked list of fast memory regions to unmap.
3529 */
3530int ib_unmap_fmr(struct list_head *fmr_list);
3531
3532/**
3533 * ib_dealloc_fmr - Deallocates a fast memory region.
3534 * @fmr: The fast memory region to deallocate.
3535 */
3536int ib_dealloc_fmr(struct ib_fmr *fmr);
3537
3538/**
3539 * ib_attach_mcast - Attaches the specified QP to a multicast group.
3540 * @qp: QP to attach to the multicast group.  The QP must be type
3541 *   IB_QPT_UD.
3542 * @gid: Multicast group GID.
3543 * @lid: Multicast group LID in host byte order.
3544 *
3545 * In order to send and receive multicast packets, subnet
3546 * administration must have created the multicast group and configured
3547 * the fabric appropriately.  The port associated with the specified
3548 * QP must also be a member of the multicast group.
3549 */
3550int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
3551
3552/**
3553 * ib_detach_mcast - Detaches the specified QP from a multicast group.
3554 * @qp: QP to detach from the multicast group.
3555 * @gid: Multicast group GID.
3556 * @lid: Multicast group LID in host byte order.
3557 */
3558int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
3559
3560/**
3561 * ib_alloc_xrcd - Allocates an XRC domain.
3562 * @device: The device on which to allocate the XRC domain.
3563 */
3564struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);
3565
3566/**
3567 * ib_dealloc_xrcd - Deallocates an XRC domain.
3568 * @xrcd: The XRC domain to deallocate.
3569 */
3570int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
3571
3572struct ib_flow *ib_create_flow(struct ib_qp *qp,
3573                               struct ib_flow_attr *flow_attr, int domain);
3574int ib_destroy_flow(struct ib_flow *flow_id);
3575
3576static inline int ib_check_mr_access(int flags)
3577{
3578        /*
3579         * Local write permission is required if remote write or
3580         * remote atomic permission is also requested.
3581         */
3582        if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
3583            !(flags & IB_ACCESS_LOCAL_WRITE))
3584                return -EINVAL;
3585
3586        return 0;
3587}
3588
3589/**
3590 * ib_check_mr_status - lightweight check of MR status.
3591 *     This routine may provide status checks on a selected
3592 *     ib_mr.  The first use is for signature status checks.
3593 *
3594 * @mr: A memory region.
3595 * @check_mask: Bitmask of which checks to perform from
3596 *     ib_mr_status_check enumeration.
3597 * @mr_status: The container of relevant status checks.
3598 *     Failed checks will be indicated in the status bitmask
3599 *     and the relevant info will be in the error item.
3600 */
3601int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
3602                       struct ib_mr_status *mr_status);
3603
3604struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
3605                                            u16 pkey, const union ib_gid *gid,
3606                                            const struct sockaddr *addr);
3607struct ib_wq *ib_create_wq(struct ib_pd *pd,
3608                           struct ib_wq_init_attr *init_attr);
3609int ib_destroy_wq(struct ib_wq *wq);
3610int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
3611                 u32 wq_attr_mask);
3612struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
3613                                                 struct ib_rwq_ind_table_init_attr*
3614                                                 wq_ind_table_init_attr);
3615int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
3616
3617int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
3618                 unsigned int *sg_offset, unsigned int page_size);
3619
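/*
 * Usage sketch (illustrative): mapping a scatterlist onto a fast-reg MR
 * before building an IB_WR_REG_MR work request.  "pd", "sg" and "sg_nents"
 * are assumptions; a short return means the MR could not cover the whole
 * list.
 *
 *        struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 32);
 *        int n;
 *
 *        if (IS_ERR(mr))
 *                return PTR_ERR(mr);
 *        n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
 *        if (n < sg_nents)
 *                ...
 */
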
3620static inline int
3621ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
3622                  unsigned int *sg_offset, unsigned int page_size)
3623{
3624        int n;
3625
3626        n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
3627        mr->iova = 0;
3628
3629        return n;
3630}
3631
3632int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
3633                unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
3634
3635void ib_drain_rq(struct ib_qp *qp);
3636void ib_drain_sq(struct ib_qp *qp);
3637void ib_drain_qp(struct ib_qp *qp);
3638
3639int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);
3640
3641static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
3642{
3643        if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
3644                return attr->roce.dmac;
3645        return NULL;
3646}
3647
3648static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
3649{
3650        if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3651                attr->ib.dlid = (u16)dlid;
3652        else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3653                attr->opa.dlid = dlid;
3654}
3655
3656static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
3657{
3658        if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3659                return attr->ib.dlid;
3660        else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3661                return attr->opa.dlid;
3662        return 0;
3663}
3664
3665static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
3666{
3667        attr->sl = sl;
3668}
3669
3670static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
3671{
3672        return attr->sl;
3673}
3674
3675static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
3676                                         u8 src_path_bits)
3677{
3678        if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3679                attr->ib.src_path_bits = src_path_bits;
3680        else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3681                attr->opa.src_path_bits = src_path_bits;
3682}
3683
3684static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
3685{
3686        if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3687                return attr->ib.src_path_bits;
3688        else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3689                return attr->opa.src_path_bits;
3690        return 0;
3691}
3692
3693static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
3694                                        bool make_grd)
3695{
3696        if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3697                attr->opa.make_grd = make_grd;
3698}
3699
3700static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
3701{
3702        if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3703                return attr->opa.make_grd;
3704        return false;
3705}
3706
3707static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
3708{
3709        attr->port_num = port_num;
3710}
3711
3712static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
3713{
3714        return attr->port_num;
3715}
3716
3717static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
3718                                           u8 static_rate)
3719{
3720        attr->static_rate = static_rate;
3721}
3722
3723static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
3724{
3725        return attr->static_rate;
3726}
3727
3728static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
3729                                        enum ib_ah_flags flag)
3730{
3731        attr->ah_flags = flag;
3732}
3733
3734static inline enum ib_ah_flags
3735                rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
3736{
3737        return attr->ah_flags;
3738}
3739
3740static inline const struct ib_global_route
3741                *rdma_ah_read_grh(const struct rdma_ah_attr *attr)
3742{
3743        return &attr->grh;
3744}
3745
3746/* To retrieve and modify the GRH */
3747static inline struct ib_global_route
3748                *rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
3749{
3750        return &attr->grh;
3751}
3752
3753static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
3754{
3755        struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
3756
3757        memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
3758}
3759
3760static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
3761                                             __be64 prefix)
3762{
3763        struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
3764
3765        grh->dgid.global.subnet_prefix = prefix;
3766}
3767
3768static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
3769                                            __be64 if_id)
3770{
3771        struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
3772
3773        grh->dgid.global.interface_id = if_id;
3774}
3775
3776static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
3777                                   union ib_gid *dgid, u32 flow_label,
3778                                   u8 sgid_index, u8 hop_limit,
3779                                   u8 traffic_class)
3780{
3781        struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
3782
3783        attr->ah_flags = IB_AH_GRH;
3784        if (dgid)
3785                grh->dgid = *dgid;
3786        grh->flow_label = flow_label;
3787        grh->sgid_index = sgid_index;
3788        grh->hop_limit = hop_limit;
3789        grh->traffic_class = traffic_class;
3790}
3791
3792/* Get the AH type */
3793static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
3794                                                       u32 port_num)
3795{
3796        if ((rdma_protocol_roce(dev, port_num)) ||
3797            (rdma_protocol_iwarp(dev, port_num)))
3798                return RDMA_AH_ATTR_TYPE_ROCE;
3799        else if ((rdma_protocol_ib(dev, port_num)) &&
3800                 (rdma_cap_opa_ah(dev, port_num)))
3801                return RDMA_AH_ATTR_TYPE_OPA;
3802        else
3803                return RDMA_AH_ATTR_TYPE_IB;
3804}
3805
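/*
 * Usage sketch (illustrative): filling a rdma_ah_attr by hand with the
 * accessors above.  "dev", "port_num", "dgid" and "sgid_index" are
 * assumptions; most consumers get these values from a path record or the
 * RDMA CM rather than hard-coding them.
 *
 *        struct rdma_ah_attr ah_attr = { };
 *
 *        ah_attr.type = rdma_ah_find_type(dev, port_num);
 *        rdma_ah_set_port_num(&ah_attr, port_num);
 *        rdma_ah_set_sl(&ah_attr, 0);
 *        rdma_ah_set_grh(&ah_attr, &dgid, 0, sgid_index, 64, 0);
 */
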
3806/**
3807 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
3808 *     In the current implementation the only way to
3809 *     get the 32bit lid is from other sources for OPA.
3810 *     For IB, lids will always be 16bits so cast the
3811 *     value accordingly.
3812 *
3813 * @lid: A 32bit LID
3814 */
3815static inline u16 ib_lid_cpu16(u32 lid)
3816{
3817        WARN_ON_ONCE(lid & 0xFFFF0000);
3818        return (u16)lid;
3819}
3820
3821/**
3822 * ib_lid_be16 - Return lid in 16bit BE encoding.
3823 *
3824 * @lid: A 32bit LID
3825 */
3826static inline __be16 ib_lid_be16(u32 lid)
3827{
3828        WARN_ON_ONCE(lid & 0xFFFF0000);
3829        return cpu_to_be16((u16)lid);
3830}
3831
3832/**
3833 * ib_get_vector_affinity - Get the affinity mappings of a given completion
3834 *   vector
3835 * @device:         the rdma device
3836 * @comp_vector:    index of completion vector
3837 *
3838 * Returns NULL on failure, otherwise a corresponding cpu map of the
3839 * completion vector (returns all-cpus map if the device driver doesn't
3840 * implement get_vector_affinity).
3841 */
3842static inline const struct cpumask *
3843ib_get_vector_affinity(struct ib_device *device, int comp_vector)
3844{
3845        if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
3846            !device->get_vector_affinity)
3847                return NULL;
3848
3849        return device->get_vector_affinity(device, comp_vector);
3850}
3852
3853#endif /* IB_VERBS_H */
3854