linux/include/rdma/ib_verbs.h
/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/socket.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/netdevice.h>

#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <uapi/rdma/ib_user_verbs.h>

#define IB_FW_VERSION_NAME_MAX  ETHTOOL_FWVERS_LEN

extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;

union ib_gid {
        u8      raw[16];
        struct {
                __be64  subnet_prefix;
                __be64  interface_id;
        } global;
};

extern union ib_gid zgid;

enum ib_gid_type {
        /* If link layer is Ethernet, this is RoCE V1 */
        IB_GID_TYPE_IB        = 0,
        IB_GID_TYPE_ROCE      = 0,
        IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
        IB_GID_TYPE_SIZE
};

#define ROCE_V2_UDP_DPORT      4791
struct ib_gid_attr {
        enum ib_gid_type        gid_type;
        struct net_device       *ndev;
};

enum rdma_node_type {
        /* IB values map to NodeInfo:NodeType. */
        RDMA_NODE_IB_CA         = 1,
        RDMA_NODE_IB_SWITCH,
        RDMA_NODE_IB_ROUTER,
        RDMA_NODE_RNIC,
        RDMA_NODE_USNIC,
        RDMA_NODE_USNIC_UDP,
};

enum {
        /* set the locally administered indication */
        IB_SA_WELL_KNOWN_GUID   = BIT_ULL(57) | 2,
};

enum rdma_transport_type {
        RDMA_TRANSPORT_IB,
        RDMA_TRANSPORT_IWARP,
        RDMA_TRANSPORT_USNIC,
        RDMA_TRANSPORT_USNIC_UDP
};

enum rdma_protocol_type {
        RDMA_PROTOCOL_IB,
        RDMA_PROTOCOL_IBOE,
        RDMA_PROTOCOL_IWARP,
        RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type);

enum rdma_network_type {
        RDMA_NETWORK_IB,
        RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
        RDMA_NETWORK_IPV4,
        RDMA_NETWORK_IPV6
};

static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
{
        if (network_type == RDMA_NETWORK_IPV4 ||
            network_type == RDMA_NETWORK_IPV6)
                return IB_GID_TYPE_ROCE_UDP_ENCAP;

        /* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
        return IB_GID_TYPE_IB;
}

static inline enum rdma_network_type ib_gid_to_network_type(enum ib_gid_type gid_type,
                                                            union ib_gid *gid)
{
        if (gid_type == IB_GID_TYPE_IB)
                return RDMA_NETWORK_IB;

        if (ipv6_addr_v4mapped((struct in6_addr *)gid))
                return RDMA_NETWORK_IPV4;
        else
                return RDMA_NETWORK_IPV6;
}
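
/*
 * Usage sketch (illustrative, not part of the original header): classifying
 * a GID by the two helpers above.  RDMA_NETWORK_IB covers both native IB
 * and RoCE v1; the RoCE v2 GID types map to the IPv4/IPv6 network types.
 * The helper name below is hypothetical:
 *
 *      static bool gid_is_roce_v2(struct ib_gid_attr *attr, union ib_gid *gid)
 *      {
 *              return ib_gid_to_network_type(attr->gid_type, gid) !=
 *                     RDMA_NETWORK_IB;
 *      }
 */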

enum rdma_link_layer {
        IB_LINK_LAYER_UNSPECIFIED,
        IB_LINK_LAYER_INFINIBAND,
        IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
        IB_DEVICE_RESIZE_MAX_WR                 = (1 << 0),
        IB_DEVICE_BAD_PKEY_CNTR                 = (1 << 1),
        IB_DEVICE_BAD_QKEY_CNTR                 = (1 << 2),
        IB_DEVICE_RAW_MULTI                     = (1 << 3),
        IB_DEVICE_AUTO_PATH_MIG                 = (1 << 4),
        IB_DEVICE_CHANGE_PHY_PORT               = (1 << 5),
        IB_DEVICE_UD_AV_PORT_ENFORCE            = (1 << 6),
        IB_DEVICE_CURR_QP_STATE_MOD             = (1 << 7),
        IB_DEVICE_SHUTDOWN_PORT                 = (1 << 8),
        /* Not in use, former INIT_TYPE         = (1 << 9),*/
        IB_DEVICE_PORT_ACTIVE_EVENT             = (1 << 10),
        IB_DEVICE_SYS_IMAGE_GUID                = (1 << 11),
        IB_DEVICE_RC_RNR_NAK_GEN                = (1 << 12),
        IB_DEVICE_SRQ_RESIZE                    = (1 << 13),
        IB_DEVICE_N_NOTIFY_CQ                   = (1 << 14),

        /*
         * This device supports a per-device lkey or stag that can be
         * used without performing a memory registration for the local
         * memory.  Note that ULPs should never check this flag, but
         * instead use the local_dma_lkey flag in the ib_pd structure,
         * which will always contain a usable lkey.
         */
        IB_DEVICE_LOCAL_DMA_LKEY                = (1 << 15),
        /* Reserved, old SEND_W_INV             = (1 << 16),*/
        IB_DEVICE_MEM_WINDOW                    = (1 << 17),
        /*
         * Devices should set IB_DEVICE_UD_IP_SUM if they support
         * insertion of UDP and TCP checksums on outgoing UD IPoIB
         * messages and can verify the validity of checksums for
         * incoming messages.  Setting this flag implies that the
         * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
         */
        IB_DEVICE_UD_IP_CSUM                    = (1 << 18),
        IB_DEVICE_UD_TSO                        = (1 << 19),
        IB_DEVICE_XRC                           = (1 << 20),

        /*
         * This device supports the IB "base memory management extension",
         * which includes support for fast registrations (IB_WR_REG_MR,
         * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
         * also be set by any iWarp device which must support FRs to comply
         * with the iWarp verbs spec.  iWarp devices also support the
         * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
         * stag.
         */
        IB_DEVICE_MEM_MGT_EXTENSIONS            = (1 << 21),
        IB_DEVICE_BLOCK_MULTICAST_LOOPBACK      = (1 << 22),
        IB_DEVICE_MEM_WINDOW_TYPE_2A            = (1 << 23),
        IB_DEVICE_MEM_WINDOW_TYPE_2B            = (1 << 24),
        IB_DEVICE_RC_IP_CSUM                    = (1 << 25),
        /* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
        IB_DEVICE_RAW_IP_CSUM                   = (1 << 26),
        /*
         * Devices should set IB_DEVICE_CROSS_CHANNEL if they
         * support execution of WQEs that involve synchronization
         * of I/O operations with a single completion queue managed
         * by hardware.
         */
        IB_DEVICE_CROSS_CHANNEL                 = (1 << 27),
        IB_DEVICE_MANAGED_FLOW_STEERING         = (1 << 29),
        IB_DEVICE_SIGNATURE_HANDOVER            = (1 << 30),
        IB_DEVICE_ON_DEMAND_PAGING              = (1ULL << 31),
        IB_DEVICE_SG_GAPS_REG                   = (1ULL << 32),
        IB_DEVICE_VIRTUAL_FUNCTION              = (1ULL << 33),
        /* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
        IB_DEVICE_RAW_SCATTER_FCS               = (1ULL << 34),
        IB_DEVICE_RDMA_NETDEV_OPA_VNIC          = (1ULL << 35),
};
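
/*
 * Usage sketch (illustrative): a ULP testing device_cap_flags before using
 * fast registration.  The field is 64 bits wide (see ib_device_attr below),
 * which is why the flags from bit 31 up are defined as 1ULL shifts.  The
 * helper name is hypothetical:
 *
 *      static bool dev_supports_frwr(struct ib_device_attr *attr)
 *      {
 *              return !!(attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS);
 *      }
 */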

enum ib_signature_prot_cap {
        IB_PROT_T10DIF_TYPE_1 = 1,
        IB_PROT_T10DIF_TYPE_2 = 1 << 1,
        IB_PROT_T10DIF_TYPE_3 = 1 << 2,
};

enum ib_signature_guard_cap {
        IB_GUARD_T10DIF_CRC     = 1,
        IB_GUARD_T10DIF_CSUM    = 1 << 1,
};

enum ib_atomic_cap {
        IB_ATOMIC_NONE,
        IB_ATOMIC_HCA,
        IB_ATOMIC_GLOB
};

enum ib_odp_general_cap_bits {
        IB_ODP_SUPPORT          = 1 << 0,
        IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
};

enum ib_odp_transport_cap_bits {
        IB_ODP_SUPPORT_SEND     = 1 << 0,
        IB_ODP_SUPPORT_RECV     = 1 << 1,
        IB_ODP_SUPPORT_WRITE    = 1 << 2,
        IB_ODP_SUPPORT_READ     = 1 << 3,
        IB_ODP_SUPPORT_ATOMIC   = 1 << 4,
};

struct ib_odp_caps {
        uint64_t general_caps;
        struct {
                uint32_t  rc_odp_caps;
                uint32_t  uc_odp_caps;
                uint32_t  ud_odp_caps;
        } per_transport_caps;
};

struct ib_rss_caps {
        /* Corresponding bit will be set if qp type from
         * 'enum ib_qp_type' is supported, e.g.
         * supported_qpts |= 1 << IB_QPT_UD
         */
        u32 supported_qpts;
        u32 max_rwq_indirection_tables;
        u32 max_rwq_indirection_table_size;
};

enum ib_tm_cap_flags {
        /* Support tag matching on RC transport */
        IB_TM_CAP_RC                = 1 << 0,
};

struct ib_tm_caps {
        /* Max size of RNDV header */
        u32 max_rndv_hdr_size;
        /* Max number of entries in tag matching list */
        u32 max_num_tags;
        /* From enum ib_tm_cap_flags */
        u32 flags;
        /* Max number of outstanding list operations */
        u32 max_ops;
        /* Max number of SGE in tag matching entry */
        u32 max_sge;
};

enum ib_cq_creation_flags {
        IB_CQ_FLAGS_TIMESTAMP_COMPLETION   = 1 << 0,
        IB_CQ_FLAGS_IGNORE_OVERRUN         = 1 << 1,
};

struct ib_cq_init_attr {
        unsigned int    cqe;
        int             comp_vector;
        u32             flags;
};

struct ib_device_attr {
        u64                     fw_ver;
        __be64                  sys_image_guid;
        u64                     max_mr_size;
        u64                     page_size_cap;
        u32                     vendor_id;
        u32                     vendor_part_id;
        u32                     hw_ver;
        int                     max_qp;
        int                     max_qp_wr;
        u64                     device_cap_flags;
        int                     max_sge;
        int                     max_sge_rd;
        int                     max_cq;
        int                     max_cqe;
        int                     max_mr;
        int                     max_pd;
        int                     max_qp_rd_atom;
        int                     max_ee_rd_atom;
        int                     max_res_rd_atom;
        int                     max_qp_init_rd_atom;
        int                     max_ee_init_rd_atom;
        enum ib_atomic_cap      atomic_cap;
        enum ib_atomic_cap      masked_atomic_cap;
        int                     max_ee;
        int                     max_rdd;
        int                     max_mw;
        int                     max_raw_ipv6_qp;
        int                     max_raw_ethy_qp;
        int                     max_mcast_grp;
        int                     max_mcast_qp_attach;
        int                     max_total_mcast_qp_attach;
        int                     max_ah;
        int                     max_fmr;
        int                     max_map_per_fmr;
        int                     max_srq;
        int                     max_srq_wr;
        int                     max_srq_sge;
        unsigned int            max_fast_reg_page_list_len;
        u16                     max_pkeys;
        u8                      local_ca_ack_delay;
        int                     sig_prot_cap;
        int                     sig_guard_cap;
        struct ib_odp_caps      odp_caps;
        uint64_t                timestamp_mask;
        uint64_t                hca_core_clock; /* in kHz */
        struct ib_rss_caps      rss_caps;
        u32                     max_wq_type_rq;
        u32                     raw_packet_caps; /* Use ib_raw_packet_caps enum */
        struct ib_tm_caps       tm_caps;
};

enum ib_mtu {
        IB_MTU_256  = 1,
        IB_MTU_512  = 2,
        IB_MTU_1024 = 3,
        IB_MTU_2048 = 4,
        IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
        switch (mtu) {
        case IB_MTU_256:  return  256;
        case IB_MTU_512:  return  512;
        case IB_MTU_1024: return 1024;
        case IB_MTU_2048: return 2048;
        case IB_MTU_4096: return 4096;
        default:          return -1;
        }
}

static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
        if (mtu >= 4096)
                return IB_MTU_4096;
        else if (mtu >= 2048)
                return IB_MTU_2048;
        else if (mtu >= 1024)
                return IB_MTU_1024;
        else if (mtu >= 512)
                return IB_MTU_512;
        else
                return IB_MTU_256;
}
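
/*
 * Usage sketch (illustrative): ib_mtu_int_to_enum() rounds down to the
 * nearest supported value, so an int -> enum -> int round trip may shrink
 * the MTU:
 *
 *      enum ib_mtu mtu = ib_mtu_int_to_enum(3000);     // IB_MTU_2048
 *      int bytes = ib_mtu_enum_to_int(mtu);            // 2048
 */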

enum ib_port_state {
        IB_PORT_NOP             = 0,
        IB_PORT_DOWN            = 1,
        IB_PORT_INIT            = 2,
        IB_PORT_ARMED           = 3,
        IB_PORT_ACTIVE          = 4,
        IB_PORT_ACTIVE_DEFER    = 5
};

enum ib_port_cap_flags {
        IB_PORT_SM                              = 1 <<  1,
        IB_PORT_NOTICE_SUP                      = 1 <<  2,
        IB_PORT_TRAP_SUP                        = 1 <<  3,
        IB_PORT_OPT_IPD_SUP                     = 1 <<  4,
        IB_PORT_AUTO_MIGR_SUP                   = 1 <<  5,
        IB_PORT_SL_MAP_SUP                      = 1 <<  6,
        IB_PORT_MKEY_NVRAM                      = 1 <<  7,
        IB_PORT_PKEY_NVRAM                      = 1 <<  8,
        IB_PORT_LED_INFO_SUP                    = 1 <<  9,
        IB_PORT_SM_DISABLED                     = 1 << 10,
        IB_PORT_SYS_IMAGE_GUID_SUP              = 1 << 11,
        IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP       = 1 << 12,
        IB_PORT_EXTENDED_SPEEDS_SUP             = 1 << 14,
        IB_PORT_CM_SUP                          = 1 << 16,
        IB_PORT_SNMP_TUNNEL_SUP                 = 1 << 17,
        IB_PORT_REINIT_SUP                      = 1 << 18,
        IB_PORT_DEVICE_MGMT_SUP                 = 1 << 19,
        IB_PORT_VENDOR_CLASS_SUP                = 1 << 20,
        IB_PORT_DR_NOTICE_SUP                   = 1 << 21,
        IB_PORT_CAP_MASK_NOTICE_SUP             = 1 << 22,
        IB_PORT_BOOT_MGMT_SUP                   = 1 << 23,
        IB_PORT_LINK_LATENCY_SUP                = 1 << 24,
        IB_PORT_CLIENT_REG_SUP                  = 1 << 25,
        IB_PORT_IP_BASED_GIDS                   = 1 << 26,
};

enum ib_port_width {
        IB_WIDTH_1X     = 1,
        IB_WIDTH_4X     = 2,
        IB_WIDTH_8X     = 4,
        IB_WIDTH_12X    = 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
        switch (width) {
        case IB_WIDTH_1X:  return  1;
        case IB_WIDTH_4X:  return  4;
        case IB_WIDTH_8X:  return  8;
        case IB_WIDTH_12X: return 12;
        default:           return -1;
        }
}

enum ib_port_speed {
        IB_SPEED_SDR    = 1,
        IB_SPEED_DDR    = 2,
        IB_SPEED_QDR    = 4,
        IB_SPEED_FDR10  = 8,
        IB_SPEED_FDR    = 16,
        IB_SPEED_EDR    = 32,
        IB_SPEED_HDR    = 64
};
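
/*
 * Usage sketch (illustrative): a port's raw link bandwidth is the lane
 * count times the per-lane rate.  The mapping below uses approximate
 * per-lane data rates (2.5/5/10/10/14/25/50 Gb/sec for SDR/DDR/QDR/
 * FDR10/FDR/EDR/HDR, per the IBTA definitions); the helper name is
 * hypothetical:
 *
 *      static int approx_lane_gbps(u8 active_speed)
 *      {
 *              switch (active_speed) {
 *              case IB_SPEED_SDR:   return 2;  // ~2.5 Gb/sec, truncated
 *              case IB_SPEED_DDR:   return 5;
 *              case IB_SPEED_QDR:
 *              case IB_SPEED_FDR10: return 10;
 *              case IB_SPEED_FDR:   return 14;
 *              case IB_SPEED_EDR:   return 25;
 *              case IB_SPEED_HDR:   return 50;
 *              default:             return -1;
 *              }
 *      }
 *
 *      // gbps ~= approx_lane_gbps(attr.active_speed) *
 *      //         ib_width_enum_to_int(attr.active_width)
 */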

/**
 * struct rdma_hw_stats
 * @timestamp - Used by the core code to track when the last update was
 * @lifespan - Used by the core code to determine how old the counters
 *   should be before being updated again.  Stored in jiffies, defaults
 *   to 10 milliseconds, drivers can override the default by specifying
 *   their own value during their allocation routine.
 * @names - Array of pointers to static names used for the counters in
 *   the directory.
 * @num_counters - How many hardware counters there are.  If names is
 *   shorter than this number, a kernel oops will result.  Driver authors
 *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@names) < num_counters)
 *   in their code to prevent this.
 * @value - Array of u64 counters that are accessed by the sysfs code and
 *   filled in by the driver's get_stats routine
 */
struct rdma_hw_stats {
        unsigned long   timestamp;
        unsigned long   lifespan;
        const char * const *names;
        int             num_counters;
        u64             value[];
};

#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
/**
 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
 *   for drivers.
 * @names - Array of static const char *
 * @num_counters - How many elements are in the array
 * @lifespan - How many milliseconds between updates
 */
static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
                const char * const *names, int num_counters,
                unsigned long lifespan)
{
        struct rdma_hw_stats *stats;

        stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
                        GFP_KERNEL);
        if (!stats)
                return NULL;
        stats->names = names;
        stats->num_counters = num_counters;
        stats->lifespan = msecs_to_jiffies(lifespan);

        return stats;
}
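
/*
 * Usage sketch (illustrative): a driver's stats allocation hook.  Per the
 * rdma_hw_stats documentation above, a BUILD_BUG_ON() next to the name
 * table guards against value[] outgrowing names[].  All names below are
 * hypothetical:
 *
 *      static const char * const hypothetical_counters[] = {
 *              "rx_pkts", "tx_pkts",
 *      };
 *
 *      struct rdma_hw_stats *hypothetical_alloc_stats(void)
 *      {
 *              BUILD_BUG_ON(ARRAY_SIZE(hypothetical_counters) < 2);
 *              return rdma_alloc_hw_stats_struct(hypothetical_counters, 2,
 *                                                RDMA_HW_STATS_DEFAULT_LIFESPAN);
 *      }
 */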

/* Define bits for the various pieces of functionality this port needs the
 * core to support.
 */
/* Management                           0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD            0x00000001
#define RDMA_CORE_CAP_IB_SMI            0x00000002
#define RDMA_CORE_CAP_IB_CM             0x00000004
#define RDMA_CORE_CAP_IW_CM             0x00000008
#define RDMA_CORE_CAP_IB_SA             0x00000010
#define RDMA_CORE_CAP_OPA_MAD           0x00000020

/* Address format                       0x000FF000 */
#define RDMA_CORE_CAP_AF_IB             0x00001000
#define RDMA_CORE_CAP_ETH_AH            0x00002000
#define RDMA_CORE_CAP_OPA_AH            0x00004000

/* Protocol                             0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB           0x00100000
#define RDMA_CORE_CAP_PROT_ROCE         0x00200000
#define RDMA_CORE_CAP_PROT_IWARP        0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
#define RDMA_CORE_CAP_PROT_RAW_PACKET   0x01000000
#define RDMA_CORE_CAP_PROT_USNIC        0x02000000

#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
                                        | RDMA_CORE_CAP_IB_MAD \
                                        | RDMA_CORE_CAP_IB_SMI \
                                        | RDMA_CORE_CAP_IB_CM  \
                                        | RDMA_CORE_CAP_IB_SA  \
                                        | RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
                                        | RDMA_CORE_CAP_IB_MAD  \
                                        | RDMA_CORE_CAP_IB_CM   \
                                        | RDMA_CORE_CAP_AF_IB   \
                                        | RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP                       \
                                        (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
                                        | RDMA_CORE_CAP_IB_MAD  \
                                        | RDMA_CORE_CAP_IB_CM   \
                                        | RDMA_CORE_CAP_AF_IB   \
                                        | RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
                                        | RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
                                        | RDMA_CORE_CAP_OPA_MAD)

#define RDMA_CORE_PORT_RAW_PACKET       (RDMA_CORE_CAP_PROT_RAW_PACKET)

#define RDMA_CORE_PORT_USNIC            (RDMA_CORE_CAP_PROT_USNIC)
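
/*
 * Usage sketch (illustrative): a RoCE v2 capable driver would typically
 * advertise one of the composite masks above for each port, e.g. from its
 * port-immutable setup callback (struct ib_port_immutable and its
 * core_cap_flags field are declared further down in this header; the
 * surrounding callback is not shown here):
 *
 *      immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
 */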

struct ib_port_attr {
        u64                     subnet_prefix;
        enum ib_port_state      state;
        enum ib_mtu             max_mtu;
        enum ib_mtu             active_mtu;
        int                     gid_tbl_len;
        u32                     port_cap_flags;
        u32                     max_msg_sz;
        u32                     bad_pkey_cntr;
        u32                     qkey_viol_cntr;
        u16                     pkey_tbl_len;
        u32                     sm_lid;
        u32                     lid;
        u8                      lmc;
        u8                      max_vl_num;
        u8                      sm_sl;
        u8                      subnet_timeout;
        u8                      init_type_reply;
        u8                      active_width;
        u8                      active_speed;
        u8                      phys_state;
        bool                    grh_required;
};

enum ib_device_modify_flags {
        IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
        IB_DEVICE_MODIFY_NODE_DESC      = 1 << 1
};

#define IB_DEVICE_NODE_DESC_MAX 64

struct ib_device_modify {
        u64     sys_image_guid;
        char    node_desc[IB_DEVICE_NODE_DESC_MAX];
};

enum ib_port_modify_flags {
        IB_PORT_SHUTDOWN                = 1,
        IB_PORT_INIT_TYPE               = (1<<2),
        IB_PORT_RESET_QKEY_CNTR         = (1<<3),
        IB_PORT_OPA_MASK_CHG            = (1<<4)
};

struct ib_port_modify {
        u32     set_port_cap_mask;
        u32     clr_port_cap_mask;
        u8      init_type;
};

enum ib_event_type {
        IB_EVENT_CQ_ERR,
        IB_EVENT_QP_FATAL,
        IB_EVENT_QP_REQ_ERR,
        IB_EVENT_QP_ACCESS_ERR,
        IB_EVENT_COMM_EST,
        IB_EVENT_SQ_DRAINED,
        IB_EVENT_PATH_MIG,
        IB_EVENT_PATH_MIG_ERR,
        IB_EVENT_DEVICE_FATAL,
        IB_EVENT_PORT_ACTIVE,
        IB_EVENT_PORT_ERR,
        IB_EVENT_LID_CHANGE,
        IB_EVENT_PKEY_CHANGE,
        IB_EVENT_SM_CHANGE,
        IB_EVENT_SRQ_ERR,
        IB_EVENT_SRQ_LIMIT_REACHED,
        IB_EVENT_QP_LAST_WQE_REACHED,
        IB_EVENT_CLIENT_REREGISTER,
        IB_EVENT_GID_CHANGE,
        IB_EVENT_WQ_FATAL,
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event);

struct ib_event {
        struct ib_device        *device;
        union {
                struct ib_cq    *cq;
                struct ib_qp    *qp;
                struct ib_srq   *srq;
                struct ib_wq    *wq;
                u8              port_num;
        } element;
        enum ib_event_type      event;
};

struct ib_event_handler {
        struct ib_device *device;
        void            (*handler)(struct ib_event_handler *, struct ib_event *);
        struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)          \
        do {                                                    \
                (_ptr)->device  = _device;                      \
                (_ptr)->handler = _handler;                     \
                INIT_LIST_HEAD(&(_ptr)->list);                  \
        } while (0)
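
/*
 * Usage sketch (illustrative): registering an asynchronous event handler.
 * ib_register_event_handler() is declared later in this header; the
 * handler name below is hypothetical:
 *
 *      static void my_event_handler(struct ib_event_handler *handler,
 *                                   struct ib_event *event)
 *      {
 *              if (event->event == IB_EVENT_PORT_ACTIVE)
 *                      pr_info("port %u active\n", event->element.port_num);
 *      }
 *
 *      struct ib_event_handler eh;
 *
 *      INIT_IB_EVENT_HANDLER(&eh, device, my_event_handler);
 *      ib_register_event_handler(&eh);
 */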

struct ib_global_route {
        union ib_gid    dgid;
        u32             flow_label;
        u8              sgid_index;
        u8              hop_limit;
        u8              traffic_class;
};

struct ib_grh {
        __be32          version_tclass_flow;
        __be16          paylen;
        u8              next_hdr;
        u8              hop_limit;
        union ib_gid    sgid;
        union ib_gid    dgid;
};

union rdma_network_hdr {
        struct ib_grh ibgrh;
        struct {
                /* The IB spec states that if it's IPv4, the IPv4 header
                 * is located in the last 20 bytes of the GRH.
                 */
                u8              reserved[20];
                struct iphdr    roce4grh;
        };
};

#define IB_QPN_MASK             0xFFFFFF

enum {
        IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE       cpu_to_be16(0xFFFF)
#define IB_MULTICAST_LID_BASE   cpu_to_be16(0xC000)

enum ib_ah_flags {
        IB_AH_GRH       = 1
};

enum ib_rate {
        IB_RATE_PORT_CURRENT = 0,
        IB_RATE_2_5_GBPS = 2,
        IB_RATE_5_GBPS   = 5,
        IB_RATE_10_GBPS  = 3,
        IB_RATE_20_GBPS  = 6,
        IB_RATE_30_GBPS  = 4,
        IB_RATE_40_GBPS  = 7,
        IB_RATE_60_GBPS  = 8,
        IB_RATE_80_GBPS  = 9,
        IB_RATE_120_GBPS = 10,
        IB_RATE_14_GBPS  = 11,
        IB_RATE_56_GBPS  = 12,
        IB_RATE_112_GBPS = 13,
        IB_RATE_168_GBPS = 14,
        IB_RATE_25_GBPS  = 15,
        IB_RATE_100_GBPS = 16,
        IB_RATE_200_GBPS = 17,
        IB_RATE_300_GBPS = 18
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);

/**
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG:       memory region that is used for
 *                            normal registration
 * @IB_MR_TYPE_SIGNATURE:     memory region that is used for
 *                            signature operations (data-integrity
 *                            capable regions)
 * @IB_MR_TYPE_SG_GAPS:       memory region that is capable of
 *                            registering arbitrary sg lists (without
 *                            the normal mr constraints - see
 *                            ib_map_mr_sg)
 */
enum ib_mr_type {
        IB_MR_TYPE_MEM_REG,
        IB_MR_TYPE_SIGNATURE,
        IB_MR_TYPE_SG_GAPS,
};

/**
 * Signature types
 * IB_SIG_TYPE_NONE: Unprotected.
 * IB_SIG_TYPE_T10_DIF: Type T10-DIF
 */
enum ib_signature_type {
        IB_SIG_TYPE_NONE,
        IB_SIG_TYPE_T10_DIF,
};

/**
 * Signature T10-DIF block-guard types
 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
 * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
 */
enum ib_t10_dif_bg_type {
        IB_T10DIF_CRC,
        IB_T10DIF_CSUM
};

/**
 * struct ib_t10_dif_domain - Parameters specific for T10-DIF
 *     domain.
 * @bg_type: T10-DIF block guard type (CRC|CSUM)
 * @pi_interval: protection information interval.
 * @bg: seed of the guard computation.
 * @app_tag: application tag of the guard block.
 * @ref_tag: initial guard block reference tag.
 * @ref_remap: Indicates whether the reftag increments with each block
 * @app_escape: Indicates that the block check is skipped if apptag=0xffff
 * @ref_escape: Indicates that the block check is skipped if reftag=0xffffffff
 * @apptag_check_mask: check bitmask of application tag.
 */
struct ib_t10_dif_domain {
        enum ib_t10_dif_bg_type bg_type;
        u16                     pi_interval;
        u16                     bg;
        u16                     app_tag;
        u32                     ref_tag;
        bool                    ref_remap;
        bool                    app_escape;
        bool                    ref_escape;
        u16                     apptag_check_mask;
};

/**
 * struct ib_sig_domain - Parameters for signature domain
 * @sig_type: specific signature type
 * @sig: union of all signature domain attributes that may
 *     be used to set domain layout.
 */
struct ib_sig_domain {
        enum ib_signature_type sig_type;
        union {
                struct ib_t10_dif_domain dif;
        } sig;
};

/**
 * struct ib_sig_attrs - Parameters for signature handover operation
 * @check_mask: bitmask for signature byte check (8 bytes)
 * @mem: memory domain layout descriptor.
 * @wire: wire domain layout descriptor.
 */
struct ib_sig_attrs {
        u8                      check_mask;
        struct ib_sig_domain    mem;
        struct ib_sig_domain    wire;
};

enum ib_sig_err_type {
        IB_SIG_BAD_GUARD,
        IB_SIG_BAD_REFTAG,
        IB_SIG_BAD_APPTAG,
};

/**
 * struct ib_sig_err - signature error descriptor
 */
struct ib_sig_err {
        enum ib_sig_err_type    err_type;
        u32                     expected;
        u32                     actual;
        u64                     sig_err_offset;
        u32                     key;
};

enum ib_mr_status_check {
        IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
        u32                 fail_status;
        struct ib_sig_err   sig_err;
};

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

enum rdma_ah_attr_type {
        RDMA_AH_ATTR_TYPE_IB,
        RDMA_AH_ATTR_TYPE_ROCE,
        RDMA_AH_ATTR_TYPE_OPA,
};

struct ib_ah_attr {
        u16                     dlid;
        u8                      src_path_bits;
};

struct roce_ah_attr {
        u8                      dmac[ETH_ALEN];
};

struct opa_ah_attr {
        u32                     dlid;
        u8                      src_path_bits;
        bool                    make_grd;
};

struct rdma_ah_attr {
        struct ib_global_route  grh;
        u8                      sl;
        u8                      static_rate;
        u8                      port_num;
        u8                      ah_flags;
        enum rdma_ah_attr_type type;
        union {
                struct ib_ah_attr ib;
                struct roce_ah_attr roce;
                struct opa_ah_attr opa;
        };
};

enum ib_wc_status {
        IB_WC_SUCCESS,
        IB_WC_LOC_LEN_ERR,
        IB_WC_LOC_QP_OP_ERR,
        IB_WC_LOC_EEC_OP_ERR,
        IB_WC_LOC_PROT_ERR,
        IB_WC_WR_FLUSH_ERR,
        IB_WC_MW_BIND_ERR,
        IB_WC_BAD_RESP_ERR,
        IB_WC_LOC_ACCESS_ERR,
        IB_WC_REM_INV_REQ_ERR,
        IB_WC_REM_ACCESS_ERR,
        IB_WC_REM_OP_ERR,
        IB_WC_RETRY_EXC_ERR,
        IB_WC_RNR_RETRY_EXC_ERR,
        IB_WC_LOC_RDD_VIOL_ERR,
        IB_WC_REM_INV_RD_REQ_ERR,
        IB_WC_REM_ABORT_ERR,
        IB_WC_INV_EECN_ERR,
        IB_WC_INV_EEC_STATE_ERR,
        IB_WC_FATAL_ERR,
        IB_WC_RESP_TIMEOUT_ERR,
        IB_WC_GENERAL_ERR
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
        IB_WC_SEND,
        IB_WC_RDMA_WRITE,
        IB_WC_RDMA_READ,
        IB_WC_COMP_SWAP,
        IB_WC_FETCH_ADD,
        IB_WC_LSO,
        IB_WC_LOCAL_INV,
        IB_WC_REG_MR,
        IB_WC_MASKED_COMP_SWAP,
        IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
        IB_WC_RECV                      = 1 << 7,
        IB_WC_RECV_RDMA_WITH_IMM
};
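
/*
 * Usage sketch (illustrative): as the comment above describes, receive
 * completions can be recognized with a single bit test rather than
 * comparing against both receive opcodes:
 *
 *      if (wc->opcode & IB_WC_RECV)
 *              handle_recv(wc);  // IB_WC_RECV or IB_WC_RECV_RDMA_WITH_IMM
 */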

enum ib_wc_flags {
        IB_WC_GRH               = 1,
        IB_WC_WITH_IMM          = (1<<1),
        IB_WC_WITH_INVALIDATE   = (1<<2),
        IB_WC_IP_CSUM_OK        = (1<<3),
        IB_WC_WITH_SMAC         = (1<<4),
        IB_WC_WITH_VLAN         = (1<<5),
        IB_WC_WITH_NETWORK_HDR_TYPE     = (1<<6),
};

struct ib_wc {
        union {
                u64             wr_id;
                struct ib_cqe   *wr_cqe;
        };
        enum ib_wc_status       status;
        enum ib_wc_opcode       opcode;
        u32                     vendor_err;
        u32                     byte_len;
        struct ib_qp           *qp;
        union {
                __be32          imm_data;
                u32             invalidate_rkey;
        } ex;
        u32                     src_qp;
        int                     wc_flags;
        u16                     pkey_index;
        u32                     slid;
        u8                      sl;
        u8                      dlid_path_bits;
        u8                      port_num;       /* valid only for DR SMPs on switches */
        u8                      smac[ETH_ALEN];
        u16                     vlan_id;
        u8                      network_hdr_type;
};

enum ib_cq_notify_flags {
        IB_CQ_SOLICITED                 = 1 << 0,
        IB_CQ_NEXT_COMP                 = 1 << 1,
        IB_CQ_SOLICITED_MASK            = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
        IB_CQ_REPORT_MISSED_EVENTS      = 1 << 2,
};

enum ib_srq_type {
        IB_SRQT_BASIC,
        IB_SRQT_XRC,
        IB_SRQT_TM,
};

static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
{
        return srq_type == IB_SRQT_XRC ||
               srq_type == IB_SRQT_TM;
}

enum ib_srq_attr_mask {
        IB_SRQ_MAX_WR   = 1 << 0,
        IB_SRQ_LIMIT    = 1 << 1,
};

struct ib_srq_attr {
        u32     max_wr;
        u32     max_sge;
        u32     srq_limit;
};

struct ib_srq_init_attr {
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *srq_context;
        struct ib_srq_attr      attr;
        enum ib_srq_type        srq_type;

        struct {
                struct ib_cq   *cq;
                union {
                        struct {
                                struct ib_xrcd *xrcd;
                        } xrc;

                        struct {
                                u32             max_num_tags;
                        } tag_matching;
                };
        } ext;
};

struct ib_qp_cap {
        u32     max_send_wr;
        u32     max_recv_wr;
        u32     max_send_sge;
        u32     max_recv_sge;
        u32     max_inline_data;

        /*
         * Maximum number of rdma_rw_ctx structures in flight at a time.
         * ib_create_qp() will calculate the right number of needed WRs
         * and MRs based on this.
         */
        u32     max_rdma_ctxs;
};

enum ib_sig_type {
        IB_SIGNAL_ALL_WR,
        IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
        /*
         * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
         * here (and in that order) since the MAD layer uses them as
         * indices into a 2-entry table.
         */
        IB_QPT_SMI,
        IB_QPT_GSI,

        IB_QPT_RC,
        IB_QPT_UC,
        IB_QPT_UD,
        IB_QPT_RAW_IPV6,
        IB_QPT_RAW_ETHERTYPE,
        IB_QPT_RAW_PACKET = 8,
        IB_QPT_XRC_INI = 9,
        IB_QPT_XRC_TGT,
        IB_QPT_MAX,
        /* Reserve a range for qp types internal to the low level driver.
         * These qp types will not be visible at the IB core layer, so the
         * IB_QPT_MAX usages should not be affected in the core layer
         */
        IB_QPT_RESERVED1 = 0x1000,
        IB_QPT_RESERVED2,
        IB_QPT_RESERVED3,
        IB_QPT_RESERVED4,
        IB_QPT_RESERVED5,
        IB_QPT_RESERVED6,
        IB_QPT_RESERVED7,
        IB_QPT_RESERVED8,
        IB_QPT_RESERVED9,
        IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
        IB_QP_CREATE_IPOIB_UD_LSO               = 1 << 0,
        IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK   = 1 << 1,
        IB_QP_CREATE_CROSS_CHANNEL              = 1 << 2,
        IB_QP_CREATE_MANAGED_SEND               = 1 << 3,
        IB_QP_CREATE_MANAGED_RECV               = 1 << 4,
        IB_QP_CREATE_NETIF_QP                   = 1 << 5,
        IB_QP_CREATE_SIGNATURE_EN               = 1 << 6,
        /* FREE                                 = 1 << 7, */
        IB_QP_CREATE_SCATTER_FCS                = 1 << 8,
        IB_QP_CREATE_CVLAN_STRIPPING            = 1 << 9,
        IB_QP_CREATE_SOURCE_QPN                 = 1 << 10,
        /* reserve bits 26-31 for low level drivers' internal use */
        IB_QP_CREATE_RESERVED_START             = 1 << 26,
        IB_QP_CREATE_RESERVED_END               = 1 << 31,
};

/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
 * callback to destroy the passed in QP.
 */

struct ib_qp_init_attr {
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *qp_context;
        struct ib_cq           *send_cq;
        struct ib_cq           *recv_cq;
        struct ib_srq          *srq;
        struct ib_xrcd         *xrcd;     /* XRC TGT QPs only */
        struct ib_qp_cap        cap;
        enum ib_sig_type        sq_sig_type;
        enum ib_qp_type         qp_type;
        enum ib_qp_create_flags create_flags;

        /*
         * Only needed for special QP types, or when using the RW API.
         */
        u8                      port_num;
        struct ib_rwq_ind_table *rwq_ind_tbl;
        u32                     source_qpn;
};

struct ib_qp_open_attr {
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *qp_context;
        u32                     qp_num;
        enum ib_qp_type         qp_type;
};

enum ib_rnr_timeout {
        IB_RNR_TIMER_655_36 =  0,
        IB_RNR_TIMER_000_01 =  1,
        IB_RNR_TIMER_000_02 =  2,
        IB_RNR_TIMER_000_03 =  3,
        IB_RNR_TIMER_000_04 =  4,
        IB_RNR_TIMER_000_06 =  5,
        IB_RNR_TIMER_000_08 =  6,
        IB_RNR_TIMER_000_12 =  7,
        IB_RNR_TIMER_000_16 =  8,
        IB_RNR_TIMER_000_24 =  9,
        IB_RNR_TIMER_000_32 = 10,
        IB_RNR_TIMER_000_48 = 11,
        IB_RNR_TIMER_000_64 = 12,
        IB_RNR_TIMER_000_96 = 13,
        IB_RNR_TIMER_001_28 = 14,
        IB_RNR_TIMER_001_92 = 15,
        IB_RNR_TIMER_002_56 = 16,
        IB_RNR_TIMER_003_84 = 17,
        IB_RNR_TIMER_005_12 = 18,
        IB_RNR_TIMER_007_68 = 19,
        IB_RNR_TIMER_010_24 = 20,
        IB_RNR_TIMER_015_36 = 21,
        IB_RNR_TIMER_020_48 = 22,
        IB_RNR_TIMER_030_72 = 23,
        IB_RNR_TIMER_040_96 = 24,
        IB_RNR_TIMER_061_44 = 25,
        IB_RNR_TIMER_081_92 = 26,
        IB_RNR_TIMER_122_88 = 27,
        IB_RNR_TIMER_163_84 = 28,
        IB_RNR_TIMER_245_76 = 29,
        IB_RNR_TIMER_327_68 = 30,
        IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
        IB_QP_STATE                     = 1,
        IB_QP_CUR_STATE                 = (1<<1),
        IB_QP_EN_SQD_ASYNC_NOTIFY       = (1<<2),
        IB_QP_ACCESS_FLAGS              = (1<<3),
        IB_QP_PKEY_INDEX                = (1<<4),
        IB_QP_PORT                      = (1<<5),
        IB_QP_QKEY                      = (1<<6),
        IB_QP_AV                        = (1<<7),
        IB_QP_PATH_MTU                  = (1<<8),
        IB_QP_TIMEOUT                   = (1<<9),
        IB_QP_RETRY_CNT                 = (1<<10),
        IB_QP_RNR_RETRY                 = (1<<11),
        IB_QP_RQ_PSN                    = (1<<12),
        IB_QP_MAX_QP_RD_ATOMIC          = (1<<13),
        IB_QP_ALT_PATH                  = (1<<14),
        IB_QP_MIN_RNR_TIMER             = (1<<15),
        IB_QP_SQ_PSN                    = (1<<16),
        IB_QP_MAX_DEST_RD_ATOMIC        = (1<<17),
        IB_QP_PATH_MIG_STATE            = (1<<18),
        IB_QP_CAP                       = (1<<19),
        IB_QP_DEST_QPN                  = (1<<20),
        IB_QP_RESERVED1                 = (1<<21),
        IB_QP_RESERVED2                 = (1<<22),
        IB_QP_RESERVED3                 = (1<<23),
        IB_QP_RESERVED4                 = (1<<24),
        IB_QP_RATE_LIMIT                = (1<<25),
};

enum ib_qp_state {
        IB_QPS_RESET,
        IB_QPS_INIT,
        IB_QPS_RTR,
        IB_QPS_RTS,
        IB_QPS_SQD,
        IB_QPS_SQE,
        IB_QPS_ERR
};

enum ib_mig_state {
        IB_MIG_MIGRATED,
        IB_MIG_REARM,
        IB_MIG_ARMED
};

enum ib_mw_type {
        IB_MW_TYPE_1 = 1,
        IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
        enum ib_qp_state        qp_state;
        enum ib_qp_state        cur_qp_state;
        enum ib_mtu             path_mtu;
        enum ib_mig_state       path_mig_state;
        u32                     qkey;
        u32                     rq_psn;
        u32                     sq_psn;
        u32                     dest_qp_num;
        int                     qp_access_flags;
        struct ib_qp_cap        cap;
        struct rdma_ah_attr     ah_attr;
        struct rdma_ah_attr     alt_ah_attr;
        u16                     pkey_index;
        u16                     alt_pkey_index;
        u8                      en_sqd_async_notify;
        u8                      sq_draining;
        u8                      max_rd_atomic;
        u8                      max_dest_rd_atomic;
        u8                      min_rnr_timer;
        u8                      port_num;
        u8                      timeout;
        u8                      retry_cnt;
        u8                      rnr_retry;
        u8                      alt_port_num;
        u8                      alt_timeout;
        u32                     rate_limit;
};
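
/*
 * Usage sketch (illustrative): only the ib_qp_attr fields selected by the
 * attribute mask are read when modifying a QP.  Moving a QP to the error
 * state, for example, needs nothing beyond qp_state (ib_modify_qp() is
 * declared later in this header):
 *
 *      struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
 *
 *      int ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
 */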

enum ib_wr_opcode {
        IB_WR_RDMA_WRITE,
        IB_WR_RDMA_WRITE_WITH_IMM,
        IB_WR_SEND,
        IB_WR_SEND_WITH_IMM,
        IB_WR_RDMA_READ,
        IB_WR_ATOMIC_CMP_AND_SWP,
        IB_WR_ATOMIC_FETCH_AND_ADD,
        IB_WR_LSO,
        IB_WR_SEND_WITH_INV,
        IB_WR_RDMA_READ_WITH_INV,
        IB_WR_LOCAL_INV,
        IB_WR_REG_MR,
        IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
        IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
        IB_WR_REG_SIG_MR,
        /* reserve values for low level drivers' internal use.
         * These values will not be used at all in the ib core layer.
         */
        IB_WR_RESERVED1 = 0xf0,
        IB_WR_RESERVED2,
        IB_WR_RESERVED3,
        IB_WR_RESERVED4,
        IB_WR_RESERVED5,
        IB_WR_RESERVED6,
        IB_WR_RESERVED7,
        IB_WR_RESERVED8,
        IB_WR_RESERVED9,
        IB_WR_RESERVED10,
};

enum ib_send_flags {
        IB_SEND_FENCE           = 1,
        IB_SEND_SIGNALED        = (1<<1),
        IB_SEND_SOLICITED       = (1<<2),
        IB_SEND_INLINE          = (1<<3),
        IB_SEND_IP_CSUM         = (1<<4),

        /* reserve bits 26-31 for low level drivers' internal use */
        IB_SEND_RESERVED_START  = (1 << 26),
        IB_SEND_RESERVED_END    = (1 << 31),
};

struct ib_sge {
        u64     addr;
        u32     length;
        u32     lkey;
};

struct ib_cqe {
        void (*done)(struct ib_cq *cq, struct ib_wc *wc);
};
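
/*
 * Usage sketch (illustrative): the ib_cqe-based completion model.  A
 * consumer embeds struct ib_cqe in its own per-request structure, points
 * wr_cqe at it when posting, and recovers the request with container_of()
 * in the done callback.  The request structure below is hypothetical:
 *
 *      struct my_request {
 *              struct ib_cqe   cqe;
 *              u32             id;
 *      };
 *
 *      static void my_done(struct ib_cq *cq, struct ib_wc *wc)
 *      {
 *              struct my_request *req =
 *                      container_of(wc->wr_cqe, struct my_request, cqe);
 *
 *              pr_debug("request %u done, status %d\n", req->id, wc->status);
 *      }
 *
 *      // before posting: req->cqe.done = my_done; wr.wr_cqe = &req->cqe;
 */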

struct ib_send_wr {
        struct ib_send_wr      *next;
        union {
                u64             wr_id;
                struct ib_cqe   *wr_cqe;
        };
        struct ib_sge          *sg_list;
        int                     num_sge;
        enum ib_wr_opcode       opcode;
        int                     send_flags;
        union {
                __be32          imm_data;
                u32             invalidate_rkey;
        } ex;
};

struct ib_rdma_wr {
        struct ib_send_wr       wr;
        u64                     remote_addr;
        u32                     rkey;
};

static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
{
        return container_of(wr, struct ib_rdma_wr, wr);
}
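
/*
 * Usage sketch (illustrative): every extended WR type embeds the base
 * struct ib_send_wr as its first member, so consumers post the base
 * pointer and drivers upcast with helpers such as rdma_wr().  Building
 * and posting an RDMA WRITE (ib_post_send() is declared later in this
 * header; sge, raddr and rkey are assumed to be set up already):
 *
 *      struct ib_rdma_wr wr = {
 *              .wr = {
 *                      .opcode     = IB_WR_RDMA_WRITE,
 *                      .sg_list    = &sge,
 *                      .num_sge    = 1,
 *                      .send_flags = IB_SEND_SIGNALED,
 *              },
 *              .remote_addr = raddr,
 *              .rkey        = rkey,
 *      };
 *      struct ib_send_wr *bad_wr;
 *
 *      int ret = ib_post_send(qp, &wr.wr, &bad_wr);
 */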

struct ib_atomic_wr {
        struct ib_send_wr       wr;
        u64                     remote_addr;
        u64                     compare_add;
        u64                     swap;
        u64                     compare_add_mask;
        u64                     swap_mask;
        u32                     rkey;
};

static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr)
{
        return container_of(wr, struct ib_atomic_wr, wr);
}

struct ib_ud_wr {
        struct ib_send_wr       wr;
        struct ib_ah            *ah;
        void                    *header;
        int                     hlen;
        int                     mss;
        u32                     remote_qpn;
        u32                     remote_qkey;
        u16                     pkey_index; /* valid for GSI only */
        u8                      port_num;   /* valid for DR SMPs on switch only */
};

static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr)
{
        return container_of(wr, struct ib_ud_wr, wr);
}

struct ib_reg_wr {
        struct ib_send_wr       wr;
        struct ib_mr            *mr;
        u32                     key;
        int                     access;
};

static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr)
{
        return container_of(wr, struct ib_reg_wr, wr);
}

struct ib_sig_handover_wr {
        struct ib_send_wr       wr;
        struct ib_sig_attrs    *sig_attrs;
        struct ib_mr           *sig_mr;
        int                     access_flags;
        struct ib_sge          *prot;
};

static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr)
{
        return container_of(wr, struct ib_sig_handover_wr, wr);
}

struct ib_recv_wr {
        struct ib_recv_wr      *next;
        union {
                u64             wr_id;
                struct ib_cqe   *wr_cqe;
        };
        struct ib_sge          *sg_list;
        int                     num_sge;
};

enum ib_access_flags {
        IB_ACCESS_LOCAL_WRITE   = 1,
        IB_ACCESS_REMOTE_WRITE  = (1<<1),
        IB_ACCESS_REMOTE_READ   = (1<<2),
        IB_ACCESS_REMOTE_ATOMIC = (1<<3),
        IB_ACCESS_MW_BIND       = (1<<4),
        IB_ZERO_BASED           = (1<<5),
        IB_ACCESS_ON_DEMAND     = (1<<6),
        IB_ACCESS_HUGETLB       = (1<<7),
};

/*
 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
 * are hidden here instead of a uapi header!
 */
enum ib_mr_rereg_flags {
        IB_MR_REREG_TRANS       = 1,
        IB_MR_REREG_PD          = (1<<1),
        IB_MR_REREG_ACCESS      = (1<<2),
        IB_MR_REREG_SUPPORTED   = ((IB_MR_REREG_ACCESS << 1) - 1)
};

struct ib_fmr_attr {
        int     max_pages;
        int     max_maps;
        u8      page_shift;
};

struct ib_umem;

enum rdma_remove_reason {
        /* Userspace requested uobject deletion. Call could fail */
        RDMA_REMOVE_DESTROY,
        /* Context deletion. This call should delete the actual object itself */
        RDMA_REMOVE_CLOSE,
        /* Driver is being hot-unplugged. This call should delete the actual object itself */
        RDMA_REMOVE_DRIVER_REMOVE,
        /* Context is being cleaned-up, but commit was just completed */
        RDMA_REMOVE_DURING_CLEANUP,
};

struct ib_rdmacg_object {
#ifdef CONFIG_CGROUP_RDMA
        struct rdma_cgroup      *cg;            /* owner rdma cgroup */
#endif
};

struct ib_ucontext {
        struct ib_device       *device;
        struct ib_uverbs_file  *ufile;
        int                     closing;

        /* locking the uobjects_list */
        struct mutex            uobjects_lock;
        struct list_head        uobjects;
        /* protects cleanup process from other actions */
        struct rw_semaphore     cleanup_rwsem;
        enum rdma_remove_reason cleanup_reason;

        struct pid             *tgid;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        struct rb_root_cached   umem_tree;
        /*
         * Protects umem_tree, as well as odp_mrs_count and
         * mmu notifier registration.
         */
        struct rw_semaphore     umem_rwsem;
        void (*invalidate_range)(struct ib_umem *umem,
                                 unsigned long start, unsigned long end);

        struct mmu_notifier     mn;
        atomic_t                notifier_count;
        /* A list of umems that don't have private mmu notifier counters yet. */
        struct list_head        no_private_counters;
        int                     odp_mrs_count;
#endif

        struct ib_rdmacg_object cg_obj;
};

struct ib_uobject {
        u64                     user_handle;    /* handle given to us by userspace */
        struct ib_ucontext     *context;        /* associated user context */
        void                   *object;         /* containing object */
        struct list_head        list;           /* link to context's list */
        struct ib_rdmacg_object cg_obj;         /* rdmacg object */
        int                     id;             /* index into kernel idr */
        struct kref             ref;
        atomic_t                usecnt;         /* protects exclusive access */
        struct rcu_head         rcu;            /* kfree_rcu() overhead */

        const struct uverbs_obj_type *type;
};

struct ib_uobject_file {
        struct ib_uobject       uobj;
        /* ufile contains the lock between context release and file close */
        struct ib_uverbs_file   *ufile;
};

struct ib_udata {
        const void __user *inbuf;
        void __user *outbuf;
        size_t       inlen;
        size_t       outlen;
};
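
/*
 * Usage sketch (illustrative): ib_udata describes the opaque command and
 * response buffers a driver exchanges with userspace.  The usual pattern
 * is to bounds-check and then copy via the ib_copy_from_udata() /
 * ib_copy_to_udata() helpers declared later in this header; the command
 * struct below is hypothetical:
 *
 *      struct my_create_cmd cmd = {};
 *
 *      if (udata->inlen < sizeof(cmd))
 *              return -EINVAL;
 *      if (ib_copy_from_udata(&cmd, udata, sizeof(cmd)))
 *              return -EFAULT;
 */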
1505
1506struct ib_pd {
1507        u32                     local_dma_lkey;
1508        u32                     flags;
1509        struct ib_device       *device;
1510        struct ib_uobject      *uobject;
1511        atomic_t                usecnt; /* count all resources */
1512
1513        u32                     unsafe_global_rkey;
1514
1515        /*
1516         * Implementation details of the RDMA core, don't use in drivers:
1517         */
1518        struct ib_mr           *__internal_mr;
1519};
1520
1521struct ib_xrcd {
1522        struct ib_device       *device;
1523        atomic_t                usecnt; /* count all exposed resources */
1524        struct inode           *inode;
1525
1526        struct mutex            tgt_qp_mutex;
1527        struct list_head        tgt_qp_list;
1528};
1529
1530struct ib_ah {
1531        struct ib_device        *device;
1532        struct ib_pd            *pd;
1533        struct ib_uobject       *uobject;
1534        enum rdma_ah_attr_type  type;
1535};
1536
1537typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1538
1539enum ib_poll_context {
1540        IB_POLL_DIRECT,         /* caller context, no hw completions */
1541        IB_POLL_SOFTIRQ,        /* poll from softirq context */
1542        IB_POLL_WORKQUEUE,      /* poll from workqueue */
1543};
1544
1545struct ib_cq {
1546        struct ib_device       *device;
1547        struct ib_uobject      *uobject;
1548        ib_comp_handler         comp_handler;
1549        void                  (*event_handler)(struct ib_event *, void *);
1550        void                   *cq_context;
1551        int                     cqe;
1552        atomic_t                usecnt; /* count number of work queues */
1553        enum ib_poll_context    poll_ctx;
1554        struct ib_wc            *wc;
1555        union {
1556                struct irq_poll         iop;
1557                struct work_struct      work;
1558        };
1559};
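
/*
 * Illustrative sketch, not part of this header: allocating a CQ that is
 * polled from workqueue context.  This assumes the ib_alloc_cq()/ib_free_cq()
 * helpers declared later in this header; the depth of 128 CQEs and
 * completion vector 0 are arbitrary example values.
 *
 *	struct ib_cq *cq;
 *
 *	cq = ib_alloc_cq(device, NULL, 128, 0, IB_POLL_WORKQUEUE);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	...
 *	ib_free_cq(cq);
 */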
1560
1561struct ib_srq {
1562        struct ib_device       *device;
1563        struct ib_pd           *pd;
1564        struct ib_uobject      *uobject;
1565        void                  (*event_handler)(struct ib_event *, void *);
1566        void                   *srq_context;
1567        enum ib_srq_type        srq_type;
1568        atomic_t                usecnt;
1569
1570        struct {
1571                struct ib_cq   *cq;
1572                union {
1573                        struct {
1574                                struct ib_xrcd *xrcd;
1575                                u32             srq_num;
1576                        } xrc;
1577                };
1578        } ext;
1579};
1580
1581enum ib_raw_packet_caps {
1582        /* Stripping the cvlan from an incoming packet and reporting it in
1583         * the matching work completion is supported.
1584         */
1585        IB_RAW_PACKET_CAP_CVLAN_STRIPPING       = (1 << 0),
1586        /* Scattering the FCS field of an incoming packet to host memory
1587         * is supported. */
1588        IB_RAW_PACKET_CAP_SCATTER_FCS           = (1 << 1),
1589        /* Checksum offloads are supported (for both send and receive). */
1590        IB_RAW_PACKET_CAP_IP_CSUM               = (1 << 2),
1591        /* When a packet is received for an RQ with no receive WQEs,
1592         * packet processing is delayed instead of the packet being dropped.
1593         */
1594        IB_RAW_PACKET_CAP_DELAY_DROP            = (1 << 3),
1595};
1596
1597enum ib_wq_type {
1598        IB_WQT_RQ
1599};
1600
1601enum ib_wq_state {
1602        IB_WQS_RESET,
1603        IB_WQS_RDY,
1604        IB_WQS_ERR
1605};
1606
1607struct ib_wq {
1608        struct ib_device       *device;
1609        struct ib_uobject      *uobject;
1610        void                *wq_context;
1611        void                (*event_handler)(struct ib_event *, void *);
1612        struct ib_pd           *pd;
1613        struct ib_cq           *cq;
1614        u32             wq_num;
1615        enum ib_wq_state       state;
1616        enum ib_wq_type wq_type;
1617        atomic_t                usecnt;
1618};
1619
1620enum ib_wq_flags {
1621        IB_WQ_FLAGS_CVLAN_STRIPPING     = 1 << 0,
1622        IB_WQ_FLAGS_SCATTER_FCS         = 1 << 1,
1623        IB_WQ_FLAGS_DELAY_DROP          = 1 << 2,
1624};
1625
1626struct ib_wq_init_attr {
1627        void                   *wq_context;
1628        enum ib_wq_type wq_type;
1629        u32             max_wr;
1630        u32             max_sge;
1631        struct  ib_cq          *cq;
1632        void                (*event_handler)(struct ib_event *, void *);
1633        u32             create_flags; /* Use enum ib_wq_flags */
1634};
1635
1636enum ib_wq_attr_mask {
1637        IB_WQ_STATE             = 1 << 0,
1638        IB_WQ_CUR_STATE         = 1 << 1,
1639        IB_WQ_FLAGS             = 1 << 2,
1640};
1641
1642struct ib_wq_attr {
1643        enum    ib_wq_state     wq_state;
1644        enum    ib_wq_state     curr_wq_state;
1645        u32                     flags; /* Use enum ib_wq_flags */
1646        u32                     flags_mask; /* Use enum ib_wq_flags */
1647};
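
/*
 * Illustrative sketch, not part of this header: driving a WQ to the ready
 * state.  This assumes the ib_modify_wq() helper declared later in this
 * header; "wq" is a previously created receive work queue.
 *
 *	struct ib_wq_attr wq_attr = {};
 *	int ret;
 *
 *	wq_attr.wq_state = IB_WQS_RDY;
 *	ret = ib_modify_wq(wq, &wq_attr, IB_WQ_STATE);
 *	if (ret)
 *		return ret;
 */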
1648
1649struct ib_rwq_ind_table {
1650        struct ib_device        *device;
1651        struct ib_uobject      *uobject;
1652        atomic_t                usecnt;
1653        u32             ind_tbl_num;
1654        u32             log_ind_tbl_size;
1655        struct ib_wq    **ind_tbl;
1656};
1657
1658struct ib_rwq_ind_table_init_attr {
1659        u32             log_ind_tbl_size;
1660        /* Each entry is a pointer to a Receive Work Queue */
1661        struct ib_wq    **ind_tbl;
1662};
1663
1664enum port_pkey_state {
1665        IB_PORT_PKEY_NOT_VALID = 0,
1666        IB_PORT_PKEY_VALID = 1,
1667        IB_PORT_PKEY_LISTED = 2,
1668};
1669
1670struct ib_qp_security;
1671
1672struct ib_port_pkey {
1673        enum port_pkey_state    state;
1674        u16                     pkey_index;
1675        u8                      port_num;
1676        struct list_head        qp_list;
1677        struct list_head        to_error_list;
1678        struct ib_qp_security  *sec;
1679};
1680
1681struct ib_ports_pkeys {
1682        struct ib_port_pkey     main;
1683        struct ib_port_pkey     alt;
1684};
1685
1686struct ib_qp_security {
1687        struct ib_qp           *qp;
1688        struct ib_device       *dev;
1689        /* Hold this mutex when changing port and pkey settings. */
1690        struct mutex            mutex;
1691        struct ib_ports_pkeys  *ports_pkeys;
1692        /* A list of all open shared QP handles.  Required to enforce security
1693         * properly for all users of a shared QP.
1694         */
1695        struct list_head        shared_qp_list;
1696        void                   *security;
1697        bool                    destroying;
1698        atomic_t                error_list_count;
1699        struct completion       error_complete;
1700        int                     error_comps_pending;
1701};
1702
1703/*
1704 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
1705 * @max_read_sge:  Maximum SGE elements per RDMA READ request.
1706 */
1707struct ib_qp {
1708        struct ib_device       *device;
1709        struct ib_pd           *pd;
1710        struct ib_cq           *send_cq;
1711        struct ib_cq           *recv_cq;
1712        spinlock_t              mr_lock;
1713        int                     mrs_used;
1714        struct list_head        rdma_mrs;
1715        struct list_head        sig_mrs;
1716        struct ib_srq          *srq;
1717        struct ib_xrcd         *xrcd; /* XRC TGT QPs only */
1718        struct list_head        xrcd_list;
1719
1720        /* count times opened, mcast attaches, flow attaches */
1721        atomic_t                usecnt;
1722        struct list_head        open_list;
1723        struct ib_qp           *real_qp;
1724        struct ib_uobject      *uobject;
1725        void                  (*event_handler)(struct ib_event *, void *);
1726        void                   *qp_context;
1727        u32                     qp_num;
1728        u32                     max_write_sge;
1729        u32                     max_read_sge;
1730        enum ib_qp_type         qp_type;
1731        struct ib_rwq_ind_table *rwq_ind_tbl;
1732        struct ib_qp_security  *qp_sec;
1733        u8                      port;
1734};
1735
1736struct ib_mr {
1737        struct ib_device  *device;
1738        struct ib_pd      *pd;
1739        u32                lkey;
1740        u32                rkey;
1741        u64                iova;
1742        u64                length;
1743        unsigned int       page_size;
1744        bool               need_inval;
1745        union {
1746                struct ib_uobject       *uobject;       /* user */
1747                struct list_head        qp_entry;       /* FR */
1748        };
1749};
1750
1751struct ib_mw {
1752        struct ib_device        *device;
1753        struct ib_pd            *pd;
1754        struct ib_uobject       *uobject;
1755        u32                     rkey;
1756        enum ib_mw_type         type;
1757};
1758
1759struct ib_fmr {
1760        struct ib_device        *device;
1761        struct ib_pd            *pd;
1762        struct list_head        list;
1763        u32                     lkey;
1764        u32                     rkey;
1765};
1766
1767/* Supported steering options */
1768enum ib_flow_attr_type {
1769        /* steering according to rule specifications */
1770        IB_FLOW_ATTR_NORMAL             = 0x0,
1771        /* default unicast and multicast rule -
1772         * receive all Eth traffic which isn't steered to any QP
1773         */
1774        IB_FLOW_ATTR_ALL_DEFAULT        = 0x1,
1775        /* default multicast rule -
1776         * receive all Eth multicast traffic which isn't steered to any QP
1777         */
1778        IB_FLOW_ATTR_MC_DEFAULT         = 0x2,
1779        /* sniffer rule - receive all port traffic */
1780        IB_FLOW_ATTR_SNIFFER            = 0x3
1781};
1782
1783/* Supported steering header types */
1784enum ib_flow_spec_type {
1785        /* L2 headers*/
1786        IB_FLOW_SPEC_ETH                = 0x20,
1787        IB_FLOW_SPEC_IB                 = 0x22,
1788        /* L3 header*/
1789        IB_FLOW_SPEC_IPV4               = 0x30,
1790        IB_FLOW_SPEC_IPV6               = 0x31,
1791        /* L4 headers*/
1792        IB_FLOW_SPEC_TCP                = 0x40,
1793        IB_FLOW_SPEC_UDP                = 0x41,
1794        IB_FLOW_SPEC_VXLAN_TUNNEL       = 0x50,
1795        IB_FLOW_SPEC_INNER              = 0x100,
1796        /* Actions */
1797        IB_FLOW_SPEC_ACTION_TAG         = 0x1000,
1798        IB_FLOW_SPEC_ACTION_DROP        = 0x1001,
1799};
1800#define IB_FLOW_SPEC_LAYER_MASK 0xF0
1801#define IB_FLOW_SPEC_SUPPORT_LAYERS 8
1802
1803/* Flow steering rule priority is set according to its domain.
1804 * Lower domain value means higher priority.
1805 */
1806enum ib_flow_domain {
1807        IB_FLOW_DOMAIN_USER,
1808        IB_FLOW_DOMAIN_ETHTOOL,
1809        IB_FLOW_DOMAIN_RFS,
1810        IB_FLOW_DOMAIN_NIC,
1811        IB_FLOW_DOMAIN_NUM /* Must be last */
1812};
1813
1814enum ib_flow_flags {
1815        IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
1816        IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 2  /* Must be last */
1817};
1818
1819struct ib_flow_eth_filter {
1820        u8      dst_mac[6];
1821        u8      src_mac[6];
1822        __be16  ether_type;
1823        __be16  vlan_tag;
1824        /* Must be last */
1825        u8      real_sz[0];
1826};
1827
1828struct ib_flow_spec_eth {
1829        u32                       type;
1830        u16                       size;
1831        struct ib_flow_eth_filter val;
1832        struct ib_flow_eth_filter mask;
1833};
1834
1835struct ib_flow_ib_filter {
1836        __be16 dlid;
1837        __u8   sl;
1838        /* Must be last */
1839        u8      real_sz[0];
1840};
1841
1842struct ib_flow_spec_ib {
1843        u32                      type;
1844        u16                      size;
1845        struct ib_flow_ib_filter val;
1846        struct ib_flow_ib_filter mask;
1847};
1848
1849/* IPv4 header flags */
1850enum ib_ipv4_flags {
1851        IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
1852        IB_IPV4_MORE_FRAG = 0x4  /* All fragmented packets except the
1853                                    last have this flag set */
1854};
1855
1856struct ib_flow_ipv4_filter {
1857        __be32  src_ip;
1858        __be32  dst_ip;
1859        u8      proto;
1860        u8      tos;
1861        u8      ttl;
1862        u8      flags;
1863        /* Must be last */
1864        u8      real_sz[0];
1865};
1866
1867struct ib_flow_spec_ipv4 {
1868        u32                        type;
1869        u16                        size;
1870        struct ib_flow_ipv4_filter val;
1871        struct ib_flow_ipv4_filter mask;
1872};
1873
1874struct ib_flow_ipv6_filter {
1875        u8      src_ip[16];
1876        u8      dst_ip[16];
1877        __be32  flow_label;
1878        u8      next_hdr;
1879        u8      traffic_class;
1880        u8      hop_limit;
1881        /* Must be last */
1882        u8      real_sz[0];
1883};
1884
1885struct ib_flow_spec_ipv6 {
1886        u32                        type;
1887        u16                        size;
1888        struct ib_flow_ipv6_filter val;
1889        struct ib_flow_ipv6_filter mask;
1890};
1891
1892struct ib_flow_tcp_udp_filter {
1893        __be16  dst_port;
1894        __be16  src_port;
1895        /* Must be last */
1896        u8      real_sz[0];
1897};
1898
1899struct ib_flow_spec_tcp_udp {
1900        u32                           type;
1901        u16                           size;
1902        struct ib_flow_tcp_udp_filter val;
1903        struct ib_flow_tcp_udp_filter mask;
1904};
1905
1906struct ib_flow_tunnel_filter {
1907        __be32  tunnel_id;
1908        u8      real_sz[0];
1909};
1910
1911/* ib_flow_spec_tunnel describes a VXLAN tunnel.
1912 * The tunnel_id field of val holds the VNI value.
1913 */
1914struct ib_flow_spec_tunnel {
1915        u32                           type;
1916        u16                           size;
1917        struct ib_flow_tunnel_filter  val;
1918        struct ib_flow_tunnel_filter  mask;
1919};
1920
1921struct ib_flow_spec_action_tag {
1922        enum ib_flow_spec_type        type;
1923        u16                           size;
1924        u32                           tag_id;
1925};
1926
1927struct ib_flow_spec_action_drop {
1928        enum ib_flow_spec_type        type;
1929        u16                           size;
1930};
1931
1932union ib_flow_spec {
1933        struct {
1934                u32                     type;
1935                u16                     size;
1936        };
1937        struct ib_flow_spec_eth         eth;
1938        struct ib_flow_spec_ib          ib;
1939        struct ib_flow_spec_ipv4        ipv4;
1940        struct ib_flow_spec_tcp_udp     tcp_udp;
1941        struct ib_flow_spec_ipv6        ipv6;
1942        struct ib_flow_spec_tunnel      tunnel;
1943        struct ib_flow_spec_action_tag  flow_tag;
1944        struct ib_flow_spec_action_drop drop;
1945};
1946
1947struct ib_flow_attr {
1948        enum ib_flow_attr_type type;
1949        u16          size;
1950        u16          priority;
1951        u32          flags;
1952        u8           num_of_specs;
1953        u8           port;
1954        /* Following are the optional layers according to user request
1955         * struct ib_flow_spec_xxx
1956         * struct ib_flow_spec_yyy
1957         */
1958};
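
/*
 * Illustrative sketch, not part of this header: because the specs follow
 * the attribute in memory, callers typically size one allocation for both.
 * The single Ethernet spec and the values below are arbitrary examples.
 *
 *	struct ib_flow_attr *attr;
 *	struct ib_flow_spec_eth *eth;
 *
 *	attr = kzalloc(sizeof(*attr) + sizeof(*eth), GFP_KERNEL);
 *	if (!attr)
 *		return -ENOMEM;
 *	attr->type = IB_FLOW_ATTR_NORMAL;
 *	attr->size = sizeof(*attr) + sizeof(*eth);
 *	attr->num_of_specs = 1;
 *	attr->port = 1;
 *	eth = (struct ib_flow_spec_eth *)(attr + 1);
 *	eth->type = IB_FLOW_SPEC_ETH;
 *	eth->size = sizeof(*eth);
 *	memset(eth->mask.dst_mac, 0xff, sizeof(eth->mask.dst_mac));
 */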
1959
1960struct ib_flow {
1961        struct ib_qp            *qp;
1962        struct ib_uobject       *uobject;
1963};
1964
1965struct ib_mad_hdr;
1966struct ib_grh;
1967
1968enum ib_process_mad_flags {
1969        IB_MAD_IGNORE_MKEY      = 1,
1970        IB_MAD_IGNORE_BKEY      = 2,
1971        IB_MAD_IGNORE_ALL       = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
1972};
1973
1974enum ib_mad_result {
1975        IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
1976        IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
1977        IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
1978        IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
1979};
1980
1981struct ib_port_cache {
1982        u64                   subnet_prefix;
1983        struct ib_pkey_cache  *pkey;
1984        struct ib_gid_table   *gid;
1985        u8                     lmc;
1986        enum ib_port_state     port_state;
1987};
1988
1989struct ib_cache {
1990        rwlock_t                lock;
1991        struct ib_event_handler event_handler;
1992        struct ib_port_cache   *ports;
1993};
1994
1995struct iw_cm_verbs;
1996
1997struct ib_port_immutable {
1998        int                           pkey_tbl_len;
1999        int                           gid_tbl_len;
2000        u32                           core_cap_flags;
2001        u32                           max_mad_size;
2002};
2003
2004/* rdma netdev type - specifies protocol type */
2005enum rdma_netdev_t {
2006        RDMA_NETDEV_OPA_VNIC,
2007        RDMA_NETDEV_IPOIB,
2008};
2009
2010/**
2011 * struct rdma_netdev - rdma netdev
2012 * For cases where netstack interfacing is required.
2013 */
2014struct rdma_netdev {
2015        void              *clnt_priv;
2016        struct ib_device  *hca;
2017        u8                 port_num;
2018
2019        /* cleanup function must be specified */
2020        void (*free_rdma_netdev)(struct net_device *netdev);
2021
2022        /* control functions */
2023        void (*set_id)(struct net_device *netdev, int id);
2024        /* send packet */
2025        int (*send)(struct net_device *dev, struct sk_buff *skb,
2026                    struct ib_ah *address, u32 dqpn);
2027        /* multicast */
2028        int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
2029                            union ib_gid *gid, u16 mlid,
2030                            int set_qkey, u32 qkey);
2031        int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
2032                            union ib_gid *gid, u16 mlid);
2033};
2034
2035struct ib_port_pkey_list {
2036        /* Lock to hold while modifying the list. */
2037        spinlock_t                    list_lock;
2038        struct list_head              pkey_list;
2039};
2040
2041struct ib_device {
2042        /* Do not access @dma_device directly from ULP nor from HW drivers. */
2043        struct device                *dma_device;
2044
2045        char                          name[IB_DEVICE_NAME_MAX];
2046
2047        struct list_head              event_handler_list;
2048        spinlock_t                    event_handler_lock;
2049
2050        spinlock_t                    client_data_lock;
2051        struct list_head              core_list;
2052        /* Access to the client_data_list is protected by the client_data_lock
2053         * spinlock and the lists_rwsem read-write semaphore */
2054        struct list_head              client_data_list;
2055
2056        struct ib_cache               cache;
2057        /**
2058         * port_immutable is indexed by port number
2059         */
2060        struct ib_port_immutable     *port_immutable;
2061
2062        int                           num_comp_vectors;
2063
2064        struct ib_port_pkey_list     *port_pkey_list;
2065
2066        struct iw_cm_verbs           *iwcm;
2067
2068        /**
2069         * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
2070         *   driver initialized data.  The struct is kfree()'ed by the sysfs
2071         *   core when the device is removed.  A lifespan of -1 in the return
2072         *   struct tells the core to set a default lifespan.
2073         */
2074        struct rdma_hw_stats      *(*alloc_hw_stats)(struct ib_device *device,
2075                                                     u8 port_num);
2076        /**
2077         * get_hw_stats - Fill in the counter value(s) in the stats struct.
2078         * @index - The index in the value array we wish to have updated, or
2079         *   num_counters if we want all stats updated
2080         * Return codes -
2081         *   < 0 - Error, no counters updated
2082         *   index - Updated the single counter pointed to by index
2083         *   num_counters - Updated all counters (will reset the timestamp
2084         *     and prevent further calls for lifespan milliseconds)
2085         * Drivers are allowed to update all counters in lieu of just the
2086         *   one given in index, at their option.
2087         */
2088        int                        (*get_hw_stats)(struct ib_device *device,
2089                                                   struct rdma_hw_stats *stats,
2090                                                   u8 port, int index);
2091        int                        (*query_device)(struct ib_device *device,
2092                                                   struct ib_device_attr *device_attr,
2093                                                   struct ib_udata *udata);
2094        int                        (*query_port)(struct ib_device *device,
2095                                                 u8 port_num,
2096                                                 struct ib_port_attr *port_attr);
2097        enum rdma_link_layer       (*get_link_layer)(struct ib_device *device,
2098                                                     u8 port_num);
2099        /* When calling get_netdev, the HW vendor's driver should return the
2100         * net device of device @device at port @port_num or NULL if such
2101         * a net device doesn't exist. The vendor driver should call dev_hold
2102         * on this net device. The HW vendor's device driver must guarantee
2103         * that this function returns NULL before the net device reaches
2104         * NETDEV_UNREGISTER_FINAL state.
2105         */
2106        struct net_device         *(*get_netdev)(struct ib_device *device,
2107                                                 u8 port_num);
2108        int                        (*query_gid)(struct ib_device *device,
2109                                                u8 port_num, int index,
2110                                                union ib_gid *gid);
2111        /* When calling add_gid, the HW vendor's driver should
2112         * add the gid of device @device at gid index @index of
2113         * port @port_num to be @gid. Meta-info of that gid (for example,
2114         * the network device related to this gid) is available
2115         * at @attr. @context allows the HW vendor driver to store extra
2116         * information together with a GID entry. The HW vendor may allocate
2117         * memory to contain this information and store it in @context when a
2118         * new GID entry is written. Params are consistent until the next
2119         * call of add_gid or delete_gid. The function should return 0 on
2120         * success or an error otherwise. The function may be called
2121         * concurrently for different ports. This function is only called
2122         * when roce_gid_table is used.
2123         */
2124        int                        (*add_gid)(struct ib_device *device,
2125                                              u8 port_num,
2126                                              unsigned int index,
2127                                              const union ib_gid *gid,
2128                                              const struct ib_gid_attr *attr,
2129                                              void **context);
2130        /* When calling del_gid, the HW vendor's driver should delete the
2131         * gid of device @device at gid index @index of port @port_num.
2132         * Upon the deletion of a GID entry, the HW vendor must free any
2133         * allocated memory. The caller will clear @context afterwards.
2134         * This function is only called when roce_gid_table is used.
2135         */
2136        int                        (*del_gid)(struct ib_device *device,
2137                                              u8 port_num,
2138                                              unsigned int index,
2139                                              void **context);
2140        int                        (*query_pkey)(struct ib_device *device,
2141                                                 u8 port_num, u16 index, u16 *pkey);
2142        int                        (*modify_device)(struct ib_device *device,
2143                                                    int device_modify_mask,
2144                                                    struct ib_device_modify *device_modify);
2145        int                        (*modify_port)(struct ib_device *device,
2146                                                  u8 port_num, int port_modify_mask,
2147                                                  struct ib_port_modify *port_modify);
2148        struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
2149                                                     struct ib_udata *udata);
2150        int                        (*dealloc_ucontext)(struct ib_ucontext *context);
2151        int                        (*mmap)(struct ib_ucontext *context,
2152                                           struct vm_area_struct *vma);
2153        struct ib_pd *             (*alloc_pd)(struct ib_device *device,
2154                                               struct ib_ucontext *context,
2155                                               struct ib_udata *udata);
2156        int                        (*dealloc_pd)(struct ib_pd *pd);
2157        struct ib_ah *             (*create_ah)(struct ib_pd *pd,
2158                                                struct rdma_ah_attr *ah_attr,
2159                                                struct ib_udata *udata);
2160        int                        (*modify_ah)(struct ib_ah *ah,
2161                                                struct rdma_ah_attr *ah_attr);
2162        int                        (*query_ah)(struct ib_ah *ah,
2163                                               struct rdma_ah_attr *ah_attr);
2164        int                        (*destroy_ah)(struct ib_ah *ah);
2165        struct ib_srq *            (*create_srq)(struct ib_pd *pd,
2166                                                 struct ib_srq_init_attr *srq_init_attr,
2167                                                 struct ib_udata *udata);
2168        int                        (*modify_srq)(struct ib_srq *srq,
2169                                                 struct ib_srq_attr *srq_attr,
2170                                                 enum ib_srq_attr_mask srq_attr_mask,
2171                                                 struct ib_udata *udata);
2172        int                        (*query_srq)(struct ib_srq *srq,
2173                                                struct ib_srq_attr *srq_attr);
2174        int                        (*destroy_srq)(struct ib_srq *srq);
2175        int                        (*post_srq_recv)(struct ib_srq *srq,
2176                                                    struct ib_recv_wr *recv_wr,
2177                                                    struct ib_recv_wr **bad_recv_wr);
2178        struct ib_qp *             (*create_qp)(struct ib_pd *pd,
2179                                                struct ib_qp_init_attr *qp_init_attr,
2180                                                struct ib_udata *udata);
2181        int                        (*modify_qp)(struct ib_qp *qp,
2182                                                struct ib_qp_attr *qp_attr,
2183                                                int qp_attr_mask,
2184                                                struct ib_udata *udata);
2185        int                        (*query_qp)(struct ib_qp *qp,
2186                                               struct ib_qp_attr *qp_attr,
2187                                               int qp_attr_mask,
2188                                               struct ib_qp_init_attr *qp_init_attr);
2189        int                        (*destroy_qp)(struct ib_qp *qp);
2190        int                        (*post_send)(struct ib_qp *qp,
2191                                                struct ib_send_wr *send_wr,
2192                                                struct ib_send_wr **bad_send_wr);
2193        int                        (*post_recv)(struct ib_qp *qp,
2194                                                struct ib_recv_wr *recv_wr,
2195                                                struct ib_recv_wr **bad_recv_wr);
2196        struct ib_cq *             (*create_cq)(struct ib_device *device,
2197                                                const struct ib_cq_init_attr *attr,
2198                                                struct ib_ucontext *context,
2199                                                struct ib_udata *udata);
2200        int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
2201                                                u16 cq_period);
2202        int                        (*destroy_cq)(struct ib_cq *cq);
2203        int                        (*resize_cq)(struct ib_cq *cq, int cqe,
2204                                                struct ib_udata *udata);
2205        int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
2206                                              struct ib_wc *wc);
2207        int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2208        int                        (*req_notify_cq)(struct ib_cq *cq,
2209                                                    enum ib_cq_notify_flags flags);
2210        int                        (*req_ncomp_notif)(struct ib_cq *cq,
2211                                                      int wc_cnt);
2212        struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
2213                                                 int mr_access_flags);
2214        struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
2215                                                  u64 start, u64 length,
2216                                                  u64 virt_addr,
2217                                                  int mr_access_flags,
2218                                                  struct ib_udata *udata);
2219        int                        (*rereg_user_mr)(struct ib_mr *mr,
2220                                                    int flags,
2221                                                    u64 start, u64 length,
2222                                                    u64 virt_addr,
2223                                                    int mr_access_flags,
2224                                                    struct ib_pd *pd,
2225                                                    struct ib_udata *udata);
2226        int                        (*dereg_mr)(struct ib_mr *mr);
2227        struct ib_mr *             (*alloc_mr)(struct ib_pd *pd,
2228                                               enum ib_mr_type mr_type,
2229                                               u32 max_num_sg);
2230        int                        (*map_mr_sg)(struct ib_mr *mr,
2231                                                struct scatterlist *sg,
2232                                                int sg_nents,
2233                                                unsigned int *sg_offset);
2234        struct ib_mw *             (*alloc_mw)(struct ib_pd *pd,
2235                                               enum ib_mw_type type,
2236                                               struct ib_udata *udata);
2237        int                        (*dealloc_mw)(struct ib_mw *mw);
2238        struct ib_fmr *            (*alloc_fmr)(struct ib_pd *pd,
2239                                                int mr_access_flags,
2240                                                struct ib_fmr_attr *fmr_attr);
2241        int                        (*map_phys_fmr)(struct ib_fmr *fmr,
2242                                                   u64 *page_list, int list_len,
2243                                                   u64 iova);
2244        int                        (*unmap_fmr)(struct list_head *fmr_list);
2245        int                        (*dealloc_fmr)(struct ib_fmr *fmr);
2246        int                        (*attach_mcast)(struct ib_qp *qp,
2247                                                   union ib_gid *gid,
2248                                                   u16 lid);
2249        int                        (*detach_mcast)(struct ib_qp *qp,
2250                                                   union ib_gid *gid,
2251                                                   u16 lid);
2252        int                        (*process_mad)(struct ib_device *device,
2253                                                  int process_mad_flags,
2254                                                  u8 port_num,
2255                                                  const struct ib_wc *in_wc,
2256                                                  const struct ib_grh *in_grh,
2257                                                  const struct ib_mad_hdr *in_mad,
2258                                                  size_t in_mad_size,
2259                                                  struct ib_mad_hdr *out_mad,
2260                                                  size_t *out_mad_size,
2261                                                  u16 *out_mad_pkey_index);
2262        struct ib_xrcd *           (*alloc_xrcd)(struct ib_device *device,
2263                                                 struct ib_ucontext *ucontext,
2264                                                 struct ib_udata *udata);
2265        int                        (*dealloc_xrcd)(struct ib_xrcd *xrcd);
2266        struct ib_flow *           (*create_flow)(struct ib_qp *qp,
2267                                                  struct ib_flow_attr
2268                                                  *flow_attr,
2269                                                  int domain);
2270        int                        (*destroy_flow)(struct ib_flow *flow_id);
2271        int                        (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2272                                                      struct ib_mr_status *mr_status);
2273        void                       (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
2274        void                       (*drain_rq)(struct ib_qp *qp);
2275        void                       (*drain_sq)(struct ib_qp *qp);
2276        int                        (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
2277                                                        int state);
2278        int                        (*get_vf_config)(struct ib_device *device, int vf, u8 port,
2279                                                   struct ifla_vf_info *ivf);
2280        int                        (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
2281                                                   struct ifla_vf_stats *stats);
2282        int                        (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
2283                                                  int type);
2284        struct ib_wq *             (*create_wq)(struct ib_pd *pd,
2285                                                struct ib_wq_init_attr *init_attr,
2286                                                struct ib_udata *udata);
2287        int                        (*destroy_wq)(struct ib_wq *wq);
2288        int                        (*modify_wq)(struct ib_wq *wq,
2289                                                struct ib_wq_attr *attr,
2290                                                u32 wq_attr_mask,
2291                                                struct ib_udata *udata);
2292        struct ib_rwq_ind_table *  (*create_rwq_ind_table)(struct ib_device *device,
2293                                                           struct ib_rwq_ind_table_init_attr *init_attr,
2294                                                           struct ib_udata *udata);
2295        int                        (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2296        /**
2297         * rdma netdev operation
2298         *
2299         * Drivers implementing alloc_rdma_netdev must return -EOPNOTSUPP if
2300         * they don't support the specified rdma netdev type.
2301         */
2302        struct net_device *(*alloc_rdma_netdev)(
2303                                        struct ib_device *device,
2304                                        u8 port_num,
2305                                        enum rdma_netdev_t type,
2306                                        const char *name,
2307                                        unsigned char name_assign_type,
2308                                        void (*setup)(struct net_device *));
2309
2310        struct module               *owner;
2311        struct device                dev;
2312        struct kobject               *ports_parent;
2313        struct list_head             port_list;
2314
2315        enum {
2316                IB_DEV_UNINITIALIZED,
2317                IB_DEV_REGISTERED,
2318                IB_DEV_UNREGISTERED
2319        }                            reg_state;
2320
2321        int                          uverbs_abi_ver;
2322        u64                          uverbs_cmd_mask;
2323        u64                          uverbs_ex_cmd_mask;
2324
2325        char                         node_desc[IB_DEVICE_NODE_DESC_MAX];
2326        __be64                       node_guid;
2327        u32                          local_dma_lkey;
2328        u16                          is_switch:1;
2329        u8                           node_type;
2330        u8                           phys_port_cnt;
2331        struct ib_device_attr        attrs;
2332        struct attribute_group       *hw_stats_ag;
2333        struct rdma_hw_stats         *hw_stats;
2334
2335#ifdef CONFIG_CGROUP_RDMA
2336        struct rdmacg_device         cg_device;
2337#endif
2338
2339        u32                          index;
2340
2341        /**
2342         * The following mandatory functions are used only at device
2343         * registration.  Keep functions such as these at the end of this
2344         * structure to avoid cache line misses when accessing struct ib_device
2345         * in fast paths.
2346         */
2347        int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
2348        void (*get_dev_fw_str)(struct ib_device *, char *str);
2349        const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
2350                                                     int comp_vector);
2351
2352        struct uverbs_root_spec         *specs_root;
2353};
2354
2355struct ib_client {
2356        char  *name;
2357        void (*add)   (struct ib_device *);
2358        void (*remove)(struct ib_device *, void *client_data);
2359
2360        /* Returns the net_dev belonging to this ib_client and matching the
2361         * given parameters.
2362         * @dev:         An RDMA device that the net_dev uses for communication.
2363         * @port:        A physical port number on the RDMA device.
2364         * @pkey:        P_Key that the net_dev uses if applicable.
2365         * @gid:         A GID that the net_dev uses to communicate.
2366         * @addr:        An IP address the net_dev is configured with.
2367         * @client_data: The device's client data set by ib_set_client_data().
2368         *
2369         * An ib_client that implements a net_dev on top of RDMA devices
2370         * (such as IP over IB) should implement this callback, allowing the
2371         * rdma_cm module to find the right net_dev for a given request.
2372         *
2373         * The caller is responsible for calling dev_put on the returned
2374         * netdev. */
2375        struct net_device *(*get_net_dev_by_params)(
2376                        struct ib_device *dev,
2377                        u8 port,
2378                        u16 pkey,
2379                        const union ib_gid *gid,
2380                        const struct sockaddr *addr,
2381                        void *client_data);
2382        struct list_head list;
2383};
2384
2385struct ib_device *ib_alloc_device(size_t size);
2386void ib_dealloc_device(struct ib_device *device);
2387
2388void ib_get_device_fw_str(struct ib_device *device, char *str);
2389
2390int ib_register_device(struct ib_device *device,
2391                       int (*port_callback)(struct ib_device *,
2392                                            u8, struct kobject *));
2393void ib_unregister_device(struct ib_device *device);
2394
2395int ib_register_client   (struct ib_client *client);
2396void ib_unregister_client(struct ib_client *client);
2397
2398void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
2399void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
2400                         void *data);
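
/*
 * Illustrative sketch, not part of this header: a minimal client that
 * attaches per-device private data.  The my_client/my_add/my_remove names
 * and the my_data type are hypothetical.  ib_register_client(&my_client)
 * then invokes ->add() for each existing and future device.
 *
 *	static struct ib_client my_client;
 *
 *	static void my_add(struct ib_device *device)
 *	{
 *		ib_set_client_data(device, &my_client,
 *				   kzalloc(sizeof(struct my_data), GFP_KERNEL));
 *	}
 *
 *	static void my_remove(struct ib_device *device, void *client_data)
 *	{
 *		kfree(client_data);
 *	}
 *
 *	static struct ib_client my_client = {
 *		.name   = "my_client",
 *		.add    = my_add,
 *		.remove = my_remove,
 *	};
 */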
2401
2402static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2403{
2404        return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2405}
2406
2407static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2408{
2409        return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
2410}
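
/*
 * Illustrative sketch, not part of this header: a driver verb copying its
 * command in and its response out through ib_udata.  The my_cmd/my_resp
 * structures stand in for a driver's private user ABI.
 *
 *	struct my_cmd cmd;
 *	struct my_resp resp = {};
 *	int ret;
 *
 *	ret = ib_copy_from_udata(&cmd, udata, min(sizeof(cmd), udata->inlen));
 *	if (ret)
 *		return ret;
 *	...
 *	ret = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
 *	if (ret)
 *		return ret;
 */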
2411
2412static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2413                                       size_t offset,
2414                                       size_t len)
2415{
2416        const void __user *p = udata->inbuf + offset;
2417        bool ret;
2418        u8 *buf;
2419
2420        if (len > USHRT_MAX)
2421                return false;
2422
2423        buf = memdup_user(p, len);
2424        if (IS_ERR(buf))
2425                return false;
2426
2427        ret = !memchr_inv(buf, 0, len);
2428        kfree(buf);
2429        return ret;
2430}
2431
2432/**
2433 * ib_modify_qp_is_ok - Check that the supplied attribute mask
2434 * contains all required attributes and no attributes not allowed for
2435 * the given QP state transition.
2436 * @cur_state: Current QP state
2437 * @next_state: Next QP state
2438 * @type: QP type
2439 * @mask: Mask of supplied QP attributes
2440 * @ll : link layer of port
2441 *
2442 * This function is a helper function that a low-level driver's
2443 * modify_qp method can use to validate the consumer's input.  It
2444 * checks that cur_state and next_state are valid QP states, that a
2445 * transition from cur_state to next_state is allowed by the IB spec,
2446 * and that the attribute mask supplied is allowed for the transition.
2447 */
2448int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
2449                       enum ib_qp_type type, enum ib_qp_attr_mask mask,
2450                       enum rdma_link_layer ll);
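
/*
 * Illustrative sketch, not part of this header: a driver's modify_qp
 * handler validating a transition before programming hardware.  Note that
 * in this version of the API the helper returns nonzero when the
 * transition is legal.
 *
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
 *				attr_mask, IB_LINK_LAYER_ETHERNET))
 *		return -EINVAL;
 */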
2451
2452void ib_register_event_handler(struct ib_event_handler *event_handler);
2453void ib_unregister_event_handler(struct ib_event_handler *event_handler);
2454void ib_dispatch_event(struct ib_event *event);
2455
2456int ib_query_port(struct ib_device *device,
2457                  u8 port_num, struct ib_port_attr *port_attr);
2458
2459enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
2460                                               u8 port_num);
2461
2462/**
2463 * rdma_cap_ib_switch - Check if the device is an IB switch
2464 * @device: Device to check
2465 *
2466 * The device driver is responsible for setting the is_switch bit in the
2467 * ib_device structure at init time.
2468 *
2469 * Return: true if the device is an IB switch.
2470 */
2471static inline bool rdma_cap_ib_switch(const struct ib_device *device)
2472{
2473        return device->is_switch;
2474}
2475
2476/**
2477 * rdma_start_port - Return the first valid port number for the device
2478 * specified
2479 *
2480 * @device: Device to be checked
2481 *
2482 * Return: the first valid port number.
2483 */
2484static inline u8 rdma_start_port(const struct ib_device *device)
2485{
2486        return rdma_cap_ib_switch(device) ? 0 : 1;
2487}
2488
2489/**
2490 * rdma_end_port - Return the last valid port number for the device
2491 * specified
2492 *
2493 * @device: Device to be checked
2494 *
2495 * Return: the last valid port number.
2496 */
2497static inline u8 rdma_end_port(const struct ib_device *device)
2498{
2499        return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
2500}
2501
2502static inline int rdma_is_port_valid(const struct ib_device *device,
2503                                     unsigned int port)
2504{
2505        return (port >= rdma_start_port(device) &&
2506                port <= rdma_end_port(device));
2507}
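
/*
 * Illustrative sketch, not part of this header: the helpers above make
 * port iteration uniform across switches (port 0 only) and HCAs (ports
 * 1..phys_port_cnt).  setup_port() is a hypothetical per-port hook.
 *
 *	u8 port;
 *
 *	for (port = rdma_start_port(device);
 *	     port <= rdma_end_port(device); port++)
 *		setup_port(device, port);
 */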
2508
2509static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
2510{
2511        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
2512}
2513
2514static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
2515{
2516        return device->port_immutable[port_num].core_cap_flags &
2517                (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
2518}
2519
2520static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
2521{
2522        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
2523}
2524
2525static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
2526{
2527        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
2528}
2529
2530static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
2531{
2532        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
2533}
2534
2535static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
2536{
2537        return rdma_protocol_ib(device, port_num) ||
2538                rdma_protocol_roce(device, port_num);
2539}
2540
2541static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
2542{
2543        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_RAW_PACKET;
2544}
2545
2546static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
2547{
2548        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_USNIC;
2549}
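
/*
 * Illustrative sketch, not part of this header: ULPs branch on these
 * predicates instead of inspecting node_type directly.  The two helper
 * names are hypothetical.
 *
 *	if (rdma_protocol_iwarp(device, port_num))
 *		setup_iwarp_conn(id);
 *	else if (rdma_ib_or_roce(device, port_num))
 *		setup_ib_conn(id);
 *	else
 *		return -EPROTONOSUPPORT;
 */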
2550
2551/**
2552 * rdma_cap_ib_mad - Check if the port of a device supports InfiniBand
2553 * Management Datagrams.
2554 * @device: Device to check
2555 * @port_num: Port number to check
2556 *
2557 * Management Datagrams (MAD) are a required part of the InfiniBand
2558 * specification and are supported on all InfiniBand devices.  A slightly
2559 * extended version is also supported on OPA interfaces.
2560 *
2561 * Return: true if the port supports sending/receiving of MAD packets.
2562 */
2563static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
2564{
2565        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
2566}
2567
2568/**
2569 * rdma_cap_opa_mad - Check if the port of a device provides support for OPA
2570 * Management Datagrams.
2571 * @device: Device to check
2572 * @port_num: Port number to check
2573 *
2574 * Intel OmniPath devices extend and/or replace the InfiniBand Management
2575 * datagrams with their own versions.  These OPA MADs share many but not all of
2576 * the characteristics of InfiniBand MADs.
2577 *
2578 * OPA MADs differ in the following ways:
2579 *
2580 *    1) MADs are variable size up to 2K
2581 *       IBTA defined MADs remain fixed at 256 bytes
2582 *    2) OPA SMPs must carry valid PKeys
2583 *    3) OPA SMP packets are a different format
2584 *
2585 * Return: true if the port supports OPA MAD packet formats.
2586 */
2587static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
2588{
2589        return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
2590                == RDMA_CORE_CAP_OPA_MAD;
2591}
2592
2593/**
2594 * rdma_cap_ib_smi - Check if the port of a device provides an InfiniBand
2595 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
2596 * @device: Device to check
2597 * @port_num: Port number to check
2598 *
2599 * Each InfiniBand node is required to provide a Subnet Management Agent
2600 * that the subnet manager can access.  Prior to the fabric being fully
2601 * configured by the subnet manager, the SMA is accessed via a well known
2602 * interface called the Subnet Management Interface (SMI).  This interface
2603 * uses directed route packets to communicate with the SM to get around the
2604 * chicken and egg problem of the SM needing to know what's on the fabric
2605 * in order to configure the fabric, and needing to configure the fabric in
2606 * order to send packets to the devices on the fabric.  These directed
2607 * route packets do not need the fabric fully configured in order to reach
2608 * their destination.  The SMI is the only method allowed to send
2609 * directed route packets on an InfiniBand fabric.
2610 *
2611 * Return: true if the port provides an SMI.
2612 */
2613static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
2614{
2615        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
2616}
2617
2618/**
2619 * rdma_cap_ib_cm - Check if the port of a device supports the InfiniBand
2620 * Communication Manager.
2621 * @device: Device to check
2622 * @port_num: Port number to check
2623 *
2624 * The InfiniBand Communication Manager is one of many pre-defined General
2625 * Service Agents (GSA) that are accessed via the General Service
2626 * Interface (GSI).  Its role is to facilitate establishment of connections
2627 * between nodes as well as other management related tasks for established
2628 * connections.
2629 *
2630 * Return: true if the port supports an IB CM (this does not guarantee that
2631 * a CM is actually running however).
2632 */
2633static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
2634{
2635        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
2636}
2637
2638/**
2639 * rdma_cap_iw_cm - Check if the port of a device supports the iWARP
2640 * Communication Manager.
2641 * @device: Device to check
2642 * @port_num: Port number to check
2643 *
2644 * Similar to above, but specific to iWARP connections which have a different
2645 * management protocol than InfiniBand.
2646 *
2647 * Return: true if the port supports an iWARP CM (this does not guarantee that
2648 * a CM is actually running however).
2649 */
2650static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
2651{
2652        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
2653}
2654
2655/**
2656 * rdma_cap_ib_sa - Check if the port of a device supports InfiniBand
2657 * Subnet Administration.
2658 * @device: Device to check
2659 * @port_num: Port number to check
2660 *
2661 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
2662 * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
2663 * fabrics, devices should resolve routes to other hosts by contacting the
2664 * SA to query the proper route.
2665 *
2666 * Return: true if the port should act as a client to the fabric Subnet
2667 * Administration interface.  This does not imply that the SA service is
2668 * running locally.
2669 */
2670static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
2671{
2672        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
2673}
2674
2675/**
2676 * rdma_cap_ib_mcast - Check if the port of a device supports InfiniBand
2677 * Multicast.
2678 * @device: Device to check
2679 * @port_num: Port number to check
2680 *
2681 * InfiniBand multicast registration is more complex than normal IPv4 or
2682 * IPv6 multicast registration.  Each Host Channel Adapter must register
2683 * with the Subnet Manager when it wishes to join a multicast group.  It
2684 * should do so only once regardless of how many queue pairs it subscribes
2685 * to this group.  And it should leave the group only after all queue pairs
2686 * attached to the group have been detached.
2687 *
2688 * Return: true if the port must undertake the additional administrative
2689 * overhead of registering/unregistering with the SM and tracking of the
2690 * total number of queue pairs attached to the multicast group.
2691 */
2692static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
2693{
2694        return rdma_cap_ib_sa(device, port_num);
2695}
2696
2697/**
2698 * rdma_cap_af_ib - Check if the port of a device supports
2699 * native InfiniBand addressing.
2700 * @device: Device to check
2701 * @port_num: Port number to check
2702 *
2703 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
2704 * GID.  RoCE uses a different mechanism, but still generates a GID via
2705 * a prescribed mechanism and port specific data.
2706 *
2707 * Return: true if the port uses a GID address to identify devices on the
2708 * network.
2709 */
2710static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
2711{
2712        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
2713}
2714
2715/**
2716 * rdma_cap_eth_ah - Check if the port of a device supports
2717 * Ethernet Address Handles.
2718 * @device: Device to check
2719 * @port_num: Port number to check
2720 *
2721 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
2722 * to fabricate GIDs over Ethernet/IP specific addresses native to the
2723 * port.  Normally, packet headers are generated by the sending host
2724 * adapter, but when sending connectionless datagrams, we must manually
2725 * inject the proper headers for the fabric we are communicating over.
2726 *
2727 * Return: true if we are running as a RoCE port and must force the
2728 * addition of a Global Route Header built from our Ethernet Address
2729 * Handle into our header list for connectionless packets.
2730 */
2731static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
2732{
2733        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
2734}
2735
2736/**
2737 * rdma_cap_opa_ah - Check if the port of a device supports
2738 * OPA Address Handles.
2739 * @device: Device to check
2740 * @port_num: Port number to check
2741 *
2742 * Return: true if we are running on an OPA device which supports
2743 * the extended OPA addressing.
2744 */
2745static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
2746{
2747        return (device->port_immutable[port_num].core_cap_flags &
2748                RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
2749}
2750
2751/**
2752 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
2753 *
2754 * @device: Device
2755 * @port_num: Port number
2756 *
2757 * This MAD size includes the MAD headers and MAD payload.  No other headers
2758 * are included.
2759 *
2760 * Return: the max MAD size required by the port.  Will return 0 if the port
2761 * does not support MADs.
2762 */
2763static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
2764{
2765        return device->port_immutable[port_num].max_mad_size;
2766}
2767
2768/**
2769 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
2770 * @device: Device to check
2771 * @port_num: Port number to check
2772 *
2773 * The RoCE GID table mechanism manages the various GIDs for a device.
2774 *
2775 * NOTE: if allocating the port's GID table has failed, this call will still
2776 * return true, but any RoCE GID table API will fail.
2777 *
2778 * Return: true if the port uses RoCE GID table mechanism in order to manage
2779 * its GIDs.
2780 */
2781static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
2782                                           u8 port_num)
2783{
2784        return rdma_protocol_roce(device, port_num) &&
2785                device->add_gid && device->del_gid;
2786}
2787
2788/*
2789 * Check if the device supports READ W/ INVALIDATE.
2790 */
2791static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
2792{
2793        /*
2794         * iWARP drivers must support READ W/ INVALIDATE.  No other protocol
2795         * has support for it yet.
2796         */
2797        return rdma_protocol_iwarp(dev, port_num);
2798}
2799
2800int ib_query_gid(struct ib_device *device,
2801                 u8 port_num, int index, union ib_gid *gid,
2802                 struct ib_gid_attr *attr);
2803
2804int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
2805                         int state);
2806int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
2807                     struct ifla_vf_info *info);
2808int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
2809                    struct ifla_vf_stats *stats);
2810int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
2811                   int type);
2812
2813int ib_query_pkey(struct ib_device *device,
2814                  u8 port_num, u16 index, u16 *pkey);
2815
2816int ib_modify_device(struct ib_device *device,
2817                     int device_modify_mask,
2818                     struct ib_device_modify *device_modify);
2819
2820int ib_modify_port(struct ib_device *device,
2821                   u8 port_num, int port_modify_mask,
2822                   struct ib_port_modify *port_modify);
2823
2824int ib_find_gid(struct ib_device *device, union ib_gid *gid,
2825                enum ib_gid_type gid_type, struct net_device *ndev,
2826                u8 *port_num, u16 *index);
2827
2828int ib_find_pkey(struct ib_device *device,
2829                 u8 port_num, u16 pkey, u16 *index);
2830
2831enum ib_pd_flags {
2832        /*
2833         * Create a memory registration for all memory in the system and place
2834         * the rkey for it into pd->unsafe_global_rkey.  This can be used by
2835         * ULPs to avoid the overhead of dynamic MRs.
2836         *
2837         * This flag is generally considered unsafe and must only be used in
2838         * extremely trusted environments.  Every use of it will log a warning
2839         * in the kernel log.
2840         */
2841        IB_PD_UNSAFE_GLOBAL_RKEY        = 0x01,
2842};
2843
2844struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
2845                const char *caller);
2846#define ib_alloc_pd(device, flags) \
2847        __ib_alloc_pd((device), (flags), __func__)
2848void ib_dealloc_pd(struct ib_pd *pd);
2849
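/*
 * Example (illustrative sketch, not part of this header): a typical ULP
 * allocates one PD per device and frees it on teardown.  IS_ERR() is used
 * because ib_alloc_pd() returns ERR_PTR() on failure.
 *
 *	struct ib_pd *pd = ib_alloc_pd(device, 0);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...
 *	ib_dealloc_pd(pd);
 */
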
2850/**
2851 * rdma_create_ah - Creates an address handle for the given address vector.
2852 * @pd: The protection domain associated with the address handle.
2853 * @ah_attr: The attributes of the address vector.
2854 *
2855 * The address handle is used to reference a local or global destination
2856 * in all UD QP post sends.
2857 */
2858struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr);
2859
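/*
 * Example (hedged sketch): building a minimal address vector and creating
 * an AH from it.  dlid and port_num are placeholders; real code takes them
 * from a path record or a received work completion.
 *
 *	struct rdma_ah_attr attr = { };
 *	struct ib_ah *ah;
 *
 *	attr.type = rdma_ah_find_type(pd->device, port_num);
 *	rdma_ah_set_dlid(&attr, dlid);
 *	rdma_ah_set_sl(&attr, 0);
 *	rdma_ah_set_port_num(&attr, port_num);
 *	ah = rdma_create_ah(pd, &attr);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 */
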
2860/**
2861 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
2862 *   work completion.
2863 * @hdr: the L3 header to parse
2864 * @net_type: type of header to parse
2865 * @sgid: place to store source gid
2866 * @dgid: place to store destination gid
2867 */
2868int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
2869                              enum rdma_network_type net_type,
2870                              union ib_gid *sgid, union ib_gid *dgid);
2871
2872/**
2873 * ib_get_rdma_header_version - Get the header version
2874 * @hdr: the L3 header to parse
2875 */
2876int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
2877
2878/**
2879 * ib_init_ah_from_wc - Initializes address handle attributes from a
2880 *   work completion.
2881 * @device: Device on which the received message arrived.
2882 * @port_num: Port on which the received message arrived.
2883 * @wc: Work completion associated with the received message.
2884 * @grh: References the received global route header.  This parameter is
2885 *   ignored unless the work completion indicates that the GRH is valid.
2886 * @ah_attr: Returned attributes that can be used when creating an address
2887 *   handle for replying to the message.
2888 */
2889int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
2890                       const struct ib_wc *wc, const struct ib_grh *grh,
2891                       struct rdma_ah_attr *ah_attr);
2892
2893/**
2894 * ib_create_ah_from_wc - Creates an address handle associated with the
2895 *   sender of the specified work completion.
2896 * @pd: The protection domain associated with the address handle.
2897 * @wc: Work completion information associated with a received message.
2898 * @grh: References the received global route header.  This parameter is
2899 *   ignored unless the work completion indicates that the GRH is valid.
2900 * @port_num: The outbound port number to associate with the address.
2901 *
2902 * The address handle is used to reference a local or global destination
2903 * in all UD QP post sends.
2904 */
2905struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
2906                                   const struct ib_grh *grh, u8 port_num);
2907
2908/**
2909 * rdma_modify_ah - Modifies the address vector associated with an address
2910 *   handle.
2911 * @ah: The address handle to modify.
2912 * @ah_attr: The new address vector attributes to associate with the
2913 *   address handle.
2914 */
2915int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2916
2917/**
2918 * rdma_query_ah - Queries the address vector associated with an address
2919 *   handle.
2920 * @ah: The address handle to query.
2921 * @ah_attr: The address vector attributes associated with the address
2922 *   handle.
2923 */
2924int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2925
2926/**
2927 * rdma_destroy_ah - Destroys an address handle.
2928 * @ah: The address handle to destroy.
2929 */
2930int rdma_destroy_ah(struct ib_ah *ah);
2931
2932/**
2933 * ib_create_srq - Creates a SRQ associated with the specified protection
2934 *   domain.
2935 * @pd: The protection domain associated with the SRQ.
2936 * @srq_init_attr: A list of initial attributes required to create the
2937 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
2938 *   the actual capabilities of the created SRQ.
2939 *
2940 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
2941 * requested size of the SRQ, and set to the actual values allocated
2942 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
2943 * will always be at least as large as the requested values.
2944 */
2945struct ib_srq *ib_create_srq(struct ib_pd *pd,
2946                             struct ib_srq_init_attr *srq_init_attr);
2947
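/*
 * Example (hedged sketch, sizes are arbitrary): creating an SRQ that can
 * hold up to 256 receives of one scatter entry each.  On return, the attr
 * fields reflect what the device actually allocated.
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.attr = { .max_wr = 256, .max_sge = 1 },
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &srq_attr);
 *
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 */
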
2948/**
2949 * ib_modify_srq - Modifies the attributes for the specified SRQ.
2950 * @srq: The SRQ to modify.
2951 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
2952 *   the current values of selected SRQ attributes are returned.
2953 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
2954 *   are being modified.
2955 *
2956 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
2957 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
2958 * the number of receives queued drops below the limit.
2959 */
2960int ib_modify_srq(struct ib_srq *srq,
2961                  struct ib_srq_attr *srq_attr,
2962                  enum ib_srq_attr_mask srq_attr_mask);
2963
2964/**
2965 * ib_query_srq - Returns the attribute list and current values for the
2966 *   specified SRQ.
2967 * @srq: The SRQ to query.
2968 * @srq_attr: The attributes of the specified SRQ.
2969 */
2970int ib_query_srq(struct ib_srq *srq,
2971                 struct ib_srq_attr *srq_attr);
2972
2973/**
2974 * ib_destroy_srq - Destroys the specified SRQ.
2975 * @srq: The SRQ to destroy.
2976 */
2977int ib_destroy_srq(struct ib_srq *srq);
2978
2979/**
2980 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
2981 * @srq: The SRQ to post the work request on.
2982 * @recv_wr: A list of work requests to post on the receive queue.
2983 * @bad_recv_wr: On an immediate failure, this parameter will reference
2984 *   the work request that failed to be posted on the SRQ.
2985 */
2986static inline int ib_post_srq_recv(struct ib_srq *srq,
2987                                   struct ib_recv_wr *recv_wr,
2988                                   struct ib_recv_wr **bad_recv_wr)
2989{
2990        return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
2991}
2992
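/*
 * Example (hedged sketch): posting one receive to an SRQ.  dma_addr,
 * length and my_cookie are placeholders owned by the caller, and the
 * buffer is assumed to be DMA-mapped already.
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = length,
 *		.lkey   = pd->local_dma_lkey,
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id   = my_cookie,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *	struct ib_recv_wr *bad_wr;
 *
 *	ret = ib_post_srq_recv(srq, &wr, &bad_wr);
 */
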
2993/**
2994 * ib_create_qp - Creates a QP associated with the specified protection
2995 *   domain.
2996 * @pd: The protection domain associated with the QP.
2997 * @qp_init_attr: A list of initial attributes required to create the
2998 *   QP.  If QP creation succeeds, then the attributes are updated to
2999 *   the actual capabilities of the created QP.
3000 */
3001struct ib_qp *ib_create_qp(struct ib_pd *pd,
3002                           struct ib_qp_init_attr *qp_init_attr);
3003
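/*
 * Example (hedged sketch, capacities are arbitrary): creating an RC QP
 * that shares a single CQ between its send and receive queues.
 *
 *	struct ib_qp_init_attr qp_attr = {
 *		.send_cq     = cq,
 *		.recv_cq     = cq,
 *		.cap         = { .max_send_wr = 64, .max_recv_wr = 64,
 *				 .max_send_sge = 1, .max_recv_sge = 1 },
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &qp_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */
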
3004/**
3005 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
3006 * @qp: The QP to modify.
3007 * @attr: On input, specifies the QP attributes to modify.  On output,
3008 *   the current values of selected QP attributes are returned.
3009 * @attr_mask: A bit-mask used to specify which attributes of the QP
3010 *   are being modified.
3011 * @udata: pointer to the user's input/output buffer information.
3012 *
3013 * Returns 0 on success, or an appropriate error code on failure.
3014 */
3015int ib_modify_qp_with_udata(struct ib_qp *qp,
3016                            struct ib_qp_attr *attr,
3017                            int attr_mask,
3018                            struct ib_udata *udata);
3019
3020/**
3021 * ib_modify_qp - Modifies the attributes for the specified QP and then
3022 *   transitions the QP to the given state.
3023 * @qp: The QP to modify.
3024 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
3025 *   the current values of selected QP attributes are returned.
3026 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
3027 *   are being modified.
3028 */
3029int ib_modify_qp(struct ib_qp *qp,
3030                 struct ib_qp_attr *qp_attr,
3031                 int qp_attr_mask);
3032
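/*
 * Example (hedged sketch): forcing a QP into the error state during
 * teardown, so outstanding work requests complete with a flush status.
 * Only the state attribute is masked in.
 *
 *	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
 *
 *	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
 */
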
3033/**
3034 * ib_query_qp - Returns the attribute list and current values for the
3035 *   specified QP.
3036 * @qp: The QP to query.
3037 * @qp_attr: The attributes of the specified QP.
3038 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
3039 * @qp_init_attr: Additional attributes of the selected QP.
3040 *
3041 * The qp_attr_mask may be used to limit the query to gathering only the
3042 * selected attributes.
3043 */
3044int ib_query_qp(struct ib_qp *qp,
3045                struct ib_qp_attr *qp_attr,
3046                int qp_attr_mask,
3047                struct ib_qp_init_attr *qp_init_attr);
3048
3049/**
3050 * ib_destroy_qp - Destroys the specified QP.
3051 * @qp: The QP to destroy.
3052 */
3053int ib_destroy_qp(struct ib_qp *qp);
3054
3055/**
3056 * ib_open_qp - Obtain a reference to an existing sharable QP.
3057 * @xrcd: XRC domain
3058 * @qp_open_attr: Attributes identifying the QP to open.
3059 *
3060 * Returns a reference to a sharable QP.
3061 */
3062struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3063                         struct ib_qp_open_attr *qp_open_attr);
3064
3065/**
3066 * ib_close_qp - Release an external reference to a QP.
3067 * @qp: The QP handle to release
3068 *
3069 * The opened QP handle is released by the caller.  The underlying
3070 * shared QP is not destroyed until all internal references are released.
3071 */
3072int ib_close_qp(struct ib_qp *qp);
3073
3074/**
3075 * ib_post_send - Posts a list of work requests to the send queue of
3076 *   the specified QP.
3077 * @qp: The QP to post the work request on.
3078 * @send_wr: A list of work requests to post on the send queue.
3079 * @bad_send_wr: On an immediate failure, this parameter will reference
3080 *   the work request that failed to be posted on the QP.
3081 *
3082 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
3083 * error is returned, the QP state shall not be affected,
3084 * ib_post_send() will return an immediate error after queueing any
3085 * earlier work requests in the list.
3086 */
3087static inline int ib_post_send(struct ib_qp *qp,
3088                               struct ib_send_wr *send_wr,
3089                               struct ib_send_wr **bad_send_wr)
3090{
3091        return qp->device->post_send(qp, send_wr, bad_send_wr);
3092}
3093
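/*
 * Example (hedged sketch): posting a single signaled SEND with one
 * scatter entry.  sge is assumed to describe a DMA-mapped buffer and
 * my_cookie is a caller-owned placeholder.
 *
 *	struct ib_send_wr wr = {
 *		.wr_id      = my_cookie,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	struct ib_send_wr *bad_wr;
 *
 *	ret = ib_post_send(qp, &wr, &bad_wr);
 */
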
3094/**
3095 * ib_post_recv - Posts a list of work requests to the receive queue of
3096 *   the specified QP.
3097 * @qp: The QP to post the work request on.
3098 * @recv_wr: A list of work requests to post on the receive queue.
3099 * @bad_recv_wr: On an immediate failure, this parameter will reference
3100 *   the work request that failed to be posted on the QP.
3101 */
3102static inline int ib_post_recv(struct ib_qp *qp,
3103                               struct ib_recv_wr *recv_wr,
3104                               struct ib_recv_wr **bad_recv_wr)
3105{
3106        return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
3107}
3108
3109struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
3110                int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx);
3111void ib_free_cq(struct ib_cq *cq);
3112int ib_process_cq_direct(struct ib_cq *cq, int budget);
3113
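/*
 * Example (hedged sketch): allocating a CQ that is polled from softirq
 * context.  With ib_alloc_cq(), completions are dispatched through the
 * struct ib_cqe ->done() callback embedded in each work request.
 *
 *	struct ib_cq *cq = ib_alloc_cq(device, my_ctx, 128, 0, IB_POLL_SOFTIRQ);
 *
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	...
 *	ib_free_cq(cq);
 */
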
3114/**
3115 * ib_create_cq - Creates a CQ on the specified device.
3116 * @device: The device on which to create the CQ.
3117 * @comp_handler: A user-specified callback that is invoked when a
3118 *   completion event occurs on the CQ.
3119 * @event_handler: A user-specified callback that is invoked when an
3120 *   asynchronous event not associated with a completion occurs on the CQ.
3121 * @cq_context: Context associated with the CQ returned to the user via
3122 *   the associated completion and event handlers.
3123 * @cq_attr: The attributes with which the CQ should be created.
3124 *
3125 * Users can examine the cq structure to determine the actual CQ size.
3126 */
3127struct ib_cq *ib_create_cq(struct ib_device *device,
3128                           ib_comp_handler comp_handler,
3129                           void (*event_handler)(struct ib_event *, void *),
3130                           void *cq_context,
3131                           const struct ib_cq_init_attr *cq_attr);
3132
3133/**
3134 * ib_resize_cq - Modifies the capacity of the CQ.
3135 * @cq: The CQ to resize.
3136 * @cqe: The minimum size of the CQ.
3137 *
3138 * Users can examine the cq structure to determine the actual CQ size.
3139 */
3140int ib_resize_cq(struct ib_cq *cq, int cqe);
3141
3142/**
3143 * ib_modify_cq - Modifies the moderation parameters of the CQ
3144 * @cq: The CQ to modify.
3145 * @cq_count: number of CQEs that will trigger an event
3146 * @cq_period: max period of time in usec before triggering an event
3148 */
3149int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
3150
3151/**
3152 * ib_destroy_cq - Destroys the specified CQ.
3153 * @cq: The CQ to destroy.
3154 */
3155int ib_destroy_cq(struct ib_cq *cq);
3156
3157/**
3158 * ib_poll_cq - poll a CQ for completion(s)
3159 * @cq: the CQ being polled
3160 * @num_entries: maximum number of completions to return
3161 * @wc: array of at least @num_entries &struct ib_wc where completions
3162 *   will be returned
3163 *
3164 * Poll a CQ for (possibly multiple) completions.  If the return value
3165 * is < 0, an error occurred.  If the return value is >= 0, it is the
3166 * number of completions returned.  If the return value is
3167 * non-negative and < num_entries, then the CQ was emptied.
3168 */
3169static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
3170                             struct ib_wc *wc)
3171{
3172        return cq->device->poll_cq(cq, num_entries, wc);
3173}
3174
3175/**
3176 * ib_peek_cq - Returns the number of unreaped completions currently
3177 *   on the specified CQ.
3178 * @cq: The CQ to peek.
3179 * @wc_cnt: A minimum number of unreaped completions to check for.
3180 *
3181 * If the number of unreaped completions is greater than or equal to wc_cnt,
3182 * this function returns wc_cnt, otherwise, it returns the actual number of
3183 * unreaped completions.
3184 */
3185int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
3186
3187/**
3188 * ib_req_notify_cq - Request completion notification on a CQ.
3189 * @cq: The CQ to generate an event for.
3190 * @flags:
3191 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
3192 *   to request an event on the next solicited event or next work
3193 *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
3194 *   may also be |ed in to request a hint about missed events, as
3195 *   described below.
3196 *
3197 * Return Value:
3198 *    < 0 means an error occurred while requesting notification
3199 *   == 0 means notification was requested successfully, and if
3200 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
3201 *        were missed and it is safe to wait for another event.  In
3202 *        this case it is guaranteed that any work completions added
3203 *        to the CQ since the last CQ poll will trigger a completion
3204 *        notification event.
3205 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
3206 *        in.  It means that the consumer must poll the CQ again to
3207 *        make sure it is empty to avoid missing an event because of a
3208 *        race between requesting notification and an entry being
3209 *        added to the CQ.  This return value means it is possible
3210 *        (but not guaranteed) that a work completion has been added
3211 *        to the CQ since the last poll without triggering a
3212 *        completion notification event.
3213 */
3214static inline int ib_req_notify_cq(struct ib_cq *cq,
3215                                   enum ib_cq_notify_flags flags)
3216{
3217        return cq->device->req_notify_cq(cq, flags);
3218}
3219
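/*
 * Example (hedged sketch): the canonical poll-and-rearm loop.  A positive
 * return from ib_req_notify_cq() with IB_CQ_REPORT_MISSED_EVENTS sends the
 * consumer around the loop again, closing the race between arming the CQ
 * and a new entry being added.  handle_wc() is a hypothetical consumer
 * routine.
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle_wc(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */
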
3220/**
3221 * ib_req_ncomp_notif - Request completion notification when there are
3222 *   at least the specified number of unreaped completions on the CQ.
3223 * @cq: The CQ to generate an event for.
3224 * @wc_cnt: The number of unreaped completions that should be on the
3225 *   CQ before an event is generated.
3226 */
3227static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
3228{
3229        return cq->device->req_ncomp_notif ?
3230                cq->device->req_ncomp_notif(cq, wc_cnt) :
3231                -ENOSYS;
3232}
3233
3234/**
3235 * ib_dma_mapping_error - check a DMA addr for error
3236 * @dev: The device for which the dma_addr was created
3237 * @dma_addr: The DMA address to check
3238 */
3239static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
3240{
3241        return dma_mapping_error(dev->dma_device, dma_addr);
3242}
3243
3244/**
3245 * ib_dma_map_single - Map a kernel virtual address to DMA address
3246 * @dev: The device for which the dma_addr is to be created
3247 * @cpu_addr: The kernel virtual address
3248 * @size: The size of the region in bytes
3249 * @direction: The direction of the DMA
3250 */
3251static inline u64 ib_dma_map_single(struct ib_device *dev,
3252                                    void *cpu_addr, size_t size,
3253                                    enum dma_data_direction direction)
3254{
3255        return dma_map_single(dev->dma_device, cpu_addr, size, direction);
3256}
3257
3258/**
3259 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
3260 * @dev: The device for which the DMA address was created
3261 * @addr: The DMA address
3262 * @size: The size of the region in bytes
3263 * @direction: The direction of the DMA
3264 */
3265static inline void ib_dma_unmap_single(struct ib_device *dev,
3266                                       u64 addr, size_t size,
3267                                       enum dma_data_direction direction)
3268{
3269        dma_unmap_single(dev->dma_device, addr, size, direction);
3270}
3271
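/*
 * Example (hedged sketch): mapping a kernel buffer for device reads,
 * checking the mapping, and unmapping it once the transfer is complete.
 *
 *	u64 dma = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (ib_dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	...
 *	ib_dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */
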
3272/**
3273 * ib_dma_map_page - Map a physical page to DMA address
3274 * @dev: The device for which the dma_addr is to be created
3275 * @page: The page to be mapped
3276 * @offset: The offset within the page
3277 * @size: The size of the region in bytes
3278 * @direction: The direction of the DMA
3279 */
3280static inline u64 ib_dma_map_page(struct ib_device *dev,
3281                                  struct page *page,
3282                                  unsigned long offset,
3283                                  size_t size,
3284                                  enum dma_data_direction direction)
3285{
3286        return dma_map_page(dev->dma_device, page, offset, size, direction);
3287}
3288
3289/**
3290 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
3291 * @dev: The device for which the DMA address was created
3292 * @addr: The DMA address
3293 * @size: The size of the region in bytes
3294 * @direction: The direction of the DMA
3295 */
3296static inline void ib_dma_unmap_page(struct ib_device *dev,
3297                                     u64 addr, size_t size,
3298                                     enum dma_data_direction direction)
3299{
3300        dma_unmap_page(dev->dma_device, addr, size, direction);
3301}
3302
3303/**
3304 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
3305 * @dev: The device for which the DMA addresses are to be created
3306 * @sg: The array of scatter/gather entries
3307 * @nents: The number of scatter/gather entries
3308 * @direction: The direction of the DMA
3309 */
3310static inline int ib_dma_map_sg(struct ib_device *dev,
3311                                struct scatterlist *sg, int nents,
3312                                enum dma_data_direction direction)
3313{
3314        return dma_map_sg(dev->dma_device, sg, nents, direction);
3315}
3316
3317/**
3318 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
3319 * @dev: The device for which the DMA addresses were created
3320 * @sg: The array of scatter/gather entries
3321 * @nents: The number of scatter/gather entries
3322 * @direction: The direction of the DMA
3323 */
3324static inline void ib_dma_unmap_sg(struct ib_device *dev,
3325                                   struct scatterlist *sg, int nents,
3326                                   enum dma_data_direction direction)
3327{
3328        dma_unmap_sg(dev->dma_device, sg, nents, direction);
3329}
3330
3331static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
3332                                      struct scatterlist *sg, int nents,
3333                                      enum dma_data_direction direction,
3334                                      unsigned long dma_attrs)
3335{
3336        return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
3337                                dma_attrs);
3338}
3339
3340static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
3341                                         struct scatterlist *sg, int nents,
3342                                         enum dma_data_direction direction,
3343                                         unsigned long dma_attrs)
3344{
3345        dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
3346}

3347/**
3348 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
3349 * @dev: The device for which the DMA addresses were created
3350 * @sg: The scatter/gather entry
3351 *
3352 * Note: this function is obsolete. To do: change all occurrences of
3353 * ib_sg_dma_address() into sg_dma_address().
3354 */
3355static inline u64 ib_sg_dma_address(struct ib_device *dev,
3356                                    struct scatterlist *sg)
3357{
3358        return sg_dma_address(sg);
3359}
3360
3361/**
3362 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
3363 * @dev: The device for which the DMA addresses were created
3364 * @sg: The scatter/gather entry
3365 *
3366 * Note: this function is obsolete. To do: change all occurrences of
3367 * ib_sg_dma_len() into sg_dma_len().
3368 */
3369static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
3370                                         struct scatterlist *sg)
3371{
3372        return sg_dma_len(sg);
3373}
3374
3375/**
3376 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
3377 * @dev: The device for which the DMA address was created
3378 * @addr: The DMA address
3379 * @size: The size of the region in bytes
3380 * @dir: The direction of the DMA
3381 */
3382static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
3383                                              u64 addr,
3384                                              size_t size,
3385                                              enum dma_data_direction dir)
3386{
3387        dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
3388}
3389
3390/**
3391 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
3392 * @dev: The device for which the DMA address was created
3393 * @addr: The DMA address
3394 * @size: The size of the region in bytes
3395 * @dir: The direction of the DMA
3396 */
3397static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
3398                                                 u64 addr,
3399                                                 size_t size,
3400                                                 enum dma_data_direction dir)
3401{
3402        dma_sync_single_for_device(dev->dma_device, addr, size, dir);
3403}
3404
3405/**
3406 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
3407 * @dev: The device for which the DMA address is requested
3408 * @size: The size of the region to allocate in bytes
3409 * @dma_handle: A pointer for returning the DMA address of the region
3410 * @flag: memory allocator flags
3411 */
3412static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
3413                                           size_t size,
3414                                           dma_addr_t *dma_handle,
3415                                           gfp_t flag)
3416{
3417        return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
3418}
3419
3420/**
3421 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
3422 * @dev: The device for which the DMA addresses were allocated
3423 * @size: The size of the region
3424 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
3425 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
3426 */
3427static inline void ib_dma_free_coherent(struct ib_device *dev,
3428                                        size_t size, void *cpu_addr,
3429                                        dma_addr_t dma_handle)
3430{
3431        dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
3432}
3433
3434/**
3435 * ib_dereg_mr - Deregisters a memory region and removes it from the
3436 *   HCA translation table.
3437 * @mr: The memory region to deregister.
3438 *
3439 * This function can fail, if the memory region has memory windows bound to it.
3440 */
3441int ib_dereg_mr(struct ib_mr *mr);
3442
3443struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
3444                          enum ib_mr_type mr_type,
3445                          u32 max_num_sg);
3446
3447/**
3448 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
3449 *   R_Key and L_Key.
3450 * @mr: struct ib_mr pointer to be updated.
3451 * @newkey: new key to be used.
3452 */
3453static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
3454{
3455        mr->lkey = (mr->lkey & 0xffffff00) | newkey;
3456        mr->rkey = (mr->rkey & 0xffffff00) | newkey;
3457}
3458
3459/**
3460 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
3461 * for calculating a new rkey for type 2 memory windows.
3462 * @rkey: the rkey to increment.
3463 */
3464static inline u32 ib_inc_rkey(u32 rkey)
3465{
3466        const u32 mask = 0x000000ff;
3467        return ((rkey + 1) & mask) | (rkey & ~mask);
3468}
3469
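/*
 * Example (hedged sketch): rotating the key portion of an MR before it is
 * re-registered, a pattern several ULPs use to guard against stale remote
 * accesses.  Only the low byte of the incremented rkey is applied.
 *
 *	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
 */
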
3470/**
3471 * ib_alloc_fmr - Allocates an unmapped fast memory region.
3472 * @pd: The protection domain associated with the unmapped region.
3473 * @mr_access_flags: Specifies the memory access rights.
3474 * @fmr_attr: Attributes of the unmapped region.
3475 *
3476 * A fast memory region must be mapped before it can be used as part of
3477 * a work request.
3478 */
3479struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
3480                            int mr_access_flags,
3481                            struct ib_fmr_attr *fmr_attr);
3482
3483/**
3484 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
3485 * @fmr: The fast memory region to associate with the pages.
3486 * @page_list: An array of physical pages to map to the fast memory region.
3487 * @list_len: The number of pages in page_list.
3488 * @iova: The I/O virtual address to use with the mapped region.
3489 */
3490static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
3491                                  u64 *page_list, int list_len,
3492                                  u64 iova)
3493{
3494        return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
3495}
3496
3497/**
3498 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
3499 * @fmr_list: A linked list of fast memory regions to unmap.
3500 */
3501int ib_unmap_fmr(struct list_head *fmr_list);
3502
3503/**
3504 * ib_dealloc_fmr - Deallocates a fast memory region.
3505 * @fmr: The fast memory region to deallocate.
3506 */
3507int ib_dealloc_fmr(struct ib_fmr *fmr);
3508
3509/**
3510 * ib_attach_mcast - Attaches the specified QP to a multicast group.
3511 * @qp: QP to attach to the multicast group.  The QP must be type
3512 *   IB_QPT_UD.
3513 * @gid: Multicast group GID.
3514 * @lid: Multicast group LID in host byte order.
3515 *
3516 * In order to send and receive multicast packets, subnet
3517 * administration must have created the multicast group and configured
3518 * the fabric appropriately.  The port associated with the specified
3519 * QP must also be a member of the multicast group.
3520 */
3521int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
3522
3523/**
3524 * ib_detach_mcast - Detaches the specified QP from a multicast group.
3525 * @qp: QP to detach from the multicast group.
3526 * @gid: Multicast group GID.
3527 * @lid: Multicast group LID in host byte order.
3528 */
3529int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
3530
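/*
 * Example (hedged sketch): joining and later leaving a multicast group on
 * a UD QP.  mgid and mlid are placeholders for values obtained through
 * subnet administration.
 *
 *	ret = ib_attach_mcast(qp, &mgid, mlid);
 *	...
 *	ret = ib_detach_mcast(qp, &mgid, mlid);
 */
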
3531/**
3532 * ib_alloc_xrcd - Allocates an XRC domain.
3533 * @device: The device on which to allocate the XRC domain.
3534 */
3535struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);
3536
3537/**
3538 * ib_dealloc_xrcd - Deallocates an XRC domain.
3539 * @xrcd: The XRC domain to deallocate.
3540 */
3541int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
3542
3543struct ib_flow *ib_create_flow(struct ib_qp *qp,
3544                               struct ib_flow_attr *flow_attr, int domain);
3545int ib_destroy_flow(struct ib_flow *flow_id);
3546
3547static inline int ib_check_mr_access(int flags)
3548{
3549        /*
3550         * Local write permission is required if remote write or
3551         * remote atomic permission is also requested.
3552         */
3553        if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
3554            !(flags & IB_ACCESS_LOCAL_WRITE))
3555                return -EINVAL;
3556
3557        return 0;
3558}
3559
3560/**
3561 * ib_check_mr_status - lightweight check of MR status.
3562 *     This routine may provide status checks on a selected
3563 *     ib_mr.  The first use is for signature status checks.
3564 *
3565 * @mr: A memory region.
3566 * @check_mask: Bitmask of which checks to perform from
3567 *     ib_mr_status_check enumeration.
3568 * @mr_status: The container of relevant status checks.
3569 *     Failed checks will be indicated in the status bitmask
3570 *     and the relevant info will be in the error item.
3571 */
3572int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
3573                       struct ib_mr_status *mr_status);
3574
3575struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
3576                                            u16 pkey, const union ib_gid *gid,
3577                                            const struct sockaddr *addr);
3578struct ib_wq *ib_create_wq(struct ib_pd *pd,
3579                           struct ib_wq_init_attr *init_attr);
3580int ib_destroy_wq(struct ib_wq *wq);
3581int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
3582                 u32 wq_attr_mask);
3583struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
3584                                                 struct ib_rwq_ind_table_init_attr *
3585                                                 wq_ind_table_init_attr);
3586int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
3587
3588int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
3589                 unsigned int *sg_offset, unsigned int page_size);
3590
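/*
 * Example (hedged sketch): mapping a DMA-mapped scatterlist onto an MR
 * obtained from ib_alloc_mr().  A short map is treated as an error here;
 * real code may retry with a different page size.
 *
 *	n = ib_map_mr_sg(mr, sgl, sg_nents, NULL, PAGE_SIZE);
 *	if (n != sg_nents)
 *		return n < 0 ? n : -EINVAL;
 */
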
3591static inline int
3592ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
3593                  unsigned int *sg_offset, unsigned int page_size)
3594{
3595        int n;
3596
3597        n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
3598        mr->iova = 0;
3599
3600        return n;
3601}
3602
3603int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
3604                unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
3605
3606void ib_drain_rq(struct ib_qp *qp);
3607void ib_drain_sq(struct ib_qp *qp);
3608void ib_drain_qp(struct ib_qp *qp);
3609
3610int ib_resolve_eth_dmac(struct ib_device *device,
3611                        struct rdma_ah_attr *ah_attr);
3612int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);
3613
3614static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
3615{
3616        if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
3617                return attr->roce.dmac;
3618        return NULL;
3619}
3620
3621static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
3622{
3623        if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3624                attr->ib.dlid = (u16)dlid;
3625        else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3626                attr->opa.dlid = dlid;
3627}
3628
3629static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
3630{
3631        if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3632                return attr->ib.dlid;
3633        else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3634                return attr->opa.dlid;
3635        return 0;
3636}
3637
3638static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
3639{
3640        attr->sl = sl;
3641}
3642
3643static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
3644{
3645        return attr->sl;
3646}
3647
3648static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
3649                                         u8 src_path_bits)
3650{
3651        if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3652                attr->ib.src_path_bits = src_path_bits;
3653        else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3654                attr->opa.src_path_bits = src_path_bits;
3655}
3656
3657static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
3658{
3659        if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3660                return attr->ib.src_path_bits;
3661        else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3662                return attr->opa.src_path_bits;
3663        return 0;
3664}
3665
3666static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
3667                                        bool make_grd)
3668{
3669        if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3670                attr->opa.make_grd = make_grd;
3671}
3672
3673static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
3674{
3675        if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3676                return attr->opa.make_grd;
3677        return false;
3678}
3679
3680static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
3681{
3682        attr->port_num = port_num;
3683}
3684
3685static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
3686{
3687        return attr->port_num;
3688}
3689
3690static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
3691                                           u8 static_rate)
3692{
3693        attr->static_rate = static_rate;
3694}
3695
3696static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
3697{
3698        return attr->static_rate;
3699}
3700
3701static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
3702                                        enum ib_ah_flags flag)
3703{
3704        attr->ah_flags = flag;
3705}
3706
3707static inline enum ib_ah_flags
3708                rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
3709{
3710        return attr->ah_flags;
3711}
3712
3713static inline const struct ib_global_route
3714                *rdma_ah_read_grh(const struct rdma_ah_attr *attr)
3715{
3716        return &attr->grh;
3717}
3718
3719/* To retrieve and modify the GRH */
3720static inline struct ib_global_route
3721                *rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
3722{
3723        return &attr->grh;
3724}
3725
3726static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
3727{
3728        struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
3729
3730        memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
3731}
3732
3733static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
3734                                             __be64 prefix)
3735{
3736        struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
3737
3738        grh->dgid.global.subnet_prefix = prefix;
3739}
3740
3741static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
3742                                            __be64 if_id)
3743{
3744        struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
3745
3746        grh->dgid.global.interface_id = if_id;
3747}
3748
3749static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
3750                                   union ib_gid *dgid, u32 flow_label,
3751                                   u8 sgid_index, u8 hop_limit,
3752                                   u8 traffic_class)
3753{
3754        struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
3755
3756        attr->ah_flags = IB_AH_GRH;
3757        if (dgid)
3758                grh->dgid = *dgid;
3759        grh->flow_label = flow_label;
3760        grh->sgid_index = sgid_index;
3761        grh->hop_limit = hop_limit;
3762        grh->traffic_class = traffic_class;
3763}
3764
3765/* Get the AH type */
3766static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
3767                                                       u32 port_num)
3768{
3769        if ((rdma_protocol_roce(dev, port_num)) ||
3770            (rdma_protocol_iwarp(dev, port_num)))
3771                return RDMA_AH_ATTR_TYPE_ROCE;
3772        else if ((rdma_protocol_ib(dev, port_num)) &&
3773                 (rdma_cap_opa_ah(dev, port_num)))
3774                return RDMA_AH_ATTR_TYPE_OPA;
3775        else
3776                return RDMA_AH_ATTR_TYPE_IB;
3777}
3778
3779/**
3780 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
3781 *     In the current implementation the only way to get
3782 *     the 32bit lid is from other sources for OPA.
3783 *     For IB, lids will always be 16bits so cast the
3784 *     value accordingly.
3785 *
3786 * @lid: A 32bit LID
3787 */
3788static inline u16 ib_lid_cpu16(u32 lid)
3789{
3790        WARN_ON_ONCE(lid & 0xFFFF0000);
3791        return (u16)lid;
3792}
3793
3794/**
3795 * ib_lid_be16 - Return lid in 16bit BE encoding.
3796 *
3797 * @lid: A 32bit LID
3798 */
3799static inline __be16 ib_lid_be16(u32 lid)
3800{
3801        WARN_ON_ONCE(lid & 0xFFFF0000);
3802        return cpu_to_be16((u16)lid);
3803}
3804
3805/**
3806 * ib_get_vector_affinity - Get the affinity mappings of a given completion
3807 *   vector
3808 * @device:         the rdma device
3809 * @comp_vector:    index of completion vector
3810 *
3811 * Returns NULL on failure, otherwise a corresponding cpu map of the
3812 * completion vector (returns all-cpus map if the device driver doesn't
3813 * implement get_vector_affinity).
3814 */
3815static inline const struct cpumask *
3816ib_get_vector_affinity(struct ib_device *device, int comp_vector)
3817{
3818        if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
3819            !device->get_vector_affinity)
3820                return NULL;
3821
3822        return device->get_vector_affinity(device, comp_vector);
3824}
3825
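/*
 * Example (hedged sketch): picking a completion vector whose interrupt
 * affinity covers a given CPU, falling back to vector 0 when the driver
 * does not export affinity information.  v, mask and cpu are caller
 * locals.
 *
 *	const struct cpumask *mask;
 *	int v;
 *
 *	for (v = 0; v < device->num_comp_vectors; v++) {
 *		mask = ib_get_vector_affinity(device, v);
 *		if (mask && cpumask_test_cpu(cpu, mask))
 *			return v;
 *	}
 *	return 0;
 */
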
3826#endif /* IB_VERBS_H */
3827