linux/include/rdma/ib_verbs.h
/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/socket.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/netdevice.h>

#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <uapi/rdma/ib_user_verbs.h>
#include <rdma/restrack.h>

#define IB_FW_VERSION_NAME_MAX  ETHTOOL_FWVERS_LEN

extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;

union ib_gid {
        u8      raw[16];
        struct {
                __be64  subnet_prefix;
                __be64  interface_id;
        } global;
};

extern union ib_gid zgid;

enum ib_gid_type {
        /* If link layer is Ethernet, this is RoCE V1 */
        IB_GID_TYPE_IB        = 0,
        IB_GID_TYPE_ROCE      = 0,
        IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
        IB_GID_TYPE_SIZE
};

#define ROCE_V2_UDP_DPORT      4791
struct ib_gid_attr {
        enum ib_gid_type        gid_type;
        struct net_device       *ndev;
};

enum rdma_node_type {
        /* IB values map to NodeInfo:NodeType. */
        RDMA_NODE_IB_CA         = 1,
        RDMA_NODE_IB_SWITCH,
        RDMA_NODE_IB_ROUTER,
        RDMA_NODE_RNIC,
        RDMA_NODE_USNIC,
        RDMA_NODE_USNIC_UDP,
};

enum {
        /* set the local administered indication */
        IB_SA_WELL_KNOWN_GUID   = BIT_ULL(57) | 2,
};

enum rdma_transport_type {
        RDMA_TRANSPORT_IB,
        RDMA_TRANSPORT_IWARP,
        RDMA_TRANSPORT_USNIC,
        RDMA_TRANSPORT_USNIC_UDP
};

enum rdma_protocol_type {
        RDMA_PROTOCOL_IB,
        RDMA_PROTOCOL_IBOE,
        RDMA_PROTOCOL_IWARP,
        RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type);

enum rdma_network_type {
        RDMA_NETWORK_IB,
        RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
        RDMA_NETWORK_IPV4,
        RDMA_NETWORK_IPV6
};

static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
{
        if (network_type == RDMA_NETWORK_IPV4 ||
            network_type == RDMA_NETWORK_IPV6)
                return IB_GID_TYPE_ROCE_UDP_ENCAP;

        /* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
        return IB_GID_TYPE_IB;
}

static inline enum rdma_network_type ib_gid_to_network_type(enum ib_gid_type gid_type,
                                                            union ib_gid *gid)
{
        if (gid_type == IB_GID_TYPE_IB)
                return RDMA_NETWORK_IB;

        if (ipv6_addr_v4mapped((struct in6_addr *)gid))
                return RDMA_NETWORK_IPV4;
        else
                return RDMA_NETWORK_IPV6;
}
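
/*
 * Usage sketch (illustrative only, not part of the API): the two helpers
 * above are inverses at the granularity of the GID type.  "gid" and
 * "gid_type" are hypothetical locals, e.g. taken from an ib_gid_attr:
 *
 *      enum rdma_network_type nt = ib_gid_to_network_type(gid_type, &gid);
 *      enum ib_gid_type gt = ib_network_to_gid_type(nt);
 *
 * For a RoCE v2 GID, nt is RDMA_NETWORK_IPV4 or RDMA_NETWORK_IPV6
 * (depending on whether the GID is a v4-mapped address) and gt is
 * IB_GID_TYPE_ROCE_UDP_ENCAP; for IB/RoCE v1 both helpers map back to
 * IB_GID_TYPE_IB.
 */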

enum rdma_link_layer {
        IB_LINK_LAYER_UNSPECIFIED,
        IB_LINK_LAYER_INFINIBAND,
        IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
        IB_DEVICE_RESIZE_MAX_WR                 = (1 << 0),
        IB_DEVICE_BAD_PKEY_CNTR                 = (1 << 1),
        IB_DEVICE_BAD_QKEY_CNTR                 = (1 << 2),
        IB_DEVICE_RAW_MULTI                     = (1 << 3),
        IB_DEVICE_AUTO_PATH_MIG                 = (1 << 4),
        IB_DEVICE_CHANGE_PHY_PORT               = (1 << 5),
        IB_DEVICE_UD_AV_PORT_ENFORCE            = (1 << 6),
        IB_DEVICE_CURR_QP_STATE_MOD             = (1 << 7),
        IB_DEVICE_SHUTDOWN_PORT                 = (1 << 8),
        /* Not in use, former INIT_TYPE         = (1 << 9),*/
        IB_DEVICE_PORT_ACTIVE_EVENT             = (1 << 10),
        IB_DEVICE_SYS_IMAGE_GUID                = (1 << 11),
        IB_DEVICE_RC_RNR_NAK_GEN                = (1 << 12),
        IB_DEVICE_SRQ_RESIZE                    = (1 << 13),
        IB_DEVICE_N_NOTIFY_CQ                   = (1 << 14),

        /*
         * This device supports a per-device lkey or stag that can be
         * used without performing a memory registration for the local
         * memory.  Note that ULPs should never check this flag, but
         * should instead use the local_dma_lkey field in the ib_pd
         * structure, which will always contain a usable lkey.
         */
        IB_DEVICE_LOCAL_DMA_LKEY                = (1 << 15),
        /* Reserved, old SEND_W_INV             = (1 << 16),*/
        IB_DEVICE_MEM_WINDOW                    = (1 << 17),
        /*
         * Devices should set IB_DEVICE_UD_IP_CSUM if they support
         * insertion of UDP and TCP checksums on outgoing UD IPoIB
         * messages and can verify the validity of checksums for
         * incoming messages.  Setting this flag implies that the
         * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
         */
        IB_DEVICE_UD_IP_CSUM                    = (1 << 18),
        IB_DEVICE_UD_TSO                        = (1 << 19),
        IB_DEVICE_XRC                           = (1 << 20),

        /*
         * This device supports the IB "base memory management extension",
         * which includes support for fast registrations (IB_WR_REG_MR,
         * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
         * also be set by any iWarp device which must support FRs to comply
         * with the iWarp verbs spec.  iWarp devices also support the
         * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
         * stag.
         */
        IB_DEVICE_MEM_MGT_EXTENSIONS            = (1 << 21),
        IB_DEVICE_BLOCK_MULTICAST_LOOPBACK      = (1 << 22),
        IB_DEVICE_MEM_WINDOW_TYPE_2A            = (1 << 23),
        IB_DEVICE_MEM_WINDOW_TYPE_2B            = (1 << 24),
        IB_DEVICE_RC_IP_CSUM                    = (1 << 25),
        /* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
        IB_DEVICE_RAW_IP_CSUM                   = (1 << 26),
        /*
         * Devices should set IB_DEVICE_CROSS_CHANNEL if they
         * support execution of WQEs that involve synchronization
         * of I/O operations with a single completion queue managed
         * by hardware.
         */
        IB_DEVICE_CROSS_CHANNEL                 = (1 << 27),
        IB_DEVICE_MANAGED_FLOW_STEERING         = (1 << 29),
        IB_DEVICE_SIGNATURE_HANDOVER            = (1 << 30),
        IB_DEVICE_ON_DEMAND_PAGING              = (1ULL << 31),
        IB_DEVICE_SG_GAPS_REG                   = (1ULL << 32),
        IB_DEVICE_VIRTUAL_FUNCTION              = (1ULL << 33),
        /* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
        IB_DEVICE_RAW_SCATTER_FCS               = (1ULL << 34),
        IB_DEVICE_RDMA_NETDEV_OPA_VNIC          = (1ULL << 35),
        /* The device supports padding incoming writes to cacheline. */
        IB_DEVICE_PCI_WRITE_END_PADDING         = (1ULL << 36),
};

enum ib_signature_prot_cap {
        IB_PROT_T10DIF_TYPE_1 = 1,
        IB_PROT_T10DIF_TYPE_2 = 1 << 1,
        IB_PROT_T10DIF_TYPE_3 = 1 << 2,
};

enum ib_signature_guard_cap {
        IB_GUARD_T10DIF_CRC     = 1,
        IB_GUARD_T10DIF_CSUM    = 1 << 1,
};

enum ib_atomic_cap {
        IB_ATOMIC_NONE,
        IB_ATOMIC_HCA,
        IB_ATOMIC_GLOB
};

enum ib_odp_general_cap_bits {
        IB_ODP_SUPPORT          = 1 << 0,
        IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
};

enum ib_odp_transport_cap_bits {
        IB_ODP_SUPPORT_SEND     = 1 << 0,
        IB_ODP_SUPPORT_RECV     = 1 << 1,
        IB_ODP_SUPPORT_WRITE    = 1 << 2,
        IB_ODP_SUPPORT_READ     = 1 << 3,
        IB_ODP_SUPPORT_ATOMIC   = 1 << 4,
};

struct ib_odp_caps {
        uint64_t general_caps;
        struct {
                uint32_t  rc_odp_caps;
                uint32_t  uc_odp_caps;
                uint32_t  ud_odp_caps;
        } per_transport_caps;
};

struct ib_rss_caps {
        /* Corresponding bit will be set if qp type from
         * 'enum ib_qp_type' is supported, e.g.
         * supported_qpts |= 1 << IB_QPT_UD
         */
        u32 supported_qpts;
        u32 max_rwq_indirection_tables;
        u32 max_rwq_indirection_table_size;
};

enum ib_tm_cap_flags {
        /*  Support tag matching on RC transport */
        IB_TM_CAP_RC                = 1 << 0,
};

struct ib_tm_caps {
        /* Max size of RNDV header */
        u32 max_rndv_hdr_size;
        /* Max number of entries in tag matching list */
        u32 max_num_tags;
        /* From enum ib_tm_cap_flags */
        u32 flags;
        /* Max number of outstanding list operations */
        u32 max_ops;
        /* Max number of SGE in tag matching entry */
        u32 max_sge;
};

struct ib_cq_init_attr {
        unsigned int    cqe;
        int             comp_vector;
        u32             flags;
};

enum ib_cq_attr_mask {
        IB_CQ_MODERATE = 1 << 0,
};

struct ib_cq_caps {
        u16     max_cq_moderation_count;
        u16     max_cq_moderation_period;
};

struct ib_device_attr {
        u64                     fw_ver;
        __be64                  sys_image_guid;
        u64                     max_mr_size;
        u64                     page_size_cap;
        u32                     vendor_id;
        u32                     vendor_part_id;
        u32                     hw_ver;
        int                     max_qp;
        int                     max_qp_wr;
        u64                     device_cap_flags;
        int                     max_sge;
        int                     max_sge_rd;
        int                     max_cq;
        int                     max_cqe;
        int                     max_mr;
        int                     max_pd;
        int                     max_qp_rd_atom;
        int                     max_ee_rd_atom;
        int                     max_res_rd_atom;
        int                     max_qp_init_rd_atom;
        int                     max_ee_init_rd_atom;
        enum ib_atomic_cap      atomic_cap;
        enum ib_atomic_cap      masked_atomic_cap;
        int                     max_ee;
        int                     max_rdd;
        int                     max_mw;
        int                     max_raw_ipv6_qp;
        int                     max_raw_ethy_qp;
        int                     max_mcast_grp;
        int                     max_mcast_qp_attach;
        int                     max_total_mcast_qp_attach;
        int                     max_ah;
        int                     max_fmr;
        int                     max_map_per_fmr;
        int                     max_srq;
        int                     max_srq_wr;
        int                     max_srq_sge;
        unsigned int            max_fast_reg_page_list_len;
        u16                     max_pkeys;
        u8                      local_ca_ack_delay;
        int                     sig_prot_cap;
        int                     sig_guard_cap;
        struct ib_odp_caps      odp_caps;
        uint64_t                timestamp_mask;
        uint64_t                hca_core_clock; /* in KHZ */
        struct ib_rss_caps      rss_caps;
        u32                     max_wq_type_rq;
        u32                     raw_packet_caps; /* Use ib_raw_packet_caps enum */
        struct ib_tm_caps       tm_caps;
        struct ib_cq_caps       cq_caps;
};

enum ib_mtu {
        IB_MTU_256  = 1,
        IB_MTU_512  = 2,
        IB_MTU_1024 = 3,
        IB_MTU_2048 = 4,
        IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
        switch (mtu) {
        case IB_MTU_256:  return  256;
        case IB_MTU_512:  return  512;
        case IB_MTU_1024: return 1024;
        case IB_MTU_2048: return 2048;
        case IB_MTU_4096: return 4096;
        default:          return -1;
        }
}

static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
        if (mtu >= 4096)
                return IB_MTU_4096;
        else if (mtu >= 2048)
                return IB_MTU_2048;
        else if (mtu >= 1024)
                return IB_MTU_1024;
        else if (mtu >= 512)
                return IB_MTU_512;
        else
                return IB_MTU_256;
}
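
/*
 * Usage sketch (illustrative only): ib_mtu_int_to_enum() rounds down to
 * the nearest IB MTU, so an Ethernet-style MTU of 1500 maps to
 * IB_MTU_1024:
 *
 *      enum ib_mtu mtu = ib_mtu_int_to_enum(1500);
 *      int bytes = ib_mtu_enum_to_int(mtu);
 *
 * Here mtu is IB_MTU_1024 and bytes is 1024.
 */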

enum ib_port_state {
        IB_PORT_NOP             = 0,
        IB_PORT_DOWN            = 1,
        IB_PORT_INIT            = 2,
        IB_PORT_ARMED           = 3,
        IB_PORT_ACTIVE          = 4,
        IB_PORT_ACTIVE_DEFER    = 5
};

enum ib_port_cap_flags {
        IB_PORT_SM                              = 1 <<  1,
        IB_PORT_NOTICE_SUP                      = 1 <<  2,
        IB_PORT_TRAP_SUP                        = 1 <<  3,
        IB_PORT_OPT_IPD_SUP                     = 1 <<  4,
        IB_PORT_AUTO_MIGR_SUP                   = 1 <<  5,
        IB_PORT_SL_MAP_SUP                      = 1 <<  6,
        IB_PORT_MKEY_NVRAM                      = 1 <<  7,
        IB_PORT_PKEY_NVRAM                      = 1 <<  8,
        IB_PORT_LED_INFO_SUP                    = 1 <<  9,
        IB_PORT_SM_DISABLED                     = 1 << 10,
        IB_PORT_SYS_IMAGE_GUID_SUP              = 1 << 11,
        IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP       = 1 << 12,
        IB_PORT_EXTENDED_SPEEDS_SUP             = 1 << 14,
        IB_PORT_CM_SUP                          = 1 << 16,
        IB_PORT_SNMP_TUNNEL_SUP                 = 1 << 17,
        IB_PORT_REINIT_SUP                      = 1 << 18,
        IB_PORT_DEVICE_MGMT_SUP                 = 1 << 19,
        IB_PORT_VENDOR_CLASS_SUP                = 1 << 20,
        IB_PORT_DR_NOTICE_SUP                   = 1 << 21,
        IB_PORT_CAP_MASK_NOTICE_SUP             = 1 << 22,
        IB_PORT_BOOT_MGMT_SUP                   = 1 << 23,
        IB_PORT_LINK_LATENCY_SUP                = 1 << 24,
        IB_PORT_CLIENT_REG_SUP                  = 1 << 25,
        IB_PORT_IP_BASED_GIDS                   = 1 << 26,
};

enum ib_port_width {
        IB_WIDTH_1X     = 1,
        IB_WIDTH_4X     = 2,
        IB_WIDTH_8X     = 4,
        IB_WIDTH_12X    = 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
        switch (width) {
        case IB_WIDTH_1X:  return  1;
        case IB_WIDTH_4X:  return  4;
        case IB_WIDTH_8X:  return  8;
        case IB_WIDTH_12X: return 12;
        default:          return -1;
        }
}

enum ib_port_speed {
        IB_SPEED_SDR    = 1,
        IB_SPEED_DDR    = 2,
        IB_SPEED_QDR    = 4,
        IB_SPEED_FDR10  = 8,
        IB_SPEED_FDR    = 16,
        IB_SPEED_EDR    = 32,
        IB_SPEED_HDR    = 64
};

/**
 * struct rdma_hw_stats
 * @timestamp - Used by the core code to track when the last update was
 * @lifespan - Used by the core code to determine how old the counters
 *   should be before being updated again.  Stored in jiffies, defaults
 *   to 10 milliseconds, drivers can override the default by specifying
 *   their own value during their allocation routine.
 * @names - Array of pointers to static names used for the counters in
 *   the sysfs directory.
 * @num_counters - How many hardware counters there are.  If @names is
 *   shorter than this number, a kernel oops will result.  Driver authors
 *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@names) < num_counters)
 *   in their code to prevent this.
 * @value - Array of u64 counters that are accessed by the sysfs code and
 *   filled in by the driver's get_stats routine
 */
struct rdma_hw_stats {
        unsigned long   timestamp;
        unsigned long   lifespan;
        const char * const *names;
        int             num_counters;
        u64             value[];
};

#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
/**
 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
 *   for drivers.
 * @names - Array of static const char *
 * @num_counters - How many elements are in the array
 * @lifespan - How many milliseconds between updates
 */
static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
                const char * const *names, int num_counters,
                unsigned long lifespan)
{
        struct rdma_hw_stats *stats;

        stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
                        GFP_KERNEL);
        if (!stats)
                return NULL;
        stats->names = names;
        stats->num_counters = num_counters;
        stats->lifespan = msecs_to_jiffies(lifespan);

        return stats;
}
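
/*
 * Usage sketch (illustrative only): a driver's stats allocation routine
 * might wrap this helper as below.  "foo_counter_names" is a hypothetical
 * driver-owned array; the BUILD_BUG_ON follows the advice in the
 * struct rdma_hw_stats kernel-doc above.
 *
 *      static const char * const foo_counter_names[] = {
 *              "rx_pkts",
 *              "tx_pkts",
 *      };
 *
 *      BUILD_BUG_ON(ARRAY_SIZE(foo_counter_names) < 2);
 *      return rdma_alloc_hw_stats_struct(foo_counter_names,
 *                                        ARRAY_SIZE(foo_counter_names),
 *                                        RDMA_HW_STATS_DEFAULT_LIFESPAN);
 */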

/* Define bits for the various pieces of functionality the core needs to
 * support on this port.
 */
/* Management                           0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD            0x00000001
#define RDMA_CORE_CAP_IB_SMI            0x00000002
#define RDMA_CORE_CAP_IB_CM             0x00000004
#define RDMA_CORE_CAP_IW_CM             0x00000008
#define RDMA_CORE_CAP_IB_SA             0x00000010
#define RDMA_CORE_CAP_OPA_MAD           0x00000020

/* Address format                       0x000FF000 */
#define RDMA_CORE_CAP_AF_IB             0x00001000
#define RDMA_CORE_CAP_ETH_AH            0x00002000
#define RDMA_CORE_CAP_OPA_AH            0x00004000

/* Protocol                             0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB           0x00100000
#define RDMA_CORE_CAP_PROT_ROCE         0x00200000
#define RDMA_CORE_CAP_PROT_IWARP        0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
#define RDMA_CORE_CAP_PROT_RAW_PACKET   0x01000000
#define RDMA_CORE_CAP_PROT_USNIC        0x02000000

#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
                                        | RDMA_CORE_CAP_IB_MAD \
                                        | RDMA_CORE_CAP_IB_SMI \
                                        | RDMA_CORE_CAP_IB_CM  \
                                        | RDMA_CORE_CAP_IB_SA  \
                                        | RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
                                        | RDMA_CORE_CAP_IB_MAD  \
                                        | RDMA_CORE_CAP_IB_CM   \
                                        | RDMA_CORE_CAP_AF_IB   \
                                        | RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP                       \
                                        (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
                                        | RDMA_CORE_CAP_IB_MAD  \
                                        | RDMA_CORE_CAP_IB_CM   \
                                        | RDMA_CORE_CAP_AF_IB   \
                                        | RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
                                        | RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
                                        | RDMA_CORE_CAP_OPA_MAD)

#define RDMA_CORE_PORT_RAW_PACKET       (RDMA_CORE_CAP_PROT_RAW_PACKET)

#define RDMA_CORE_PORT_USNIC            (RDMA_CORE_CAP_PROT_USNIC)

struct ib_port_attr {
        u64                     subnet_prefix;
        enum ib_port_state      state;
        enum ib_mtu             max_mtu;
        enum ib_mtu             active_mtu;
        int                     gid_tbl_len;
        u32                     port_cap_flags;
        u32                     max_msg_sz;
        u32                     bad_pkey_cntr;
        u32                     qkey_viol_cntr;
        u16                     pkey_tbl_len;
        u32                     sm_lid;
        u32                     lid;
        u8                      lmc;
        u8                      max_vl_num;
        u8                      sm_sl;
        u8                      subnet_timeout;
        u8                      init_type_reply;
        u8                      active_width;
        u8                      active_speed;
        u8                      phys_state;
        bool                    grh_required;
};

enum ib_device_modify_flags {
        IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
        IB_DEVICE_MODIFY_NODE_DESC      = 1 << 1
};

#define IB_DEVICE_NODE_DESC_MAX 64

struct ib_device_modify {
        u64     sys_image_guid;
        char    node_desc[IB_DEVICE_NODE_DESC_MAX];
};

enum ib_port_modify_flags {
        IB_PORT_SHUTDOWN                = 1,
        IB_PORT_INIT_TYPE               = (1<<2),
        IB_PORT_RESET_QKEY_CNTR         = (1<<3),
        IB_PORT_OPA_MASK_CHG            = (1<<4)
};

struct ib_port_modify {
        u32     set_port_cap_mask;
        u32     clr_port_cap_mask;
        u8      init_type;
};

enum ib_event_type {
        IB_EVENT_CQ_ERR,
        IB_EVENT_QP_FATAL,
        IB_EVENT_QP_REQ_ERR,
        IB_EVENT_QP_ACCESS_ERR,
        IB_EVENT_COMM_EST,
        IB_EVENT_SQ_DRAINED,
        IB_EVENT_PATH_MIG,
        IB_EVENT_PATH_MIG_ERR,
        IB_EVENT_DEVICE_FATAL,
        IB_EVENT_PORT_ACTIVE,
        IB_EVENT_PORT_ERR,
        IB_EVENT_LID_CHANGE,
        IB_EVENT_PKEY_CHANGE,
        IB_EVENT_SM_CHANGE,
        IB_EVENT_SRQ_ERR,
        IB_EVENT_SRQ_LIMIT_REACHED,
        IB_EVENT_QP_LAST_WQE_REACHED,
        IB_EVENT_CLIENT_REREGISTER,
        IB_EVENT_GID_CHANGE,
        IB_EVENT_WQ_FATAL,
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event);

struct ib_event {
        struct ib_device        *device;
        union {
                struct ib_cq    *cq;
                struct ib_qp    *qp;
                struct ib_srq   *srq;
                struct ib_wq    *wq;
                u8              port_num;
        } element;
        enum ib_event_type      event;
};

struct ib_event_handler {
        struct ib_device *device;
        void            (*handler)(struct ib_event_handler *, struct ib_event *);
        struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)          \
        do {                                                    \
                (_ptr)->device  = _device;                      \
                (_ptr)->handler = _handler;                     \
                INIT_LIST_HEAD(&(_ptr)->list);                  \
        } while (0)
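
/*
 * Usage sketch (illustrative only): a consumer embeds an
 * ib_event_handler, initializes it with INIT_IB_EVENT_HANDLER, and then
 * registers it with ib_register_event_handler(), declared later in this
 * file.  "my_handler" and "my_event_cb" are hypothetical names.
 *
 *      static struct ib_event_handler my_handler;
 *
 *      INIT_IB_EVENT_HANDLER(&my_handler, device, my_event_cb);
 *      ib_register_event_handler(&my_handler);
 */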

struct ib_global_route {
        union ib_gid    dgid;
        u32             flow_label;
        u8              sgid_index;
        u8              hop_limit;
        u8              traffic_class;
};

struct ib_grh {
        __be32          version_tclass_flow;
        __be16          paylen;
        u8              next_hdr;
        u8              hop_limit;
        union ib_gid    sgid;
        union ib_gid    dgid;
};

union rdma_network_hdr {
        struct ib_grh ibgrh;
        struct {
                /* The IB spec states that if it's IPv4, the header
                 * is located in the last 20 bytes of the GRH.
                 */
                u8              reserved[20];
                struct iphdr    roce4grh;
        };
};

#define IB_QPN_MASK             0xFFFFFF

enum {
        IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE       cpu_to_be16(0xFFFF)
#define IB_MULTICAST_LID_BASE   cpu_to_be16(0xC000)

enum ib_ah_flags {
        IB_AH_GRH       = 1
};

enum ib_rate {
        IB_RATE_PORT_CURRENT = 0,
        IB_RATE_2_5_GBPS = 2,
        IB_RATE_5_GBPS   = 5,
        IB_RATE_10_GBPS  = 3,
        IB_RATE_20_GBPS  = 6,
        IB_RATE_30_GBPS  = 4,
        IB_RATE_40_GBPS  = 7,
        IB_RATE_60_GBPS  = 8,
        IB_RATE_80_GBPS  = 9,
        IB_RATE_120_GBPS = 10,
        IB_RATE_14_GBPS  = 11,
        IB_RATE_56_GBPS  = 12,
        IB_RATE_112_GBPS = 13,
        IB_RATE_168_GBPS = 14,
        IB_RATE_25_GBPS  = 15,
        IB_RATE_100_GBPS = 16,
        IB_RATE_200_GBPS = 17,
        IB_RATE_300_GBPS = 18
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);


/**
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG:       memory region that is used for
 *                            normal registration
 * @IB_MR_TYPE_SIGNATURE:     memory region that is used for
 *                            signature operations (data-integrity
 *                            capable regions)
 * @IB_MR_TYPE_SG_GAPS:       memory region that is capable of registering
 *                            any arbitrary sg list (without the normal
 *                            mr constraints - see ib_map_mr_sg)
 */
enum ib_mr_type {
        IB_MR_TYPE_MEM_REG,
        IB_MR_TYPE_SIGNATURE,
        IB_MR_TYPE_SG_GAPS,
};

/**
 * Signature types
 * IB_SIG_TYPE_NONE: Unprotected.
 * IB_SIG_TYPE_T10_DIF: Type T10-DIF
 */
enum ib_signature_type {
        IB_SIG_TYPE_NONE,
        IB_SIG_TYPE_T10_DIF,
};

/**
 * Signature T10-DIF block-guard types
 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
 * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
 */
enum ib_t10_dif_bg_type {
        IB_T10DIF_CRC,
        IB_T10DIF_CSUM
};

/**
 * struct ib_t10_dif_domain - Parameters specific for T10-DIF
 *     domain.
 * @bg_type: T10-DIF block guard type (CRC|CSUM)
 * @pi_interval: protection information interval.
 * @bg: seed of guard computation.
 * @app_tag: application tag of guard block
 * @ref_tag: initial guard block reference tag.
 * @ref_remap: Indicate whether the reftag increments with each block
 * @app_escape: Indicate to skip block check if apptag=0xffff
 * @ref_escape: Indicate to skip block check if reftag=0xffffffff
 * @apptag_check_mask: check bitmask of application tag.
 */
struct ib_t10_dif_domain {
        enum ib_t10_dif_bg_type bg_type;
        u16                     pi_interval;
        u16                     bg;
        u16                     app_tag;
        u32                     ref_tag;
        bool                    ref_remap;
        bool                    app_escape;
        bool                    ref_escape;
        u16                     apptag_check_mask;
};

/**
 * struct ib_sig_domain - Parameters for signature domain
 * @sig_type: specific signature type
 * @sig: union of all signature domain attributes that may
 *     be used to set domain layout.
 */
struct ib_sig_domain {
        enum ib_signature_type sig_type;
        union {
                struct ib_t10_dif_domain dif;
        } sig;
};

/**
 * struct ib_sig_attrs - Parameters for signature handover operation
 * @check_mask: bitmask for signature byte check (8 bytes)
 * @mem: memory domain layout descriptor.
 * @wire: wire domain layout descriptor.
 */
struct ib_sig_attrs {
        u8                      check_mask;
        struct ib_sig_domain    mem;
        struct ib_sig_domain    wire;
};

enum ib_sig_err_type {
        IB_SIG_BAD_GUARD,
        IB_SIG_BAD_REFTAG,
        IB_SIG_BAD_APPTAG,
};

/**
 * struct ib_sig_err - signature error descriptor
 */
struct ib_sig_err {
        enum ib_sig_err_type    err_type;
        u32                     expected;
        u32                     actual;
        u64                     sig_err_offset;
        u32                     key;
};

enum ib_mr_status_check {
        IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
        u32                 fail_status;
        struct ib_sig_err   sig_err;
};
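
/*
 * Usage sketch (illustrative only): after a signature handover operation
 * completes, a consumer can query the MR for integrity errors with
 * ib_check_mr_status(), declared later in this file.
 * "handle_sig_error" is a hypothetical consumer function.
 *
 *      struct ib_mr_status status;
 *
 *      if (!ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &status) &&
 *          (status.fail_status & IB_MR_CHECK_SIG_STATUS))
 *              handle_sig_error(&status.sig_err);
 */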

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

enum rdma_ah_attr_type {
        RDMA_AH_ATTR_TYPE_UNDEFINED,
        RDMA_AH_ATTR_TYPE_IB,
        RDMA_AH_ATTR_TYPE_ROCE,
        RDMA_AH_ATTR_TYPE_OPA,
};

struct ib_ah_attr {
        u16                     dlid;
        u8                      src_path_bits;
};

struct roce_ah_attr {
        u8                      dmac[ETH_ALEN];
};

struct opa_ah_attr {
        u32                     dlid;
        u8                      src_path_bits;
        bool                    make_grd;
};

struct rdma_ah_attr {
        struct ib_global_route  grh;
        u8                      sl;
        u8                      static_rate;
        u8                      port_num;
        u8                      ah_flags;
        enum rdma_ah_attr_type type;
        union {
                struct ib_ah_attr ib;
                struct roce_ah_attr roce;
                struct opa_ah_attr opa;
        };
};

enum ib_wc_status {
        IB_WC_SUCCESS,
        IB_WC_LOC_LEN_ERR,
        IB_WC_LOC_QP_OP_ERR,
        IB_WC_LOC_EEC_OP_ERR,
        IB_WC_LOC_PROT_ERR,
        IB_WC_WR_FLUSH_ERR,
        IB_WC_MW_BIND_ERR,
        IB_WC_BAD_RESP_ERR,
        IB_WC_LOC_ACCESS_ERR,
        IB_WC_REM_INV_REQ_ERR,
        IB_WC_REM_ACCESS_ERR,
        IB_WC_REM_OP_ERR,
        IB_WC_RETRY_EXC_ERR,
        IB_WC_RNR_RETRY_EXC_ERR,
        IB_WC_LOC_RDD_VIOL_ERR,
        IB_WC_REM_INV_RD_REQ_ERR,
        IB_WC_REM_ABORT_ERR,
        IB_WC_INV_EECN_ERR,
        IB_WC_INV_EEC_STATE_ERR,
        IB_WC_FATAL_ERR,
        IB_WC_RESP_TIMEOUT_ERR,
        IB_WC_GENERAL_ERR
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
        IB_WC_SEND,
        IB_WC_RDMA_WRITE,
        IB_WC_RDMA_READ,
        IB_WC_COMP_SWAP,
        IB_WC_FETCH_ADD,
        IB_WC_LSO,
        IB_WC_LOCAL_INV,
        IB_WC_REG_MR,
        IB_WC_MASKED_COMP_SWAP,
        IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
        IB_WC_RECV                      = 1 << 7,
        IB_WC_RECV_RDMA_WITH_IMM
};
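
/*
 * Usage sketch (illustrative only): per the comment above, a completion
 * can be classified as a receive without enumerating every opcode.
 * "handle_recv" and "handle_send_comp" are hypothetical.
 *
 *      if (wc->opcode & IB_WC_RECV)
 *              handle_recv(wc);
 *      else
 *              handle_send_comp(wc);
 */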

enum ib_wc_flags {
        IB_WC_GRH               = 1,
        IB_WC_WITH_IMM          = (1<<1),
        IB_WC_WITH_INVALIDATE   = (1<<2),
        IB_WC_IP_CSUM_OK        = (1<<3),
        IB_WC_WITH_SMAC         = (1<<4),
        IB_WC_WITH_VLAN         = (1<<5),
        IB_WC_WITH_NETWORK_HDR_TYPE     = (1<<6),
};

struct ib_wc {
        union {
                u64             wr_id;
                struct ib_cqe   *wr_cqe;
        };
        enum ib_wc_status       status;
        enum ib_wc_opcode       opcode;
        u32                     vendor_err;
        u32                     byte_len;
        struct ib_qp           *qp;
        union {
                __be32          imm_data;
                u32             invalidate_rkey;
        } ex;
        u32                     src_qp;
        u32                     slid;
        int                     wc_flags;
        u16                     pkey_index;
        u8                      sl;
        u8                      dlid_path_bits;
        u8                      port_num;       /* valid only for DR SMPs on switches */
        u8                      smac[ETH_ALEN];
        u16                     vlan_id;
        u8                      network_hdr_type;
};

enum ib_cq_notify_flags {
        IB_CQ_SOLICITED                 = 1 << 0,
        IB_CQ_NEXT_COMP                 = 1 << 1,
        IB_CQ_SOLICITED_MASK            = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
        IB_CQ_REPORT_MISSED_EVENTS      = 1 << 2,
};

enum ib_srq_type {
        IB_SRQT_BASIC,
        IB_SRQT_XRC,
        IB_SRQT_TM,
};

static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
{
        return srq_type == IB_SRQT_XRC ||
               srq_type == IB_SRQT_TM;
}

enum ib_srq_attr_mask {
        IB_SRQ_MAX_WR   = 1 << 0,
        IB_SRQ_LIMIT    = 1 << 1,
};

struct ib_srq_attr {
        u32     max_wr;
        u32     max_sge;
        u32     srq_limit;
};

struct ib_srq_init_attr {
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *srq_context;
        struct ib_srq_attr      attr;
        enum ib_srq_type        srq_type;

        struct {
                struct ib_cq   *cq;
                union {
                        struct {
                                struct ib_xrcd *xrcd;
                        } xrc;

                        struct {
                                u32             max_num_tags;
                        } tag_matching;
                };
        } ext;
};

struct ib_qp_cap {
        u32     max_send_wr;
        u32     max_recv_wr;
        u32     max_send_sge;
        u32     max_recv_sge;
        u32     max_inline_data;

        /*
         * Maximum number of rdma_rw_ctx structures in flight at a time.
         * ib_create_qp() will calculate the right number of needed WRs
         * and MRs based on this.
         */
        u32     max_rdma_ctxs;
};

enum ib_sig_type {
        IB_SIGNAL_ALL_WR,
        IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
        /*
         * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
         * here (and in that order) since the MAD layer uses them as
         * indices into a 2-entry table.
         */
        IB_QPT_SMI,
        IB_QPT_GSI,

        IB_QPT_RC,
        IB_QPT_UC,
        IB_QPT_UD,
        IB_QPT_RAW_IPV6,
        IB_QPT_RAW_ETHERTYPE,
        IB_QPT_RAW_PACKET = 8,
        IB_QPT_XRC_INI = 9,
        IB_QPT_XRC_TGT,
        IB_QPT_MAX,
        IB_QPT_DRIVER = 0xFF,
        /* Reserve a range for qp types internal to the low level driver.
         * These qp types will not be visible at the IB core layer, so the
         * IB_QPT_MAX usages should not be affected in the core layer
         */
        IB_QPT_RESERVED1 = 0x1000,
        IB_QPT_RESERVED2,
        IB_QPT_RESERVED3,
        IB_QPT_RESERVED4,
        IB_QPT_RESERVED5,
        IB_QPT_RESERVED6,
        IB_QPT_RESERVED7,
        IB_QPT_RESERVED8,
        IB_QPT_RESERVED9,
        IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
        IB_QP_CREATE_IPOIB_UD_LSO               = 1 << 0,
        IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK   = 1 << 1,
        IB_QP_CREATE_CROSS_CHANNEL              = 1 << 2,
        IB_QP_CREATE_MANAGED_SEND               = 1 << 3,
        IB_QP_CREATE_MANAGED_RECV               = 1 << 4,
        IB_QP_CREATE_NETIF_QP                   = 1 << 5,
        IB_QP_CREATE_SIGNATURE_EN               = 1 << 6,
        /* FREE                                 = 1 << 7, */
        IB_QP_CREATE_SCATTER_FCS                = 1 << 8,
        IB_QP_CREATE_CVLAN_STRIPPING            = 1 << 9,
        IB_QP_CREATE_SOURCE_QPN                 = 1 << 10,
        IB_QP_CREATE_PCI_WRITE_END_PADDING      = 1 << 11,
        /* reserve bits 26-31 for low level drivers' internal use */
        IB_QP_CREATE_RESERVED_START             = 1 << 26,
        IB_QP_CREATE_RESERVED_END               = 1 << 31,
};

/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
 * callback to destroy the passed in QP.
 */

struct ib_qp_init_attr {
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *qp_context;
        struct ib_cq           *send_cq;
        struct ib_cq           *recv_cq;
        struct ib_srq          *srq;
        struct ib_xrcd         *xrcd;     /* XRC TGT QPs only */
        struct ib_qp_cap        cap;
        enum ib_sig_type        sq_sig_type;
        enum ib_qp_type         qp_type;
        enum ib_qp_create_flags create_flags;

        /*
         * Only needed for special QP types, or when using the RW API.
         */
        u8                      port_num;
        struct ib_rwq_ind_table *rwq_ind_tbl;
        u32                     source_qpn;
};
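
/*
 * Usage sketch (illustrative only): creating an RC QP with
 * ib_create_qp(), declared later in this file.  All queue sizes below
 * are hypothetical.
 *
 *      struct ib_qp_init_attr init_attr = {
 *              .send_cq     = cq,
 *              .recv_cq     = cq,
 *              .cap         = { .max_send_wr  = 128,
 *                               .max_recv_wr  = 128,
 *                               .max_send_sge = 1,
 *                               .max_recv_sge = 1 },
 *              .sq_sig_type = IB_SIGNAL_REQ_WR,
 *              .qp_type     = IB_QPT_RC,
 *      };
 *      struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *
 *      if (IS_ERR(qp))
 *              return PTR_ERR(qp);
 */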

struct ib_qp_open_attr {
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *qp_context;
        u32                     qp_num;
        enum ib_qp_type         qp_type;
};

enum ib_rnr_timeout {
        IB_RNR_TIMER_655_36 =  0,
        IB_RNR_TIMER_000_01 =  1,
        IB_RNR_TIMER_000_02 =  2,
        IB_RNR_TIMER_000_03 =  3,
        IB_RNR_TIMER_000_04 =  4,
        IB_RNR_TIMER_000_06 =  5,
        IB_RNR_TIMER_000_08 =  6,
        IB_RNR_TIMER_000_12 =  7,
        IB_RNR_TIMER_000_16 =  8,
        IB_RNR_TIMER_000_24 =  9,
        IB_RNR_TIMER_000_32 = 10,
        IB_RNR_TIMER_000_48 = 11,
        IB_RNR_TIMER_000_64 = 12,
        IB_RNR_TIMER_000_96 = 13,
        IB_RNR_TIMER_001_28 = 14,
        IB_RNR_TIMER_001_92 = 15,
        IB_RNR_TIMER_002_56 = 16,
        IB_RNR_TIMER_003_84 = 17,
        IB_RNR_TIMER_005_12 = 18,
        IB_RNR_TIMER_007_68 = 19,
        IB_RNR_TIMER_010_24 = 20,
        IB_RNR_TIMER_015_36 = 21,
        IB_RNR_TIMER_020_48 = 22,
        IB_RNR_TIMER_030_72 = 23,
        IB_RNR_TIMER_040_96 = 24,
        IB_RNR_TIMER_061_44 = 25,
        IB_RNR_TIMER_081_92 = 26,
        IB_RNR_TIMER_122_88 = 27,
        IB_RNR_TIMER_163_84 = 28,
        IB_RNR_TIMER_245_76 = 29,
        IB_RNR_TIMER_327_68 = 30,
        IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
        IB_QP_STATE                     = 1,
        IB_QP_CUR_STATE                 = (1<<1),
        IB_QP_EN_SQD_ASYNC_NOTIFY       = (1<<2),
        IB_QP_ACCESS_FLAGS              = (1<<3),
        IB_QP_PKEY_INDEX                = (1<<4),
        IB_QP_PORT                      = (1<<5),
        IB_QP_QKEY                      = (1<<6),
        IB_QP_AV                        = (1<<7),
        IB_QP_PATH_MTU                  = (1<<8),
        IB_QP_TIMEOUT                   = (1<<9),
        IB_QP_RETRY_CNT                 = (1<<10),
        IB_QP_RNR_RETRY                 = (1<<11),
        IB_QP_RQ_PSN                    = (1<<12),
        IB_QP_MAX_QP_RD_ATOMIC          = (1<<13),
        IB_QP_ALT_PATH                  = (1<<14),
        IB_QP_MIN_RNR_TIMER             = (1<<15),
        IB_QP_SQ_PSN                    = (1<<16),
        IB_QP_MAX_DEST_RD_ATOMIC        = (1<<17),
        IB_QP_PATH_MIG_STATE            = (1<<18),
        IB_QP_CAP                       = (1<<19),
        IB_QP_DEST_QPN                  = (1<<20),
        IB_QP_RESERVED1                 = (1<<21),
        IB_QP_RESERVED2                 = (1<<22),
        IB_QP_RESERVED3                 = (1<<23),
        IB_QP_RESERVED4                 = (1<<24),
        IB_QP_RATE_LIMIT                = (1<<25),
};

enum ib_qp_state {
        IB_QPS_RESET,
        IB_QPS_INIT,
        IB_QPS_RTR,
        IB_QPS_RTS,
        IB_QPS_SQD,
        IB_QPS_SQE,
        IB_QPS_ERR
};

enum ib_mig_state {
        IB_MIG_MIGRATED,
        IB_MIG_REARM,
        IB_MIG_ARMED
};

enum ib_mw_type {
        IB_MW_TYPE_1 = 1,
        IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
        enum ib_qp_state        qp_state;
        enum ib_qp_state        cur_qp_state;
        enum ib_mtu             path_mtu;
        enum ib_mig_state       path_mig_state;
        u32                     qkey;
        u32                     rq_psn;
        u32                     sq_psn;
        u32                     dest_qp_num;
        int                     qp_access_flags;
        struct ib_qp_cap        cap;
        struct rdma_ah_attr     ah_attr;
        struct rdma_ah_attr     alt_ah_attr;
        u16                     pkey_index;
        u16                     alt_pkey_index;
        u8                      en_sqd_async_notify;
        u8                      sq_draining;
        u8                      max_rd_atomic;
        u8                      max_dest_rd_atomic;
        u8                      min_rnr_timer;
        u8                      port_num;
        u8                      timeout;
        u8                      retry_cnt;
        u8                      rnr_retry;
        u8                      alt_port_num;
        u8                      alt_timeout;
        u32                     rate_limit;
};

enum ib_wr_opcode {
        IB_WR_RDMA_WRITE,
        IB_WR_RDMA_WRITE_WITH_IMM,
        IB_WR_SEND,
        IB_WR_SEND_WITH_IMM,
        IB_WR_RDMA_READ,
        IB_WR_ATOMIC_CMP_AND_SWP,
        IB_WR_ATOMIC_FETCH_AND_ADD,
        IB_WR_LSO,
        IB_WR_SEND_WITH_INV,
        IB_WR_RDMA_READ_WITH_INV,
        IB_WR_LOCAL_INV,
        IB_WR_REG_MR,
        IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
        IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
        IB_WR_REG_SIG_MR,
        /* reserve values for low level drivers' internal use.
         * These values will not be used at all in the ib core layer.
         */
        IB_WR_RESERVED1 = 0xf0,
        IB_WR_RESERVED2,
        IB_WR_RESERVED3,
        IB_WR_RESERVED4,
        IB_WR_RESERVED5,
        IB_WR_RESERVED6,
        IB_WR_RESERVED7,
        IB_WR_RESERVED8,
        IB_WR_RESERVED9,
        IB_WR_RESERVED10,
};

enum ib_send_flags {
        IB_SEND_FENCE           = 1,
        IB_SEND_SIGNALED        = (1<<1),
        IB_SEND_SOLICITED       = (1<<2),
        IB_SEND_INLINE          = (1<<3),
        IB_SEND_IP_CSUM         = (1<<4),

        /* reserve bits 26-31 for low level drivers' internal use */
        IB_SEND_RESERVED_START  = (1 << 26),
        IB_SEND_RESERVED_END    = (1 << 31),
};

struct ib_sge {
        u64     addr;
        u32     length;
        u32     lkey;
};

struct ib_cqe {
        void (*done)(struct ib_cq *cq, struct ib_wc *wc);
};
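
/*
 * Usage sketch (illustrative only): with wr_cqe-based completions a
 * consumer embeds an ib_cqe in its own request structure and recovers
 * the request in the done callback via container_of().  "foo_req" and
 * "foo_done" are hypothetical.
 *
 *      struct foo_req {
 *              struct ib_cqe   cqe;
 *              struct completion done;
 *      };
 *
 *      static void foo_done(struct ib_cq *cq, struct ib_wc *wc)
 *      {
 *              struct foo_req *req =
 *                      container_of(wc->wr_cqe, struct foo_req, cqe);
 *
 *              complete(&req->done);
 *      }
 */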

struct ib_send_wr {
        struct ib_send_wr      *next;
        union {
                u64             wr_id;
                struct ib_cqe   *wr_cqe;
        };
        struct ib_sge          *sg_list;
        int                     num_sge;
        enum ib_wr_opcode       opcode;
        int                     send_flags;
        union {
                __be32          imm_data;
                u32             invalidate_rkey;
        } ex;
};

struct ib_rdma_wr {
        struct ib_send_wr       wr;
        u64                     remote_addr;
        u32                     rkey;
};

static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
{
        return container_of(wr, struct ib_rdma_wr, wr);
}
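
/*
 * Usage sketch (illustrative only): posting an RDMA WRITE with
 * ib_post_send(), declared later in this file.  "sge", "raddr" and
 * "rkey" are hypothetical.
 *
 *      struct ib_send_wr *bad_wr;
 *      struct ib_rdma_wr wr = {
 *              .wr = {
 *                      .opcode     = IB_WR_RDMA_WRITE,
 *                      .send_flags = IB_SEND_SIGNALED,
 *                      .sg_list    = &sge,
 *                      .num_sge    = 1,
 *              },
 *              .remote_addr = raddr,
 *              .rkey        = rkey,
 *      };
 *      int ret = ib_post_send(qp, &wr.wr, &bad_wr);
 */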

struct ib_atomic_wr {
        struct ib_send_wr       wr;
        u64                     remote_addr;
        u64                     compare_add;
        u64                     swap;
        u64                     compare_add_mask;
        u64                     swap_mask;
        u32                     rkey;
};

static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr)
{
        return container_of(wr, struct ib_atomic_wr, wr);
}

struct ib_ud_wr {
        struct ib_send_wr       wr;
        struct ib_ah            *ah;
        void                    *header;
        int                     hlen;
        int                     mss;
        u32                     remote_qpn;
        u32                     remote_qkey;
        u16                     pkey_index; /* valid for GSI only */
        u8                      port_num;   /* valid for DR SMPs on switch only */
};

static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr)
{
        return container_of(wr, struct ib_ud_wr, wr);
}

struct ib_reg_wr {
        struct ib_send_wr       wr;
        struct ib_mr            *mr;
        u32                     key;
        int                     access;
};

static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr)
{
        return container_of(wr, struct ib_reg_wr, wr);
}

struct ib_sig_handover_wr {
        struct ib_send_wr       wr;
        struct ib_sig_attrs    *sig_attrs;
        struct ib_mr           *sig_mr;
        int                     access_flags;
        struct ib_sge          *prot;
};

static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr)
{
        return container_of(wr, struct ib_sig_handover_wr, wr);
}

struct ib_recv_wr {
        struct ib_recv_wr      *next;
        union {
                u64             wr_id;
                struct ib_cqe   *wr_cqe;
        };
        struct ib_sge          *sg_list;
        int                     num_sge;
};

enum ib_access_flags {
        IB_ACCESS_LOCAL_WRITE   = 1,
        IB_ACCESS_REMOTE_WRITE  = (1<<1),
        IB_ACCESS_REMOTE_READ   = (1<<2),
        IB_ACCESS_REMOTE_ATOMIC = (1<<3),
        IB_ACCESS_MW_BIND       = (1<<4),
        IB_ZERO_BASED           = (1<<5),
        IB_ACCESS_ON_DEMAND     = (1<<6),
        IB_ACCESS_HUGETLB       = (1<<7),
};

/*
 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
 * are hidden here instead of a uapi header!
 */
enum ib_mr_rereg_flags {
        IB_MR_REREG_TRANS       = 1,
        IB_MR_REREG_PD          = (1<<1),
        IB_MR_REREG_ACCESS      = (1<<2),
        IB_MR_REREG_SUPPORTED   = ((IB_MR_REREG_ACCESS << 1) - 1)
};

struct ib_fmr_attr {
        int     max_pages;
        int     max_maps;
        u8      page_shift;
};

struct ib_umem;

enum rdma_remove_reason {
        /* Userspace requested uobject deletion. Call could fail */
        RDMA_REMOVE_DESTROY,
        /* Context deletion. This call should delete the actual object itself */
        RDMA_REMOVE_CLOSE,
        /* Driver is being hot-unplugged. This call should delete the actual object itself */
        RDMA_REMOVE_DRIVER_REMOVE,
        /* Context is being cleaned-up, but commit was just completed */
        RDMA_REMOVE_DURING_CLEANUP,
};

struct ib_rdmacg_object {
#ifdef CONFIG_CGROUP_RDMA
        struct rdma_cgroup      *cg;            /* owner rdma cgroup */
#endif
};

struct ib_ucontext {
        struct ib_device       *device;
        struct ib_uverbs_file  *ufile;
        int                     closing;

        /* protects the uobjects list */
        struct mutex            uobjects_lock;
        struct list_head        uobjects;
        /* protects cleanup process from other actions */
        struct rw_semaphore     cleanup_rwsem;
        enum rdma_remove_reason cleanup_reason;

        struct pid             *tgid;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        struct rb_root_cached   umem_tree;
        /*
         * Protects .umem_tree, as well as odp_mrs_count and
         * mmu notifier registration.
         */
        struct rw_semaphore     umem_rwsem;
        void (*invalidate_range)(struct ib_umem *umem,
                                 unsigned long start, unsigned long end);

        struct mmu_notifier     mn;
        atomic_t                notifier_count;
        /* A list of umems that don't have private mmu notifier counters yet. */
        struct list_head        no_private_counters;
        int                     odp_mrs_count;
#endif

        struct ib_rdmacg_object cg_obj;
};

struct ib_uobject {
        u64                     user_handle;    /* handle given to us by userspace */
        struct ib_ucontext     *context;        /* associated user context */
        void                   *object;         /* containing object */
        struct list_head        list;           /* link to context's list */
        struct ib_rdmacg_object cg_obj;         /* rdmacg object */
        int                     id;             /* index into kernel idr */
        struct kref             ref;
        atomic_t                usecnt;         /* protects exclusive access */
        struct rcu_head         rcu;            /* kfree_rcu() overhead */

        const struct uverbs_obj_type *type;
};

struct ib_uobject_file {
        struct ib_uobject       uobj;
        /* ufile contains the lock between context release and file close */
        struct ib_uverbs_file   *ufile;
};

struct ib_udata {
        const void __user *inbuf;
        void __user *outbuf;
        size_t       inlen;
        size_t       outlen;
};
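
/*
 * Usage sketch (illustrative only): drivers typically move their private
 * command/response structures through ib_udata with
 * ib_copy_from_udata()/ib_copy_to_udata(), defined later in this file.
 * "struct foo_create_cq_cmd" is hypothetical.
 *
 *      struct foo_create_cq_cmd cmd;
 *
 *      if (udata->inlen < sizeof(cmd))
 *              return -EINVAL;
 *      if (ib_copy_from_udata(&cmd, udata, sizeof(cmd)))
 *              return -EFAULT;
 */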

struct ib_pd {
        u32                     local_dma_lkey;
        u32                     flags;
        struct ib_device       *device;
        struct ib_uobject      *uobject;
        atomic_t                usecnt; /* count all resources */

        u32                     unsafe_global_rkey;

        /*
         * Implementation details of the RDMA core, don't use in drivers:
         */
        struct ib_mr           *__internal_mr;
        struct rdma_restrack_entry res;
};

struct ib_xrcd {
        struct ib_device       *device;
        atomic_t                usecnt; /* count all exposed resources */
        struct inode           *inode;

        struct mutex            tgt_qp_mutex;
        struct list_head        tgt_qp_list;
};

struct ib_ah {
        struct ib_device        *device;
        struct ib_pd            *pd;
        struct ib_uobject       *uobject;
        enum rdma_ah_attr_type  type;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

enum ib_poll_context {
        IB_POLL_DIRECT,         /* caller context, no hw completions */
        IB_POLL_SOFTIRQ,        /* poll from softirq context */
        IB_POLL_WORKQUEUE,      /* poll from workqueue */
};
1556
1557struct ib_cq {
1558        struct ib_device       *device;
1559        struct ib_uobject      *uobject;
1560        ib_comp_handler         comp_handler;
1561        void                  (*event_handler)(struct ib_event *, void *);
1562        void                   *cq_context;
1563        int                     cqe;
1564        atomic_t                usecnt; /* count number of work queues */
1565        enum ib_poll_context    poll_ctx;
1566        struct ib_wc            *wc;
1567        union {
1568                struct irq_poll         iop;
1569                struct work_struct      work;
1570        };
1571        /*
1572         * Implementation details of the RDMA core, don't use in drivers:
1573         */
1574        struct rdma_restrack_entry res;
1575};
1576
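/*
 * A minimal CQ allocation sketch using the ib_alloc_cq()/ib_free_cq()
 * helpers; the nr_cqe and comp_vector values below are arbitrary, and the
 * work between allocation and teardown is elided:
 *
 *	struct ib_cq *cq;
 *
 *	cq = ib_alloc_cq(device, NULL, 128, 0, IB_POLL_SOFTIRQ);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	...
 *	ib_free_cq(cq);
 */
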
1577struct ib_srq {
1578        struct ib_device       *device;
1579        struct ib_pd           *pd;
1580        struct ib_uobject      *uobject;
1581        void                  (*event_handler)(struct ib_event *, void *);
1582        void                   *srq_context;
1583        enum ib_srq_type        srq_type;
1584        atomic_t                usecnt;
1585
1586        struct {
1587                struct ib_cq   *cq;
1588                union {
1589                        struct {
1590                                struct ib_xrcd *xrcd;
1591                                u32             srq_num;
1592                        } xrc;
1593                };
1594        } ext;
1595};
1596
1597enum ib_raw_packet_caps {
1598        /* Stripping the cvlan from an incoming packet and reporting it in
1599         * the matching work completion is supported.
1600         */
1601        IB_RAW_PACKET_CAP_CVLAN_STRIPPING       = (1 << 0),
1602        /* Scattering the FCS field of an incoming packet to host memory is supported.
1603         */
1604        IB_RAW_PACKET_CAP_SCATTER_FCS           = (1 << 1),
1605        /* Checksum offloads are supported (for both send and receive). */
1606        IB_RAW_PACKET_CAP_IP_CSUM               = (1 << 2),
1607        /* When a packet is received for an RQ with no receive WQEs, the
1608         * packet processing is delayed.
1609         */
1610        IB_RAW_PACKET_CAP_DELAY_DROP            = (1 << 3),
1611};
1612
1613enum ib_wq_type {
1614        IB_WQT_RQ
1615};
1616
1617enum ib_wq_state {
1618        IB_WQS_RESET,
1619        IB_WQS_RDY,
1620        IB_WQS_ERR
1621};
1622
1623struct ib_wq {
1624        struct ib_device       *device;
1625        struct ib_uobject      *uobject;
1626        void                *wq_context;
1627        void                (*event_handler)(struct ib_event *, void *);
1628        struct ib_pd           *pd;
1629        struct ib_cq           *cq;
1630        u32             wq_num;
1631        enum ib_wq_state       state;
1632        enum ib_wq_type wq_type;
1633        atomic_t                usecnt;
1634};
1635
1636enum ib_wq_flags {
1637        IB_WQ_FLAGS_CVLAN_STRIPPING     = 1 << 0,
1638        IB_WQ_FLAGS_SCATTER_FCS         = 1 << 1,
1639        IB_WQ_FLAGS_DELAY_DROP          = 1 << 2,
1640        IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3,
1641};
1642
1643struct ib_wq_init_attr {
1644        void                   *wq_context;
1645        enum ib_wq_type wq_type;
1646        u32             max_wr;
1647        u32             max_sge;
1648        struct  ib_cq          *cq;
1649        void                (*event_handler)(struct ib_event *, void *);
1650        u32             create_flags; /* Use enum ib_wq_flags */
1651};
1652
1653enum ib_wq_attr_mask {
1654        IB_WQ_STATE             = 1 << 0,
1655        IB_WQ_CUR_STATE         = 1 << 1,
1656        IB_WQ_FLAGS             = 1 << 2,
1657};
1658
1659struct ib_wq_attr {
1660        enum    ib_wq_state     wq_state;
1661        enum    ib_wq_state     curr_wq_state;
1662        u32                     flags; /* Use enum ib_wq_flags */
1663        u32                     flags_mask; /* Use enum ib_wq_flags */
1664};
1665
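/*
 * A minimal sketch of driving a WQ from RESET to RDY with ib_modify_wq();
 * "wq" is assumed to come from an earlier ib_create_wq() call:
 *
 *	struct ib_wq_attr wq_attr = {};
 *	int ret;
 *
 *	wq_attr.wq_state = IB_WQS_RDY;
 *	ret = ib_modify_wq(wq, &wq_attr, IB_WQ_STATE);
 */
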
1666struct ib_rwq_ind_table {
1667        struct ib_device        *device;
1668        struct ib_uobject      *uobject;
1669        atomic_t                usecnt;
1670        u32             ind_tbl_num;
1671        u32             log_ind_tbl_size;
1672        struct ib_wq    **ind_tbl;
1673};
1674
1675struct ib_rwq_ind_table_init_attr {
1676        u32             log_ind_tbl_size;
1677        /* Each entry is a pointer to Receive Work Queue */
1678        struct ib_wq    **ind_tbl;
1679};
1680
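/*
 * A sketch of building an RSS indirection table over four receive WQs;
 * "wqs" is assumed to be an array of 1 << log_ind_tbl_size (here 4)
 * struct ib_wq pointers, and error handling is trimmed:
 *
 *	struct ib_rwq_ind_table_init_attr init_attr = {
 *		.log_ind_tbl_size = 2,
 *		.ind_tbl	  = wqs,
 *	};
 *	struct ib_rwq_ind_table *ind_tbl;
 *
 *	ind_tbl = ib_create_rwq_ind_table(device, &init_attr);
 */
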
1681enum port_pkey_state {
1682        IB_PORT_PKEY_NOT_VALID = 0,
1683        IB_PORT_PKEY_VALID = 1,
1684        IB_PORT_PKEY_LISTED = 2,
1685};
1686
1687struct ib_qp_security;
1688
1689struct ib_port_pkey {
1690        enum port_pkey_state    state;
1691        u16                     pkey_index;
1692        u8                      port_num;
1693        struct list_head        qp_list;
1694        struct list_head        to_error_list;
1695        struct ib_qp_security  *sec;
1696};
1697
1698struct ib_ports_pkeys {
1699        struct ib_port_pkey     main;
1700        struct ib_port_pkey     alt;
1701};
1702
1703struct ib_qp_security {
1704        struct ib_qp           *qp;
1705        struct ib_device       *dev;
1706        /* Hold this mutex when changing port and pkey settings. */
1707        struct mutex            mutex;
1708        struct ib_ports_pkeys  *ports_pkeys;
1709        /* A list of all open shared QP handles.  Required to enforce security
1710         * properly for all users of a shared QP.
1711         */
1712        struct list_head        shared_qp_list;
1713        void                   *security;
1714        bool                    destroying;
1715        atomic_t                error_list_count;
1716        struct completion       error_complete;
1717        int                     error_comps_pending;
1718};
1719
1720/*
1721 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
1722 * @max_read_sge:  Maximum SGE elements per RDMA READ request.
1723 */
1724struct ib_qp {
1725        struct ib_device       *device;
1726        struct ib_pd           *pd;
1727        struct ib_cq           *send_cq;
1728        struct ib_cq           *recv_cq;
1729        spinlock_t              mr_lock;
1730        int                     mrs_used;
1731        struct list_head        rdma_mrs;
1732        struct list_head        sig_mrs;
1733        struct ib_srq          *srq;
1734        struct ib_xrcd         *xrcd; /* XRC TGT QPs only */
1735        struct list_head        xrcd_list;
1736
1737        /* count times opened, mcast attaches, flow attaches */
1738        atomic_t                usecnt;
1739        struct list_head        open_list;
1740        struct ib_qp           *real_qp;
1741        struct ib_uobject      *uobject;
1742        void                  (*event_handler)(struct ib_event *, void *);
1743        void                   *qp_context;
1744        u32                     qp_num;
1745        u32                     max_write_sge;
1746        u32                     max_read_sge;
1747        enum ib_qp_type         qp_type;
1748        struct ib_rwq_ind_table *rwq_ind_tbl;
1749        struct ib_qp_security  *qp_sec;
1750        u8                      port;
1751
1752        /*
1753         * Implementation details of the RDMA core, don't use in drivers:
1754         */
1755        struct rdma_restrack_entry     res;
1756};
1757
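/*
 * A minimal RC QP creation sketch using ib_create_qp(); the "cq" and "pd"
 * objects are assumed to exist already, the capacities are arbitrary, and
 * error handling is trimmed:
 *
 *	struct ib_qp_init_attr qp_attr = {};
 *	struct ib_qp *qp;
 *
 *	qp_attr.send_cq		 = cq;
 *	qp_attr.recv_cq		 = cq;
 *	qp_attr.qp_type		 = IB_QPT_RC;
 *	qp_attr.sq_sig_type	 = IB_SIGNAL_REQ_WR;
 *	qp_attr.cap.max_send_wr	 = 64;
 *	qp_attr.cap.max_recv_wr	 = 64;
 *	qp_attr.cap.max_send_sge = 1;
 *	qp_attr.cap.max_recv_sge = 1;
 *	qp = ib_create_qp(pd, &qp_attr);
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */
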
1758struct ib_mr {
1759        struct ib_device  *device;
1760        struct ib_pd      *pd;
1761        u32                lkey;
1762        u32                rkey;
1763        u64                iova;
1764        u64                length;
1765        unsigned int       page_size;
1766        bool               need_inval;
1767        union {
1768                struct ib_uobject       *uobject;       /* user */
1769                struct list_head        qp_entry;       /* FR */
1770        };
1771};
1772
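/*
 * A fast-registration sketch: allocate an MR with ib_alloc_mr() and map a
 * scatterlist into it with ib_map_mr_sg(); "sgl" and "nents" are assumed
 * to come from a prior dma_map_sg() call:
 *
 *	struct ib_mr *mr;
 *	int n;
 *
 *	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *	n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
 *	if (n < nents)
 *		return -EINVAL;
 */
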
1773struct ib_mw {
1774        struct ib_device        *device;
1775        struct ib_pd            *pd;
1776        struct ib_uobject       *uobject;
1777        u32                     rkey;
1778        enum ib_mw_type         type;
1779};
1780
1781struct ib_fmr {
1782        struct ib_device        *device;
1783        struct ib_pd            *pd;
1784        struct list_head        list;
1785        u32                     lkey;
1786        u32                     rkey;
1787};
1788
1789/* Supported steering options */
1790enum ib_flow_attr_type {
1791        /* steering according to rule specifications */
1792        IB_FLOW_ATTR_NORMAL             = 0x0,
1793        /* default unicast and multicast rule -
1794         * receive all Eth traffic which isn't steered to any QP
1795         */
1796        IB_FLOW_ATTR_ALL_DEFAULT        = 0x1,
1797        /* default multicast rule -
1798         * receive all Eth multicast traffic which isn't steered to any QP
1799         */
1800        IB_FLOW_ATTR_MC_DEFAULT         = 0x2,
1801        /* sniffer rule - receive all port traffic */
1802        IB_FLOW_ATTR_SNIFFER            = 0x3
1803};
1804
1805/* Supported steering header types */
1806enum ib_flow_spec_type {
1807        /* L2 headers*/
1808        IB_FLOW_SPEC_ETH                = 0x20,
1809        IB_FLOW_SPEC_IB                 = 0x22,
1810        /* L3 header*/
1811        IB_FLOW_SPEC_IPV4               = 0x30,
1812        IB_FLOW_SPEC_IPV6               = 0x31,
1813        /* L4 headers*/
1814        IB_FLOW_SPEC_TCP                = 0x40,
1815        IB_FLOW_SPEC_UDP                = 0x41,
1816        IB_FLOW_SPEC_VXLAN_TUNNEL       = 0x50,
1817        IB_FLOW_SPEC_INNER              = 0x100,
1818        /* Actions */
1819        IB_FLOW_SPEC_ACTION_TAG         = 0x1000,
1820        IB_FLOW_SPEC_ACTION_DROP        = 0x1001,
1821};
1822#define IB_FLOW_SPEC_LAYER_MASK 0xF0
1823#define IB_FLOW_SPEC_SUPPORT_LAYERS 8
1824
1825/* Flow steering rule priority is set according to its domain.
1826 * Lower domain value means higher priority.
1827 */
1828enum ib_flow_domain {
1829        IB_FLOW_DOMAIN_USER,
1830        IB_FLOW_DOMAIN_ETHTOOL,
1831        IB_FLOW_DOMAIN_RFS,
1832        IB_FLOW_DOMAIN_NIC,
1833        IB_FLOW_DOMAIN_NUM /* Must be last */
1834};
1835
1836enum ib_flow_flags {
1837        IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
1838        IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 2  /* Must be last */
1839};
1840
1841struct ib_flow_eth_filter {
1842        u8      dst_mac[6];
1843        u8      src_mac[6];
1844        __be16  ether_type;
1845        __be16  vlan_tag;
1846        /* Must be last */
1847        u8      real_sz[0];
1848};
1849
1850struct ib_flow_spec_eth {
1851        u32                       type;
1852        u16                       size;
1853        struct ib_flow_eth_filter val;
1854        struct ib_flow_eth_filter mask;
1855};
1856
1857struct ib_flow_ib_filter {
1858        __be16 dlid;
1859        __u8   sl;
1860        /* Must be last */
1861        u8      real_sz[0];
1862};
1863
1864struct ib_flow_spec_ib {
1865        u32                      type;
1866        u16                      size;
1867        struct ib_flow_ib_filter val;
1868        struct ib_flow_ib_filter mask;
1869};
1870
1871/* IPv4 header flags */
1872enum ib_ipv4_flags {
1873        IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
1874        IB_IPV4_MORE_FRAG = 0x4  /* All fragmented packets except the
1875                                    last have this flag set */
1876};
1877
1878struct ib_flow_ipv4_filter {
1879        __be32  src_ip;
1880        __be32  dst_ip;
1881        u8      proto;
1882        u8      tos;
1883        u8      ttl;
1884        u8      flags;
1885        /* Must be last */
1886        u8      real_sz[0];
1887};
1888
1889struct ib_flow_spec_ipv4 {
1890        u32                        type;
1891        u16                        size;
1892        struct ib_flow_ipv4_filter val;
1893        struct ib_flow_ipv4_filter mask;
1894};
1895
1896struct ib_flow_ipv6_filter {
1897        u8      src_ip[16];
1898        u8      dst_ip[16];
1899        __be32  flow_label;
1900        u8      next_hdr;
1901        u8      traffic_class;
1902        u8      hop_limit;
1903        /* Must be last */
1904        u8      real_sz[0];
1905};
1906
1907struct ib_flow_spec_ipv6 {
1908        u32                        type;
1909        u16                        size;
1910        struct ib_flow_ipv6_filter val;
1911        struct ib_flow_ipv6_filter mask;
1912};
1913
1914struct ib_flow_tcp_udp_filter {
1915        __be16  dst_port;
1916        __be16  src_port;
1917        /* Must be last */
1918        u8      real_sz[0];
1919};
1920
1921struct ib_flow_spec_tcp_udp {
1922        u32                           type;
1923        u16                           size;
1924        struct ib_flow_tcp_udp_filter val;
1925        struct ib_flow_tcp_udp_filter mask;
1926};
1927
1928struct ib_flow_tunnel_filter {
1929        __be32  tunnel_id;
1930        u8      real_sz[0];
1931};
1932
1933/* ib_flow_spec_tunnel describes the VXLAN tunnel;
1934 * the tunnel_id in val holds the VNI value.
1935 */
1936struct ib_flow_spec_tunnel {
1937        u32                           type;
1938        u16                           size;
1939        struct ib_flow_tunnel_filter  val;
1940        struct ib_flow_tunnel_filter  mask;
1941};
1942
1943struct ib_flow_spec_action_tag {
1944        enum ib_flow_spec_type        type;
1945        u16                           size;
1946        u32                           tag_id;
1947};
1948
1949struct ib_flow_spec_action_drop {
1950        enum ib_flow_spec_type        type;
1951        u16                           size;
1952};
1953
1954union ib_flow_spec {
1955        struct {
1956                u32                     type;
1957                u16                     size;
1958        };
1959        struct ib_flow_spec_eth         eth;
1960        struct ib_flow_spec_ib          ib;
1961        struct ib_flow_spec_ipv4        ipv4;
1962        struct ib_flow_spec_tcp_udp     tcp_udp;
1963        struct ib_flow_spec_ipv6        ipv6;
1964        struct ib_flow_spec_tunnel      tunnel;
1965        struct ib_flow_spec_action_tag  flow_tag;
1966        struct ib_flow_spec_action_drop drop;
1967};
1968
1969struct ib_flow_attr {
1970        enum ib_flow_attr_type type;
1971        u16          size;
1972        u16          priority;
1973        u32          flags;
1974        u8           num_of_specs;
1975        u8           port;
1976        /* Following are the optional layers according to user request
1977         * struct ib_flow_spec_xxx
1978         * struct ib_flow_spec_yyy
1979         */
1980};
1981
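/*
 * A sketch of carrying one ethernet spec after the attr, as described
 * above; "dst_mac" is assumed caller context, the size accounting shown
 * here is one plausible convention, and error handling is trimmed:
 *
 *	struct ib_flow_attr *flow_attr;
 *	struct ib_flow_spec_eth *eth;
 *
 *	flow_attr = kzalloc(sizeof(*flow_attr) + sizeof(*eth), GFP_KERNEL);
 *	flow_attr->type		= IB_FLOW_ATTR_NORMAL;
 *	flow_attr->size		= sizeof(*flow_attr) + sizeof(*eth);
 *	flow_attr->num_of_specs	= 1;
 *	flow_attr->port		= 1;
 *	eth = (struct ib_flow_spec_eth *)(flow_attr + 1);
 *	eth->type = IB_FLOW_SPEC_ETH;
 *	eth->size = sizeof(*eth);
 *	eth_broadcast_addr(eth->mask.dst_mac);
 *	ether_addr_copy(eth->val.dst_mac, dst_mac);
 *	flow = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
 */
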
1982struct ib_flow {
1983        struct ib_qp            *qp;
1984        struct ib_uobject       *uobject;
1985};
1986
1987struct ib_mad_hdr;
1988struct ib_grh;
1989
1990enum ib_process_mad_flags {
1991        IB_MAD_IGNORE_MKEY      = 1,
1992        IB_MAD_IGNORE_BKEY      = 2,
1993        IB_MAD_IGNORE_ALL       = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
1994};
1995
1996enum ib_mad_result {
1997        IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
1998        IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
1999        IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
2000        IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
2001};
2002
2003struct ib_port_cache {
2004        u64                   subnet_prefix;
2005        struct ib_pkey_cache  *pkey;
2006        struct ib_gid_table   *gid;
2007        u8                     lmc;
2008        enum ib_port_state     port_state;
2009};
2010
2011struct ib_cache {
2012        rwlock_t                lock;
2013        struct ib_event_handler event_handler;
2014        struct ib_port_cache   *ports;
2015};
2016
2017struct iw_cm_verbs;
2018
2019struct ib_port_immutable {
2020        int                           pkey_tbl_len;
2021        int                           gid_tbl_len;
2022        u32                           core_cap_flags;
2023        u32                           max_mad_size;
2024};
2025
2026/* rdma netdev type - specifies protocol type */
2027enum rdma_netdev_t {
2028        RDMA_NETDEV_OPA_VNIC,
2029        RDMA_NETDEV_IPOIB,
2030};
2031
2032/**
2033 * struct rdma_netdev - rdma netdev
2034 * For cases where netstack interfacing is required.
2035 */
2036struct rdma_netdev {
2037        void              *clnt_priv;
2038        struct ib_device  *hca;
2039        u8                 port_num;
2040
2041        /* cleanup function must be specified */
2042        void (*free_rdma_netdev)(struct net_device *netdev);
2043
2044        /* control functions */
2045        void (*set_id)(struct net_device *netdev, int id);
2046        /* send packet */
2047        int (*send)(struct net_device *dev, struct sk_buff *skb,
2048                    struct ib_ah *address, u32 dqpn);
2049        /* multicast */
2050        int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
2051                            union ib_gid *gid, u16 mlid,
2052                            int set_qkey, u32 qkey);
2053        int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
2054                            union ib_gid *gid, u16 mlid);
2055};
2056
2057struct ib_port_pkey_list {
2058        /* Lock to hold while modifying the list. */
2059        spinlock_t                    list_lock;
2060        struct list_head              pkey_list;
2061};
2062
2063struct ib_device {
2064        /* Do not access @dma_device directly from ULP nor from HW drivers. */
2065        struct device                *dma_device;
2066
2067        char                          name[IB_DEVICE_NAME_MAX];
2068
2069        struct list_head              event_handler_list;
2070        spinlock_t                    event_handler_lock;
2071
2072        spinlock_t                    client_data_lock;
2073        struct list_head              core_list;
2074        /* Access to the client_data_list is protected by the client_data_lock
2075         * spinlock and the lists_rwsem read-write semaphore */
2076        struct list_head              client_data_list;
2077
2078        struct ib_cache               cache;
2079        /**
2080         * port_immutable is indexed by port number
2081         */
2082        struct ib_port_immutable     *port_immutable;
2083
2084        int                           num_comp_vectors;
2085
2086        struct ib_port_pkey_list     *port_pkey_list;
2087
2088        struct iw_cm_verbs           *iwcm;
2089
2090        /**
2091         * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
2092         *   driver initialized data.  The struct is kfree()'ed by the sysfs
2093         *   core when the device is removed.  A lifespan of -1 in the return
2094         *   struct tells the core to set a default lifespan.
2095         */
2096        struct rdma_hw_stats      *(*alloc_hw_stats)(struct ib_device *device,
2097                                                     u8 port_num);
2098        /**
2099         * get_hw_stats - Fill in the counter value(s) in the stats struct.
2100         * @index - The index in the value array we wish to have updated, or
2101         *   num_counters if we want all stats updated
2102         * Return codes -
2103         *   < 0 - Error, no counters updated
2104         *   index - Updated the single counter pointed to by index
2105         *   num_counters - Updated all counters (will reset the timestamp
2106         *     and prevent further calls for lifespan milliseconds)
2107         * Drivers are allowed to update all counters in lieu of just the
2108         *   one given in index, at their option.
2109         */
2110        int                        (*get_hw_stats)(struct ib_device *device,
2111                                                   struct rdma_hw_stats *stats,
2112                                                   u8 port, int index);
2113        int                        (*query_device)(struct ib_device *device,
2114                                                   struct ib_device_attr *device_attr,
2115                                                   struct ib_udata *udata);
2116        int                        (*query_port)(struct ib_device *device,
2117                                                 u8 port_num,
2118                                                 struct ib_port_attr *port_attr);
2119        enum rdma_link_layer       (*get_link_layer)(struct ib_device *device,
2120                                                     u8 port_num);
2121        /* When calling get_netdev, the HW vendor's driver should return the
2122         * net device of device @device at port @port_num or NULL if such
2123         * a net device doesn't exist. The vendor driver should call dev_hold
2124         * on this net device. The HW vendor's device driver must guarantee
2125         * that this function returns NULL before the net device reaches
2126         * NETDEV_UNREGISTER_FINAL state.
2127         */
2128        struct net_device         *(*get_netdev)(struct ib_device *device,
2129                                                 u8 port_num);
2130        int                        (*query_gid)(struct ib_device *device,
2131                                                u8 port_num, int index,
2132                                                union ib_gid *gid);
2133        /* When calling add_gid, the HW vendor's driver should
2134         * add the gid of device @device at gid index @index of
2135         * port @port_num to be @gid. Meta-info of that gid (for example,
2136         * the network device related to this gid) is available
2137         * at @attr. @context allows the HW vendor driver to store extra
2138         * information together with a GID entry. The HW vendor may allocate
2139         * memory to contain this information and store it in @context when a
2140         * new GID entry is written. Params are consistent until the next
2141         * call of add_gid or delete_gid. The function should return 0 on
2142         * success or an error code otherwise. The function could be called
2143         * concurrently for different ports. This function is only called
2144         * when roce_gid_table is used.
2145         */
2146        int                        (*add_gid)(struct ib_device *device,
2147                                              u8 port_num,
2148                                              unsigned int index,
2149                                              const union ib_gid *gid,
2150                                              const struct ib_gid_attr *attr,
2151                                              void **context);
2152        /* When calling del_gid, the HW vendor's driver should delete the
2153         * gid of device @device at gid index @index of port @port_num.
2154         * Upon the deletion of a GID entry, the HW vendor must free any
2155         * allocated memory. The caller will clear @context afterwards.
2156         * This function is only called when roce_gid_table is used.
2157         */
2158        int                        (*del_gid)(struct ib_device *device,
2159                                              u8 port_num,
2160                                              unsigned int index,
2161                                              void **context);
2162        int                        (*query_pkey)(struct ib_device *device,
2163                                                 u8 port_num, u16 index, u16 *pkey);
2164        int                        (*modify_device)(struct ib_device *device,
2165                                                    int device_modify_mask,
2166                                                    struct ib_device_modify *device_modify);
2167        int                        (*modify_port)(struct ib_device *device,
2168                                                  u8 port_num, int port_modify_mask,
2169                                                  struct ib_port_modify *port_modify);
2170        struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
2171                                                     struct ib_udata *udata);
2172        int                        (*dealloc_ucontext)(struct ib_ucontext *context);
2173        int                        (*mmap)(struct ib_ucontext *context,
2174                                           struct vm_area_struct *vma);
2175        struct ib_pd *             (*alloc_pd)(struct ib_device *device,
2176                                               struct ib_ucontext *context,
2177                                               struct ib_udata *udata);
2178        int                        (*dealloc_pd)(struct ib_pd *pd);
2179        struct ib_ah *             (*create_ah)(struct ib_pd *pd,
2180                                                struct rdma_ah_attr *ah_attr,
2181                                                struct ib_udata *udata);
2182        int                        (*modify_ah)(struct ib_ah *ah,
2183                                                struct rdma_ah_attr *ah_attr);
2184        int                        (*query_ah)(struct ib_ah *ah,
2185                                               struct rdma_ah_attr *ah_attr);
2186        int                        (*destroy_ah)(struct ib_ah *ah);
2187        struct ib_srq *            (*create_srq)(struct ib_pd *pd,
2188                                                 struct ib_srq_init_attr *srq_init_attr,
2189                                                 struct ib_udata *udata);
2190        int                        (*modify_srq)(struct ib_srq *srq,
2191                                                 struct ib_srq_attr *srq_attr,
2192                                                 enum ib_srq_attr_mask srq_attr_mask,
2193                                                 struct ib_udata *udata);
2194        int                        (*query_srq)(struct ib_srq *srq,
2195                                                struct ib_srq_attr *srq_attr);
2196        int                        (*destroy_srq)(struct ib_srq *srq);
2197        int                        (*post_srq_recv)(struct ib_srq *srq,
2198                                                    struct ib_recv_wr *recv_wr,
2199                                                    struct ib_recv_wr **bad_recv_wr);
2200        struct ib_qp *             (*create_qp)(struct ib_pd *pd,
2201                                                struct ib_qp_init_attr *qp_init_attr,
2202                                                struct ib_udata *udata);
2203        int                        (*modify_qp)(struct ib_qp *qp,
2204                                                struct ib_qp_attr *qp_attr,
2205                                                int qp_attr_mask,
2206                                                struct ib_udata *udata);
2207        int                        (*query_qp)(struct ib_qp *qp,
2208                                               struct ib_qp_attr *qp_attr,
2209                                               int qp_attr_mask,
2210                                               struct ib_qp_init_attr *qp_init_attr);
2211        int                        (*destroy_qp)(struct ib_qp *qp);
2212        int                        (*post_send)(struct ib_qp *qp,
2213                                                struct ib_send_wr *send_wr,
2214                                                struct ib_send_wr **bad_send_wr);
2215        int                        (*post_recv)(struct ib_qp *qp,
2216                                                struct ib_recv_wr *recv_wr,
2217                                                struct ib_recv_wr **bad_recv_wr);
2218        struct ib_cq *             (*create_cq)(struct ib_device *device,
2219                                                const struct ib_cq_init_attr *attr,
2220                                                struct ib_ucontext *context,
2221                                                struct ib_udata *udata);
2222        int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
2223                                                u16 cq_period);
2224        int                        (*destroy_cq)(struct ib_cq *cq);
2225        int                        (*resize_cq)(struct ib_cq *cq, int cqe,
2226                                                struct ib_udata *udata);
2227        int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
2228                                              struct ib_wc *wc);
2229        int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2230        int                        (*req_notify_cq)(struct ib_cq *cq,
2231                                                    enum ib_cq_notify_flags flags);
2232        int                        (*req_ncomp_notif)(struct ib_cq *cq,
2233                                                      int wc_cnt);
2234        struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
2235                                                 int mr_access_flags);
2236        struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
2237                                                  u64 start, u64 length,
2238                                                  u64 virt_addr,
2239                                                  int mr_access_flags,
2240                                                  struct ib_udata *udata);
2241        int                        (*rereg_user_mr)(struct ib_mr *mr,
2242                                                    int flags,
2243                                                    u64 start, u64 length,
2244                                                    u64 virt_addr,
2245                                                    int mr_access_flags,
2246                                                    struct ib_pd *pd,
2247                                                    struct ib_udata *udata);
2248        int                        (*dereg_mr)(struct ib_mr *mr);
2249        struct ib_mr *             (*alloc_mr)(struct ib_pd *pd,
2250                                               enum ib_mr_type mr_type,
2251                                               u32 max_num_sg);
2252        int                        (*map_mr_sg)(struct ib_mr *mr,
2253                                                struct scatterlist *sg,
2254                                                int sg_nents,
2255                                                unsigned int *sg_offset);
2256        struct ib_mw *             (*alloc_mw)(struct ib_pd *pd,
2257                                               enum ib_mw_type type,
2258                                               struct ib_udata *udata);
2259        int                        (*dealloc_mw)(struct ib_mw *mw);
2260        struct ib_fmr *            (*alloc_fmr)(struct ib_pd *pd,
2261                                                int mr_access_flags,
2262                                                struct ib_fmr_attr *fmr_attr);
2263        int                        (*map_phys_fmr)(struct ib_fmr *fmr,
2264                                                   u64 *page_list, int list_len,
2265                                                   u64 iova);
2266        int                        (*unmap_fmr)(struct list_head *fmr_list);
2267        int                        (*dealloc_fmr)(struct ib_fmr *fmr);
2268        int                        (*attach_mcast)(struct ib_qp *qp,
2269                                                   union ib_gid *gid,
2270                                                   u16 lid);
2271        int                        (*detach_mcast)(struct ib_qp *qp,
2272                                                   union ib_gid *gid,
2273                                                   u16 lid);
2274        int                        (*process_mad)(struct ib_device *device,
2275                                                  int process_mad_flags,
2276                                                  u8 port_num,
2277                                                  const struct ib_wc *in_wc,
2278                                                  const struct ib_grh *in_grh,
2279                                                  const struct ib_mad_hdr *in_mad,
2280                                                  size_t in_mad_size,
2281                                                  struct ib_mad_hdr *out_mad,
2282                                                  size_t *out_mad_size,
2283                                                  u16 *out_mad_pkey_index);
2284        struct ib_xrcd *           (*alloc_xrcd)(struct ib_device *device,
2285                                                 struct ib_ucontext *ucontext,
2286                                                 struct ib_udata *udata);
2287        int                        (*dealloc_xrcd)(struct ib_xrcd *xrcd);
2288        struct ib_flow *           (*create_flow)(struct ib_qp *qp,
2289                                                  struct ib_flow_attr
2290                                                  *flow_attr,
2291                                                  int domain);
2292        int                        (*destroy_flow)(struct ib_flow *flow_id);
2293        int                        (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2294                                                      struct ib_mr_status *mr_status);
2295        void                       (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
2296        void                       (*drain_rq)(struct ib_qp *qp);
2297        void                       (*drain_sq)(struct ib_qp *qp);
2298        int                        (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
2299                                                        int state);
2300        int                        (*get_vf_config)(struct ib_device *device, int vf, u8 port,
2301                                                   struct ifla_vf_info *ivf);
2302        int                        (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
2303                                                   struct ifla_vf_stats *stats);
2304        int                        (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
2305                                                  int type);
2306        struct ib_wq *             (*create_wq)(struct ib_pd *pd,
2307                                                struct ib_wq_init_attr *init_attr,
2308                                                struct ib_udata *udata);
2309        int                        (*destroy_wq)(struct ib_wq *wq);
2310        int                        (*modify_wq)(struct ib_wq *wq,
2311                                                struct ib_wq_attr *attr,
2312                                                u32 wq_attr_mask,
2313                                                struct ib_udata *udata);
2314        struct ib_rwq_ind_table *  (*create_rwq_ind_table)(struct ib_device *device,
2315                                                           struct ib_rwq_ind_table_init_attr *init_attr,
2316                                                           struct ib_udata *udata);
2317        int                        (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2318        /**
2319         * rdma netdev operation
2320         *
2321         * Driver implementing alloc_rdma_netdev must return -EOPNOTSUPP if it
2322         * doesn't support the specified rdma netdev type.
2323         */
2324        struct net_device *(*alloc_rdma_netdev)(
2325                                        struct ib_device *device,
2326                                        u8 port_num,
2327                                        enum rdma_netdev_t type,
2328                                        const char *name,
2329                                        unsigned char name_assign_type,
2330                                        void (*setup)(struct net_device *));
2331
2332        struct module               *owner;
2333        struct device                dev;
2334        struct kobject               *ports_parent;
2335        struct list_head             port_list;
2336
2337        enum {
2338                IB_DEV_UNINITIALIZED,
2339                IB_DEV_REGISTERED,
2340                IB_DEV_UNREGISTERED
2341        }                            reg_state;
2342
2343        int                          uverbs_abi_ver;
2344        u64                          uverbs_cmd_mask;
2345        u64                          uverbs_ex_cmd_mask;
2346
2347        char                         node_desc[IB_DEVICE_NODE_DESC_MAX];
2348        __be64                       node_guid;
2349        u32                          local_dma_lkey;
2350        u16                          is_switch:1;
2351        u8                           node_type;
2352        u8                           phys_port_cnt;
2353        struct ib_device_attr        attrs;
2354        struct attribute_group       *hw_stats_ag;
2355        struct rdma_hw_stats         *hw_stats;
2356
2357#ifdef CONFIG_CGROUP_RDMA
2358        struct rdmacg_device         cg_device;
2359#endif
2360
2361        u32                          index;
2362        /*
2363         * Implementation details of the RDMA core, don't use in drivers
2364         */
2365        struct rdma_restrack_root     res;
2366
2367        /**
2368         * The following mandatory functions are used only at device
2369         * registration.  Keep functions such as these at the end of this
2370         * structure to avoid cache line misses when accessing struct ib_device
2371         * in fast paths.
2372         */
2373        int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
2374        void (*get_dev_fw_str)(struct ib_device *, char *str);
2375        const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
2376                                                     int comp_vector);
2377
2378        struct uverbs_root_spec         *specs_root;
2379};
2380
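/*
 * A sketch of the alloc_hw_stats/get_hw_stats contract documented above,
 * using the rdma_alloc_hw_stats_struct() helper; all my_* identifiers and
 * counter names are hypothetical driver context:
 *
 *	static const char * const my_counter_names[] = {
 *		"rx_pkts",
 *		"tx_pkts",
 *	};
 *
 *	static struct rdma_hw_stats *my_alloc_hw_stats(struct ib_device *dev,
 *						       u8 port_num)
 *	{
 *		return rdma_alloc_hw_stats_struct(my_counter_names,
 *				ARRAY_SIZE(my_counter_names),
 *				RDMA_HW_STATS_DEFAULT_LIFESPAN);
 *	}
 *
 *	static int my_get_hw_stats(struct ib_device *dev,
 *				   struct rdma_hw_stats *stats,
 *				   u8 port, int index)
 *	{
 *		stats->value[0] = my_read_rx_counter(dev, port);
 *		stats->value[1] = my_read_tx_counter(dev, port);
 *		return ARRAY_SIZE(my_counter_names);
 *	}
 *
 * Returning num_counters tells the core that all counters were refreshed,
 * which resets the timestamp for the lifespan-based caching above.
 */
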
2381struct ib_client {
2382        char  *name;
2383        void (*add)   (struct ib_device *);
2384        void (*remove)(struct ib_device *, void *client_data);
2385
2386        /* Returns the net_dev belonging to this ib_client and matching the
2387         * given parameters.
2388         * @dev:         An RDMA device that the net_dev uses for communication.
2389         * @port:        A physical port number on the RDMA device.
2390         * @pkey:        P_Key that the net_dev uses if applicable.
2391         * @gid:         A GID that the net_dev uses to communicate.
2392         * @addr:        An IP address the net_dev is configured with.
2393         * @client_data: The device's client data set by ib_set_client_data().
2394         *
2395         * An ib_client that implements a net_dev on top of RDMA devices
2396         * (such as IP over IB) should implement this callback, allowing the
2397         * rdma_cm module to find the right net_dev for a given request.
2398         *
2399         * The caller is responsible for calling dev_put on the returned
2400         * netdev. */
2401        struct net_device *(*get_net_dev_by_params)(
2402                        struct ib_device *dev,
2403                        u8 port,
2404                        u16 pkey,
2405                        const union ib_gid *gid,
2406                        const struct sockaddr *addr,
2407                        void *client_data);
2408        struct list_head list;
2409};
2410
2411struct ib_device *ib_alloc_device(size_t size);
2412void ib_dealloc_device(struct ib_device *device);
2413
2414void ib_get_device_fw_str(struct ib_device *device, char *str);
2415
2416int ib_register_device(struct ib_device *device,
2417                       int (*port_callback)(struct ib_device *,
2418                                            u8, struct kobject *));
2419void ib_unregister_device(struct ib_device *device);
2420
2421int ib_register_client   (struct ib_client *client);
2422void ib_unregister_client(struct ib_client *client);
2423
2424void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
2425void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
2426                         void *data);
2427
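/*
 * A minimal client registration sketch; the my_* identifiers are
 * hypothetical:
 *
 *	static void my_add_one(struct ib_device *device);
 *	static void my_remove_one(struct ib_device *device, void *client_data);
 *
 *	static struct ib_client my_client = {
 *		.name	= "my_client",
 *		.add	= my_add_one,
 *		.remove	= my_remove_one,
 *	};
 *
 * my_add_one() would typically allocate per-device state and publish it
 * with ib_set_client_data(device, &my_client, priv); module init/exit
 * then call ib_register_client(&my_client) and
 * ib_unregister_client(&my_client) respectively.
 */
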
2428static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2429{
2430        return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2431}
2432
2433static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2434{
2435        return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
2436}
2437
2438static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2439                                       size_t offset,
2440                                       size_t len)
2441{
2442        const void __user *p = udata->inbuf + offset;
2443        bool ret;
2444        u8 *buf;
2445
2446        if (len > USHRT_MAX)
2447                return false;
2448
2449        buf = memdup_user(p, len);
2450        if (IS_ERR(buf))
2451                return false;
2452
2453        ret = !memchr_inv(buf, 0, len);
2454        kfree(buf);
2455        return ret;
2456}
2457
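/*
 * A typical verb-handler input/output pattern built from the three helpers
 * above; struct my_req/my_resp are hypothetical ABI structures:
 *
 *	struct my_req req;
 *	struct my_resp resp = {};
 *
 *	if (udata->inlen < sizeof(req))
 *		return -EINVAL;
 *	if (ib_copy_from_udata(&req, udata, sizeof(req)))
 *		return -EFAULT;
 *	if (udata->inlen > sizeof(req) &&
 *	    !ib_is_udata_cleared(udata, sizeof(req),
 *				 udata->inlen - sizeof(req)))
 *		return -EOPNOTSUPP;
 *	...
 *	if (ib_copy_to_udata(udata, &resp,
 *			     min(sizeof(resp), udata->outlen)))
 *		return -EFAULT;
 */
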
2458/**
2459 * ib_modify_qp_is_ok - Check that the supplied attribute mask
2460 * contains all required attributes and no attributes not allowed for
2461 * the given QP state transition.
2462 * @cur_state: Current QP state
2463 * @next_state: Next QP state
2464 * @type: QP type
2465 * @mask: Mask of supplied QP attributes
2466 * @ll: link layer of port
2467 *
2468 * This function is a helper function that a low-level driver's
2469 * modify_qp method can use to validate the consumer's input.  It
2470 * checks that cur_state and next_state are valid QP states, that a
2471 * transition from cur_state to next_state is allowed by the IB spec,
2472 * and that the attribute mask supplied is allowed for the transition.
2473 */
2474int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
2475                       enum ib_qp_type type, enum ib_qp_attr_mask mask,
2476                       enum rdma_link_layer ll);
2477
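/*
 * A sketch of how a driver's modify_qp method would use this helper before
 * touching hardware state; identifiers other than ib_modify_qp_is_ok() and
 * rdma_port_get_link_layer() are assumed driver context:
 *
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
 *				rdma_port_get_link_layer(ibqp->device, port)))
 *		return -EINVAL;
 */
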
2478void ib_register_event_handler(struct ib_event_handler *event_handler);
2479void ib_unregister_event_handler(struct ib_event_handler *event_handler);
2480void ib_dispatch_event(struct ib_event *event);
2481
2482int ib_query_port(struct ib_device *device,
2483                  u8 port_num, struct ib_port_attr *port_attr);
2484
2485enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
2486                                               u8 port_num);
2487
2488/**
2489 * rdma_cap_ib_switch - Check if the device is IB switch
2490 * @device: Device to check
2491 *
2492 * The device driver is responsible for setting the is_switch bit in
2493 * the ib_device structure at init time.
2494 *
2495 * Return: true if the device is IB switch.
2496 */
2497static inline bool rdma_cap_ib_switch(const struct ib_device *device)
2498{
2499        return device->is_switch;
2500}
2501
2502/**
2503 * rdma_start_port - Return the first valid port number for the device
2504 * specified
2505 *
2506 * @device: Device to be checked
2507 *
2508 * Return start port number
2509 */
2510static inline u8 rdma_start_port(const struct ib_device *device)
2511{
2512        return rdma_cap_ib_switch(device) ? 0 : 1;
2513}
2514
2515/**
2516 * rdma_end_port - Return the last valid port number for the device
2517 * specified
2518 *
2519 * @device: Device to be checked
2520 *
2521 * Return last port number
2522 */
2523static inline u8 rdma_end_port(const struct ib_device *device)
2524{
2525        return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
2526}
2527
2528static inline int rdma_is_port_valid(const struct ib_device *device,
2529                                     unsigned int port)
2530{
2531        return (port >= rdma_start_port(device) &&
2532                port <= rdma_end_port(device));
2533}
2534
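/*
 * Together these allow iterating every valid port, whether the device is a
 * switch (port 0 only) or an HCA (ports 1..phys_port_cnt).  A sketch:
 *
 *	u8 port;
 *
 *	for (port = rdma_start_port(device);
 *	     port <= rdma_end_port(device); port++) {
 *		if (!rdma_protocol_roce(device, port))
 *			continue;
 *		...
 *	}
 */
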
2535static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
2536{
2537        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
2538}
2539
2540static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
2541{
2542        return device->port_immutable[port_num].core_cap_flags &
2543                (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
2544}
2545
2546static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
2547{
2548        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
2549}
2550
2551static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
2552{
2553        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
2554}
2555
2556static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
2557{
2558        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
2559}
2560
2561static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
2562{
2563        return rdma_protocol_ib(device, port_num) ||
2564                rdma_protocol_roce(device, port_num);
2565}
2566
2567static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
2568{
2569        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_RAW_PACKET;
2570}
2571
2572static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
2573{
2574        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_USNIC;
2575}
2576
2577/**
2578 * rdma_cap_ib_mad - Check if the port of a device supports InfiniBand
2579 * Management Datagrams.
2580 * @device: Device to check
2581 * @port_num: Port number to check
2582 *
2583 * Management Datagrams (MAD) are a required part of the InfiniBand
2584 * specification and are supported on all InfiniBand devices.  A slightly
2585 * extended version is also supported on OPA interfaces.
2586 *
2587 * Return: true if the port supports sending/receiving of MAD packets.
2588 */
2589static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
2590{
2591        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
2592}
2593
2594/**
2595 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
2596 * Management Datagrams.
2597 * @device: Device to check
2598 * @port_num: Port number to check
2599 *
2600 * Intel OmniPath devices extend and/or replace the InfiniBand Management
2601 * datagrams with their own versions.  These OPA MADs share many but not all of
2602 * the characteristics of InfiniBand MADs.
2603 *
2604 * OPA MADs differ in the following ways:
2605 *
2606 *    1) MADs are variable size up to 2K
2607 *       IBTA defined MADs remain fixed at 256 bytes
2608 *    2) OPA SMPs must carry valid PKeys
2609 *    3) OPA SMP packets are a different format
2610 *
2611 * Return: true if the port supports OPA MAD packet formats.
2612 */
2613static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
2614{
2615        return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
2616                == RDMA_CORE_CAP_OPA_MAD;
2617}
2618
2619/**
2620 * rdma_cap_ib_smi - Check if the port of a device provides an InfiniBand
2621 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
2622 * @device: Device to check
2623 * @port_num: Port number to check
2624 *
2625 * Each InfiniBand node is required to provide a Subnet Management Agent
2626 * that the subnet manager can access.  Prior to the fabric being fully
2627 * configured by the subnet manager, the SMA is accessed via a well known
2628 * interface called the Subnet Management Interface (SMI).  This interface
2629 * uses directed route packets to communicate with the SM to get around the
2630 * chicken and egg problem of the SM needing to know what's on the fabric
2631 * in order to configure the fabric, and needing to configure the fabric in
2632 * order to send packets to the devices on the fabric.  These directed
2633 * route packets do not need the fabric fully configured in order to reach
2634 * their destination.  The SMI is the only method allowed to send
2635 * directed route packets on an InfiniBand fabric.
2636 *
2637 * Return: true if the port provides an SMI.
2638 */
2639static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
2640{
2641        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
2642}
2643
2644/**
2645 * rdma_cap_ib_cm - Check if the port of a device supports the InfiniBand
2646 * Communication Manager.
2647 * @device: Device to check
2648 * @port_num: Port number to check
2649 *
2650 * The InfiniBand Communication Manager is one of many pre-defined General
2651 * Service Agents (GSA) that are accessed via the General Service
2652 * Interface (GSI).  Its role is to facilitate establishment of connections
2653 * between nodes as well as other management related tasks for established
2654 * connections.
2655 *
2656 * Return: true if the port supports an IB CM (this does not guarantee that
2657 * a CM is actually running however).
2658 */
2659static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
2660{
2661        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
2662}
2663
2664/**
2665 * rdma_cap_iw_cm - Check if the port of a device supports the iWARP
2666 * Communication Manager.
2667 * @device: Device to check
2668 * @port_num: Port number to check
2669 *
2670 * Similar to above, but specific to iWARP connections, which have a different
2671 * management protocol than InfiniBand.
2672 *
2673 * Return: true if the port supports an iWARP CM (this does not guarantee that
2674 * a CM is actually running however).
2675 */
2676static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
2677{
2678        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
2679}
2680
2681/**
2682 * rdma_cap_ib_sa - Check if the port of a device supports InfiniBand
2683 * Subnet Administration.
2684 * @device: Device to check
2685 * @port_num: Port number to check
2686 *
2687 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
2688 * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
2689 * fabrics, devices should resolve routes to other hosts by contacting the
2690 * SA to query the proper route.
2691 *
2692 * Return: true if the port should act as a client to the fabric Subnet
2693 * Administration interface.  This does not imply that the SA service is
2694 * running locally.
2695 */
2696static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
2697{
2698        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
2699}
2700
2701/**
2702 * rdma_cap_ib_mcast - Check if the port of a device supports InfiniBand
2703 * Multicast.
2704 * @device: Device to check
2705 * @port_num: Port number to check
2706 *
2707 * InfiniBand multicast registration is more complex than normal IPv4 or
2708 * IPv6 multicast registration.  Each Host Channel Adapter must register
2709 * with the Subnet Manager when it wishes to join a multicast group.  It
2710 * should do so only once regardless of how many queue pairs it subscribes
2711 * to this group.  And it should leave the group only after all queue pairs
2712 * attached to the group have been detached.
2713 *
2714 * Return: true if the port must undertake the additional administrative
2715 * overhead of registering/unregistering with the SM and tracking of the
2716 * total number of queue pairs attached to the multicast group.
2717 */
2718static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
2719{
2720        return rdma_cap_ib_sa(device, port_num);
2721}
2722
2723/**
2724 * rdma_cap_af_ib - Check if the port of a device supports
2725 * native InfiniBand addressing.
2726 * @device: Device to check
2727 * @port_num: Port number to check
2728 *
2729 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
2730 * GID.  RoCE uses a different mechanism, but still generates a GID via
2731 * a prescribed mechanism and port specific data.
2732 *
2733 * Return: true if the port uses a GID address to identify devices on the
2734 * network.
2735 */
2736static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
2737{
2738        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
2739}
2740
2741/**
2742 * rdma_cap_eth_ah - Check if the port of a device supports
2743 * Ethernet Address Handles.
2744 * @device: Device to check
2745 * @port_num: Port number to check
2746 *
2747 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
2748 * to fabricate GIDs over Ethernet/IP specific addresses native to the
2749 * port.  Normally, packet headers are generated by the sending host
2750 * adapter, but when sending connectionless datagrams, we must manually
2751 * inject the proper headers for the fabric we are communicating over.
2752 *
2753 * Return: true if we are running as a RoCE port and must force the
2754 * addition of a Global Route Header built from our Ethernet Address
2755 * Handle into our header list for connectionless packets.
2756 */
2757static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
2758{
2759        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
2760}
2761
2762/**
2763 * rdma_cap_opa_ah - Check if the port of device supports
2764 * OPA Address handles
2765 * @device: Device to check
2766 * @port_num: Port number to check
2767 *
2768 * Return: true if we are running on an OPA device which supports
2769 * the extended OPA addressing.
2770 */
2771static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
2772{
2773        return (device->port_immutable[port_num].core_cap_flags &
2774                RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
2775}
2776
2777/**
2778 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
2779 *
2780 * @device: Device
2781 * @port_num: Port number
2782 *
2783 * This MAD size includes the MAD headers and MAD payload.  No other headers
2784 * are included.
2785 *
2786 * Return the max MAD size required by the Port.  Will return 0 if the port
2787 * does not support MADs.
2788 */
2789static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
2790{
2791        return device->port_immutable[port_num].max_mad_size;
2792}
2793
2794/**
2795 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
2796 * @device: Device to check
2797 * @port_num: Port number to check
2798 *
2799 * RoCE GID table mechanism manages the various GIDs for a device.
2800 *
2801 * NOTE: if allocating the port's GID table has failed, this call will still
2802 * return true, but any RoCE GID table API will fail.
2803 *
2804 * Return: true if the port uses RoCE GID table mechanism in order to manage
2805 * its GIDs.
2806 */
2807static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
2808                                           u8 port_num)
2809{
2810        return rdma_protocol_roce(device, port_num) &&
2811                device->add_gid && device->del_gid;
2812}
2813
2814/*
2815 * Check if the device supports READ W/ INVALIDATE.
2816 */
2817static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
2818{
2819        /*
2820         * iWarp drivers must support READ W/ INVALIDATE.  No other protocol
2821         * has support for it yet.
2822         */
2823        return rdma_protocol_iwarp(dev, port_num);
2824}
2825
2826int ib_query_gid(struct ib_device *device,
2827                 u8 port_num, int index, union ib_gid *gid,
2828                 struct ib_gid_attr *attr);
2829
2830int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
2831                         int state);
2832int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
2833                     struct ifla_vf_info *info);
2834int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
2835                    struct ifla_vf_stats *stats);
2836int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
2837                   int type);
2838
2839int ib_query_pkey(struct ib_device *device,
2840                  u8 port_num, u16 index, u16 *pkey);
2841
2842int ib_modify_device(struct ib_device *device,
2843                     int device_modify_mask,
2844                     struct ib_device_modify *device_modify);
2845
2846int ib_modify_port(struct ib_device *device,
2847                   u8 port_num, int port_modify_mask,
2848                   struct ib_port_modify *port_modify);
2849
2850int ib_find_gid(struct ib_device *device, union ib_gid *gid,
2851                struct net_device *ndev, u8 *port_num, u16 *index);
2852
2853int ib_find_pkey(struct ib_device *device,
2854                 u8 port_num, u16 pkey, u16 *index);
2855
2856enum ib_pd_flags {
2857        /*
2858         * Create a memory registration for all memory in the system and place
2859         * the rkey for it into pd->unsafe_global_rkey.  This can be used by
2860         * ULPs to avoid the overhead of dynamic MRs.
2861         *
2862         * This flag is generally considered unsafe and must only be used in
2863 * extremely trusted environments.  Every use of it will log a warning
2864         * in the kernel log.
2865         */
2866        IB_PD_UNSAFE_GLOBAL_RKEY        = 0x01,
2867};
2868
2869struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
2870                const char *caller);
2871#define ib_alloc_pd(device, flags) \
2872        __ib_alloc_pd((device), (flags), KBUILD_MODNAME)
2873void ib_dealloc_pd(struct ib_pd *pd);
2874
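/*
 * Example (illustrative sketch only, not part of this header): a typical
 * ULP allocates one PD per device and releases it on teardown.  "device"
 * stands in for a ULP-owned struct ib_device pointer.
 *
 *	struct ib_pd *pd;
 *
 *	pd = ib_alloc_pd(device, 0);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...
 *	ib_dealloc_pd(pd);
 */
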
2875/**
2876 * rdma_create_ah - Creates an address handle for the given address vector.
2877 * @pd: The protection domain associated with the address handle.
2878 * @ah_attr: The attributes of the address vector.
2879 *
2880 * The address handle is used to reference a local or global destination
2881 * in all UD QP post sends.
2882 */
2883struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr);
2884
2885/**
2886 * rdma_create_user_ah - Creates an address handle for the given address vector.
2887 * It resolves the destination MAC address for an ah_attr of RoCE type.
2888 * @pd: The protection domain associated with the address handle.
2889 * @ah_attr: The attributes of the address vector.
2890 * @udata: pointer to user's input/output buffer information needed by
2891 *         the provider driver.
2892 *
2893 * It returns a valid address handle on success and an ERR_PTR on failure.
2894 * The address handle is used to reference a local or global destination
2895 * in all UD QP post sends.
2896 */
2897struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
2898                                  struct rdma_ah_attr *ah_attr,
2899                                  struct ib_udata *udata);
2900/**
2901 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from a GRH or IPv4
2902 *   header.
2903 * @hdr: the L3 header to parse
2904 * @net_type: type of header to parse
2905 * @sgid: place to store source gid
2906 * @dgid: place to store destination gid
2907 */
2908int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
2909                              enum rdma_network_type net_type,
2910                              union ib_gid *sgid, union ib_gid *dgid);
2911
2912/**
2913 * ib_get_rdma_header_version - Get the header version
2914 * @hdr: the L3 header to parse
2915 */
2916int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
2917
2918/**
2919 * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
2920 *   work completion.
2921 * @device: Device on which the received message arrived.
2922 * @port_num: Port on which the received message arrived.
2923 * @wc: Work completion associated with the received message.
2924 * @grh: References the received global route header.  This parameter is
2925 *   ignored unless the work completion indicates that the GRH is valid.
2926 * @ah_attr: Returned attributes that can be used when creating an address
2927 *   handle for replying to the message.
2928 */
2929int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
2930                            const struct ib_wc *wc, const struct ib_grh *grh,
2931                            struct rdma_ah_attr *ah_attr);
2932
2933/**
2934 * ib_create_ah_from_wc - Creates an address handle associated with the
2935 *   sender of the specified work completion.
2936 * @pd: The protection domain associated with the address handle.
2937 * @wc: Work completion information associated with a received message.
2938 * @grh: References the received global route header.  This parameter is
2939 *   ignored unless the work completion indicates that the GRH is valid.
2940 * @port_num: The outbound port number to associate with the address.
2941 *
2942 * The address handle is used to reference a local or global destination
2943 * in all UD QP post sends.
2944 */
2945struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
2946                                   const struct ib_grh *grh, u8 port_num);
2947
2948/**
2949 * rdma_modify_ah - Modifies the address vector associated with an address
2950 *   handle.
2951 * @ah: The address handle to modify.
2952 * @ah_attr: The new address vector attributes to associate with the
2953 *   address handle.
2954 */
2955int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2956
2957/**
2958 * rdma_query_ah - Queries the address vector associated with an address
2959 *   handle.
2960 * @ah: The address handle to query.
2961 * @ah_attr: The address vector attributes associated with the address
2962 *   handle.
2963 */
2964int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2965
2966/**
2967 * rdma_destroy_ah - Destroys an address handle.
2968 * @ah: The address handle to destroy.
2969 */
2970int rdma_destroy_ah(struct ib_ah *ah);
2971
2972/**
2973 * ib_create_srq - Creates a SRQ associated with the specified protection
2974 *   domain.
2975 * @pd: The protection domain associated with the SRQ.
2976 * @srq_init_attr: A list of initial attributes required to create the
2977 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
2978 *   the actual capabilities of the created SRQ.
2979 *
2980 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
2981 * requested size of the SRQ, and set to the actual values allocated
2982 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
2983 * will always be at least as large as the requested values.
2984 */
2985struct ib_srq *ib_create_srq(struct ib_pd *pd,
2986                             struct ib_srq_init_attr *srq_init_attr);
2987
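/*
 * Example (illustrative sketch only): creating an SRQ for a hypothetical
 * ULP.  The attribute values are placeholders; on success,
 * srq_init_attr.attr reflects the actually allocated capabilities.
 *
 *	struct ib_srq_init_attr srq_init_attr = {
 *		.attr = { .max_wr = 128, .max_sge = 2 },
 *		.srq_type = IB_SRQT_BASIC,
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &srq_init_attr);
 *
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 */
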
2988/**
2989 * ib_modify_srq - Modifies the attributes for the specified SRQ.
2990 * @srq: The SRQ to modify.
2991 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
2992 *   the current values of selected SRQ attributes are returned.
2993 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
2994 *   are being modified.
2995 *
2996 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
2997 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
2998 * the number of receives queued drops below the limit.
2999 */
3000int ib_modify_srq(struct ib_srq *srq,
3001                  struct ib_srq_attr *srq_attr,
3002                  enum ib_srq_attr_mask srq_attr_mask);
3003
3004/**
3005 * ib_query_srq - Returns the attribute list and current values for the
3006 *   specified SRQ.
3007 * @srq: The SRQ to query.
3008 * @srq_attr: The attributes of the specified SRQ.
3009 */
3010int ib_query_srq(struct ib_srq *srq,
3011                 struct ib_srq_attr *srq_attr);
3012
3013/**
3014 * ib_destroy_srq - Destroys the specified SRQ.
3015 * @srq: The SRQ to destroy.
3016 */
3017int ib_destroy_srq(struct ib_srq *srq);
3018
3019/**
3020 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
3021 * @srq: The SRQ to post the work request on.
3022 * @recv_wr: A list of work requests to post on the receive queue.
3023 * @bad_recv_wr: On an immediate failure, this parameter will reference
3024 *   the work request that failed to be posted on the SRQ.
3025 */
3026static inline int ib_post_srq_recv(struct ib_srq *srq,
3027                                   struct ib_recv_wr *recv_wr,
3028                                   struct ib_recv_wr **bad_recv_wr)
3029{
3030        return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
3031}
3032
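/*
 * Example (illustrative sketch only): replenishing one receive buffer on
 * an SRQ.  "dma_addr", "len", "pd" and "srq" are assumed to come from the
 * caller's context.
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = pd->local_dma_lkey,
 *	};
 *	struct ib_recv_wr wr = { .sg_list = &sge, .num_sge = 1 };
 *	struct ib_recv_wr *bad_wr;
 *
 *	ret = ib_post_srq_recv(srq, &wr, &bad_wr);
 */
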
3033/**
3034 * ib_create_qp - Creates a QP associated with the specified protection
3035 *   domain.
3036 * @pd: The protection domain associated with the QP.
3037 * @qp_init_attr: A list of initial attributes required to create the
3038 *   QP.  If QP creation succeeds, then the attributes are updated to
3039 *   the actual capabilities of the created QP.
3040 */
3041struct ib_qp *ib_create_qp(struct ib_pd *pd,
3042                           struct ib_qp_init_attr *qp_init_attr);
3043
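/*
 * Example (illustrative sketch only): creating an RC QP.  The capability
 * values are placeholders for whatever the ULP actually needs; on success,
 * qp_init_attr.cap is updated with the granted values.
 *
 *	struct ib_qp_init_attr qp_init_attr = {
 *		.send_cq = send_cq,
 *		.recv_cq = recv_cq,
 *		.cap = {
 *			.max_send_wr  = 64,
 *			.max_recv_wr  = 64,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &qp_init_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */
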
3044/**
3045 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
3046 * @qp: The QP to modify.
3047 * @attr: On input, specifies the QP attributes to modify.  On output,
3048 *   the current values of selected QP attributes are returned.
3049 * @attr_mask: A bit-mask used to specify which attributes of the QP
3050 *   are being modified.
3051 * @udata: pointer to user's input/output buffer information
3053 * It returns 0 on success and returns appropriate error code on error.
3054 */
3055int ib_modify_qp_with_udata(struct ib_qp *qp,
3056                            struct ib_qp_attr *attr,
3057                            int attr_mask,
3058                            struct ib_udata *udata);
3059
3060/**
3061 * ib_modify_qp - Modifies the attributes for the specified QP and then
3062 *   transitions the QP to the given state.
3063 * @qp: The QP to modify.
3064 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
3065 *   the current values of selected QP attributes are returned.
3066 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
3067 *   are being modified.
3068 */
3069int ib_modify_qp(struct ib_qp *qp,
3070                 struct ib_qp_attr *qp_attr,
3071                 int qp_attr_mask);
3072
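/*
 * Example (illustrative sketch only): the first step of bringing up an RC
 * QP is the RESET->INIT transition.  The pkey index, port number and
 * access flags below are placeholders.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state        = IB_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_READ |
 *				   IB_ACCESS_REMOTE_WRITE,
 *	};
 *
 *	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */
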
3073/**
3074 * ib_query_qp - Returns the attribute list and current values for the
3075 *   specified QP.
3076 * @qp: The QP to query.
3077 * @qp_attr: The attributes of the specified QP.
3078 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
3079 * @qp_init_attr: Additional attributes of the selected QP.
3080 *
3081 * The qp_attr_mask may be used to limit the query to gathering only the
3082 * selected attributes.
3083 */
3084int ib_query_qp(struct ib_qp *qp,
3085                struct ib_qp_attr *qp_attr,
3086                int qp_attr_mask,
3087                struct ib_qp_init_attr *qp_init_attr);
3088
3089/**
3090 * ib_destroy_qp - Destroys the specified QP.
3091 * @qp: The QP to destroy.
3092 */
3093int ib_destroy_qp(struct ib_qp *qp);
3094
3095/**
3096 * ib_open_qp - Obtain a reference to an existing sharable QP.
3097 * @xrcd: XRC domain
3098 * @qp_open_attr: Attributes identifying the QP to open.
3099 *
3100 * Returns a reference to a sharable QP.
3101 */
3102struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3103                         struct ib_qp_open_attr *qp_open_attr);
3104
3105/**
3106 * ib_close_qp - Release an external reference to a QP.
3107 * @qp: The QP handle to release
3108 *
3109 * The opened QP handle is released by the caller.  The underlying
3110 * shared QP is not destroyed until all internal references are released.
3111 */
3112int ib_close_qp(struct ib_qp *qp);
3113
3114/**
3115 * ib_post_send - Posts a list of work requests to the send queue of
3116 *   the specified QP.
3117 * @qp: The QP to post the work request on.
3118 * @send_wr: A list of work requests to post on the send queue.
3119 * @bad_send_wr: On an immediate failure, this parameter will reference
3120 *   the work request that failed to be posted on the QP.
3121 *
3122 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
3123 * error is returned, the QP state shall not be affected,
3124 * ib_post_send() will return an immediate error after queueing any
3125 * earlier work requests in the list.
3126 */
3127static inline int ib_post_send(struct ib_qp *qp,
3128                               struct ib_send_wr *send_wr,
3129                               struct ib_send_wr **bad_send_wr)
3130{
3131        return qp->device->post_send(qp, send_wr, bad_send_wr);
3132}
3133
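/*
 * Example (illustrative sketch only): posting a single signaled SEND.
 * "dma_addr" and "len" refer to a buffer already mapped with one of the
 * ib_dma_map_*() helpers declared below.
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = pd->local_dma_lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.opcode     = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *	};
 *	struct ib_send_wr *bad_wr;
 *
 *	ret = ib_post_send(qp, &wr, &bad_wr);
 */
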
3134/**
3135 * ib_post_recv - Posts a list of work requests to the receive queue of
3136 *   the specified QP.
3137 * @qp: The QP to post the work request on.
3138 * @recv_wr: A list of work requests to post on the receive queue.
3139 * @bad_recv_wr: On an immediate failure, this parameter will reference
3140 *   the work request that failed to be posted on the QP.
3141 */
3142static inline int ib_post_recv(struct ib_qp *qp,
3143                               struct ib_recv_wr *recv_wr,
3144                               struct ib_recv_wr **bad_recv_wr)
3145{
3146        return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
3147}
3148
3149struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
3150                            int nr_cqe, int comp_vector,
3151                            enum ib_poll_context poll_ctx, const char *caller);
3152#define ib_alloc_cq(device, priv, nr_cqe, comp_vect, poll_ctx) \
3153        __ib_alloc_cq((device), (priv), (nr_cqe), (comp_vect), (poll_ctx), KBUILD_MODNAME)
3154
3155void ib_free_cq(struct ib_cq *cq);
3156int ib_process_cq_direct(struct ib_cq *cq, int budget);
3157
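/*
 * Example (illustrative sketch only): using ib_alloc_cq() together with a
 * per-WR struct ib_cqe completion callback instead of polling by hand.
 * "my_done" and "struct my_req" are hypothetical ULP names.
 *
 *	static void my_done(struct ib_cq *cq, struct ib_wc *wc)
 *	{
 *		struct my_req *req = container_of(wc->wr_cqe,
 *						  struct my_req, cqe);
 *		...
 *	}
 *
 *	cq = ib_alloc_cq(device, NULL, 128, 0, IB_POLL_SOFTIRQ);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	req->cqe.done = my_done;
 *	wr.wr_cqe = &req->cqe;
 */
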
3158/**
3159 * ib_create_cq - Creates a CQ on the specified device.
3160 * @device: The device on which to create the CQ.
3161 * @comp_handler: A user-specified callback that is invoked when a
3162 *   completion event occurs on the CQ.
3163 * @event_handler: A user-specified callback that is invoked when an
3164 *   asynchronous event not associated with a completion occurs on the CQ.
3165 * @cq_context: Context associated with the CQ returned to the user via
3166 *   the associated completion and event handlers.
3167 * @cq_attr: The attributes the CQ should be created upon.
3168 *
3169 * Users can examine the cq structure to determine the actual CQ size.
3170 */
3171struct ib_cq *ib_create_cq(struct ib_device *device,
3172                           ib_comp_handler comp_handler,
3173                           void (*event_handler)(struct ib_event *, void *),
3174                           void *cq_context,
3175                           const struct ib_cq_init_attr *cq_attr);
3176
3177/**
3178 * ib_resize_cq - Modifies the capacity of the CQ.
3179 * @cq: The CQ to resize.
3180 * @cqe: The minimum size of the CQ.
3181 *
3182 * Users can examine the cq structure to determine the actual CQ size.
3183 */
3184int ib_resize_cq(struct ib_cq *cq, int cqe);
3185
3186/**
3187 * rdma_set_cq_moderation - Modifies moderation params of the CQ
3188 * @cq: The CQ to modify.
3189 * @cq_count: number of CQEs that will trigger an event
3190 * @cq_period: max period of time in usec before triggering an event
3192 */
3193int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
3194
3195/**
3196 * ib_destroy_cq - Destroys the specified CQ.
3197 * @cq: The CQ to destroy.
3198 */
3199int ib_destroy_cq(struct ib_cq *cq);
3200
3201/**
3202 * ib_poll_cq - poll a CQ for completion(s)
3203 * @cq:the CQ being polled
3204 * @num_entries:maximum number of completions to return
3205 * @wc:array of at least @num_entries &struct ib_wc where completions
3206 *   will be returned
3207 *
3208 * Poll a CQ for (possibly multiple) completions.  If the return value
3209 * is < 0, an error occurred.  If the return value is >= 0, it is the
3210 * number of completions returned.  If the return value is
3211 * non-negative and < num_entries, then the CQ was emptied.
3212 */
3213static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
3214                             struct ib_wc *wc)
3215{
3216        return cq->device->poll_cq(cq, num_entries, wc);
3217}
3218
3219/**
3220 * ib_peek_cq - Returns the number of unreaped completions currently
3221 *   on the specified CQ.
3222 * @cq: The CQ to peek.
3223 * @wc_cnt: A minimum number of unreaped completions to check for.
3224 *
3225 * If the number of unreaped completions is greater than or equal to wc_cnt,
3226 * this function returns wc_cnt, otherwise, it returns the actual number of
3227 * unreaped completions.
3228 */
3229int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
3230
3231/**
3232 * ib_req_notify_cq - Request completion notification on a CQ.
3233 * @cq: The CQ to generate an event for.
3234 * @flags:
3235 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
3236 *   to request an event on the next solicited event or next work
3237 *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
3238 *   may also be |ed in to request a hint about missed events, as
3239 *   described below.
3240 *
3241 * Return Value:
3242 *    < 0 means an error occurred while requesting notification
3243 *   == 0 means notification was requested successfully, and if
3244 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
3245 *        were missed and it is safe to wait for another event.  In
3246 *        this case it is guaranteed that any work completions added
3247 *        to the CQ since the last CQ poll will trigger a completion
3248 *        notification event.
3249 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
3250 *        in.  It means that the consumer must poll the CQ again to
3251 *        make sure it is empty to avoid missing an event because of a
3252 *        race between requesting notification and an entry being
3253 *        added to the CQ.  This return value means it is possible
3254 *        (but not guaranteed) that a work completion has been added
3255 *        to the CQ since the last poll without triggering a
3256 *        completion notification event.
3257 */
3258static inline int ib_req_notify_cq(struct ib_cq *cq,
3259                                   enum ib_cq_notify_flags flags)
3260{
3261        return cq->device->req_notify_cq(cq, flags);
3262}
3263
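/*
 * Example (illustrative sketch only): the canonical arm-and-repoll loop
 * that closes the race described above.  "process" is a hypothetical
 * per-completion handler.
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			process(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */
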
3264/**
3265 * ib_req_ncomp_notif - Request completion notification when there are
3266 *   at least the specified number of unreaped completions on the CQ.
3267 * @cq: The CQ to generate an event for.
3268 * @wc_cnt: The number of unreaped completions that should be on the
3269 *   CQ before an event is generated.
3270 */
3271static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
3272{
3273        return cq->device->req_ncomp_notif ?
3274                cq->device->req_ncomp_notif(cq, wc_cnt) :
3275                -ENOSYS;
3276}
3277
3278/**
3279 * ib_dma_mapping_error - check a DMA addr for error
3280 * @dev: The device for which the dma_addr was created
3281 * @dma_addr: The DMA address to check
3282 */
3283static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
3284{
3285        return dma_mapping_error(dev->dma_device, dma_addr);
3286}
3287
3288/**
3289 * ib_dma_map_single - Map a kernel virtual address to DMA address
3290 * @dev: The device for which the dma_addr is to be created
3291 * @cpu_addr: The kernel virtual address
3292 * @size: The size of the region in bytes
3293 * @direction: The direction of the DMA
3294 */
3295static inline u64 ib_dma_map_single(struct ib_device *dev,
3296                                    void *cpu_addr, size_t size,
3297                                    enum dma_data_direction direction)
3298{
3299        return dma_map_single(dev->dma_device, cpu_addr, size, direction);
3300}
3301
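/*
 * Example (illustrative sketch only): every mapping should be checked
 * with ib_dma_mapping_error() before the address is handed to the HCA.
 * "buf" and "len" are caller-provided placeholders.
 *
 *	u64 dma_addr;
 *
 *	dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(dev, dma_addr))
 *		return -ENOMEM;
 *	...
 *	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
 */
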
3302/**
3303 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
3304 * @dev: The device for which the DMA address was created
3305 * @addr: The DMA address
3306 * @size: The size of the region in bytes
3307 * @direction: The direction of the DMA
3308 */
3309static inline void ib_dma_unmap_single(struct ib_device *dev,
3310                                       u64 addr, size_t size,
3311                                       enum dma_data_direction direction)
3312{
3313        dma_unmap_single(dev->dma_device, addr, size, direction);
3314}
3315
3316/**
3317 * ib_dma_map_page - Map a physical page to DMA address
3318 * @dev: The device for which the dma_addr is to be created
3319 * @page: The page to be mapped
3320 * @offset: The offset within the page
3321 * @size: The size of the region in bytes
3322 * @direction: The direction of the DMA
3323 */
3324static inline u64 ib_dma_map_page(struct ib_device *dev,
3325                                  struct page *page,
3326                                  unsigned long offset,
3327                                  size_t size,
3328                                  enum dma_data_direction direction)
3329{
3330        return dma_map_page(dev->dma_device, page, offset, size, direction);
3331}
3332
3333/**
3334 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
3335 * @dev: The device for which the DMA address was created
3336 * @addr: The DMA address
3337 * @size: The size of the region in bytes
3338 * @direction: The direction of the DMA
3339 */
3340static inline void ib_dma_unmap_page(struct ib_device *dev,
3341                                     u64 addr, size_t size,
3342                                     enum dma_data_direction direction)
3343{
3344        dma_unmap_page(dev->dma_device, addr, size, direction);
3345}
3346
3347/**
3348 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
3349 * @dev: The device for which the DMA addresses are to be created
3350 * @sg: The array of scatter/gather entries
3351 * @nents: The number of scatter/gather entries
3352 * @direction: The direction of the DMA
3353 */
3354static inline int ib_dma_map_sg(struct ib_device *dev,
3355                                struct scatterlist *sg, int nents,
3356                                enum dma_data_direction direction)
3357{
3358        return dma_map_sg(dev->dma_device, sg, nents, direction);
3359}
3360
3361/**
3362 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
3363 * @dev: The device for which the DMA addresses were created
3364 * @sg: The array of scatter/gather entries
3365 * @nents: The number of scatter/gather entries
3366 * @direction: The direction of the DMA
3367 */
3368static inline void ib_dma_unmap_sg(struct ib_device *dev,
3369                                   struct scatterlist *sg, int nents,
3370                                   enum dma_data_direction direction)
3371{
3372        dma_unmap_sg(dev->dma_device, sg, nents, direction);
3373}
3374
3375static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
3376                                      struct scatterlist *sg, int nents,
3377                                      enum dma_data_direction direction,
3378                                      unsigned long dma_attrs)
3379{
3380        return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
3381                                dma_attrs);
3382}
3383
3384static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
3385                                         struct scatterlist *sg, int nents,
3386                                         enum dma_data_direction direction,
3387                                         unsigned long dma_attrs)
3388{
3389        dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
3390}
3391/**
3392 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
3393 * @dev: The device for which the DMA addresses were created
3394 * @sg: The scatter/gather entry
3395 *
3396 * Note: this function is obsolete. To do: change all occurrences of
3397 * ib_sg_dma_address() into sg_dma_address().
3398 */
3399static inline u64 ib_sg_dma_address(struct ib_device *dev,
3400                                    struct scatterlist *sg)
3401{
3402        return sg_dma_address(sg);
3403}
3404
3405/**
3406 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
3407 * @dev: The device for which the DMA addresses were created
3408 * @sg: The scatter/gather entry
3409 *
3410 * Note: this function is obsolete. To do: change all occurrences of
3411 * ib_sg_dma_len() into sg_dma_len().
3412 */
3413static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
3414                                         struct scatterlist *sg)
3415{
3416        return sg_dma_len(sg);
3417}
3418
3419/**
3420 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
3421 * @dev: The device for which the DMA address was created
3422 * @addr: The DMA address
3423 * @size: The size of the region in bytes
3424 * @dir: The direction of the DMA
3425 */
3426static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
3427                                              u64 addr,
3428                                              size_t size,
3429                                              enum dma_data_direction dir)
3430{
3431        dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
3432}
3433
3434/**
3435 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
3436 * @dev: The device for which the DMA address was created
3437 * @addr: The DMA address
3438 * @size: The size of the region in bytes
3439 * @dir: The direction of the DMA
3440 */
3441static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
3442                                                 u64 addr,
3443                                                 size_t size,
3444                                                 enum dma_data_direction dir)
3445{
3446        dma_sync_single_for_device(dev->dma_device, addr, size, dir);
3447}
3448
3449/**
3450 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
3451 * @dev: The device for which the DMA address is requested
3452 * @size: The size of the region to allocate in bytes
3453 * @dma_handle: A pointer for returning the DMA address of the region
3454 * @flag: memory allocator flags
3455 */
3456static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
3457                                           size_t size,
3458                                           dma_addr_t *dma_handle,
3459                                           gfp_t flag)
3460{
3461        return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
3462}
3463
3464/**
3465 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
3466 * @dev: The device for which the DMA addresses were allocated
3467 * @size: The size of the region
3468 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
3469 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
3470 */
3471static inline void ib_dma_free_coherent(struct ib_device *dev,
3472                                        size_t size, void *cpu_addr,
3473                                        dma_addr_t dma_handle)
3474{
3475        dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
3476}
3477
3478/**
3479 * ib_dereg_mr - Deregisters a memory region and removes it from the
3480 *   HCA translation table.
3481 * @mr: The memory region to deregister.
3482 *
3483 * This function can fail, if the memory region has memory windows bound to it.
3484 */
3485int ib_dereg_mr(struct ib_mr *mr);
3486
3487struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
3488                          enum ib_mr_type mr_type,
3489                          u32 max_num_sg);
3490
3491/**
3492 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
3493 *   R_Key and L_Key.
3494 * @mr: struct ib_mr pointer to be updated.
3495 * @newkey: new key to be used.
3496 */
3497static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
3498{
3499        mr->lkey = (mr->lkey & 0xffffff00) | newkey;
3500        mr->rkey = (mr->rkey & 0xffffff00) | newkey;
3501}
3502
3503/**
3504 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
3505 * for calculating a new rkey for type 2 memory windows.
3506 * @rkey: the rkey to increment.
3507 */
3508static inline u32 ib_inc_rkey(u32 rkey)
3509{
3510        const u32 mask = 0x000000ff;
3511        return ((rkey + 1) & mask) | (rkey & ~mask);
3512}
3513
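/*
 * Example (illustrative sketch only): producing a fresh rkey before
 * rebinding a type 2 memory window.
 *
 *	new_rkey = ib_inc_rkey(mw->rkey);
 */
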
3514/**
3515 * ib_alloc_fmr - Allocates an unmapped fast memory region.
3516 * @pd: The protection domain associated with the unmapped region.
3517 * @mr_access_flags: Specifies the memory access rights.
3518 * @fmr_attr: Attributes of the unmapped region.
3519 *
3520 * A fast memory region must be mapped before it can be used as part of
3521 * a work request.
3522 */
3523struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
3524                            int mr_access_flags,
3525                            struct ib_fmr_attr *fmr_attr);
3526
3527/**
3528 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
3529 * @fmr: The fast memory region to associate with the pages.
3530 * @page_list: An array of physical pages to map to the fast memory region.
3531 * @list_len: The number of pages in page_list.
3532 * @iova: The I/O virtual address to use with the mapped region.
3533 */
3534static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
3535                                  u64 *page_list, int list_len,
3536                                  u64 iova)
3537{
3538        return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
3539}
3540
3541/**
3542 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
3543 * @fmr_list: A linked list of fast memory regions to unmap.
3544 */
3545int ib_unmap_fmr(struct list_head *fmr_list);
3546
3547/**
3548 * ib_dealloc_fmr - Deallocates a fast memory region.
3549 * @fmr: The fast memory region to deallocate.
3550 */
3551int ib_dealloc_fmr(struct ib_fmr *fmr);
3552
3553/**
3554 * ib_attach_mcast - Attaches the specified QP to a multicast group.
3555 * @qp: QP to attach to the multicast group.  The QP must be type
3556 *   IB_QPT_UD.
3557 * @gid: Multicast group GID.
3558 * @lid: Multicast group LID in host byte order.
3559 *
3560 * In order to send and receive multicast packets, subnet
3561 * administration must have created the multicast group and configured
3562 * the fabric appropriately.  The port associated with the specified
3563 * QP must also be a member of the multicast group.
3564 */
3565int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
3566
3567/**
3568 * ib_detach_mcast - Detaches the specified QP from a multicast group.
3569 * @qp: QP to detach from the multicast group.
3570 * @gid: Multicast group GID.
3571 * @lid: Multicast group LID in host byte order.
3572 */
3573int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
3574
3575/**
3576 * ib_alloc_xrcd - Allocates an XRC domain.
3577 * @device: The device on which to allocate the XRC domain.
3578 * @caller: Module name for kernel consumers
3579 */
3580struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
3581#define ib_alloc_xrcd(device) \
3582        __ib_alloc_xrcd((device), KBUILD_MODNAME)
3583
3584/**
3585 * ib_dealloc_xrcd - Deallocates an XRC domain.
3586 * @xrcd: The XRC domain to deallocate.
3587 */
3588int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
3589
3590struct ib_flow *ib_create_flow(struct ib_qp *qp,
3591                               struct ib_flow_attr *flow_attr, int domain);
3592int ib_destroy_flow(struct ib_flow *flow_id);
3593
3594static inline int ib_check_mr_access(int flags)
3595{
3596        /*
3597         * Local write permission is required if remote write or
3598         * remote atomic permission is also requested.
3599         */
3600        if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
3601            !(flags & IB_ACCESS_LOCAL_WRITE))
3602                return -EINVAL;
3603
3604        return 0;
3605}
3606
3607/**
3608 * ib_check_mr_status - lightweight check of MR status.
3609 *     This routine may provide status checks on a selected
3610 *     ib_mr. First use is for signature status check.
3611 *
3612 * @mr: A memory region.
3613 * @check_mask: Bitmask of which checks to perform from
3614 *     ib_mr_status_check enumeration.
3615 * @mr_status: The container of relevant status checks.
3616 *     Failed checks will be indicated in the status bitmask
3617 *     and the relevant info shall be in the error item.
3618 */
3619int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
3620                       struct ib_mr_status *mr_status);
3621
3622struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
3623                                            u16 pkey, const union ib_gid *gid,
3624                                            const struct sockaddr *addr);
3625struct ib_wq *ib_create_wq(struct ib_pd *pd,
3626                           struct ib_wq_init_attr *init_attr);
3627int ib_destroy_wq(struct ib_wq *wq);
3628int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
3629                 u32 wq_attr_mask);
3630struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
3631                                                 struct ib_rwq_ind_table_init_attr*
3632                                                 wq_ind_table_init_attr);
3633int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
3634
3635int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
3636                 unsigned int *sg_offset, unsigned int page_size);
3637
3638static inline int
3639ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
3640                  unsigned int *sg_offset, unsigned int page_size)
3641{
3642        int n;
3643
3644        n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
3645        mr->iova = 0;
3646
3647        return n;
3648}
3649
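/*
 * Example (illustrative sketch only): fast registration of an MR mapped
 * with ib_map_mr_sg(), posted via an IB_WR_REG_MR work request.  The
 * access flags and "key" byte are placeholders.
 *
 *	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
 *	if (n != sg_nents)
 *		return n < 0 ? n : -EINVAL;
 *	ib_update_fast_reg_key(mr, key);
 *
 *	reg_wr.wr.opcode = IB_WR_REG_MR;
 *	reg_wr.mr        = mr;
 *	reg_wr.key       = mr->rkey;
 *	reg_wr.access    = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
 *	ret = ib_post_send(qp, &reg_wr.wr, &bad_wr);
 */
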
3650int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
3651                unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
3652
3653void ib_drain_rq(struct ib_qp *qp);
3654void ib_drain_sq(struct ib_qp *qp);
3655void ib_drain_qp(struct ib_qp *qp);
3656
3657int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);
3658
3659static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
3660{
3661        if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
3662                return attr->roce.dmac;
3663        return NULL;
3664}
3665
3666static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
3667{
3668        if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3669                attr->ib.dlid = (u16)dlid;
3670        else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3671                attr->opa.dlid = dlid;
3672}
3673
3674static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
3675{
3676        if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3677                return attr->ib.dlid;
3678        else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3679                return attr->opa.dlid;
3680        return 0;
3681}
3682
3683static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
3684{
3685        attr->sl = sl;
3686}
3687
3688static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
3689{
3690        return attr->sl;
3691}
3692
3693static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
3694                                         u8 src_path_bits)
3695{
3696        if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3697                attr->ib.src_path_bits = src_path_bits;
3698        else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3699                attr->opa.src_path_bits = src_path_bits;
3700}
3701
3702static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
3703{
3704        if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3705                return attr->ib.src_path_bits;
3706        else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3707                return attr->opa.src_path_bits;
3708        return 0;
3709}
3710
3711static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
3712                                        bool make_grd)
3713{
3714        if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3715                attr->opa.make_grd = make_grd;
3716}
3717
3718static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
3719{
3720        if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3721                return attr->opa.make_grd;
3722        return false;
3723}
3724
3725static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
3726{
3727        attr->port_num = port_num;
3728}
3729
3730static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
3731{
3732        return attr->port_num;
3733}
3734
3735static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
3736                                           u8 static_rate)
3737{
3738        attr->static_rate = static_rate;
3739}
3740
3741static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
3742{
3743        return attr->static_rate;
3744}
3745
3746static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
3747                                        enum ib_ah_flags flag)
3748{
3749        attr->ah_flags = flag;
3750}
3751
3752static inline enum ib_ah_flags
3753                rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
3754{
3755        return attr->ah_flags;
3756}
3757
3758static inline const struct ib_global_route
3759                *rdma_ah_read_grh(const struct rdma_ah_attr *attr)
3760{
3761        return &attr->grh;
3762}
3763
3764/* To retrieve and modify the GRH */
3765static inline struct ib_global_route
3766                *rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
3767{
3768        return &attr->grh;
3769}
3770
3771static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
3772{
3773        struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
3774
3775        memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
3776}
3777
3778static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
3779                                             __be64 prefix)
3780{
3781        struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
3782
3783        grh->dgid.global.subnet_prefix = prefix;
3784}
3785
3786static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
3787                                            __be64 if_id)
3788{
3789        struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
3790
3791        grh->dgid.global.interface_id = if_id;
3792}
3793
3794static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
3795                                   union ib_gid *dgid, u32 flow_label,
3796                                   u8 sgid_index, u8 hop_limit,
3797                                   u8 traffic_class)
3798{
3799        struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
3800
3801        attr->ah_flags = IB_AH_GRH;
3802        if (dgid)
3803                grh->dgid = *dgid;
3804        grh->flow_label = flow_label;
3805        grh->sgid_index = sgid_index;
3806        grh->hop_limit = hop_limit;
3807        grh->traffic_class = traffic_class;
3808}
3809
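/*
 * Example (illustrative sketch only): filling a struct rdma_ah_attr with
 * the accessors above before calling rdma_create_ah().  The dlid, sl,
 * sgid_index and dgid values are placeholders.
 *
 *	struct rdma_ah_attr attr = {};
 *
 *	attr.type = rdma_ah_find_type(device, port_num);
 *	rdma_ah_set_port_num(&attr, port_num);
 *	rdma_ah_set_dlid(&attr, dlid);
 *	rdma_ah_set_sl(&attr, sl);
 *	rdma_ah_set_grh(&attr, &dgid, 0, sgid_index, 64, 0);
 *	ah = rdma_create_ah(pd, &attr);
 */
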
3810/**
3811 * rdma_ah_find_type - Return address handle type.
3812 *
3813 * @dev: Device to be checked
3814 * @port_num: Port number
3815 */
3816static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
3817                                                       u8 port_num)
3818{
3819        if (rdma_protocol_roce(dev, port_num))
3820                return RDMA_AH_ATTR_TYPE_ROCE;
3821        if (rdma_protocol_ib(dev, port_num)) {
3822                if (rdma_cap_opa_ah(dev, port_num))
3823                        return RDMA_AH_ATTR_TYPE_OPA;
3824                return RDMA_AH_ATTR_TYPE_IB;
3825        }
3826
3827        return RDMA_AH_ATTR_TYPE_UNDEFINED;
3828}
3829
3830/**
3831 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
3832 *     In the current implementation the only way to
3833 *     get the 32bit lid is from other sources for OPA.
3834 *     For IB, lids will always be 16bits so cast the
3835 *     value accordingly.
3836 *
3837 * @lid: A 32bit LID
3838 */
3839static inline u16 ib_lid_cpu16(u32 lid)
3840{
3841        WARN_ON_ONCE(lid & 0xFFFF0000);
3842        return (u16)lid;
3843}
3844
3845/**
3846 * ib_lid_be16 - Return lid in 16bit BE encoding.
3847 *
3848 * @lid: A 32bit LID
3849 */
3850static inline __be16 ib_lid_be16(u32 lid)
3851{
3852        WARN_ON_ONCE(lid & 0xFFFF0000);
3853        return cpu_to_be16((u16)lid);
3854}
3855
3856/**
3857 * ib_get_vector_affinity - Get the affinity mappings of a given completion
3858 *   vector
3859 * @device:         the rdma device
3860 * @comp_vector:    index of completion vector
3861 *
3862 * Returns NULL if the device driver doesn't implement get_vector_affinity
3863 * or if @comp_vector is out of range, otherwise the cpu map of the
3864 * completion vector.
3865 */
3866static inline const struct cpumask *
3867ib_get_vector_affinity(struct ib_device *device, int comp_vector)
3868{
3869        if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
3870            !device->get_vector_affinity)
3871                return NULL;
3872
3873        return device->get_vector_affinity(device, comp_vector);
3875}
3876
3877/**
3878 * rdma_roce_rescan_device - Rescan all of the network devices in the system
3879 * and add their gids, as needed, to the relevant RoCE devices.
3880 *
3881 * @ibdev:          the rdma device
3882 */
3883void rdma_roce_rescan_device(struct ib_device *ibdev);
3884
3885#endif /* IB_VERBS_H */
3886