linux/include/rdma/ib_verbs.h
/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/refcount.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <linux/irqflags.h>
#include <linux/preempt.h>
#include <linux/dim.h>
#include <uapi/rdma/ib_user_verbs.h>
#include <rdma/rdma_counter.h>
#include <rdma/restrack.h>
#include <rdma/signature.h>
#include <uapi/rdma/rdma_user_ioctl.h>
#include <uapi/rdma/ib_user_ioctl_verbs.h>

#define IB_FW_VERSION_NAME_MAX  ETHTOOL_FWVERS_LEN

struct ib_umem_odp;

extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
extern struct workqueue_struct *ib_comp_unbound_wq;

__printf(3, 4) __cold
void ibdev_printk(const char *level, const struct ib_device *ibdev,
                  const char *format, ...);
__printf(2, 3) __cold
void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_info(const struct ib_device *ibdev, const char *format, ...);

#if defined(CONFIG_DYNAMIC_DEBUG)
#define ibdev_dbg(__dev, format, args...)                       \
        dynamic_ibdev_dbg(__dev, format, ##args)
#elif defined(DEBUG)
#define ibdev_dbg(__dev, format, args...)                       \
        ibdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
__printf(2, 3) __cold
static inline
void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
#endif
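
/*
 * Example (illustrative sketch, not part of the API): the ibdev_*()
 * helpers take the ib_device so that messages are prefixed with the
 * device name.  A driver might report a failed command as:
 *
 *      ibdev_err(&dev->ibdev, "admin command failed: %d\n", ret);
 *      ibdev_dbg(&dev->ibdev, "retrying, attempt %u\n", attempt);
 *
 * where 'dev', 'ret' and 'attempt' are hypothetical driver state.
 */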

union ib_gid {
        u8      raw[16];
        struct {
                __be64  subnet_prefix;
                __be64  interface_id;
        } global;
};

extern union ib_gid zgid;

enum ib_gid_type {
        /* If link layer is Ethernet, this is RoCE V1 */
        IB_GID_TYPE_IB        = 0,
        IB_GID_TYPE_ROCE      = 0,
        IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
        IB_GID_TYPE_SIZE
};

#define ROCE_V2_UDP_DPORT      4791
struct ib_gid_attr {
        struct net_device __rcu *ndev;
        struct ib_device        *device;
        union ib_gid            gid;
        enum ib_gid_type        gid_type;
        u16                     index;
        u8                      port_num;
};

enum {
        /* set the locally administered indication */
        IB_SA_WELL_KNOWN_GUID   = BIT_ULL(57) | 2,
};

enum rdma_transport_type {
        RDMA_TRANSPORT_IB,
        RDMA_TRANSPORT_IWARP,
        RDMA_TRANSPORT_USNIC,
        RDMA_TRANSPORT_USNIC_UDP,
        RDMA_TRANSPORT_UNSPECIFIED,
};

enum rdma_protocol_type {
        RDMA_PROTOCOL_IB,
        RDMA_PROTOCOL_IBOE,
        RDMA_PROTOCOL_IWARP,
        RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(unsigned int node_type);

enum rdma_network_type {
        RDMA_NETWORK_IB,
        RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
        RDMA_NETWORK_IPV4,
        RDMA_NETWORK_IPV6
};

static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
{
        if (network_type == RDMA_NETWORK_IPV4 ||
            network_type == RDMA_NETWORK_IPV6)
                return IB_GID_TYPE_ROCE_UDP_ENCAP;

        /* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
        return IB_GID_TYPE_IB;
}

static inline enum rdma_network_type
rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
{
        if (attr->gid_type == IB_GID_TYPE_IB)
                return RDMA_NETWORK_IB;

        if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
                return RDMA_NETWORK_IPV4;
        else
                return RDMA_NETWORK_IPV6;
}

enum rdma_link_layer {
        IB_LINK_LAYER_UNSPECIFIED,
        IB_LINK_LAYER_INFINIBAND,
        IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
        IB_DEVICE_RESIZE_MAX_WR                 = (1 << 0),
        IB_DEVICE_BAD_PKEY_CNTR                 = (1 << 1),
        IB_DEVICE_BAD_QKEY_CNTR                 = (1 << 2),
        IB_DEVICE_RAW_MULTI                     = (1 << 3),
        IB_DEVICE_AUTO_PATH_MIG                 = (1 << 4),
        IB_DEVICE_CHANGE_PHY_PORT               = (1 << 5),
        IB_DEVICE_UD_AV_PORT_ENFORCE            = (1 << 6),
        IB_DEVICE_CURR_QP_STATE_MOD             = (1 << 7),
        IB_DEVICE_SHUTDOWN_PORT                 = (1 << 8),
        /* Not in use, former INIT_TYPE         = (1 << 9),*/
        IB_DEVICE_PORT_ACTIVE_EVENT             = (1 << 10),
        IB_DEVICE_SYS_IMAGE_GUID                = (1 << 11),
        IB_DEVICE_RC_RNR_NAK_GEN                = (1 << 12),
        IB_DEVICE_SRQ_RESIZE                    = (1 << 13),
        IB_DEVICE_N_NOTIFY_CQ                   = (1 << 14),

        /*
         * This device supports a per-device lkey or stag that can be
         * used without performing a memory registration for the local
         * memory.  Note that ULPs should never check this flag, but
         * should instead use the local_dma_lkey field in the ib_pd
         * structure, which will always contain a usable lkey (see the
         * sketch after this enum).
         */
        IB_DEVICE_LOCAL_DMA_LKEY                = (1 << 15),
        /* Reserved, old SEND_W_INV             = (1 << 16),*/
        IB_DEVICE_MEM_WINDOW                    = (1 << 17),
        /*
         * Devices should set IB_DEVICE_UD_IP_SUM if they support
         * insertion of UDP and TCP checksum on outgoing UD IPoIB
         * messages and can verify the validity of checksum for
         * incoming messages.  Setting this flag implies that the
         * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
         */
        IB_DEVICE_UD_IP_CSUM                    = (1 << 18),
        IB_DEVICE_UD_TSO                        = (1 << 19),
        IB_DEVICE_XRC                           = (1 << 20),

        /*
         * This device supports the IB "base memory management extension",
         * which includes support for fast registrations (IB_WR_REG_MR,
         * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
         * also be set by any iWARP device, which must support FRs to comply
         * with the iWARP verbs spec.  iWARP devices also support the
         * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
         * stag.
         */
        IB_DEVICE_MEM_MGT_EXTENSIONS            = (1 << 21),
        IB_DEVICE_BLOCK_MULTICAST_LOOPBACK      = (1 << 22),
        IB_DEVICE_MEM_WINDOW_TYPE_2A            = (1 << 23),
        IB_DEVICE_MEM_WINDOW_TYPE_2B            = (1 << 24),
        IB_DEVICE_RC_IP_CSUM                    = (1 << 25),
        /* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
        IB_DEVICE_RAW_IP_CSUM                   = (1 << 26),
        /*
         * Devices should set IB_DEVICE_CROSS_CHANNEL if they
         * support execution of WQEs that involve synchronization
         * of I/O operations with a single completion queue managed
         * by hardware.
         */
        IB_DEVICE_CROSS_CHANNEL                 = (1 << 27),
        IB_DEVICE_MANAGED_FLOW_STEERING         = (1 << 29),
        IB_DEVICE_INTEGRITY_HANDOVER            = (1 << 30),
        IB_DEVICE_ON_DEMAND_PAGING              = (1ULL << 31),
        IB_DEVICE_SG_GAPS_REG                   = (1ULL << 32),
        IB_DEVICE_VIRTUAL_FUNCTION              = (1ULL << 33),
        /* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
        IB_DEVICE_RAW_SCATTER_FCS               = (1ULL << 34),
        IB_DEVICE_RDMA_NETDEV_OPA_VNIC          = (1ULL << 35),
        /* The device supports padding incoming writes to cacheline. */
        IB_DEVICE_PCI_WRITE_END_PADDING         = (1ULL << 36),
        IB_DEVICE_ALLOW_USER_UNREG              = (1ULL << 37),
};
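
/*
 * Sketch of the local_dma_lkey usage referred to above (illustrative
 * only): a ULP filling an ib_sge for a DMA-mapped buffer uses the
 * PD's lkey instead of testing IB_DEVICE_LOCAL_DMA_LKEY itself:
 *
 *      struct ib_sge sge = {
 *              .addr   = dma_addr,     // hypothetical DMA address
 *              .length = len,          // hypothetical length
 *              .lkey   = pd->local_dma_lkey,
 *      };
 *
 * The core substitutes a usable lkey even when the device does not set
 * the capability bit.
 */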

enum ib_atomic_cap {
        IB_ATOMIC_NONE,
        IB_ATOMIC_HCA,
        IB_ATOMIC_GLOB
};

enum ib_odp_general_cap_bits {
        IB_ODP_SUPPORT          = 1 << 0,
        IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
};

enum ib_odp_transport_cap_bits {
        IB_ODP_SUPPORT_SEND     = 1 << 0,
        IB_ODP_SUPPORT_RECV     = 1 << 1,
        IB_ODP_SUPPORT_WRITE    = 1 << 2,
        IB_ODP_SUPPORT_READ     = 1 << 3,
        IB_ODP_SUPPORT_ATOMIC   = 1 << 4,
        IB_ODP_SUPPORT_SRQ_RECV = 1 << 5,
};

struct ib_odp_caps {
        uint64_t general_caps;
        struct {
                uint32_t  rc_odp_caps;
                uint32_t  uc_odp_caps;
                uint32_t  ud_odp_caps;
                uint32_t  xrc_odp_caps;
        } per_transport_caps;
};

struct ib_rss_caps {
        /* The corresponding bit is set if the QP type from
         * 'enum ib_qp_type' is supported, e.g.
         * supported_qpts |= 1 << IB_QPT_UD
         */
        u32 supported_qpts;
        u32 max_rwq_indirection_tables;
        u32 max_rwq_indirection_table_size;
};

enum ib_tm_cap_flags {
        /*  Support tag matching with rendezvous offload for RC transport */
        IB_TM_CAP_RNDV_RC = 1 << 0,
};

struct ib_tm_caps {
        /* Max size of RNDV header */
        u32 max_rndv_hdr_size;
        /* Max number of entries in tag matching list */
        u32 max_num_tags;
        /* From enum ib_tm_cap_flags */
        u32 flags;
        /* Max number of outstanding list operations */
        u32 max_ops;
        /* Max number of SGE in tag matching entry */
        u32 max_sge;
};

struct ib_cq_init_attr {
        unsigned int    cqe;
        int             comp_vector;
        u32             flags;
};

enum ib_cq_attr_mask {
        IB_CQ_MODERATE = 1 << 0,
};

struct ib_cq_caps {
        u16     max_cq_moderation_count;
        u16     max_cq_moderation_period;
};

struct ib_dm_mr_attr {
        u64             length;
        u64             offset;
        u32             access_flags;
};

struct ib_dm_alloc_attr {
        u64     length;
        u32     alignment;
        u32     flags;
};

struct ib_device_attr {
        u64                     fw_ver;
        __be64                  sys_image_guid;
        u64                     max_mr_size;
        u64                     page_size_cap;
        u32                     vendor_id;
        u32                     vendor_part_id;
        u32                     hw_ver;
        int                     max_qp;
        int                     max_qp_wr;
        u64                     device_cap_flags;
        int                     max_send_sge;
        int                     max_recv_sge;
        int                     max_sge_rd;
        int                     max_cq;
        int                     max_cqe;
        int                     max_mr;
        int                     max_pd;
        int                     max_qp_rd_atom;
        int                     max_ee_rd_atom;
        int                     max_res_rd_atom;
        int                     max_qp_init_rd_atom;
        int                     max_ee_init_rd_atom;
        enum ib_atomic_cap      atomic_cap;
        enum ib_atomic_cap      masked_atomic_cap;
        int                     max_ee;
        int                     max_rdd;
        int                     max_mw;
        int                     max_raw_ipv6_qp;
        int                     max_raw_ethy_qp;
        int                     max_mcast_grp;
        int                     max_mcast_qp_attach;
        int                     max_total_mcast_qp_attach;
        int                     max_ah;
        int                     max_fmr;
        int                     max_map_per_fmr;
        int                     max_srq;
        int                     max_srq_wr;
        int                     max_srq_sge;
        unsigned int            max_fast_reg_page_list_len;
        unsigned int            max_pi_fast_reg_page_list_len;
        u16                     max_pkeys;
        u8                      local_ca_ack_delay;
        int                     sig_prot_cap;
        int                     sig_guard_cap;
        struct ib_odp_caps      odp_caps;
        uint64_t                timestamp_mask;
        uint64_t                hca_core_clock; /* in kHz */
        struct ib_rss_caps      rss_caps;
        u32                     max_wq_type_rq;
        u32                     raw_packet_caps; /* Use ib_raw_packet_caps enum */
        struct ib_tm_caps       tm_caps;
        struct ib_cq_caps       cq_caps;
        u64                     max_dm_size;
};

enum ib_mtu {
        IB_MTU_256  = 1,
        IB_MTU_512  = 2,
        IB_MTU_1024 = 3,
        IB_MTU_2048 = 4,
        IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
        switch (mtu) {
        case IB_MTU_256:  return  256;
        case IB_MTU_512:  return  512;
        case IB_MTU_1024: return 1024;
        case IB_MTU_2048: return 2048;
        case IB_MTU_4096: return 4096;
        default:          return -1;
        }
}

static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
        if (mtu >= 4096)
                return IB_MTU_4096;
        else if (mtu >= 2048)
                return IB_MTU_2048;
        else if (mtu >= 1024)
                return IB_MTU_1024;
        else if (mtu >= 512)
                return IB_MTU_512;
        else
                return IB_MTU_256;
}

enum ib_port_state {
        IB_PORT_NOP             = 0,
        IB_PORT_DOWN            = 1,
        IB_PORT_INIT            = 2,
        IB_PORT_ARMED           = 3,
        IB_PORT_ACTIVE          = 4,
        IB_PORT_ACTIVE_DEFER    = 5
};

enum ib_port_width {
        IB_WIDTH_1X     = 1,
        IB_WIDTH_2X     = 16,
        IB_WIDTH_4X     = 2,
        IB_WIDTH_8X     = 4,
        IB_WIDTH_12X    = 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
        switch (width) {
        case IB_WIDTH_1X:  return  1;
        case IB_WIDTH_2X:  return  2;
        case IB_WIDTH_4X:  return  4;
        case IB_WIDTH_8X:  return  8;
        case IB_WIDTH_12X: return 12;
        default:          return -1;
        }
}

enum ib_port_speed {
        IB_SPEED_SDR    = 1,
        IB_SPEED_DDR    = 2,
        IB_SPEED_QDR    = 4,
        IB_SPEED_FDR10  = 8,
        IB_SPEED_FDR    = 16,
        IB_SPEED_EDR    = 32,
        IB_SPEED_HDR    = 64
};

/**
 * struct rdma_hw_stats
 * @lock - Mutex to protect parallel write access to lifespan and values
 *    of counters, which are 64 bits and not guaranteed to be written
 *    atomically on 32-bit systems.
 * @timestamp - Used by the core code to track when the last update was
 * @lifespan - Used by the core code to determine how old the counters
 *   should be before being updated again.  Stored in jiffies, defaults
 *   to 10 milliseconds, drivers can override the default by specifying
 *   their own value during their allocation routine.
 * @names - Array of pointers to static names used for the counters in
 *   the sysfs directory.
 * @num_counters - How many hardware counters there are.  If names is
 *   shorter than this number, a kernel oops will result.  Driver authors
 *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(names) < num_counters)
 *   in their code to prevent this.
 * @value - Array of u64 counters that are accessed by the sysfs code and
 *   filled in by the driver's get_stats routine
 */
struct rdma_hw_stats {
        struct mutex    lock; /* Protect lifespan and values[] */
        unsigned long   timestamp;
        unsigned long   lifespan;
        const char * const *names;
        int             num_counters;
        u64             value[];
};

#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
/**
 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
 *   for drivers.
 * @names - Array of static const char *
 * @num_counters - How many elements in array
 * @lifespan - How many milliseconds between updates
 */
static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
                const char * const *names, int num_counters,
                unsigned long lifespan)
{
        struct rdma_hw_stats *stats;

        stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
                        GFP_KERNEL);
        if (!stats)
                return NULL;
        stats->names = names;
        stats->num_counters = num_counters;
        stats->lifespan = msecs_to_jiffies(lifespan);

        return stats;
}
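
/*
 * Example (illustrative sketch): a driver with two hardware counters
 * might allocate its stats struct as:
 *
 *      static const char * const foo_counter_names[] = {
 *              "foo_rx_packets",
 *              "foo_tx_packets",
 *      };
 *
 *      stats = rdma_alloc_hw_stats_struct(foo_counter_names,
 *                                         ARRAY_SIZE(foo_counter_names),
 *                                         RDMA_HW_STATS_DEFAULT_LIFESPAN);
 *
 * The "foo" names are hypothetical; using ARRAY_SIZE() for num_counters
 * avoids the names/num_counters mismatch warned about above.
 */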

/* Define bits for the various pieces of functionality a port needs the
 * core to support.
 */
/* Management                           0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD            0x00000001
#define RDMA_CORE_CAP_IB_SMI            0x00000002
#define RDMA_CORE_CAP_IB_CM             0x00000004
#define RDMA_CORE_CAP_IW_CM             0x00000008
#define RDMA_CORE_CAP_IB_SA             0x00000010
#define RDMA_CORE_CAP_OPA_MAD           0x00000020

/* Address format                       0x000FF000 */
#define RDMA_CORE_CAP_AF_IB             0x00001000
#define RDMA_CORE_CAP_ETH_AH            0x00002000
#define RDMA_CORE_CAP_OPA_AH            0x00004000
#define RDMA_CORE_CAP_IB_GRH_REQUIRED   0x00008000

/* Protocol                             0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB           0x00100000
#define RDMA_CORE_CAP_PROT_ROCE         0x00200000
#define RDMA_CORE_CAP_PROT_IWARP        0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
#define RDMA_CORE_CAP_PROT_RAW_PACKET   0x01000000
#define RDMA_CORE_CAP_PROT_USNIC        0x02000000

#define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
                                        | RDMA_CORE_CAP_PROT_ROCE     \
                                        | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)

#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
                                        | RDMA_CORE_CAP_IB_MAD \
                                        | RDMA_CORE_CAP_IB_SMI \
                                        | RDMA_CORE_CAP_IB_CM  \
                                        | RDMA_CORE_CAP_IB_SA  \
                                        | RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
                                        | RDMA_CORE_CAP_IB_MAD  \
                                        | RDMA_CORE_CAP_IB_CM   \
                                        | RDMA_CORE_CAP_AF_IB   \
                                        | RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP                       \
                                        (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
                                        | RDMA_CORE_CAP_IB_MAD  \
                                        | RDMA_CORE_CAP_IB_CM   \
                                        | RDMA_CORE_CAP_AF_IB   \
                                        | RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
                                        | RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
                                        | RDMA_CORE_CAP_OPA_MAD)

#define RDMA_CORE_PORT_RAW_PACKET       (RDMA_CORE_CAP_PROT_RAW_PACKET)

#define RDMA_CORE_PORT_USNIC            (RDMA_CORE_CAP_PROT_USNIC)

struct ib_port_attr {
        u64                     subnet_prefix;
        enum ib_port_state      state;
        enum ib_mtu             max_mtu;
        enum ib_mtu             active_mtu;
        int                     gid_tbl_len;
        unsigned int            ip_gids:1;
        /* This is the value from PortInfo CapabilityMask, defined by IBA */
        u32                     port_cap_flags;
        u32                     max_msg_sz;
        u32                     bad_pkey_cntr;
        u32                     qkey_viol_cntr;
        u16                     pkey_tbl_len;
        u32                     sm_lid;
        u32                     lid;
        u8                      lmc;
        u8                      max_vl_num;
        u8                      sm_sl;
        u8                      subnet_timeout;
        u8                      init_type_reply;
        u8                      active_width;
        u8                      active_speed;
        u8                      phys_state;
        u16                     port_cap_flags2;
};

enum ib_device_modify_flags {
        IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
        IB_DEVICE_MODIFY_NODE_DESC      = 1 << 1
};

#define IB_DEVICE_NODE_DESC_MAX 64

struct ib_device_modify {
        u64     sys_image_guid;
        char    node_desc[IB_DEVICE_NODE_DESC_MAX];
};

enum ib_port_modify_flags {
        IB_PORT_SHUTDOWN                = 1,
        IB_PORT_INIT_TYPE               = (1<<2),
        IB_PORT_RESET_QKEY_CNTR         = (1<<3),
        IB_PORT_OPA_MASK_CHG            = (1<<4)
};

struct ib_port_modify {
        u32     set_port_cap_mask;
        u32     clr_port_cap_mask;
        u8      init_type;
};

enum ib_event_type {
        IB_EVENT_CQ_ERR,
        IB_EVENT_QP_FATAL,
        IB_EVENT_QP_REQ_ERR,
        IB_EVENT_QP_ACCESS_ERR,
        IB_EVENT_COMM_EST,
        IB_EVENT_SQ_DRAINED,
        IB_EVENT_PATH_MIG,
        IB_EVENT_PATH_MIG_ERR,
        IB_EVENT_DEVICE_FATAL,
        IB_EVENT_PORT_ACTIVE,
        IB_EVENT_PORT_ERR,
        IB_EVENT_LID_CHANGE,
        IB_EVENT_PKEY_CHANGE,
        IB_EVENT_SM_CHANGE,
        IB_EVENT_SRQ_ERR,
        IB_EVENT_SRQ_LIMIT_REACHED,
        IB_EVENT_QP_LAST_WQE_REACHED,
        IB_EVENT_CLIENT_REREGISTER,
        IB_EVENT_GID_CHANGE,
        IB_EVENT_WQ_FATAL,
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event);

struct ib_event {
        struct ib_device        *device;
        union {
                struct ib_cq    *cq;
                struct ib_qp    *qp;
                struct ib_srq   *srq;
                struct ib_wq    *wq;
                u8              port_num;
        } element;
        enum ib_event_type      event;
};

struct ib_event_handler {
        struct ib_device *device;
        void            (*handler)(struct ib_event_handler *, struct ib_event *);
        struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)          \
        do {                                                    \
                (_ptr)->device  = _device;                      \
                (_ptr)->handler = _handler;                     \
                INIT_LIST_HEAD(&(_ptr)->list);                  \
        } while (0)
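
/*
 * Sketch of a consumer wiring up an event handler (illustrative; the
 * registration helper ib_register_event_handler() is declared further
 * down in this header):
 *
 *      static void foo_event_handler(struct ib_event_handler *handler,
 *                                    struct ib_event *event)
 *      {
 *              // handle the async event, e.g. switch on event->event
 *      }
 *
 *      INIT_IB_EVENT_HANDLER(&foo_handler, device, foo_event_handler);
 *      ib_register_event_handler(&foo_handler);
 *
 * 'foo_handler' is a hypothetical struct ib_event_handler owned by the
 * consumer.
 */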

struct ib_global_route {
        const struct ib_gid_attr *sgid_attr;
        union ib_gid    dgid;
        u32             flow_label;
        u8              sgid_index;
        u8              hop_limit;
        u8              traffic_class;
};

struct ib_grh {
        __be32          version_tclass_flow;
        __be16          paylen;
        u8              next_hdr;
        u8              hop_limit;
        union ib_gid    sgid;
        union ib_gid    dgid;
};

union rdma_network_hdr {
        struct ib_grh ibgrh;
        struct {
                /* The IB spec states that if it's IPv4, the IP header
                 * occupies the last 20 bytes of the GRH-sized header.
                 */
                u8              reserved[20];
                struct iphdr    roce4grh;
        };
};

#define IB_QPN_MASK             0xFFFFFF

enum {
        IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE       cpu_to_be16(0xFFFF)
#define IB_MULTICAST_LID_BASE   cpu_to_be16(0xC000)

enum ib_ah_flags {
        IB_AH_GRH       = 1
};

enum ib_rate {
        IB_RATE_PORT_CURRENT = 0,
        IB_RATE_2_5_GBPS = 2,
        IB_RATE_5_GBPS   = 5,
        IB_RATE_10_GBPS  = 3,
        IB_RATE_20_GBPS  = 6,
        IB_RATE_30_GBPS  = 4,
        IB_RATE_40_GBPS  = 7,
        IB_RATE_60_GBPS  = 8,
        IB_RATE_80_GBPS  = 9,
        IB_RATE_120_GBPS = 10,
        IB_RATE_14_GBPS  = 11,
        IB_RATE_56_GBPS  = 12,
        IB_RATE_112_GBPS = 13,
        IB_RATE_168_GBPS = 14,
        IB_RATE_25_GBPS  = 15,
        IB_RATE_100_GBPS = 16,
        IB_RATE_200_GBPS = 17,
        IB_RATE_300_GBPS = 18,
        IB_RATE_28_GBPS  = 19,
        IB_RATE_50_GBPS  = 20,
        IB_RATE_400_GBPS = 21,
        IB_RATE_600_GBPS = 22,
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);

/**
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG:       memory region that is used for
 *                            normal registration
 * @IB_MR_TYPE_SG_GAPS:       memory region that is capable of
 *                            registering arbitrary SG lists (without
 *                            the normal MR constraints - see
 *                            ib_map_mr_sg)
 * @IB_MR_TYPE_DM:            memory region that is used for device
 *                            memory registration
 * @IB_MR_TYPE_USER:          memory region that is used for the user-space
 *                            application
 * @IB_MR_TYPE_DMA:           memory region that is used for DMA operations
 *                            without address translations (VA=PA)
 * @IB_MR_TYPE_INTEGRITY:     memory region that is used for
 *                            data integrity operations
 */
enum ib_mr_type {
        IB_MR_TYPE_MEM_REG,
        IB_MR_TYPE_SG_GAPS,
        IB_MR_TYPE_DM,
        IB_MR_TYPE_USER,
        IB_MR_TYPE_DMA,
        IB_MR_TYPE_INTEGRITY,
};

enum ib_mr_status_check {
        IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
        u32                 fail_status;
        struct ib_sig_err   sig_err;
};

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

enum rdma_ah_attr_type {
        RDMA_AH_ATTR_TYPE_UNDEFINED,
        RDMA_AH_ATTR_TYPE_IB,
        RDMA_AH_ATTR_TYPE_ROCE,
        RDMA_AH_ATTR_TYPE_OPA,
};

struct ib_ah_attr {
        u16                     dlid;
        u8                      src_path_bits;
};

struct roce_ah_attr {
        u8                      dmac[ETH_ALEN];
};

struct opa_ah_attr {
        u32                     dlid;
        u8                      src_path_bits;
        bool                    make_grd;
};

struct rdma_ah_attr {
        struct ib_global_route  grh;
        u8                      sl;
        u8                      static_rate;
        u8                      port_num;
        u8                      ah_flags;
        enum rdma_ah_attr_type type;
        union {
                struct ib_ah_attr ib;
                struct roce_ah_attr roce;
                struct opa_ah_attr opa;
        };
};

enum ib_wc_status {
        IB_WC_SUCCESS,
        IB_WC_LOC_LEN_ERR,
        IB_WC_LOC_QP_OP_ERR,
        IB_WC_LOC_EEC_OP_ERR,
        IB_WC_LOC_PROT_ERR,
        IB_WC_WR_FLUSH_ERR,
        IB_WC_MW_BIND_ERR,
        IB_WC_BAD_RESP_ERR,
        IB_WC_LOC_ACCESS_ERR,
        IB_WC_REM_INV_REQ_ERR,
        IB_WC_REM_ACCESS_ERR,
        IB_WC_REM_OP_ERR,
        IB_WC_RETRY_EXC_ERR,
        IB_WC_RNR_RETRY_EXC_ERR,
        IB_WC_LOC_RDD_VIOL_ERR,
        IB_WC_REM_INV_RD_REQ_ERR,
        IB_WC_REM_ABORT_ERR,
        IB_WC_INV_EECN_ERR,
        IB_WC_INV_EEC_STATE_ERR,
        IB_WC_FATAL_ERR,
        IB_WC_RESP_TIMEOUT_ERR,
        IB_WC_GENERAL_ERR
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
        IB_WC_SEND,
        IB_WC_RDMA_WRITE,
        IB_WC_RDMA_READ,
        IB_WC_COMP_SWAP,
        IB_WC_FETCH_ADD,
        IB_WC_LSO,
        IB_WC_LOCAL_INV,
        IB_WC_REG_MR,
        IB_WC_MASKED_COMP_SWAP,
        IB_WC_MASKED_FETCH_ADD,
/*
 * The value of IB_WC_RECV is chosen so that consumers can test whether
 * a completion is a receive by testing (opcode & IB_WC_RECV); see the
 * sketch after this enum.
 */
        IB_WC_RECV                      = 1 << 7,
        IB_WC_RECV_RDMA_WITH_IMM
};
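
/*
 * Example of the receive test described above (illustrative):
 *
 *      if (wc->opcode & IB_WC_RECV) {
 *              // completion is for a receive work request
 *      }
 */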

enum ib_wc_flags {
        IB_WC_GRH               = 1,
        IB_WC_WITH_IMM          = (1<<1),
        IB_WC_WITH_INVALIDATE   = (1<<2),
        IB_WC_IP_CSUM_OK        = (1<<3),
        IB_WC_WITH_SMAC         = (1<<4),
        IB_WC_WITH_VLAN         = (1<<5),
        IB_WC_WITH_NETWORK_HDR_TYPE     = (1<<6),
};

struct ib_wc {
        union {
                u64             wr_id;
                struct ib_cqe   *wr_cqe;
        };
        enum ib_wc_status       status;
        enum ib_wc_opcode       opcode;
        u32                     vendor_err;
        u32                     byte_len;
        struct ib_qp           *qp;
        union {
                __be32          imm_data;
                u32             invalidate_rkey;
        } ex;
        u32                     src_qp;
        u32                     slid;
        int                     wc_flags;
        u16                     pkey_index;
        u8                      sl;
        u8                      dlid_path_bits;
        u8                      port_num;       /* valid only for DR SMPs on switches */
        u8                      smac[ETH_ALEN];
        u16                     vlan_id;
        u8                      network_hdr_type;
};

enum ib_cq_notify_flags {
        IB_CQ_SOLICITED                 = 1 << 0,
        IB_CQ_NEXT_COMP                 = 1 << 1,
        IB_CQ_SOLICITED_MASK            = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
        IB_CQ_REPORT_MISSED_EVENTS      = 1 << 2,
};

enum ib_srq_type {
        IB_SRQT_BASIC,
        IB_SRQT_XRC,
        IB_SRQT_TM,
};

static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
{
        return srq_type == IB_SRQT_XRC ||
               srq_type == IB_SRQT_TM;
}

enum ib_srq_attr_mask {
        IB_SRQ_MAX_WR   = 1 << 0,
        IB_SRQ_LIMIT    = 1 << 1,
};

struct ib_srq_attr {
        u32     max_wr;
        u32     max_sge;
        u32     srq_limit;
};

struct ib_srq_init_attr {
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *srq_context;
        struct ib_srq_attr      attr;
        enum ib_srq_type        srq_type;

        struct {
                struct ib_cq   *cq;
                union {
                        struct {
                                struct ib_xrcd *xrcd;
                        } xrc;

                        struct {
                                u32             max_num_tags;
                        } tag_matching;
                };
        } ext;
};

struct ib_qp_cap {
        u32     max_send_wr;
        u32     max_recv_wr;
        u32     max_send_sge;
        u32     max_recv_sge;
        u32     max_inline_data;

        /*
         * Maximum number of rdma_rw_ctx structures in flight at a time.
         * ib_create_qp() will calculate the right number of needed WRs
         * and MRs based on this.
         */
        u32     max_rdma_ctxs;
};

enum ib_sig_type {
        IB_SIGNAL_ALL_WR,
        IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
        /*
         * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
         * here (and in that order) since the MAD layer uses them as
         * indices into a 2-entry table.
         */
        IB_QPT_SMI,
        IB_QPT_GSI,

        IB_QPT_RC,
        IB_QPT_UC,
        IB_QPT_UD,
        IB_QPT_RAW_IPV6,
        IB_QPT_RAW_ETHERTYPE,
        IB_QPT_RAW_PACKET = 8,
        IB_QPT_XRC_INI = 9,
        IB_QPT_XRC_TGT,
        IB_QPT_MAX,
        IB_QPT_DRIVER = 0xFF,
        /* Reserve a range for QP types internal to the low level driver.
         * These QP types will not be visible at the IB core layer, so
         * the IB_QPT_MAX usages in the core layer are not affected.
         */
        IB_QPT_RESERVED1 = 0x1000,
        IB_QPT_RESERVED2,
        IB_QPT_RESERVED3,
        IB_QPT_RESERVED4,
        IB_QPT_RESERVED5,
        IB_QPT_RESERVED6,
        IB_QPT_RESERVED7,
        IB_QPT_RESERVED8,
        IB_QPT_RESERVED9,
        IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
        IB_QP_CREATE_IPOIB_UD_LSO               = 1 << 0,
        IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK   = 1 << 1,
        IB_QP_CREATE_CROSS_CHANNEL              = 1 << 2,
        IB_QP_CREATE_MANAGED_SEND               = 1 << 3,
        IB_QP_CREATE_MANAGED_RECV               = 1 << 4,
        IB_QP_CREATE_NETIF_QP                   = 1 << 5,
        IB_QP_CREATE_INTEGRITY_EN               = 1 << 6,
        /* FREE                                 = 1 << 7, */
        IB_QP_CREATE_SCATTER_FCS                = 1 << 8,
        IB_QP_CREATE_CVLAN_STRIPPING            = 1 << 9,
        IB_QP_CREATE_SOURCE_QPN                 = 1 << 10,
        IB_QP_CREATE_PCI_WRITE_END_PADDING      = 1 << 11,
        /* reserve bits 26-31 for low level drivers' internal use */
        IB_QP_CREATE_RESERVED_START             = 1 << 26,
        IB_QP_CREATE_RESERVED_END               = 1 << 31,
};

/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
 * callback to destroy the passed in QP.
 */

struct ib_qp_init_attr {
        /* Consumer's event_handler callback must not block */
        void                  (*event_handler)(struct ib_event *, void *);

        void                   *qp_context;
        struct ib_cq           *send_cq;
        struct ib_cq           *recv_cq;
        struct ib_srq          *srq;
        struct ib_xrcd         *xrcd;     /* XRC TGT QPs only */
        struct ib_qp_cap        cap;
        enum ib_sig_type        sq_sig_type;
        enum ib_qp_type         qp_type;
        u32                     create_flags;

        /*
         * Only needed for special QP types, or when using the RW API.
         */
        u8                      port_num;
        struct ib_rwq_ind_table *rwq_ind_tbl;
        u32                     source_qpn;
};

struct ib_qp_open_attr {
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *qp_context;
        u32                     qp_num;
        enum ib_qp_type         qp_type;
};

enum ib_rnr_timeout {
        IB_RNR_TIMER_655_36 =  0,
        IB_RNR_TIMER_000_01 =  1,
        IB_RNR_TIMER_000_02 =  2,
        IB_RNR_TIMER_000_03 =  3,
        IB_RNR_TIMER_000_04 =  4,
        IB_RNR_TIMER_000_06 =  5,
        IB_RNR_TIMER_000_08 =  6,
        IB_RNR_TIMER_000_12 =  7,
        IB_RNR_TIMER_000_16 =  8,
        IB_RNR_TIMER_000_24 =  9,
        IB_RNR_TIMER_000_32 = 10,
        IB_RNR_TIMER_000_48 = 11,
        IB_RNR_TIMER_000_64 = 12,
        IB_RNR_TIMER_000_96 = 13,
        IB_RNR_TIMER_001_28 = 14,
        IB_RNR_TIMER_001_92 = 15,
        IB_RNR_TIMER_002_56 = 16,
        IB_RNR_TIMER_003_84 = 17,
        IB_RNR_TIMER_005_12 = 18,
        IB_RNR_TIMER_007_68 = 19,
        IB_RNR_TIMER_010_24 = 20,
        IB_RNR_TIMER_015_36 = 21,
        IB_RNR_TIMER_020_48 = 22,
        IB_RNR_TIMER_030_72 = 23,
        IB_RNR_TIMER_040_96 = 24,
        IB_RNR_TIMER_061_44 = 25,
        IB_RNR_TIMER_081_92 = 26,
        IB_RNR_TIMER_122_88 = 27,
        IB_RNR_TIMER_163_84 = 28,
        IB_RNR_TIMER_245_76 = 29,
        IB_RNR_TIMER_327_68 = 30,
        IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
        IB_QP_STATE                     = 1,
        IB_QP_CUR_STATE                 = (1<<1),
        IB_QP_EN_SQD_ASYNC_NOTIFY       = (1<<2),
        IB_QP_ACCESS_FLAGS              = (1<<3),
        IB_QP_PKEY_INDEX                = (1<<4),
        IB_QP_PORT                      = (1<<5),
        IB_QP_QKEY                      = (1<<6),
        IB_QP_AV                        = (1<<7),
        IB_QP_PATH_MTU                  = (1<<8),
        IB_QP_TIMEOUT                   = (1<<9),
        IB_QP_RETRY_CNT                 = (1<<10),
        IB_QP_RNR_RETRY                 = (1<<11),
        IB_QP_RQ_PSN                    = (1<<12),
        IB_QP_MAX_QP_RD_ATOMIC          = (1<<13),
        IB_QP_ALT_PATH                  = (1<<14),
        IB_QP_MIN_RNR_TIMER             = (1<<15),
        IB_QP_SQ_PSN                    = (1<<16),
        IB_QP_MAX_DEST_RD_ATOMIC        = (1<<17),
        IB_QP_PATH_MIG_STATE            = (1<<18),
        IB_QP_CAP                       = (1<<19),
        IB_QP_DEST_QPN                  = (1<<20),
        IB_QP_RESERVED1                 = (1<<21),
        IB_QP_RESERVED2                 = (1<<22),
        IB_QP_RESERVED3                 = (1<<23),
        IB_QP_RESERVED4                 = (1<<24),
        IB_QP_RATE_LIMIT                = (1<<25),
};

enum ib_qp_state {
        IB_QPS_RESET,
        IB_QPS_INIT,
        IB_QPS_RTR,
        IB_QPS_RTS,
        IB_QPS_SQD,
        IB_QPS_SQE,
        IB_QPS_ERR
};

enum ib_mig_state {
        IB_MIG_MIGRATED,
        IB_MIG_REARM,
        IB_MIG_ARMED
};

enum ib_mw_type {
        IB_MW_TYPE_1 = 1,
        IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
        enum ib_qp_state        qp_state;
        enum ib_qp_state        cur_qp_state;
        enum ib_mtu             path_mtu;
        enum ib_mig_state       path_mig_state;
        u32                     qkey;
        u32                     rq_psn;
        u32                     sq_psn;
        u32                     dest_qp_num;
        int                     qp_access_flags;
        struct ib_qp_cap        cap;
        struct rdma_ah_attr     ah_attr;
        struct rdma_ah_attr     alt_ah_attr;
        u16                     pkey_index;
        u16                     alt_pkey_index;
        u8                      en_sqd_async_notify;
        u8                      sq_draining;
        u8                      max_rd_atomic;
        u8                      max_dest_rd_atomic;
        u8                      min_rnr_timer;
        u8                      port_num;
        u8                      timeout;
        u8                      retry_cnt;
        u8                      rnr_retry;
        u8                      alt_port_num;
        u8                      alt_timeout;
        u32                     rate_limit;
};

enum ib_wr_opcode {
        /* These are shared with userspace */
        IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
        IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
        IB_WR_SEND = IB_UVERBS_WR_SEND,
        IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
        IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
        IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
        IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
        IB_WR_LSO = IB_UVERBS_WR_TSO,
        IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
        IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
        IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
        IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
                IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
        IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
                IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,

        /* These are kernel only and cannot be issued by userspace */
        IB_WR_REG_MR = 0x20,
        IB_WR_REG_MR_INTEGRITY,

        /* reserve values for low level drivers' internal use.
         * These values will not be used at all in the ib core layer.
         */
        IB_WR_RESERVED1 = 0xf0,
        IB_WR_RESERVED2,
        IB_WR_RESERVED3,
        IB_WR_RESERVED4,
        IB_WR_RESERVED5,
        IB_WR_RESERVED6,
        IB_WR_RESERVED7,
        IB_WR_RESERVED8,
        IB_WR_RESERVED9,
        IB_WR_RESERVED10,
};

enum ib_send_flags {
        IB_SEND_FENCE           = 1,
        IB_SEND_SIGNALED        = (1<<1),
        IB_SEND_SOLICITED       = (1<<2),
        IB_SEND_INLINE          = (1<<3),
        IB_SEND_IP_CSUM         = (1<<4),

        /* reserve bits 26-31 for low level drivers' internal use */
        IB_SEND_RESERVED_START  = (1 << 26),
        IB_SEND_RESERVED_END    = (1 << 31),
};

struct ib_sge {
        u64     addr;
        u32     length;
        u32     lkey;
};

struct ib_cqe {
        void (*done)(struct ib_cq *cq, struct ib_wc *wc);
};

struct ib_send_wr {
        struct ib_send_wr      *next;
        union {
                u64             wr_id;
                struct ib_cqe   *wr_cqe;
        };
        struct ib_sge          *sg_list;
        int                     num_sge;
        enum ib_wr_opcode       opcode;
        int                     send_flags;
        union {
                __be32          imm_data;
                u32             invalidate_rkey;
        } ex;
};

struct ib_rdma_wr {
        struct ib_send_wr       wr;
        u64                     remote_addr;
        u32                     rkey;
};

static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
{
        return container_of(wr, struct ib_rdma_wr, wr);
}

struct ib_atomic_wr {
        struct ib_send_wr       wr;
        u64                     remote_addr;
        u64                     compare_add;
        u64                     swap;
        u64                     compare_add_mask;
        u64                     swap_mask;
        u32                     rkey;
};

static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
{
        return container_of(wr, struct ib_atomic_wr, wr);
}

struct ib_ud_wr {
        struct ib_send_wr       wr;
        struct ib_ah            *ah;
        void                    *header;
        int                     hlen;
        int                     mss;
        u32                     remote_qpn;
        u32                     remote_qkey;
        u16                     pkey_index; /* valid for GSI only */
        u8                      port_num;   /* valid for DR SMPs on switch only */
};

static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
{
        return container_of(wr, struct ib_ud_wr, wr);
}

struct ib_reg_wr {
        struct ib_send_wr       wr;
        struct ib_mr            *mr;
        u32                     key;
        int                     access;
};

static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
{
        return container_of(wr, struct ib_reg_wr, wr);
}

struct ib_recv_wr {
        struct ib_recv_wr      *next;
        union {
                u64             wr_id;
                struct ib_cqe   *wr_cqe;
        };
        struct ib_sge          *sg_list;
        int                     num_sge;
};

enum ib_access_flags {
        IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
        IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
        IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
        IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
        IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
        IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
        IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
        IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,

        IB_ACCESS_SUPPORTED = ((IB_ACCESS_HUGETLB << 1) - 1)
};
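
/*
 * Illustrative sketch: the access flags form a bitmask, so a region
 * meant to be a local DMA target and a remote RDMA READ source might
 * be registered with:
 *
 *      int access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
 *
 * IB_ACCESS_SUPPORTED above is the mask of all flags defined here.
 */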

/*
 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
 * are hidden here instead of a uapi header!
 */
enum ib_mr_rereg_flags {
        IB_MR_REREG_TRANS       = 1,
        IB_MR_REREG_PD          = (1<<1),
        IB_MR_REREG_ACCESS      = (1<<2),
        IB_MR_REREG_SUPPORTED   = ((IB_MR_REREG_ACCESS << 1) - 1)
};

struct ib_fmr_attr {
        int     max_pages;
        int     max_maps;
        u8      page_shift;
};

struct ib_umem;

enum rdma_remove_reason {
        /*
         * Userspace requested uobject deletion or an initial attempt
         * to remove the uobject via cleanup. The call could fail.
         */
        RDMA_REMOVE_DESTROY,
        /* Context deletion. This call should delete the actual object itself */
        RDMA_REMOVE_CLOSE,
        /* Driver is being hot-unplugged. This call should delete the actual object itself */
        RDMA_REMOVE_DRIVER_REMOVE,
        /* uobj is being cleaned-up before being committed */
        RDMA_REMOVE_ABORT,
};

struct ib_rdmacg_object {
#ifdef CONFIG_CGROUP_RDMA
        struct rdma_cgroup      *cg;            /* owner rdma cgroup */
#endif
};

struct ib_ucontext {
        struct ib_device       *device;
        struct ib_uverbs_file  *ufile;
        /*
         * 'closing' can be read by the driver only during a destroy
         * callback; it is set when the file descriptor is being closed
         * and indicates that mm_sem may be locked.
         */
        bool closing;

        bool cleanup_retryable;

        void (*invalidate_range)(struct ib_umem_odp *umem_odp,
                                 unsigned long start, unsigned long end);
        struct mutex per_mm_list_lock;
        struct list_head per_mm_list;

        struct ib_rdmacg_object cg_obj;
        /*
         * Implementation details of the RDMA core, don't use in drivers:
         */
        struct rdma_restrack_entry res;
};

struct ib_uobject {
        u64                     user_handle;    /* handle given to us by userspace */
        /* ufile & ucontext owning this object */
        struct ib_uverbs_file  *ufile;
        /* FIXME, save memory: ufile->context == context */
        struct ib_ucontext     *context;        /* associated user context */
        void                   *object;         /* containing object */
        struct list_head        list;           /* link to context's list */
        struct ib_rdmacg_object cg_obj;         /* rdmacg object */
        int                     id;             /* index into kernel idr */
        struct kref             ref;
        atomic_t                usecnt;         /* protects exclusive access */
        struct rcu_head         rcu;            /* kfree_rcu() overhead */

        const struct uverbs_api_object *uapi_object;
};

struct ib_udata {
        const void __user *inbuf;
        void __user *outbuf;
        size_t       inlen;
        size_t       outlen;
};

struct ib_pd {
        u32                     local_dma_lkey;
        u32                     flags;
        struct ib_device       *device;
        struct ib_uobject      *uobject;
        atomic_t                usecnt; /* count all resources */

        u32                     unsafe_global_rkey;

        /*
         * Implementation details of the RDMA core, don't use in drivers:
         */
        struct ib_mr           *__internal_mr;
        struct rdma_restrack_entry res;
};

struct ib_xrcd {
        struct ib_device       *device;
        atomic_t                usecnt; /* count all exposed resources */
        struct inode           *inode;

        struct mutex            tgt_qp_mutex;
        struct list_head        tgt_qp_list;
};

struct ib_ah {
        struct ib_device        *device;
        struct ib_pd            *pd;
        struct ib_uobject       *uobject;
        const struct ib_gid_attr *sgid_attr;
        enum rdma_ah_attr_type  type;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

enum ib_poll_context {
        IB_POLL_DIRECT,            /* caller context, no hw completions */
        IB_POLL_SOFTIRQ,           /* poll from softirq context */
        IB_POLL_WORKQUEUE,         /* poll from workqueue */
        IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
};

struct ib_cq {
        struct ib_device       *device;
        struct ib_uobject      *uobject;
        ib_comp_handler         comp_handler;
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *cq_context;
        int                     cqe;
        atomic_t                usecnt; /* count number of work queues */
        enum ib_poll_context    poll_ctx;
        struct ib_wc            *wc;
        union {
                struct irq_poll         iop;
                struct work_struct      work;
        };
        struct workqueue_struct *comp_wq;
        struct dim *dim;
        /*
         * Implementation details of the RDMA core, don't use in drivers:
         */
        struct rdma_restrack_entry res;
};

struct ib_srq {
        struct ib_device       *device;
        struct ib_pd           *pd;
        struct ib_uobject      *uobject;
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *srq_context;
        enum ib_srq_type        srq_type;
        atomic_t                usecnt;

        struct {
                struct ib_cq   *cq;
                union {
                        struct {
                                struct ib_xrcd *xrcd;
                                u32             srq_num;
                        } xrc;
                };
        } ext;
};

1540enum ib_raw_packet_caps {
1541        /* Stripping the cvlan from an incoming packet and reporting it in
1542         * the matching work completion is supported.
1543         */
1544        IB_RAW_PACKET_CAP_CVLAN_STRIPPING       = (1 << 0),
1545        /* Scattering an incoming packet's FCS field to host memory is supported.
1546         */
1547        IB_RAW_PACKET_CAP_SCATTER_FCS           = (1 << 1),
1548        /* Checksum offloads are supported (for both send and receive). */
1549        IB_RAW_PACKET_CAP_IP_CSUM               = (1 << 2),
1550        /* When a packet is received for an RQ with no receive WQEs, the
1551         * packet processing is delayed.
1552         */
1553        IB_RAW_PACKET_CAP_DELAY_DROP            = (1 << 3),
1554};
1555
1556enum ib_wq_type {
1557        IB_WQT_RQ
1558};
1559
1560enum ib_wq_state {
1561        IB_WQS_RESET,
1562        IB_WQS_RDY,
1563        IB_WQS_ERR
1564};
1565
1566struct ib_wq {
1567        struct ib_device       *device;
1568        struct ib_uobject      *uobject;
1569        void                *wq_context;
1570        void                (*event_handler)(struct ib_event *, void *);
1571        struct ib_pd           *pd;
1572        struct ib_cq           *cq;
1573        u32             wq_num;
1574        enum ib_wq_state       state;
1575        enum ib_wq_type wq_type;
1576        atomic_t                usecnt;
1577};
1578
1579enum ib_wq_flags {
1580        IB_WQ_FLAGS_CVLAN_STRIPPING     = 1 << 0,
1581        IB_WQ_FLAGS_SCATTER_FCS         = 1 << 1,
1582        IB_WQ_FLAGS_DELAY_DROP          = 1 << 2,
1583        IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3,
1584};
1585
1586struct ib_wq_init_attr {
1587        void                   *wq_context;
1588        enum ib_wq_type wq_type;
1589        u32             max_wr;
1590        u32             max_sge;
1591        struct  ib_cq          *cq;
1592        void                (*event_handler)(struct ib_event *, void *);
1593        u32             create_flags; /* Use enum ib_wq_flags */
1594};
1595
1596enum ib_wq_attr_mask {
1597        IB_WQ_STATE             = 1 << 0,
1598        IB_WQ_CUR_STATE         = 1 << 1,
1599        IB_WQ_FLAGS             = 1 << 2,
1600};
1601
1602struct ib_wq_attr {
1603        enum    ib_wq_state     wq_state;
1604        enum    ib_wq_state     curr_wq_state;
1605        u32                     flags; /* Use enum ib_wq_flags */
1606        u32                     flags_mask; /* Use enum ib_wq_flags */
1607};
1608
1609struct ib_rwq_ind_table {
1610        struct ib_device        *device;
1611        struct ib_uobject      *uobject;
1612        atomic_t                usecnt;
1613        u32             ind_tbl_num;
1614        u32             log_ind_tbl_size;
1615        struct ib_wq    **ind_tbl;
1616};
1617
1618struct ib_rwq_ind_table_init_attr {
1619        u32             log_ind_tbl_size;
1620        /* Each entry is a pointer to Receive Work Queue */
1621        struct ib_wq    **ind_tbl;
1622};
1623
1624enum port_pkey_state {
1625        IB_PORT_PKEY_NOT_VALID = 0,
1626        IB_PORT_PKEY_VALID = 1,
1627        IB_PORT_PKEY_LISTED = 2,
1628};
1629
1630struct ib_qp_security;
1631
1632struct ib_port_pkey {
1633        enum port_pkey_state    state;
1634        u16                     pkey_index;
1635        u8                      port_num;
1636        struct list_head        qp_list;
1637        struct list_head        to_error_list;
1638        struct ib_qp_security  *sec;
1639};
1640
1641struct ib_ports_pkeys {
1642        struct ib_port_pkey     main;
1643        struct ib_port_pkey     alt;
1644};
1645
1646struct ib_qp_security {
1647        struct ib_qp           *qp;
1648        struct ib_device       *dev;
1649        /* Hold this mutex when changing port and pkey settings. */
1650        struct mutex            mutex;
1651        struct ib_ports_pkeys  *ports_pkeys;
1652        /* A list of all open shared QP handles.  Required to enforce security
1653         * properly for all users of a shared QP.
1654         */
1655        struct list_head        shared_qp_list;
1656        void                   *security;
1657        bool                    destroying;
1658        atomic_t                error_list_count;
1659        struct completion       error_complete;
1660        int                     error_comps_pending;
1661};
1662
1663/*
1664 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
1665 * @max_read_sge:  Maximum SGE elements per RDMA READ request.
1666 */
1667struct ib_qp {
1668        struct ib_device       *device;
1669        struct ib_pd           *pd;
1670        struct ib_cq           *send_cq;
1671        struct ib_cq           *recv_cq;
1672        spinlock_t              mr_lock;
1673        int                     mrs_used;
1674        struct list_head        rdma_mrs;
1675        struct list_head        sig_mrs;
1676        struct ib_srq          *srq;
1677        struct ib_xrcd         *xrcd; /* XRC TGT QPs only */
1678        struct list_head        xrcd_list;
1679
1680        /* count times opened, mcast attaches, flow attaches */
1681        atomic_t                usecnt;
1682        struct list_head        open_list;
1683        struct ib_qp           *real_qp;
1684        struct ib_uobject      *uobject;
1685        void                  (*event_handler)(struct ib_event *, void *);
1686        void                   *qp_context;
1687        /* sgid_attrs associated with the AVs */
1688        const struct ib_gid_attr *av_sgid_attr;
1689        const struct ib_gid_attr *alt_path_sgid_attr;
1690        u32                     qp_num;
1691        u32                     max_write_sge;
1692        u32                     max_read_sge;
1693        enum ib_qp_type         qp_type;
1694        struct ib_rwq_ind_table *rwq_ind_tbl;
1695        struct ib_qp_security  *qp_sec;
1696        u8                      port;
1697
1698        bool                    integrity_en;
1699        /*
1700         * Implementation details of the RDMA core, don't use in drivers:
1701         */
1702        struct rdma_restrack_entry     res;
1703
1704        /* The counter the qp is bound to */
1705        struct rdma_counter    *counter;
1706};
1707
1708struct ib_dm {
1709        struct ib_device  *device;
1710        u32                length;
1711        u32                flags;
1712        struct ib_uobject *uobject;
1713        atomic_t           usecnt;
1714};
1715
1716struct ib_mr {
1717        struct ib_device  *device;
1718        struct ib_pd      *pd;
1719        u32                lkey;
1720        u32                rkey;
1721        u64                iova;
1722        u64                length;
1723        unsigned int       page_size;
1724        enum ib_mr_type    type;
1725        bool               need_inval;
1726        union {
1727                struct ib_uobject       *uobject;       /* user */
1728                struct list_head        qp_entry;       /* FR */
1729        };
1730
1731        struct ib_dm      *dm;
1732        struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */
1733        /*
1734         * Implementation details of the RDMA core, don't use in drivers:
1735         */
1736        struct rdma_restrack_entry res;
1737};
1738
1739struct ib_mw {
1740        struct ib_device        *device;
1741        struct ib_pd            *pd;
1742        struct ib_uobject       *uobject;
1743        u32                     rkey;
1744        enum ib_mw_type         type;
1745};
1746
1747struct ib_fmr {
1748        struct ib_device        *device;
1749        struct ib_pd            *pd;
1750        struct list_head        list;
1751        u32                     lkey;
1752        u32                     rkey;
1753};
1754
1755/* Supported steering options */
1756enum ib_flow_attr_type {
1757        /* steering according to rule specifications */
1758        IB_FLOW_ATTR_NORMAL             = 0x0,
1759        /* default unicast and multicast rule -
1760         * receive all Eth traffic which isn't steered to any QP
1761         */
1762        IB_FLOW_ATTR_ALL_DEFAULT        = 0x1,
1763        /* default multicast rule -
1764         * receive all Eth multicast traffic which isn't steered to any QP
1765         */
1766        IB_FLOW_ATTR_MC_DEFAULT         = 0x2,
1767        /* sniffer rule - receive all port traffic */
1768        IB_FLOW_ATTR_SNIFFER            = 0x3
1769};
1770
1771/* Supported steering header types */
1772enum ib_flow_spec_type {
1773        /* L2 headers */
1774        IB_FLOW_SPEC_ETH                = 0x20,
1775        IB_FLOW_SPEC_IB                 = 0x22,
1776        /* L3 headers */
1777        IB_FLOW_SPEC_IPV4               = 0x30,
1778        IB_FLOW_SPEC_IPV6               = 0x31,
1779        IB_FLOW_SPEC_ESP                = 0x34,
1780        /* L4 headers */
1781        IB_FLOW_SPEC_TCP                = 0x40,
1782        IB_FLOW_SPEC_UDP                = 0x41,
1783        IB_FLOW_SPEC_VXLAN_TUNNEL       = 0x50,
1784        IB_FLOW_SPEC_GRE                = 0x51,
1785        IB_FLOW_SPEC_MPLS               = 0x60,
1786        IB_FLOW_SPEC_INNER              = 0x100,
1787        /* Actions */
1788        IB_FLOW_SPEC_ACTION_TAG         = 0x1000,
1789        IB_FLOW_SPEC_ACTION_DROP        = 0x1001,
1790        IB_FLOW_SPEC_ACTION_HANDLE      = 0x1002,
1791        IB_FLOW_SPEC_ACTION_COUNT       = 0x1003,
1792};
1793#define IB_FLOW_SPEC_LAYER_MASK 0xF0
1794#define IB_FLOW_SPEC_SUPPORT_LAYERS 10
1795
1796/* Flow steering rule priority is set according to its domain.
1797 * Lower domain value means higher priority.
1798 */
1799enum ib_flow_domain {
1800        IB_FLOW_DOMAIN_USER,
1801        IB_FLOW_DOMAIN_ETHTOOL,
1802        IB_FLOW_DOMAIN_RFS,
1803        IB_FLOW_DOMAIN_NIC,
1804        IB_FLOW_DOMAIN_NUM /* Must be last */
1805};
1806
1807enum ib_flow_flags {
1808        IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
1809        IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */
1810        IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 3  /* Must be last */
1811};
1812
1813struct ib_flow_eth_filter {
1814        u8      dst_mac[6];
1815        u8      src_mac[6];
1816        __be16  ether_type;
1817        __be16  vlan_tag;
1818        /* Must be last */
1819        u8      real_sz[0];
1820};
1821
1822struct ib_flow_spec_eth {
1823        u32                       type;
1824        u16                       size;
1825        struct ib_flow_eth_filter val;
1826        struct ib_flow_eth_filter mask;
1827};
1828
1829struct ib_flow_ib_filter {
1830        __be16 dlid;
1831        __u8   sl;
1832        /* Must be last */
1833        u8      real_sz[0];
1834};
1835
1836struct ib_flow_spec_ib {
1837        u32                      type;
1838        u16                      size;
1839        struct ib_flow_ib_filter val;
1840        struct ib_flow_ib_filter mask;
1841};
1842
1843/* IPv4 header flags */
1844enum ib_ipv4_flags {
1845        IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
1846        IB_IPV4_MORE_FRAG = 0x4  /* All fragmented packets except the
1847                                    last have this flag set */
1848};
1849
1850struct ib_flow_ipv4_filter {
1851        __be32  src_ip;
1852        __be32  dst_ip;
1853        u8      proto;
1854        u8      tos;
1855        u8      ttl;
1856        u8      flags;
1857        /* Must be last */
1858        u8      real_sz[0];
1859};
1860
1861struct ib_flow_spec_ipv4 {
1862        u32                        type;
1863        u16                        size;
1864        struct ib_flow_ipv4_filter val;
1865        struct ib_flow_ipv4_filter mask;
1866};
1867
1868struct ib_flow_ipv6_filter {
1869        u8      src_ip[16];
1870        u8      dst_ip[16];
1871        __be32  flow_label;
1872        u8      next_hdr;
1873        u8      traffic_class;
1874        u8      hop_limit;
1875        /* Must be last */
1876        u8      real_sz[0];
1877};
1878
1879struct ib_flow_spec_ipv6 {
1880        u32                        type;
1881        u16                        size;
1882        struct ib_flow_ipv6_filter val;
1883        struct ib_flow_ipv6_filter mask;
1884};
1885
1886struct ib_flow_tcp_udp_filter {
1887        __be16  dst_port;
1888        __be16  src_port;
1889        /* Must be last */
1890        u8      real_sz[0];
1891};
1892
1893struct ib_flow_spec_tcp_udp {
1894        u32                           type;
1895        u16                           size;
1896        struct ib_flow_tcp_udp_filter val;
1897        struct ib_flow_tcp_udp_filter mask;
1898};
1899
1900struct ib_flow_tunnel_filter {
1901        __be32  tunnel_id;
1902        u8      real_sz[0];
1903};
1904
1905/* ib_flow_spec_tunnel describes the VXLAN tunnel;
1906 * the tunnel_id field of val holds the VNI value.
1907 */
1908struct ib_flow_spec_tunnel {
1909        u32                           type;
1910        u16                           size;
1911        struct ib_flow_tunnel_filter  val;
1912        struct ib_flow_tunnel_filter  mask;
1913};
1914
1915struct ib_flow_esp_filter {
1916        __be32  spi;
1917        __be32  seq;
1918        /* Must be last */
1919        u8      real_sz[0];
1920};
1921
1922struct ib_flow_spec_esp {
1923        u32                           type;
1924        u16                           size;
1925        struct ib_flow_esp_filter     val;
1926        struct ib_flow_esp_filter     mask;
1927};
1928
1929struct ib_flow_gre_filter {
1930        __be16 c_ks_res0_ver;
1931        __be16 protocol;
1932        __be32 key;
1933        /* Must be last */
1934        u8      real_sz[0];
1935};
1936
1937struct ib_flow_spec_gre {
1938        u32                           type;
1939        u16                           size;
1940        struct ib_flow_gre_filter     val;
1941        struct ib_flow_gre_filter     mask;
1942};
1943
1944struct ib_flow_mpls_filter {
1945        __be32 tag;
1946        /* Must be last */
1947        u8      real_sz[0];
1948};
1949
1950struct ib_flow_spec_mpls {
1951        u32                           type;
1952        u16                           size;
1953        struct ib_flow_mpls_filter     val;
1954        struct ib_flow_mpls_filter     mask;
1955};
1956
1957struct ib_flow_spec_action_tag {
1958        enum ib_flow_spec_type        type;
1959        u16                           size;
1960        u32                           tag_id;
1961};
1962
1963struct ib_flow_spec_action_drop {
1964        enum ib_flow_spec_type        type;
1965        u16                           size;
1966};
1967
1968struct ib_flow_spec_action_handle {
1969        enum ib_flow_spec_type        type;
1970        u16                           size;
1971        struct ib_flow_action        *act;
1972};
1973
1974enum ib_counters_description {
1975        IB_COUNTER_PACKETS,
1976        IB_COUNTER_BYTES,
1977};
1978
1979struct ib_flow_spec_action_count {
1980        enum ib_flow_spec_type type;
1981        u16 size;
1982        struct ib_counters *counters;
1983};
1984
1985union ib_flow_spec {
1986        struct {
1987                u32                     type;
1988                u16                     size;
1989        };
1990        struct ib_flow_spec_eth         eth;
1991        struct ib_flow_spec_ib          ib;
1992        struct ib_flow_spec_ipv4        ipv4;
1993        struct ib_flow_spec_tcp_udp     tcp_udp;
1994        struct ib_flow_spec_ipv6        ipv6;
1995        struct ib_flow_spec_tunnel      tunnel;
1996        struct ib_flow_spec_esp         esp;
1997        struct ib_flow_spec_gre         gre;
1998        struct ib_flow_spec_mpls        mpls;
1999        struct ib_flow_spec_action_tag  flow_tag;
2000        struct ib_flow_spec_action_drop drop;
2001        struct ib_flow_spec_action_handle action;
2002        struct ib_flow_spec_action_count flow_count;
2003};
2004
2005struct ib_flow_attr {
2006        enum ib_flow_attr_type type;
2007        u16          size;
2008        u16          priority;
2009        u32          flags;
2010        u8           num_of_specs;
2011        u8           port;
2012        union ib_flow_spec flows[];
2013};
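
/*
 * Construction sketch (hedged; sizing mirrors how the uverbs layer builds the
 * attribute, variable names hypothetical): @flows is a flexible array, so an
 * attribute carrying one spec is over-allocated, and @size grows by the size
 * of each spec appended:
 *
 *        struct ib_flow_attr *attr;
 *
 *        attr = kzalloc(struct_size(attr, flows, 1), GFP_KERNEL);
 *        if (!attr)
 *                return -ENOMEM;
 *        attr->type = IB_FLOW_ATTR_NORMAL;
 *        attr->size = sizeof(*attr) + sizeof(struct ib_flow_spec_eth);
 *        attr->num_of_specs = 1;
 *        attr->port = 1;
 *        attr->flows[0].eth.type = IB_FLOW_SPEC_ETH;
 *        attr->flows[0].eth.size = sizeof(struct ib_flow_spec_eth);
 *        memset(attr->flows[0].eth.mask.dst_mac, 0xff, ETH_ALEN);
 */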
2014
2015struct ib_flow {
2016        struct ib_qp            *qp;
2017        struct ib_device        *device;
2018        struct ib_uobject       *uobject;
2019};
2020
2021enum ib_flow_action_type {
2022        IB_FLOW_ACTION_UNSPECIFIED,
2023        IB_FLOW_ACTION_ESP = 1,
2024};
2025
2026struct ib_flow_action_attrs_esp_keymats {
2027        enum ib_uverbs_flow_action_esp_keymat                   protocol;
2028        union {
2029                struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
2030        } keymat;
2031};
2032
2033struct ib_flow_action_attrs_esp_replays {
2034        enum ib_uverbs_flow_action_esp_replay                   protocol;
2035        union {
2036                struct ib_uverbs_flow_action_esp_replay_bmp     bmp;
2037        } replay;
2038};
2039
2040enum ib_flow_action_attrs_esp_flags {
2041        /* All user-space flags come first: use enum ib_uverbs_flow_action_esp_flags.
2042         * This is done in order to share the same flags between user-space and
2043         * kernel and spare an unnecessary translation.
2044         */
2045
2046        /* Kernel flags */
2047        IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED  = 1ULL << 32,
2048        IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS  = 1ULL << 33,
2049};
2050
2051struct ib_flow_spec_list {
2052        struct ib_flow_spec_list        *next;
2053        union ib_flow_spec              spec;
2054};
2055
2056struct ib_flow_action_attrs_esp {
2057        struct ib_flow_action_attrs_esp_keymats         *keymat;
2058        struct ib_flow_action_attrs_esp_replays         *replay;
2059        struct ib_flow_spec_list                        *encap;
2060        /* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
2061         * A value of 0 is valid.
2062         */
2063        u32                                             esn;
2064        u32                                             spi;
2065        u32                                             seq;
2066        u32                                             tfc_pad;
2067        /* Use enum ib_flow_action_attrs_esp_flags */
2068        u64                                             flags;
2069        u64                                             hard_limit_pkts;
2070};
2071
2072struct ib_flow_action {
2073        struct ib_device                *device;
2074        struct ib_uobject               *uobject;
2075        enum ib_flow_action_type        type;
2076        atomic_t                        usecnt;
2077};
2078
2079struct ib_mad_hdr;
2080struct ib_grh;
2081
2082enum ib_process_mad_flags {
2083        IB_MAD_IGNORE_MKEY      = 1,
2084        IB_MAD_IGNORE_BKEY      = 2,
2085        IB_MAD_IGNORE_ALL       = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
2086};
2087
2088enum ib_mad_result {
2089        IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
2090        IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
2091        IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
2092        IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
2093};
2094
2095struct ib_port_cache {
2096        u64                   subnet_prefix;
2097        struct ib_pkey_cache  *pkey;
2098        struct ib_gid_table   *gid;
2099        u8                     lmc;
2100        enum ib_port_state     port_state;
2101};
2102
2103struct ib_cache {
2104        rwlock_t                lock;
2105        struct ib_event_handler event_handler;
2106};
2107
2108struct ib_port_immutable {
2109        int                           pkey_tbl_len;
2110        int                           gid_tbl_len;
2111        u32                           core_cap_flags;
2112        u32                           max_mad_size;
2113};
2114
2115struct ib_port_data {
2116        struct ib_device *ib_dev;
2117
2118        struct ib_port_immutable immutable;
2119
2120        spinlock_t pkey_list_lock;
2121        struct list_head pkey_list;
2122
2123        struct ib_port_cache cache;
2124
2125        spinlock_t netdev_lock;
2126        struct net_device __rcu *netdev;
2127        struct hlist_node ndev_hash_link;
2128        struct rdma_port_counter port_counter;
2129        struct rdma_hw_stats *hw_stats;
2130};
2131
2132/* rdma netdev type - specifies protocol type */
2133enum rdma_netdev_t {
2134        RDMA_NETDEV_OPA_VNIC,
2135        RDMA_NETDEV_IPOIB,
2136};
2137
2138/**
2139 * struct rdma_netdev - rdma netdev
2140 * For cases where netstack interfacing is required.
2141 */
2142struct rdma_netdev {
2143        void              *clnt_priv;
2144        struct ib_device  *hca;
2145        u8                 port_num;
2146
2147        /*
2148         * The cleanup function must be specified.
2149         * FIXME: This is only used for OPA_VNIC and that usage should be
2150         * removed too.
2151         */
2152        void (*free_rdma_netdev)(struct net_device *netdev);
2153
2154        /* control functions */
2155        void (*set_id)(struct net_device *netdev, int id);
2156        /* send packet */
2157        int (*send)(struct net_device *dev, struct sk_buff *skb,
2158                    struct ib_ah *address, u32 dqpn);
2159        /* multicast */
2160        int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
2161                            union ib_gid *gid, u16 mlid,
2162                            int set_qkey, u32 qkey);
2163        int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
2164                            union ib_gid *gid, u16 mlid);
2165};
2166
2167struct rdma_netdev_alloc_params {
2168        size_t sizeof_priv;
2169        unsigned int txqs;
2170        unsigned int rxqs;
2171        void *param;
2172
2173        int (*initialize_rdma_netdev)(struct ib_device *device, u8 port_num,
2174                                      struct net_device *netdev, void *param);
2175};
2176
2177struct ib_counters {
2178        struct ib_device        *device;
2179        struct ib_uobject       *uobject;
2180        /* num of objects attached */
2181        atomic_t        usecnt;
2182};
2183
2184struct ib_counters_read_attr {
2185        u64     *counters_buff;
2186        u32     ncounters;
2187        u32     flags; /* use enum ib_read_counters_flags */
2188};
2189
2190struct uverbs_attr_bundle;
2191struct iw_cm_id;
2192struct iw_cm_conn_param;
2193
2194#define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member)                      \
2195        .size_##ib_struct =                                                    \
2196                (sizeof(struct drv_struct) +                                   \
2197                 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) +      \
2198                 BUILD_BUG_ON_ZERO(                                            \
2199                         !__same_type(((struct drv_struct *)NULL)->member,     \
2200                                      struct ib_struct)))
2201
2202#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp)                         \
2203        ((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp))
2204
2205#define rdma_zalloc_drv_obj(ib_dev, ib_type)                                   \
2206        rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)
2207
2208#define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
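
/*
 * Intended pattern (sketch; "my_pd"/"my_alloc_pd" are hypothetical driver
 * names): a provider embeds the core object at offset 0 of its own structure
 * and reports the containing size via INIT_RDMA_OBJ_SIZE(), so the core can
 * allocate it with rdma_zalloc_drv_obj(). The BUILD_BUG_ON_ZERO() terms
 * enforce the offset-0 placement and the member type at compile time.
 *
 *        struct my_pd {
 *                struct ib_pd ibpd;
 *                u32 pdn;
 *        };
 *
 *        static const struct ib_device_ops my_dev_ops = {
 *                .alloc_pd = my_alloc_pd,
 *                INIT_RDMA_OBJ_SIZE(ib_pd, my_pd, ibpd),
 *        };
 */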
2209
2210/**
2211 * struct ib_device_ops - InfiniBand device operations
2212 * This structure defines all the InfiniBand device operations. Providers must
2213 * define the operations they support; unsupported ones are left set to NULL.
2214 */
2215struct ib_device_ops {
2216        struct module *owner;
2217        enum rdma_driver_id driver_id;
2218        u32 uverbs_abi_ver;
2219        unsigned int uverbs_no_driver_id_binding:1;
2220
2221        int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
2222                         const struct ib_send_wr **bad_send_wr);
2223        int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
2224                         const struct ib_recv_wr **bad_recv_wr);
2225        void (*drain_rq)(struct ib_qp *qp);
2226        void (*drain_sq)(struct ib_qp *qp);
2227        int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
2228        int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2229        int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
2230        int (*req_ncomp_notif)(struct ib_cq *cq, int wc_cnt);
2231        int (*post_srq_recv)(struct ib_srq *srq,
2232                             const struct ib_recv_wr *recv_wr,
2233                             const struct ib_recv_wr **bad_recv_wr);
2234        int (*process_mad)(struct ib_device *device, int process_mad_flags,
2235                           u8 port_num, const struct ib_wc *in_wc,
2236                           const struct ib_grh *in_grh,
2237                           const struct ib_mad_hdr *in_mad, size_t in_mad_size,
2238                           struct ib_mad_hdr *out_mad, size_t *out_mad_size,
2239                           u16 *out_mad_pkey_index);
2240        int (*query_device)(struct ib_device *device,
2241                            struct ib_device_attr *device_attr,
2242                            struct ib_udata *udata);
2243        int (*modify_device)(struct ib_device *device, int device_modify_mask,
2244                             struct ib_device_modify *device_modify);
2245        void (*get_dev_fw_str)(struct ib_device *device, char *str);
2246        const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
2247                                                     int comp_vector);
2248        int (*query_port)(struct ib_device *device, u8 port_num,
2249                          struct ib_port_attr *port_attr);
2250        int (*modify_port)(struct ib_device *device, u8 port_num,
2251                           int port_modify_mask,
2252                           struct ib_port_modify *port_modify);
2253        /**
2254         * The following mandatory functions are used only at device
2255         * registration.  Keep functions such as these at the end of this
2256         * structure to avoid cache line misses when accessing struct ib_device
2257         * in fast paths.
2258         */
2259        int (*get_port_immutable)(struct ib_device *device, u8 port_num,
2260                                  struct ib_port_immutable *immutable);
2261        enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
2262                                               u8 port_num);
2263        /**
2264         * When calling get_netdev, the HW vendor's driver should return the
2265         * net device of device @device at port @port_num or NULL if such
2266         * a net device doesn't exist. The vendor driver should call dev_hold
2267         * on this net device. The HW vendor's device driver must guarantee
2268         * that this function returns NULL before the net device has completed
2269         * NETDEV_UNREGISTER processing.
2270         */
2271        struct net_device *(*get_netdev)(struct ib_device *device, u8 port_num);
2272        /**
2273         * rdma netdev operation
2274         *
2275         * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
2276         * must return -EOPNOTSUPP if it doesn't support the specified type.
2277         */
2278        struct net_device *(*alloc_rdma_netdev)(
2279                struct ib_device *device, u8 port_num, enum rdma_netdev_t type,
2280                const char *name, unsigned char name_assign_type,
2281                void (*setup)(struct net_device *));
2282
2283        int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num,
2284                                      enum rdma_netdev_t type,
2285                                      struct rdma_netdev_alloc_params *params);
2286        /**
2287         * query_gid should return the GID value of @device when the @port_num
2288         * link layer is either IB or iWARP. It is a no-op if the @port_num
2289         * port uses the RoCE link layer.
2290         */
2291        int (*query_gid)(struct ib_device *device, u8 port_num, int index,
2292                         union ib_gid *gid);
2293        /**
2294         * When calling add_gid, the HW vendor's driver should add the gid
2295         * of the device's port at the gid index given in @attr. Meta-info
2296         * about that gid (for example, the network device related to this
2297         * gid) is also available in @attr. @context allows the HW vendor
2298         * driver to store extra information together with a GID entry. The
2299         * HW vendor driver may allocate memory to contain this information
2300         * and store it in @context when a new GID entry is written. Params
2301         * are consistent until the next call of add_gid or delete_gid. The
2302         * function should return 0 on success or an error otherwise. It may
2303         * be called concurrently for different ports. This function is only
2304         * called when roce_gid_table is used.
2305         */
2306        int (*add_gid)(const struct ib_gid_attr *attr, void **context);
2307        /**
2308         * When calling del_gid, the HW vendor's driver should delete the
2309         * gid of device @device at gid index gid_index of port port_num
2310         * available in @attr.
2311         * Upon the deletion of a GID entry, the HW vendor must free any
2312         * allocated memory. The caller will clear @context afterwards.
2313         * This function is only called when roce_gid_table is used.
2314         */
2315        int (*del_gid)(const struct ib_gid_attr *attr, void **context);
2316        int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index,
2317                          u16 *pkey);
2318        int (*alloc_ucontext)(struct ib_ucontext *context,
2319                              struct ib_udata *udata);
2320        void (*dealloc_ucontext)(struct ib_ucontext *context);
2321        int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
2322        void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
2323        int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2324        void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2325        int (*create_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
2326                         u32 flags, struct ib_udata *udata);
2327        int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2328        int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2329        void (*destroy_ah)(struct ib_ah *ah, u32 flags);
2330        int (*create_srq)(struct ib_srq *srq,
2331                          struct ib_srq_init_attr *srq_init_attr,
2332                          struct ib_udata *udata);
2333        int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
2334                          enum ib_srq_attr_mask srq_attr_mask,
2335                          struct ib_udata *udata);
2336        int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
2337        void (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
2338        struct ib_qp *(*create_qp)(struct ib_pd *pd,
2339                                   struct ib_qp_init_attr *qp_init_attr,
2340                                   struct ib_udata *udata);
2341        int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2342                         int qp_attr_mask, struct ib_udata *udata);
2343        int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2344                        int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
2345        int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
2346        int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
2347                         struct ib_udata *udata);
2348        int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
2349        void (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
2350        int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
2351        struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
2352        struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
2353                                     u64 virt_addr, int mr_access_flags,
2354                                     struct ib_udata *udata);
2355        int (*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start, u64 length,
2356                             u64 virt_addr, int mr_access_flags,
2357                             struct ib_pd *pd, struct ib_udata *udata);
2358        int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
2359        struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
2360                                  u32 max_num_sg, struct ib_udata *udata);
2361        struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd,
2362                                            u32 max_num_data_sg,
2363                                            u32 max_num_meta_sg);
2364        int (*advise_mr)(struct ib_pd *pd,
2365                         enum ib_uverbs_advise_mr_advice advice, u32 flags,
2366                         struct ib_sge *sg_list, u32 num_sge,
2367                         struct uverbs_attr_bundle *attrs);
2368        int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
2369                         unsigned int *sg_offset);
2370        int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2371                               struct ib_mr_status *mr_status);
2372        struct ib_mw *(*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type,
2373                                  struct ib_udata *udata);
2374        int (*dealloc_mw)(struct ib_mw *mw);
2375        struct ib_fmr *(*alloc_fmr)(struct ib_pd *pd, int mr_access_flags,
2376                                    struct ib_fmr_attr *fmr_attr);
2377        int (*map_phys_fmr)(struct ib_fmr *fmr, u64 *page_list, int list_len,
2378                            u64 iova);
2379        int (*unmap_fmr)(struct list_head *fmr_list);
2380        int (*dealloc_fmr)(struct ib_fmr *fmr);
2381        int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2382        int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2383        struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
2384                                      struct ib_udata *udata);
2385        int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
2386        struct ib_flow *(*create_flow)(struct ib_qp *qp,
2387                                       struct ib_flow_attr *flow_attr,
2388                                       int domain, struct ib_udata *udata);
2389        int (*destroy_flow)(struct ib_flow *flow_id);
2390        struct ib_flow_action *(*create_flow_action_esp)(
2391                struct ib_device *device,
2392                const struct ib_flow_action_attrs_esp *attr,
2393                struct uverbs_attr_bundle *attrs);
2394        int (*destroy_flow_action)(struct ib_flow_action *action);
2395        int (*modify_flow_action_esp)(
2396                struct ib_flow_action *action,
2397                const struct ib_flow_action_attrs_esp *attr,
2398                struct uverbs_attr_bundle *attrs);
2399        int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
2400                                 int state);
2401        int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
2402                             struct ifla_vf_info *ivf);
2403        int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
2404                            struct ifla_vf_stats *stats);
2405        int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
2406                           int type);
2407        struct ib_wq *(*create_wq)(struct ib_pd *pd,
2408                                   struct ib_wq_init_attr *init_attr,
2409                                   struct ib_udata *udata);
2410        void (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
2411        int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
2412                         u32 wq_attr_mask, struct ib_udata *udata);
2413        struct ib_rwq_ind_table *(*create_rwq_ind_table)(
2414                struct ib_device *device,
2415                struct ib_rwq_ind_table_init_attr *init_attr,
2416                struct ib_udata *udata);
2417        int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2418        struct ib_dm *(*alloc_dm)(struct ib_device *device,
2419                                  struct ib_ucontext *context,
2420                                  struct ib_dm_alloc_attr *attr,
2421                                  struct uverbs_attr_bundle *attrs);
2422        int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
2423        struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
2424                                   struct ib_dm_mr_attr *attr,
2425                                   struct uverbs_attr_bundle *attrs);
2426        struct ib_counters *(*create_counters)(
2427                struct ib_device *device, struct uverbs_attr_bundle *attrs);
2428        int (*destroy_counters)(struct ib_counters *counters);
2429        int (*read_counters)(struct ib_counters *counters,
2430                             struct ib_counters_read_attr *counters_read_attr,
2431                             struct uverbs_attr_bundle *attrs);
2432        int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg,
2433                            int data_sg_nents, unsigned int *data_sg_offset,
2434                            struct scatterlist *meta_sg, int meta_sg_nents,
2435                            unsigned int *meta_sg_offset);
2436
2437        /**
2438         * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
2439         *   driver initialized data.  The struct is kfree()'ed by the sysfs
2440         *   core when the device is removed.  A lifespan of -1 in the return
2441         *   struct tells the core to set a default lifespan.
2442         */
2443        struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
2444                                                u8 port_num);
2445        /**
2446         * get_hw_stats - Fill in the counter value(s) in the stats struct.
2447         * @index - The index in the value array we wish to have updated, or
2448         *   num_counters if we want all stats updated
2449         * Return codes -
2450         *   < 0 - Error, no counters updated
2451         *   index - Updated the single counter pointed to by index
2452         *   num_counters - Updated all counters (will reset the timestamp
2453         *     and prevent further calls for lifespan milliseconds)
2454         * Drivers are allowed to update all counters in lieu of just the
2455         *   one given in index, at their option.
2456         */
2457        int (*get_hw_stats)(struct ib_device *device,
2458                            struct rdma_hw_stats *stats, u8 port, int index);
2459        /*
2460         * This function is called once for each port when an ib device is
2461         * registered.
2462         */
2463        int (*init_port)(struct ib_device *device, u8 port_num,
2464                         struct kobject *port_sysfs);
2465        /**
2466         * Allows rdma drivers to add their own restrack attributes.
2467         */
2468        int (*fill_res_entry)(struct sk_buff *msg,
2469                              struct rdma_restrack_entry *entry);
2470
2471        /* Device lifecycle callbacks */
2472        /*
2473         * Called after the device becomes registered, before clients are
2474         * attached
2475         */
2476        int (*enable_driver)(struct ib_device *dev);
2477        /*
2478         * This is called as part of ib_dealloc_device().
2479         */
2480        void (*dealloc_driver)(struct ib_device *dev);
2481
2482        /* iWarp CM callbacks */
2483        void (*iw_add_ref)(struct ib_qp *qp);
2484        void (*iw_rem_ref)(struct ib_qp *qp);
2485        struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn);
2486        int (*iw_connect)(struct iw_cm_id *cm_id,
2487                          struct iw_cm_conn_param *conn_param);
2488        int (*iw_accept)(struct iw_cm_id *cm_id,
2489                         struct iw_cm_conn_param *conn_param);
2490        int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata,
2491                         u8 pdata_len);
2492        int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
2493        int (*iw_destroy_listen)(struct iw_cm_id *cm_id);
2494        /**
2495         * counter_bind_qp - Bind a QP to a counter.
2496         * @counter - The counter to be bound. If counter->id is zero then
2497         *   the driver needs to allocate a new counter and set counter->id
2498         */
2499        int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp);
2500        /**
2501         * counter_unbind_qp - Unbind the qp from the dynamically-allocated
2502         *   counter and bind it onto the default one
2503         */
2504        int (*counter_unbind_qp)(struct ib_qp *qp);
2505        /**
2506         * counter_dealloc - De-allocate the hw counter
2507         */
2508        int (*counter_dealloc)(struct rdma_counter *counter);
2509        /**
2510         * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in
2511         * the driver initialized data.
2512         */
2513        struct rdma_hw_stats *(*counter_alloc_stats)(
2514                struct rdma_counter *counter);
2515        /**
2516         * counter_update_stats - Query the stats value of this counter
2517         */
2518        int (*counter_update_stats)(struct rdma_counter *counter);
2519
2520        DECLARE_RDMA_OBJ_SIZE(ib_ah);
2521        DECLARE_RDMA_OBJ_SIZE(ib_cq);
2522        DECLARE_RDMA_OBJ_SIZE(ib_pd);
2523        DECLARE_RDMA_OBJ_SIZE(ib_srq);
2524        DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
2525};
2526
2527struct ib_core_device {
2528        /* device must be the first element in this structure as long as the
2529         * union of ib_core_device and device exists in ib_device.
2530         */
2531        struct device dev;
2532        possible_net_t rdma_net;
2533        struct kobject *ports_kobj;
2534        struct list_head port_list;
2535        struct ib_device *owner; /* reach back to owner ib_device */
2536};
2537
2538struct rdma_restrack_root;
2539struct ib_device {
2540        /* Do not access @dma_device directly from ULP nor from HW drivers. */
2541        struct device                *dma_device;
2542        struct ib_device_ops         ops;
2543        char                          name[IB_DEVICE_NAME_MAX];
2544        struct rcu_head rcu_head;
2545
2546        struct list_head              event_handler_list;
2547        spinlock_t                    event_handler_lock;
2548
2549        struct rw_semaphore           client_data_rwsem;
2550        struct xarray                 client_data;
2551        struct mutex                  unregistration_lock;
2552
2553        struct ib_cache               cache;
2554        /**
2555         * port_data is indexed by port number
2556         */
2557        struct ib_port_data *port_data;
2558
2559        int                           num_comp_vectors;
2560
2561        union {
2562                struct device           dev;
2563                struct ib_core_device   coredev;
2564        };
2565
2566        /* The first group is for device attributes,
2567         * the second group is for driver provided attributes (optional).
2568         * It is a NULL terminated array.
2569         */
2570        const struct attribute_group    *groups[3];
2571
2572        u64                          uverbs_cmd_mask;
2573        u64                          uverbs_ex_cmd_mask;
2574
2575        char                         node_desc[IB_DEVICE_NODE_DESC_MAX];
2576        __be64                       node_guid;
2577        u32                          local_dma_lkey;
2578        u16                          is_switch:1;
2579        /* Indicates kernel verbs support, should not be used in drivers */
2580        u16                          kverbs_provider:1;
2581        /* CQ adaptive moderation (RDMA DIM) */
2582        u16                          use_cq_dim:1;
2583        u8                           node_type;
2584        u8                           phys_port_cnt;
2585        struct ib_device_attr        attrs;
2586        struct attribute_group       *hw_stats_ag;
2587        struct rdma_hw_stats         *hw_stats;
2588
2589#ifdef CONFIG_CGROUP_RDMA
2590        struct rdmacg_device         cg_device;
2591#endif
2592
2593        u32                          index;
2594        struct rdma_restrack_root *res;
2595
2596        const struct uapi_definition   *driver_def;
2597
2598        /*
2599         * Positive refcount indicates that the device is currently
2600         * registered and cannot be unregistered.
2601         */
2602        refcount_t refcount;
2603        struct completion unreg_completion;
2604        struct work_struct unregistration_work;
2605
2606        const struct rdma_link_ops *link_ops;
2607
2608        /* Protects compat_devs xarray modifications */
2609        struct mutex compat_devs_mutex;
2610        /* Maintains compat devices for each net namespace */
2611        struct xarray compat_devs;
2612
2613        /* Used by iWarp CM */
2614        char iw_ifname[IFNAMSIZ];
2615        u32 iw_driver_flags;
2616};
2617
2618struct ib_client_nl_info;
2619struct ib_client {
2620        const char *name;
2621        void (*add)   (struct ib_device *);
2622        void (*remove)(struct ib_device *, void *client_data);
2623        void (*rename)(struct ib_device *dev, void *client_data);
2624        int (*get_nl_info)(struct ib_device *ibdev, void *client_data,
2625                           struct ib_client_nl_info *res);
2626        int (*get_global_nl_info)(struct ib_client_nl_info *res);
2627
2628        /* Returns the net_dev belonging to this ib_client and matching the
2629         * given parameters.
2630         * @dev:         An RDMA device that the net_dev uses for communication.
2631         * @port:        A physical port number on the RDMA device.
2632         * @pkey:        P_Key that the net_dev uses if applicable.
2633         * @gid:         A GID that the net_dev uses to communicate.
2634         * @addr:        An IP address the net_dev is configured with.
2635         * @client_data: The device's client data set by ib_set_client_data().
2636         *
2637         * An ib_client that implements a net_dev on top of RDMA devices
2638         * (such as IP over IB) should implement this callback, allowing the
2639         * rdma_cm module to find the right net_dev for a given request.
2640         *
2641         * The caller is responsible for calling dev_put on the returned
2642         * netdev. */
2643        struct net_device *(*get_net_dev_by_params)(
2644                        struct ib_device *dev,
2645                        u8 port,
2646                        u16 pkey,
2647                        const union ib_gid *gid,
2648                        const struct sockaddr *addr,
2649                        void *client_data);
2650
2651        refcount_t uses;
2652        struct completion uses_zero;
2653        u32 client_id;
2654
2655        /* kverbs are not required by the client */
2656        u8 no_kverbs_req:1;
2657};
2658
2659/*
2660 * IB block DMA iterator
2661 *
2662 * Iterates the DMA-mapped SGL in contiguous memory blocks aligned
2663 * to a HW supported page size.
2664 */
2665struct ib_block_iter {
2666        /* internal states */
2667        struct scatterlist *__sg;       /* sg holding the current aligned block */
2668        dma_addr_t __dma_addr;          /* unaligned DMA address of this block */
2669        unsigned int __sg_nents;        /* number of SG entries */
2670        unsigned int __sg_advance;      /* number of bytes to advance in sg in next step */
2671        unsigned int __pg_bit;          /* alignment of current block */
2672};
2673
2674struct ib_device *_ib_alloc_device(size_t size);
2675#define ib_alloc_device(drv_struct, member)                                    \
2676        container_of(_ib_alloc_device(sizeof(struct drv_struct) +              \
2677                                      BUILD_BUG_ON_ZERO(offsetof(              \
2678                                              struct drv_struct, member))),    \
2679                     struct drv_struct, member)
2680
2681void ib_dealloc_device(struct ib_device *device);
2682
2683void ib_get_device_fw_str(struct ib_device *device, char *str);
2684
2685int ib_register_device(struct ib_device *device, const char *name);
2686void ib_unregister_device(struct ib_device *device);
2687void ib_unregister_driver(enum rdma_driver_id driver_id);
2688void ib_unregister_device_and_put(struct ib_device *device);
2689void ib_unregister_device_queued(struct ib_device *ib_dev);
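
/*
 * Registration sketch (hypothetical driver): the driver structure embedding
 * struct ib_device is allocated with ib_alloc_device() (defined above), the
 * ops are set, and the device is registered under a printf-style name:
 *
 *        struct my_dev {
 *                struct ib_device ibdev;
 *        };
 *
 *        dev = ib_alloc_device(my_dev, ibdev);
 *        if (!dev)
 *                return -ENOMEM;
 *        ib_set_device_ops(&dev->ibdev, &my_dev_ops);
 *        ret = ib_register_device(&dev->ibdev, "my%d");
 *        if (ret)
 *                ib_dealloc_device(&dev->ibdev);
 */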
2690
2691int ib_register_client   (struct ib_client *client);
2692void ib_unregister_client(struct ib_client *client);
2693
2694void __rdma_block_iter_start(struct ib_block_iter *biter,
2695                             struct scatterlist *sglist,
2696                             unsigned int nents,
2697                             unsigned long pgsz);
2698bool __rdma_block_iter_next(struct ib_block_iter *biter);
2699
2700/**
2701 * rdma_block_iter_dma_address - get the aligned dma address of the current
2702 * block held by the block iterator.
2703 * @biter: block iterator holding the memory block
2704 */
2705static inline dma_addr_t
2706rdma_block_iter_dma_address(struct ib_block_iter *biter)
2707{
2708        return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
2709}
2710
2711/**
2712 * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
2713 * @sglist: sglist to iterate over
2714 * @biter: block iterator holding the memory block
2715 * @nents: maximum number of sg entries to iterate over
2716 * @pgsz: best HW supported page size to use
2717 *
2718 * Callers may use rdma_block_iter_dma_address() to get each
2719 * block's aligned DMA address.
2720 */
2721#define rdma_for_each_block(sglist, biter, nents, pgsz)         \
2722        for (__rdma_block_iter_start(biter, sglist, nents,      \
2723                                     pgsz);                     \
2724             __rdma_block_iter_next(biter);)
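
/*
 * Usage sketch (sglist/nents/pas hypothetical): walk a DMA-mapped SGL in
 * blocks of the chosen page size and record each block's aligned address:
 *
 *        struct ib_block_iter biter;
 *        int i = 0;
 *
 *        rdma_for_each_block(sglist, &biter, nents, PAGE_SIZE)
 *                pas[i++] = rdma_block_iter_dma_address(&biter);
 */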
2725
2726/**
2727 * ib_get_client_data - Get IB client context
2728 * @device: Device to get context for
2729 * @client: Client to get context for
2730 *
2731 * ib_get_client_data() returns the client context data set with
2732 * ib_set_client_data(). This can only be called while the client is
2733 * registered to the device, once the ib_client remove() callback returns this
2734 * cannot be called.
2735 */
2736static inline void *ib_get_client_data(struct ib_device *device,
2737                                       struct ib_client *client)
2738{
2739        return xa_load(&device->client_data, client->client_id);
2740}
2741void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
2742                         void *data);
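
/*
 * Client sketch (hypothetical client): per-device state is attached from the
 * add() callback with ib_set_client_data() and freed in remove(); the client
 * itself is registered once with ib_register_client(&my_client):
 *
 *        static struct ib_client my_client;
 *
 *        static void my_add_one(struct ib_device *device)
 *        {
 *                struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);
 *
 *                if (st)
 *                        ib_set_client_data(device, &my_client, st);
 *        }
 *
 *        static void my_remove_one(struct ib_device *device, void *client_data)
 *        {
 *                kfree(client_data);
 *        }
 *
 *        static struct ib_client my_client = {
 *                .name   = "my_client",
 *                .add    = my_add_one,
 *                .remove = my_remove_one,
 *        };
 */
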
2743void ib_set_device_ops(struct ib_device *device,
2744                       const struct ib_device_ops *ops);
2745
2746#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
2747int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
2748                      unsigned long pfn, unsigned long size, pgprot_t prot);
2749#else
2750static inline int rdma_user_mmap_io(struct ib_ucontext *ucontext,
2751                                    struct vm_area_struct *vma,
2752                                    unsigned long pfn, unsigned long size,
2753                                    pgprot_t prot)
2754{
2755        return -EINVAL;
2756}
2757#endif
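
/*
 * Sketch of a driver .mmap callback (my_lookup_uar_pfn() is hypothetical):
 * map one page of device registers into the user VMA through the core
 * helper, which also tracks the mapping for disassociate_ucontext():
 *
 *        static int my_mmap(struct ib_ucontext *context,
 *                           struct vm_area_struct *vma)
 *        {
 *                unsigned long pfn = my_lookup_uar_pfn(context, vma->vm_pgoff);
 *
 *                return rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
 *                                         pgprot_noncached(vma->vm_page_prot));
 *        }
 */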
2758
2759static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2760{
2761        return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2762}
2763
2764static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2765{
2766        return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
2767}
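
/*
 * Sketch of a verb handler using the copy helpers (struct my_req/my_resp and
 * "handle" are hypothetical; real drivers use their uapi ABI structs). The
 * response copy is typically clamped to udata->outlen so that older
 * userspace with a shorter response struct keeps working:
 *
 *        struct my_req req = {};
 *        struct my_resp resp = {};
 *
 *        if (udata->inlen < sizeof(req))
 *                return -EINVAL;
 *        if (ib_copy_from_udata(&req, udata, sizeof(req)))
 *                return -EFAULT;
 *        resp.handle = handle;
 *        if (ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp))))
 *                return -EFAULT;
 */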
2768
2769static inline bool ib_is_buffer_cleared(const void __user *p,
2770                                        size_t len)
2771{
2772        bool ret;
2773        u8 *buf;
2774
2775        if (len > USHRT_MAX)
2776                return false;
2777
2778        buf = memdup_user(p, len);
2779        if (IS_ERR(buf))
2780                return false;
2781
2782        ret = !memchr_inv(buf, 0, len);
2783        kfree(buf);
2784        return ret;
2785}
2786
2787static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2788                                       size_t offset,
2789                                       size_t len)
2790{
2791        return ib_is_buffer_cleared(udata->inbuf + offset, len);
2792}
2793
2794/**
2795 * ib_is_destroy_retryable - Check whether the uobject destruction
2796 * is retryable.
2797 * @ret: The initial destruction return code
2798 * @why: remove reason
2799 * @uobj: The uobject that is destroyed
2800 *
2801 * This function is a helper that the IB layer and low-level drivers
2802 * can use to decide whether the destruction of the given uobject is
2803 * retryable.
2804 * It checks the original return code; if it wasn't success, the destruction
2805 * is retryable according to the ucontext state (i.e. cleanup_retryable) and
2806 * the remove reason (i.e. why).
2807 * Must be called with the object locked for destroy.
2808 */
2809static inline bool ib_is_destroy_retryable(int ret, enum rdma_remove_reason why,
2810                                           struct ib_uobject *uobj)
2811{
2812        return ret && (why == RDMA_REMOVE_DESTROY ||
2813                       uobj->context->cleanup_retryable);
2814}
2815
2816/**
2817 * ib_destroy_usecnt - Called during destruction to check the usecnt
2818 * @usecnt: The usecnt atomic
2819 * @why: remove reason
2820 * @uobj: The uobject that is destroyed
2821 *
2822 * Non-zero usecnts will block destruction unless destruction was triggered by
2823 * a ucontext cleanup.
2824 */
2825static inline int ib_destroy_usecnt(atomic_t *usecnt,
2826                                    enum rdma_remove_reason why,
2827                                    struct ib_uobject *uobj)
2828{
2829        if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj))
2830                return -EBUSY;
2831        return 0;
2832}
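
/*
 * Usage sketch, modeled on uobject cleanup callbacks (my_free_obj/my_obj are
 * hypothetical): a busy object blocks destruction only while the failure is
 * still retryable:
 *
 *        static int my_free_obj(struct ib_uobject *uobject,
 *                               enum rdma_remove_reason why)
 *        {
 *                struct my_obj *obj = uobject->object;
 *                int ret;
 *
 *                ret = ib_destroy_usecnt(&obj->usecnt, why, uobject);
 *                if (ret)
 *                        return ret;
 *                kfree(obj);
 *                return 0;
 *        }
 */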
2833
2834/**
2835 * ib_modify_qp_is_ok - Check that the supplied attribute mask
2836 * contains all required attributes and no attributes not allowed for
2837 * the given QP state transition.
2838 * @cur_state: Current QP state
2839 * @next_state: Next QP state
2840 * @type: QP type
2841 * @mask: Mask of supplied QP attributes
2842 *
2843 * This function is a helper function that a low-level driver's
2844 * modify_qp method can use to validate the consumer's input.  It
2845 * checks that cur_state and next_state are valid QP states, that a
2846 * transition from cur_state to next_state is allowed by the IB spec,
2847 * and that the attribute mask supplied is allowed for the transition.
2848 */
2849bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
2850                        enum ib_qp_type type, enum ib_qp_attr_mask mask);
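
/*
 * Driver-side validation sketch (hedged; my_qp->state is hypothetical driver
 * bookkeeping, the mask handling follows the common pattern in drivers):
 *
 *        enum ib_qp_state cur_state, new_state;
 *
 *        cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state :
 *                                                  my_qp->state;
 *        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 *
 *        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
 *                                attr_mask))
 *                return -EINVAL;
 */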
2851
2852void ib_register_event_handler(struct ib_event_handler *event_handler);
2853void ib_unregister_event_handler(struct ib_event_handler *event_handler);
2854void ib_dispatch_event(struct ib_event *event);
2855
2856int ib_query_port(struct ib_device *device,
2857                  u8 port_num, struct ib_port_attr *port_attr);
2858
2859enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
2860                                               u8 port_num);
2861
2862/**
2863 * rdma_cap_ib_switch - Check if the device is an IB switch
2864 * @device: Device to check
2865 *
2866 * The device driver is responsible for setting the is_switch bit in the
2867 * ib_device structure at init time.
2868 *
2869 * Return: true if the device is an IB switch.
2870 */
2871static inline bool rdma_cap_ib_switch(const struct ib_device *device)
2872{
2873        return device->is_switch;
2874}
2875
2876/**
2877 * rdma_start_port - Return the first valid port number for the device
2878 * specified
2879 *
2880 * @device: Device to be checked
2881 *
2882 * Return: the start port number
2883 */
2884static inline u8 rdma_start_port(const struct ib_device *device)
2885{
2886        return rdma_cap_ib_switch(device) ? 0 : 1;
2887}
2888
2889/**
2890 * rdma_for_each_port - Iterate over all valid port numbers of the IB device
2891 * @device: The struct ib_device * to iterate over
2892 * @iter: The unsigned int in which to store the port number
2893 */
2894#define rdma_for_each_port(device, iter)                                       \
2895        for (iter = rdma_start_port(device + BUILD_BUG_ON_ZERO(!__same_type(   \
2896                                                     unsigned int, iter)));    \
2897             iter <= rdma_end_port(device); (iter)++)
2898
2899/**
2900 * rdma_end_port - Return the last valid port number for the device
2901 * specified
2902 *
2903 * @device: Device to be checked
2904 *
2905 * Return: the last port number
2906 */
2907static inline u8 rdma_end_port(const struct ib_device *device)
2908{
2909        return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
2910}
2911
2912static inline int rdma_is_port_valid(const struct ib_device *device,
2913                                     unsigned int port)
2914{
2915        return (port >= rdma_start_port(device) &&
2916                port <= rdma_end_port(device));
2917}
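
/*
 * Editorial sketch: iterating every valid port of a device.  The iterator
 * must be an unsigned int; rdma_for_each_port() enforces this at build
 * time via BUILD_BUG_ON_ZERO().  "handle_roce_port" is hypothetical.
 *
 *	unsigned int port;
 *
 *	rdma_for_each_port(device, port) {
 *		if (rdma_protocol_roce(device, port))
 *			handle_roce_port(device, port);
 *	}
 */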
2918
2919static inline bool rdma_is_grh_required(const struct ib_device *device,
2920                                        u8 port_num)
2921{
2922        return device->port_data[port_num].immutable.core_cap_flags &
2923               RDMA_CORE_PORT_IB_GRH_REQUIRED;
2924}
2925
2926static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
2927{
2928        return device->port_data[port_num].immutable.core_cap_flags &
2929               RDMA_CORE_CAP_PROT_IB;
2930}
2931
2932static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
2933{
2934        return device->port_data[port_num].immutable.core_cap_flags &
2935               (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
2936}
2937
2938static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
2939{
2940        return device->port_data[port_num].immutable.core_cap_flags &
2941               RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
2942}
2943
2944static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
2945{
2946        return device->port_data[port_num].immutable.core_cap_flags &
2947               RDMA_CORE_CAP_PROT_ROCE;
2948}
2949
2950static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
2951{
2952        return device->port_data[port_num].immutable.core_cap_flags &
2953               RDMA_CORE_CAP_PROT_IWARP;
2954}
2955
2956static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
2957{
2958        return rdma_protocol_ib(device, port_num) ||
2959                rdma_protocol_roce(device, port_num);
2960}
2961
2962static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
2963{
2964        return device->port_data[port_num].immutable.core_cap_flags &
2965               RDMA_CORE_CAP_PROT_RAW_PACKET;
2966}
2967
2968static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
2969{
2970        return device->port_data[port_num].immutable.core_cap_flags &
2971               RDMA_CORE_CAP_PROT_USNIC;
2972}
2973
2974/**
2975 * rdma_cap_ib_mad - Check if the port of a device supports InfiniBand
2976 * Management Datagrams.
2977 * @device: Device to check
2978 * @port_num: Port number to check
2979 *
2980 * Management Datagrams (MAD) are a required part of the InfiniBand
2981 * specification and are supported on all InfiniBand devices.  A slightly
2982 * extended version is also supported on OPA interfaces.
2983 *
2984 * Return: true if the port supports sending/receiving of MAD packets.
2985 */
2986static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
2987{
2988        return device->port_data[port_num].immutable.core_cap_flags &
2989               RDMA_CORE_CAP_IB_MAD;
2990}
2991
2992/**
2993 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
2994 * Management Datagrams.
2995 * @device: Device to check
2996 * @port_num: Port number to check
2997 *
2998 * Intel OmniPath devices extend and/or replace the InfiniBand Management
2999 * datagrams with their own versions.  These OPA MADs share many but not all of
3000 * the characteristics of InfiniBand MADs.
3001 *
3002 * OPA MADs differ in the following ways:
3003 *
3004 *    1) MADs are variable size up to 2K
3005 *       IBTA defined MADs remain fixed at 256 bytes
3006 *    2) OPA SMPs must carry valid PKeys
3007 *    3) OPA SMP packets are a different format
3008 *
3009 * Return: true if the port supports OPA MAD packet formats.
3010 */
3011static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
3012{
3013        return device->port_data[port_num].immutable.core_cap_flags &
3014                RDMA_CORE_CAP_OPA_MAD;
3015}
3016
3017/**
3018 * rdma_cap_ib_smi - Check if the port of a device provides an InfiniBand
3019 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
3020 * @device: Device to check
3021 * @port_num: Port number to check
3022 *
3023 * Each InfiniBand node is required to provide a Subnet Management Agent
3024 * that the subnet manager can access.  Prior to the fabric being fully
3025 * configured by the subnet manager, the SMA is accessed via a well known
3026 * interface called the Subnet Management Interface (SMI).  This interface
3027 * uses directed route packets to communicate with the SM to get around the
3028 * chicken and egg problem of the SM needing to know what's on the fabric
3029 * in order to configure the fabric, and needing to configure the fabric in
3030 * order to send packets to the devices on the fabric.  These directed
3031 * route packets do not need the fabric fully configured in order to reach
3032 * their destination.  The SMI is the only method allowed to send
3033 * directed route packets on an InfiniBand fabric.
3034 *
3035 * Return: true if the port provides an SMI.
3036 */
3037static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
3038{
3039        return device->port_data[port_num].immutable.core_cap_flags &
3040               RDMA_CORE_CAP_IB_SMI;
3041}
3042
3043/**
3044 * rdma_cap_ib_cm - Check if the port of a device supports the InfiniBand
3045 * Communication Manager.
3046 * @device: Device to check
3047 * @port_num: Port number to check
3048 *
3049 * The InfiniBand Communication Manager is one of many pre-defined General
3050 * Service Agents (GSA) that are accessed via the General Service
3051 * Interface (GSI).  Its role is to facilitate establishment of connections
3052 * between nodes as well as other management related tasks for established
3053 * connections.
3054 *
3055 * Return: true if the port supports an IB CM (this does not guarantee that
3056 * a CM is actually running however).
3057 */
3058static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
3059{
3060        return device->port_data[port_num].immutable.core_cap_flags &
3061               RDMA_CORE_CAP_IB_CM;
3062}
3063
3064/**
3065 * rdma_cap_iw_cm - Check if the port of a device supports the iWARP
3066 * Communication Manager.
3067 * @device: Device to check
3068 * @port_num: Port number to check
3069 *
3070 * Similar to above, but specific to iWARP connections which have a different
3071 * management protocol than InfiniBand.
3072 *
3073 * Return: true if the port supports an iWARP CM (this does not guarantee that
3074 * a CM is actually running however).
3075 */
3076static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
3077{
3078        return device->port_data[port_num].immutable.core_cap_flags &
3079               RDMA_CORE_CAP_IW_CM;
3080}
3081
3082/**
3083 * rdma_cap_ib_sa - Check if the port of a device supports InfiniBand
3084 * Subnet Administration.
3085 * @device: Device to check
3086 * @port_num: Port number to check
3087 *
3088 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
3089 * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
3090 * fabrics, devices should resolve routes to other hosts by contacting the
3091 * SA to query the proper route.
3092 *
3093 * Return: true if the port should act as a client to the fabric Subnet
3094 * Administration interface.  This does not imply that the SA service is
3095 * running locally.
3096 */
3097static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
3098{
3099        return device->port_data[port_num].immutable.core_cap_flags &
3100               RDMA_CORE_CAP_IB_SA;
3101}
3102
3103/**
3104 * rdma_cap_ib_mcast - Check if the port of a device supports InfiniBand
3105 * Multicast.
3106 * @device: Device to check
3107 * @port_num: Port number to check
3108 *
3109 * InfiniBand multicast registration is more complex than normal IPv4 or
3110 * IPv6 multicast registration.  Each Host Channel Adapter must register
3111 * with the Subnet Manager when it wishes to join a multicast group.  It
3112 * should do so only once regardless of how many queue pairs it attaches
3113 * to this group.  And it should leave the group only after all queue pairs
3114 * attached to the group have been detached.
3115 *
3116 * Return: true if the port must undertake the additional administrative
3117 * overhead of registering/unregistering with the SM and tracking of the
3118 * total number of queue pairs attached to the multicast group.
3119 */
3120static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
3121{
3122        return rdma_cap_ib_sa(device, port_num);
3123}
3124
3125/**
3126 * rdma_cap_af_ib - Check if the port of a device supports
3127 * native InfiniBand addressing.
3128 * @device: Device to check
3129 * @port_num: Port number to check
3130 *
3131 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
3132 * GID.  RoCE uses a different mechanism, but still generates a GID via
3133 * a prescribed mechanism and port specific data.
3134 *
3135 * Return: true if the port uses a GID address to identify devices on the
3136 * network.
3137 */
3138static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
3139{
3140        return device->port_data[port_num].immutable.core_cap_flags &
3141               RDMA_CORE_CAP_AF_IB;
3142}
3143
3144/**
3145 * rdma_cap_eth_ah - Check if the port of a device supports
3146 * Ethernet Address Handles.
3147 * @device: Device to check
3148 * @port_num: Port number to check
3149 *
3150 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
3151 * to fabricate GIDs over Ethernet/IP specific addresses native to the
3152 * port.  Normally, packet headers are generated by the sending host
3153 * adapter, but when sending connectionless datagrams, we must manually
3154 * inject the proper headers for the fabric we are communicating over.
3155 *
3156 * Return: true if we are running as a RoCE port and must force the
3157 * addition of a Global Route Header built from our Ethernet Address
3158 * Handle into our header list for connectionless packets.
3159 */
3160static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
3161{
3162        return device->port_data[port_num].immutable.core_cap_flags &
3163               RDMA_CORE_CAP_ETH_AH;
3164}
3165
3166/**
3167 * rdma_cap_opa_ah - Check if the port of a device supports
3168 * OPA Address handles
3169 * @device: Device to check
3170 * @port_num: Port number to check
3171 *
3172 * Return: true if we are running on an OPA device which supports
3173 * the extended OPA addressing.
3174 */
3175static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
3176{
3177        return (device->port_data[port_num].immutable.core_cap_flags &
3178                RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
3179}
3180
3181/**
3182 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
3183 *
3184 * @device: Device
3185 * @port_num: Port number
3186 *
3187 * This MAD size includes the MAD headers and MAD payload.  No other headers
3188 * are included.
3189 *
3190 * Return the max MAD size required by the Port.  Will return 0 if the port
3191 * does not support MADs.
3192 */
3193static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
3194{
3195        return device->port_data[port_num].immutable.max_mad_size;
3196}
3197
3198/**
3199 * rdma_cap_roce_gid_table - Check if the port of a device uses roce_gid_table
3200 * @device: Device to check
3201 * @port_num: Port number to check
3202 *
3203 * The RoCE GID table mechanism manages the various GIDs for a device.
3204 *
3205 * NOTE: if allocating the port's GID table has failed, this call will still
3206 * return true, but any RoCE GID table API will fail.
3207 *
3208 * Return: true if the port uses RoCE GID table mechanism in order to manage
3209 * its GIDs.
3210 */
3211static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
3212                                           u8 port_num)
3213{
3214        return rdma_protocol_roce(device, port_num) &&
3215                device->ops.add_gid && device->ops.del_gid;
3216}
3217
3218/*
3219 * Check if the device supports READ W/ INVALIDATE.
3220 */
3221static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
3222{
3223        /*
3224 * iWARP drivers must support READ W/ INVALIDATE.  No other protocol
3225         * has support for it yet.
3226         */
3227        return rdma_protocol_iwarp(dev, port_num);
3228}
3229
3230/**
3231 * rdma_find_pg_bit - Find page bit given address and HW supported page sizes
3232 *
3233 * @addr: address
3234 * @pgsz_bitmap: bitmap of HW supported page sizes
3235 */
3236static inline unsigned int rdma_find_pg_bit(unsigned long addr,
3237                                            unsigned long pgsz_bitmap)
3238{
3239        unsigned long align;
3240        unsigned long pgsz;
3241
3242        align = addr & -addr;
3243
3244        /* Find page bit such that addr is aligned to the highest supported
3245         * HW page size
3246         */
3247        pgsz = pgsz_bitmap & ~(-align << 1);
3248        if (!pgsz)
3249                return __ffs(pgsz_bitmap);
3250
3251        return __fls(pgsz);
3252}
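
/*
 * Editorial worked example: with addr == 0x202000 and pgsz_bitmap ==
 * (SZ_4K | SZ_2M), the address is only 8KB aligned, so 4KB is the largest
 * supported page size to which addr is still aligned and the function
 * returns 12.  With addr == 0x400000 the 2MB size fits and it returns 21.
 */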
3253
3254int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
3255                         int state);
3256int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
3257                     struct ifla_vf_info *info);
3258int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
3259                    struct ifla_vf_stats *stats);
3260int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
3261                   int type);
3262
3263int ib_query_pkey(struct ib_device *device,
3264                  u8 port_num, u16 index, u16 *pkey);
3265
3266int ib_modify_device(struct ib_device *device,
3267                     int device_modify_mask,
3268                     struct ib_device_modify *device_modify);
3269
3270int ib_modify_port(struct ib_device *device,
3271                   u8 port_num, int port_modify_mask,
3272                   struct ib_port_modify *port_modify);
3273
3274int ib_find_gid(struct ib_device *device, union ib_gid *gid,
3275                u8 *port_num, u16 *index);
3276
3277int ib_find_pkey(struct ib_device *device,
3278                 u8 port_num, u16 pkey, u16 *index);
3279
3280enum ib_pd_flags {
3281        /*
3282         * Create a memory registration for all memory in the system and place
3283         * the rkey for it into pd->unsafe_global_rkey.  This can be used by
3284         * ULPs to avoid the overhead of dynamic MRs.
3285         *
3286         * This flag is generally considered unsafe and must only be used in
3287 * extremely trusted environments.  Every use of it will log a warning
3288         * in the kernel log.
3289         */
3290        IB_PD_UNSAFE_GLOBAL_RKEY        = 0x01,
3291};
3292
3293struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
3294                const char *caller);
3295
3296#define ib_alloc_pd(device, flags) \
3297        __ib_alloc_pd((device), (flags), KBUILD_MODNAME)
3298
3299/**
3300 * ib_dealloc_pd_user - Deallocate kernel/user PD
3301 * @pd: The protection domain
3302 * @udata: Valid user data or NULL for kernel objects
3303 */
3304void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
3305
3306/**
3307 * ib_dealloc_pd - Deallocate kernel PD
3308 * @pd: The protection domain
3309 *
3310 * NOTE: for user PD use ib_dealloc_pd_user with valid udata!
3311 */
3312static inline void ib_dealloc_pd(struct ib_pd *pd)
3313{
3314        ib_dealloc_pd_user(pd, NULL);
3315}
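
/*
 * Editorial sketch: typical kernel ULP setup and teardown of a PD.  Most
 * consumers should pass flags == 0; IB_PD_UNSAFE_GLOBAL_RKEY is only for
 * the highly trusted environments described above.
 *
 *	struct ib_pd *pd;
 *
 *	pd = ib_alloc_pd(device, 0);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...
 *	ib_dealloc_pd(pd);
 */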
3316
3317enum rdma_create_ah_flags {
3318        /* In a sleepable context */
3319        RDMA_CREATE_AH_SLEEPABLE = BIT(0),
3320};
3321
3322/**
3323 * rdma_create_ah - Creates an address handle for the given address vector.
3324 * @pd: The protection domain associated with the address handle.
3325 * @ah_attr: The attributes of the address vector.
3326 * @flags: Create address handle flags (see enum rdma_create_ah_flags).
3327 *
3328 * The address handle is used to reference a local or global destination
3329 * in all UD QP post sends.
3330 */
3331struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
3332                             u32 flags);
3333
3334/**
3335 * rdma_create_user_ah - Creates an address handle for the given address vector.
3336 * It resolves destination mac address for ah attribute of RoCE type.
3337 * @pd: The protection domain associated with the address handle.
3338 * @ah_attr: The attributes of the address vector.
3339 * @udata: pointer to user's input/output buffer information needed by
3340 *         the provider driver.
3341 *
3342 * It returns a valid address handle on success and an error pointer on failure.
3343 * The address handle is used to reference a local or global destination
3344 * in all UD QP post sends.
3345 */
3346struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
3347                                  struct rdma_ah_attr *ah_attr,
3348                                  struct ib_udata *udata);
3349/**
3350 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
3351 *   of a work completion.
3352 * @hdr: the L3 header to parse
3353 * @net_type: type of header to parse
3354 * @sgid: place to store source gid
3355 * @dgid: place to store destination gid
3356 */
3357int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
3358                              enum rdma_network_type net_type,
3359                              union ib_gid *sgid, union ib_gid *dgid);
3360
3361/**
3362 * ib_get_rdma_header_version - Get the header version
3363 * @hdr: the L3 header to parse
3364 */
3365int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
3366
3367/**
3368 * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
3369 *   work completion.
3370 * @device: Device on which the received message arrived.
3371 * @port_num: Port on which the received message arrived.
3372 * @wc: Work completion associated with the received message.
3373 * @grh: References the received global route header.  This parameter is
3374 *   ignored unless the work completion indicates that the GRH is valid.
3375 * @ah_attr: Returned attributes that can be used when creating an address
3376 *   handle for replying to the message.
3377 * When ib_init_ah_attr_from_wc() returns success, ah_attr:
3378 * (a) for the IB link layer, optionally contains a reference to the SGID
3379 * attribute when a GRH is present.
3380 * (b) for the RoCE link layer, contains a reference to the SGID attribute.
3381 * The user must invoke rdma_cleanup_ah_attr_gid_attr() to release the SGID
3382 * attribute references initialized by ib_init_ah_attr_from_wc().
3383 *
3384 */
3385int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
3386                            const struct ib_wc *wc, const struct ib_grh *grh,
3387                            struct rdma_ah_attr *ah_attr);
3388
3389/**
3390 * ib_create_ah_from_wc - Creates an address handle associated with the
3391 *   sender of the specified work completion.
3392 * @pd: The protection domain associated with the address handle.
3393 * @wc: Work completion information associated with a received message.
3394 * @grh: References the received global route header.  This parameter is
3395 *   ignored unless the work completion indicates that the GRH is valid.
3396 * @port_num: The outbound port number to associate with the address.
3397 *
3398 * The address handle is used to reference a local or global destination
3399 * in all UD QP post sends.
3400 */
3401struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
3402                                   const struct ib_grh *grh, u8 port_num);
3403
3404/**
3405 * rdma_modify_ah - Modifies the address vector associated with an address
3406 *   handle.
3407 * @ah: The address handle to modify.
3408 * @ah_attr: The new address vector attributes to associate with the
3409 *   address handle.
3410 */
3411int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3412
3413/**
3414 * rdma_query_ah - Queries the address vector associated with an address
3415 *   handle.
3416 * @ah: The address handle to query.
3417 * @ah_attr: The address vector attributes associated with the address
3418 *   handle.
3419 */
3420int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3421
3422enum rdma_destroy_ah_flags {
3423        /* In a sleepable context */
3424        RDMA_DESTROY_AH_SLEEPABLE = BIT(0),
3425};
3426
3427/**
3428 * rdma_destroy_ah_user - Destroys an address handle.
3429 * @ah: The address handle to destroy.
3430 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
3431 * @udata: Valid user data or NULL for kernel objects
3432 */
3433int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
3434
3435/**
3436 * rdma_destroy_ah - Destroys a kernel address handle.
3437 * @ah: The address handle to destroy.
3438 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
3439 *
3440 * NOTE: for user ah use rdma_destroy_ah_user with valid udata!
3441 */
3442static inline int rdma_destroy_ah(struct ib_ah *ah, u32 flags)
3443{
3444        return rdma_destroy_ah_user(ah, flags, NULL);
3445}
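
/*
 * Editorial sketch: creating and destroying a kernel AH from a context
 * where sleeping is allowed ("ah_attr" is prepared by the caller).
 *
 *	struct ib_ah *ah;
 *
 *	ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	...
 *	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
 */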
3446
3447/**
3448 * ib_create_srq - Creates a SRQ associated with the specified protection
3449 *   domain.
3450 * @pd: The protection domain associated with the SRQ.
3451 * @srq_init_attr: A list of initial attributes required to create the
3452 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
3453 *   the actual capabilities of the created SRQ.
3454 *
3455 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
3456 * requested size of the SRQ, and set to the actual values allocated
3457 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
3458 * will always be at least as large as the requested values.
3459 */
3460struct ib_srq *ib_create_srq(struct ib_pd *pd,
3461                             struct ib_srq_init_attr *srq_init_attr);
3462
3463/**
3464 * ib_modify_srq - Modifies the attributes for the specified SRQ.
3465 * @srq: The SRQ to modify.
3466 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
3467 *   the current values of selected SRQ attributes are returned.
3468 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
3469 *   are being modified.
3470 *
3471 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
3472 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
3473 * the number of receives queued drops below the limit.
3474 */
3475int ib_modify_srq(struct ib_srq *srq,
3476                  struct ib_srq_attr *srq_attr,
3477                  enum ib_srq_attr_mask srq_attr_mask);
3478
3479/**
3480 * ib_query_srq - Returns the attribute list and current values for the
3481 *   specified SRQ.
3482 * @srq: The SRQ to query.
3483 * @srq_attr: The attributes of the specified SRQ.
3484 */
3485int ib_query_srq(struct ib_srq *srq,
3486                 struct ib_srq_attr *srq_attr);
3487
3488/**
3489 * ib_destroy_srq_user - Destroys the specified SRQ.
3490 * @srq: The SRQ to destroy.
3491 * @udata: Valid user data or NULL for kernel objects
3492 */
3493int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);
3494
3495/**
3496 * ib_destroy_srq - Destroys the specified kernel SRQ.
3497 * @srq: The SRQ to destroy.
3498 *
3499 * NOTE: for user srq use ib_destroy_srq_user with valid udata!
3500 */
3501static inline int ib_destroy_srq(struct ib_srq *srq)
3502{
3503        return ib_destroy_srq_user(srq, NULL);
3504}
3505
3506/**
3507 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
3508 * @srq: The SRQ to post the work request on.
3509 * @recv_wr: A list of work requests to post on the receive queue.
3510 * @bad_recv_wr: On an immediate failure, this parameter will reference
3511 *   the work request that failed to be posted on the SRQ.
3512 */
3513static inline int ib_post_srq_recv(struct ib_srq *srq,
3514                                   const struct ib_recv_wr *recv_wr,
3515                                   const struct ib_recv_wr **bad_recv_wr)
3516{
3517        const struct ib_recv_wr *dummy;
3518
3519        return srq->device->ops.post_srq_recv(srq, recv_wr,
3520                                              bad_recv_wr ? : &dummy);
3521}
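
/*
 * Editorial sketch: posting one receive buffer to an SRQ.  "dma_addr" is
 * assumed to come from ib_dma_map_single() below, and "ctx" is a
 * hypothetical per-buffer cookie recovered from wc->wr_id on completion.
 *
 *	struct ib_sge sge = {
 *		.addr	= dma_addr,
 *		.length	= buf_len,
 *		.lkey	= pd->local_dma_lkey,
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id	 = (uintptr_t)ctx,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *
 *	ret = ib_post_srq_recv(srq, &wr, NULL);
 */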
3522
3523/**
3524 * ib_create_qp_user - Creates a QP associated with the specified protection
3525 *   domain.
3526 * @pd: The protection domain associated with the QP.
3527 * @qp_init_attr: A list of initial attributes required to create the
3528 *   QP.  If QP creation succeeds, then the attributes are updated to
3529 *   the actual capabilities of the created QP.
3530 * @udata: Valid user data or NULL for kernel objects
3531 */
3532struct ib_qp *ib_create_qp_user(struct ib_pd *pd,
3533                                struct ib_qp_init_attr *qp_init_attr,
3534                                struct ib_udata *udata);
3535
3536/**
3537 * ib_create_qp - Creates a kernel QP associated with the specified protection
3538 *   domain.
3539 * @pd: The protection domain associated with the QP.
3540 * @qp_init_attr: A list of initial attributes required to create the
3541 *   QP.  If QP creation succeeds, then the attributes are updated to
3542 *   the actual capabilities of the created QP.
3543 * @udata: Valid user data or NULL for kernel objects
3544 *
3545 * NOTE: for user qp use ib_create_qp_user with valid udata!
3546 */
3547static inline struct ib_qp *ib_create_qp(struct ib_pd *pd,
3548                                         struct ib_qp_init_attr *qp_init_attr)
3549{
3550        return ib_create_qp_user(pd, qp_init_attr, NULL);
3551}
3552
3553/**
3554 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
3555 * @qp: The QP to modify.
3556 * @attr: On input, specifies the QP attributes to modify.  On output,
3557 *   the current values of selected QP attributes are returned.
3558 * @attr_mask: A bit-mask used to specify which attributes of the QP
3559 *   are being modified.
3560 * @udata: pointer to user's input/output buffer information needed by
3561 *   the provider driver.
3562 * It returns 0 on success and an appropriate error code on failure.
3563 */
3564int ib_modify_qp_with_udata(struct ib_qp *qp,
3565                            struct ib_qp_attr *attr,
3566                            int attr_mask,
3567                            struct ib_udata *udata);
3568
3569/**
3570 * ib_modify_qp - Modifies the attributes for the specified QP and then
3571 *   transitions the QP to the given state.
3572 * @qp: The QP to modify.
3573 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
3574 *   the current values of selected QP attributes are returned.
3575 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
3576 *   are being modified.
3577 */
3578int ib_modify_qp(struct ib_qp *qp,
3579                 struct ib_qp_attr *qp_attr,
3580                 int qp_attr_mask);
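
/*
 * Editorial sketch: the classic RESET -> INIT transition for an RC QP.
 * The mask must name exactly the attributes the IB spec requires for the
 * transition, which is what ib_modify_qp_is_ok() checks driver-side.
 * "port" and the access flags are caller policy.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state	 = IB_QPS_INIT,
 *		.pkey_index	 = 0,
 *		.port_num	 = port,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *
 *	ret = ib_modify_qp(qp, &attr,
 *			   IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT |
 *			   IB_QP_ACCESS_FLAGS);
 */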
3581
3582/**
3583 * ib_query_qp - Returns the attribute list and current values for the
3584 *   specified QP.
3585 * @qp: The QP to query.
3586 * @qp_attr: The attributes of the specified QP.
3587 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
3588 * @qp_init_attr: Additional attributes of the selected QP.
3589 *
3590 * The qp_attr_mask may be used to limit the query to gathering only the
3591 * selected attributes.
3592 */
3593int ib_query_qp(struct ib_qp *qp,
3594                struct ib_qp_attr *qp_attr,
3595                int qp_attr_mask,
3596                struct ib_qp_init_attr *qp_init_attr);
3597
3598/**
3599 * ib_destroy_qp_user - Destroys the specified QP.
3600 * @qp: The QP to destroy.
3601 * @udata: Valid udata or NULL for kernel objects
3602 */
3603int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);
3604
3605/**
3606 * ib_destroy_qp - Destroys the specified kernel QP.
3607 * @qp: The QP to destroy.
3608 *
3609 * NOTE: for user qp use ib_destroy_qp_user with valid udata!
3610 */
3611static inline int ib_destroy_qp(struct ib_qp *qp)
3612{
3613        return ib_destroy_qp_user(qp, NULL);
3614}
3615
3616/**
3617 * ib_open_qp - Obtain a reference to an existing sharable QP.
3618 * @xrcd: XRC domain
3619 * @qp_open_attr: Attributes identifying the QP to open.
3620 *
3621 * Returns a reference to a sharable QP.
3622 */
3623struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3624                         struct ib_qp_open_attr *qp_open_attr);
3625
3626/**
3627 * ib_close_qp - Release an external reference to a QP.
3628 * @qp: The QP handle to release
3629 *
3630 * The opened QP handle is released by the caller.  The underlying
3631 * shared QP is not destroyed until all internal references are released.
3632 */
3633int ib_close_qp(struct ib_qp *qp);
3634
3635/**
3636 * ib_post_send - Posts a list of work requests to the send queue of
3637 *   the specified QP.
3638 * @qp: The QP to post the work request on.
3639 * @send_wr: A list of work requests to post on the send queue.
3640 * @bad_send_wr: On an immediate failure, this parameter will reference
3641 *   the work request that failed to be posted on the QP.
3642 *
3643 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
3644 * error is returned, the QP state shall not be affected,
3645 * ib_post_send() will return an immediate error after queueing any
3646 * earlier work requests in the list.
3647 */
3648static inline int ib_post_send(struct ib_qp *qp,
3649                               const struct ib_send_wr *send_wr,
3650                               const struct ib_send_wr **bad_send_wr)
3651{
3652        const struct ib_send_wr *dummy;
3653
3654        return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
3655}
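
/*
 * Editorial sketch: posting a single signaled SEND.  "ctx->cqe" is a
 * hypothetical embedded struct ib_cqe whose ->done callback pairs with
 * the ib_alloc_cq() completion machinery below.
 *
 *	struct ib_sge sge = {
 *		.addr	= dma_addr,
 *		.length	= len,
 *		.lkey	= pd->local_dma_lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_cqe	    = &ctx->cqe,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode	    = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *
 *	ret = ib_post_send(qp, &wr, NULL);
 */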
3656
3657/**
3658 * ib_post_recv - Posts a list of work requests to the receive queue of
3659 *   the specified QP.
3660 * @qp: The QP to post the work request on.
3661 * @recv_wr: A list of work requests to post on the receive queue.
3662 * @bad_recv_wr: On an immediate failure, this parameter will reference
3663 *   the work request that failed to be posted on the QP.
3664 */
3665static inline int ib_post_recv(struct ib_qp *qp,
3666                               const struct ib_recv_wr *recv_wr,
3667                               const struct ib_recv_wr **bad_recv_wr)
3668{
3669        const struct ib_recv_wr *dummy;
3670
3671        return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
3672}
3673
3674struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
3675                                 int nr_cqe, int comp_vector,
3676                                 enum ib_poll_context poll_ctx,
3677                                 const char *caller, struct ib_udata *udata);
3678
3679/**
3680 * ib_alloc_cq_user - Allocate a kernel/user CQ
3681 * @dev: The IB device
3682 * @private: Private data attached to the CQE
3683 * @nr_cqe: Number of CQEs in the CQ
3684 * @comp_vector: Completion vector used for the IRQs
3685 * @poll_ctx: Context used for polling the CQ
3686 * @udata: Valid user data or NULL for kernel objects
3687 */
3688static inline struct ib_cq *ib_alloc_cq_user(struct ib_device *dev,
3689                                             void *private, int nr_cqe,
3690                                             int comp_vector,
3691                                             enum ib_poll_context poll_ctx,
3692                                             struct ib_udata *udata)
3693{
3694        return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
3695                                  KBUILD_MODNAME, udata);
3696}
3697
3698/**
3699 * ib_alloc_cq - Allocate a kernel CQ
3700 * @dev: The IB device
3701 * @private: Private data attached to the CQE
3702 * @nr_cqe: Number of CQEs in the CQ
3703 * @comp_vector: Completion vector used for the IRQs
3704 * @poll_ctx: Context used for polling the CQ
3705 *
3706 * NOTE: for user cq use ib_alloc_cq_user with valid udata!
3707 */
3708static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
3709                                        int nr_cqe, int comp_vector,
3710                                        enum ib_poll_context poll_ctx)
3711{
3712        return ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
3713                                NULL);
3714}
3715
3716/**
3717 * ib_free_cq_user - Free kernel/user CQ
3718 * @cq: The CQ to free
3719 * @udata: Valid user data or NULL for kernel objects
3720 */
3721void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata);
3722
3723/**
3724 * ib_free_cq - Free kernel CQ
3725 * @cq: The CQ to free
3726 *
3727 * NOTE: for user cq use ib_free_cq_user with valid udata!
3728 */
3729static inline void ib_free_cq(struct ib_cq *cq)
3730{
3731        ib_free_cq_user(cq, NULL);
3732}
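
/*
 * Editorial sketch: allocating a softirq-polled kernel CQ and releasing
 * it; the CQE count and completion vector ("ctx" private data here) are
 * consumer policy.
 *
 *	struct ib_cq *cq;
 *
 *	cq = ib_alloc_cq(dev, ctx, 128, 0, IB_POLL_SOFTIRQ);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	...
 *	ib_free_cq(cq);
 */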
3733
3734int ib_process_cq_direct(struct ib_cq *cq, int budget);
3735
3736/**
3737 * ib_create_cq - Creates a CQ on the specified device.
3738 * @device: The device on which to create the CQ.
3739 * @comp_handler: A user-specified callback that is invoked when a
3740 *   completion event occurs on the CQ.
3741 * @event_handler: A user-specified callback that is invoked when an
3742 *   asynchronous event not associated with a completion occurs on the CQ.
3743 * @cq_context: Context associated with the CQ returned to the user via
3744 *   the associated completion and event handlers.
3745 * @cq_attr: The attributes the CQ should be created upon.
3746 *
3747 * Users can examine the cq structure to determine the actual CQ size.
3748 */
3749struct ib_cq *__ib_create_cq(struct ib_device *device,
3750                             ib_comp_handler comp_handler,
3751                             void (*event_handler)(struct ib_event *, void *),
3752                             void *cq_context,
3753                             const struct ib_cq_init_attr *cq_attr,
3754                             const char *caller);
3755#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
3756        __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
3757
3758/**
3759 * ib_resize_cq - Modifies the capacity of the CQ.
3760 * @cq: The CQ to resize.
3761 * @cqe: The minimum size of the CQ.
3762 *
3763 * Users can examine the cq structure to determine the actual CQ size.
3764 */
3765int ib_resize_cq(struct ib_cq *cq, int cqe);
3766
3767/**
3768 * rdma_set_cq_moderation - Modifies moderation params of the CQ
3769 * @cq: The CQ to modify.
3770 * @cq_count: number of CQEs that will trigger an event
3771 * @cq_period: max period of time in usec before triggering an event
3772 *
3773 */
3774int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
3775
3776/**
3777 * ib_destroy_cq_user - Destroys the specified CQ.
3778 * @cq: The CQ to destroy.
3779 * @udata: Valid user data or NULL for kernel objects
3780 */
3781int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
3782
3783/**
3784 * ib_destroy_cq - Destroys the specified kernel CQ.
3785 * @cq: The CQ to destroy.
3786 *
3787 * NOTE: for user cq use ib_destroy_cq_user with valid udata!
3788 */
3789static inline void ib_destroy_cq(struct ib_cq *cq)
3790{
3791        ib_destroy_cq_user(cq, NULL);
3792}
3793
3794/**
3795 * ib_poll_cq - poll a CQ for completion(s)
3796 * @cq: the CQ being polled
3797 * @num_entries: maximum number of completions to return
3798 * @wc: array of at least @num_entries &struct ib_wc where completions
3799 *   will be returned
3800 *
3801 * Poll a CQ for (possibly multiple) completions.  If the return value
3802 * is < 0, an error occurred.  If the return value is >= 0, it is the
3803 * number of completions returned.  If the return value is
3804 * non-negative and < num_entries, then the CQ was emptied.
3805 */
3806static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
3807                             struct ib_wc *wc)
3808{
3809        return cq->device->ops.poll_cq(cq, num_entries, wc);
3810}
3811
3812/**
3813 * ib_req_notify_cq - Request completion notification on a CQ.
3814 * @cq: The CQ to generate an event for.
3815 * @flags:
3816 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
3817 *   to request an event on the next solicited event or next work
3818 *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
3819 *   may also be |ed in to request a hint about missed events, as
3820 *   described below.
3821 *
3822 * Return Value:
3823 *    < 0 means an error occurred while requesting notification
3824 *   == 0 means notification was requested successfully, and if
3825 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
3826 *        were missed and it is safe to wait for another event.  In
3827 *        this case it is guaranteed that any work completions added
3828 *        to the CQ since the last CQ poll will trigger a completion
3829 *        notification event.
3830 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
3831 *        in.  It means that the consumer must poll the CQ again to
3832 *        make sure it is empty to avoid missing an event because of a
3833 *        race between requesting notification and an entry being
3834 *        added to the CQ.  This return value means it is possible
3835 *        (but not guaranteed) that a work completion has been added
3836 *        to the CQ since the last poll without triggering a
3837 *        completion notification event.
3838 */
3839static inline int ib_req_notify_cq(struct ib_cq *cq,
3840                                   enum ib_cq_notify_flags flags)
3841{
3842        return cq->device->ops.req_notify_cq(cq, flags);
3843}
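
/*
 * Editorial sketch of the race-free poll loop the return values above are
 * designed for (consumers of ib_alloc_cq() get this behaviour for free;
 * "handle_wc" is hypothetical):
 *
 *	struct ib_wc wc;
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle_wc(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */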
3844
3845/**
3846 * ib_req_ncomp_notif - Request completion notification when there are
3847 *   at least the specified number of unreaped completions on the CQ.
3848 * @cq: The CQ to generate an event for.
3849 * @wc_cnt: The number of unreaped completions that should be on the
3850 *   CQ before an event is generated.
3851 */
3852static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
3853{
3854        return cq->device->ops.req_ncomp_notif ?
3855                cq->device->ops.req_ncomp_notif(cq, wc_cnt) :
3856                -ENOSYS;
3857}
3858
3859/**
3860 * ib_dma_mapping_error - check a DMA addr for error
3861 * @dev: The device for which the dma_addr was created
3862 * @dma_addr: The DMA address to check
3863 */
3864static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
3865{
3866        return dma_mapping_error(dev->dma_device, dma_addr);
3867}
3868
3869/**
3870 * ib_dma_map_single - Map a kernel virtual address to DMA address
3871 * @dev: The device for which the dma_addr is to be created
3872 * @cpu_addr: The kernel virtual address
3873 * @size: The size of the region in bytes
3874 * @direction: The direction of the DMA
3875 */
3876static inline u64 ib_dma_map_single(struct ib_device *dev,
3877                                    void *cpu_addr, size_t size,
3878                                    enum dma_data_direction direction)
3879{
3880        return dma_map_single(dev->dma_device, cpu_addr, size, direction);
3881}
3882
3883/**
3884 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
3885 * @dev: The device for which the DMA address was created
3886 * @addr: The DMA address
3887 * @size: The size of the region in bytes
3888 * @direction: The direction of the DMA
3889 */
3890static inline void ib_dma_unmap_single(struct ib_device *dev,
3891                                       u64 addr, size_t size,
3892                                       enum dma_data_direction direction)
3893{
3894        dma_unmap_single(dev->dma_device, addr, size, direction);
3895}
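
/*
 * Editorial sketch: a map/check/unmap cycle.  Every successful mapping
 * must be balanced by an unmap with the same size and direction.
 *
 *	u64 dma_addr;
 *
 *	dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(dev, dma_addr))
 *		return -ENOMEM;
 *	...
 *	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
 */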
3896
3897/**
3898 * ib_dma_map_page - Map a physical page to DMA address
3899 * @dev: The device for which the dma_addr is to be created
3900 * @page: The page to be mapped
3901 * @offset: The offset within the page
3902 * @size: The size of the region in bytes
3903 * @direction: The direction of the DMA
3904 */
3905static inline u64 ib_dma_map_page(struct ib_device *dev,
3906                                  struct page *page,
3907                                  unsigned long offset,
3908                                  size_t size,
3909                                         enum dma_data_direction direction)
3910{
3911        return dma_map_page(dev->dma_device, page, offset, size, direction);
3912}
3913
3914/**
3915 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
3916 * @dev: The device for which the DMA address was created
3917 * @addr: The DMA address
3918 * @size: The size of the region in bytes
3919 * @direction: The direction of the DMA
3920 */
3921static inline void ib_dma_unmap_page(struct ib_device *dev,
3922                                     u64 addr, size_t size,
3923                                     enum dma_data_direction direction)
3924{
3925        dma_unmap_page(dev->dma_device, addr, size, direction);
3926}
3927
3928/**
3929 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
3930 * @dev: The device for which the DMA addresses are to be created
3931 * @sg: The array of scatter/gather entries
3932 * @nents: The number of scatter/gather entries
3933 * @direction: The direction of the DMA
3934 */
3935static inline int ib_dma_map_sg(struct ib_device *dev,
3936                                struct scatterlist *sg, int nents,
3937                                enum dma_data_direction direction)
3938{
3939        return dma_map_sg(dev->dma_device, sg, nents, direction);
3940}
3941
3942/**
3943 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
3944 * @dev: The device for which the DMA addresses were created
3945 * @sg: The array of scatter/gather entries
3946 * @nents: The number of scatter/gather entries
3947 * @direction: The direction of the DMA
3948 */
3949static inline void ib_dma_unmap_sg(struct ib_device *dev,
3950                                   struct scatterlist *sg, int nents,
3951                                   enum dma_data_direction direction)
3952{
3953        dma_unmap_sg(dev->dma_device, sg, nents, direction);
3954}
3955
3956static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
3957                                      struct scatterlist *sg, int nents,
3958                                      enum dma_data_direction direction,
3959                                      unsigned long dma_attrs)
3960{
3961        return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
3962                                dma_attrs);
3963}
3964
3965static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
3966                                         struct scatterlist *sg, int nents,
3967                                         enum dma_data_direction direction,
3968                                         unsigned long dma_attrs)
3969{
3970        dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
3971}
3972
3973/**
3974 * ib_dma_max_seg_size - Return the size limit of a single DMA transfer
3975 * @dev: The device to query
3976 *
3977 * The returned value represents a size in bytes.
3978 */
3979static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
3980{
3981        struct device_dma_parameters *p = dev->dma_device->dma_parms;
3982
3983        return p ? p->max_segment_size : UINT_MAX;
3984}
3985
3986/**
3987 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
3988 * @dev: The device for which the DMA address was created
3989 * @addr: The DMA address
3990 * @size: The size of the region in bytes
3991 * @dir: The direction of the DMA
3992 */
3993static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
3994                                              u64 addr,
3995                                              size_t size,
3996                                              enum dma_data_direction dir)
3997{
3998        dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
3999}
4000
4001/**
4002 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
4003 * @dev: The device for which the DMA address was created
4004 * @addr: The DMA address
4005 * @size: The size of the region in bytes
4006 * @dir: The direction of the DMA
4007 */
4008static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
4009                                                 u64 addr,
4010                                                 size_t size,
4011                                                 enum dma_data_direction dir)
4012{
4013        dma_sync_single_for_device(dev->dma_device, addr, size, dir);
4014}
4015
4016/**
4017 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
4018 * @dev: The device for which the DMA address is requested
4019 * @size: The size of the region to allocate in bytes
4020 * @dma_handle: A pointer for returning the DMA address of the region
4021 * @flag: memory allocator flags
4022 */
4023static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
4024                                           size_t size,
4025                                           dma_addr_t *dma_handle,
4026                                           gfp_t flag)
4027{
4028        return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
4029}
4030
4031/**
4032 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
4033 * @dev: The device for which the DMA addresses were allocated
4034 * @size: The size of the region
4035 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
4036 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
4037 */
4038static inline void ib_dma_free_coherent(struct ib_device *dev,
4039                                        size_t size, void *cpu_addr,
4040                                        dma_addr_t dma_handle)
4041{
4042        dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
4043}
4044
4045/**
4046 * ib_dereg_mr_user - Deregisters a memory region and removes it from the
4047 *   HCA translation table.
4048 * @mr: The memory region to deregister.
4049 * @udata: Valid user data or NULL for kernel object
4050 *
4051 * This function can fail if the memory region has memory windows bound to it.
4052 */
4053int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata);
4054
4055/**
4056 * ib_dereg_mr - Deregisters a kernel memory region and removes it from the
4057 *   HCA translation table.
4058 * @mr: The memory region to deregister.
4059 *
4060 * This function can fail if the memory region has memory windows bound to it.
4061 *
4062 * NOTE: for user mr use ib_dereg_mr_user with valid udata!
4063 */
4064static inline int ib_dereg_mr(struct ib_mr *mr)
4065{
4066        return ib_dereg_mr_user(mr, NULL);
4067}
4068
4069struct ib_mr *ib_alloc_mr_user(struct ib_pd *pd, enum ib_mr_type mr_type,
4070                               u32 max_num_sg, struct ib_udata *udata);
4071
4072static inline struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
4073                                        enum ib_mr_type mr_type, u32 max_num_sg)
4074{
4075        return ib_alloc_mr_user(pd, mr_type, max_num_sg, NULL);
4076}
4077
4078struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
4079                                    u32 max_num_data_sg,
4080                                    u32 max_num_meta_sg);
4081
4082/**
4083 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
4084 *   R_Key and L_Key.
4085 * @mr: struct ib_mr pointer to be updated.
4086 * @newkey: new key to be used.
4087 */
4088static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
4089{
4090        mr->lkey = (mr->lkey & 0xffffff00) | newkey;
4091        mr->rkey = (mr->rkey & 0xffffff00) | newkey;
4092}
4093
4094/**
4095 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
4096 * for calculating a new rkey for type 2 memory windows.
4097 * @rkey: the rkey to increment.
4098 */
4099static inline u32 ib_inc_rkey(u32 rkey)
4100{
4101        const u32 mask = 0x000000ff;
4102        return ((rkey + 1) & mask) | (rkey & ~mask);
4103}
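
/*
 * Editorial sketch: cycling the key byte before re-registering a fast-reg
 * MR, so that stale remote references cannot match the new mapping.
 *
 *	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
 */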
4104
4105/**
4106 * ib_alloc_fmr - Allocates an unmapped fast memory region.
4107 * @pd: The protection domain associated with the unmapped region.
4108 * @mr_access_flags: Specifies the memory access rights.
4109 * @fmr_attr: Attributes of the unmapped region.
4110 *
4111 * A fast memory region must be mapped before it can be used as part of
4112 * a work request.
4113 */
4114struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
4115                            int mr_access_flags,
4116                            struct ib_fmr_attr *fmr_attr);
4117
4118/**
4119 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
4120 * @fmr: The fast memory region to associate with the pages.
4121 * @page_list: An array of physical pages to map to the fast memory region.
4122 * @list_len: The number of pages in page_list.
4123 * @iova: The I/O virtual address to use with the mapped region.
4124 */
4125static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
4126                                  u64 *page_list, int list_len,
4127                                  u64 iova)
4128{
4129        return fmr->device->ops.map_phys_fmr(fmr, page_list, list_len, iova);
4130}
4131
4132/**
4133 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
4134 * @fmr_list: A linked list of fast memory regions to unmap.
4135 */
4136int ib_unmap_fmr(struct list_head *fmr_list);
4137
4138/**
4139 * ib_dealloc_fmr - Deallocates a fast memory region.
4140 * @fmr: The fast memory region to deallocate.
4141 */
4142int ib_dealloc_fmr(struct ib_fmr *fmr);
4143
4144/**
4145 * ib_attach_mcast - Attaches the specified QP to a multicast group.
4146 * @qp: QP to attach to the multicast group.  The QP must be type
4147 *   IB_QPT_UD.
4148 * @gid: Multicast group GID.
4149 * @lid: Multicast group LID in host byte order.
4150 *
4151 * In order to send and receive multicast packets, subnet
4152 * administration must have created the multicast group and configured
4153 * the fabric appropriately.  The port associated with the specified
4154 * QP must also be a member of the multicast group.
4155 */
4156int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4157
4158/**
4159 * ib_detach_mcast - Detaches the specified QP from a multicast group.
4160 * @qp: QP to detach from the multicast group.
4161 * @gid: Multicast group GID.
4162 * @lid: Multicast group LID in host byte order.
4163 */
4164int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4165
4166/**
4167 * ib_alloc_xrcd - Allocates an XRC domain.
4168 * @device: The device on which to allocate the XRC domain.
4169 * @caller: Module name for kernel consumers
4170 */
4171struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
4172#define ib_alloc_xrcd(device) \
4173        __ib_alloc_xrcd((device), KBUILD_MODNAME)
4174
4175/**
4176 * ib_dealloc_xrcd - Deallocates an XRC domain.
4177 * @xrcd: The XRC domain to deallocate.
4178 * @udata: Valid user data or NULL for kernel object
4179 */
4180int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
4181
4182static inline int ib_check_mr_access(int flags)
4183{
4184        /*
4185         * Local write permission is required if remote write or
4186         * remote atomic permission is also requested.
4187         */
4188        if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
4189            !(flags & IB_ACCESS_LOCAL_WRITE))
4190                return -EINVAL;
4191
4192        return 0;
4193}
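
/*
 * Editorial example: IB_ACCESS_REMOTE_WRITE alone fails the check above
 * with -EINVAL; IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE passes.
 */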
4194
4195static inline bool ib_access_writable(int access_flags)
4196{
4197        /*
4198         * We have writable memory backing the MR if any of the following
4199         * access flags are set.  "Local write" and "remote write" obviously
4200         * require write access.  "Remote atomic" can do things like fetch and
4201         * add, which will modify memory, and "MW bind" can change permissions
4202         * by binding a window.
4203         */
4204        return access_flags &
4205                (IB_ACCESS_LOCAL_WRITE   | IB_ACCESS_REMOTE_WRITE |
4206                 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
4207}
4208
4209/**
4210 * ib_check_mr_status - lightweight check of MR status.
4211 *     This routine may provide status checks on a selected
4212 *     ib_mr; the first use is for signature status checks.
4213 *
4214 * @mr: A memory region.
4215 * @check_mask: Bitmask of which checks to perform from
4216 *     ib_mr_status_check enumeration.
4217 * @mr_status: The container of relevant status checks.
4218 *     Failed checks will be indicated in the status bitmask
4219 *     and the relevant info will be in the error item.
4220 */
4221int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
4222                       struct ib_mr_status *mr_status);
4223
4224/**
4225 * ib_device_try_get - Hold a registration lock
4226 * @device: The device to lock
4227 *
4228 * A device under an active registration lock cannot become unregistered. It
4229 * is only possible to obtain a registration lock on a device that is fully
4230 * registered, otherwise this function returns false.
4231 *
4232 * The registration lock is only necessary for actions which require the
4233 * device to still be registered. Uses that only require the device pointer to
4234 * be valid should use get_device(&ibdev->dev) to hold the memory.
4235 *
4236 */
4237static inline bool ib_device_try_get(struct ib_device *dev)
4238{
4239        return refcount_inc_not_zero(&dev->refcount);
4240}
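
/*
 * Editorial sketch of the registration-lock pattern:
 *
 *	if (!ib_device_try_get(ibdev))
 *		return -ENODEV;
 *	... act on the still-registered device ...
 *	ib_device_put(ibdev);
 */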

void ib_device_put(struct ib_device *device);
struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
                                          enum rdma_driver_id driver_id);
struct ib_device *ib_device_get_by_name(const char *name,
                                        enum rdma_driver_id driver_id);
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
                                            u16 pkey, const union ib_gid *gid,
                                            const struct sockaddr *addr);
int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
                         unsigned int port);
struct net_device *ib_device_netdev(struct ib_device *dev, u8 port);

struct ib_wq *ib_create_wq(struct ib_pd *pd,
                           struct ib_wq_init_attr *init_attr);
int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
                 u32 wq_attr_mask);
struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
                                                 struct ib_rwq_ind_table_init_attr *
                                                 wq_ind_table_init_attr);
int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
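
/*
 * Example (illustrative sketch): building a two-entry RSS indirection
 * table over previously created receive WQs wq0 and wq1 (assumed to
 * exist and be in the READY state):
 *
 *      struct ib_wq *ind_tbl[2] = { wq0, wq1 };
 *      struct ib_rwq_ind_table_init_attr init_attr = {
 *              .log_ind_tbl_size = 1,          // table size = 1 << 1 = 2
 *              .ind_tbl = ind_tbl,
 *      };
 *      struct ib_rwq_ind_table *rwq_ind_tbl =
 *              ib_create_rwq_ind_table(device, &init_attr);
 */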

int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
                 unsigned int *sg_offset, unsigned int page_size);
int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
                    int data_sg_nents, unsigned int *data_sg_offset,
                    struct scatterlist *meta_sg, int meta_sg_nents,
                    unsigned int *meta_sg_offset, unsigned int page_size);

static inline int
ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
                  unsigned int *sg_offset, unsigned int page_size)
{
        int n;

        n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
        mr->iova = 0;

        return n;
}
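
/*
 * Example (illustrative sketch): registering a scatterlist with a
 * previously allocated MR and posting a fast-registration work request.
 * Error handling and WR chaining are elided; ib_map_mr_sg() returns the
 * number of entries it mapped, so anything less than sg_nents means the
 * mapping was only partial:
 *
 *      int ret, n;
 *
 *      n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
 *      if (n == sg_nents) {
 *              struct ib_reg_wr reg_wr = {
 *                      .wr.opcode = IB_WR_REG_MR,
 *                      .mr = mr,
 *                      .key = mr->rkey,
 *                      .access = IB_ACCESS_LOCAL_WRITE |
 *                                IB_ACCESS_REMOTE_READ,
 *              };
 *              ret = ib_post_send(qp, &reg_wr.wr, NULL);
 *      }
 */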

int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
                unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));

void ib_drain_rq(struct ib_qp *qp);
void ib_drain_sq(struct ib_qp *qp);
void ib_drain_qp(struct ib_qp *qp);
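
/*
 * Example (illustrative sketch): quiescing a QP before destroying it,
 * so all outstanding work requests have been flushed and completed:
 *
 *      ib_drain_qp(qp);        // moves the QP to error, waits for WRs
 *      ib_destroy_qp(qp);
 */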

int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);

static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
{
        if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
                return attr->roce.dmac;
        return NULL;
}

static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
{
        if (attr->type == RDMA_AH_ATTR_TYPE_IB)
                attr->ib.dlid = (u16)dlid;
        else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
                attr->opa.dlid = dlid;
}

static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
{
        if (attr->type == RDMA_AH_ATTR_TYPE_IB)
                return attr->ib.dlid;
        else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
                return attr->opa.dlid;
        return 0;
}

static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
{
        attr->sl = sl;
}

static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
{
        return attr->sl;
}

static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
                                         u8 src_path_bits)
{
        if (attr->type == RDMA_AH_ATTR_TYPE_IB)
                attr->ib.src_path_bits = src_path_bits;
        else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
                attr->opa.src_path_bits = src_path_bits;
}

static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
{
        if (attr->type == RDMA_AH_ATTR_TYPE_IB)
                return attr->ib.src_path_bits;
        else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
                return attr->opa.src_path_bits;
        return 0;
}

static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
                                        bool make_grd)
{
        if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
                attr->opa.make_grd = make_grd;
}

static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
{
        if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
                return attr->opa.make_grd;
        return false;
}

static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
{
        attr->port_num = port_num;
}

static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
{
        return attr->port_num;
}

static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
                                           u8 static_rate)
{
        attr->static_rate = static_rate;
}

static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
{
        return attr->static_rate;
}

static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
                                        enum ib_ah_flags flag)
{
        attr->ah_flags = flag;
}

static inline enum ib_ah_flags
                rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
{
        return attr->ah_flags;
}

static inline const struct ib_global_route
                *rdma_ah_read_grh(const struct rdma_ah_attr *attr)
{
        return &attr->grh;
}

/* To retrieve and modify the GRH */
static inline struct ib_global_route
                *rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
{
        return &attr->grh;
}

static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
{
        struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

        memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
}

static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
                                             __be64 prefix)
{
        struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

        grh->dgid.global.subnet_prefix = prefix;
}

static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
                                            __be64 if_id)
{
        struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

        grh->dgid.global.interface_id = if_id;
}

static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
                                   union ib_gid *dgid, u32 flow_label,
                                   u8 sgid_index, u8 hop_limit,
                                   u8 traffic_class)
{
        struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

        attr->ah_flags = IB_AH_GRH;
        if (dgid)
                grh->dgid = *dgid;
        grh->flow_label = flow_label;
        grh->sgid_index = sgid_index;
        grh->hop_limit = hop_limit;
        grh->traffic_class = traffic_class;
        grh->sgid_attr = NULL;
}

void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);
void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
                             u32 flow_label, u8 hop_limit, u8 traffic_class,
                             const struct ib_gid_attr *sgid_attr);
void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
                       const struct rdma_ah_attr *src);
void rdma_replace_ah_attr(struct rdma_ah_attr *old,
                          const struct rdma_ah_attr *new);
void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);

/**
 * rdma_ah_find_type - Return address handle type.
 *
 * @dev: Device to be checked
 * @port_num: Port number
 */
static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
                                                       u8 port_num)
{
        if (rdma_protocol_roce(dev, port_num))
                return RDMA_AH_ATTR_TYPE_ROCE;
        if (rdma_protocol_ib(dev, port_num)) {
                if (rdma_cap_opa_ah(dev, port_num))
                        return RDMA_AH_ATTR_TYPE_OPA;
                return RDMA_AH_ATTR_TYPE_IB;
        }

        return RDMA_AH_ATTR_TYPE_UNDEFINED;
}
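
/*
 * Example (illustrative sketch): filling an address handle attribute for
 * a destination that requires a GRH. The port_num, sl, dlid, dgid,
 * flow_label, sgid_index, hop_limit and traffic_class values are assumed
 * to come from path resolution:
 *
 *      struct rdma_ah_attr ah_attr = {};
 *
 *      ah_attr.type = rdma_ah_find_type(dev, port_num);
 *      rdma_ah_set_port_num(&ah_attr, port_num);
 *      rdma_ah_set_sl(&ah_attr, sl);
 *      rdma_ah_set_dlid(&ah_attr, dlid);
 *      rdma_ah_set_grh(&ah_attr, &dgid, flow_label, sgid_index,
 *                      hop_limit, traffic_class);
 */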

/**
 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
 *     In the current implementation the only way to get
 *     the 32bit lid is from other sources for OPA.
 *     For IB, lids will always be 16bits so cast the
 *     value accordingly.
 *
 * @lid: A 32bit LID
 */
static inline u16 ib_lid_cpu16(u32 lid)
{
        WARN_ON_ONCE(lid & 0xFFFF0000);
        return (u16)lid;
}

/**
 * ib_lid_be16 - Return lid in 16bit BE encoding.
 *
 * @lid: A 32bit LID
 */
static inline __be16 ib_lid_be16(u32 lid)
{
        WARN_ON_ONCE(lid & 0xFFFF0000);
        return cpu_to_be16((u16)lid);
}

/**
 * ib_get_vector_affinity - Get the affinity mappings of a given completion
 *   vector
 * @device:         the rdma device
 * @comp_vector:    index of completion vector
 *
 * Returns NULL on failure, otherwise a corresponding cpu map of the
 * completion vector (returns all-cpus map if the device driver doesn't
 * implement get_vector_affinity).
 */
static inline const struct cpumask *
ib_get_vector_affinity(struct ib_device *device, int comp_vector)
{
        if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
            !device->ops.get_vector_affinity)
                return NULL;

        return device->ops.get_vector_affinity(device, comp_vector);
}
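
/*
 * Example (illustrative sketch): a ULP might prefer a completion vector
 * whose interrupt affinity covers the current CPU:
 *
 *      int vec, cpu = raw_smp_processor_id();
 *
 *      for (vec = 0; vec < device->num_comp_vectors; vec++) {
 *              const struct cpumask *mask =
 *                      ib_get_vector_affinity(device, vec);
 *              if (mask && cpumask_test_cpu(cpu, mask))
 *                      break;  // use this vector when creating the CQ
 *      }
 */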

/**
 * rdma_roce_rescan_device - Rescan all of the network devices in the system
 * and add their gids, as needed, to the relevant RoCE devices.
 *
 * @ibdev:          the rdma device
 */
void rdma_roce_rescan_device(struct ib_device *ibdev);

struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);

int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);

struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
                                     enum rdma_netdev_t type, const char *name,
                                     unsigned char name_assign_type,
                                     void (*setup)(struct net_device *));

int rdma_init_netdev(struct ib_device *device, u8 port_num,
                     enum rdma_netdev_t type, const char *name,
                     unsigned char name_assign_type,
                     void (*setup)(struct net_device *),
                     struct net_device *netdev);

/**
 * rdma_set_device_sysfs_group - Set device attributes group to have
 *                               driver specific sysfs entries for the
 *                               infiniband class.
 *
 * @dev:        device pointer for which the attributes are to be created
 * @group:      Pointer to a group which should be added when the device
 *              is registered with sysfs.
 * rdma_set_device_sysfs_group() allows existing drivers to expose one
 * group of sysfs attributes per device.
 *
 * NOTE: New drivers should not make use of this API; instead, new device
 * parameters should be exposed via the netlink command interface. This API
 * and mechanism exist only for existing drivers.
 */
static inline void
rdma_set_device_sysfs_group(struct ib_device *dev,
                            const struct attribute_group *group)
{
        dev->groups[1] = group;
}
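
/*
 * Example (illustrative sketch): an existing driver would typically call
 * this once before registering the device, with a statically defined
 * attribute group (mydrv_attrs and the names below are hypothetical):
 *
 *      static const struct attribute_group mydrv_attr_group = {
 *              .attrs = mydrv_attrs,
 *      };
 *
 *      rdma_set_device_sysfs_group(ibdev, &mydrv_attr_group);
 *      ret = ib_register_device(ibdev, "mydrv%d");
 */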

/**
 * rdma_device_to_ibdev - Get ib_device pointer from device pointer
 *
 * @device:     device pointer for which the ib_device pointer is to be
 *              retrieved
 *
 * rdma_device_to_ibdev() retrieves the ib_device pointer from the device.
 */
static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
{
        struct ib_core_device *coredev =
                container_of(device, struct ib_core_device, dev);

        return coredev->owner;
}

/**
 * rdma_device_to_drv_device - Helper macro to reach back to the driver's
 *                             ib_device holder structure from a device
 *                             pointer.
 *
 * NOTE: New drivers should not make use of this API; this API is only for
 * existing drivers that have exposed sysfs entries using
 * rdma_set_device_sysfs_group().
 */
#define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member)           \
        container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)
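
/*
 * Example (illustrative sketch): inside a driver's sysfs show() callback,
 * assuming a hypothetical struct mydrv_dev that embeds its ib_device as
 * the member "ibdev":
 *
 *      static ssize_t hca_type_show(struct device *device,
 *                                   struct device_attribute *attr, char *buf)
 *      {
 *              struct mydrv_dev *mdev =
 *                      rdma_device_to_drv_device(device, struct mydrv_dev,
 *                                                ibdev);
 *
 *              return sprintf(buf, "%s\n", mdev->hca_type);
 *      }
 */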

bool rdma_dev_access_netns(const struct ib_device *device,
                           const struct net *net);
#endif /* IB_VERBS_H */