linux/include/rdma/ib_verbs.h
<<
>>
Prefs
   1/*
   2 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
   3 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
   4 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
   5 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
   6 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
   7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
   8 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
   9 *
  10 * This software is available to you under a choice of one of two
  11 * licenses.  You may choose to be licensed under the terms of the GNU
  12 * General Public License (GPL) Version 2, available from the file
  13 * COPYING in the main directory of this source tree, or the
  14 * OpenIB.org BSD license below:
  15 *
  16 *     Redistribution and use in source and binary forms, with or
  17 *     without modification, are permitted provided that the following
  18 *     conditions are met:
  19 *
  20 *      - Redistributions of source code must retain the above
  21 *        copyright notice, this list of conditions and the following
  22 *        disclaimer.
  23 *
  24 *      - Redistributions in binary form must reproduce the above
  25 *        copyright notice, this list of conditions and the following
  26 *        disclaimer in the documentation and/or other materials
  27 *        provided with the distribution.
  28 *
  29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  36 * SOFTWARE.
  37 */
  38
  39#if !defined(IB_VERBS_H)
  40#define IB_VERBS_H
  41
  42#include <linux/types.h>
  43#include <linux/device.h>
  44#include <linux/dma-mapping.h>
  45#include <linux/kref.h>
  46#include <linux/list.h>
  47#include <linux/rwsem.h>
  48#include <linux/workqueue.h>
  49#include <linux/irq_poll.h>
  50#include <uapi/linux/if_ether.h>
  51#include <net/ipv6.h>
  52#include <net/ip.h>
  53#include <linux/string.h>
  54#include <linux/slab.h>
  55#include <linux/netdevice.h>
  56#include <linux/refcount.h>
  57#include <linux/if_link.h>
  58#include <linux/atomic.h>
  59#include <linux/mmu_notifier.h>
  60#include <linux/uaccess.h>
  61#include <linux/cgroup_rdma.h>
  62#include <linux/irqflags.h>
  63#include <linux/preempt.h>
  64#include <linux/dim.h>
  65#include <uapi/rdma/ib_user_verbs.h>
  66#include <rdma/rdma_counter.h>
  67#include <rdma/restrack.h>
  68#include <rdma/signature.h>
  69#include <uapi/rdma/rdma_user_ioctl.h>
  70#include <uapi/rdma/ib_user_ioctl_verbs.h>
  71
  72#define IB_FW_VERSION_NAME_MAX  ETHTOOL_FWVERS_LEN
  73
  74struct ib_umem_odp;
  75struct ib_uqp_object;
  76struct ib_usrq_object;
  77struct ib_uwq_object;
  78
  79extern struct workqueue_struct *ib_wq;
  80extern struct workqueue_struct *ib_comp_wq;
  81extern struct workqueue_struct *ib_comp_unbound_wq;
  82
  83struct ib_ucq_object;
  84
  85__printf(3, 4) __cold
  86void ibdev_printk(const char *level, const struct ib_device *ibdev,
  87                  const char *format, ...);
  88__printf(2, 3) __cold
  89void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
  90__printf(2, 3) __cold
  91void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
  92__printf(2, 3) __cold
  93void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
  94__printf(2, 3) __cold
  95void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
  96__printf(2, 3) __cold
  97void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
  98__printf(2, 3) __cold
  99void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
 100__printf(2, 3) __cold
 101void ibdev_info(const struct ib_device *ibdev, const char *format, ...);
 102
 103#if defined(CONFIG_DYNAMIC_DEBUG) || \
 104        (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
 105#define ibdev_dbg(__dev, format, args...)                       \
 106        dynamic_ibdev_dbg(__dev, format, ##args)
 107#else
 108__printf(2, 3) __cold
 109static inline
 110void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
 111#endif
 112
 113#define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...)           \
 114do {                                                                    \
 115        static DEFINE_RATELIMIT_STATE(_rs,                              \
 116                                      DEFAULT_RATELIMIT_INTERVAL,       \
 117                                      DEFAULT_RATELIMIT_BURST);         \
 118        if (__ratelimit(&_rs))                                          \
 119                ibdev_level(ibdev, fmt, ##__VA_ARGS__);                 \
 120} while (0)
 121
 122#define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
 123        ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
 124#define ibdev_alert_ratelimited(ibdev, fmt, ...) \
 125        ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
 126#define ibdev_crit_ratelimited(ibdev, fmt, ...) \
 127        ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
 128#define ibdev_err_ratelimited(ibdev, fmt, ...) \
 129        ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
 130#define ibdev_warn_ratelimited(ibdev, fmt, ...) \
 131        ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
 132#define ibdev_notice_ratelimited(ibdev, fmt, ...) \
 133        ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
 134#define ibdev_info_ratelimited(ibdev, fmt, ...) \
 135        ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)
 136
 137#if defined(CONFIG_DYNAMIC_DEBUG) || \
 138        (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
 139/* descriptor check is first to prevent flooding with "callbacks suppressed" */
 140#define ibdev_dbg_ratelimited(ibdev, fmt, ...)                          \
 141do {                                                                    \
 142        static DEFINE_RATELIMIT_STATE(_rs,                              \
 143                                      DEFAULT_RATELIMIT_INTERVAL,       \
 144                                      DEFAULT_RATELIMIT_BURST);         \
 145        DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);                 \
 146        if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs))      \
 147                __dynamic_ibdev_dbg(&descriptor, ibdev, fmt,            \
 148                                    ##__VA_ARGS__);                     \
 149} while (0)
 150#else
 151__printf(2, 3) __cold
 152static inline
 153void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {}
 154#endif
 155
 156union ib_gid {
 157        u8      raw[16];
 158        struct {
 159                __be64  subnet_prefix;
 160                __be64  interface_id;
 161        } global;
 162};
 163
 164extern union ib_gid zgid;
 165
 166enum ib_gid_type {
 167        /* If link layer is Ethernet, this is RoCE V1 */
 168        IB_GID_TYPE_IB        = 0,
 169        IB_GID_TYPE_ROCE      = 0,
 170        IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
 171        IB_GID_TYPE_SIZE
 172};
 173
 174#define ROCE_V2_UDP_DPORT      4791
 175struct ib_gid_attr {
 176        struct net_device __rcu *ndev;
 177        struct ib_device        *device;
 178        union ib_gid            gid;
 179        enum ib_gid_type        gid_type;
 180        u16                     index;
 181        u8                      port_num;
 182};
 183
 184enum {
 185        /* set the local administered indication */
 186        IB_SA_WELL_KNOWN_GUID   = BIT_ULL(57) | 2,
 187};
 188
 189enum rdma_transport_type {
 190        RDMA_TRANSPORT_IB,
 191        RDMA_TRANSPORT_IWARP,
 192        RDMA_TRANSPORT_USNIC,
 193        RDMA_TRANSPORT_USNIC_UDP,
 194        RDMA_TRANSPORT_UNSPECIFIED,
 195};
 196
 197enum rdma_protocol_type {
 198        RDMA_PROTOCOL_IB,
 199        RDMA_PROTOCOL_IBOE,
 200        RDMA_PROTOCOL_IWARP,
 201        RDMA_PROTOCOL_USNIC_UDP
 202};
 203
 204__attribute_const__ enum rdma_transport_type
 205rdma_node_get_transport(unsigned int node_type);
 206
 207enum rdma_network_type {
 208        RDMA_NETWORK_IB,
 209        RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
 210        RDMA_NETWORK_IPV4,
 211        RDMA_NETWORK_IPV6
 212};
 213
 214static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
 215{
 216        if (network_type == RDMA_NETWORK_IPV4 ||
 217            network_type == RDMA_NETWORK_IPV6)
 218                return IB_GID_TYPE_ROCE_UDP_ENCAP;
 219
 220        /* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
 221        return IB_GID_TYPE_IB;
 222}
 223
 224static inline enum rdma_network_type
 225rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
 226{
 227        if (attr->gid_type == IB_GID_TYPE_IB)
 228                return RDMA_NETWORK_IB;
 229
 230        if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
 231                return RDMA_NETWORK_IPV4;
 232        else
 233                return RDMA_NETWORK_IPV6;
 234}
 235
 236enum rdma_link_layer {
 237        IB_LINK_LAYER_UNSPECIFIED,
 238        IB_LINK_LAYER_INFINIBAND,
 239        IB_LINK_LAYER_ETHERNET,
 240};
 241
 242enum ib_device_cap_flags {
 243        IB_DEVICE_RESIZE_MAX_WR                 = (1 << 0),
 244        IB_DEVICE_BAD_PKEY_CNTR                 = (1 << 1),
 245        IB_DEVICE_BAD_QKEY_CNTR                 = (1 << 2),
 246        IB_DEVICE_RAW_MULTI                     = (1 << 3),
 247        IB_DEVICE_AUTO_PATH_MIG                 = (1 << 4),
 248        IB_DEVICE_CHANGE_PHY_PORT               = (1 << 5),
 249        IB_DEVICE_UD_AV_PORT_ENFORCE            = (1 << 6),
 250        IB_DEVICE_CURR_QP_STATE_MOD             = (1 << 7),
 251        IB_DEVICE_SHUTDOWN_PORT                 = (1 << 8),
 252        /* Not in use, former INIT_TYPE         = (1 << 9),*/
 253        IB_DEVICE_PORT_ACTIVE_EVENT             = (1 << 10),
 254        IB_DEVICE_SYS_IMAGE_GUID                = (1 << 11),
 255        IB_DEVICE_RC_RNR_NAK_GEN                = (1 << 12),
 256        IB_DEVICE_SRQ_RESIZE                    = (1 << 13),
 257        IB_DEVICE_N_NOTIFY_CQ                   = (1 << 14),
 258
 259        /*
 260         * This device supports a per-device lkey or stag that can be
 261         * used without performing a memory registration for the local
 262         * memory.  Note that ULPs should never check this flag, but
 263         * instead of use the local_dma_lkey flag in the ib_pd structure,
 264         * which will always contain a usable lkey.
 265         */
 266        IB_DEVICE_LOCAL_DMA_LKEY                = (1 << 15),
 267        /* Reserved, old SEND_W_INV             = (1 << 16),*/
 268        IB_DEVICE_MEM_WINDOW                    = (1 << 17),
 269        /*
 270         * Devices should set IB_DEVICE_UD_IP_SUM if they support
 271         * insertion of UDP and TCP checksum on outgoing UD IPoIB
 272         * messages and can verify the validity of checksum for
 273         * incoming messages.  Setting this flag implies that the
 274         * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
 275         */
 276        IB_DEVICE_UD_IP_CSUM                    = (1 << 18),
 277        IB_DEVICE_UD_TSO                        = (1 << 19),
 278        IB_DEVICE_XRC                           = (1 << 20),
 279
 280        /*
 281         * This device supports the IB "base memory management extension",
 282         * which includes support for fast registrations (IB_WR_REG_MR,
 283         * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
 284         * also be set by any iWarp device which must support FRs to comply
 285         * to the iWarp verbs spec.  iWarp devices also support the
 286         * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
 287         * stag.
 288         */
 289        IB_DEVICE_MEM_MGT_EXTENSIONS            = (1 << 21),
 290        IB_DEVICE_BLOCK_MULTICAST_LOOPBACK      = (1 << 22),
 291        IB_DEVICE_MEM_WINDOW_TYPE_2A            = (1 << 23),
 292        IB_DEVICE_MEM_WINDOW_TYPE_2B            = (1 << 24),
 293        IB_DEVICE_RC_IP_CSUM                    = (1 << 25),
 294        /* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
 295        IB_DEVICE_RAW_IP_CSUM                   = (1 << 26),
 296        /*
 297         * Devices should set IB_DEVICE_CROSS_CHANNEL if they
 298         * support execution of WQEs that involve synchronization
 299         * of I/O operations with single completion queue managed
 300         * by hardware.
 301         */
 302        IB_DEVICE_CROSS_CHANNEL                 = (1 << 27),
 303        IB_DEVICE_MANAGED_FLOW_STEERING         = (1 << 29),
 304        IB_DEVICE_INTEGRITY_HANDOVER            = (1 << 30),
 305        IB_DEVICE_ON_DEMAND_PAGING              = (1ULL << 31),
 306        IB_DEVICE_SG_GAPS_REG                   = (1ULL << 32),
 307        IB_DEVICE_VIRTUAL_FUNCTION              = (1ULL << 33),
 308        /* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
 309        IB_DEVICE_RAW_SCATTER_FCS               = (1ULL << 34),
 310        IB_DEVICE_RDMA_NETDEV_OPA               = (1ULL << 35),
 311        /* The device supports padding incoming writes to cacheline. */
 312        IB_DEVICE_PCI_WRITE_END_PADDING         = (1ULL << 36),
 313        IB_DEVICE_ALLOW_USER_UNREG              = (1ULL << 37),
 314};
 315
 316enum ib_atomic_cap {
 317        IB_ATOMIC_NONE,
 318        IB_ATOMIC_HCA,
 319        IB_ATOMIC_GLOB
 320};
 321
 322enum ib_odp_general_cap_bits {
 323        IB_ODP_SUPPORT          = 1 << 0,
 324        IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
 325};
 326
 327enum ib_odp_transport_cap_bits {
 328        IB_ODP_SUPPORT_SEND     = 1 << 0,
 329        IB_ODP_SUPPORT_RECV     = 1 << 1,
 330        IB_ODP_SUPPORT_WRITE    = 1 << 2,
 331        IB_ODP_SUPPORT_READ     = 1 << 3,
 332        IB_ODP_SUPPORT_ATOMIC   = 1 << 4,
 333        IB_ODP_SUPPORT_SRQ_RECV = 1 << 5,
 334};
 335
 336struct ib_odp_caps {
 337        uint64_t general_caps;
 338        struct {
 339                uint32_t  rc_odp_caps;
 340                uint32_t  uc_odp_caps;
 341                uint32_t  ud_odp_caps;
 342                uint32_t  xrc_odp_caps;
 343        } per_transport_caps;
 344};
 345
 346struct ib_rss_caps {
 347        /* Corresponding bit will be set if qp type from
 348         * 'enum ib_qp_type' is supported, e.g.
 349         * supported_qpts |= 1 << IB_QPT_UD
 350         */
 351        u32 supported_qpts;
 352        u32 max_rwq_indirection_tables;
 353        u32 max_rwq_indirection_table_size;
 354};
 355
 356enum ib_tm_cap_flags {
 357        /*  Support tag matching with rendezvous offload for RC transport */
 358        IB_TM_CAP_RNDV_RC = 1 << 0,
 359};
 360
 361struct ib_tm_caps {
 362        /* Max size of RNDV header */
 363        u32 max_rndv_hdr_size;
 364        /* Max number of entries in tag matching list */
 365        u32 max_num_tags;
 366        /* From enum ib_tm_cap_flags */
 367        u32 flags;
 368        /* Max number of outstanding list operations */
 369        u32 max_ops;
 370        /* Max number of SGE in tag matching entry */
 371        u32 max_sge;
 372};
 373
 374struct ib_cq_init_attr {
 375        unsigned int    cqe;
 376        u32             comp_vector;
 377        u32             flags;
 378};
 379
 380enum ib_cq_attr_mask {
 381        IB_CQ_MODERATE = 1 << 0,
 382};
 383
 384struct ib_cq_caps {
 385        u16     max_cq_moderation_count;
 386        u16     max_cq_moderation_period;
 387};
 388
 389struct ib_dm_mr_attr {
 390        u64             length;
 391        u64             offset;
 392        u32             access_flags;
 393};
 394
 395struct ib_dm_alloc_attr {
 396        u64     length;
 397        u32     alignment;
 398        u32     flags;
 399};
 400
 401struct ib_device_attr {
 402        u64                     fw_ver;
 403        __be64                  sys_image_guid;
 404        u64                     max_mr_size;
 405        u64                     page_size_cap;
 406        u32                     vendor_id;
 407        u32                     vendor_part_id;
 408        u32                     hw_ver;
 409        int                     max_qp;
 410        int                     max_qp_wr;
 411        u64                     device_cap_flags;
 412        int                     max_send_sge;
 413        int                     max_recv_sge;
 414        int                     max_sge_rd;
 415        int                     max_cq;
 416        int                     max_cqe;
 417        int                     max_mr;
 418        int                     max_pd;
 419        int                     max_qp_rd_atom;
 420        int                     max_ee_rd_atom;
 421        int                     max_res_rd_atom;
 422        int                     max_qp_init_rd_atom;
 423        int                     max_ee_init_rd_atom;
 424        enum ib_atomic_cap      atomic_cap;
 425        enum ib_atomic_cap      masked_atomic_cap;
 426        int                     max_ee;
 427        int                     max_rdd;
 428        int                     max_mw;
 429        int                     max_raw_ipv6_qp;
 430        int                     max_raw_ethy_qp;
 431        int                     max_mcast_grp;
 432        int                     max_mcast_qp_attach;
 433        int                     max_total_mcast_qp_attach;
 434        int                     max_ah;
 435        int                     max_srq;
 436        int                     max_srq_wr;
 437        int                     max_srq_sge;
 438        unsigned int            max_fast_reg_page_list_len;
 439        unsigned int            max_pi_fast_reg_page_list_len;
 440        u16                     max_pkeys;
 441        u8                      local_ca_ack_delay;
 442        int                     sig_prot_cap;
 443        int                     sig_guard_cap;
 444        struct ib_odp_caps      odp_caps;
 445        uint64_t                timestamp_mask;
 446        uint64_t                hca_core_clock; /* in KHZ */
 447        struct ib_rss_caps      rss_caps;
 448        u32                     max_wq_type_rq;
 449        u32                     raw_packet_caps; /* Use ib_raw_packet_caps enum */
 450        struct ib_tm_caps       tm_caps;
 451        struct ib_cq_caps       cq_caps;
 452        u64                     max_dm_size;
 453        /* Max entries for sgl for optimized performance per READ */
 454        u32                     max_sgl_rd;
 455};
 456
 457enum ib_mtu {
 458        IB_MTU_256  = 1,
 459        IB_MTU_512  = 2,
 460        IB_MTU_1024 = 3,
 461        IB_MTU_2048 = 4,
 462        IB_MTU_4096 = 5
 463};
 464
 465enum opa_mtu {
 466        OPA_MTU_8192 = 6,
 467        OPA_MTU_10240 = 7
 468};
 469
 470static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
 471{
 472        switch (mtu) {
 473        case IB_MTU_256:  return  256;
 474        case IB_MTU_512:  return  512;
 475        case IB_MTU_1024: return 1024;
 476        case IB_MTU_2048: return 2048;
 477        case IB_MTU_4096: return 4096;
 478        default:          return -1;
 479        }
 480}
 481
 482static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
 483{
 484        if (mtu >= 4096)
 485                return IB_MTU_4096;
 486        else if (mtu >= 2048)
 487                return IB_MTU_2048;
 488        else if (mtu >= 1024)
 489                return IB_MTU_1024;
 490        else if (mtu >= 512)
 491                return IB_MTU_512;
 492        else
 493                return IB_MTU_256;
 494}
 495
 496static inline int opa_mtu_enum_to_int(enum opa_mtu mtu)
 497{
 498        switch (mtu) {
 499        case OPA_MTU_8192:
 500                return 8192;
 501        case OPA_MTU_10240:
 502                return 10240;
 503        default:
 504                return(ib_mtu_enum_to_int((enum ib_mtu)mtu));
 505        }
 506}
 507
 508static inline enum opa_mtu opa_mtu_int_to_enum(int mtu)
 509{
 510        if (mtu >= 10240)
 511                return OPA_MTU_10240;
 512        else if (mtu >= 8192)
 513                return OPA_MTU_8192;
 514        else
 515                return ((enum opa_mtu)ib_mtu_int_to_enum(mtu));
 516}
 517
 518enum ib_port_state {
 519        IB_PORT_NOP             = 0,
 520        IB_PORT_DOWN            = 1,
 521        IB_PORT_INIT            = 2,
 522        IB_PORT_ARMED           = 3,
 523        IB_PORT_ACTIVE          = 4,
 524        IB_PORT_ACTIVE_DEFER    = 5
 525};
 526
 527enum ib_port_phys_state {
 528        IB_PORT_PHYS_STATE_SLEEP = 1,
 529        IB_PORT_PHYS_STATE_POLLING = 2,
 530        IB_PORT_PHYS_STATE_DISABLED = 3,
 531        IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
 532        IB_PORT_PHYS_STATE_LINK_UP = 5,
 533        IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
 534        IB_PORT_PHYS_STATE_PHY_TEST = 7,
 535};
 536
 537enum ib_port_width {
 538        IB_WIDTH_1X     = 1,
 539        IB_WIDTH_2X     = 16,
 540        IB_WIDTH_4X     = 2,
 541        IB_WIDTH_8X     = 4,
 542        IB_WIDTH_12X    = 8
 543};
 544
 545static inline int ib_width_enum_to_int(enum ib_port_width width)
 546{
 547        switch (width) {
 548        case IB_WIDTH_1X:  return  1;
 549        case IB_WIDTH_2X:  return  2;
 550        case IB_WIDTH_4X:  return  4;
 551        case IB_WIDTH_8X:  return  8;
 552        case IB_WIDTH_12X: return 12;
 553        default:          return -1;
 554        }
 555}
 556
 557enum ib_port_speed {
 558        IB_SPEED_SDR    = 1,
 559        IB_SPEED_DDR    = 2,
 560        IB_SPEED_QDR    = 4,
 561        IB_SPEED_FDR10  = 8,
 562        IB_SPEED_FDR    = 16,
 563        IB_SPEED_EDR    = 32,
 564        IB_SPEED_HDR    = 64
 565};
 566
 567/**
 568 * struct rdma_hw_stats
 569 * @lock - Mutex to protect parallel write access to lifespan and values
 570 *    of counters, which are 64bits and not guaranteeed to be written
 571 *    atomicaly on 32bits systems.
 572 * @timestamp - Used by the core code to track when the last update was
 573 * @lifespan - Used by the core code to determine how old the counters
 574 *   should be before being updated again.  Stored in jiffies, defaults
 575 *   to 10 milliseconds, drivers can override the default be specifying
 576 *   their own value during their allocation routine.
 577 * @name - Array of pointers to static names used for the counters in
 578 *   directory.
 579 * @num_counters - How many hardware counters there are.  If name is
 580 *   shorter than this number, a kernel oops will result.  Driver authors
 581 *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@name) < num_counters)
 582 *   in their code to prevent this.
 583 * @value - Array of u64 counters that are accessed by the sysfs code and
 584 *   filled in by the drivers get_stats routine
 585 */
 586struct rdma_hw_stats {
 587        struct mutex    lock; /* Protect lifespan and values[] */
 588        unsigned long   timestamp;
 589        unsigned long   lifespan;
 590        const char * const *names;
 591        int             num_counters;
 592        u64             value[];
 593};
 594
 595#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
 596/**
 597 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
 598 *   for drivers.
 599 * @names - Array of static const char *
 600 * @num_counters - How many elements in array
 601 * @lifespan - How many milliseconds between updates
 602 */
 603static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
 604                const char * const *names, int num_counters,
 605                unsigned long lifespan)
 606{
 607        struct rdma_hw_stats *stats;
 608
 609        stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
 610                        GFP_KERNEL);
 611        if (!stats)
 612                return NULL;
 613        stats->names = names;
 614        stats->num_counters = num_counters;
 615        stats->lifespan = msecs_to_jiffies(lifespan);
 616
 617        return stats;
 618}
 619
 620
 621/* Define bits for the various functionality this port needs to be supported by
 622 * the core.
 623 */
 624/* Management                           0x00000FFF */
 625#define RDMA_CORE_CAP_IB_MAD            0x00000001
 626#define RDMA_CORE_CAP_IB_SMI            0x00000002
 627#define RDMA_CORE_CAP_IB_CM             0x00000004
 628#define RDMA_CORE_CAP_IW_CM             0x00000008
 629#define RDMA_CORE_CAP_IB_SA             0x00000010
 630#define RDMA_CORE_CAP_OPA_MAD           0x00000020
 631
 632/* Address format                       0x000FF000 */
 633#define RDMA_CORE_CAP_AF_IB             0x00001000
 634#define RDMA_CORE_CAP_ETH_AH            0x00002000
 635#define RDMA_CORE_CAP_OPA_AH            0x00004000
 636#define RDMA_CORE_CAP_IB_GRH_REQUIRED   0x00008000
 637
 638/* Protocol                             0xFFF00000 */
 639#define RDMA_CORE_CAP_PROT_IB           0x00100000
 640#define RDMA_CORE_CAP_PROT_ROCE         0x00200000
 641#define RDMA_CORE_CAP_PROT_IWARP        0x00400000
 642#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
 643#define RDMA_CORE_CAP_PROT_RAW_PACKET   0x01000000
 644#define RDMA_CORE_CAP_PROT_USNIC        0x02000000
 645
 646#define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
 647                                        | RDMA_CORE_CAP_PROT_ROCE     \
 648                                        | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)
 649
 650#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
 651                                        | RDMA_CORE_CAP_IB_MAD \
 652                                        | RDMA_CORE_CAP_IB_SMI \
 653                                        | RDMA_CORE_CAP_IB_CM  \
 654                                        | RDMA_CORE_CAP_IB_SA  \
 655                                        | RDMA_CORE_CAP_AF_IB)
 656#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
 657                                        | RDMA_CORE_CAP_IB_MAD  \
 658                                        | RDMA_CORE_CAP_IB_CM   \
 659                                        | RDMA_CORE_CAP_AF_IB   \
 660                                        | RDMA_CORE_CAP_ETH_AH)
 661#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP                       \
 662                                        (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
 663                                        | RDMA_CORE_CAP_IB_MAD  \
 664                                        | RDMA_CORE_CAP_IB_CM   \
 665                                        | RDMA_CORE_CAP_AF_IB   \
 666                                        | RDMA_CORE_CAP_ETH_AH)
 667#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
 668                                        | RDMA_CORE_CAP_IW_CM)
 669#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
 670                                        | RDMA_CORE_CAP_OPA_MAD)
 671
 672#define RDMA_CORE_PORT_RAW_PACKET       (RDMA_CORE_CAP_PROT_RAW_PACKET)
 673
 674#define RDMA_CORE_PORT_USNIC            (RDMA_CORE_CAP_PROT_USNIC)
 675
 676struct ib_port_attr {
 677        u64                     subnet_prefix;
 678        enum ib_port_state      state;
 679        enum ib_mtu             max_mtu;
 680        enum ib_mtu             active_mtu;
 681        u32                     phys_mtu;
 682        int                     gid_tbl_len;
 683        unsigned int            ip_gids:1;
 684        /* This is the value from PortInfo CapabilityMask, defined by IBA */
 685        u32                     port_cap_flags;
 686        u32                     max_msg_sz;
 687        u32                     bad_pkey_cntr;
 688        u32                     qkey_viol_cntr;
 689        u16                     pkey_tbl_len;
 690        u32                     sm_lid;
 691        u32                     lid;
 692        u8                      lmc;
 693        u8                      max_vl_num;
 694        u8                      sm_sl;
 695        u8                      subnet_timeout;
 696        u8                      init_type_reply;
 697        u8                      active_width;
 698        u8                      active_speed;
 699        u8                      phys_state;
 700        u16                     port_cap_flags2;
 701};
 702
 703enum ib_device_modify_flags {
 704        IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
 705        IB_DEVICE_MODIFY_NODE_DESC      = 1 << 1
 706};
 707
 708#define IB_DEVICE_NODE_DESC_MAX 64
 709
 710struct ib_device_modify {
 711        u64     sys_image_guid;
 712        char    node_desc[IB_DEVICE_NODE_DESC_MAX];
 713};
 714
 715enum ib_port_modify_flags {
 716        IB_PORT_SHUTDOWN                = 1,
 717        IB_PORT_INIT_TYPE               = (1<<2),
 718        IB_PORT_RESET_QKEY_CNTR         = (1<<3),
 719        IB_PORT_OPA_MASK_CHG            = (1<<4)
 720};
 721
 722struct ib_port_modify {
 723        u32     set_port_cap_mask;
 724        u32     clr_port_cap_mask;
 725        u8      init_type;
 726};
 727
 728enum ib_event_type {
 729        IB_EVENT_CQ_ERR,
 730        IB_EVENT_QP_FATAL,
 731        IB_EVENT_QP_REQ_ERR,
 732        IB_EVENT_QP_ACCESS_ERR,
 733        IB_EVENT_COMM_EST,
 734        IB_EVENT_SQ_DRAINED,
 735        IB_EVENT_PATH_MIG,
 736        IB_EVENT_PATH_MIG_ERR,
 737        IB_EVENT_DEVICE_FATAL,
 738        IB_EVENT_PORT_ACTIVE,
 739        IB_EVENT_PORT_ERR,
 740        IB_EVENT_LID_CHANGE,
 741        IB_EVENT_PKEY_CHANGE,
 742        IB_EVENT_SM_CHANGE,
 743        IB_EVENT_SRQ_ERR,
 744        IB_EVENT_SRQ_LIMIT_REACHED,
 745        IB_EVENT_QP_LAST_WQE_REACHED,
 746        IB_EVENT_CLIENT_REREGISTER,
 747        IB_EVENT_GID_CHANGE,
 748        IB_EVENT_WQ_FATAL,
 749};
 750
 751const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
 752
 753struct ib_event {
 754        struct ib_device        *device;
 755        union {
 756                struct ib_cq    *cq;
 757                struct ib_qp    *qp;
 758                struct ib_srq   *srq;
 759                struct ib_wq    *wq;
 760                u8              port_num;
 761        } element;
 762        enum ib_event_type      event;
 763};
 764
 765struct ib_event_handler {
 766        struct ib_device *device;
 767        void            (*handler)(struct ib_event_handler *, struct ib_event *);
 768        struct list_head  list;
 769};
 770
 771#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)          \
 772        do {                                                    \
 773                (_ptr)->device  = _device;                      \
 774                (_ptr)->handler = _handler;                     \
 775                INIT_LIST_HEAD(&(_ptr)->list);                  \
 776        } while (0)
 777
 778struct ib_global_route {
 779        const struct ib_gid_attr *sgid_attr;
 780        union ib_gid    dgid;
 781        u32             flow_label;
 782        u8              sgid_index;
 783        u8              hop_limit;
 784        u8              traffic_class;
 785};
 786
 787struct ib_grh {
 788        __be32          version_tclass_flow;
 789        __be16          paylen;
 790        u8              next_hdr;
 791        u8              hop_limit;
 792        union ib_gid    sgid;
 793        union ib_gid    dgid;
 794};
 795
 796union rdma_network_hdr {
 797        struct ib_grh ibgrh;
 798        struct {
 799                /* The IB spec states that if it's IPv4, the header
 800                 * is located in the last 20 bytes of the header.
 801                 */
 802                u8              reserved[20];
 803                struct iphdr    roce4grh;
 804        };
 805};
 806
 807#define IB_QPN_MASK             0xFFFFFF
 808
 809enum {
 810        IB_MULTICAST_QPN = 0xffffff
 811};
 812
 813#define IB_LID_PERMISSIVE       cpu_to_be16(0xFFFF)
 814#define IB_MULTICAST_LID_BASE   cpu_to_be16(0xC000)
 815
 816enum ib_ah_flags {
 817        IB_AH_GRH       = 1
 818};
 819
 820enum ib_rate {
 821        IB_RATE_PORT_CURRENT = 0,
 822        IB_RATE_2_5_GBPS = 2,
 823        IB_RATE_5_GBPS   = 5,
 824        IB_RATE_10_GBPS  = 3,
 825        IB_RATE_20_GBPS  = 6,
 826        IB_RATE_30_GBPS  = 4,
 827        IB_RATE_40_GBPS  = 7,
 828        IB_RATE_60_GBPS  = 8,
 829        IB_RATE_80_GBPS  = 9,
 830        IB_RATE_120_GBPS = 10,
 831        IB_RATE_14_GBPS  = 11,
 832        IB_RATE_56_GBPS  = 12,
 833        IB_RATE_112_GBPS = 13,
 834        IB_RATE_168_GBPS = 14,
 835        IB_RATE_25_GBPS  = 15,
 836        IB_RATE_100_GBPS = 16,
 837        IB_RATE_200_GBPS = 17,
 838        IB_RATE_300_GBPS = 18,
 839        IB_RATE_28_GBPS  = 19,
 840        IB_RATE_50_GBPS  = 20,
 841        IB_RATE_400_GBPS = 21,
 842        IB_RATE_600_GBPS = 22,
 843};
 844
 845/**
 846 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 847 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 848 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 849 * @rate: rate to convert.
 850 */
 851__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
 852
 853/**
 854 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 855 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 856 * @rate: rate to convert.
 857 */
 858__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
 859
 860
 861/**
 862 * enum ib_mr_type - memory region type
 863 * @IB_MR_TYPE_MEM_REG:       memory region that is used for
 864 *                            normal registration
 865 * @IB_MR_TYPE_SG_GAPS:       memory region that is capable to
 866 *                            register any arbitrary sg lists (without
 867 *                            the normal mr constraints - see
 868 *                            ib_map_mr_sg)
 869 * @IB_MR_TYPE_DM:            memory region that is used for device
 870 *                            memory registration
 871 * @IB_MR_TYPE_USER:          memory region that is used for the user-space
 872 *                            application
 873 * @IB_MR_TYPE_DMA:           memory region that is used for DMA operations
 874 *                            without address translations (VA=PA)
 875 * @IB_MR_TYPE_INTEGRITY:     memory region that is used for
 876 *                            data integrity operations
 877 */
 878enum ib_mr_type {
 879        IB_MR_TYPE_MEM_REG,
 880        IB_MR_TYPE_SG_GAPS,
 881        IB_MR_TYPE_DM,
 882        IB_MR_TYPE_USER,
 883        IB_MR_TYPE_DMA,
 884        IB_MR_TYPE_INTEGRITY,
 885};
 886
 887enum ib_mr_status_check {
 888        IB_MR_CHECK_SIG_STATUS = 1,
 889};
 890
 891/**
 892 * struct ib_mr_status - Memory region status container
 893 *
 894 * @fail_status: Bitmask of MR checks status. For each
 895 *     failed check a corresponding status bit is set.
 896 * @sig_err: Additional info for IB_MR_CEHCK_SIG_STATUS
 897 *     failure.
 898 */
 899struct ib_mr_status {
 900        u32                 fail_status;
 901        struct ib_sig_err   sig_err;
 902};
 903
 904/**
 905 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 906 * enum.
 907 * @mult: multiple to convert.
 908 */
 909__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
 910
 911struct rdma_ah_init_attr {
 912        struct rdma_ah_attr *ah_attr;
 913        u32 flags;
 914        struct net_device *xmit_slave;
 915};
 916
 917enum rdma_ah_attr_type {
 918        RDMA_AH_ATTR_TYPE_UNDEFINED,
 919        RDMA_AH_ATTR_TYPE_IB,
 920        RDMA_AH_ATTR_TYPE_ROCE,
 921        RDMA_AH_ATTR_TYPE_OPA,
 922};
 923
 924struct ib_ah_attr {
 925        u16                     dlid;
 926        u8                      src_path_bits;
 927};
 928
 929struct roce_ah_attr {
 930        u8                      dmac[ETH_ALEN];
 931};
 932
 933struct opa_ah_attr {
 934        u32                     dlid;
 935        u8                      src_path_bits;
 936        bool                    make_grd;
 937};
 938
 939struct rdma_ah_attr {
 940        struct ib_global_route  grh;
 941        u8                      sl;
 942        u8                      static_rate;
 943        u8                      port_num;
 944        u8                      ah_flags;
 945        enum rdma_ah_attr_type type;
 946        union {
 947                struct ib_ah_attr ib;
 948                struct roce_ah_attr roce;
 949                struct opa_ah_attr opa;
 950        };
 951};
 952
 953enum ib_wc_status {
 954        IB_WC_SUCCESS,
 955        IB_WC_LOC_LEN_ERR,
 956        IB_WC_LOC_QP_OP_ERR,
 957        IB_WC_LOC_EEC_OP_ERR,
 958        IB_WC_LOC_PROT_ERR,
 959        IB_WC_WR_FLUSH_ERR,
 960        IB_WC_MW_BIND_ERR,
 961        IB_WC_BAD_RESP_ERR,
 962        IB_WC_LOC_ACCESS_ERR,
 963        IB_WC_REM_INV_REQ_ERR,
 964        IB_WC_REM_ACCESS_ERR,
 965        IB_WC_REM_OP_ERR,
 966        IB_WC_RETRY_EXC_ERR,
 967        IB_WC_RNR_RETRY_EXC_ERR,
 968        IB_WC_LOC_RDD_VIOL_ERR,
 969        IB_WC_REM_INV_RD_REQ_ERR,
 970        IB_WC_REM_ABORT_ERR,
 971        IB_WC_INV_EECN_ERR,
 972        IB_WC_INV_EEC_STATE_ERR,
 973        IB_WC_FATAL_ERR,
 974        IB_WC_RESP_TIMEOUT_ERR,
 975        IB_WC_GENERAL_ERR
 976};
 977
 978const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
 979
 980enum ib_wc_opcode {
 981        IB_WC_SEND,
 982        IB_WC_RDMA_WRITE,
 983        IB_WC_RDMA_READ,
 984        IB_WC_COMP_SWAP,
 985        IB_WC_FETCH_ADD,
 986        IB_WC_LSO,
 987        IB_WC_LOCAL_INV,
 988        IB_WC_REG_MR,
 989        IB_WC_MASKED_COMP_SWAP,
 990        IB_WC_MASKED_FETCH_ADD,
 991/*
 992 * Set value of IB_WC_RECV so consumers can test if a completion is a
 993 * receive by testing (opcode & IB_WC_RECV).
 994 */
 995        IB_WC_RECV                      = 1 << 7,
 996        IB_WC_RECV_RDMA_WITH_IMM
 997};
 998
 999enum ib_wc_flags {
1000        IB_WC_GRH               = 1,
1001        IB_WC_WITH_IMM          = (1<<1),
1002        IB_WC_WITH_INVALIDATE   = (1<<2),
1003        IB_WC_IP_CSUM_OK        = (1<<3),
1004        IB_WC_WITH_SMAC         = (1<<4),
1005        IB_WC_WITH_VLAN         = (1<<5),
1006        IB_WC_WITH_NETWORK_HDR_TYPE     = (1<<6),
1007};
1008
1009struct ib_wc {
1010        union {
1011                u64             wr_id;
1012                struct ib_cqe   *wr_cqe;
1013        };
1014        enum ib_wc_status       status;
1015        enum ib_wc_opcode       opcode;
1016        u32                     vendor_err;
1017        u32                     byte_len;
1018        struct ib_qp           *qp;
1019        union {
1020                __be32          imm_data;
1021                u32             invalidate_rkey;
1022        } ex;
1023        u32                     src_qp;
1024        u32                     slid;
1025        int                     wc_flags;
1026        u16                     pkey_index;
1027        u8                      sl;
1028        u8                      dlid_path_bits;
1029        u8                      port_num;       /* valid only for DR SMPs on switches */
1030        u8                      smac[ETH_ALEN];
1031        u16                     vlan_id;
1032        u8                      network_hdr_type;
1033};
1034
1035enum ib_cq_notify_flags {
1036        IB_CQ_SOLICITED                 = 1 << 0,
1037        IB_CQ_NEXT_COMP                 = 1 << 1,
1038        IB_CQ_SOLICITED_MASK            = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
1039        IB_CQ_REPORT_MISSED_EVENTS      = 1 << 2,
1040};
1041
1042enum ib_srq_type {
1043        IB_SRQT_BASIC = IB_UVERBS_SRQT_BASIC,
1044        IB_SRQT_XRC = IB_UVERBS_SRQT_XRC,
1045        IB_SRQT_TM = IB_UVERBS_SRQT_TM,
1046};
1047
1048static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
1049{
1050        return srq_type == IB_SRQT_XRC ||
1051               srq_type == IB_SRQT_TM;
1052}
1053
1054enum ib_srq_attr_mask {
1055        IB_SRQ_MAX_WR   = 1 << 0,
1056        IB_SRQ_LIMIT    = 1 << 1,
1057};
1058
1059struct ib_srq_attr {
1060        u32     max_wr;
1061        u32     max_sge;
1062        u32     srq_limit;
1063};
1064
1065struct ib_srq_init_attr {
1066        void                  (*event_handler)(struct ib_event *, void *);
1067        void                   *srq_context;
1068        struct ib_srq_attr      attr;
1069        enum ib_srq_type        srq_type;
1070
1071        struct {
1072                struct ib_cq   *cq;
1073                union {
1074                        struct {
1075                                struct ib_xrcd *xrcd;
1076                        } xrc;
1077
1078                        struct {
1079                                u32             max_num_tags;
1080                        } tag_matching;
1081                };
1082        } ext;
1083};
1084
1085struct ib_qp_cap {
1086        u32     max_send_wr;
1087        u32     max_recv_wr;
1088        u32     max_send_sge;
1089        u32     max_recv_sge;
1090        u32     max_inline_data;
1091
1092        /*
1093         * Maximum number of rdma_rw_ctx structures in flight at a time.
1094         * ib_create_qp() will calculate the right amount of neededed WRs
1095         * and MRs based on this.
1096         */
1097        u32     max_rdma_ctxs;
1098};
1099
1100enum ib_sig_type {
1101        IB_SIGNAL_ALL_WR,
1102        IB_SIGNAL_REQ_WR
1103};
1104
1105enum ib_qp_type {
1106        /*
1107         * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
1108         * here (and in that order) since the MAD layer uses them as
1109         * indices into a 2-entry table.
1110         */
1111        IB_QPT_SMI,
1112        IB_QPT_GSI,
1113
1114        IB_QPT_RC = IB_UVERBS_QPT_RC,
1115        IB_QPT_UC = IB_UVERBS_QPT_UC,
1116        IB_QPT_UD = IB_UVERBS_QPT_UD,
1117        IB_QPT_RAW_IPV6,
1118        IB_QPT_RAW_ETHERTYPE,
1119        IB_QPT_RAW_PACKET = IB_UVERBS_QPT_RAW_PACKET,
1120        IB_QPT_XRC_INI = IB_UVERBS_QPT_XRC_INI,
1121        IB_QPT_XRC_TGT = IB_UVERBS_QPT_XRC_TGT,
1122        IB_QPT_MAX,
1123        IB_QPT_DRIVER = IB_UVERBS_QPT_DRIVER,
1124        /* Reserve a range for qp types internal to the low level driver.
1125         * These qp types will not be visible at the IB core layer, so the
1126         * IB_QPT_MAX usages should not be affected in the core layer
1127         */
1128        IB_QPT_RESERVED1 = 0x1000,
1129        IB_QPT_RESERVED2,
1130        IB_QPT_RESERVED3,
1131        IB_QPT_RESERVED4,
1132        IB_QPT_RESERVED5,
1133        IB_QPT_RESERVED6,
1134        IB_QPT_RESERVED7,
1135        IB_QPT_RESERVED8,
1136        IB_QPT_RESERVED9,
1137        IB_QPT_RESERVED10,
1138};
1139
1140enum ib_qp_create_flags {
1141        IB_QP_CREATE_IPOIB_UD_LSO               = 1 << 0,
1142        IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK   =
1143                IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
1144        IB_QP_CREATE_CROSS_CHANNEL              = 1 << 2,
1145        IB_QP_CREATE_MANAGED_SEND               = 1 << 3,
1146        IB_QP_CREATE_MANAGED_RECV               = 1 << 4,
1147        IB_QP_CREATE_NETIF_QP                   = 1 << 5,
1148        IB_QP_CREATE_INTEGRITY_EN               = 1 << 6,
1149        IB_QP_CREATE_NETDEV_USE                 = 1 << 7,
1150        IB_QP_CREATE_SCATTER_FCS                =
1151                IB_UVERBS_QP_CREATE_SCATTER_FCS,
1152        IB_QP_CREATE_CVLAN_STRIPPING            =
1153                IB_UVERBS_QP_CREATE_CVLAN_STRIPPING,
1154        IB_QP_CREATE_SOURCE_QPN                 = 1 << 10,
1155        IB_QP_CREATE_PCI_WRITE_END_PADDING      =
1156                IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING,
1157        /* reserve bits 26-31 for low level drivers' internal use */
1158        IB_QP_CREATE_RESERVED_START             = 1 << 26,
1159        IB_QP_CREATE_RESERVED_END               = 1 << 31,
1160};
1161
1162/*
1163 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
1164 * callback to destroy the passed in QP.
1165 */
1166
1167struct ib_qp_init_attr {
1168        /* Consumer's event_handler callback must not block */
1169        void                  (*event_handler)(struct ib_event *, void *);
1170
1171        void                   *qp_context;
1172        struct ib_cq           *send_cq;
1173        struct ib_cq           *recv_cq;
1174        struct ib_srq          *srq;
1175        struct ib_xrcd         *xrcd;     /* XRC TGT QPs only */
1176        struct ib_qp_cap        cap;
1177        enum ib_sig_type        sq_sig_type;
1178        enum ib_qp_type         qp_type;
1179        u32                     create_flags;
1180
1181        /*
1182         * Only needed for special QP types, or when using the RW API.
1183         */
1184        u8                      port_num;
1185        struct ib_rwq_ind_table *rwq_ind_tbl;
1186        u32                     source_qpn;
1187};
1188
1189struct ib_qp_open_attr {
1190        void                  (*event_handler)(struct ib_event *, void *);
1191        void                   *qp_context;
1192        u32                     qp_num;
1193        enum ib_qp_type         qp_type;
1194};
1195
1196enum ib_rnr_timeout {
1197        IB_RNR_TIMER_655_36 =  0,
1198        IB_RNR_TIMER_000_01 =  1,
1199        IB_RNR_TIMER_000_02 =  2,
1200        IB_RNR_TIMER_000_03 =  3,
1201        IB_RNR_TIMER_000_04 =  4,
1202        IB_RNR_TIMER_000_06 =  5,
1203        IB_RNR_TIMER_000_08 =  6,
1204        IB_RNR_TIMER_000_12 =  7,
1205        IB_RNR_TIMER_000_16 =  8,
1206        IB_RNR_TIMER_000_24 =  9,
1207        IB_RNR_TIMER_000_32 = 10,
1208        IB_RNR_TIMER_000_48 = 11,
1209        IB_RNR_TIMER_000_64 = 12,
1210        IB_RNR_TIMER_000_96 = 13,
1211        IB_RNR_TIMER_001_28 = 14,
1212        IB_RNR_TIMER_001_92 = 15,
1213        IB_RNR_TIMER_002_56 = 16,
1214        IB_RNR_TIMER_003_84 = 17,
1215        IB_RNR_TIMER_005_12 = 18,
1216        IB_RNR_TIMER_007_68 = 19,
1217        IB_RNR_TIMER_010_24 = 20,
1218        IB_RNR_TIMER_015_36 = 21,
1219        IB_RNR_TIMER_020_48 = 22,
1220        IB_RNR_TIMER_030_72 = 23,
1221        IB_RNR_TIMER_040_96 = 24,
1222        IB_RNR_TIMER_061_44 = 25,
1223        IB_RNR_TIMER_081_92 = 26,
1224        IB_RNR_TIMER_122_88 = 27,
1225        IB_RNR_TIMER_163_84 = 28,
1226        IB_RNR_TIMER_245_76 = 29,
1227        IB_RNR_TIMER_327_68 = 30,
1228        IB_RNR_TIMER_491_52 = 31
1229};
1230
1231enum ib_qp_attr_mask {
1232        IB_QP_STATE                     = 1,
1233        IB_QP_CUR_STATE                 = (1<<1),
1234        IB_QP_EN_SQD_ASYNC_NOTIFY       = (1<<2),
1235        IB_QP_ACCESS_FLAGS              = (1<<3),
1236        IB_QP_PKEY_INDEX                = (1<<4),
1237        IB_QP_PORT                      = (1<<5),
1238        IB_QP_QKEY                      = (1<<6),
1239        IB_QP_AV                        = (1<<7),
1240        IB_QP_PATH_MTU                  = (1<<8),
1241        IB_QP_TIMEOUT                   = (1<<9),
1242        IB_QP_RETRY_CNT                 = (1<<10),
1243        IB_QP_RNR_RETRY                 = (1<<11),
1244        IB_QP_RQ_PSN                    = (1<<12),
1245        IB_QP_MAX_QP_RD_ATOMIC          = (1<<13),
1246        IB_QP_ALT_PATH                  = (1<<14),
1247        IB_QP_MIN_RNR_TIMER             = (1<<15),
1248        IB_QP_SQ_PSN                    = (1<<16),
1249        IB_QP_MAX_DEST_RD_ATOMIC        = (1<<17),
1250        IB_QP_PATH_MIG_STATE            = (1<<18),
1251        IB_QP_CAP                       = (1<<19),
1252        IB_QP_DEST_QPN                  = (1<<20),
1253        IB_QP_RESERVED1                 = (1<<21),
1254        IB_QP_RESERVED2                 = (1<<22),
1255        IB_QP_RESERVED3                 = (1<<23),
1256        IB_QP_RESERVED4                 = (1<<24),
1257        IB_QP_RATE_LIMIT                = (1<<25),
1258};
1259
1260enum ib_qp_state {
1261        IB_QPS_RESET,
1262        IB_QPS_INIT,
1263        IB_QPS_RTR,
1264        IB_QPS_RTS,
1265        IB_QPS_SQD,
1266        IB_QPS_SQE,
1267        IB_QPS_ERR
1268};
1269
1270enum ib_mig_state {
1271        IB_MIG_MIGRATED,
1272        IB_MIG_REARM,
1273        IB_MIG_ARMED
1274};
1275
1276enum ib_mw_type {
1277        IB_MW_TYPE_1 = 1,
1278        IB_MW_TYPE_2 = 2
1279};
1280
1281struct ib_qp_attr {
1282        enum ib_qp_state        qp_state;
1283        enum ib_qp_state        cur_qp_state;
1284        enum ib_mtu             path_mtu;
1285        enum ib_mig_state       path_mig_state;
1286        u32                     qkey;
1287        u32                     rq_psn;
1288        u32                     sq_psn;
1289        u32                     dest_qp_num;
1290        int                     qp_access_flags;
1291        struct ib_qp_cap        cap;
1292        struct rdma_ah_attr     ah_attr;
1293        struct rdma_ah_attr     alt_ah_attr;
1294        u16                     pkey_index;
1295        u16                     alt_pkey_index;
1296        u8                      en_sqd_async_notify;
1297        u8                      sq_draining;
1298        u8                      max_rd_atomic;
1299        u8                      max_dest_rd_atomic;
1300        u8                      min_rnr_timer;
1301        u8                      port_num;
1302        u8                      timeout;
1303        u8                      retry_cnt;
1304        u8                      rnr_retry;
1305        u8                      alt_port_num;
1306        u8                      alt_timeout;
1307        u32                     rate_limit;
1308        struct net_device       *xmit_slave;
1309};
1310
1311enum ib_wr_opcode {
1312        /* These are shared with userspace */
1313        IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
1314        IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
1315        IB_WR_SEND = IB_UVERBS_WR_SEND,
1316        IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
1317        IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
1318        IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
1319        IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
1320        IB_WR_LSO = IB_UVERBS_WR_TSO,
1321        IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
1322        IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
1323        IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
1324        IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
1325                IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
1326        IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
1327                IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1328
1329        /* These are kernel only and can not be issued by userspace */
1330        IB_WR_REG_MR = 0x20,
1331        IB_WR_REG_MR_INTEGRITY,
1332
1333        /* reserve values for low level drivers' internal use.
1334         * These values will not be used at all in the ib core layer.
1335         */
1336        IB_WR_RESERVED1 = 0xf0,
1337        IB_WR_RESERVED2,
1338        IB_WR_RESERVED3,
1339        IB_WR_RESERVED4,
1340        IB_WR_RESERVED5,
1341        IB_WR_RESERVED6,
1342        IB_WR_RESERVED7,
1343        IB_WR_RESERVED8,
1344        IB_WR_RESERVED9,
1345        IB_WR_RESERVED10,
1346};
1347
1348enum ib_send_flags {
1349        IB_SEND_FENCE           = 1,
1350        IB_SEND_SIGNALED        = (1<<1),
1351        IB_SEND_SOLICITED       = (1<<2),
1352        IB_SEND_INLINE          = (1<<3),
1353        IB_SEND_IP_CSUM         = (1<<4),
1354
1355        /* reserve bits 26-31 for low level drivers' internal use */
1356        IB_SEND_RESERVED_START  = (1 << 26),
1357        IB_SEND_RESERVED_END    = (1 << 31),
1358};
1359
1360struct ib_sge {
1361        u64     addr;
1362        u32     length;
1363        u32     lkey;
1364};
1365
1366struct ib_cqe {
1367        void (*done)(struct ib_cq *cq, struct ib_wc *wc);
1368};
1369
1370struct ib_send_wr {
1371        struct ib_send_wr      *next;
1372        union {
1373                u64             wr_id;
1374                struct ib_cqe   *wr_cqe;
1375        };
1376        struct ib_sge          *sg_list;
1377        int                     num_sge;
1378        enum ib_wr_opcode       opcode;
1379        int                     send_flags;
1380        union {
1381                __be32          imm_data;
1382                u32             invalidate_rkey;
1383        } ex;
1384};
1385
1386struct ib_rdma_wr {
1387        struct ib_send_wr       wr;
1388        u64                     remote_addr;
1389        u32                     rkey;
1390};
1391
1392static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
1393{
1394        return container_of(wr, struct ib_rdma_wr, wr);
1395}
1396
1397struct ib_atomic_wr {
1398        struct ib_send_wr       wr;
1399        u64                     remote_addr;
1400        u64                     compare_add;
1401        u64                     swap;
1402        u64                     compare_add_mask;
1403        u64                     swap_mask;
1404        u32                     rkey;
1405};
1406
1407static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
1408{
1409        return container_of(wr, struct ib_atomic_wr, wr);
1410}
1411
1412struct ib_ud_wr {
1413        struct ib_send_wr       wr;
1414        struct ib_ah            *ah;
1415        void                    *header;
1416        int                     hlen;
1417        int                     mss;
1418        u32                     remote_qpn;
1419        u32                     remote_qkey;
1420        u16                     pkey_index; /* valid for GSI only */
1421        u8                      port_num;   /* valid for DR SMPs on switch only */
1422};
1423
1424static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
1425{
1426        return container_of(wr, struct ib_ud_wr, wr);
1427}
1428
1429struct ib_reg_wr {
1430        struct ib_send_wr       wr;
1431        struct ib_mr            *mr;
1432        u32                     key;
1433        int                     access;
1434};
1435
1436static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
1437{
1438        return container_of(wr, struct ib_reg_wr, wr);
1439}
1440
1441struct ib_recv_wr {
1442        struct ib_recv_wr      *next;
1443        union {
1444                u64             wr_id;
1445                struct ib_cqe   *wr_cqe;
1446        };
1447        struct ib_sge          *sg_list;
1448        int                     num_sge;
1449};
1450
1451enum ib_access_flags {
1452        IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
1453        IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
1454        IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
1455        IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
1456        IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
1457        IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
1458        IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
1459        IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
1460        IB_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_RELAXED_ORDERING,
1461
1462        IB_ACCESS_OPTIONAL = IB_UVERBS_ACCESS_OPTIONAL_RANGE,
1463        IB_ACCESS_SUPPORTED =
1464                ((IB_ACCESS_HUGETLB << 1) - 1) | IB_ACCESS_OPTIONAL,
1465};
1466
1467/*
1468 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
1469 * are hidden here instead of a uapi header!
1470 */
1471enum ib_mr_rereg_flags {
1472        IB_MR_REREG_TRANS       = 1,
1473        IB_MR_REREG_PD          = (1<<1),
1474        IB_MR_REREG_ACCESS      = (1<<2),
1475        IB_MR_REREG_SUPPORTED   = ((IB_MR_REREG_ACCESS << 1) - 1)
1476};
1477
1478struct ib_umem;
1479
1480enum rdma_remove_reason {
1481        /*
1482         * Userspace requested uobject deletion or initial try
1483         * to remove uobject via cleanup. Call could fail
1484         */
1485        RDMA_REMOVE_DESTROY,
1486        /* Context deletion. This call should delete the actual object itself */
1487        RDMA_REMOVE_CLOSE,
1488        /* Driver is being hot-unplugged. This call should delete the actual object itself */
1489        RDMA_REMOVE_DRIVER_REMOVE,
1490        /* uobj is being cleaned-up before being committed */
1491        RDMA_REMOVE_ABORT,
1492        /*
1493         * uobj has been fully created, with the uobj->object set, but is being
1494         * cleaned up before being comitted
1495         */
1496        RDMA_REMOVE_ABORT_HWOBJ,
1497};
1498
1499struct ib_rdmacg_object {
1500#ifdef CONFIG_CGROUP_RDMA
1501        struct rdma_cgroup      *cg;            /* owner rdma cgroup */
1502#endif
1503};
1504
1505struct ib_ucontext {
1506        struct ib_device       *device;
1507        struct ib_uverbs_file  *ufile;
1508        /*
1509         * 'closing' can be read by the driver only during a destroy callback,
1510         * it is set when we are closing the file descriptor and indicates
1511         * that mm_sem may be locked.
1512         */
1513        bool closing;
1514
1515        bool cleanup_retryable;
1516
1517        struct ib_rdmacg_object cg_obj;
1518        /*
1519         * Implementation details of the RDMA core, don't use in drivers:
1520         */
1521        struct rdma_restrack_entry res;
1522        struct xarray mmap_xa;
1523};
1524
1525struct ib_uobject {
1526        u64                     user_handle;    /* handle given to us by userspace */
1527        /* ufile & ucontext owning this object */
1528        struct ib_uverbs_file  *ufile;
1529        /* FIXME, save memory: ufile->context == context */
1530        struct ib_ucontext     *context;        /* associated user context */
1531        void                   *object;         /* containing object */
1532        struct list_head        list;           /* link to context's list */
1533        struct ib_rdmacg_object cg_obj;         /* rdmacg object */
1534        int                     id;             /* index into kernel idr */
1535        struct kref             ref;
1536        atomic_t                usecnt;         /* protects exclusive access */
1537        struct rcu_head         rcu;            /* kfree_rcu() overhead */
1538
1539        const struct uverbs_api_object *uapi_object;
1540};
1541
1542struct ib_udata {
1543        const void __user *inbuf;
1544        void __user *outbuf;
1545        size_t       inlen;
1546        size_t       outlen;
1547};
1548
1549struct ib_pd {
1550        u32                     local_dma_lkey;
1551        u32                     flags;
1552        struct ib_device       *device;
1553        struct ib_uobject      *uobject;
1554        atomic_t                usecnt; /* count all resources */
1555
1556        u32                     unsafe_global_rkey;
1557
1558        /*
1559         * Implementation details of the RDMA core, don't use in drivers:
1560         */
1561        struct ib_mr           *__internal_mr;
1562        struct rdma_restrack_entry res;
1563};
1564
1565struct ib_xrcd {
1566        struct ib_device       *device;
1567        atomic_t                usecnt; /* count all exposed resources */
1568        struct inode           *inode;
1569
1570        struct mutex            tgt_qp_mutex;
1571        struct list_head        tgt_qp_list;
1572};
1573
1574struct ib_ah {
1575        struct ib_device        *device;
1576        struct ib_pd            *pd;
1577        struct ib_uobject       *uobject;
1578        const struct ib_gid_attr *sgid_attr;
1579        enum rdma_ah_attr_type  type;
1580};
1581
1582typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1583
1584enum ib_poll_context {
1585        IB_POLL_SOFTIRQ,           /* poll from softirq context */
1586        IB_POLL_WORKQUEUE,         /* poll from workqueue */
1587        IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
1588        IB_POLL_LAST_POOL_TYPE = IB_POLL_UNBOUND_WORKQUEUE,
1589
1590        IB_POLL_DIRECT,            /* caller context, no hw completions */
1591};
1592
1593struct ib_cq {
1594        struct ib_device       *device;
1595        struct ib_ucq_object   *uobject;
1596        ib_comp_handler         comp_handler;
1597        void                  (*event_handler)(struct ib_event *, void *);
1598        void                   *cq_context;
1599        int                     cqe;
1600        unsigned int            cqe_used;
1601        atomic_t                usecnt; /* count number of work queues */
1602        enum ib_poll_context    poll_ctx;
1603        struct ib_wc            *wc;
1604        struct list_head        pool_entry;
1605        union {
1606                struct irq_poll         iop;
1607                struct work_struct      work;
1608        };
1609        struct workqueue_struct *comp_wq;
1610        struct dim *dim;
1611
1612        /* updated only by trace points */
1613        ktime_t timestamp;
1614        u8 interrupt:1;
1615        u8 shared:1;
1616        unsigned int comp_vector;
1617
1618        /*
1619         * Implementation details of the RDMA core, don't use in drivers:
1620         */
1621        struct rdma_restrack_entry res;
1622};
1623
1624struct ib_srq {
1625        struct ib_device       *device;
1626        struct ib_pd           *pd;
1627        struct ib_usrq_object  *uobject;
1628        void                  (*event_handler)(struct ib_event *, void *);
1629        void                   *srq_context;
1630        enum ib_srq_type        srq_type;
1631        atomic_t                usecnt;
1632
1633        struct {
1634                struct ib_cq   *cq;
1635                union {
1636                        struct {
1637                                struct ib_xrcd *xrcd;
1638                                u32             srq_num;
1639                        } xrc;
1640                };
1641        } ext;
1642};
1643
1644enum ib_raw_packet_caps {
1645        /* Strip cvlan from incoming packet and report it in the matching work
1646         * completion is supported.
1647         */
1648        IB_RAW_PACKET_CAP_CVLAN_STRIPPING       = (1 << 0),
1649        /* Scatter FCS field of an incoming packet to host memory is supported.
1650         */
1651        IB_RAW_PACKET_CAP_SCATTER_FCS           = (1 << 1),
1652        /* Checksum offloads are supported (for both send and receive). */
1653        IB_RAW_PACKET_CAP_IP_CSUM               = (1 << 2),
1654        /* When a packet is received for an RQ with no receive WQEs, the
1655         * packet processing is delayed.
1656         */
1657        IB_RAW_PACKET_CAP_DELAY_DROP            = (1 << 3),
1658};
1659
1660enum ib_wq_type {
1661        IB_WQT_RQ = IB_UVERBS_WQT_RQ,
1662};
1663
1664enum ib_wq_state {
1665        IB_WQS_RESET,
1666        IB_WQS_RDY,
1667        IB_WQS_ERR
1668};
1669
1670struct ib_wq {
1671        struct ib_device       *device;
1672        struct ib_uwq_object   *uobject;
1673        void                *wq_context;
1674        void                (*event_handler)(struct ib_event *, void *);
1675        struct ib_pd           *pd;
1676        struct ib_cq           *cq;
1677        u32             wq_num;
1678        enum ib_wq_state       state;
1679        enum ib_wq_type wq_type;
1680        atomic_t                usecnt;
1681};
1682
1683enum ib_wq_flags {
1684        IB_WQ_FLAGS_CVLAN_STRIPPING     = IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING,
1685        IB_WQ_FLAGS_SCATTER_FCS         = IB_UVERBS_WQ_FLAGS_SCATTER_FCS,
1686        IB_WQ_FLAGS_DELAY_DROP          = IB_UVERBS_WQ_FLAGS_DELAY_DROP,
1687        IB_WQ_FLAGS_PCI_WRITE_END_PADDING =
1688                                IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING,
1689};
1690
1691struct ib_wq_init_attr {
1692        void                   *wq_context;
1693        enum ib_wq_type wq_type;
1694        u32             max_wr;
1695        u32             max_sge;
1696        struct  ib_cq          *cq;
1697        void                (*event_handler)(struct ib_event *, void *);
1698        u32             create_flags; /* Use enum ib_wq_flags */
1699};
1700
1701enum ib_wq_attr_mask {
1702        IB_WQ_STATE             = 1 << 0,
1703        IB_WQ_CUR_STATE         = 1 << 1,
1704        IB_WQ_FLAGS             = 1 << 2,
1705};
1706
1707struct ib_wq_attr {
1708        enum    ib_wq_state     wq_state;
1709        enum    ib_wq_state     curr_wq_state;
1710        u32                     flags; /* Use enum ib_wq_flags */
1711        u32                     flags_mask; /* Use enum ib_wq_flags */
1712};
1713
1714struct ib_rwq_ind_table {
1715        struct ib_device        *device;
1716        struct ib_uobject      *uobject;
1717        atomic_t                usecnt;
1718        u32             ind_tbl_num;
1719        u32             log_ind_tbl_size;
1720        struct ib_wq    **ind_tbl;
1721};
1722
1723struct ib_rwq_ind_table_init_attr {
1724        u32             log_ind_tbl_size;
1725        /* Each entry is a pointer to Receive Work Queue */
1726        struct ib_wq    **ind_tbl;
1727};
1728
1729enum port_pkey_state {
1730        IB_PORT_PKEY_NOT_VALID = 0,
1731        IB_PORT_PKEY_VALID = 1,
1732        IB_PORT_PKEY_LISTED = 2,
1733};
1734
1735struct ib_qp_security;
1736
1737struct ib_port_pkey {
1738        enum port_pkey_state    state;
1739        u16                     pkey_index;
1740        u8                      port_num;
1741        struct list_head        qp_list;
1742        struct list_head        to_error_list;
1743        struct ib_qp_security  *sec;
1744};
1745
1746struct ib_ports_pkeys {
1747        struct ib_port_pkey     main;
1748        struct ib_port_pkey     alt;
1749};
1750
1751struct ib_qp_security {
1752        struct ib_qp           *qp;
1753        struct ib_device       *dev;
1754        /* Hold this mutex when changing port and pkey settings. */
1755        struct mutex            mutex;
1756        struct ib_ports_pkeys  *ports_pkeys;
1757        /* A list of all open shared QP handles.  Required to enforce security
1758         * properly for all users of a shared QP.
1759         */
1760        struct list_head        shared_qp_list;
1761        void                   *security;
1762        bool                    destroying;
1763        atomic_t                error_list_count;
1764        struct completion       error_complete;
1765        int                     error_comps_pending;
1766};
1767
1768/*
1769 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
1770 * @max_read_sge:  Maximum SGE elements per RDMA READ request.
1771 */
1772struct ib_qp {
1773        struct ib_device       *device;
1774        struct ib_pd           *pd;
1775        struct ib_cq           *send_cq;
1776        struct ib_cq           *recv_cq;
1777        spinlock_t              mr_lock;
1778        int                     mrs_used;
1779        struct list_head        rdma_mrs;
1780        struct list_head        sig_mrs;
1781        struct ib_srq          *srq;
1782        struct ib_xrcd         *xrcd; /* XRC TGT QPs only */
1783        struct list_head        xrcd_list;
1784
1785        /* count times opened, mcast attaches, flow attaches */
1786        atomic_t                usecnt;
1787        struct list_head        open_list;
1788        struct ib_qp           *real_qp;
1789        struct ib_uqp_object   *uobject;
1790        void                  (*event_handler)(struct ib_event *, void *);
1791        void                   *qp_context;
1792        /* sgid_attrs associated with the AV's */
1793        const struct ib_gid_attr *av_sgid_attr;
1794        const struct ib_gid_attr *alt_path_sgid_attr;
1795        u32                     qp_num;
1796        u32                     max_write_sge;
1797        u32                     max_read_sge;
1798        enum ib_qp_type         qp_type;
1799        struct ib_rwq_ind_table *rwq_ind_tbl;
1800        struct ib_qp_security  *qp_sec;
1801        u8                      port;
1802
1803        bool                    integrity_en;
1804        /*
1805         * Implementation details of the RDMA core, don't use in drivers:
1806         */
1807        struct rdma_restrack_entry     res;
1808
1809        /* The counter the qp is bind to */
1810        struct rdma_counter    *counter;
1811};
1812
1813struct ib_dm {
1814        struct ib_device  *device;
1815        u32                length;
1816        u32                flags;
1817        struct ib_uobject *uobject;
1818        atomic_t           usecnt;
1819};
1820
1821struct ib_mr {
1822        struct ib_device  *device;
1823        struct ib_pd      *pd;
1824        u32                lkey;
1825        u32                rkey;
1826        u64                iova;
1827        u64                length;
1828        unsigned int       page_size;
1829        enum ib_mr_type    type;
1830        bool               need_inval;
1831        union {
1832                struct ib_uobject       *uobject;       /* user */
1833                struct list_head        qp_entry;       /* FR */
1834        };
1835
1836        struct ib_dm      *dm;
1837        struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */
1838        /*
1839         * Implementation details of the RDMA core, don't use in drivers:
1840         */
1841        struct rdma_restrack_entry res;
1842};
1843
1844struct ib_mw {
1845        struct ib_device        *device;
1846        struct ib_pd            *pd;
1847        struct ib_uobject       *uobject;
1848        u32                     rkey;
1849        enum ib_mw_type         type;
1850};
1851
1852/* Supported steering options */
1853enum ib_flow_attr_type {
1854        /* steering according to rule specifications */
1855        IB_FLOW_ATTR_NORMAL             = 0x0,
1856        /* default unicast and multicast rule -
1857         * receive all Eth traffic which isn't steered to any QP
1858         */
1859        IB_FLOW_ATTR_ALL_DEFAULT        = 0x1,
1860        /* default multicast rule -
1861         * receive all Eth multicast traffic which isn't steered to any QP
1862         */
1863        IB_FLOW_ATTR_MC_DEFAULT         = 0x2,
1864        /* sniffer rule - receive all port traffic */
1865        IB_FLOW_ATTR_SNIFFER            = 0x3
1866};
1867
1868/* Supported steering header types */
1869enum ib_flow_spec_type {
1870        /* L2 headers*/
1871        IB_FLOW_SPEC_ETH                = 0x20,
1872        IB_FLOW_SPEC_IB                 = 0x22,
1873        /* L3 header*/
1874        IB_FLOW_SPEC_IPV4               = 0x30,
1875        IB_FLOW_SPEC_IPV6               = 0x31,
1876        IB_FLOW_SPEC_ESP                = 0x34,
1877        /* L4 headers*/
1878        IB_FLOW_SPEC_TCP                = 0x40,
1879        IB_FLOW_SPEC_UDP                = 0x41,
1880        IB_FLOW_SPEC_VXLAN_TUNNEL       = 0x50,
1881        IB_FLOW_SPEC_GRE                = 0x51,
1882        IB_FLOW_SPEC_MPLS               = 0x60,
1883        IB_FLOW_SPEC_INNER              = 0x100,
1884        /* Actions */
1885        IB_FLOW_SPEC_ACTION_TAG         = 0x1000,
1886        IB_FLOW_SPEC_ACTION_DROP        = 0x1001,
1887        IB_FLOW_SPEC_ACTION_HANDLE      = 0x1002,
1888        IB_FLOW_SPEC_ACTION_COUNT       = 0x1003,
1889};
1890#define IB_FLOW_SPEC_LAYER_MASK 0xF0
1891#define IB_FLOW_SPEC_SUPPORT_LAYERS 10
1892
1893/* Flow steering rule priority is set according to it's domain.
1894 * Lower domain value means higher priority.
1895 */
1896enum ib_flow_domain {
1897        IB_FLOW_DOMAIN_USER,
1898        IB_FLOW_DOMAIN_ETHTOOL,
1899        IB_FLOW_DOMAIN_RFS,
1900        IB_FLOW_DOMAIN_NIC,
1901        IB_FLOW_DOMAIN_NUM /* Must be last */
1902};
1903
1904enum ib_flow_flags {
1905        IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
1906        IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */
1907        IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 3  /* Must be last */
1908};
1909
1910struct ib_flow_eth_filter {
1911        u8      dst_mac[6];
1912        u8      src_mac[6];
1913        __be16  ether_type;
1914        __be16  vlan_tag;
1915        /* Must be last */
1916        u8      real_sz[];
1917};
1918
1919struct ib_flow_spec_eth {
1920        u32                       type;
1921        u16                       size;
1922        struct ib_flow_eth_filter val;
1923        struct ib_flow_eth_filter mask;
1924};
1925
1926struct ib_flow_ib_filter {
1927        __be16 dlid;
1928        __u8   sl;
1929        /* Must be last */
1930        u8      real_sz[];
1931};
1932
1933struct ib_flow_spec_ib {
1934        u32                      type;
1935        u16                      size;
1936        struct ib_flow_ib_filter val;
1937        struct ib_flow_ib_filter mask;
1938};
1939
1940/* IPv4 header flags */
1941enum ib_ipv4_flags {
1942        IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
1943        IB_IPV4_MORE_FRAG = 0X4  /* For All fragmented packets except the
1944                                    last have this flag set */
1945};
1946
1947struct ib_flow_ipv4_filter {
1948        __be32  src_ip;
1949        __be32  dst_ip;
1950        u8      proto;
1951        u8      tos;
1952        u8      ttl;
1953        u8      flags;
1954        /* Must be last */
1955        u8      real_sz[];
1956};
1957
1958struct ib_flow_spec_ipv4 {
1959        u32                        type;
1960        u16                        size;
1961        struct ib_flow_ipv4_filter val;
1962        struct ib_flow_ipv4_filter mask;
1963};
1964
1965struct ib_flow_ipv6_filter {
1966        u8      src_ip[16];
1967        u8      dst_ip[16];
1968        __be32  flow_label;
1969        u8      next_hdr;
1970        u8      traffic_class;
1971        u8      hop_limit;
1972        /* Must be last */
1973        u8      real_sz[];
1974};
1975
1976struct ib_flow_spec_ipv6 {
1977        u32                        type;
1978        u16                        size;
1979        struct ib_flow_ipv6_filter val;
1980        struct ib_flow_ipv6_filter mask;
1981};
1982
1983struct ib_flow_tcp_udp_filter {
1984        __be16  dst_port;
1985        __be16  src_port;
1986        /* Must be last */
1987        u8      real_sz[];
1988};
1989
1990struct ib_flow_spec_tcp_udp {
1991        u32                           type;
1992        u16                           size;
1993        struct ib_flow_tcp_udp_filter val;
1994        struct ib_flow_tcp_udp_filter mask;
1995};
1996
1997struct ib_flow_tunnel_filter {
1998        __be32  tunnel_id;
1999        u8      real_sz[];
2000};
2001
2002/* ib_flow_spec_tunnel describes the Vxlan tunnel
2003 * the tunnel_id from val has the vni value
2004 */
2005struct ib_flow_spec_tunnel {
2006        u32                           type;
2007        u16                           size;
2008        struct ib_flow_tunnel_filter  val;
2009        struct ib_flow_tunnel_filter  mask;
2010};
2011
2012struct ib_flow_esp_filter {
2013        __be32  spi;
2014        __be32  seq;
2015        /* Must be last */
2016        u8      real_sz[];
2017};
2018
2019struct ib_flow_spec_esp {
2020        u32                           type;
2021        u16                           size;
2022        struct ib_flow_esp_filter     val;
2023        struct ib_flow_esp_filter     mask;
2024};
2025
2026struct ib_flow_gre_filter {
2027        __be16 c_ks_res0_ver;
2028        __be16 protocol;
2029        __be32 key;
2030        /* Must be last */
2031        u8      real_sz[];
2032};
2033
2034struct ib_flow_spec_gre {
2035        u32                           type;
2036        u16                           size;
2037        struct ib_flow_gre_filter     val;
2038        struct ib_flow_gre_filter     mask;
2039};
2040
2041struct ib_flow_mpls_filter {
2042        __be32 tag;
2043        /* Must be last */
2044        u8      real_sz[];
2045};
2046
2047struct ib_flow_spec_mpls {
2048        u32                           type;
2049        u16                           size;
2050        struct ib_flow_mpls_filter     val;
2051        struct ib_flow_mpls_filter     mask;
2052};
2053
2054struct ib_flow_spec_action_tag {
2055        enum ib_flow_spec_type        type;
2056        u16                           size;
2057        u32                           tag_id;
2058};
2059
2060struct ib_flow_spec_action_drop {
2061        enum ib_flow_spec_type        type;
2062        u16                           size;
2063};
2064
2065struct ib_flow_spec_action_handle {
2066        enum ib_flow_spec_type        type;
2067        u16                           size;
2068        struct ib_flow_action        *act;
2069};
2070
2071enum ib_counters_description {
2072        IB_COUNTER_PACKETS,
2073        IB_COUNTER_BYTES,
2074};
2075
2076struct ib_flow_spec_action_count {
2077        enum ib_flow_spec_type type;
2078        u16 size;
2079        struct ib_counters *counters;
2080};
2081
2082union ib_flow_spec {
2083        struct {
2084                u32                     type;
2085                u16                     size;
2086        };
2087        struct ib_flow_spec_eth         eth;
2088        struct ib_flow_spec_ib          ib;
2089        struct ib_flow_spec_ipv4        ipv4;
2090        struct ib_flow_spec_tcp_udp     tcp_udp;
2091        struct ib_flow_spec_ipv6        ipv6;
2092        struct ib_flow_spec_tunnel      tunnel;
2093        struct ib_flow_spec_esp         esp;
2094        struct ib_flow_spec_gre         gre;
2095        struct ib_flow_spec_mpls        mpls;
2096        struct ib_flow_spec_action_tag  flow_tag;
2097        struct ib_flow_spec_action_drop drop;
2098        struct ib_flow_spec_action_handle action;
2099        struct ib_flow_spec_action_count flow_count;
2100};
2101
2102struct ib_flow_attr {
2103        enum ib_flow_attr_type type;
2104        u16          size;
2105        u16          priority;
2106        u32          flags;
2107        u8           num_of_specs;
2108        u8           port;
2109        union ib_flow_spec flows[];
2110};
2111
2112struct ib_flow {
2113        struct ib_qp            *qp;
2114        struct ib_device        *device;
2115        struct ib_uobject       *uobject;
2116};
2117
2118enum ib_flow_action_type {
2119        IB_FLOW_ACTION_UNSPECIFIED,
2120        IB_FLOW_ACTION_ESP = 1,
2121};
2122
2123struct ib_flow_action_attrs_esp_keymats {
2124        enum ib_uverbs_flow_action_esp_keymat                   protocol;
2125        union {
2126                struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
2127        } keymat;
2128};
2129
2130struct ib_flow_action_attrs_esp_replays {
2131        enum ib_uverbs_flow_action_esp_replay                   protocol;
2132        union {
2133                struct ib_uverbs_flow_action_esp_replay_bmp     bmp;
2134        } replay;
2135};
2136
2137enum ib_flow_action_attrs_esp_flags {
2138        /* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags
2139         * This is done in order to share the same flags between user-space and
2140         * kernel and spare an unnecessary translation.
2141         */
2142
2143        /* Kernel flags */
2144        IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED  = 1ULL << 32,
2145        IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS  = 1ULL << 33,
2146};
2147
2148struct ib_flow_spec_list {
2149        struct ib_flow_spec_list        *next;
2150        union ib_flow_spec              spec;
2151};
2152
2153struct ib_flow_action_attrs_esp {
2154        struct ib_flow_action_attrs_esp_keymats         *keymat;
2155        struct ib_flow_action_attrs_esp_replays         *replay;
2156        struct ib_flow_spec_list                        *encap;
2157        /* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
2158         * Value of 0 is a valid value.
2159         */
2160        u32                                             esn;
2161        u32                                             spi;
2162        u32                                             seq;
2163        u32                                             tfc_pad;
2164        /* Use enum ib_flow_action_attrs_esp_flags */
2165        u64                                             flags;
2166        u64                                             hard_limit_pkts;
2167};
2168
2169struct ib_flow_action {
2170        struct ib_device                *device;
2171        struct ib_uobject               *uobject;
2172        enum ib_flow_action_type        type;
2173        atomic_t                        usecnt;
2174};
2175
2176struct ib_mad;
2177struct ib_grh;
2178
2179enum ib_process_mad_flags {
2180        IB_MAD_IGNORE_MKEY      = 1,
2181        IB_MAD_IGNORE_BKEY      = 2,
2182        IB_MAD_IGNORE_ALL       = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
2183};
2184
2185enum ib_mad_result {
2186        IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
2187        IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
2188        IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
2189        IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
2190};
2191
2192struct ib_port_cache {
2193        u64                   subnet_prefix;
2194        struct ib_pkey_cache  *pkey;
2195        struct ib_gid_table   *gid;
2196        u8                     lmc;
2197        enum ib_port_state     port_state;
2198};
2199
2200struct ib_port_immutable {
2201        int                           pkey_tbl_len;
2202        int                           gid_tbl_len;
2203        u32                           core_cap_flags;
2204        u32                           max_mad_size;
2205};
2206
2207struct ib_port_data {
2208        struct ib_device *ib_dev;
2209
2210        struct ib_port_immutable immutable;
2211
2212        spinlock_t pkey_list_lock;
2213        struct list_head pkey_list;
2214
2215        struct ib_port_cache cache;
2216
2217        spinlock_t netdev_lock;
2218        struct net_device __rcu *netdev;
2219        struct hlist_node ndev_hash_link;
2220        struct rdma_port_counter port_counter;
2221        struct rdma_hw_stats *hw_stats;
2222};
2223
2224/* rdma netdev type - specifies protocol type */
2225enum rdma_netdev_t {
2226        RDMA_NETDEV_OPA_VNIC,
2227        RDMA_NETDEV_IPOIB,
2228};
2229
2230/**
2231 * struct rdma_netdev - rdma netdev
2232 * For cases where netstack interfacing is required.
2233 */
2234struct rdma_netdev {
2235        void              *clnt_priv;
2236        struct ib_device  *hca;
2237        u8                 port_num;
2238        int                mtu;
2239
2240        /*
2241         * cleanup function must be specified.
2242         * FIXME: This is only used for OPA_VNIC and that usage should be
2243         * removed too.
2244         */
2245        void (*free_rdma_netdev)(struct net_device *netdev);
2246
2247        /* control functions */
2248        void (*set_id)(struct net_device *netdev, int id);
2249        /* send packet */
2250        int (*send)(struct net_device *dev, struct sk_buff *skb,
2251                    struct ib_ah *address, u32 dqpn);
2252        /* multicast */
2253        int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
2254                            union ib_gid *gid, u16 mlid,
2255                            int set_qkey, u32 qkey);
2256        int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
2257                            union ib_gid *gid, u16 mlid);
2258};
2259
2260struct rdma_netdev_alloc_params {
2261        size_t sizeof_priv;
2262        unsigned int txqs;
2263        unsigned int rxqs;
2264        void *param;
2265
2266        int (*initialize_rdma_netdev)(struct ib_device *device, u8 port_num,
2267                                      struct net_device *netdev, void *param);
2268};
2269
2270struct ib_odp_counters {
2271        atomic64_t faults;
2272        atomic64_t invalidations;
2273};
2274
2275struct ib_counters {
2276        struct ib_device        *device;
2277        struct ib_uobject       *uobject;
2278        /* num of objects attached */
2279        atomic_t        usecnt;
2280};
2281
2282struct ib_counters_read_attr {
2283        u64     *counters_buff;
2284        u32     ncounters;
2285        u32     flags; /* use enum ib_read_counters_flags */
2286};
2287
2288struct uverbs_attr_bundle;
2289struct iw_cm_id;
2290struct iw_cm_conn_param;
2291
2292#define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member)                      \
2293        .size_##ib_struct =                                                    \
2294                (sizeof(struct drv_struct) +                                   \
2295                 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) +      \
2296                 BUILD_BUG_ON_ZERO(                                            \
2297                         !__same_type(((struct drv_struct *)NULL)->member,     \
2298                                      struct ib_struct)))
2299
2300#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp)                         \
2301        ((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp))
2302
2303#define rdma_zalloc_drv_obj(ib_dev, ib_type)                                   \
2304        rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)
2305
2306#define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
2307
2308struct rdma_user_mmap_entry {
2309        struct kref ref;
2310        struct ib_ucontext *ucontext;
2311        unsigned long start_pgoff;
2312        size_t npages;
2313        bool driver_removed;
2314};
2315
2316/* Return the offset (in bytes) the user should pass to libc's mmap() */
2317static inline u64
2318rdma_user_mmap_get_offset(const struct rdma_user_mmap_entry *entry)
2319{
2320        return (u64)entry->start_pgoff << PAGE_SHIFT;
2321}
2322
2323/**
2324 * struct ib_device_ops - InfiniBand device operations
2325 * This structure defines all the InfiniBand device operations, providers will
2326 * need to define the supported operations, otherwise they will be set to null.
2327 */
2328struct ib_device_ops {
2329        struct module *owner;
2330        enum rdma_driver_id driver_id;
2331        u32 uverbs_abi_ver;
2332        unsigned int uverbs_no_driver_id_binding:1;
2333
2334        int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
2335                         const struct ib_send_wr **bad_send_wr);
2336        int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
2337                         const struct ib_recv_wr **bad_recv_wr);
2338        void (*drain_rq)(struct ib_qp *qp);
2339        void (*drain_sq)(struct ib_qp *qp);
2340        int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
2341        int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2342        int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
2343        int (*req_ncomp_notif)(struct ib_cq *cq, int wc_cnt);
2344        int (*post_srq_recv)(struct ib_srq *srq,
2345                             const struct ib_recv_wr *recv_wr,
2346                             const struct ib_recv_wr **bad_recv_wr);
2347        int (*process_mad)(struct ib_device *device, int process_mad_flags,
2348                           u8 port_num, const struct ib_wc *in_wc,
2349                           const struct ib_grh *in_grh,
2350                           const struct ib_mad *in_mad, struct ib_mad *out_mad,
2351                           size_t *out_mad_size, u16 *out_mad_pkey_index);
2352        int (*query_device)(struct ib_device *device,
2353                            struct ib_device_attr *device_attr,
2354                            struct ib_udata *udata);
2355        int (*modify_device)(struct ib_device *device, int device_modify_mask,
2356                             struct ib_device_modify *device_modify);
2357        void (*get_dev_fw_str)(struct ib_device *device, char *str);
2358        const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
2359                                                     int comp_vector);
2360        int (*query_port)(struct ib_device *device, u8 port_num,
2361                          struct ib_port_attr *port_attr);
2362        int (*modify_port)(struct ib_device *device, u8 port_num,
2363                           int port_modify_mask,
2364                           struct ib_port_modify *port_modify);
2365        /**
2366         * The following mandatory functions are used only at device
2367         * registration.  Keep functions such as these at the end of this
2368         * structure to avoid cache line misses when accessing struct ib_device
2369         * in fast paths.
2370         */
2371        int (*get_port_immutable)(struct ib_device *device, u8 port_num,
2372                                  struct ib_port_immutable *immutable);
2373        enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
2374                                               u8 port_num);
2375        /**
2376         * When calling get_netdev, the HW vendor's driver should return the
2377         * net device of device @device at port @port_num or NULL if such
2378         * a net device doesn't exist. The vendor driver should call dev_hold
2379         * on this net device. The HW vendor's device driver must guarantee
2380         * that this function returns NULL before the net device has finished
2381         * NETDEV_UNREGISTER state.
2382         */
2383        struct net_device *(*get_netdev)(struct ib_device *device, u8 port_num);
2384        /**
2385         * rdma netdev operation
2386         *
2387         * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
2388         * must return -EOPNOTSUPP if it doesn't support the specified type.
2389         */
2390        struct net_device *(*alloc_rdma_netdev)(
2391                struct ib_device *device, u8 port_num, enum rdma_netdev_t type,
2392                const char *name, unsigned char name_assign_type,
2393                void (*setup)(struct net_device *));
2394
2395        int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num,
2396                                      enum rdma_netdev_t type,
2397                                      struct rdma_netdev_alloc_params *params);
2398        /**
2399         * query_gid should be return GID value for @device, when @port_num
2400         * link layer is either IB or iWarp. It is no-op if @port_num port
2401         * is RoCE link layer.
2402         */
2403        int (*query_gid)(struct ib_device *device, u8 port_num, int index,
2404                         union ib_gid *gid);
2405        /**
2406         * When calling add_gid, the HW vendor's driver should add the gid
2407         * of device of port at gid index available at @attr. Meta-info of
2408         * that gid (for example, the network device related to this gid) is
2409         * available at @attr. @context allows the HW vendor driver to store
2410         * extra information together with a GID entry. The HW vendor driver may
2411         * allocate memory to contain this information and store it in @context
2412         * when a new GID entry is written to. Params are consistent until the
2413         * next call of add_gid or delete_gid. The function should return 0 on
2414         * success or error otherwise. The function could be called
2415         * concurrently for different ports. This function is only called when
2416         * roce_gid_table is used.
2417         */
2418        int (*add_gid)(const struct ib_gid_attr *attr, void **context);
2419        /**
2420         * When calling del_gid, the HW vendor's driver should delete the
2421         * gid of device @device at gid index gid_index of port port_num
2422         * available in @attr.
2423         * Upon the deletion of a GID entry, the HW vendor must free any
2424         * allocated memory. The caller will clear @context afterwards.
2425         * This function is only called when roce_gid_table is used.
2426         */
2427        int (*del_gid)(const struct ib_gid_attr *attr, void **context);
2428        int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index,
2429                          u16 *pkey);
2430        int (*alloc_ucontext)(struct ib_ucontext *context,
2431                              struct ib_udata *udata);
2432        void (*dealloc_ucontext)(struct ib_ucontext *context);
2433        int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
2434        /**
2435         * This will be called once refcount of an entry in mmap_xa reaches
2436         * zero. The type of the memory that was mapped may differ between
2437         * entries and is opaque to the rdma_user_mmap interface.
2438         * Therefore needs to be implemented by the driver in mmap_free.
2439         */
2440        void (*mmap_free)(struct rdma_user_mmap_entry *entry);
2441        void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
2442        int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2443        void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2444        int (*create_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
2445                         struct ib_udata *udata);
2446        int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2447        int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2448        void (*destroy_ah)(struct ib_ah *ah, u32 flags);
2449        int (*create_srq)(struct ib_srq *srq,
2450                          struct ib_srq_init_attr *srq_init_attr,
2451                          struct ib_udata *udata);
2452        int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
2453                          enum ib_srq_attr_mask srq_attr_mask,
2454                          struct ib_udata *udata);
2455        int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
2456        void (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
2457        struct ib_qp *(*create_qp)(struct ib_pd *pd,
2458                                   struct ib_qp_init_attr *qp_init_attr,
2459                                   struct ib_udata *udata);
2460        int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2461                         int qp_attr_mask, struct ib_udata *udata);
2462        int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2463                        int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
2464        int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
2465        int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
2466                         struct ib_udata *udata);
2467        int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
2468        void (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
2469        int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
2470        struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
2471        struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
2472                                     u64 virt_addr, int mr_access_flags,
2473                                     struct ib_udata *udata);
2474        int (*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start, u64 length,
2475                             u64 virt_addr, int mr_access_flags,
2476                             struct ib_pd *pd, struct ib_udata *udata);
2477        int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
2478        struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
2479                                  u32 max_num_sg, struct ib_udata *udata);
2480        struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd,
2481                                            u32 max_num_data_sg,
2482                                            u32 max_num_meta_sg);
2483        int (*advise_mr)(struct ib_pd *pd,
2484                         enum ib_uverbs_advise_mr_advice advice, u32 flags,
2485                         struct ib_sge *sg_list, u32 num_sge,
2486                         struct uverbs_attr_bundle *attrs);
2487        int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
2488                         unsigned int *sg_offset);
2489        int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2490                               struct ib_mr_status *mr_status);
2491        struct ib_mw *(*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type,
2492                                  struct ib_udata *udata);
2493        int (*dealloc_mw)(struct ib_mw *mw);
2494        int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2495        int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2496        struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
2497                                      struct ib_udata *udata);
2498        int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
2499        struct ib_flow *(*create_flow)(struct ib_qp *qp,
2500                                       struct ib_flow_attr *flow_attr,
2501                                       int domain, struct ib_udata *udata);
2502        int (*destroy_flow)(struct ib_flow *flow_id);
2503        struct ib_flow_action *(*create_flow_action_esp)(
2504                struct ib_device *device,
2505                const struct ib_flow_action_attrs_esp *attr,
2506                struct uverbs_attr_bundle *attrs);
2507        int (*destroy_flow_action)(struct ib_flow_action *action);
2508        int (*modify_flow_action_esp)(
2509                struct ib_flow_action *action,
2510                const struct ib_flow_action_attrs_esp *attr,
2511                struct uverbs_attr_bundle *attrs);
2512        int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
2513                                 int state);
2514        int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
2515                             struct ifla_vf_info *ivf);
2516        int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
2517                            struct ifla_vf_stats *stats);
2518        int (*get_vf_guid)(struct ib_device *device, int vf, u8 port,
2519                            struct ifla_vf_guid *node_guid,
2520                            struct ifla_vf_guid *port_guid);
2521        int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
2522                           int type);
2523        struct ib_wq *(*create_wq)(struct ib_pd *pd,
2524                                   struct ib_wq_init_attr *init_attr,
2525                                   struct ib_udata *udata);
2526        void (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
2527        int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
2528                         u32 wq_attr_mask, struct ib_udata *udata);
2529        struct ib_rwq_ind_table *(*create_rwq_ind_table)(
2530                struct ib_device *device,
2531                struct ib_rwq_ind_table_init_attr *init_attr,
2532                struct ib_udata *udata);
2533        int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2534        struct ib_dm *(*alloc_dm)(struct ib_device *device,
2535                                  struct ib_ucontext *context,
2536                                  struct ib_dm_alloc_attr *attr,
2537                                  struct uverbs_attr_bundle *attrs);
2538        int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
2539        struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
2540                                   struct ib_dm_mr_attr *attr,
2541                                   struct uverbs_attr_bundle *attrs);
2542        struct ib_counters *(*create_counters)(
2543                struct ib_device *device, struct uverbs_attr_bundle *attrs);
2544        int (*destroy_counters)(struct ib_counters *counters);
2545        int (*read_counters)(struct ib_counters *counters,
2546                             struct ib_counters_read_attr *counters_read_attr,
2547                             struct uverbs_attr_bundle *attrs);
2548        int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg,
2549                            int data_sg_nents, unsigned int *data_sg_offset,
2550                            struct scatterlist *meta_sg, int meta_sg_nents,
2551                            unsigned int *meta_sg_offset);
2552
2553        /**
2554         * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
2555         *   driver initialized data.  The struct is kfree()'ed by the sysfs
2556         *   core when the device is removed.  A lifespan of -1 in the return
2557         *   struct tells the core to set a default lifespan.
2558         */
2559        struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
2560                                                u8 port_num);
2561        /**
2562         * get_hw_stats - Fill in the counter value(s) in the stats struct.
2563         * @index - The index in the value array we wish to have updated, or
2564         *   num_counters if we want all stats updated
2565         * Return codes -
2566         *   < 0 - Error, no counters updated
2567         *   index - Updated the single counter pointed to by index
2568         *   num_counters - Updated all counters (will reset the timestamp
2569         *     and prevent further calls for lifespan milliseconds)
2570         * Drivers are allowed to update all counters in leiu of just the
2571         *   one given in index at their option
2572         */
2573        int (*get_hw_stats)(struct ib_device *device,
2574                            struct rdma_hw_stats *stats, u8 port, int index);
2575        /*
2576         * This function is called once for each port when a ib device is
2577         * registered.
2578         */
2579        int (*init_port)(struct ib_device *device, u8 port_num,
2580                         struct kobject *port_sysfs);
2581        /**
2582         * Allows rdma drivers to add their own restrack attributes.
2583         */
2584        int (*fill_res_entry)(struct sk_buff *msg,
2585                              struct rdma_restrack_entry *entry);
2586
2587        /* Device lifecycle callbacks */
2588        /*
2589         * Called after the device becomes registered, before clients are
2590         * attached
2591         */
2592        int (*enable_driver)(struct ib_device *dev);
2593        /*
2594         * This is called as part of ib_dealloc_device().
2595         */
2596        void (*dealloc_driver)(struct ib_device *dev);
2597
2598        /* iWarp CM callbacks */
2599        void (*iw_add_ref)(struct ib_qp *qp);
2600        void (*iw_rem_ref)(struct ib_qp *qp);
2601        struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn);
2602        int (*iw_connect)(struct iw_cm_id *cm_id,
2603                          struct iw_cm_conn_param *conn_param);
2604        int (*iw_accept)(struct iw_cm_id *cm_id,
2605                         struct iw_cm_conn_param *conn_param);
2606        int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata,
2607                         u8 pdata_len);
2608        int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
2609        int (*iw_destroy_listen)(struct iw_cm_id *cm_id);
2610        /**
2611         * counter_bind_qp - Bind a QP to a counter.
2612         * @counter - The counter to be bound. If counter->id is zero then
2613         *   the driver needs to allocate a new counter and set counter->id
2614         */
2615        int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp);
2616        /**
2617         * counter_unbind_qp - Unbind the qp from the dynamically-allocated
2618         *   counter and bind it onto the default one
2619         */
2620        int (*counter_unbind_qp)(struct ib_qp *qp);
2621        /**
2622         * counter_dealloc -De-allocate the hw counter
2623         */
2624        int (*counter_dealloc)(struct rdma_counter *counter);
2625        /**
2626         * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in
2627         * the driver initialized data.
2628         */
2629        struct rdma_hw_stats *(*counter_alloc_stats)(
2630                struct rdma_counter *counter);
2631        /**
2632         * counter_update_stats - Query the stats value of this counter
2633         */
2634        int (*counter_update_stats)(struct rdma_counter *counter);
2635
2636        /**
2637         * Allows rdma drivers to add their own restrack attributes
2638         * dumped via 'rdma stat' iproute2 command.
2639         */
2640        int (*fill_stat_entry)(struct sk_buff *msg,
2641                               struct rdma_restrack_entry *entry);
2642
2643        DECLARE_RDMA_OBJ_SIZE(ib_ah);
2644        DECLARE_RDMA_OBJ_SIZE(ib_cq);
2645        DECLARE_RDMA_OBJ_SIZE(ib_pd);
2646        DECLARE_RDMA_OBJ_SIZE(ib_srq);
2647        DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
2648};
2649
2650struct ib_core_device {
2651        /* device must be the first element in structure until,
2652         * union of ib_core_device and device exists in ib_device.
2653         */
2654        struct device dev;
2655        possible_net_t rdma_net;
2656        struct kobject *ports_kobj;
2657        struct list_head port_list;
2658        struct ib_device *owner; /* reach back to owner ib_device */
2659};
2660
2661struct rdma_restrack_root;
2662struct ib_device {
2663        /* Do not access @dma_device directly from ULP nor from HW drivers. */
2664        struct device                *dma_device;
2665        struct ib_device_ops         ops;
2666        char                          name[IB_DEVICE_NAME_MAX];
2667        struct rcu_head rcu_head;
2668
2669        struct list_head              event_handler_list;
2670        /* Protects event_handler_list */
2671        struct rw_semaphore event_handler_rwsem;
2672
2673        /* Protects QP's event_handler calls and open_qp list */
2674        spinlock_t qp_open_list_lock;
2675
2676        struct rw_semaphore           client_data_rwsem;
2677        struct xarray                 client_data;
2678        struct mutex                  unregistration_lock;
2679
2680        /* Synchronize GID, Pkey cache entries, subnet prefix, LMC */
2681        rwlock_t cache_lock;
2682        /**
2683         * port_data is indexed by port number
2684         */
2685        struct ib_port_data *port_data;
2686
2687        int                           num_comp_vectors;
2688
2689        union {
2690                struct device           dev;
2691                struct ib_core_device   coredev;
2692        };
2693
2694        /* First group for device attributes,
2695         * Second group for driver provided attributes (optional).
2696         * It is NULL terminated array.
2697         */
2698        const struct attribute_group    *groups[3];
2699
2700        u64                          uverbs_cmd_mask;
2701        u64                          uverbs_ex_cmd_mask;
2702
2703        char                         node_desc[IB_DEVICE_NODE_DESC_MAX];
2704        __be64                       node_guid;
2705        u32                          local_dma_lkey;
2706        u16                          is_switch:1;
2707        /* Indicates kernel verbs support, should not be used in drivers */
2708        u16                          kverbs_provider:1;
2709        /* CQ adaptive moderation (RDMA DIM) */
2710        u16                          use_cq_dim:1;
2711        u8                           node_type;
2712        u8                           phys_port_cnt;
2713        struct ib_device_attr        attrs;
2714        struct attribute_group       *hw_stats_ag;
2715        struct rdma_hw_stats         *hw_stats;
2716
2717#ifdef CONFIG_CGROUP_RDMA
2718        struct rdmacg_device         cg_device;
2719#endif
2720
2721        u32                          index;
2722
2723        spinlock_t                   cq_pools_lock;
2724        struct list_head             cq_pools[IB_POLL_LAST_POOL_TYPE + 1];
2725
2726        struct rdma_restrack_root *res;
2727
2728        const struct uapi_definition   *driver_def;
2729
2730        /*
2731         * Positive refcount indicates that the device is currently
2732         * registered and cannot be unregistered.
2733         */
2734        refcount_t refcount;
2735        struct completion unreg_completion;
2736        struct work_struct unregistration_work;
2737
2738        const struct rdma_link_ops *link_ops;
2739
2740        /* Protects compat_devs xarray modifications */
2741        struct mutex compat_devs_mutex;
2742        /* Maintains compat devices for each net namespace */
2743        struct xarray compat_devs;
2744
2745        /* Used by iWarp CM */
2746        char iw_ifname[IFNAMSIZ];
2747        u32 iw_driver_flags;
2748        u32 lag_flags;
2749};
2750
2751struct ib_client_nl_info;
2752struct ib_client {
2753        const char *name;
2754        int (*add)(struct ib_device *ibdev);
2755        void (*remove)(struct ib_device *, void *client_data);
2756        void (*rename)(struct ib_device *dev, void *client_data);
2757        int (*get_nl_info)(struct ib_device *ibdev, void *client_data,
2758                           struct ib_client_nl_info *res);
2759        int (*get_global_nl_info)(struct ib_client_nl_info *res);
2760
2761        /* Returns the net_dev belonging to this ib_client and matching the
2762         * given parameters.
2763         * @dev:         An RDMA device that the net_dev use for communication.
2764         * @port:        A physical port number on the RDMA device.
2765         * @pkey:        P_Key that the net_dev uses if applicable.
2766         * @gid:         A GID that the net_dev uses to communicate.
2767         * @addr:        An IP address the net_dev is configured with.
2768         * @client_data: The device's client data set by ib_set_client_data().
2769         *
2770         * An ib_client that implements a net_dev on top of RDMA devices
2771         * (such as IP over IB) should implement this callback, allowing the
2772         * rdma_cm module to find the right net_dev for a given request.
2773         *
2774         * The caller is responsible for calling dev_put on the returned
2775         * netdev. */
2776        struct net_device *(*get_net_dev_by_params)(
2777                        struct ib_device *dev,
2778                        u8 port,
2779                        u16 pkey,
2780                        const union ib_gid *gid,
2781                        const struct sockaddr *addr,
2782                        void *client_data);
2783
2784        refcount_t uses;
2785        struct completion uses_zero;
2786        u32 client_id;
2787
2788        /* kverbs are not required by the client */
2789        u8 no_kverbs_req:1;
2790};
2791
2792/*
2793 * IB block DMA iterator
2794 *
2795 * Iterates the DMA-mapped SGL in contiguous memory blocks aligned
2796 * to a HW supported page size.
2797 */
2798struct ib_block_iter {
2799        /* internal states */
2800        struct scatterlist *__sg;       /* sg holding the current aligned block */
2801        dma_addr_t __dma_addr;          /* unaligned DMA address of this block */
2802        unsigned int __sg_nents;        /* number of SG entries */
2803        unsigned int __sg_advance;      /* number of bytes to advance in sg in next step */
2804        unsigned int __pg_bit;          /* alignment of current block */
2805};
2806
2807struct ib_device *_ib_alloc_device(size_t size);
2808#define ib_alloc_device(drv_struct, member)                                    \
2809        container_of(_ib_alloc_device(sizeof(struct drv_struct) +              \
2810                                      BUILD_BUG_ON_ZERO(offsetof(              \
2811                                              struct drv_struct, member))),    \
2812                     struct drv_struct, member)
2813
2814void ib_dealloc_device(struct ib_device *device);
2815
2816void ib_get_device_fw_str(struct ib_device *device, char *str);
2817
2818int ib_register_device(struct ib_device *device, const char *name);
2819void ib_unregister_device(struct ib_device *device);
2820void ib_unregister_driver(enum rdma_driver_id driver_id);
2821void ib_unregister_device_and_put(struct ib_device *device);
2822void ib_unregister_device_queued(struct ib_device *ib_dev);
2823
2824int ib_register_client   (struct ib_client *client);
2825void ib_unregister_client(struct ib_client *client);
2826
2827void __rdma_block_iter_start(struct ib_block_iter *biter,
2828                             struct scatterlist *sglist,
2829                             unsigned int nents,
2830                             unsigned long pgsz);
2831bool __rdma_block_iter_next(struct ib_block_iter *biter);
2832
2833/**
2834 * rdma_block_iter_dma_address - get the aligned dma address of the current
2835 * block held by the block iterator.
2836 * @biter: block iterator holding the memory block
2837 */
2838static inline dma_addr_t
2839rdma_block_iter_dma_address(struct ib_block_iter *biter)
2840{
2841        return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
2842}
2843
2844/**
2845 * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
2846 * @sglist: sglist to iterate over
2847 * @biter: block iterator holding the memory block
2848 * @nents: maximum number of sg entries to iterate over
2849 * @pgsz: best HW supported page size to use
2850 *
2851 * Callers may use rdma_block_iter_dma_address() to get each
2852 * blocks aligned DMA address.
2853 */
2854#define rdma_for_each_block(sglist, biter, nents, pgsz)         \
2855        for (__rdma_block_iter_start(biter, sglist, nents,      \
2856                                     pgsz);                     \
2857             __rdma_block_iter_next(biter);)
2858
2859/**
2860 * ib_get_client_data - Get IB client context
2861 * @device:Device to get context for
2862 * @client:Client to get context for
2863 *
2864 * ib_get_client_data() returns the client context data set with
2865 * ib_set_client_data(). This can only be called while the client is
2866 * registered to the device, once the ib_client remove() callback returns this
2867 * cannot be called.
2868 */
2869static inline void *ib_get_client_data(struct ib_device *device,
2870                                       struct ib_client *client)
2871{
2872        return xa_load(&device->client_data, client->client_id);
2873}
2874void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
2875                         void *data);
2876void ib_set_device_ops(struct ib_device *device,
2877                       const struct ib_device_ops *ops);
2878
2879int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
2880                      unsigned long pfn, unsigned long size, pgprot_t prot,
2881                      struct rdma_user_mmap_entry *entry);
2882int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
2883                                struct rdma_user_mmap_entry *entry,
2884                                size_t length);
2885int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
2886                                      struct rdma_user_mmap_entry *entry,
2887                                      size_t length, u32 min_pgoff,
2888                                      u32 max_pgoff);
2889
2890struct rdma_user_mmap_entry *
2891rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
2892                               unsigned long pgoff);
2893struct rdma_user_mmap_entry *
2894rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
2895                         struct vm_area_struct *vma);
2896void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry);
2897
2898void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry);
2899
2900static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2901{
2902        return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2903}
2904
2905static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2906{
2907        return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
2908}
2909
2910static inline bool ib_is_buffer_cleared(const void __user *p,
2911                                        size_t len)
2912{
2913        bool ret;
2914        u8 *buf;
2915
2916        if (len > USHRT_MAX)
2917                return false;
2918
2919        buf = memdup_user(p, len);
2920        if (IS_ERR(buf))
2921                return false;
2922
2923        ret = !memchr_inv(buf, 0, len);
2924        kfree(buf);
2925        return ret;
2926}
2927
2928static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2929                                       size_t offset,
2930                                       size_t len)
2931{
2932        return ib_is_buffer_cleared(udata->inbuf + offset, len);
2933}
2934
2935/**
2936 * ib_is_destroy_retryable - Check whether the uobject destruction
2937 * is retryable.
2938 * @ret: The initial destruction return code
2939 * @why: remove reason
2940 * @uobj: The uobject that is destroyed
2941 *
2942 * This function is a helper function that IB layer and low-level drivers
2943 * can use to consider whether the destruction of the given uobject is
2944 * retry-able.
2945 * It checks the original return code, if it wasn't success the destruction
2946 * is retryable according to the ucontext state (i.e. cleanup_retryable) and
2947 * the remove reason. (i.e. why).
2948 * Must be called with the object locked for destroy.
2949 */
2950static inline bool ib_is_destroy_retryable(int ret, enum rdma_remove_reason why,
2951                                           struct ib_uobject *uobj)
2952{
2953        return ret && (why == RDMA_REMOVE_DESTROY ||
2954                       uobj->context->cleanup_retryable);
2955}
2956
2957/**
2958 * ib_destroy_usecnt - Called during destruction to check the usecnt
2959 * @usecnt: The usecnt atomic
2960 * @why: remove reason
2961 * @uobj: The uobject that is destroyed
2962 *
2963 * Non-zero usecnts will block destruction unless destruction was triggered by
2964 * a ucontext cleanup.
2965 */
2966static inline int ib_destroy_usecnt(atomic_t *usecnt,
2967                                    enum rdma_remove_reason why,
2968                                    struct ib_uobject *uobj)
2969{
2970        if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj))
2971                return -EBUSY;
2972        return 0;
2973}
2974
2975/**
2976 * ib_modify_qp_is_ok - Check that the supplied attribute mask
2977 * contains all required attributes and no attributes not allowed for
2978 * the given QP state transition.
2979 * @cur_state: Current QP state
2980 * @next_state: Next QP state
2981 * @type: QP type
2982 * @mask: Mask of supplied QP attributes
2983 *
2984 * This function is a helper function that a low-level driver's
2985 * modify_qp method can use to validate the consumer's input.  It
2986 * checks that cur_state and next_state are valid QP states, that a
2987 * transition from cur_state to next_state is allowed by the IB spec,
2988 * and that the attribute mask supplied is allowed for the transition.
2989 */
2990bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
2991                        enum ib_qp_type type, enum ib_qp_attr_mask mask);
2992
2993void ib_register_event_handler(struct ib_event_handler *event_handler);
2994void ib_unregister_event_handler(struct ib_event_handler *event_handler);
2995void ib_dispatch_event(const struct ib_event *event);
2996
2997int ib_query_port(struct ib_device *device,
2998                  u8 port_num, struct ib_port_attr *port_attr);
2999
3000enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
3001                                               u8 port_num);
3002
3003/**
3004 * rdma_cap_ib_switch - Check if the device is IB switch
3005 * @device: Device to check
3006 *
3007 * Device driver is responsible for setting is_switch bit on
3008 * in ib_device structure at init time.
3009 *
3010 * Return: true if the device is IB switch.
3011 */
3012static inline bool rdma_cap_ib_switch(const struct ib_device *device)
3013{
3014        return device->is_switch;
3015}
3016
3017/**
3018 * rdma_start_port - Return the first valid port number for the device
3019 * specified
3020 *
3021 * @device: Device to be checked
3022 *
3023 * Return start port number
3024 */
3025static inline u8 rdma_start_port(const struct ib_device *device)
3026{
3027        return rdma_cap_ib_switch(device) ? 0 : 1;
3028}
3029
3030/**
3031 * rdma_for_each_port - Iterate over all valid port numbers of the IB device
3032 * @device - The struct ib_device * to iterate over
3033 * @iter - The unsigned int to store the port number
3034 */
3035#define rdma_for_each_port(device, iter)                                       \
3036        for (iter = rdma_start_port(device + BUILD_BUG_ON_ZERO(!__same_type(   \
3037                                                     unsigned int, iter)));    \
3038             iter <= rdma_end_port(device); (iter)++)
3039
3040/**
3041 * rdma_end_port - Return the last valid port number for the device
3042 * specified
3043 *
3044 * @device: Device to be checked
3045 *
3046 * Return last port number
3047 */
3048static inline u8 rdma_end_port(const struct ib_device *device)
3049{
3050        return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
3051}
3052
3053static inline int rdma_is_port_valid(const struct ib_device *device,
3054                                     unsigned int port)
3055{
3056        return (port >= rdma_start_port(device) &&
3057                port <= rdma_end_port(device));
3058}
3059
3060static inline bool rdma_is_grh_required(const struct ib_device *device,
3061                                        u8 port_num)
3062{
3063        return device->port_data[port_num].immutable.core_cap_flags &
3064               RDMA_CORE_PORT_IB_GRH_REQUIRED;
3065}
3066
3067static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
3068{
3069        return device->port_data[port_num].immutable.core_cap_flags &
3070               RDMA_CORE_CAP_PROT_IB;
3071}
3072
3073static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
3074{
3075        return device->port_data[port_num].immutable.core_cap_flags &
3076               (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
3077}
3078
3079static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
3080{
3081        return device->port_data[port_num].immutable.core_cap_flags &
3082               RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
3083}
3084
3085static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
3086{
3087        return device->port_data[port_num].immutable.core_cap_flags &
3088               RDMA_CORE_CAP_PROT_ROCE;
3089}
3090
3091static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
3092{
3093        return device->port_data[port_num].immutable.core_cap_flags &
3094               RDMA_CORE_CAP_PROT_IWARP;
3095}
3096
3097static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
3098{
3099        return rdma_protocol_ib(device, port_num) ||
3100                rdma_protocol_roce(device, port_num);
3101}
3102
3103static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
3104{
3105        return device->port_data[port_num].immutable.core_cap_flags &
3106               RDMA_CORE_CAP_PROT_RAW_PACKET;
3107}
3108
3109static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
3110{
3111        return device->port_data[port_num].immutable.core_cap_flags &
3112               RDMA_CORE_CAP_PROT_USNIC;
3113}
3114
3115/**
3116 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
3117 * Management Datagrams.
3118 * @device: Device to check
3119 * @port_num: Port number to check
3120 *
3121 * Management Datagrams (MAD) are a required part of the InfiniBand
3122 * specification and are supported on all InfiniBand devices.  A slightly
3123 * extended version are also supported on OPA interfaces.
3124 *
3125 * Return: true if the port supports sending/receiving of MAD packets.
3126 */
3127static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
3128{
3129        return device->port_data[port_num].immutable.core_cap_flags &
3130               RDMA_CORE_CAP_IB_MAD;
3131}
3132
3133/**
3134 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
3135 * Management Datagrams.
3136 * @device: Device to check
3137 * @port_num: Port number to check
3138 *
3139 * Intel OmniPath devices extend and/or replace the InfiniBand Management
3140 * datagrams with their own versions.  These OPA MADs share many but not all of
3141 * the characteristics of InfiniBand MADs.
3142 *
3143 * OPA MADs differ in the following ways:
3144 *
3145 *    1) MADs are variable size up to 2K
3146 *       IBTA defined MADs remain fixed at 256 bytes
3147 *    2) OPA SMPs must carry valid PKeys
3148 *    3) OPA SMP packets are a different format
3149 *
3150 * Return: true if the port supports OPA MAD packet formats.
3151 */
3152static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
3153{
3154        return device->port_data[port_num].immutable.core_cap_flags &
3155                RDMA_CORE_CAP_OPA_MAD;
3156}
3157
3158/**
3159 * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
3160 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
3161 * @device: Device to check
3162 * @port_num: Port number to check
3163 *
3164 * Each InfiniBand node is required to provide a Subnet Management Agent
3165 * that the subnet manager can access.  Prior to the fabric being fully
3166 * configured by the subnet manager, the SMA is accessed via a well known
3167 * interface called the Subnet Management Interface (SMI).  This interface
3168 * uses directed route packets to communicate with the SM to get around the
3169 * chicken and egg problem of the SM needing to know what's on the fabric
3170 * in order to configure the fabric, and needing to configure the fabric in
3171 * order to send packets to the devices on the fabric.  These directed
3172 * route packets do not need the fabric fully configured in order to reach
3173 * their destination.  The SMI is the only method allowed to send
3174 * directed route packets on an InfiniBand fabric.
3175 *
3176 * Return: true if the port provides an SMI.
3177 */
3178static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
3179{
3180        return device->port_data[port_num].immutable.core_cap_flags &
3181               RDMA_CORE_CAP_IB_SMI;
3182}
3183
3184/**
3185 * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband
3186 * Communication Manager.
3187 * @device: Device to check
3188 * @port_num: Port number to check
3189 *
3190 * The InfiniBand Communication Manager is one of many pre-defined General
3191 * Service Agents (GSA) that are accessed via the General Service
3192 * Interface (GSI).  It's role is to facilitate establishment of connections
3193 * between nodes as well as other management related tasks for established
3194 * connections.
3195 *
3196 * Return: true if the port supports an IB CM (this does not guarantee that
3197 * a CM is actually running however).
3198 */
3199static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
3200{
3201        return device->port_data[port_num].immutable.core_cap_flags &
3202               RDMA_CORE_CAP_IB_CM;
3203}
3204
3205/**
3206 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
3207 * Communication Manager.
3208 * @device: Device to check
3209 * @port_num: Port number to check
3210 *
3211 * Similar to above, but specific to iWARP connections which have a different
3212 * managment protocol than InfiniBand.
3213 *
3214 * Return: true if the port supports an iWARP CM (this does not guarantee that
3215 * a CM is actually running however).
3216 */
3217static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
3218{
3219        return device->port_data[port_num].immutable.core_cap_flags &
3220               RDMA_CORE_CAP_IW_CM;
3221}
3222
3223/**
3224 * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband
3225 * Subnet Administration.
3226 * @device: Device to check
3227 * @port_num: Port number to check
3228 *
3229 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
3230 * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
3231 * fabrics, devices should resolve routes to other hosts by contacting the
3232 * SA to query the proper route.
3233 *
3234 * Return: true if the port should act as a client to the fabric Subnet
3235 * Administration interface.  This does not imply that the SA service is
3236 * running locally.
3237 */
3238static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
3239{
3240        return device->port_data[port_num].immutable.core_cap_flags &
3241               RDMA_CORE_CAP_IB_SA;
3242}
3243
3244/**
3245 * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband
3246 * Multicast.
3247 * @device: Device to check
3248 * @port_num: Port number to check
3249 *
3250 * InfiniBand multicast registration is more complex than normal IPv4 or
3251 * IPv6 multicast registration.  Each Host Channel Adapter must register
3252 * with the Subnet Manager when it wishes to join a multicast group.  It
3253 * should do so only once regardless of how many queue pairs it subscribes
3254 * to this group.  And it should leave the group only after all queue pairs
3255 * attached to the group have been detached.
3256 *
3257 * Return: true if the port must undertake the additional adminstrative
3258 * overhead of registering/unregistering with the SM and tracking of the
3259 * total number of queue pairs attached to the multicast group.
3260 */
3261static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
3262{
3263        return rdma_cap_ib_sa(device, port_num);
3264}
3265
3266/**
3267 * rdma_cap_af_ib - Check if the port of device has the capability
3268 * Native Infiniband Address.
3269 * @device: Device to check
3270 * @port_num: Port number to check
3271 *
3272 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
3273 * GID.  RoCE uses a different mechanism, but still generates a GID via
3274 * a prescribed mechanism and port specific data.
3275 *
3276 * Return: true if the port uses a GID address to identify devices on the
3277 * network.
3278 */
3279static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
3280{
3281        return device->port_data[port_num].immutable.core_cap_flags &
3282               RDMA_CORE_CAP_AF_IB;
3283}
3284
3285/**
3286 * rdma_cap_eth_ah - Check if the port of device has the capability
3287 * Ethernet Address Handle.
3288 * @device: Device to check
3289 * @port_num: Port number to check
3290 *
3291 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
3292 * to fabricate GIDs over Ethernet/IP specific addresses native to the
3293 * port.  Normally, packet headers are generated by the sending host
3294 * adapter, but when sending connectionless datagrams, we must manually
3295 * inject the proper headers for the fabric we are communicating over.
3296 *
3297 * Return: true if we are running as a RoCE port and must force the
3298 * addition of a Global Route Header built from our Ethernet Address
3299 * Handle into our header list for connectionless packets.
3300 */
3301static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
3302{
3303        return device->port_data[port_num].immutable.core_cap_flags &
3304               RDMA_CORE_CAP_ETH_AH;
3305}
3306
3307/**
3308 * rdma_cap_opa_ah - Check if the port of device supports
3309 * OPA Address handles
3310 * @device: Device to check
3311 * @port_num: Port number to check
3312 *
3313 * Return: true if we are running on an OPA device which supports
3314 * the extended OPA addressing.
3315 */
3316static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
3317{
3318        return (device->port_data[port_num].immutable.core_cap_flags &
3319                RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
3320}
3321
3322/**
3323 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
3324 *
3325 * @device: Device
3326 * @port_num: Port number
3327 *
3328 * This MAD size includes the MAD headers and MAD payload.  No other headers
3329 * are included.
3330 *
3331 * Return the max MAD size required by the Port.  Will return 0 if the port
3332 * does not support MADs
3333 */
3334static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
3335{
3336        return device->port_data[port_num].immutable.max_mad_size;
3337}
3338
3339/**
3340 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
3341 * @device: Device to check
3342 * @port_num: Port number to check
3343 *
3344 * RoCE GID table mechanism manages the various GIDs for a device.
3345 *
3346 * NOTE: if allocating the port's GID table has failed, this call will still
3347 * return true, but any RoCE GID table API will fail.
3348 *
3349 * Return: true if the port uses RoCE GID table mechanism in order to manage
3350 * its GIDs.
3351 */
3352static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
3353                                           u8 port_num)
3354{
3355        return rdma_protocol_roce(device, port_num) &&
3356                device->ops.add_gid && device->ops.del_gid;
3357}
3358
3359/*
3360 * Check if the device supports READ W/ INVALIDATE.
3361 */
3362static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
3363{
3364        /*
3365         * iWarp drivers must support READ W/ INVALIDATE.  No other protocol
3366         * has support for it yet.
3367         */
3368        return rdma_protocol_iwarp(dev, port_num);
3369}
3370
3371/**
3372 * rdma_find_pg_bit - Find page bit given address and HW supported page sizes
3373 *
3374 * @addr: address
3375 * @pgsz_bitmap: bitmap of HW supported page sizes
3376 */
3377static inline unsigned int rdma_find_pg_bit(unsigned long addr,
3378                                            unsigned long pgsz_bitmap)
3379{
3380        unsigned long align;
3381        unsigned long pgsz;
3382
3383        align = addr & -addr;
3384
3385        /* Find page bit such that addr is aligned to the highest supported
3386         * HW page size
3387         */
3388        pgsz = pgsz_bitmap & ~(-align << 1);
3389        if (!pgsz)
3390                return __ffs(pgsz_bitmap);
3391
3392        return __fls(pgsz);
3393}
3394
3395/**
3396 * rdma_core_cap_opa_port - Return whether the RDMA Port is OPA or not.
3397 * @device: Device
3398 * @port_num: 1 based Port number
3399 *
3400 * Return true if port is an Intel OPA port , false if not
3401 */
3402static inline bool rdma_core_cap_opa_port(struct ib_device *device,
3403                                          u32 port_num)
3404{
3405        return (device->port_data[port_num].immutable.core_cap_flags &
3406                RDMA_CORE_PORT_INTEL_OPA) == RDMA_CORE_PORT_INTEL_OPA;
3407}
3408
3409/**
3410 * rdma_mtu_enum_to_int - Return the mtu of the port as an integer value.
3411 * @device: Device
3412 * @port_num: Port number
3413 * @mtu: enum value of MTU
3414 *
3415 * Return the MTU size supported by the port as an integer value. Will return
3416 * -1 if enum value of mtu is not supported.
3417 */
3418static inline int rdma_mtu_enum_to_int(struct ib_device *device, u8 port,
3419                                       int mtu)
3420{
3421        if (rdma_core_cap_opa_port(device, port))
3422                return opa_mtu_enum_to_int((enum opa_mtu)mtu);
3423        else
3424                return ib_mtu_enum_to_int((enum ib_mtu)mtu);
3425}
3426
3427/**
3428 * rdma_mtu_from_attr - Return the mtu of the port from the port attribute.
3429 * @device: Device
3430 * @port_num: Port number
3431 * @attr: port attribute
3432 *
3433 * Return the MTU size supported by the port as an integer value.
3434 */
3435static inline int rdma_mtu_from_attr(struct ib_device *device, u8 port,
3436                                     struct ib_port_attr *attr)
3437{
3438        if (rdma_core_cap_opa_port(device, port))
3439                return attr->phys_mtu;
3440        else
3441                return ib_mtu_enum_to_int(attr->max_mtu);
3442}
3443
3444int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
3445                         int state);
3446int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
3447                     struct ifla_vf_info *info);
3448int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
3449                    struct ifla_vf_stats *stats);
3450int ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
3451                    struct ifla_vf_guid *node_guid,
3452                    struct ifla_vf_guid *port_guid);
3453int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
3454                   int type);
3455
3456int ib_query_pkey(struct ib_device *device,
3457                  u8 port_num, u16 index, u16 *pkey);
3458
3459int ib_modify_device(struct ib_device *device,
3460                     int device_modify_mask,
3461                     struct ib_device_modify *device_modify);
3462
3463int ib_modify_port(struct ib_device *device,
3464                   u8 port_num, int port_modify_mask,
3465                   struct ib_port_modify *port_modify);
3466
3467int ib_find_gid(struct ib_device *device, union ib_gid *gid,
3468                u8 *port_num, u16 *index);
3469
3470int ib_find_pkey(struct ib_device *device,
3471                 u8 port_num, u16 pkey, u16 *index);
3472
3473enum ib_pd_flags {
3474        /*
3475         * Create a memory registration for all memory in the system and place
3476         * the rkey for it into pd->unsafe_global_rkey.  This can be used by
3477         * ULPs to avoid the overhead of dynamic MRs.
3478         *
3479         * This flag is generally considered unsafe and must only be used in
3480         * extremly trusted environments.  Every use of it will log a warning
3481         * in the kernel log.
3482         */
3483        IB_PD_UNSAFE_GLOBAL_RKEY        = 0x01,
3484};
3485
3486struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
3487                const char *caller);
3488
3489#define ib_alloc_pd(device, flags) \
3490        __ib_alloc_pd((device), (flags), KBUILD_MODNAME)
3491
3492/**
3493 * ib_dealloc_pd_user - Deallocate kernel/user PD
3494 * @pd: The protection domain
3495 * @udata: Valid user data or NULL for kernel objects
3496 */
3497void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
3498
3499/**
3500 * ib_dealloc_pd - Deallocate kernel PD
3501 * @pd: The protection domain
3502 *
3503 * NOTE: for user PD use ib_dealloc_pd_user with valid udata!
3504 */
3505static inline void ib_dealloc_pd(struct ib_pd *pd)
3506{
3507        ib_dealloc_pd_user(pd, NULL);
3508}
3509
3510enum rdma_create_ah_flags {
3511        /* In a sleepable context */
3512        RDMA_CREATE_AH_SLEEPABLE = BIT(0),
3513};
3514
3515/**
3516 * rdma_create_ah - Creates an address handle for the given address vector.
3517 * @pd: The protection domain associated with the address handle.
3518 * @ah_attr: The attributes of the address vector.
3519 * @flags: Create address handle flags (see enum rdma_create_ah_flags).
3520 *
3521 * The address handle is used to reference a local or global destination
3522 * in all UD QP post sends.
3523 */
3524struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
3525                             u32 flags);
3526
3527/**
3528 * rdma_create_user_ah - Creates an address handle for the given address vector.
3529 * It resolves destination mac address for ah attribute of RoCE type.
3530 * @pd: The protection domain associated with the address handle.
3531 * @ah_attr: The attributes of the address vector.
3532 * @udata: pointer to user's input output buffer information need by
3533 *         provider driver.
3534 *
3535 * It returns 0 on success and returns appropriate error code on error.
3536 * The address handle is used to reference a local or global destination
3537 * in all UD QP post sends.
3538 */
3539struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
3540                                  struct rdma_ah_attr *ah_attr,
3541                                  struct ib_udata *udata);
3542/**
3543 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
3544 *   work completion.
3545 * @hdr: the L3 header to parse
3546 * @net_type: type of header to parse
3547 * @sgid: place to store source gid
3548 * @dgid: place to store destination gid
3549 */
3550int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
3551                              enum rdma_network_type net_type,
3552                              union ib_gid *sgid, union ib_gid *dgid);
3553
3554/**
3555 * ib_get_rdma_header_version - Get the header version
3556 * @hdr: the L3 header to parse
3557 */
3558int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
3559
3560/**
3561 * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
3562 *   work completion.
3563 * @device: Device on which the received message arrived.
3564 * @port_num: Port on which the received message arrived.
3565 * @wc: Work completion associated with the received message.
3566 * @grh: References the received global route header.  This parameter is
3567 *   ignored unless the work completion indicates that the GRH is valid.
3568 * @ah_attr: Returned attributes that can be used when creating an address
3569 *   handle for replying to the message.
3570 * When ib_init_ah_attr_from_wc() returns success,
3571 * (a) for IB link layer it optionally contains a reference to SGID attribute
3572 * when GRH is present for IB link layer.
3573 * (b) for RoCE link layer it contains a reference to SGID attribute.
3574 * User must invoke rdma_cleanup_ah_attr_gid_attr() to release reference to SGID
3575 * attributes which are initialized using ib_init_ah_attr_from_wc().
3576 *
3577 */
3578int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
3579                            const struct ib_wc *wc, const struct ib_grh *grh,
3580                            struct rdma_ah_attr *ah_attr);
3581
3582/**
3583 * ib_create_ah_from_wc - Creates an address handle associated with the
3584 *   sender of the specified work completion.
3585 * @pd: The protection domain associated with the address handle.
3586 * @wc: Work completion information associated with a received message.
3587 * @grh: References the received global route header.  This parameter is
3588 *   ignored unless the work completion indicates that the GRH is valid.
3589 * @port_num: The outbound port number to associate with the address.
3590 *
3591 * The address handle is used to reference a local or global destination
3592 * in all UD QP post sends.
3593 */
3594struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
3595                                   const struct ib_grh *grh, u8 port_num);
3596
3597/**
3598 * rdma_modify_ah - Modifies the address vector associated with an address
3599 *   handle.
3600 * @ah: The address handle to modify.
3601 * @ah_attr: The new address vector attributes to associate with the
3602 *   address handle.
3603 */
3604int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3605
3606/**
3607 * rdma_query_ah - Queries the address vector associated with an address
3608 *   handle.
3609 * @ah: The address handle to query.
3610 * @ah_attr: The address vector attributes associated with the address
3611 *   handle.
3612 */
3613int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3614
3615enum rdma_destroy_ah_flags {
3616        /* In a sleepable context */
3617        RDMA_DESTROY_AH_SLEEPABLE = BIT(0),
3618};
3619
3620/**
3621 * rdma_destroy_ah_user - Destroys an address handle.
3622 * @ah: The address handle to destroy.
3623 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
3624 * @udata: Valid user data or NULL for kernel objects
3625 */
3626int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
3627
3628/**
3629 * rdma_destroy_ah - Destroys an kernel address handle.
3630 * @ah: The address handle to destroy.
3631 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
3632 *
3633 * NOTE: for user ah use rdma_destroy_ah_user with valid udata!
3634 */
3635static inline int rdma_destroy_ah(struct ib_ah *ah, u32 flags)
3636{
3637        return rdma_destroy_ah_user(ah, flags, NULL);
3638}
3639
3640struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
3641                                  struct ib_srq_init_attr *srq_init_attr,
3642                                  struct ib_usrq_object *uobject,
3643                                  struct ib_udata *udata);
3644static inline struct ib_srq *
3645ib_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *srq_init_attr)
3646{
3647        if (!pd->device->ops.create_srq)
3648                return ERR_PTR(-EOPNOTSUPP);
3649
3650        return ib_create_srq_user(pd, srq_init_attr, NULL, NULL);
3651}
3652
3653/**
3654 * ib_modify_srq - Modifies the attributes for the specified SRQ.
3655 * @srq: The SRQ to modify.
3656 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
3657 *   the current values of selected SRQ attributes are returned.
3658 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
3659 *   are being modified.
3660 *
3661 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
3662 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
3663 * the number of receives queued drops below the limit.
3664 */
3665int ib_modify_srq(struct ib_srq *srq,
3666                  struct ib_srq_attr *srq_attr,
3667                  enum ib_srq_attr_mask srq_attr_mask);
3668
3669/**
3670 * ib_query_srq - Returns the attribute list and current values for the
3671 *   specified SRQ.
3672 * @srq: The SRQ to query.
3673 * @srq_attr: The attributes of the specified SRQ.
3674 */
3675int ib_query_srq(struct ib_srq *srq,
3676                 struct ib_srq_attr *srq_attr);
3677
3678/**
3679 * ib_destroy_srq_user - Destroys the specified SRQ.
3680 * @srq: The SRQ to destroy.
3681 * @udata: Valid user data or NULL for kernel objects
3682 */
3683int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);
3684
3685/**
3686 * ib_destroy_srq - Destroys the specified kernel SRQ.
3687 * @srq: The SRQ to destroy.
3688 *
3689 * NOTE: for user srq use ib_destroy_srq_user with valid udata!
3690 */
3691static inline int ib_destroy_srq(struct ib_srq *srq)
3692{
3693        return ib_destroy_srq_user(srq, NULL);
3694}
3695
3696/**
3697 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
3698 * @srq: The SRQ to post the work request on.
3699 * @recv_wr: A list of work requests to post on the receive queue.
3700 * @bad_recv_wr: On an immediate failure, this parameter will reference
3701 *   the work request that failed to be posted on the QP.
3702 */
3703static inline int ib_post_srq_recv(struct ib_srq *srq,
3704                                   const struct ib_recv_wr *recv_wr,
3705                                   const struct ib_recv_wr **bad_recv_wr)
3706{
3707        const struct ib_recv_wr *dummy;
3708
3709        return srq->device->ops.post_srq_recv(srq, recv_wr,
3710                                              bad_recv_wr ? : &dummy);
3711}
3712
3713struct ib_qp *ib_create_qp(struct ib_pd *pd,
3714                           struct ib_qp_init_attr *qp_init_attr);
3715
3716/**
3717 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
3718 * @qp: The QP to modify.
3719 * @attr: On input, specifies the QP attributes to modify.  On output,
3720 *   the current values of selected QP attributes are returned.
3721 * @attr_mask: A bit-mask used to specify which attributes of the QP
3722 *   are being modified.
3723 * @udata: pointer to user's input output buffer information
3724 *   are being modified.
3725 * It returns 0 on success and returns appropriate error code on error.
3726 */
3727int ib_modify_qp_with_udata(struct ib_qp *qp,
3728                            struct ib_qp_attr *attr,
3729                            int attr_mask,
3730                            struct ib_udata *udata);
3731
3732/**
3733 * ib_modify_qp - Modifies the attributes for the specified QP and then
3734 *   transitions the QP to the given state.
3735 * @qp: The QP to modify.
3736 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
3737 *   the current values of selected QP attributes are returned.
3738 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
3739 *   are being modified.
3740 */
3741int ib_modify_qp(struct ib_qp *qp,
3742                 struct ib_qp_attr *qp_attr,
3743                 int qp_attr_mask);
3744
3745/**
3746 * ib_query_qp - Returns the attribute list and current values for the
3747 *   specified QP.
3748 * @qp: The QP to query.
3749 * @qp_attr: The attributes of the specified QP.
3750 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
3751 * @qp_init_attr: Additional attributes of the selected QP.
3752 *
3753 * The qp_attr_mask may be used to limit the query to gathering only the
3754 * selected attributes.
3755 */
3756int ib_query_qp(struct ib_qp *qp,
3757                struct ib_qp_attr *qp_attr,
3758                int qp_attr_mask,
3759                struct ib_qp_init_attr *qp_init_attr);
3760
3761/**
3762 * ib_destroy_qp - Destroys the specified QP.
3763 * @qp: The QP to destroy.
3764 * @udata: Valid udata or NULL for kernel objects
3765 */
3766int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);
3767
3768/**
3769 * ib_destroy_qp - Destroys the specified kernel QP.
3770 * @qp: The QP to destroy.
3771 *
3772 * NOTE: for user qp use ib_destroy_qp_user with valid udata!
3773 */
3774static inline int ib_destroy_qp(struct ib_qp *qp)
3775{
3776        return ib_destroy_qp_user(qp, NULL);
3777}
3778
3779/**
3780 * ib_open_qp - Obtain a reference to an existing sharable QP.
3781 * @xrcd - XRC domain
3782 * @qp_open_attr: Attributes identifying the QP to open.
3783 *
3784 * Returns a reference to a sharable QP.
3785 */
3786struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3787                         struct ib_qp_open_attr *qp_open_attr);
3788
3789/**
3790 * ib_close_qp - Release an external reference to a QP.
3791 * @qp: The QP handle to release
3792 *
3793 * The opened QP handle is released by the caller.  The underlying
3794 * shared QP is not destroyed until all internal references are released.
3795 */
3796int ib_close_qp(struct ib_qp *qp);
3797
3798/**
3799 * ib_post_send - Posts a list of work requests to the send queue of
3800 *   the specified QP.
3801 * @qp: The QP to post the work request on.
3802 * @send_wr: A list of work requests to post on the send queue.
3803 * @bad_send_wr: On an immediate failure, this parameter will reference
3804 *   the work request that failed to be posted on the QP.
3805 *
3806 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
3807 * error is returned, the QP state shall not be affected,
3808 * ib_post_send() will return an immediate error after queueing any
3809 * earlier work requests in the list.
3810 */
3811static inline int ib_post_send(struct ib_qp *qp,
3812                               const struct ib_send_wr *send_wr,
3813                               const struct ib_send_wr **bad_send_wr)
3814{
3815        const struct ib_send_wr *dummy;
3816
3817        return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
3818}
3819
3820/**
3821 * ib_post_recv - Posts a list of work requests to the receive queue of
3822 *   the specified QP.
3823 * @qp: The QP to post the work request on.
3824 * @recv_wr: A list of work requests to post on the receive queue.
3825 * @bad_recv_wr: On an immediate failure, this parameter will reference
3826 *   the work request that failed to be posted on the QP.
3827 */
3828static inline int ib_post_recv(struct ib_qp *qp,
3829                               const struct ib_recv_wr *recv_wr,
3830                               const struct ib_recv_wr **bad_recv_wr)
3831{
3832        const struct ib_recv_wr *dummy;
3833
3834        return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
3835}
3836
3837struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
3838                                 int nr_cqe, int comp_vector,
3839                                 enum ib_poll_context poll_ctx,
3840                                 const char *caller, struct ib_udata *udata);
3841
3842/**
3843 * ib_alloc_cq_user: Allocate kernel/user CQ
3844 * @dev: The IB device
3845 * @private: Private data attached to the CQE
3846 * @nr_cqe: Number of CQEs in the CQ
3847 * @comp_vector: Completion vector used for the IRQs
3848 * @poll_ctx: Context used for polling the CQ
3849 * @udata: Valid user data or NULL for kernel objects
3850 */
3851static inline struct ib_cq *ib_alloc_cq_user(struct ib_device *dev,
3852                                             void *private, int nr_cqe,
3853                                             int comp_vector,
3854                                             enum ib_poll_context poll_ctx,
3855                                             struct ib_udata *udata)
3856{
3857        return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
3858                                  KBUILD_MODNAME, udata);
3859}
3860
3861/**
3862 * ib_alloc_cq: Allocate kernel CQ
3863 * @dev: The IB device
3864 * @private: Private data attached to the CQE
3865 * @nr_cqe: Number of CQEs in the CQ
3866 * @comp_vector: Completion vector used for the IRQs
3867 * @poll_ctx: Context used for polling the CQ
3868 *
3869 * NOTE: for user cq use ib_alloc_cq_user with valid udata!
3870 */
3871static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
3872                                        int nr_cqe, int comp_vector,
3873                                        enum ib_poll_context poll_ctx)
3874{
3875        return ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
3876                                NULL);
3877}
3878
3879struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
3880                                int nr_cqe, enum ib_poll_context poll_ctx,
3881                                const char *caller);
3882
3883/**
3884 * ib_alloc_cq_any: Allocate kernel CQ
3885 * @dev: The IB device
3886 * @private: Private data attached to the CQE
3887 * @nr_cqe: Number of CQEs in the CQ
3888 * @poll_ctx: Context used for polling the CQ
3889 */
3890static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
3891                                            void *private, int nr_cqe,
3892                                            enum ib_poll_context poll_ctx)
3893{
3894        return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx,
3895                                 KBUILD_MODNAME);
3896}
3897
3898/**
3899 * ib_free_cq_user - Free kernel/user CQ
3900 * @cq: The CQ to free
3901 * @udata: Valid user data or NULL for kernel objects
3902 *
3903 * NOTE: This function shouldn't be called on shared CQs.
3904 */
3905void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata);
3906
3907/**
3908 * ib_free_cq - Free kernel CQ
3909 * @cq: The CQ to free
3910 *
3911 * NOTE: for user cq use ib_free_cq_user with valid udata!
3912 */
3913static inline void ib_free_cq(struct ib_cq *cq)
3914{
3915        ib_free_cq_user(cq, NULL);
3916}
3917
3918int ib_process_cq_direct(struct ib_cq *cq, int budget);
3919
3920/**
3921 * ib_create_cq - Creates a CQ on the specified device.
3922 * @device: The device on which to create the CQ.
3923 * @comp_handler: A user-specified callback that is invoked when a
3924 *   completion event occurs on the CQ.
3925 * @event_handler: A user-specified callback that is invoked when an
3926 *   asynchronous event not associated with a completion occurs on the CQ.
3927 * @cq_context: Context associated with the CQ returned to the user via
3928 *   the associated completion and event handlers.
3929 * @cq_attr: The attributes the CQ should be created upon.
3930 *
3931 * Users can examine the cq structure to determine the actual CQ size.
3932 */
3933struct ib_cq *__ib_create_cq(struct ib_device *device,
3934                             ib_comp_handler comp_handler,
3935                             void (*event_handler)(struct ib_event *, void *),
3936                             void *cq_context,
3937                             const struct ib_cq_init_attr *cq_attr,
3938                             const char *caller);
3939#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
3940        __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
3941
3942/**
3943 * ib_resize_cq - Modifies the capacity of the CQ.
3944 * @cq: The CQ to resize.
3945 * @cqe: The minimum size of the CQ.
3946 *
3947 * Users can examine the cq structure to determine the actual CQ size.
3948 */
3949int ib_resize_cq(struct ib_cq *cq, int cqe);
3950
3951/**
3952 * rdma_set_cq_moderation - Modifies moderation params of the CQ
3953 * @cq: The CQ to modify.
3954 * @cq_count: number of CQEs that will trigger an event
3955 * @cq_period: max period of time in usec before triggering an event
3956 *
3957 */
3958int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
3959
3960/**
3961 * ib_destroy_cq_user - Destroys the specified CQ.
3962 * @cq: The CQ to destroy.
3963 * @udata: Valid user data or NULL for kernel objects
3964 */
3965int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
3966
3967/**
3968 * ib_destroy_cq - Destroys the specified kernel CQ.
3969 * @cq: The CQ to destroy.
3970 *
3971 * NOTE: for user cq use ib_destroy_cq_user with valid udata!
3972 */
3973static inline void ib_destroy_cq(struct ib_cq *cq)
3974{
3975        ib_destroy_cq_user(cq, NULL);
3976}
3977
3978/**
3979 * ib_poll_cq - poll a CQ for completion(s)
3980 * @cq:the CQ being polled
3981 * @num_entries:maximum number of completions to return
3982 * @wc:array of at least @num_entries &struct ib_wc where completions
3983 *   will be returned
3984 *
3985 * Poll a CQ for (possibly multiple) completions.  If the return value
3986 * is < 0, an error occurred.  If the return value is >= 0, it is the
3987 * number of completions returned.  If the return value is
3988 * non-negative and < num_entries, then the CQ was emptied.
3989 */
3990static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
3991                             struct ib_wc *wc)
3992{
3993        return cq->device->ops.poll_cq(cq, num_entries, wc);
3994}
3995
3996/**
3997 * ib_req_notify_cq - Request completion notification on a CQ.
3998 * @cq: The CQ to generate an event for.
3999 * @flags:
4000 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
4001 *   to request an event on the next solicited event or next work
4002 *   completion at any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
4003 *   may also be |ed in to request a hint about missed events, as
4004 *   described below.
4005 *
4006 * Return Value:
4007 *    < 0 means an error occurred while requesting notification
4008 *   == 0 means notification was requested successfully, and if
4009 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
4010 *        were missed and it is safe to wait for another event.  In
4011 *        this case is it guaranteed that any work completions added
4012 *        to the CQ since the last CQ poll will trigger a completion
4013 *        notification event.
4014 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
4015 *        in.  It means that the consumer must poll the CQ again to
4016 *        make sure it is empty to avoid missing an event because of a
4017 *        race between requesting notification and an entry being
4018 *        added to the CQ.  This return value means it is possible
4019 *        (but not guaranteed) that a work completion has been added
4020 *        to the CQ since the last poll without triggering a
4021 *        completion notification event.
4022 */
4023static inline int ib_req_notify_cq(struct ib_cq *cq,
4024                                   enum ib_cq_notify_flags flags)
4025{
4026        return cq->device->ops.req_notify_cq(cq, flags);
4027}
4028
4029struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
4030                             int comp_vector_hint,
4031                             enum ib_poll_context poll_ctx);
4032
4033void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe);
4034
4035/**
4036 * ib_req_ncomp_notif - Request completion notification when there are
4037 *   at least the specified number of unreaped completions on the CQ.
4038 * @cq: The CQ to generate an event for.
4039 * @wc_cnt: The number of unreaped completions that should be on the
4040 *   CQ before an event is generated.
4041 */
4042static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
4043{
4044        return cq->device->ops.req_ncomp_notif ?
4045                cq->device->ops.req_ncomp_notif(cq, wc_cnt) :
4046                -ENOSYS;
4047}
4048
4049/**
4050 * ib_dma_mapping_error - check a DMA addr for error
4051 * @dev: The device for which the dma_addr was created
4052 * @dma_addr: The DMA address to check
4053 */
4054static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
4055{
4056        return dma_mapping_error(dev->dma_device, dma_addr);
4057}
4058
4059/**
4060 * ib_dma_map_single - Map a kernel virtual address to DMA address
4061 * @dev: The device for which the dma_addr is to be created
4062 * @cpu_addr: The kernel virtual address
4063 * @size: The size of the region in bytes
4064 * @direction: The direction of the DMA
4065 */
4066static inline u64 ib_dma_map_single(struct ib_device *dev,
4067                                    void *cpu_addr, size_t size,
4068                                    enum dma_data_direction direction)
4069{
4070        return dma_map_single(dev->dma_device, cpu_addr, size, direction);
4071}
4072
4073/**
4074 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
4075 * @dev: The device for which the DMA address was created
4076 * @addr: The DMA address
4077 * @size: The size of the region in bytes
4078 * @direction: The direction of the DMA
4079 */
4080static inline void ib_dma_unmap_single(struct ib_device *dev,
4081                                       u64 addr, size_t size,
4082                                       enum dma_data_direction direction)
4083{
4084        dma_unmap_single(dev->dma_device, addr, size, direction);
4085}
4086
4087/**
4088 * ib_dma_map_page - Map a physical page to DMA address
4089 * @dev: The device for which the dma_addr is to be created
4090 * @page: The page to be mapped
4091 * @offset: The offset within the page
4092 * @size: The size of the region in bytes
4093 * @direction: The direction of the DMA
4094 */
4095static inline u64 ib_dma_map_page(struct ib_device *dev,
4096                                  struct page *page,
4097                                  unsigned long offset,
4098                                  size_t size,
4099                                         enum dma_data_direction direction)
4100{
4101        return dma_map_page(dev->dma_device, page, offset, size, direction);
4102}
4103
4104/**
4105 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
4106 * @dev: The device for which the DMA address was created
4107 * @addr: The DMA address
4108 * @size: The size of the region in bytes
4109 * @direction: The direction of the DMA
4110 */
4111static inline void ib_dma_unmap_page(struct ib_device *dev,
4112                                     u64 addr, size_t size,
4113                                     enum dma_data_direction direction)
4114{
4115        dma_unmap_page(dev->dma_device, addr, size, direction);
4116}
4117
4118/**
4119 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
4120 * @dev: The device for which the DMA addresses are to be created
4121 * @sg: The array of scatter/gather entries
4122 * @nents: The number of scatter/gather entries
4123 * @direction: The direction of the DMA
4124 */
4125static inline int ib_dma_map_sg(struct ib_device *dev,
4126                                struct scatterlist *sg, int nents,
4127                                enum dma_data_direction direction)
4128{
4129        return dma_map_sg(dev->dma_device, sg, nents, direction);
4130}
4131
4132/**
4133 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
4134 * @dev: The device for which the DMA addresses were created
4135 * @sg: The array of scatter/gather entries
4136 * @nents: The number of scatter/gather entries
4137 * @direction: The direction of the DMA
4138 */
4139static inline void ib_dma_unmap_sg(struct ib_device *dev,
4140                                   struct scatterlist *sg, int nents,
4141                                   enum dma_data_direction direction)
4142{
4143        dma_unmap_sg(dev->dma_device, sg, nents, direction);
4144}
4145
4146static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
4147                                      struct scatterlist *sg, int nents,
4148                                      enum dma_data_direction direction,
4149                                      unsigned long dma_attrs)
4150{
4151        return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
4152                                dma_attrs);
4153}
4154
4155static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
4156                                         struct scatterlist *sg, int nents,
4157                                         enum dma_data_direction direction,
4158                                         unsigned long dma_attrs)
4159{
4160        dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
4161}
4162
4163/**
4164 * ib_dma_max_seg_size - Return the size limit of a single DMA transfer
4165 * @dev: The device to query
4166 *
4167 * The returned value represents a size in bytes.
4168 */
4169static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
4170{
4171        return dma_get_max_seg_size(dev->dma_device);
4172}
4173
4174/**
4175 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
4176 * @dev: The device for which the DMA address was created
4177 * @addr: The DMA address
4178 * @size: The size of the region in bytes
4179 * @dir: The direction of the DMA
4180 */
4181static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
4182                                              u64 addr,
4183                                              size_t size,
4184                                              enum dma_data_direction dir)
4185{
4186        dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
4187}
4188
4189/**
4190 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
4191 * @dev: The device for which the DMA address was created
4192 * @addr: The DMA address
4193 * @size: The size of the region in bytes
4194 * @dir: The direction of the DMA
4195 */
4196static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
4197                                                 u64 addr,
4198                                                 size_t size,
4199                                                 enum dma_data_direction dir)
4200{
4201        dma_sync_single_for_device(dev->dma_device, addr, size, dir);
4202}
4203
4204/**
4205 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
4206 * @dev: The device for which the DMA address is requested
4207 * @size: The size of the region to allocate in bytes
4208 * @dma_handle: A pointer for returning the DMA address of the region
4209 * @flag: memory allocator flags
4210 */
4211static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
4212                                           size_t size,
4213                                           dma_addr_t *dma_handle,
4214                                           gfp_t flag)
4215{
4216        return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
4217}
4218
4219/**
4220 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
4221 * @dev: The device for which the DMA addresses were allocated
4222 * @size: The size of the region
4223 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
4224 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
4225 */
4226static inline void ib_dma_free_coherent(struct ib_device *dev,
4227                                        size_t size, void *cpu_addr,
4228                                        dma_addr_t dma_handle)
4229{
4230        dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
4231}
4232
4233/* ib_reg_user_mr - register a memory region for virtual addresses from kernel
4234 * space. This function should be called when 'current' is the owning MM.
4235 */
4236struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
4237                             u64 virt_addr, int mr_access_flags);
4238
4239/* ib_advise_mr -  give an advice about an address range in a memory region */
4240int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
4241                 u32 flags, struct ib_sge *sg_list, u32 num_sge);
4242/**
4243 * ib_dereg_mr_user - Deregisters a memory region and removes it from the
4244 *   HCA translation table.
4245 * @mr: The memory region to deregister.
4246 * @udata: Valid user data or NULL for kernel object
4247 *
4248 * This function can fail, if the memory region has memory windows bound to it.
4249 */
4250int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata);
4251
4252/**
4253 * ib_dereg_mr - Deregisters a kernel memory region and removes it from the
4254 *   HCA translation table.
4255 * @mr: The memory region to deregister.
4256 *
4257 * This function can fail, if the memory region has memory windows bound to it.
4258 *
4259 * NOTE: for user mr use ib_dereg_mr_user with valid udata!
4260 */
4261static inline int ib_dereg_mr(struct ib_mr *mr)
4262{
4263        return ib_dereg_mr_user(mr, NULL);
4264}
4265
4266struct ib_mr *ib_alloc_mr_user(struct ib_pd *pd, enum ib_mr_type mr_type,
4267                               u32 max_num_sg, struct ib_udata *udata);
4268
4269static inline struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
4270                                        enum ib_mr_type mr_type, u32 max_num_sg)
4271{
4272        return ib_alloc_mr_user(pd, mr_type, max_num_sg, NULL);
4273}
4274
4275struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
4276                                    u32 max_num_data_sg,
4277                                    u32 max_num_meta_sg);
4278
4279/**
4280 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
4281 *   R_Key and L_Key.
4282 * @mr - struct ib_mr pointer to be updated.
4283 * @newkey - new key to be used.
4284 */
4285static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
4286{
4287        mr->lkey = (mr->lkey & 0xffffff00) | newkey;
4288        mr->rkey = (mr->rkey & 0xffffff00) | newkey;
4289}
4290
4291/**
4292 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
4293 * for calculating a new rkey for type 2 memory windows.
4294 * @rkey - the rkey to increment.
4295 */
4296static inline u32 ib_inc_rkey(u32 rkey)
4297{
4298        const u32 mask = 0x000000ff;
4299        return ((rkey + 1) & mask) | (rkey & ~mask);
4300}
4301
4302/**
4303 * ib_attach_mcast - Attaches the specified QP to a multicast group.
4304 * @qp: QP to attach to the multicast group.  The QP must be type
4305 *   IB_QPT_UD.
4306 * @gid: Multicast group GID.
4307 * @lid: Multicast group LID in host byte order.
4308 *
4309 * In order to send and receive multicast packets, subnet
4310 * administration must have created the multicast group and configured
4311 * the fabric appropriately.  The port associated with the specified
4312 * QP must also be a member of the multicast group.
4313 */
4314int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4315
4316/**
4317 * ib_detach_mcast - Detaches the specified QP from a multicast group.
4318 * @qp: QP to detach from the multicast group.
4319 * @gid: Multicast group GID.
4320 * @lid: Multicast group LID in host byte order.
4321 */
4322int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4323
4324/**
4325 * ib_alloc_xrcd - Allocates an XRC domain.
4326 * @device: The device on which to allocate the XRC domain.
4327 * @caller: Module name for kernel consumers
4328 */
4329struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
4330#define ib_alloc_xrcd(device) \
4331        __ib_alloc_xrcd((device), KBUILD_MODNAME)
4332
4333/**
4334 * ib_dealloc_xrcd - Deallocates an XRC domain.
4335 * @xrcd: The XRC domain to deallocate.
4336 * @udata: Valid user data or NULL for kernel object
4337 */
4338int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
4339
4340static inline int ib_check_mr_access(int flags)
4341{
4342        /*
4343         * Local write permission is required if remote write or
4344         * remote atomic permission is also requested.
4345         */
4346        if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
4347            !(flags & IB_ACCESS_LOCAL_WRITE))
4348                return -EINVAL;
4349
4350        if (flags & ~IB_ACCESS_SUPPORTED)
4351                return -EINVAL;
4352
4353        return 0;
4354}
4355
4356static inline bool ib_access_writable(int access_flags)
4357{
4358        /*
4359         * We have writable memory backing the MR if any of the following
4360         * access flags are set.  "Local write" and "remote write" obviously
4361         * require write access.  "Remote atomic" can do things like fetch and
4362         * add, which will modify memory, and "MW bind" can change permissions
4363         * by binding a window.
4364         */
4365        return access_flags &
4366                (IB_ACCESS_LOCAL_WRITE   | IB_ACCESS_REMOTE_WRITE |
4367                 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
4368}
4369
4370/**
4371 * ib_check_mr_status: lightweight check of MR status.
4372 *     This routine may provide status checks on a selected
4373 *     ib_mr. first use is for signature status check.
4374 *
4375 * @mr: A memory region.
4376 * @check_mask: Bitmask of which checks to perform from
4377 *     ib_mr_status_check enumeration.
4378 * @mr_status: The container of relevant status checks.
4379 *     failed checks will be indicated in the status bitmask
4380 *     and the relevant info shall be in the error item.
4381 */
4382int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
4383                       struct ib_mr_status *mr_status);
4384
4385/**
4386 * ib_device_try_get: Hold a registration lock
4387 * device: The device to lock
4388 *
4389 * A device under an active registration lock cannot become unregistered. It
4390 * is only possible to obtain a registration lock on a device that is fully
4391 * registered, otherwise this function returns false.
4392 *
4393 * The registration lock is only necessary for actions which require the
4394 * device to still be registered. Uses that only require the device pointer to
4395 * be valid should use get_device(&ibdev->dev) to hold the memory.
4396 *
4397 */
4398static inline bool ib_device_try_get(struct ib_device *dev)
4399{
4400        return refcount_inc_not_zero(&dev->refcount);
4401}
4402
4403void ib_device_put(struct ib_device *device);
4404struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
4405                                          enum rdma_driver_id driver_id);
4406struct ib_device *ib_device_get_by_name(const char *name,
4407                                        enum rdma_driver_id driver_id);
4408struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
4409                                            u16 pkey, const union ib_gid *gid,
4410                                            const struct sockaddr *addr);
4411int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
4412                         unsigned int port);
4413struct net_device *ib_device_netdev(struct ib_device *dev, u8 port);
4414
4415struct ib_wq *ib_create_wq(struct ib_pd *pd,
4416                           struct ib_wq_init_attr *init_attr);
4417int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
4418int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
4419                 u32 wq_attr_mask);
4420struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
4421                                                 struct ib_rwq_ind_table_init_attr*
4422                                                 wq_ind_table_init_attr);
4423int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
4424
4425int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4426                 unsigned int *sg_offset, unsigned int page_size);
4427int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
4428                    int data_sg_nents, unsigned int *data_sg_offset,
4429                    struct scatterlist *meta_sg, int meta_sg_nents,
4430                    unsigned int *meta_sg_offset, unsigned int page_size);
4431
4432static inline int
4433ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4434                  unsigned int *sg_offset, unsigned int page_size)
4435{
4436        int n;
4437
4438        n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
4439        mr->iova = 0;
4440
4441        return n;
4442}
4443
4444int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
4445                unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
4446
4447void ib_drain_rq(struct ib_qp *qp);
4448void ib_drain_sq(struct ib_qp *qp);
4449void ib_drain_qp(struct ib_qp *qp);
4450
4451int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);
4452
4453static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
4454{
4455        if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
4456                return attr->roce.dmac;
4457        return NULL;
4458}
4459
4460static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
4461{
4462        if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4463                attr->ib.dlid = (u16)dlid;
4464        else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4465                attr->opa.dlid = dlid;
4466}
4467
4468static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
4469{
4470        if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4471                return attr->ib.dlid;
4472        else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4473                return attr->opa.dlid;
4474        return 0;
4475}
4476
4477static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
4478{
4479        attr->sl = sl;
4480}
4481
4482static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
4483{
4484        return attr->sl;
4485}
4486
4487static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
4488                                         u8 src_path_bits)
4489{
4490        if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4491                attr->ib.src_path_bits = src_path_bits;
4492        else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4493                attr->opa.src_path_bits = src_path_bits;
4494}
4495
4496static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
4497{
4498        if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4499                return attr->ib.src_path_bits;
4500        else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4501                return attr->opa.src_path_bits;
4502        return 0;
4503}
4504
4505static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
4506                                        bool make_grd)
4507{
4508        if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4509                attr->opa.make_grd = make_grd;
4510}
4511
4512static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
4513{
4514        if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4515                return attr->opa.make_grd;
4516        return false;
4517}
4518
4519static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
4520{
4521        attr->port_num = port_num;
4522}
4523
4524static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
4525{
4526        return attr->port_num;
4527}
4528
4529static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
4530                                           u8 static_rate)
4531{
4532        attr->static_rate = static_rate;
4533}
4534
4535static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
4536{
4537        return attr->static_rate;
4538}
4539
4540static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
4541                                        enum ib_ah_flags flag)
4542{
4543        attr->ah_flags = flag;
4544}
4545
4546static inline enum ib_ah_flags
4547                rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
4548{
4549        return attr->ah_flags;
4550}
4551
4552static inline const struct ib_global_route
4553                *rdma_ah_read_grh(const struct rdma_ah_attr *attr)
4554{
4555        return &attr->grh;
4556}
4557
4558/*To retrieve and modify the grh */
4559static inline struct ib_global_route
4560                *rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
4561{
4562        return &attr->grh;
4563}
4564
4565static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
4566{
4567        struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4568
4569        memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
4570}
4571
4572static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
4573                                             __be64 prefix)
4574{
4575        struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4576
4577        grh->dgid.global.subnet_prefix = prefix;
4578}
4579
4580static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
4581                                            __be64 if_id)
4582{
4583        struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4584
4585        grh->dgid.global.interface_id = if_id;
4586}
4587
4588static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
4589                                   union ib_gid *dgid, u32 flow_label,
4590                                   u8 sgid_index, u8 hop_limit,
4591                                   u8 traffic_class)
4592{
4593        struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4594
4595        attr->ah_flags = IB_AH_GRH;
4596        if (dgid)
4597                grh->dgid = *dgid;
4598        grh->flow_label = flow_label;
4599        grh->sgid_index = sgid_index;
4600        grh->hop_limit = hop_limit;
4601        grh->traffic_class = traffic_class;
4602        grh->sgid_attr = NULL;
4603}
4604
4605void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);
4606void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
4607                             u32 flow_label, u8 hop_limit, u8 traffic_class,
4608                             const struct ib_gid_attr *sgid_attr);
4609void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
4610                       const struct rdma_ah_attr *src);
4611void rdma_replace_ah_attr(struct rdma_ah_attr *old,
4612                          const struct rdma_ah_attr *new);
4613void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);
4614
4615/**
4616 * rdma_ah_find_type - Return address handle type.
4617 *
4618 * @dev: Device to be checked
4619 * @port_num: Port number
4620 */
4621static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
4622                                                       u8 port_num)
4623{
4624        if (rdma_protocol_roce(dev, port_num))
4625                return RDMA_AH_ATTR_TYPE_ROCE;
4626        if (rdma_protocol_ib(dev, port_num)) {
4627                if (rdma_cap_opa_ah(dev, port_num))
4628                        return RDMA_AH_ATTR_TYPE_OPA;
4629                return RDMA_AH_ATTR_TYPE_IB;
4630        }
4631
4632        return RDMA_AH_ATTR_TYPE_UNDEFINED;
4633}
4634
4635/**
4636 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
4637 *     In the current implementation the only way to get
4638 *     get the 32bit lid is from other sources for OPA.
4639 *     For IB, lids will always be 16bits so cast the
4640 *     value accordingly.
4641 *
4642 * @lid: A 32bit LID
4643 */
4644static inline u16 ib_lid_cpu16(u32 lid)
4645{
4646        WARN_ON_ONCE(lid & 0xFFFF0000);
4647        return (u16)lid;
4648}
4649
4650/**
4651 * ib_lid_be16 - Return lid in 16bit BE encoding.
4652 *
4653 * @lid: A 32bit LID
4654 */
4655static inline __be16 ib_lid_be16(u32 lid)
4656{
4657        WARN_ON_ONCE(lid & 0xFFFF0000);
4658        return cpu_to_be16((u16)lid);
4659}
4660
4661/**
4662 * ib_get_vector_affinity - Get the affinity mappings of a given completion
4663 *   vector
4664 * @device:         the rdma device
4665 * @comp_vector:    index of completion vector
4666 *
4667 * Returns NULL on failure, otherwise a corresponding cpu map of the
4668 * completion vector (returns all-cpus map if the device driver doesn't
4669 * implement get_vector_affinity).
4670 */
4671static inline const struct cpumask *
4672ib_get_vector_affinity(struct ib_device *device, int comp_vector)
4673{
4674        if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
4675            !device->ops.get_vector_affinity)
4676                return NULL;
4677
4678        return device->ops.get_vector_affinity(device, comp_vector);
4679
4680}
4681
4682/**
4683 * rdma_roce_rescan_device - Rescan all of the network devices in the system
4684 * and add their gids, as needed, to the relevant RoCE devices.
4685 *
4686 * @device:         the rdma device
4687 */
4688void rdma_roce_rescan_device(struct ib_device *ibdev);
4689
4690struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
4691
4692int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
4693
4694struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
4695                                     enum rdma_netdev_t type, const char *name,
4696                                     unsigned char name_assign_type,
4697                                     void (*setup)(struct net_device *));
4698
4699int rdma_init_netdev(struct ib_device *device, u8 port_num,
4700                     enum rdma_netdev_t type, const char *name,
4701                     unsigned char name_assign_type,
4702                     void (*setup)(struct net_device *),
4703                     struct net_device *netdev);
4704
4705/**
4706 * rdma_set_device_sysfs_group - Set device attributes group to have
4707 *                               driver specific sysfs entries at
4708 *                               for infiniband class.
4709 *
4710 * @device:     device pointer for which attributes to be created
4711 * @group:      Pointer to group which should be added when device
4712 *              is registered with sysfs.
4713 * rdma_set_device_sysfs_group() allows existing drivers to expose one
4714 * group per device to have sysfs attributes.
4715 *
4716 * NOTE: New drivers should not make use of this API; instead new device
4717 * parameter should be exposed via netlink command. This API and mechanism
4718 * exist only for existing drivers.
4719 */
4720static inline void
4721rdma_set_device_sysfs_group(struct ib_device *dev,
4722                            const struct attribute_group *group)
4723{
4724        dev->groups[1] = group;
4725}
4726
4727/**
4728 * rdma_device_to_ibdev - Get ib_device pointer from device pointer
4729 *
4730 * @device:     device pointer for which ib_device pointer to retrieve
4731 *
4732 * rdma_device_to_ibdev() retrieves ib_device pointer from device.
4733 *
4734 */
4735static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
4736{
4737        struct ib_core_device *coredev =
4738                container_of(device, struct ib_core_device, dev);
4739
4740        return coredev->owner;
4741}
4742
4743/**
4744 * rdma_device_to_drv_device - Helper macro to reach back to driver's
4745 *                             ib_device holder structure from device pointer.
4746 *
4747 * NOTE: New drivers should not make use of this API; This API is only for
4748 * existing drivers who have exposed sysfs entries using
4749 * rdma_set_device_sysfs_group().
4750 */
4751#define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member)           \
4752        container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)
4753
4754bool rdma_dev_access_netns(const struct ib_device *device,
4755                           const struct net *net);
4756
4757#define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
4758#define IB_GRH_FLOWLABEL_MASK (0x000FFFFF)
4759
4760/**
4761 * rdma_flow_label_to_udp_sport - generate a RoCE v2 UDP src port value based
4762 *                               on the flow_label
4763 *
4764 * This function will convert the 20 bit flow_label input to a valid RoCE v2
4765 * UDP src port 14 bit value. All RoCE V2 drivers should use this same
4766 * convention.
4767 */
4768static inline u16 rdma_flow_label_to_udp_sport(u32 fl)
4769{
4770        u32 fl_low = fl & 0x03fff, fl_high = fl & 0xFC000;
4771
4772        fl_low ^= fl_high >> 14;
4773        return (u16)(fl_low | IB_ROCE_UDP_ENCAP_VALID_PORT_MIN);
4774}
4775
4776/**
4777 * rdma_calc_flow_label - generate a RDMA symmetric flow label value based on
4778 *                        local and remote qpn values
4779 *
4780 * This function folded the multiplication results of two qpns, 24 bit each,
4781 * fields, and converts it to a 20 bit results.
4782 *
4783 * This function will create symmetric flow_label value based on the local
4784 * and remote qpn values. this will allow both the requester and responder
4785 * to calculate the same flow_label for a given connection.
4786 *
4787 * This helper function should be used by driver in case the upper layer
4788 * provide a zero flow_label value. This is to improve entropy of RDMA
4789 * traffic in the network.
4790 */
4791static inline u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn)
4792{
4793        u64 v = (u64)lqpn * rqpn;
4794
4795        v ^= v >> 20;
4796        v ^= v >> 40;
4797
4798        return (u32)(v & IB_GRH_FLOWLABEL_MASK);
4799}
4800#endif /* IB_VERBS_H */
4801