linux/include/rdma/ib_verbs.h
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 */

#ifndef IB_VERBS_H
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/refcount.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <linux/irqflags.h>
#include <linux/preempt.h>
#include <linux/dim.h>
#include <uapi/rdma/ib_user_verbs.h>
#include <rdma/rdma_counter.h>
#include <rdma/restrack.h>
#include <rdma/signature.h>
#include <uapi/rdma/rdma_user_ioctl.h>
#include <uapi/rdma/ib_user_ioctl_verbs.h>

#define IB_FW_VERSION_NAME_MAX  ETHTOOL_FWVERS_LEN

struct ib_umem_odp;
struct ib_uqp_object;
struct ib_usrq_object;
struct ib_uwq_object;
struct rdma_cm_id;

extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
extern struct workqueue_struct *ib_comp_unbound_wq;

struct ib_ucq_object;

__printf(3, 4) __cold
void ibdev_printk(const char *level, const struct ib_device *ibdev,
                  const char *format, ...);
__printf(2, 3) __cold
void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_info(const struct ib_device *ibdev, const char *format, ...);

#if defined(CONFIG_DYNAMIC_DEBUG) || \
        (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define ibdev_dbg(__dev, format, args...)                       \
        dynamic_ibdev_dbg(__dev, format, ##args)
#else
__printf(2, 3) __cold
static inline
void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
#endif

#define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...)           \
do {                                                                    \
        static DEFINE_RATELIMIT_STATE(_rs,                              \
                                      DEFAULT_RATELIMIT_INTERVAL,       \
                                      DEFAULT_RATELIMIT_BURST);         \
        if (__ratelimit(&_rs))                                          \
                ibdev_level(ibdev, fmt, ##__VA_ARGS__);                 \
} while (0)

#define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
        ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_alert_ratelimited(ibdev, fmt, ...) \
        ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_crit_ratelimited(ibdev, fmt, ...) \
        ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_err_ratelimited(ibdev, fmt, ...) \
        ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_warn_ratelimited(ibdev, fmt, ...) \
        ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_notice_ratelimited(ibdev, fmt, ...) \
        ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_info_ratelimited(ibdev, fmt, ...) \
        ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)

#if defined(CONFIG_DYNAMIC_DEBUG) || \
        (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
/* descriptor check is first to prevent flooding with "callbacks suppressed" */
#define ibdev_dbg_ratelimited(ibdev, fmt, ...)                          \
do {                                                                    \
        static DEFINE_RATELIMIT_STATE(_rs,                              \
                                      DEFAULT_RATELIMIT_INTERVAL,       \
                                      DEFAULT_RATELIMIT_BURST);         \
        DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);                 \
        if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs))      \
                __dynamic_ibdev_dbg(&descriptor, ibdev, fmt,            \
                                    ##__VA_ARGS__);                     \
} while (0)
#else
__printf(2, 3) __cold
static inline
void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {}
#endif
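
/*
 * Illustrative sketch, not part of this header: the ibdev_* helpers
 * prefix messages with the device name, and the *_ratelimited variants
 * bound message volume on hot paths.  The "ret" and "qpn" variables
 * below are hypothetical.
 *
 *      ibdev_err(ibdev, "failed to create CQ, err %d\n", ret);
 *      ibdev_dbg_ratelimited(ibdev, "dropped completion on QP %u\n", qpn);
 */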

union ib_gid {
        u8      raw[16];
        struct {
                __be64  subnet_prefix;
                __be64  interface_id;
        } global;
};

extern union ib_gid zgid;

enum ib_gid_type {
        IB_GID_TYPE_IB = IB_UVERBS_GID_TYPE_IB,
        IB_GID_TYPE_ROCE = IB_UVERBS_GID_TYPE_ROCE_V1,
        IB_GID_TYPE_ROCE_UDP_ENCAP = IB_UVERBS_GID_TYPE_ROCE_V2,
        IB_GID_TYPE_SIZE
};

#define ROCE_V2_UDP_DPORT      4791
struct ib_gid_attr {
        struct net_device __rcu *ndev;
        struct ib_device        *device;
        union ib_gid            gid;
        enum ib_gid_type        gid_type;
        u16                     index;
        u8                      port_num;
};

enum {
        /* set the local administered indication */
        IB_SA_WELL_KNOWN_GUID   = BIT_ULL(57) | 2,
};

enum rdma_transport_type {
        RDMA_TRANSPORT_IB,
        RDMA_TRANSPORT_IWARP,
        RDMA_TRANSPORT_USNIC,
        RDMA_TRANSPORT_USNIC_UDP,
        RDMA_TRANSPORT_UNSPECIFIED,
};

enum rdma_protocol_type {
        RDMA_PROTOCOL_IB,
        RDMA_PROTOCOL_IBOE,
        RDMA_PROTOCOL_IWARP,
        RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(unsigned int node_type);

enum rdma_network_type {
        RDMA_NETWORK_IB,
        RDMA_NETWORK_ROCE_V1,
        RDMA_NETWORK_IPV4,
        RDMA_NETWORK_IPV6
};

static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
{
        if (network_type == RDMA_NETWORK_IPV4 ||
            network_type == RDMA_NETWORK_IPV6)
                return IB_GID_TYPE_ROCE_UDP_ENCAP;
        else if (network_type == RDMA_NETWORK_ROCE_V1)
                return IB_GID_TYPE_ROCE;
        else
                return IB_GID_TYPE_IB;
}

static inline enum rdma_network_type
rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
{
        if (attr->gid_type == IB_GID_TYPE_IB)
                return RDMA_NETWORK_IB;

        if (attr->gid_type == IB_GID_TYPE_ROCE)
                return RDMA_NETWORK_ROCE_V1;

        if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
                return RDMA_NETWORK_IPV4;
        else
                return RDMA_NETWORK_IPV6;
}

enum rdma_link_layer {
        IB_LINK_LAYER_UNSPECIFIED,
        IB_LINK_LAYER_INFINIBAND,
        IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
        IB_DEVICE_RESIZE_MAX_WR                 = (1 << 0),
        IB_DEVICE_BAD_PKEY_CNTR                 = (1 << 1),
        IB_DEVICE_BAD_QKEY_CNTR                 = (1 << 2),
        IB_DEVICE_RAW_MULTI                     = (1 << 3),
        IB_DEVICE_AUTO_PATH_MIG                 = (1 << 4),
        IB_DEVICE_CHANGE_PHY_PORT               = (1 << 5),
        IB_DEVICE_UD_AV_PORT_ENFORCE            = (1 << 6),
        IB_DEVICE_CURR_QP_STATE_MOD             = (1 << 7),
        IB_DEVICE_SHUTDOWN_PORT                 = (1 << 8),
        /* Not in use, former INIT_TYPE         = (1 << 9),*/
        IB_DEVICE_PORT_ACTIVE_EVENT             = (1 << 10),
        IB_DEVICE_SYS_IMAGE_GUID                = (1 << 11),
        IB_DEVICE_RC_RNR_NAK_GEN                = (1 << 12),
        IB_DEVICE_SRQ_RESIZE                    = (1 << 13),
        IB_DEVICE_N_NOTIFY_CQ                   = (1 << 14),

        /*
         * This device supports a per-device lkey or stag that can be
         * used without performing a memory registration for the local
         * memory.  Note that ULPs should never check this flag, but
         * should instead use the local_dma_lkey field in the ib_pd
         * structure, which always contains a usable lkey.
         */
        IB_DEVICE_LOCAL_DMA_LKEY                = (1 << 15),
        /* Reserved, old SEND_W_INV             = (1 << 16),*/
        IB_DEVICE_MEM_WINDOW                    = (1 << 17),
        /*
         * Devices should set IB_DEVICE_UD_IP_CSUM if they support
         * insertion of UDP and TCP checksum on outgoing UD IPoIB
         * messages and can verify the validity of checksum for
         * incoming messages.  Setting this flag implies that the
         * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
         */
        IB_DEVICE_UD_IP_CSUM                    = (1 << 18),
        IB_DEVICE_UD_TSO                        = (1 << 19),
        IB_DEVICE_XRC                           = (1 << 20),

        /*
         * This device supports the IB "base memory management extension",
         * which includes support for fast registrations (IB_WR_REG_MR,
         * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
         * also be set by any iWARP device which must support FRs to comply
         * with the iWARP verbs spec.  iWARP devices also support the
         * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
         * stag.
         */
        IB_DEVICE_MEM_MGT_EXTENSIONS            = (1 << 21),
        IB_DEVICE_BLOCK_MULTICAST_LOOPBACK      = (1 << 22),
        IB_DEVICE_MEM_WINDOW_TYPE_2A            = (1 << 23),
        IB_DEVICE_MEM_WINDOW_TYPE_2B            = (1 << 24),
        IB_DEVICE_RC_IP_CSUM                    = (1 << 25),
        /* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
        IB_DEVICE_RAW_IP_CSUM                   = (1 << 26),
        /*
         * Devices should set IB_DEVICE_CROSS_CHANNEL if they
         * support execution of WQEs that involve synchronization
         * of I/O operations with a single completion queue managed
         * by hardware.
         */
        IB_DEVICE_CROSS_CHANNEL                 = (1 << 27),
        IB_DEVICE_MANAGED_FLOW_STEERING         = (1 << 29),
        IB_DEVICE_INTEGRITY_HANDOVER            = (1 << 30),
        IB_DEVICE_ON_DEMAND_PAGING              = (1ULL << 31),
        IB_DEVICE_SG_GAPS_REG                   = (1ULL << 32),
        IB_DEVICE_VIRTUAL_FUNCTION              = (1ULL << 33),
        /* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
        IB_DEVICE_RAW_SCATTER_FCS               = (1ULL << 34),
        IB_DEVICE_RDMA_NETDEV_OPA               = (1ULL << 35),
        /* The device supports padding incoming writes to cacheline. */
        IB_DEVICE_PCI_WRITE_END_PADDING         = (1ULL << 36),
        IB_DEVICE_ALLOW_USER_UNREG              = (1ULL << 37),
};

enum ib_atomic_cap {
        IB_ATOMIC_NONE,
        IB_ATOMIC_HCA,
        IB_ATOMIC_GLOB
};

enum ib_odp_general_cap_bits {
        IB_ODP_SUPPORT          = 1 << 0,
        IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
};

enum ib_odp_transport_cap_bits {
        IB_ODP_SUPPORT_SEND     = 1 << 0,
        IB_ODP_SUPPORT_RECV     = 1 << 1,
        IB_ODP_SUPPORT_WRITE    = 1 << 2,
        IB_ODP_SUPPORT_READ     = 1 << 3,
        IB_ODP_SUPPORT_ATOMIC   = 1 << 4,
        IB_ODP_SUPPORT_SRQ_RECV = 1 << 5,
};

struct ib_odp_caps {
        uint64_t general_caps;
        struct {
                uint32_t  rc_odp_caps;
                uint32_t  uc_odp_caps;
                uint32_t  ud_odp_caps;
                uint32_t  xrc_odp_caps;
        } per_transport_caps;
};

struct ib_rss_caps {
        /* Corresponding bit will be set if qp type from
         * 'enum ib_qp_type' is supported, e.g.
         * supported_qpts |= 1 << IB_QPT_UD
         */
        u32 supported_qpts;
        u32 max_rwq_indirection_tables;
        u32 max_rwq_indirection_table_size;
};

enum ib_tm_cap_flags {
        /*  Support tag matching with rendezvous offload for RC transport */
        IB_TM_CAP_RNDV_RC = 1 << 0,
};

struct ib_tm_caps {
        /* Max size of RNDV header */
        u32 max_rndv_hdr_size;
        /* Max number of entries in tag matching list */
        u32 max_num_tags;
        /* From enum ib_tm_cap_flags */
        u32 flags;
        /* Max number of outstanding list operations */
        u32 max_ops;
        /* Max number of SGE in tag matching entry */
        u32 max_sge;
};

struct ib_cq_init_attr {
        unsigned int    cqe;
        u32             comp_vector;
        u32             flags;
};

enum ib_cq_attr_mask {
        IB_CQ_MODERATE = 1 << 0,
};

struct ib_cq_caps {
        u16     max_cq_moderation_count;
        u16     max_cq_moderation_period;
};

struct ib_dm_mr_attr {
        u64             length;
        u64             offset;
        u32             access_flags;
};

struct ib_dm_alloc_attr {
        u64     length;
        u32     alignment;
        u32     flags;
};

struct ib_device_attr {
        u64                     fw_ver;
        __be64                  sys_image_guid;
        u64                     max_mr_size;
        u64                     page_size_cap;
        u32                     vendor_id;
        u32                     vendor_part_id;
        u32                     hw_ver;
        int                     max_qp;
        int                     max_qp_wr;
        u64                     device_cap_flags;
        int                     max_send_sge;
        int                     max_recv_sge;
        int                     max_sge_rd;
        int                     max_cq;
        int                     max_cqe;
        int                     max_mr;
        int                     max_pd;
        int                     max_qp_rd_atom;
        int                     max_ee_rd_atom;
        int                     max_res_rd_atom;
        int                     max_qp_init_rd_atom;
        int                     max_ee_init_rd_atom;
        enum ib_atomic_cap      atomic_cap;
        enum ib_atomic_cap      masked_atomic_cap;
        int                     max_ee;
        int                     max_rdd;
        int                     max_mw;
        int                     max_raw_ipv6_qp;
        int                     max_raw_ethy_qp;
        int                     max_mcast_grp;
        int                     max_mcast_qp_attach;
        int                     max_total_mcast_qp_attach;
        int                     max_ah;
        int                     max_srq;
        int                     max_srq_wr;
        int                     max_srq_sge;
        unsigned int            max_fast_reg_page_list_len;
        unsigned int            max_pi_fast_reg_page_list_len;
        u16                     max_pkeys;
        u8                      local_ca_ack_delay;
        int                     sig_prot_cap;
        int                     sig_guard_cap;
        struct ib_odp_caps      odp_caps;
        uint64_t                timestamp_mask;
        uint64_t                hca_core_clock; /* in kHz */
        struct ib_rss_caps      rss_caps;
        u32                     max_wq_type_rq;
        u32                     raw_packet_caps; /* Use ib_raw_packet_caps enum */
        struct ib_tm_caps       tm_caps;
        struct ib_cq_caps       cq_caps;
        u64                     max_dm_size;
        /* Max entries for sgl for optimized performance per READ */
        u32                     max_sgl_rd;
};
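
/*
 * Illustrative sketch, not part of this header: a ULP would typically
 * test device_cap_flags from the device's cached attributes before
 * relying on an optional feature.  "dev" is a hypothetical ib_device
 * pointer; its attrs member is filled in by the core at registration.
 *
 *      if (!(dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
 *              return -EOPNOTSUPP;
 */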

enum ib_mtu {
        IB_MTU_256  = 1,
        IB_MTU_512  = 2,
        IB_MTU_1024 = 3,
        IB_MTU_2048 = 4,
        IB_MTU_4096 = 5
};

enum opa_mtu {
        OPA_MTU_8192 = 6,
        OPA_MTU_10240 = 7
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
        switch (mtu) {
        case IB_MTU_256:  return  256;
        case IB_MTU_512:  return  512;
        case IB_MTU_1024: return 1024;
        case IB_MTU_2048: return 2048;
        case IB_MTU_4096: return 4096;
        default:          return -1;
        }
}

static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
        if (mtu >= 4096)
                return IB_MTU_4096;
        else if (mtu >= 2048)
                return IB_MTU_2048;
        else if (mtu >= 1024)
                return IB_MTU_1024;
        else if (mtu >= 512)
                return IB_MTU_512;
        else
                return IB_MTU_256;
}

static inline int opa_mtu_enum_to_int(enum opa_mtu mtu)
{
        switch (mtu) {
        case OPA_MTU_8192:
                return 8192;
        case OPA_MTU_10240:
                return 10240;
        default:
                return ib_mtu_enum_to_int((enum ib_mtu)mtu);
        }
}

static inline enum opa_mtu opa_mtu_int_to_enum(int mtu)
{
        if (mtu >= 10240)
                return OPA_MTU_10240;
        else if (mtu >= 8192)
                return OPA_MTU_8192;
        else
                return (enum opa_mtu)ib_mtu_int_to_enum(mtu);
}
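
/*
 * Illustrative sketch, not part of this header: converting between byte
 * MTUs and the enum space.  An MTU in bytes rounds down to the largest
 * enum value that fits:
 *
 *      enum ib_mtu mtu = ib_mtu_int_to_enum(1500);  (yields IB_MTU_1024)
 *      int bytes = ib_mtu_enum_to_int(mtu);         (yields 1024)
 */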

enum ib_port_state {
        IB_PORT_NOP             = 0,
        IB_PORT_DOWN            = 1,
        IB_PORT_INIT            = 2,
        IB_PORT_ARMED           = 3,
        IB_PORT_ACTIVE          = 4,
        IB_PORT_ACTIVE_DEFER    = 5
};

enum ib_port_phys_state {
        IB_PORT_PHYS_STATE_SLEEP = 1,
        IB_PORT_PHYS_STATE_POLLING = 2,
        IB_PORT_PHYS_STATE_DISABLED = 3,
        IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
        IB_PORT_PHYS_STATE_LINK_UP = 5,
        IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
        IB_PORT_PHYS_STATE_PHY_TEST = 7,
};

enum ib_port_width {
        IB_WIDTH_1X     = 1,
        IB_WIDTH_2X     = 16,
        IB_WIDTH_4X     = 2,
        IB_WIDTH_8X     = 4,
        IB_WIDTH_12X    = 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
        switch (width) {
        case IB_WIDTH_1X:  return  1;
        case IB_WIDTH_2X:  return  2;
        case IB_WIDTH_4X:  return  4;
        case IB_WIDTH_8X:  return  8;
        case IB_WIDTH_12X: return 12;
        default:          return -1;
        }
}

enum ib_port_speed {
        IB_SPEED_SDR    = 1,
        IB_SPEED_DDR    = 2,
        IB_SPEED_QDR    = 4,
        IB_SPEED_FDR10  = 8,
        IB_SPEED_FDR    = 16,
        IB_SPEED_EDR    = 32,
        IB_SPEED_HDR    = 64,
        IB_SPEED_NDR    = 128,
};

/**
 * struct rdma_hw_stats
 * @lock - Mutex to protect parallel write access to lifespan and values
 *    of counters, which are 64 bits and not guaranteed to be written
 *    atomically on 32-bit systems.
 * @timestamp - Used by the core code to track when the last update was
 * @lifespan - Used by the core code to determine how old the counters
 *   should be before being updated again.  Stored in jiffies, defaults
 *   to 10 milliseconds; drivers can override the default by specifying
 *   their own value during their allocation routine.
 * @names - Array of pointers to static names used for the counters in
 *   the sysfs directory.
 * @num_counters - How many hardware counters there are.  If @names is
 *   shorter than this number, a kernel oops will result.  Driver authors
 *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@names) < num_counters)
 *   in their code to prevent this.
 * @value - Array of u64 counters that are accessed by the sysfs code and
 *   filled in by the driver's get_stats routine
 */
struct rdma_hw_stats {
        struct mutex    lock; /* Protect lifespan and values[] */
        unsigned long   timestamp;
        unsigned long   lifespan;
        const char * const *names;
        int             num_counters;
        u64             value[];
};

#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
/**
 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
 *   for drivers.
 * @names - Array of static const char *
 * @num_counters - How many elements in the array
 * @lifespan - How many milliseconds between updates
 */
static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
                const char * const *names, int num_counters,
                unsigned long lifespan)
{
        struct rdma_hw_stats *stats;

        stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
                        GFP_KERNEL);
        if (!stats)
                return NULL;
        stats->names = names;
        stats->num_counters = num_counters;
        stats->lifespan = msecs_to_jiffies(lifespan);

        return stats;
}
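
/*
 * Illustrative sketch, not part of this header: a driver allocating its
 * hardware-counter container and honouring the BUILD_BUG_ON advice from
 * the rdma_hw_stats kernel-doc above.  The counter names and the
 * MY_NUM_COUNTERS constant are hypothetical.
 *
 *      static const char * const my_counter_names[] = {
 *              "rx_packets", "tx_packets",
 *      };
 *
 *      BUILD_BUG_ON(ARRAY_SIZE(my_counter_names) < MY_NUM_COUNTERS);
 *      stats = rdma_alloc_hw_stats_struct(my_counter_names,
 *                                         ARRAY_SIZE(my_counter_names),
 *                                         RDMA_HW_STATS_DEFAULT_LIFESPAN);
 */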


/* Define bits for the various functionality this port needs to be supported by
 * the core.
 */
/* Management                           0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD            0x00000001
#define RDMA_CORE_CAP_IB_SMI            0x00000002
#define RDMA_CORE_CAP_IB_CM             0x00000004
#define RDMA_CORE_CAP_IW_CM             0x00000008
#define RDMA_CORE_CAP_IB_SA             0x00000010
#define RDMA_CORE_CAP_OPA_MAD           0x00000020

/* Address format                       0x000FF000 */
#define RDMA_CORE_CAP_AF_IB             0x00001000
#define RDMA_CORE_CAP_ETH_AH            0x00002000
#define RDMA_CORE_CAP_OPA_AH            0x00004000
#define RDMA_CORE_CAP_IB_GRH_REQUIRED   0x00008000

/* Protocol                             0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB           0x00100000
#define RDMA_CORE_CAP_PROT_ROCE         0x00200000
#define RDMA_CORE_CAP_PROT_IWARP        0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
#define RDMA_CORE_CAP_PROT_RAW_PACKET   0x01000000
#define RDMA_CORE_CAP_PROT_USNIC        0x02000000

#define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
                                        | RDMA_CORE_CAP_PROT_ROCE     \
                                        | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)

#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
                                        | RDMA_CORE_CAP_IB_MAD \
                                        | RDMA_CORE_CAP_IB_SMI \
                                        | RDMA_CORE_CAP_IB_CM  \
                                        | RDMA_CORE_CAP_IB_SA  \
                                        | RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
                                        | RDMA_CORE_CAP_IB_MAD  \
                                        | RDMA_CORE_CAP_IB_CM   \
                                        | RDMA_CORE_CAP_AF_IB   \
                                        | RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP                       \
                                        (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
                                        | RDMA_CORE_CAP_IB_MAD  \
                                        | RDMA_CORE_CAP_IB_CM   \
                                        | RDMA_CORE_CAP_AF_IB   \
                                        | RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
                                        | RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
                                        | RDMA_CORE_CAP_OPA_MAD)

#define RDMA_CORE_PORT_RAW_PACKET       (RDMA_CORE_CAP_PROT_RAW_PACKET)

#define RDMA_CORE_PORT_USNIC            (RDMA_CORE_CAP_PROT_USNIC)

struct ib_port_attr {
        u64                     subnet_prefix;
        enum ib_port_state      state;
        enum ib_mtu             max_mtu;
        enum ib_mtu             active_mtu;
        u32                     phys_mtu;
        int                     gid_tbl_len;
        unsigned int            ip_gids:1;
        /* This is the value from PortInfo CapabilityMask, defined by IBA */
        u32                     port_cap_flags;
        u32                     max_msg_sz;
        u32                     bad_pkey_cntr;
        u32                     qkey_viol_cntr;
        u16                     pkey_tbl_len;
        u32                     sm_lid;
        u32                     lid;
        u8                      lmc;
        u8                      max_vl_num;
        u8                      sm_sl;
        u8                      subnet_timeout;
        u8                      init_type_reply;
        u8                      active_width;
        u16                     active_speed;
        u8                      phys_state;
        u16                     port_cap_flags2;
};

enum ib_device_modify_flags {
        IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
        IB_DEVICE_MODIFY_NODE_DESC      = 1 << 1
};

#define IB_DEVICE_NODE_DESC_MAX 64

struct ib_device_modify {
        u64     sys_image_guid;
        char    node_desc[IB_DEVICE_NODE_DESC_MAX];
};

enum ib_port_modify_flags {
        IB_PORT_SHUTDOWN                = 1,
        IB_PORT_INIT_TYPE               = (1<<2),
        IB_PORT_RESET_QKEY_CNTR         = (1<<3),
        IB_PORT_OPA_MASK_CHG            = (1<<4)
};

struct ib_port_modify {
        u32     set_port_cap_mask;
        u32     clr_port_cap_mask;
        u8      init_type;
};

enum ib_event_type {
        IB_EVENT_CQ_ERR,
        IB_EVENT_QP_FATAL,
        IB_EVENT_QP_REQ_ERR,
        IB_EVENT_QP_ACCESS_ERR,
        IB_EVENT_COMM_EST,
        IB_EVENT_SQ_DRAINED,
        IB_EVENT_PATH_MIG,
        IB_EVENT_PATH_MIG_ERR,
        IB_EVENT_DEVICE_FATAL,
        IB_EVENT_PORT_ACTIVE,
        IB_EVENT_PORT_ERR,
        IB_EVENT_LID_CHANGE,
        IB_EVENT_PKEY_CHANGE,
        IB_EVENT_SM_CHANGE,
        IB_EVENT_SRQ_ERR,
        IB_EVENT_SRQ_LIMIT_REACHED,
        IB_EVENT_QP_LAST_WQE_REACHED,
        IB_EVENT_CLIENT_REREGISTER,
        IB_EVENT_GID_CHANGE,
        IB_EVENT_WQ_FATAL,
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event);

struct ib_event {
        struct ib_device        *device;
        union {
                struct ib_cq    *cq;
                struct ib_qp    *qp;
                struct ib_srq   *srq;
                struct ib_wq    *wq;
                u8              port_num;
        } element;
        enum ib_event_type      event;
};

struct ib_event_handler {
        struct ib_device *device;
        void            (*handler)(struct ib_event_handler *, struct ib_event *);
        struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)          \
        do {                                                    \
                (_ptr)->device  = _device;                      \
                (_ptr)->handler = _handler;                     \
                INIT_LIST_HEAD(&(_ptr)->list);                  \
        } while (0)
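
/*
 * Illustrative sketch, not part of this header: registering an async
 * event handler.  ib_register_event_handler() is declared further down
 * in this header; my_event_handler and the handler variable are
 * hypothetical.
 *
 *      static void my_event_handler(struct ib_event_handler *handler,
 *                                   struct ib_event *event)
 *      {
 *              pr_info("async event: %s\n", ib_event_msg(event->event));
 *      }
 *
 *      INIT_IB_EVENT_HANDLER(&handler, device, my_event_handler);
 *      ib_register_event_handler(&handler);
 */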

struct ib_global_route {
        const struct ib_gid_attr *sgid_attr;
        union ib_gid    dgid;
        u32             flow_label;
        u8              sgid_index;
        u8              hop_limit;
        u8              traffic_class;
};

struct ib_grh {
        __be32          version_tclass_flow;
        __be16          paylen;
        u8              next_hdr;
        u8              hop_limit;
        union ib_gid    sgid;
        union ib_gid    dgid;
};

union rdma_network_hdr {
        struct ib_grh ibgrh;
        struct {
                /* The IB spec states that when the network header is
                 * IPv4, it occupies the last 20 bytes of the 40-byte
                 * GRH space.
                 */
                u8              reserved[20];
                struct iphdr    roce4grh;
        };
};

#define IB_QPN_MASK             0xFFFFFF

enum {
        IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE       cpu_to_be16(0xFFFF)
#define IB_MULTICAST_LID_BASE   cpu_to_be16(0xC000)

enum ib_ah_flags {
        IB_AH_GRH       = 1
};

enum ib_rate {
        IB_RATE_PORT_CURRENT = 0,
        IB_RATE_2_5_GBPS = 2,
        IB_RATE_5_GBPS   = 5,
        IB_RATE_10_GBPS  = 3,
        IB_RATE_20_GBPS  = 6,
        IB_RATE_30_GBPS  = 4,
        IB_RATE_40_GBPS  = 7,
        IB_RATE_60_GBPS  = 8,
        IB_RATE_80_GBPS  = 9,
        IB_RATE_120_GBPS = 10,
        IB_RATE_14_GBPS  = 11,
        IB_RATE_56_GBPS  = 12,
        IB_RATE_112_GBPS = 13,
        IB_RATE_168_GBPS = 14,
        IB_RATE_25_GBPS  = 15,
        IB_RATE_100_GBPS = 16,
        IB_RATE_200_GBPS = 17,
        IB_RATE_300_GBPS = 18,
        IB_RATE_28_GBPS  = 19,
        IB_RATE_50_GBPS  = 20,
        IB_RATE_400_GBPS = 21,
        IB_RATE_600_GBPS = 22,
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);

/**
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG:       memory region that is used for
 *                            normal registration
 * @IB_MR_TYPE_SG_GAPS:       memory region that is capable of registering
 *                            arbitrary SG lists (without the normal MR
 *                            constraints - see ib_map_mr_sg)
 * @IB_MR_TYPE_DM:            memory region that is used for device
 *                            memory registration
 * @IB_MR_TYPE_USER:          memory region that is used by a user-space
 *                            application
 * @IB_MR_TYPE_DMA:           memory region that is used for DMA operations
 *                            without address translation (VA == PA)
 * @IB_MR_TYPE_INTEGRITY:     memory region that is used for
 *                            data integrity operations
 */
enum ib_mr_type {
        IB_MR_TYPE_MEM_REG,
        IB_MR_TYPE_SG_GAPS,
        IB_MR_TYPE_DM,
        IB_MR_TYPE_USER,
        IB_MR_TYPE_DMA,
        IB_MR_TYPE_INTEGRITY,
};

enum ib_mr_status_check {
        IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
        u32                 fail_status;
        struct ib_sig_err   sig_err;
};

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

struct rdma_ah_init_attr {
        struct rdma_ah_attr *ah_attr;
        u32 flags;
        struct net_device *xmit_slave;
};

enum rdma_ah_attr_type {
        RDMA_AH_ATTR_TYPE_UNDEFINED,
        RDMA_AH_ATTR_TYPE_IB,
        RDMA_AH_ATTR_TYPE_ROCE,
        RDMA_AH_ATTR_TYPE_OPA,
};

struct ib_ah_attr {
        u16                     dlid;
        u8                      src_path_bits;
};

struct roce_ah_attr {
        u8                      dmac[ETH_ALEN];
};

struct opa_ah_attr {
        u32                     dlid;
        u8                      src_path_bits;
        bool                    make_grd;
};

struct rdma_ah_attr {
        struct ib_global_route  grh;
        u8                      sl;
        u8                      static_rate;
        u8                      port_num;
        u8                      ah_flags;
        enum rdma_ah_attr_type type;
        union {
                struct ib_ah_attr ib;
                struct roce_ah_attr roce;
                struct opa_ah_attr opa;
        };
};

enum ib_wc_status {
        IB_WC_SUCCESS,
        IB_WC_LOC_LEN_ERR,
        IB_WC_LOC_QP_OP_ERR,
        IB_WC_LOC_EEC_OP_ERR,
        IB_WC_LOC_PROT_ERR,
        IB_WC_WR_FLUSH_ERR,
        IB_WC_MW_BIND_ERR,
        IB_WC_BAD_RESP_ERR,
        IB_WC_LOC_ACCESS_ERR,
        IB_WC_REM_INV_REQ_ERR,
        IB_WC_REM_ACCESS_ERR,
        IB_WC_REM_OP_ERR,
        IB_WC_RETRY_EXC_ERR,
        IB_WC_RNR_RETRY_EXC_ERR,
        IB_WC_LOC_RDD_VIOL_ERR,
        IB_WC_REM_INV_RD_REQ_ERR,
        IB_WC_REM_ABORT_ERR,
        IB_WC_INV_EECN_ERR,
        IB_WC_INV_EEC_STATE_ERR,
        IB_WC_FATAL_ERR,
        IB_WC_RESP_TIMEOUT_ERR,
        IB_WC_GENERAL_ERR
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
        IB_WC_SEND = IB_UVERBS_WC_SEND,
        IB_WC_RDMA_WRITE = IB_UVERBS_WC_RDMA_WRITE,
        IB_WC_RDMA_READ = IB_UVERBS_WC_RDMA_READ,
        IB_WC_COMP_SWAP = IB_UVERBS_WC_COMP_SWAP,
        IB_WC_FETCH_ADD = IB_UVERBS_WC_FETCH_ADD,
        IB_WC_BIND_MW = IB_UVERBS_WC_BIND_MW,
        IB_WC_LOCAL_INV = IB_UVERBS_WC_LOCAL_INV,
        IB_WC_LSO = IB_UVERBS_WC_TSO,
        IB_WC_REG_MR,
        IB_WC_MASKED_COMP_SWAP,
        IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
        IB_WC_RECV                      = 1 << 7,
        IB_WC_RECV_RDMA_WITH_IMM
};

enum ib_wc_flags {
        IB_WC_GRH               = 1,
        IB_WC_WITH_IMM          = (1<<1),
        IB_WC_WITH_INVALIDATE   = (1<<2),
        IB_WC_IP_CSUM_OK        = (1<<3),
        IB_WC_WITH_SMAC         = (1<<4),
        IB_WC_WITH_VLAN         = (1<<5),
        IB_WC_WITH_NETWORK_HDR_TYPE     = (1<<6),
};

struct ib_wc {
        union {
                u64             wr_id;
                struct ib_cqe   *wr_cqe;
        };
        enum ib_wc_status       status;
        enum ib_wc_opcode       opcode;
        u32                     vendor_err;
        u32                     byte_len;
        struct ib_qp           *qp;
        union {
                __be32          imm_data;
                u32             invalidate_rkey;
        } ex;
        u32                     src_qp;
        u32                     slid;
        int                     wc_flags;
        u16                     pkey_index;
        u8                      sl;
        u8                      dlid_path_bits;
        u8                      port_num;       /* valid only for DR SMPs on switches */
        u8                      smac[ETH_ALEN];
        u16                     vlan_id;
        u8                      network_hdr_type;
};

enum ib_cq_notify_flags {
        IB_CQ_SOLICITED                 = 1 << 0,
        IB_CQ_NEXT_COMP                 = 1 << 1,
        IB_CQ_SOLICITED_MASK            = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
        IB_CQ_REPORT_MISSED_EVENTS      = 1 << 2,
};

enum ib_srq_type {
        IB_SRQT_BASIC = IB_UVERBS_SRQT_BASIC,
        IB_SRQT_XRC = IB_UVERBS_SRQT_XRC,
        IB_SRQT_TM = IB_UVERBS_SRQT_TM,
};

static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
{
        return srq_type == IB_SRQT_XRC ||
               srq_type == IB_SRQT_TM;
}

enum ib_srq_attr_mask {
        IB_SRQ_MAX_WR   = 1 << 0,
        IB_SRQ_LIMIT    = 1 << 1,
};

struct ib_srq_attr {
        u32     max_wr;
        u32     max_sge;
        u32     srq_limit;
};

struct ib_srq_init_attr {
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *srq_context;
        struct ib_srq_attr      attr;
        enum ib_srq_type        srq_type;

        struct {
                struct ib_cq   *cq;
                union {
                        struct {
                                struct ib_xrcd *xrcd;
                        } xrc;

                        struct {
                                u32             max_num_tags;
                        } tag_matching;
                };
        } ext;
};
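
/*
 * Illustrative sketch, not part of this header: init attributes for a
 * basic SRQ.  ib_create_srq() is declared further down in this header;
 * the pd pointer and queue sizes are hypothetical.
 *
 *      struct ib_srq_init_attr srq_attr = {
 *              .attr     = { .max_wr = 256, .max_sge = 1 },
 *              .srq_type = IB_SRQT_BASIC,
 *      };
 *      struct ib_srq *srq = ib_create_srq(pd, &srq_attr);
 */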

struct ib_qp_cap {
        u32     max_send_wr;
        u32     max_recv_wr;
        u32     max_send_sge;
        u32     max_recv_sge;
        u32     max_inline_data;

        /*
         * Maximum number of rdma_rw_ctx structures in flight at a time.
         * ib_create_qp() will calculate the right number of needed WRs
         * and MRs based on this.
         */
        u32     max_rdma_ctxs;
};

enum ib_sig_type {
        IB_SIGNAL_ALL_WR,
        IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
        /*
         * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
         * here (and in that order) since the MAD layer uses them as
         * indices into a 2-entry table.
         */
        IB_QPT_SMI,
        IB_QPT_GSI,

        IB_QPT_RC = IB_UVERBS_QPT_RC,
        IB_QPT_UC = IB_UVERBS_QPT_UC,
        IB_QPT_UD = IB_UVERBS_QPT_UD,
        IB_QPT_RAW_IPV6,
        IB_QPT_RAW_ETHERTYPE,
        IB_QPT_RAW_PACKET = IB_UVERBS_QPT_RAW_PACKET,
        IB_QPT_XRC_INI = IB_UVERBS_QPT_XRC_INI,
        IB_QPT_XRC_TGT = IB_UVERBS_QPT_XRC_TGT,
        IB_QPT_MAX,
        IB_QPT_DRIVER = IB_UVERBS_QPT_DRIVER,
        /* Reserve a range for qp types internal to the low level driver.
         * These qp types will not be visible at the IB core layer, so the
         * IB_QPT_MAX usages should not be affected in the core layer.
         */
        IB_QPT_RESERVED1 = 0x1000,
        IB_QPT_RESERVED2,
        IB_QPT_RESERVED3,
        IB_QPT_RESERVED4,
        IB_QPT_RESERVED5,
        IB_QPT_RESERVED6,
        IB_QPT_RESERVED7,
        IB_QPT_RESERVED8,
        IB_QPT_RESERVED9,
        IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
        IB_QP_CREATE_IPOIB_UD_LSO               = 1 << 0,
        IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK   =
                IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
        IB_QP_CREATE_CROSS_CHANNEL              = 1 << 2,
        IB_QP_CREATE_MANAGED_SEND               = 1 << 3,
        IB_QP_CREATE_MANAGED_RECV               = 1 << 4,
        IB_QP_CREATE_NETIF_QP                   = 1 << 5,
        IB_QP_CREATE_INTEGRITY_EN               = 1 << 6,
        IB_QP_CREATE_NETDEV_USE                 = 1 << 7,
        IB_QP_CREATE_SCATTER_FCS                =
                IB_UVERBS_QP_CREATE_SCATTER_FCS,
        IB_QP_CREATE_CVLAN_STRIPPING            =
                IB_UVERBS_QP_CREATE_CVLAN_STRIPPING,
        IB_QP_CREATE_SOURCE_QPN                 = 1 << 10,
        IB_QP_CREATE_PCI_WRITE_END_PADDING      =
                IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING,
        /* reserve bits 26-31 for low level drivers' internal use */
        IB_QP_CREATE_RESERVED_START             = 1 << 26,
        IB_QP_CREATE_RESERVED_END               = 1 << 31,
};

/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
 * callback to destroy the passed in QP.
 */

struct ib_qp_init_attr {
        /* Consumer's event_handler callback must not block */
        void                  (*event_handler)(struct ib_event *, void *);

        void                   *qp_context;
        struct ib_cq           *send_cq;
        struct ib_cq           *recv_cq;
        struct ib_srq          *srq;
        struct ib_xrcd         *xrcd;     /* XRC TGT QPs only */
        struct ib_qp_cap        cap;
        enum ib_sig_type        sq_sig_type;
        enum ib_qp_type         qp_type;
        u32                     create_flags;

        /*
         * Only needed for special QP types, or when using the RW API.
         */
        u8                      port_num;
        struct ib_rwq_ind_table *rwq_ind_tbl;
        u32                     source_qpn;
};
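
/*
 * Illustrative sketch, not part of this header: init attributes for an
 * RC QP that only generates completions for explicitly signaled sends.
 * ib_create_qp() is declared further down in this header; the PD, CQ
 * pointers and queue sizes are hypothetical.
 *
 *      struct ib_qp_init_attr qp_attr = {
 *              .send_cq     = send_cq,
 *              .recv_cq     = recv_cq,
 *              .cap         = { .max_send_wr  = 64, .max_recv_wr  = 64,
 *                               .max_send_sge = 1,  .max_recv_sge = 1 },
 *              .sq_sig_type = IB_SIGNAL_REQ_WR,
 *              .qp_type     = IB_QPT_RC,
 *      };
 *      struct ib_qp *qp = ib_create_qp(pd, &qp_attr);
 */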

struct ib_qp_open_attr {
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *qp_context;
        u32                     qp_num;
        enum ib_qp_type         qp_type;
};

enum ib_rnr_timeout {
        IB_RNR_TIMER_655_36 =  0,
        IB_RNR_TIMER_000_01 =  1,
        IB_RNR_TIMER_000_02 =  2,
        IB_RNR_TIMER_000_03 =  3,
        IB_RNR_TIMER_000_04 =  4,
        IB_RNR_TIMER_000_06 =  5,
        IB_RNR_TIMER_000_08 =  6,
        IB_RNR_TIMER_000_12 =  7,
        IB_RNR_TIMER_000_16 =  8,
        IB_RNR_TIMER_000_24 =  9,
        IB_RNR_TIMER_000_32 = 10,
        IB_RNR_TIMER_000_48 = 11,
        IB_RNR_TIMER_000_64 = 12,
        IB_RNR_TIMER_000_96 = 13,
        IB_RNR_TIMER_001_28 = 14,
        IB_RNR_TIMER_001_92 = 15,
        IB_RNR_TIMER_002_56 = 16,
        IB_RNR_TIMER_003_84 = 17,
        IB_RNR_TIMER_005_12 = 18,
        IB_RNR_TIMER_007_68 = 19,
        IB_RNR_TIMER_010_24 = 20,
        IB_RNR_TIMER_015_36 = 21,
        IB_RNR_TIMER_020_48 = 22,
        IB_RNR_TIMER_030_72 = 23,
        IB_RNR_TIMER_040_96 = 24,
        IB_RNR_TIMER_061_44 = 25,
        IB_RNR_TIMER_081_92 = 26,
        IB_RNR_TIMER_122_88 = 27,
        IB_RNR_TIMER_163_84 = 28,
        IB_RNR_TIMER_245_76 = 29,
        IB_RNR_TIMER_327_68 = 30,
        IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
        IB_QP_STATE                     = 1,
        IB_QP_CUR_STATE                 = (1<<1),
        IB_QP_EN_SQD_ASYNC_NOTIFY       = (1<<2),
        IB_QP_ACCESS_FLAGS              = (1<<3),
        IB_QP_PKEY_INDEX                = (1<<4),
        IB_QP_PORT                      = (1<<5),
        IB_QP_QKEY                      = (1<<6),
        IB_QP_AV                        = (1<<7),
        IB_QP_PATH_MTU                  = (1<<8),
        IB_QP_TIMEOUT                   = (1<<9),
        IB_QP_RETRY_CNT                 = (1<<10),
        IB_QP_RNR_RETRY                 = (1<<11),
        IB_QP_RQ_PSN                    = (1<<12),
        IB_QP_MAX_QP_RD_ATOMIC          = (1<<13),
        IB_QP_ALT_PATH                  = (1<<14),
        IB_QP_MIN_RNR_TIMER             = (1<<15),
        IB_QP_SQ_PSN                    = (1<<16),
        IB_QP_MAX_DEST_RD_ATOMIC        = (1<<17),
        IB_QP_PATH_MIG_STATE            = (1<<18),
        IB_QP_CAP                       = (1<<19),
        IB_QP_DEST_QPN                  = (1<<20),
        IB_QP_RESERVED1                 = (1<<21),
        IB_QP_RESERVED2                 = (1<<22),
        IB_QP_RESERVED3                 = (1<<23),
        IB_QP_RESERVED4                 = (1<<24),
        IB_QP_RATE_LIMIT                = (1<<25),
};

enum ib_qp_state {
        IB_QPS_RESET,
        IB_QPS_INIT,
        IB_QPS_RTR,
        IB_QPS_RTS,
        IB_QPS_SQD,
        IB_QPS_SQE,
        IB_QPS_ERR
};

enum ib_mig_state {
        IB_MIG_MIGRATED,
        IB_MIG_REARM,
        IB_MIG_ARMED
};

enum ib_mw_type {
        IB_MW_TYPE_1 = 1,
        IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
        enum ib_qp_state        qp_state;
        enum ib_qp_state        cur_qp_state;
        enum ib_mtu             path_mtu;
        enum ib_mig_state       path_mig_state;
        u32                     qkey;
        u32                     rq_psn;
        u32                     sq_psn;
        u32                     dest_qp_num;
        int                     qp_access_flags;
        struct ib_qp_cap        cap;
        struct rdma_ah_attr     ah_attr;
        struct rdma_ah_attr     alt_ah_attr;
        u16                     pkey_index;
        u16                     alt_pkey_index;
        u8                      en_sqd_async_notify;
        u8                      sq_draining;
        u8                      max_rd_atomic;
        u8                      max_dest_rd_atomic;
        u8                      min_rnr_timer;
        u8                      port_num;
        u8                      timeout;
        u8                      retry_cnt;
        u8                      rnr_retry;
        u8                      alt_port_num;
        u8                      alt_timeout;
        u32                     rate_limit;
        struct net_device       *xmit_slave;
};

enum ib_wr_opcode {
        /* These are shared with userspace */
        IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
        IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
        IB_WR_SEND = IB_UVERBS_WR_SEND,
        IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
        IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
        IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
        IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
        IB_WR_BIND_MW = IB_UVERBS_WR_BIND_MW,
        IB_WR_LSO = IB_UVERBS_WR_TSO,
        IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
        IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
        IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
        IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
                IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
        IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
                IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,

        /* These are kernel only and cannot be issued by userspace */
        IB_WR_REG_MR = 0x20,
        IB_WR_REG_MR_INTEGRITY,

        /* reserve values for low level drivers' internal use.
         * These values will not be used at all in the ib core layer.
         */
        IB_WR_RESERVED1 = 0xf0,
        IB_WR_RESERVED2,
        IB_WR_RESERVED3,
        IB_WR_RESERVED4,
        IB_WR_RESERVED5,
        IB_WR_RESERVED6,
        IB_WR_RESERVED7,
        IB_WR_RESERVED8,
        IB_WR_RESERVED9,
        IB_WR_RESERVED10,
};

enum ib_send_flags {
        IB_SEND_FENCE           = 1,
        IB_SEND_SIGNALED        = (1<<1),
        IB_SEND_SOLICITED       = (1<<2),
        IB_SEND_INLINE          = (1<<3),
        IB_SEND_IP_CSUM         = (1<<4),

        /* reserve bits 26-31 for low level drivers' internal use */
        IB_SEND_RESERVED_START  = (1 << 26),
        IB_SEND_RESERVED_END    = (1 << 31),
};

struct ib_sge {
        u64     addr;
        u32     length;
        u32     lkey;
};

struct ib_cqe {
        void (*done)(struct ib_cq *cq, struct ib_wc *wc);
};
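
/*
 * Illustrative sketch, not part of this header: the ib_cqe completion
 * pattern.  Embedding an ib_cqe in a request structure and recovering
 * it with container_of() in the done callback avoids wr_id lookup
 * tables.  The my_request type is hypothetical.
 *
 *      struct my_request {
 *              struct ib_cqe cqe;
 *      };
 *
 *      static void my_done(struct ib_cq *cq, struct ib_wc *wc)
 *      {
 *              struct my_request *req =
 *                      container_of(wc->wr_cqe, struct my_request, cqe);
 *              (process req here)
 *      }
 *
 * The submitter sets req->cqe.done = my_done and points the work
 * request's wr_cqe at &req->cqe instead of using wr_id.
 */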

struct ib_send_wr {
        struct ib_send_wr      *next;
        union {
                u64             wr_id;
                struct ib_cqe   *wr_cqe;
        };
        struct ib_sge          *sg_list;
        int                     num_sge;
        enum ib_wr_opcode       opcode;
        int                     send_flags;
        union {
                __be32          imm_data;
                u32             invalidate_rkey;
        } ex;
};

struct ib_rdma_wr {
        struct ib_send_wr       wr;
        u64                     remote_addr;
        u32                     rkey;
};

static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
{
        return container_of(wr, struct ib_rdma_wr, wr);
}
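
/*
 * Illustrative sketch, not part of this header: a ULP posts an RDMA
 * WRITE by embedding ib_send_wr inside ib_rdma_wr, while a driver's
 * post_send path uses rdma_wr() to recover the container.
 * ib_post_send() is declared further down in this header; sge, raddr
 * and rkey are hypothetical.
 *
 *      struct ib_rdma_wr wr = {
 *              .wr = { .opcode     = IB_WR_RDMA_WRITE,
 *                      .sg_list    = &sge,
 *                      .num_sge    = 1,
 *                      .send_flags = IB_SEND_SIGNALED },
 *              .remote_addr = raddr,
 *              .rkey        = rkey,
 *      };
 *      const struct ib_send_wr *bad_wr;
 *      int ret = ib_post_send(qp, &wr.wr, &bad_wr);
 */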

struct ib_atomic_wr {
        struct ib_send_wr       wr;
        u64                     remote_addr;
        u64                     compare_add;
        u64                     swap;
        u64                     compare_add_mask;
        u64                     swap_mask;
        u32                     rkey;
};

static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
{
        return container_of(wr, struct ib_atomic_wr, wr);
}

struct ib_ud_wr {
        struct ib_send_wr       wr;
        struct ib_ah            *ah;
        void                    *header;
        int                     hlen;
        int                     mss;
        u32                     remote_qpn;
        u32                     remote_qkey;
        u16                     pkey_index; /* valid for GSI only */
        u8                      port_num;   /* valid for DR SMPs on switch only */
};

static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
{
        return container_of(wr, struct ib_ud_wr, wr);
}

struct ib_reg_wr {
        struct ib_send_wr       wr;
        struct ib_mr            *mr;
        u32                     key;
        int                     access;
};

static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
{
        return container_of(wr, struct ib_reg_wr, wr);
}

struct ib_recv_wr {
        struct ib_recv_wr      *next;
        union {
                u64             wr_id;
                struct ib_cqe   *wr_cqe;
        };
        struct ib_sge          *sg_list;
        int                     num_sge;
};

enum ib_access_flags {
        IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
        IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
        IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
        IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
        IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
        IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
        IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
        IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
        IB_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_RELAXED_ORDERING,

        IB_ACCESS_OPTIONAL = IB_UVERBS_ACCESS_OPTIONAL_RANGE,
        IB_ACCESS_SUPPORTED =
                ((IB_ACCESS_HUGETLB << 1) - 1) | IB_ACCESS_OPTIONAL,
};

/*
 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
 * are hidden here instead of a uapi header!
 */
enum ib_mr_rereg_flags {
        IB_MR_REREG_TRANS       = 1,
        IB_MR_REREG_PD          = (1<<1),
        IB_MR_REREG_ACCESS      = (1<<2),
        IB_MR_REREG_SUPPORTED   = ((IB_MR_REREG_ACCESS << 1) - 1)
};

struct ib_umem;

enum rdma_remove_reason {
        /*
         * Userspace requested uobject deletion or an initial attempt
         * to remove the uobject via cleanup.  The call may fail.
         */
        RDMA_REMOVE_DESTROY,
        /* Context deletion. This call should delete the actual object itself */
        RDMA_REMOVE_CLOSE,
        /* Driver is being hot-unplugged. This call should delete the actual object itself */
        RDMA_REMOVE_DRIVER_REMOVE,
        /* uobj is being cleaned-up before being committed */
        RDMA_REMOVE_ABORT,
};

struct ib_rdmacg_object {
#ifdef CONFIG_CGROUP_RDMA
        struct rdma_cgroup      *cg;            /* owner rdma cgroup */
#endif
};

struct ib_ucontext {
        struct ib_device       *device;
        struct ib_uverbs_file  *ufile;

        bool cleanup_retryable;

        struct ib_rdmacg_object cg_obj;
        /*
         * Implementation details of the RDMA core, don't use in drivers:
         */
        struct rdma_restrack_entry res;
        struct xarray mmap_xa;
};

struct ib_uobject {
        u64                     user_handle;    /* handle given to us by userspace */
        /* ufile & ucontext owning this object */
        struct ib_uverbs_file  *ufile;
        /* FIXME, save memory: ufile->context == context */
        struct ib_ucontext     *context;        /* associated user context */
        void                   *object;         /* containing object */
        struct list_head        list;           /* link to context's list */
        struct ib_rdmacg_object cg_obj;         /* rdmacg object */
        int                     id;             /* index into kernel idr */
        struct kref             ref;
        atomic_t                usecnt;         /* protects exclusive access */
        struct rcu_head         rcu;            /* kfree_rcu() overhead */

        const struct uverbs_api_object *uapi_object;
};

struct ib_udata {
        const void __user *inbuf;
        void __user *outbuf;
        size_t       inlen;
        size_t       outlen;
};
1517
1518struct ib_pd {
1519        u32                     local_dma_lkey;
1520        u32                     flags;
1521        struct ib_device       *device;
1522        struct ib_uobject      *uobject;
1523        atomic_t                usecnt; /* count all resources */
1524
1525        u32                     unsafe_global_rkey;
1526
1527        /*
1528         * Implementation details of the RDMA core, don't use in drivers:
1529         */
1530        struct ib_mr           *__internal_mr;
1531        struct rdma_restrack_entry res;
1532};
1533
1534struct ib_xrcd {
1535        struct ib_device       *device;
1536        atomic_t                usecnt; /* count all exposed resources */
1537        struct inode           *inode;
1538        struct rw_semaphore     tgt_qps_rwsem;
1539        struct xarray           tgt_qps;
1540};
1541
1542struct ib_ah {
1543        struct ib_device        *device;
1544        struct ib_pd            *pd;
1545        struct ib_uobject       *uobject;
1546        const struct ib_gid_attr *sgid_attr;
1547        enum rdma_ah_attr_type  type;
1548};
1549
1550typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1551
1552enum ib_poll_context {
1553        IB_POLL_SOFTIRQ,           /* poll from softirq context */
1554        IB_POLL_WORKQUEUE,         /* poll from workqueue */
1555        IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
1556        IB_POLL_LAST_POOL_TYPE = IB_POLL_UNBOUND_WORKQUEUE,
1557
1558        IB_POLL_DIRECT,            /* caller context, no hw completions */
1559};
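
/*
 * Usage sketch: the poll context is chosen when the CQ is allocated with
 * ib_alloc_cq(), declared later in this header; "dev", "nr_cqe" and
 * "comp_vector" stand in for caller values:
 *
 *	cq = ib_alloc_cq(dev, NULL, nr_cqe, comp_vector, IB_POLL_SOFTIRQ);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */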
1560
1561struct ib_cq {
1562        struct ib_device       *device;
1563        struct ib_ucq_object   *uobject;
1564        ib_comp_handler         comp_handler;
1565        void                  (*event_handler)(struct ib_event *, void *);
1566        void                   *cq_context;
1567        int                     cqe;
1568        unsigned int            cqe_used;
1569        atomic_t                usecnt; /* count number of work queues */
1570        enum ib_poll_context    poll_ctx;
1571        struct ib_wc            *wc;
1572        struct list_head        pool_entry;
1573        union {
1574                struct irq_poll         iop;
1575                struct work_struct      work;
1576        };
1577        struct workqueue_struct *comp_wq;
1578        struct dim *dim;
1579
1580        /* updated only by trace points */
1581        ktime_t timestamp;
1582        u8 interrupt:1;
1583        u8 shared:1;
1584        unsigned int comp_vector;
1585
1586        /*
1587         * Implementation details of the RDMA core, don't use in drivers:
1588         */
1589        struct rdma_restrack_entry res;
1590};
1591
1592struct ib_srq {
1593        struct ib_device       *device;
1594        struct ib_pd           *pd;
1595        struct ib_usrq_object  *uobject;
1596        void                  (*event_handler)(struct ib_event *, void *);
1597        void                   *srq_context;
1598        enum ib_srq_type        srq_type;
1599        atomic_t                usecnt;
1600
1601        struct {
1602                struct ib_cq   *cq;
1603                union {
1604                        struct {
1605                                struct ib_xrcd *xrcd;
1606                                u32             srq_num;
1607                        } xrc;
1608                };
1609        } ext;
1610};
1611
1612enum ib_raw_packet_caps {
1613        /* Stripping the cvlan from an incoming packet and reporting it in
1614         * the matching work completion is supported.
1615         */
1616        IB_RAW_PACKET_CAP_CVLAN_STRIPPING       = (1 << 0),
1617        /* Scattering the FCS field of an incoming packet to host memory is
1618         * supported.
1619        IB_RAW_PACKET_CAP_SCATTER_FCS           = (1 << 1),
1620        /* Checksum offloads are supported (for both send and receive). */
1621        IB_RAW_PACKET_CAP_IP_CSUM               = (1 << 2),
1622        /* When a packet is received for an RQ with no receive WQEs, the
1623         * packet processing is delayed.
1624         */
1625        IB_RAW_PACKET_CAP_DELAY_DROP            = (1 << 3),
1626};
1627
1628enum ib_wq_type {
1629        IB_WQT_RQ = IB_UVERBS_WQT_RQ,
1630};
1631
1632enum ib_wq_state {
1633        IB_WQS_RESET,
1634        IB_WQS_RDY,
1635        IB_WQS_ERR
1636};
1637
1638struct ib_wq {
1639        struct ib_device       *device;
1640        struct ib_uwq_object   *uobject;
1641        void                   *wq_context;
1642        void                  (*event_handler)(struct ib_event *, void *);
1643        struct ib_pd           *pd;
1644        struct ib_cq           *cq;
1645        u32                     wq_num;
1646        enum ib_wq_state        state;
1647        enum ib_wq_type         wq_type;
1648        atomic_t                usecnt;
1649};
1650
1651enum ib_wq_flags {
1652        IB_WQ_FLAGS_CVLAN_STRIPPING     = IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING,
1653        IB_WQ_FLAGS_SCATTER_FCS         = IB_UVERBS_WQ_FLAGS_SCATTER_FCS,
1654        IB_WQ_FLAGS_DELAY_DROP          = IB_UVERBS_WQ_FLAGS_DELAY_DROP,
1655        IB_WQ_FLAGS_PCI_WRITE_END_PADDING =
1656                                IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING,
1657};
1658
1659struct ib_wq_init_attr {
1660        void                   *wq_context;
1661        enum ib_wq_type         wq_type;
1662        u32                     max_wr;
1663        u32                     max_sge;
1664        struct ib_cq           *cq;
1665        void                  (*event_handler)(struct ib_event *, void *);
1666        u32                     create_flags; /* Use enum ib_wq_flags */
1667};
1668
1669enum ib_wq_attr_mask {
1670        IB_WQ_STATE             = 1 << 0,
1671        IB_WQ_CUR_STATE         = 1 << 1,
1672        IB_WQ_FLAGS             = 1 << 2,
1673};
1674
1675struct ib_wq_attr {
1676        enum ib_wq_state        wq_state;
1677        enum ib_wq_state        curr_wq_state;
1678        u32                     flags; /* Use enum ib_wq_flags */
1679        u32                     flags_mask; /* Use enum ib_wq_flags */
1680};
1681
1682struct ib_rwq_ind_table {
1683        struct ib_device       *device;
1684        struct ib_uobject      *uobject;
1685        atomic_t                usecnt;
1686        u32                     ind_tbl_num;
1687        u32                     log_ind_tbl_size;
1688        struct ib_wq          **ind_tbl;
1689};
1690
1691struct ib_rwq_ind_table_init_attr {
1692        u32             log_ind_tbl_size;
1693        /* Each entry is a pointer to Receive Work Queue */
1694        struct ib_wq    **ind_tbl;
1695};
1696
1697enum port_pkey_state {
1698        IB_PORT_PKEY_NOT_VALID = 0,
1699        IB_PORT_PKEY_VALID = 1,
1700        IB_PORT_PKEY_LISTED = 2,
1701};
1702
1703struct ib_qp_security;
1704
1705struct ib_port_pkey {
1706        enum port_pkey_state    state;
1707        u16                     pkey_index;
1708        u8                      port_num;
1709        struct list_head        qp_list;
1710        struct list_head        to_error_list;
1711        struct ib_qp_security  *sec;
1712};
1713
1714struct ib_ports_pkeys {
1715        struct ib_port_pkey     main;
1716        struct ib_port_pkey     alt;
1717};
1718
1719struct ib_qp_security {
1720        struct ib_qp           *qp;
1721        struct ib_device       *dev;
1722        /* Hold this mutex when changing port and pkey settings. */
1723        struct mutex            mutex;
1724        struct ib_ports_pkeys  *ports_pkeys;
1725        /* A list of all open shared QP handles.  Required to enforce security
1726         * properly for all users of a shared QP.
1727         */
1728        struct list_head        shared_qp_list;
1729        void                   *security;
1730        bool                    destroying;
1731        atomic_t                error_list_count;
1732        struct completion       error_complete;
1733        int                     error_comps_pending;
1734};
1735
1736/*
1737 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
1738 * @max_read_sge:  Maximum SGE elements per RDMA READ request.
1739 */
1740struct ib_qp {
1741        struct ib_device       *device;
1742        struct ib_pd           *pd;
1743        struct ib_cq           *send_cq;
1744        struct ib_cq           *recv_cq;
1745        spinlock_t              mr_lock;
1746        int                     mrs_used;
1747        struct list_head        rdma_mrs;
1748        struct list_head        sig_mrs;
1749        struct ib_srq          *srq;
1750        struct ib_xrcd         *xrcd; /* XRC TGT QPs only */
1751        struct list_head        xrcd_list;
1752
1753        /* count times opened, mcast attaches, flow attaches */
1754        atomic_t                usecnt;
1755        struct list_head        open_list;
1756        struct ib_qp           *real_qp;
1757        struct ib_uqp_object   *uobject;
1758        void                  (*event_handler)(struct ib_event *, void *);
1759        void                   *qp_context;
1760        /* sgid_attrs associated with the AV's */
1761        const struct ib_gid_attr *av_sgid_attr;
1762        const struct ib_gid_attr *alt_path_sgid_attr;
1763        u32                     qp_num;
1764        u32                     max_write_sge;
1765        u32                     max_read_sge;
1766        enum ib_qp_type         qp_type;
1767        struct ib_rwq_ind_table *rwq_ind_tbl;
1768        struct ib_qp_security  *qp_sec;
1769        u8                      port;
1770
1771        bool                    integrity_en;
1772        /*
1773         * Implementation details of the RDMA core, don't use in drivers:
1774         */
1775        struct rdma_restrack_entry     res;
1776
1777        /* The counter the qp is bound to */
1778        struct rdma_counter    *counter;
1779};
1780
1781struct ib_dm {
1782        struct ib_device  *device;
1783        u32                length;
1784        u32                flags;
1785        struct ib_uobject *uobject;
1786        atomic_t           usecnt;
1787};
1788
1789struct ib_mr {
1790        struct ib_device  *device;
1791        struct ib_pd      *pd;
1792        u32                lkey;
1793        u32                rkey;
1794        u64                iova;
1795        u64                length;
1796        unsigned int       page_size;
1797        enum ib_mr_type    type;
1798        bool               need_inval;
1799        union {
1800                struct ib_uobject       *uobject;       /* user */
1801                struct list_head        qp_entry;       /* FR */
1802        };
1803
1804        struct ib_dm      *dm;
1805        struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */
1806        /*
1807         * Implementation details of the RDMA core, don't use in drivers:
1808         */
1809        struct rdma_restrack_entry res;
1810};
1811
1812struct ib_mw {
1813        struct ib_device        *device;
1814        struct ib_pd            *pd;
1815        struct ib_uobject       *uobject;
1816        u32                     rkey;
1817        enum ib_mw_type         type;
1818};
1819
1820/* Supported steering options */
1821enum ib_flow_attr_type {
1822        /* steering according to rule specifications */
1823        IB_FLOW_ATTR_NORMAL             = 0x0,
1824        /* default unicast and multicast rule -
1825         * receive all Eth traffic which isn't steered to any QP
1826         */
1827        IB_FLOW_ATTR_ALL_DEFAULT        = 0x1,
1828        /* default multicast rule -
1829         * receive all Eth multicast traffic which isn't steered to any QP
1830         */
1831        IB_FLOW_ATTR_MC_DEFAULT         = 0x2,
1832        /* sniffer rule - receive all port traffic */
1833        IB_FLOW_ATTR_SNIFFER            = 0x3
1834};
1835
1836/* Supported steering header types */
1837enum ib_flow_spec_type {
1838        /* L2 headers */
1839        IB_FLOW_SPEC_ETH                = 0x20,
1840        IB_FLOW_SPEC_IB                 = 0x22,
1841        /* L3 headers */
1842        IB_FLOW_SPEC_IPV4               = 0x30,
1843        IB_FLOW_SPEC_IPV6               = 0x31,
1844        IB_FLOW_SPEC_ESP                = 0x34,
1845        /* L4 headers */
1846        IB_FLOW_SPEC_TCP                = 0x40,
1847        IB_FLOW_SPEC_UDP                = 0x41,
1848        IB_FLOW_SPEC_VXLAN_TUNNEL       = 0x50,
1849        IB_FLOW_SPEC_GRE                = 0x51,
1850        IB_FLOW_SPEC_MPLS               = 0x60,
1851        IB_FLOW_SPEC_INNER              = 0x100,
1852        /* Actions */
1853        IB_FLOW_SPEC_ACTION_TAG         = 0x1000,
1854        IB_FLOW_SPEC_ACTION_DROP        = 0x1001,
1855        IB_FLOW_SPEC_ACTION_HANDLE      = 0x1002,
1856        IB_FLOW_SPEC_ACTION_COUNT       = 0x1003,
1857};
1858#define IB_FLOW_SPEC_LAYER_MASK 0xF0
1859#define IB_FLOW_SPEC_SUPPORT_LAYERS 10
1860
1861enum ib_flow_flags {
1862        IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
1863        IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */
1864        IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 3  /* Must be last */
1865};
1866
1867struct ib_flow_eth_filter {
1868        u8      dst_mac[6];
1869        u8      src_mac[6];
1870        __be16  ether_type;
1871        __be16  vlan_tag;
1872        /* Must be last */
1873        u8      real_sz[];
1874};
1875
1876struct ib_flow_spec_eth {
1877        u32                       type;
1878        u16                       size;
1879        struct ib_flow_eth_filter val;
1880        struct ib_flow_eth_filter mask;
1881};
1882
1883struct ib_flow_ib_filter {
1884        __be16 dlid;
1885        __u8   sl;
1886        /* Must be last */
1887        u8      real_sz[];
1888};
1889
1890struct ib_flow_spec_ib {
1891        u32                      type;
1892        u16                      size;
1893        struct ib_flow_ib_filter val;
1894        struct ib_flow_ib_filter mask;
1895};
1896
1897/* IPv4 header flags */
1898enum ib_ipv4_flags {
1899        IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
1900        IB_IPV4_MORE_FRAG = 0x4  /* All fragmented packets except the
1901                                    last have this flag set */
1902};
1903
1904struct ib_flow_ipv4_filter {
1905        __be32  src_ip;
1906        __be32  dst_ip;
1907        u8      proto;
1908        u8      tos;
1909        u8      ttl;
1910        u8      flags;
1911        /* Must be last */
1912        u8      real_sz[];
1913};
1914
1915struct ib_flow_spec_ipv4 {
1916        u32                        type;
1917        u16                        size;
1918        struct ib_flow_ipv4_filter val;
1919        struct ib_flow_ipv4_filter mask;
1920};
1921
1922struct ib_flow_ipv6_filter {
1923        u8      src_ip[16];
1924        u8      dst_ip[16];
1925        __be32  flow_label;
1926        u8      next_hdr;
1927        u8      traffic_class;
1928        u8      hop_limit;
1929        /* Must be last */
1930        u8      real_sz[];
1931};
1932
1933struct ib_flow_spec_ipv6 {
1934        u32                        type;
1935        u16                        size;
1936        struct ib_flow_ipv6_filter val;
1937        struct ib_flow_ipv6_filter mask;
1938};
1939
1940struct ib_flow_tcp_udp_filter {
1941        __be16  dst_port;
1942        __be16  src_port;
1943        /* Must be last */
1944        u8      real_sz[];
1945};
1946
1947struct ib_flow_spec_tcp_udp {
1948        u32                           type;
1949        u16                           size;
1950        struct ib_flow_tcp_udp_filter val;
1951        struct ib_flow_tcp_udp_filter mask;
1952};
1953
1954struct ib_flow_tunnel_filter {
1955        __be32  tunnel_id;
1956        u8      real_sz[];
1957};
1958
1959/* ib_flow_spec_tunnel describes the VXLAN tunnel;
1960 * the tunnel_id field of val holds the VNI value.
1961 */
1962struct ib_flow_spec_tunnel {
1963        u32                           type;
1964        u16                           size;
1965        struct ib_flow_tunnel_filter  val;
1966        struct ib_flow_tunnel_filter  mask;
1967};
1968
1969struct ib_flow_esp_filter {
1970        __be32  spi;
1971        __be32  seq;
1972        /* Must be last */
1973        u8      real_sz[];
1974};
1975
1976struct ib_flow_spec_esp {
1977        u32                           type;
1978        u16                           size;
1979        struct ib_flow_esp_filter     val;
1980        struct ib_flow_esp_filter     mask;
1981};
1982
1983struct ib_flow_gre_filter {
1984        __be16 c_ks_res0_ver;
1985        __be16 protocol;
1986        __be32 key;
1987        /* Must be last */
1988        u8      real_sz[];
1989};
1990
1991struct ib_flow_spec_gre {
1992        u32                           type;
1993        u16                           size;
1994        struct ib_flow_gre_filter     val;
1995        struct ib_flow_gre_filter     mask;
1996};
1997
1998struct ib_flow_mpls_filter {
1999        __be32 tag;
2000        /* Must be last */
2001        u8      real_sz[];
2002};
2003
2004struct ib_flow_spec_mpls {
2005        u32                           type;
2006        u16                           size;
2007        struct ib_flow_mpls_filter    val;
2008        struct ib_flow_mpls_filter    mask;
2009};
2010
2011struct ib_flow_spec_action_tag {
2012        enum ib_flow_spec_type        type;
2013        u16                           size;
2014        u32                           tag_id;
2015};
2016
2017struct ib_flow_spec_action_drop {
2018        enum ib_flow_spec_type        type;
2019        u16                           size;
2020};
2021
2022struct ib_flow_spec_action_handle {
2023        enum ib_flow_spec_type        type;
2024        u16                           size;
2025        struct ib_flow_action        *act;
2026};
2027
2028enum ib_counters_description {
2029        IB_COUNTER_PACKETS,
2030        IB_COUNTER_BYTES,
2031};
2032
2033struct ib_flow_spec_action_count {
2034        enum ib_flow_spec_type type;
2035        u16 size;
2036        struct ib_counters *counters;
2037};
2038
2039union ib_flow_spec {
2040        struct {
2041                u32                     type;
2042                u16                     size;
2043        };
2044        struct ib_flow_spec_eth         eth;
2045        struct ib_flow_spec_ib          ib;
2046        struct ib_flow_spec_ipv4        ipv4;
2047        struct ib_flow_spec_tcp_udp     tcp_udp;
2048        struct ib_flow_spec_ipv6        ipv6;
2049        struct ib_flow_spec_tunnel      tunnel;
2050        struct ib_flow_spec_esp         esp;
2051        struct ib_flow_spec_gre         gre;
2052        struct ib_flow_spec_mpls        mpls;
2053        struct ib_flow_spec_action_tag  flow_tag;
2054        struct ib_flow_spec_action_drop drop;
2055        struct ib_flow_spec_action_handle action;
2056        struct ib_flow_spec_action_count flow_count;
2057};
2058
2059struct ib_flow_attr {
2060        enum ib_flow_attr_type type;
2061        u16          size;
2062        u16          priority;
2063        u32          flags;
2064        u8           num_of_specs;
2065        u8           port;
2066        union ib_flow_spec flows[];
2067};
2068
2069struct ib_flow {
2070        struct ib_qp            *qp;
2071        struct ib_device        *device;
2072        struct ib_uobject       *uobject;
2073};
2074
2075enum ib_flow_action_type {
2076        IB_FLOW_ACTION_UNSPECIFIED,
2077        IB_FLOW_ACTION_ESP = 1,
2078};
2079
2080struct ib_flow_action_attrs_esp_keymats {
2081        enum ib_uverbs_flow_action_esp_keymat                   protocol;
2082        union {
2083                struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
2084        } keymat;
2085};
2086
2087struct ib_flow_action_attrs_esp_replays {
2088        enum ib_uverbs_flow_action_esp_replay                   protocol;
2089        union {
2090                struct ib_uverbs_flow_action_esp_replay_bmp     bmp;
2091        } replay;
2092};
2093
2094enum ib_flow_action_attrs_esp_flags {
2095        /* All user-space flags come first: use enum ib_uverbs_flow_action_esp_flags.
2096         * This shares the same flag values between user space and the kernel
2097         * and avoids an unnecessary translation.
2098         */
2099
2100        /* Kernel flags */
2101        IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED  = 1ULL << 32,
2102        IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS  = 1ULL << 33,
2103};
2104
2105struct ib_flow_spec_list {
2106        struct ib_flow_spec_list        *next;
2107        union ib_flow_spec              spec;
2108};
2109
2110struct ib_flow_action_attrs_esp {
2111        struct ib_flow_action_attrs_esp_keymats         *keymat;
2112        struct ib_flow_action_attrs_esp_replays         *replay;
2113        struct ib_flow_spec_list                        *encap;
2114        /* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
2115         * A value of 0 is valid.
2116         */
2117        u32                                             esn;
2118        u32                                             spi;
2119        u32                                             seq;
2120        u32                                             tfc_pad;
2121        /* Use enum ib_flow_action_attrs_esp_flags */
2122        u64                                             flags;
2123        u64                                             hard_limit_pkts;
2124};
2125
2126struct ib_flow_action {
2127        struct ib_device                *device;
2128        struct ib_uobject               *uobject;
2129        enum ib_flow_action_type        type;
2130        atomic_t                        usecnt;
2131};
2132
2133struct ib_mad;
2134struct ib_grh;
2135
2136enum ib_process_mad_flags {
2137        IB_MAD_IGNORE_MKEY      = 1,
2138        IB_MAD_IGNORE_BKEY      = 2,
2139        IB_MAD_IGNORE_ALL       = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
2140};
2141
2142enum ib_mad_result {
2143        IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
2144        IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
2145        IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
2146        IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
2147};
2148
2149struct ib_port_cache {
2150        u64                   subnet_prefix;
2151        struct ib_pkey_cache  *pkey;
2152        struct ib_gid_table   *gid;
2153        u8                     lmc;
2154        enum ib_port_state     port_state;
2155};
2156
2157struct ib_port_immutable {
2158        int                           pkey_tbl_len;
2159        int                           gid_tbl_len;
2160        u32                           core_cap_flags;
2161        u32                           max_mad_size;
2162};
2163
2164struct ib_port_data {
2165        struct ib_device *ib_dev;
2166
2167        struct ib_port_immutable immutable;
2168
2169        spinlock_t pkey_list_lock;
2170        struct list_head pkey_list;
2171
2172        struct ib_port_cache cache;
2173
2174        spinlock_t netdev_lock;
2175        struct net_device __rcu *netdev;
2176        struct hlist_node ndev_hash_link;
2177        struct rdma_port_counter port_counter;
2178        struct rdma_hw_stats *hw_stats;
2179};
2180
2181/* rdma netdev type - specifies protocol type */
2182enum rdma_netdev_t {
2183        RDMA_NETDEV_OPA_VNIC,
2184        RDMA_NETDEV_IPOIB,
2185};
2186
2187/**
2188 * struct rdma_netdev - rdma netdev
2189 * For cases where netstack interfacing is required.
2190 */
2191struct rdma_netdev {
2192        void              *clnt_priv;
2193        struct ib_device  *hca;
2194        u8                 port_num;
2195        int                mtu;
2196
2197        /*
2198         * cleanup function must be specified.
2199         * FIXME: This is only used for OPA_VNIC and that usage should be
2200         * removed too.
2201         */
2202        void (*free_rdma_netdev)(struct net_device *netdev);
2203
2204        /* control functions */
2205        void (*set_id)(struct net_device *netdev, int id);
2206        /* send packet */
2207        int (*send)(struct net_device *dev, struct sk_buff *skb,
2208                    struct ib_ah *address, u32 dqpn);
2209        /* multicast */
2210        int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
2211                            union ib_gid *gid, u16 mlid,
2212                            int set_qkey, u32 qkey);
2213        int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
2214                            union ib_gid *gid, u16 mlid);
2215};
2216
2217struct rdma_netdev_alloc_params {
2218        size_t sizeof_priv;
2219        unsigned int txqs;
2220        unsigned int rxqs;
2221        void *param;
2222
2223        int (*initialize_rdma_netdev)(struct ib_device *device, u8 port_num,
2224                                      struct net_device *netdev, void *param);
2225};
2226
2227struct ib_odp_counters {
2228        atomic64_t faults;
2229        atomic64_t invalidations;
2230        atomic64_t prefetch;
2231};
2232
2233struct ib_counters {
2234        struct ib_device        *device;
2235        struct ib_uobject       *uobject;
2236        /* num of objects attached */
2237        atomic_t        usecnt;
2238};
2239
2240struct ib_counters_read_attr {
2241        u64     *counters_buff;
2242        u32     ncounters;
2243        u32     flags; /* use enum ib_read_counters_flags */
2244};
2245
2246struct uverbs_attr_bundle;
2247struct iw_cm_id;
2248struct iw_cm_conn_param;
2249
2250#define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member)                      \
2251        .size_##ib_struct =                                                    \
2252                (sizeof(struct drv_struct) +                                   \
2253                 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) +      \
2254                 BUILD_BUG_ON_ZERO(                                            \
2255                         !__same_type(((struct drv_struct *)NULL)->member,     \
2256                                      struct ib_struct)))
2257
2258#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp)                         \
2259        ((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp))
2260
2261#define rdma_zalloc_drv_obj(ib_dev, ib_type)                                   \
2262        rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)
2263
2264#define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
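
/*
 * Usage sketch (struct my_pd and its ibpd member are hypothetical driver
 * names): a provider embeds the ib_* object as the first member of its own
 * struct (the BUILD_BUG_ON_ZERO() above enforces a zero offset), advertises
 * the full size via INIT_RDMA_OBJ_SIZE() in its ib_device_ops, and the core
 * then allocates driver objects with rdma_zalloc_drv_obj():
 *
 *	struct my_pd {
 *		struct ib_pd ibpd;	(must be the first member)
 *		u32 pdn;
 *	};
 *
 *	static const struct ib_device_ops my_ops = {
 *		INIT_RDMA_OBJ_SIZE(ib_pd, my_pd, ibpd),
 *	};
 *
 *	struct ib_pd *pd = rdma_zalloc_drv_obj(ib_dev, ib_pd);
 */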
2265
2266struct rdma_user_mmap_entry {
2267        struct kref ref;
2268        struct ib_ucontext *ucontext;
2269        unsigned long start_pgoff;
2270        size_t npages;
2271        bool driver_removed;
2272};
2273
2274/* Return the offset (in bytes) the user should pass to libc's mmap() */
2275static inline u64
2276rdma_user_mmap_get_offset(const struct rdma_user_mmap_entry *entry)
2277{
2278        return (u64)entry->start_pgoff << PAGE_SHIFT;
2279}
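
/*
 * Flow sketch (driver-side, with a hypothetical "resp" response struct):
 * insert an entry covering the region, then return the offset to userspace,
 * which passes it straight to mmap():
 *
 *	ret = rdma_user_mmap_entry_insert(ucontext, entry, length);
 *	if (ret)
 *		return ret;
 *	resp.mmap_offset = rdma_user_mmap_get_offset(entry);
 */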
2280
2281/**
2282 * struct ib_device_ops - InfiniBand device operations
2283 * This structure defines all the InfiniBand device operations. Providers
2284 * must define the operations they support; any left undefined remain NULL.
2285 */
2286struct ib_device_ops {
2287        struct module *owner;
2288        enum rdma_driver_id driver_id;
2289        u32 uverbs_abi_ver;
2290        unsigned int uverbs_no_driver_id_binding:1;
2291
2292        int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
2293                         const struct ib_send_wr **bad_send_wr);
2294        int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
2295                         const struct ib_recv_wr **bad_recv_wr);
2296        void (*drain_rq)(struct ib_qp *qp);
2297        void (*drain_sq)(struct ib_qp *qp);
2298        int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
2299        int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2300        int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
2301        int (*req_ncomp_notif)(struct ib_cq *cq, int wc_cnt);
2302        int (*post_srq_recv)(struct ib_srq *srq,
2303                             const struct ib_recv_wr *recv_wr,
2304                             const struct ib_recv_wr **bad_recv_wr);
2305        int (*process_mad)(struct ib_device *device, int process_mad_flags,
2306                           u8 port_num, const struct ib_wc *in_wc,
2307                           const struct ib_grh *in_grh,
2308                           const struct ib_mad *in_mad, struct ib_mad *out_mad,
2309                           size_t *out_mad_size, u16 *out_mad_pkey_index);
2310        int (*query_device)(struct ib_device *device,
2311                            struct ib_device_attr *device_attr,
2312                            struct ib_udata *udata);
2313        int (*modify_device)(struct ib_device *device, int device_modify_mask,
2314                             struct ib_device_modify *device_modify);
2315        void (*get_dev_fw_str)(struct ib_device *device, char *str);
2316        const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
2317                                                     int comp_vector);
2318        int (*query_port)(struct ib_device *device, u8 port_num,
2319                          struct ib_port_attr *port_attr);
2320        int (*modify_port)(struct ib_device *device, u8 port_num,
2321                           int port_modify_mask,
2322                           struct ib_port_modify *port_modify);
2323        /**
2324         * The following mandatory functions are used only at device
2325         * registration.  Keep functions such as these at the end of this
2326         * structure to avoid cache line misses when accessing struct ib_device
2327         * in fast paths.
2328         */
2329        int (*get_port_immutable)(struct ib_device *device, u8 port_num,
2330                                  struct ib_port_immutable *immutable);
2331        enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
2332                                               u8 port_num);
2333        /**
2334         * When calling get_netdev, the HW vendor's driver should return the
2335         * net device of device @device at port @port_num or NULL if such
2336         * a net device doesn't exist. The vendor driver should call dev_hold
2337         * on this net device. The HW vendor's device driver must guarantee
2338         * that this function returns NULL before the net device has finished
2339         * NETDEV_UNREGISTER state.
2340         */
2341        struct net_device *(*get_netdev)(struct ib_device *device, u8 port_num);
2342        /**
2343         * rdma netdev operation
2344         *
2345         * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
2346         * must return -EOPNOTSUPP if it doesn't support the specified type.
2347         */
2348        struct net_device *(*alloc_rdma_netdev)(
2349                struct ib_device *device, u8 port_num, enum rdma_netdev_t type,
2350                const char *name, unsigned char name_assign_type,
2351                void (*setup)(struct net_device *));
2352
2353        int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num,
2354                                      enum rdma_netdev_t type,
2355                                      struct rdma_netdev_alloc_params *params);
2356        /**
2357         * query_gid should return the GID value for @device when the @port_num
2358         * link layer is either IB or iWarp. It is a no-op if the @port_num port
2359         * is a RoCE link layer.
2360         */
2361        int (*query_gid)(struct ib_device *device, u8 port_num, int index,
2362                         union ib_gid *gid);
2363        /**
2364         * When calling add_gid, the HW vendor's driver should add the GID
2365         * of the device port at the GID index given in @attr. Meta-info of
2366         * that GID (for example, the network device related to this GID) is
2367         * available in @attr. @context allows the HW vendor driver to store
2368         * extra information together with a GID entry. The HW vendor driver may
2369         * allocate memory to contain this information and store it in @context
2370         * when a new GID entry is written. Params are consistent until the
2371         * next call of add_gid or delete_gid. The function should return 0 on
2372         * success or an error otherwise. The function may be called
2373         * concurrently for different ports. This function is only called when
2374         * roce_gid_table is used.
2375         */
2376        int (*add_gid)(const struct ib_gid_attr *attr, void **context);
2377        /**
2378         * When calling del_gid, the HW vendor's driver should delete the
2379         * GID of device @device at the GID index and port number given
2380         * in @attr.
2381         * Upon the deletion of a GID entry, the HW vendor must free any
2382         * allocated memory. The caller will clear @context afterwards.
2383         * This function is only called when roce_gid_table is used.
2384         */
2385        int (*del_gid)(const struct ib_gid_attr *attr, void **context);
2386        int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index,
2387                          u16 *pkey);
2388        int (*alloc_ucontext)(struct ib_ucontext *context,
2389                              struct ib_udata *udata);
2390        void (*dealloc_ucontext)(struct ib_ucontext *context);
2391        int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
2392        /**
2393         * This will be called once the refcount of an entry in mmap_xa reaches
2394         * zero. The type of the memory that was mapped may differ between
2395         * entries and is opaque to the rdma_user_mmap interface.
2396         * Freeing therefore needs to be implemented by the driver in mmap_free.
2397         */
2398        void (*mmap_free)(struct rdma_user_mmap_entry *entry);
2399        void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
2400        int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2401        int (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2402        int (*create_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
2403                         struct ib_udata *udata);
2404        int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2405        int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2406        int (*destroy_ah)(struct ib_ah *ah, u32 flags);
2407        int (*create_srq)(struct ib_srq *srq,
2408                          struct ib_srq_init_attr *srq_init_attr,
2409                          struct ib_udata *udata);
2410        int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
2411                          enum ib_srq_attr_mask srq_attr_mask,
2412                          struct ib_udata *udata);
2413        int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
2414        int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
2415        struct ib_qp *(*create_qp)(struct ib_pd *pd,
2416                                   struct ib_qp_init_attr *qp_init_attr,
2417                                   struct ib_udata *udata);
2418        int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2419                         int qp_attr_mask, struct ib_udata *udata);
2420        int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2421                        int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
2422        int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
2423        int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
2424                         struct ib_udata *udata);
2425        int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
2426        int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
2427        int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
2428        struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
2429        struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
2430                                     u64 virt_addr, int mr_access_flags,
2431                                     struct ib_udata *udata);
2432        int (*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start, u64 length,
2433                             u64 virt_addr, int mr_access_flags,
2434                             struct ib_pd *pd, struct ib_udata *udata);
2435        int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
2436        struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
2437                                  u32 max_num_sg);
2438        struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd,
2439                                            u32 max_num_data_sg,
2440                                            u32 max_num_meta_sg);
2441        int (*advise_mr)(struct ib_pd *pd,
2442                         enum ib_uverbs_advise_mr_advice advice, u32 flags,
2443                         struct ib_sge *sg_list, u32 num_sge,
2444                         struct uverbs_attr_bundle *attrs);
2445        int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
2446                         unsigned int *sg_offset);
2447        int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2448                               struct ib_mr_status *mr_status);
2449        int (*alloc_mw)(struct ib_mw *mw, struct ib_udata *udata);
2450        int (*dealloc_mw)(struct ib_mw *mw);
2451        int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2452        int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2453        int (*alloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
2454        int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
2455        struct ib_flow *(*create_flow)(struct ib_qp *qp,
2456                                       struct ib_flow_attr *flow_attr,
2457                                       struct ib_udata *udata);
2458        int (*destroy_flow)(struct ib_flow *flow_id);
2459        struct ib_flow_action *(*create_flow_action_esp)(
2460                struct ib_device *device,
2461                const struct ib_flow_action_attrs_esp *attr,
2462                struct uverbs_attr_bundle *attrs);
2463        int (*destroy_flow_action)(struct ib_flow_action *action);
2464        int (*modify_flow_action_esp)(
2465                struct ib_flow_action *action,
2466                const struct ib_flow_action_attrs_esp *attr,
2467                struct uverbs_attr_bundle *attrs);
2468        int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
2469                                 int state);
2470        int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
2471                             struct ifla_vf_info *ivf);
2472        int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
2473                            struct ifla_vf_stats *stats);
2474        int (*get_vf_guid)(struct ib_device *device, int vf, u8 port,
2475                            struct ifla_vf_guid *node_guid,
2476                            struct ifla_vf_guid *port_guid);
2477        int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
2478                           int type);
2479        struct ib_wq *(*create_wq)(struct ib_pd *pd,
2480                                   struct ib_wq_init_attr *init_attr,
2481                                   struct ib_udata *udata);
2482        int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
2483        int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
2484                         u32 wq_attr_mask, struct ib_udata *udata);
2485        int (*create_rwq_ind_table)(struct ib_rwq_ind_table *ib_rwq_ind_table,
2486                                    struct ib_rwq_ind_table_init_attr *init_attr,
2487                                    struct ib_udata *udata);
2488        int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2489        struct ib_dm *(*alloc_dm)(struct ib_device *device,
2490                                  struct ib_ucontext *context,
2491                                  struct ib_dm_alloc_attr *attr,
2492                                  struct uverbs_attr_bundle *attrs);
2493        int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
2494        struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
2495                                   struct ib_dm_mr_attr *attr,
2496                                   struct uverbs_attr_bundle *attrs);
2497        int (*create_counters)(struct ib_counters *counters,
2498                               struct uverbs_attr_bundle *attrs);
2499        int (*destroy_counters)(struct ib_counters *counters);
2500        int (*read_counters)(struct ib_counters *counters,
2501                             struct ib_counters_read_attr *counters_read_attr,
2502                             struct uverbs_attr_bundle *attrs);
2503        int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg,
2504                            int data_sg_nents, unsigned int *data_sg_offset,
2505                            struct scatterlist *meta_sg, int meta_sg_nents,
2506                            unsigned int *meta_sg_offset);
2507
2508        /**
2509         * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
2510         *   driver initialized data.  The struct is kfree()'ed by the sysfs
2511         *   core when the device is removed.  A lifespan of -1 in the return
2512         *   struct tells the core to set a default lifespan.
2513         */
2514        struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
2515                                                u8 port_num);
2516        /**
2517         * get_hw_stats - Fill in the counter value(s) in the stats struct.
2518         * @index - The index in the value array we wish to have updated, or
2519         *   num_counters if we want all stats updated
2520         * Return codes -
2521         *   < 0 - Error, no counters updated
2522         *   index - Updated the single counter pointed to by index
2523         *   num_counters - Updated all counters (will reset the timestamp
2524         *     and prevent further calls for lifespan milliseconds)
2525         * Drivers are allowed to update all counters in lieu of just the
2526         *   one given in index, at their option
2527         */
2528        int (*get_hw_stats)(struct ib_device *device,
2529                            struct rdma_hw_stats *stats, u8 port, int index);
2530        /*
2531         * This function is called once for each port when an ib device is
2532         * registered.
2533         */
2534        int (*init_port)(struct ib_device *device, u8 port_num,
2535                         struct kobject *port_sysfs);
2536        /**
2537         * Allows rdma drivers to add their own restrack attributes.
2538         */
2539        int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
2540        int (*fill_res_mr_entry_raw)(struct sk_buff *msg, struct ib_mr *ibmr);
2541        int (*fill_res_cq_entry)(struct sk_buff *msg, struct ib_cq *ibcq);
2542        int (*fill_res_cq_entry_raw)(struct sk_buff *msg, struct ib_cq *ibcq);
2543        int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp);
2544        int (*fill_res_qp_entry_raw)(struct sk_buff *msg, struct ib_qp *ibqp);
2545        int (*fill_res_cm_id_entry)(struct sk_buff *msg, struct rdma_cm_id *id);
2546
2547        /* Device lifecycle callbacks */
2548        /*
2549         * Called after the device becomes registered, before clients are
2550         * attached
2551         */
2552        int (*enable_driver)(struct ib_device *dev);
2553        /*
2554         * This is called as part of ib_dealloc_device().
2555         */
2556        void (*dealloc_driver)(struct ib_device *dev);
2557
2558        /* iWarp CM callbacks */
2559        void (*iw_add_ref)(struct ib_qp *qp);
2560        void (*iw_rem_ref)(struct ib_qp *qp);
2561        struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn);
2562        int (*iw_connect)(struct iw_cm_id *cm_id,
2563                          struct iw_cm_conn_param *conn_param);
2564        int (*iw_accept)(struct iw_cm_id *cm_id,
2565                         struct iw_cm_conn_param *conn_param);
2566        int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata,
2567                         u8 pdata_len);
2568        int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
2569        int (*iw_destroy_listen)(struct iw_cm_id *cm_id);
2570        /**
2571         * counter_bind_qp - Bind a QP to a counter.
2572         * @counter - The counter to be bound. If counter->id is zero then
2573         *   the driver needs to allocate a new counter and set counter->id
2574         */
2575        int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp);
2576        /**
2577         * counter_unbind_qp - Unbind the qp from the dynamically-allocated
2578         *   counter and bind it onto the default one
2579         */
2580        int (*counter_unbind_qp)(struct ib_qp *qp);
2581        /**
2582         * counter_dealloc - De-allocate the hw counter
2583         */
2584        int (*counter_dealloc)(struct rdma_counter *counter);
2585        /**
2586         * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in
2587         * the driver initialized data.
2588         */
2589        struct rdma_hw_stats *(*counter_alloc_stats)(
2590                struct rdma_counter *counter);
2591        /**
2592         * counter_update_stats - Query the stats value of this counter
2593         */
2594        int (*counter_update_stats)(struct rdma_counter *counter);
2595
2596        /**
2597         * Allows rdma drivers to add their own restrack attributes
2598         * dumped via 'rdma stat' iproute2 command.
2599         */
2600        int (*fill_stat_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
2601
2602        /* query driver for its ucontext properties */
2603        int (*query_ucontext)(struct ib_ucontext *context,
2604                              struct uverbs_attr_bundle *attrs);
2605
2606        DECLARE_RDMA_OBJ_SIZE(ib_ah);
2607        DECLARE_RDMA_OBJ_SIZE(ib_counters);
2608        DECLARE_RDMA_OBJ_SIZE(ib_cq);
2609        DECLARE_RDMA_OBJ_SIZE(ib_mw);
2610        DECLARE_RDMA_OBJ_SIZE(ib_pd);
2611        DECLARE_RDMA_OBJ_SIZE(ib_rwq_ind_table);
2612        DECLARE_RDMA_OBJ_SIZE(ib_srq);
2613        DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
2614        DECLARE_RDMA_OBJ_SIZE(ib_xrcd);
2615};
2616
2617struct ib_core_device {
2618        /* device must be the first element in the structure as long as the
2619         * union of ib_core_device and device exists in ib_device.
2620         */
2621        struct device dev;
2622        possible_net_t rdma_net;
2623        struct kobject *ports_kobj;
2624        struct list_head port_list;
2625        struct ib_device *owner; /* reach back to owner ib_device */
2626};
2627
2628struct rdma_restrack_root;
2629struct ib_device {
2630        /* Do not access @dma_device directly from ULP nor from HW drivers. */
2631        struct device                *dma_device;
2632        struct ib_device_ops         ops;
2633        char                          name[IB_DEVICE_NAME_MAX];
2634        struct rcu_head rcu_head;
2635
2636        struct list_head              event_handler_list;
2637        /* Protects event_handler_list */
2638        struct rw_semaphore event_handler_rwsem;
2639
2640        /* Protects QP's event_handler calls and open_qp list */
2641        spinlock_t qp_open_list_lock;
2642
2643        struct rw_semaphore           client_data_rwsem;
2644        struct xarray                 client_data;
2645        struct mutex                  unregistration_lock;
2646
2647        /* Synchronize GID, Pkey cache entries, subnet prefix, LMC */
2648        rwlock_t cache_lock;
2649        /**
2650         * port_data is indexed by port number
2651         */
2652        struct ib_port_data *port_data;
2653
2654        int                           num_comp_vectors;
2655
2656        union {
2657                struct device           dev;
2658                struct ib_core_device   coredev;
2659        };
2660
2661        /* First group for device attributes,
2662         * Second group for driver provided attributes (optional).
2663         * It is a NULL-terminated array.
2664         */
2665        const struct attribute_group    *groups[3];
2666
2667        u64                          uverbs_cmd_mask;
2668        u64                          uverbs_ex_cmd_mask;
2669
2670        char                         node_desc[IB_DEVICE_NODE_DESC_MAX];
2671        __be64                       node_guid;
2672        u32                          local_dma_lkey;
2673        u16                          is_switch:1;
2674        /* Indicates kernel verbs support, should not be used in drivers */
2675        u16                          kverbs_provider:1;
2676        /* CQ adaptive moderation (RDMA DIM) */
2677        u16                          use_cq_dim:1;
2678        u8                           node_type;
2679        u8                           phys_port_cnt;
2680        struct ib_device_attr        attrs;
2681        struct attribute_group       *hw_stats_ag;
2682        struct rdma_hw_stats         *hw_stats;
2683
2684#ifdef CONFIG_CGROUP_RDMA
2685        struct rdmacg_device         cg_device;
2686#endif
2687
2688        u32                          index;
2689
2690        spinlock_t                   cq_pools_lock;
2691        struct list_head             cq_pools[IB_POLL_LAST_POOL_TYPE + 1];
2692
2693        struct rdma_restrack_root *res;
2694
2695        const struct uapi_definition   *driver_def;
2696
2697        /*
2698         * Positive refcount indicates that the device is currently
2699         * registered and cannot be unregistered.
2700         */
2701        refcount_t refcount;
2702        struct completion unreg_completion;
2703        struct work_struct unregistration_work;
2704
2705        const struct rdma_link_ops *link_ops;
2706
2707        /* Protects compat_devs xarray modifications */
2708        struct mutex compat_devs_mutex;
2709        /* Maintains compat devices for each net namespace */
2710        struct xarray compat_devs;
2711
2712        /* Used by iWarp CM */
2713        char iw_ifname[IFNAMSIZ];
2714        u32 iw_driver_flags;
2715        u32 lag_flags;
2716};
2717
2718struct ib_client_nl_info;
2719struct ib_client {
2720        const char *name;
2721        int (*add)(struct ib_device *ibdev);
2722        void (*remove)(struct ib_device *, void *client_data);
2723        void (*rename)(struct ib_device *dev, void *client_data);
2724        int (*get_nl_info)(struct ib_device *ibdev, void *client_data,
2725                           struct ib_client_nl_info *res);
2726        int (*get_global_nl_info)(struct ib_client_nl_info *res);
2727
2728        /* Returns the net_dev belonging to this ib_client and matching the
2729         * given parameters.
2730         * @dev:         An RDMA device that the net_dev uses for communication.
2731         * @port:        A physical port number on the RDMA device.
2732         * @pkey:        P_Key that the net_dev uses if applicable.
2733         * @gid:         A GID that the net_dev uses to communicate.
2734         * @addr:        An IP address the net_dev is configured with.
2735         * @client_data: The device's client data set by ib_set_client_data().
2736         *
2737         * An ib_client that implements a net_dev on top of RDMA devices
2738         * (such as IP over IB) should implement this callback, allowing the
2739         * rdma_cm module to find the right net_dev for a given request.
2740         *
2741         * The caller is responsible for calling dev_put on the returned
2742         * netdev. */
2743        struct net_device *(*get_net_dev_by_params)(
2744                        struct ib_device *dev,
2745                        u8 port,
2746                        u16 pkey,
2747                        const union ib_gid *gid,
2748                        const struct sockaddr *addr,
2749                        void *client_data);
2750
2751        refcount_t uses;
2752        struct completion uses_zero;
2753        u32 client_id;
2754
2755        /* kverbs are not required by the client */
2756        u8 no_kverbs_req:1;
2757};
2758
2759/*
2760 * IB block DMA iterator
2761 *
2762 * Iterates the DMA-mapped SGL in contiguous memory blocks aligned
2763 * to a HW supported page size.
2764 */
2765struct ib_block_iter {
2766        /* internal states */
2767        struct scatterlist *__sg;       /* sg holding the current aligned block */
2768        dma_addr_t __dma_addr;          /* unaligned DMA address of this block */
2769        unsigned int __sg_nents;        /* number of SG entries */
2770        unsigned int __sg_advance;      /* number of bytes to advance in sg in next step */
2771        unsigned int __pg_bit;          /* alignment of current block */
2772};
2773
2774struct ib_device *_ib_alloc_device(size_t size);
2775#define ib_alloc_device(drv_struct, member)                                    \
2776        container_of(_ib_alloc_device(sizeof(struct drv_struct) +              \
2777                                      BUILD_BUG_ON_ZERO(offsetof(              \
2778                                              struct drv_struct, member))),    \
2779                     struct drv_struct, member)
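
/*
 * Usage sketch (struct my_dev and its ibdev member are hypothetical): the
 * embedded ib_device must be the first member; the BUILD_BUG_ON_ZERO()
 * above enforces the zero offset:
 *
 *	struct my_dev {
 *		struct ib_device ibdev;
 *		void __iomem *regs;
 *	};
 *
 *	struct my_dev *dev = ib_alloc_device(my_dev, ibdev);
 *	if (!dev)
 *		return -ENOMEM;
 */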
2780
2781void ib_dealloc_device(struct ib_device *device);
2782
2783void ib_get_device_fw_str(struct ib_device *device, char *str);
2784
2785int ib_register_device(struct ib_device *device, const char *name,
2786                       struct device *dma_device);
2787void ib_unregister_device(struct ib_device *device);
2788void ib_unregister_driver(enum rdma_driver_id driver_id);
2789void ib_unregister_device_and_put(struct ib_device *device);
2790void ib_unregister_device_queued(struct ib_device *ib_dev);
2791
2792int ib_register_client(struct ib_client *client);
2793void ib_unregister_client(struct ib_client *client);
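
/*
 * Usage sketch (my_client, my_add and my_remove are hypothetical): a client
 * supplies add/remove callbacks and is then notified for every registered
 * ib_device:
 *
 *	static struct ib_client my_client = {
 *		.name   = "my_client",
 *		.add    = my_add,
 *		.remove = my_remove,
 *	};
 *
 *	ret = ib_register_client(&my_client);
 */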
2794
2795void __rdma_block_iter_start(struct ib_block_iter *biter,
2796                             struct scatterlist *sglist,
2797                             unsigned int nents,
2798                             unsigned long pgsz);
2799bool __rdma_block_iter_next(struct ib_block_iter *biter);
2800
2801/**
2802 * rdma_block_iter_dma_address - get the aligned dma address of the current
2803 * block held by the block iterator.
2804 * @biter: block iterator holding the memory block
2805 */
2806static inline dma_addr_t
2807rdma_block_iter_dma_address(struct ib_block_iter *biter)
2808{
2809        return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
2810}
2811
2812/**
2813 * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
2814 * @sglist: sglist to iterate over
2815 * @biter: block iterator holding the memory block
2816 * @nents: maximum number of sg entries to iterate over
2817 * @pgsz: best HW supported page size to use
2818 *
2819 * Callers may use rdma_block_iter_dma_address() to get each
2820 * block's aligned DMA address.
2821 */
2822#define rdma_for_each_block(sglist, biter, nents, pgsz)         \
2823        for (__rdma_block_iter_start(biter, sglist, nents,      \
2824                                     pgsz);                     \
2825             __rdma_block_iter_next(biter);)
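
/*
 * Usage sketch ("pas" is a hypothetical destination array): collect the
 * aligned DMA address of every pgsz-sized block of a mapped SGL:
 *
 *	struct ib_block_iter biter;
 *	int i = 0;
 *
 *	rdma_for_each_block(sgl, &biter, nents, pgsz)
 *		pas[i++] = rdma_block_iter_dma_address(&biter);
 */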
2826
2827/**
2828 * ib_get_client_data - Get IB client context
2829 * @device:Device to get context for
2830 * @client:Client to get context for
2831 *
2832 * ib_get_client_data() returns the client context data set with
2833 * ib_set_client_data(). This can only be called while the client is
2834 * registered to the device; once the ib_client remove() callback returns, this
2835 * cannot be called.
2836 */
2837static inline void *ib_get_client_data(struct ib_device *device,
2838                                       struct ib_client *client)
2839{
2840        return xa_load(&device->client_data, client->client_id);
2841}
2842void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
2843                         void *data);
2844void ib_set_device_ops(struct ib_device *device,
2845                       const struct ib_device_ops *ops);
2846
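/*
 * Illustrative sketch (not part of this header): a client typically stashes
 * per-device state from its add() callback and fetches it later on hot
 * paths.  "client" must still be registered with "device" when the state is
 * read back.
 */
static inline void *example_stash_and_fetch(struct ib_device *device,
                                            struct ib_client *client,
                                            void *state)
{
        ib_set_client_data(device, client, state);
        return ib_get_client_data(device, client);
}
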
2847int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
2848                      unsigned long pfn, unsigned long size, pgprot_t prot,
2849                      struct rdma_user_mmap_entry *entry);
2850int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
2851                                struct rdma_user_mmap_entry *entry,
2852                                size_t length);
2853int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
2854                                      struct rdma_user_mmap_entry *entry,
2855                                      size_t length, u32 min_pgoff,
2856                                      u32 max_pgoff);
2857
2858struct rdma_user_mmap_entry *
2859rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
2860                               unsigned long pgoff);
2861struct rdma_user_mmap_entry *
2862rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
2863                         struct vm_area_struct *vma);
2864void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry);
2865
2866void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry);
2867
2868static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2869{
2870        return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2871}
2872
2873static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2874{
2875        return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
2876}
2877
2878static inline bool ib_is_buffer_cleared(const void __user *p,
2879                                        size_t len)
2880{
2881        bool ret;
2882        u8 *buf;
2883
2884        if (len > USHRT_MAX)
2885                return false;
2886
2887        buf = memdup_user(p, len);
2888        if (IS_ERR(buf))
2889                return false;
2890
2891        ret = !memchr_inv(buf, 0, len);
2892        kfree(buf);
2893        return ret;
2894}
2895
2896static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2897                                       size_t offset,
2898                                       size_t len)
2899{
2900        return ib_is_buffer_cleared(udata->inbuf + offset, len);
2901}
2902
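/*
 * Illustrative sketch of the usual driver pattern for extensible uABI
 * structs: copy the known part of the request, require that any trailing
 * bytes from a newer userspace are zero, and write back a response.  The
 * "example_req"/"example_resp" layouts are hypothetical.
 */
struct example_req {
        __u32 flags;
        __u32 reserved;
};

struct example_resp {
        __u32 handle;
};

static inline int example_handle_udata(struct ib_udata *udata,
                                       struct example_resp *resp)
{
        struct example_req req = {};
        int ret;

        if (udata->inlen < sizeof(req))
                return -EINVAL;
        ret = ib_copy_from_udata(&req, udata, sizeof(req));
        if (ret)
                return ret;
        if (udata->inlen > sizeof(req) &&
            !ib_is_udata_cleared(udata, sizeof(req),
                                 udata->inlen - sizeof(req)))
                return -EOPNOTSUPP;
        return ib_copy_to_udata(udata, resp,
                                min_t(size_t, udata->outlen, sizeof(*resp)));
}
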
2903/**
2904 * ib_is_destroy_retryable - Check whether the uobject destruction
2905 * is retryable.
2906 * @ret: The initial destruction return code
2907 * @why: remove reason
2908 * @uobj: The uobject that is destroyed
2909 *
2910 * This function is a helper that the IB layer and low-level drivers
2911 * can use to decide whether the destruction of the given uobject is
2912 * retryable.
2913 * It checks the original return code: if the destroy failed, it is
2914 * retryable depending on the ucontext state (i.e. cleanup_retryable) and
2915 * the remove reason (i.e. why).
2916 * Must be called with the object locked for destroy.
2917 */
2918static inline bool ib_is_destroy_retryable(int ret, enum rdma_remove_reason why,
2919                                           struct ib_uobject *uobj)
2920{
2921        return ret && (why == RDMA_REMOVE_DESTROY ||
2922                       uobj->context->cleanup_retryable);
2923}
2924
2925/**
2926 * ib_destroy_usecnt - Called during destruction to check the usecnt
2927 * @usecnt: The usecnt atomic
2928 * @why: remove reason
2929 * @uobj: The uobject that is destroyed
2930 *
2931 * Non-zero usecnts will block destruction unless destruction was triggered by
2932 * a ucontext cleanup.
2933 */
2934static inline int ib_destroy_usecnt(atomic_t *usecnt,
2935                                    enum rdma_remove_reason why,
2936                                    struct ib_uobject *uobj)
2937{
2938        if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj))
2939                return -EBUSY;
2940        return 0;
2941}
2942
2943/**
2944 * ib_modify_qp_is_ok - Check that the supplied attribute mask
2945 * contains all required attributes and no attributes not allowed for
2946 * the given QP state transition.
2947 * @cur_state: Current QP state
2948 * @next_state: Next QP state
2949 * @type: QP type
2950 * @mask: Mask of supplied QP attributes
2951 *
2952 * This function is a helper function that a low-level driver's
2953 * modify_qp method can use to validate the consumer's input.  It
2954 * checks that cur_state and next_state are valid QP states, that a
2955 * transition from cur_state to next_state is allowed by the IB spec,
2956 * and that the attribute mask supplied is allowed for the transition.
2957 */
2958bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
2959                        enum ib_qp_type type, enum ib_qp_attr_mask mask);
2960
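/*
 * Illustrative sketch (not part of this header): a driver's modify_qp verb
 * would validate the attribute mask for a RESET->INIT transition like this
 * before touching hardware.
 */
static inline bool example_reset_to_init_ok(struct ib_qp *qp, int attr_mask)
{
        return ib_modify_qp_is_ok(IB_QPS_RESET, IB_QPS_INIT, qp->qp_type,
                                  attr_mask);
}
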
2961void ib_register_event_handler(struct ib_event_handler *event_handler);
2962void ib_unregister_event_handler(struct ib_event_handler *event_handler);
2963void ib_dispatch_event(const struct ib_event *event);
2964
2965int ib_query_port(struct ib_device *device,
2966                  u8 port_num, struct ib_port_attr *port_attr);
2967
2968enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
2969                                               u8 port_num);
2970
2971/**
2972 * rdma_cap_ib_switch - Check if the device is IB switch
2973 * @device: Device to check
2974 *
2975 * Device driver is responsible for setting is_switch bit on
2976 * in ib_device structure at init time.
2977 *
2978 * Return: true if the device is IB switch.
2979 */
2980static inline bool rdma_cap_ib_switch(const struct ib_device *device)
2981{
2982        return device->is_switch;
2983}
2984
2985/**
2986 * rdma_start_port - Return the first valid port number for the device
2987 * specified
2988 *
2989 * @device: Device to be checked
2990 *
2991 * Return start port number
2992 */
2993static inline u8 rdma_start_port(const struct ib_device *device)
2994{
2995        return rdma_cap_ib_switch(device) ? 0 : 1;
2996}
2997
2998/**
2999 * rdma_for_each_port - Iterate over all valid port numbers of the IB device
3000 * @device: The struct ib_device * to iterate over
3001 * @iter: The unsigned int to store the port number
3002 */
3003#define rdma_for_each_port(device, iter)                                       \
3004        for (iter = rdma_start_port(device + BUILD_BUG_ON_ZERO(!__same_type(   \
3005                                                     unsigned int, iter)));    \
3006             iter <= rdma_end_port(device); (iter)++)
3007
3008/**
3009 * rdma_end_port - Return the last valid port number for the device
3010 * specified
3011 *
3012 * @device: Device to be checked
3013 *
3014 * Return last port number
3015 */
3016static inline u8 rdma_end_port(const struct ib_device *device)
3017{
3018        return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
3019}
3020
3021static inline int rdma_is_port_valid(const struct ib_device *device,
3022                                     unsigned int port)
3023{
3024        return (port >= rdma_start_port(device) &&
3025                port <= rdma_end_port(device));
3026}
3027
3028static inline bool rdma_is_grh_required(const struct ib_device *device,
3029                                        u8 port_num)
3030{
3031        return device->port_data[port_num].immutable.core_cap_flags &
3032               RDMA_CORE_PORT_IB_GRH_REQUIRED;
3033}
3034
3035static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
3036{
3037        return device->port_data[port_num].immutable.core_cap_flags &
3038               RDMA_CORE_CAP_PROT_IB;
3039}
3040
3041static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
3042{
3043        return device->port_data[port_num].immutable.core_cap_flags &
3044               (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
3045}
3046
3047static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
3048{
3049        return device->port_data[port_num].immutable.core_cap_flags &
3050               RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
3051}
3052
3053static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
3054{
3055        return device->port_data[port_num].immutable.core_cap_flags &
3056               RDMA_CORE_CAP_PROT_ROCE;
3057}
3058
3059static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
3060{
3061        return device->port_data[port_num].immutable.core_cap_flags &
3062               RDMA_CORE_CAP_PROT_IWARP;
3063}
3064
3065static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
3066{
3067        return rdma_protocol_ib(device, port_num) ||
3068                rdma_protocol_roce(device, port_num);
3069}
3070
3071static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
3072{
3073        return device->port_data[port_num].immutable.core_cap_flags &
3074               RDMA_CORE_CAP_PROT_RAW_PACKET;
3075}
3076
3077static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
3078{
3079        return device->port_data[port_num].immutable.core_cap_flags &
3080               RDMA_CORE_CAP_PROT_USNIC;
3081}
3082
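/*
 * Illustrative sketch (not part of this header): walk all valid ports of a
 * device and count those running RoCE.  rdma_for_each_port() requires the
 * iterator to be an unsigned int.
 */
static inline unsigned int example_count_roce_ports(struct ib_device *dev)
{
        unsigned int i, n = 0;

        rdma_for_each_port(dev, i)
                if (rdma_protocol_roce(dev, i))
                        n++;
        return n;
}
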
3083/**
3084 * rdma_cap_ib_mad - Check if the port of a device supports InfiniBand
3085 * Management Datagrams.
3086 * @device: Device to check
3087 * @port_num: Port number to check
3088 *
3089 * Management Datagrams (MAD) are a required part of the InfiniBand
3090 * specification and are supported on all InfiniBand devices.  A slightly
3091 * extended version is also supported on OPA interfaces.
3092 *
3093 * Return: true if the port supports sending/receiving of MAD packets.
3094 */
3095static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
3096{
3097        return device->port_data[port_num].immutable.core_cap_flags &
3098               RDMA_CORE_CAP_IB_MAD;
3099}
3100
3101/**
3102 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
3103 * Management Datagrams.
3104 * @device: Device to check
3105 * @port_num: Port number to check
3106 *
3107 * Intel OmniPath devices extend and/or replace the InfiniBand Management
3108 * datagrams with their own versions.  These OPA MADs share many but not all of
3109 * the characteristics of InfiniBand MADs.
3110 *
3111 * OPA MADs differ in the following ways:
3112 *
3113 *    1) MADs are variable size up to 2K
3114 *       IBTA defined MADs remain fixed at 256 bytes
3115 *    2) OPA SMPs must carry valid PKeys
3116 *    3) OPA SMP packets are a different format
3117 *
3118 * Return: true if the port supports OPA MAD packet formats.
3119 */
3120static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
3121{
3122        return device->port_data[port_num].immutable.core_cap_flags &
3123                RDMA_CORE_CAP_OPA_MAD;
3124}
3125
3126/**
3127 * rdma_cap_ib_smi - Check if the port of a device provides an InfiniBand
3128 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
3129 * @device: Device to check
3130 * @port_num: Port number to check
3131 *
3132 * Each InfiniBand node is required to provide a Subnet Management Agent
3133 * that the subnet manager can access.  Prior to the fabric being fully
3134 * configured by the subnet manager, the SMA is accessed via a well known
3135 * interface called the Subnet Management Interface (SMI).  This interface
3136 * uses directed route packets to communicate with the SM to get around the
3137 * chicken and egg problem of the SM needing to know what's on the fabric
3138 * in order to configure the fabric, and needing to configure the fabric in
3139 * order to send packets to the devices on the fabric.  These directed
3140 * route packets do not need the fabric fully configured in order to reach
3141 * their destination.  The SMI is the only method allowed to send
3142 * directed route packets on an InfiniBand fabric.
3143 *
3144 * Return: true if the port provides an SMI.
3145 */
3146static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
3147{
3148        return device->port_data[port_num].immutable.core_cap_flags &
3149               RDMA_CORE_CAP_IB_SMI;
3150}
3151
3152/**
3153 * rdma_cap_ib_cm - Check if the port of a device supports the InfiniBand
3154 * Communication Manager.
3155 * @device: Device to check
3156 * @port_num: Port number to check
3157 *
3158 * The InfiniBand Communication Manager is one of many pre-defined General
3159 * Service Agents (GSA) that are accessed via the General Service
3160 * Interface (GSI).  Its role is to facilitate establishment of connections
3161 * between nodes as well as other management related tasks for established
3162 * connections.
3163 *
3164 * Return: true if the port supports an IB CM (this does not guarantee that
3165 * a CM is actually running however).
3166 */
3167static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
3168{
3169        return device->port_data[port_num].immutable.core_cap_flags &
3170               RDMA_CORE_CAP_IB_CM;
3171}
3172
3173/**
3174 * rdma_cap_iw_cm - Check if the port of a device supports the iWARP
3175 * Communication Manager.
3176 * @device: Device to check
3177 * @port_num: Port number to check
3178 *
3179 * Similar to above, but specific to iWARP connections which have a different
3180 * management protocol than InfiniBand.
3181 *
3182 * Return: true if the port supports an iWARP CM (this does not guarantee that
3183 * a CM is actually running however).
3184 */
3185static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
3186{
3187        return device->port_data[port_num].immutable.core_cap_flags &
3188               RDMA_CORE_CAP_IW_CM;
3189}
3190
3191/**
3192 * rdma_cap_ib_sa - Check if the port of a device supports InfiniBand
3193 * Subnet Administration.
3194 * @device: Device to check
3195 * @port_num: Port number to check
3196 *
3197 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
3198 * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
3199 * fabrics, devices should resolve routes to other hosts by contacting the
3200 * SA to query the proper route.
3201 *
3202 * Return: true if the port should act as a client to the fabric Subnet
3203 * Administration interface.  This does not imply that the SA service is
3204 * running locally.
3205 */
3206static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
3207{
3208        return device->port_data[port_num].immutable.core_cap_flags &
3209               RDMA_CORE_CAP_IB_SA;
3210}
3211
3212/**
3213 * rdma_cap_ib_mcast - Check if the port of a device supports InfiniBand
3214 * Multicast.
3215 * @device: Device to check
3216 * @port_num: Port number to check
3217 *
3218 * InfiniBand multicast registration is more complex than normal IPv4 or
3219 * IPv6 multicast registration.  Each Host Channel Adapter must register
3220 * with the Subnet Manager when it wishes to join a multicast group.  It
3221 * should do so only once regardless of how many queue pairs it subscribes
3222 * to this group.  And it should leave the group only after all queue pairs
3223 * attached to the group have been detached.
3224 *
3225 * Return: true if the port must undertake the additional administrative
3226 * overhead of registering/unregistering with the SM and tracking of the
3227 * total number of queue pairs attached to the multicast group.
3228 */
3229static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
3230{
3231        return rdma_cap_ib_sa(device, port_num);
3232}
3233
3234/**
3235 * rdma_cap_af_ib - Check if the port of a device supports
3236 * Native InfiniBand Addressing.
3237 * @device: Device to check
3238 * @port_num: Port number to check
3239 *
3240 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
3241 * GID.  RoCE uses a different mechanism, but still generates a GID via
3242 * a prescribed mechanism and port specific data.
3243 *
3244 * Return: true if the port uses a GID address to identify devices on the
3245 * network.
3246 */
3247static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
3248{
3249        return device->port_data[port_num].immutable.core_cap_flags &
3250               RDMA_CORE_CAP_AF_IB;
3251}
3252
3253/**
3254 * rdma_cap_eth_ah - Check if the port of a device supports the
3255 * Ethernet Address Handle.
3256 * @device: Device to check
3257 * @port_num: Port number to check
3258 *
3259 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
3260 * to fabricate GIDs over Ethernet/IP specific addresses native to the
3261 * port.  Normally, packet headers are generated by the sending host
3262 * adapter, but when sending connectionless datagrams, we must manually
3263 * inject the proper headers for the fabric we are communicating over.
3264 *
3265 * Return: true if we are running as a RoCE port and must force the
3266 * addition of a Global Route Header built from our Ethernet Address
3267 * Handle into our header list for connectionless packets.
3268 */
3269static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
3270{
3271        return device->port_data[port_num].immutable.core_cap_flags &
3272               RDMA_CORE_CAP_ETH_AH;
3273}
3274
3275/**
3276 * rdma_cap_opa_ah - Check if the port of a device supports
3277 * OPA Address handles
3278 * @device: Device to check
3279 * @port_num: Port number to check
3280 *
3281 * Return: true if we are running on an OPA device which supports
3282 * the extended OPA addressing.
3283 */
3284static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
3285{
3286        return (device->port_data[port_num].immutable.core_cap_flags &
3287                RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
3288}
3289
3290/**
3291 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
3292 *
3293 * @device: Device
3294 * @port_num: Port number
3295 *
3296 * This MAD size includes the MAD headers and MAD payload.  No other headers
3297 * are included.
3298 *
3299 * Return the max MAD size required by the Port.  Will return 0 if the port
3300 * does not support MADs.
3301 */
3302static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
3303{
3304        return device->port_data[port_num].immutable.max_mad_size;
3305}
3306
3307/**
3308 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
3309 * @device: Device to check
3310 * @port_num: Port number to check
3311 *
3312 * RoCE GID table mechanism manages the various GIDs for a device.
3313 *
3314 * NOTE: if allocating the port's GID table has failed, this call will still
3315 * return true, but any RoCE GID table API will fail.
3316 *
3317 * Return: true if the port uses RoCE GID table mechanism in order to manage
3318 * its GIDs.
3319 */
3320static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
3321                                           u8 port_num)
3322{
3323        return rdma_protocol_roce(device, port_num) &&
3324                device->ops.add_gid && device->ops.del_gid;
3325}
3326
3327/*
3328 * Check if the device supports READ W/ INVALIDATE.
3329 */
3330static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
3331{
3332        /*
3333         * iWarp drivers must support READ W/ INVALIDATE.  No other protocol
3334         * has support for it yet.
3335         */
3336        return rdma_protocol_iwarp(dev, port_num);
3337}
3338
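/*
 * Illustrative sketch (not part of this header): choose the RDMA READ
 * opcode based on whether the port supports READ W/ INVALIDATE, as iWarp
 * requires.
 */
static inline enum ib_wr_opcode example_read_opcode(struct ib_device *dev,
                                                    u32 port_num)
{
        return rdma_cap_read_inv(dev, port_num) ?
                IB_WR_RDMA_READ_WITH_INV : IB_WR_RDMA_READ;
}
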
3339/**
3340 * rdma_core_cap_opa_port - Return whether the RDMA Port is OPA or not.
3341 * @device: Device
3342 * @port_num: 1 based Port number
3343 *
3344 * Return true if the port is an Intel OPA port, false if not.
3345 */
3346static inline bool rdma_core_cap_opa_port(struct ib_device *device,
3347                                          u32 port_num)
3348{
3349        return (device->port_data[port_num].immutable.core_cap_flags &
3350                RDMA_CORE_PORT_INTEL_OPA) == RDMA_CORE_PORT_INTEL_OPA;
3351}
3352
3353/**
3354 * rdma_mtu_enum_to_int - Return the mtu of the port as an integer value.
3355 * @device: Device
3356 * @port: Port number
3357 * @mtu: enum value of MTU
3358 *
3359 * Return the MTU size supported by the port as an integer value. Will return
3360 * -1 if enum value of mtu is not supported.
3361 */
3362static inline int rdma_mtu_enum_to_int(struct ib_device *device, u8 port,
3363                                       int mtu)
3364{
3365        if (rdma_core_cap_opa_port(device, port))
3366                return opa_mtu_enum_to_int((enum opa_mtu)mtu);
3367        else
3368                return ib_mtu_enum_to_int((enum ib_mtu)mtu);
3369}
3370
3371/**
3372 * rdma_mtu_from_attr - Return the mtu of the port from the port attribute.
3373 * @device: Device
3374 * @port: Port number
3375 * @attr: port attribute
3376 *
3377 * Return the MTU size supported by the port as an integer value.
3378 */
3379static inline int rdma_mtu_from_attr(struct ib_device *device, u8 port,
3380                                     struct ib_port_attr *attr)
3381{
3382        if (rdma_core_cap_opa_port(device, port))
3383                return attr->phys_mtu;
3384        else
3385                return ib_mtu_enum_to_int(attr->max_mtu);
3386}
3387
3388int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
3389                         int state);
3390int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
3391                     struct ifla_vf_info *info);
3392int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
3393                    struct ifla_vf_stats *stats);
3394int ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
3395                    struct ifla_vf_guid *node_guid,
3396                    struct ifla_vf_guid *port_guid);
3397int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
3398                   int type);
3399
3400int ib_query_pkey(struct ib_device *device,
3401                  u8 port_num, u16 index, u16 *pkey);
3402
3403int ib_modify_device(struct ib_device *device,
3404                     int device_modify_mask,
3405                     struct ib_device_modify *device_modify);
3406
3407int ib_modify_port(struct ib_device *device,
3408                   u8 port_num, int port_modify_mask,
3409                   struct ib_port_modify *port_modify);
3410
3411int ib_find_gid(struct ib_device *device, union ib_gid *gid,
3412                u8 *port_num, u16 *index);
3413
3414int ib_find_pkey(struct ib_device *device,
3415                 u8 port_num, u16 pkey, u16 *index);
3416
3417enum ib_pd_flags {
3418        /*
3419         * Create a memory registration for all memory in the system and place
3420         * the rkey for it into pd->unsafe_global_rkey.  This can be used by
3421         * ULPs to avoid the overhead of dynamic MRs.
3422         *
3423         * This flag is generally considered unsafe and must only be used in
3424 * extremely trusted environments.  Every use of it will log a warning
3425         * in the kernel log.
3426         */
3427        IB_PD_UNSAFE_GLOBAL_RKEY        = 0x01,
3428};
3429
3430struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
3431                const char *caller);
3432
3433#define ib_alloc_pd(device, flags) \
3434        __ib_alloc_pd((device), (flags), KBUILD_MODNAME)
3435
3436int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
3437
3438/**
3439 * ib_dealloc_pd - Deallocate kernel PD
3440 * @pd: The protection domain
3441 *
3442 * NOTE: for user PD use ib_dealloc_pd_user with valid udata!
3443 */
3444static inline void ib_dealloc_pd(struct ib_pd *pd)
3445{
3446        int ret = ib_dealloc_pd_user(pd, NULL);
3447
3448        WARN_ONCE(ret, "Destroy of kernel PD shouldn't fail");
3449}
3450
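/*
 * Illustrative sketch (not part of this header): typical kernel ULP PD
 * setup and teardown.  Passing 0 for flags avoids the unsafe global rkey.
 */
static inline int example_pd_lifecycle(struct ib_device *dev)
{
        struct ib_pd *pd = ib_alloc_pd(dev, 0);

        if (IS_ERR(pd))
                return PTR_ERR(pd);
        /* ... create QPs/CQs/MRs under "pd" ... */
        ib_dealloc_pd(pd);
        return 0;
}
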
3451enum rdma_create_ah_flags {
3452        /* In a sleepable context */
3453        RDMA_CREATE_AH_SLEEPABLE = BIT(0),
3454};
3455
3456/**
3457 * rdma_create_ah - Creates an address handle for the given address vector.
3458 * @pd: The protection domain associated with the address handle.
3459 * @ah_attr: The attributes of the address vector.
3460 * @flags: Create address handle flags (see enum rdma_create_ah_flags).
3461 *
3462 * The address handle is used to reference a local or global destination
3463 * in all UD QP post sends.
3464 */
3465struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
3466                             u32 flags);
3467
3468/**
3469 * rdma_create_user_ah - Creates an address handle for the given address vector.
3470 * It resolves the destination MAC address for RoCE-type ah attributes.
3471 * @pd: The protection domain associated with the address handle.
3472 * @ah_attr: The attributes of the address vector.
3473 * @udata: pointer to user's input/output buffer information needed by
3474 *         the provider driver.
3475 *
3476 * It returns a valid address handle on success and an error pointer on error.
3477 * The address handle is used to reference a local or global destination
3478 * in all UD QP post sends.
3479 */
3480struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
3481                                  struct rdma_ah_attr *ah_attr,
3482                                  struct ib_udata *udata);
3483/**
3484 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
3485 *   work completion.
3486 * @hdr: the L3 header to parse
3487 * @net_type: type of header to parse
3488 * @sgid: place to store source gid
3489 * @dgid: place to store destination gid
3490 */
3491int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
3492                              enum rdma_network_type net_type,
3493                              union ib_gid *sgid, union ib_gid *dgid);
3494
3495/**
3496 * ib_get_rdma_header_version - Get the header version
3497 * @hdr: the L3 header to parse
3498 */
3499int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
3500
3501/**
3502 * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
3503 *   work completion.
3504 * @device: Device on which the received message arrived.
3505 * @port_num: Port on which the received message arrived.
3506 * @wc: Work completion associated with the received message.
3507 * @grh: References the received global route header.  This parameter is
3508 *   ignored unless the work completion indicates that the GRH is valid.
3509 * @ah_attr: Returned attributes that can be used when creating an address
3510 *   handle for replying to the message.
3511 * When ib_init_ah_attr_from_wc() returns success,
3512 * (a) for the IB link layer, ah_attr optionally contains a reference to the
3513 * SGID attribute when a GRH is present.
3514 * (b) for the RoCE link layer, ah_attr contains a reference to the SGID
3515 * attribute.
3516 * The caller must invoke rdma_cleanup_ah_attr_gid_attr() to release the
3517 * reference to SGID attributes initialized by ib_init_ah_attr_from_wc().
3518 */
3519int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
3520                            const struct ib_wc *wc, const struct ib_grh *grh,
3521                            struct rdma_ah_attr *ah_attr);
3522
3523/**
3524 * ib_create_ah_from_wc - Creates an address handle associated with the
3525 *   sender of the specified work completion.
3526 * @pd: The protection domain associated with the address handle.
3527 * @wc: Work completion information associated with a received message.
3528 * @grh: References the received global route header.  This parameter is
3529 *   ignored unless the work completion indicates that the GRH is valid.
3530 * @port_num: The outbound port number to associate with the address.
3531 *
3532 * The address handle is used to reference a local or global destination
3533 * in all UD QP post sends.
3534 */
3535struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
3536                                   const struct ib_grh *grh, u8 port_num);
3537
3538/**
3539 * rdma_modify_ah - Modifies the address vector associated with an address
3540 *   handle.
3541 * @ah: The address handle to modify.
3542 * @ah_attr: The new address vector attributes to associate with the
3543 *   address handle.
3544 */
3545int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3546
3547/**
3548 * rdma_query_ah - Queries the address vector associated with an address
3549 *   handle.
3550 * @ah: The address handle to query.
3551 * @ah_attr: The address vector attributes associated with the address
3552 *   handle.
3553 */
3554int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3555
3556enum rdma_destroy_ah_flags {
3557        /* In a sleepable context */
3558        RDMA_DESTROY_AH_SLEEPABLE = BIT(0),
3559};
3560
3561/**
3562 * rdma_destroy_ah_user - Destroys an address handle.
3563 * @ah: The address handle to destroy.
3564 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
3565 * @udata: Valid user data or NULL for kernel objects
3566 */
3567int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
3568
3569/**
3570 * rdma_destroy_ah - Destroys a kernel address handle.
3571 * @ah: The address handle to destroy.
3572 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
3573 *
3574 * NOTE: for user ah use rdma_destroy_ah_user with valid udata!
3575 */
3576static inline void rdma_destroy_ah(struct ib_ah *ah, u32 flags)
3577{
3578        int ret = rdma_destroy_ah_user(ah, flags, NULL);
3579
3580        WARN_ONCE(ret, "Destroy of kernel AH shouldn't fail");
3581}
3582
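/*
 * Illustrative sketch (not part of this header): build a reply AH from a
 * received work completion (the common UD request/response pattern) and
 * release it when done.
 */
static inline int example_reply_ah(struct ib_pd *pd, const struct ib_wc *wc,
                                   const struct ib_grh *grh, u8 port_num)
{
        struct ib_ah *ah = ib_create_ah_from_wc(pd, wc, grh, port_num);

        if (IS_ERR(ah))
                return PTR_ERR(ah);
        /* ... post UD sends that reference "ah" ... */
        rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
        return 0;
}
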
3583struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
3584                                  struct ib_srq_init_attr *srq_init_attr,
3585                                  struct ib_usrq_object *uobject,
3586                                  struct ib_udata *udata);
3587static inline struct ib_srq *
3588ib_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *srq_init_attr)
3589{
3590        if (!pd->device->ops.create_srq)
3591                return ERR_PTR(-EOPNOTSUPP);
3592
3593        return ib_create_srq_user(pd, srq_init_attr, NULL, NULL);
3594}
3595
3596/**
3597 * ib_modify_srq - Modifies the attributes for the specified SRQ.
3598 * @srq: The SRQ to modify.
3599 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
3600 *   the current values of selected SRQ attributes are returned.
3601 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
3602 *   are being modified.
3603 *
3604 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
3605 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
3606 * the number of receives queued drops below the limit.
3607 */
3608int ib_modify_srq(struct ib_srq *srq,
3609                  struct ib_srq_attr *srq_attr,
3610                  enum ib_srq_attr_mask srq_attr_mask);
3611
3612/**
3613 * ib_query_srq - Returns the attribute list and current values for the
3614 *   specified SRQ.
3615 * @srq: The SRQ to query.
3616 * @srq_attr: The attributes of the specified SRQ.
3617 */
3618int ib_query_srq(struct ib_srq *srq,
3619                 struct ib_srq_attr *srq_attr);
3620
3621/**
3622 * ib_destroy_srq_user - Destroys the specified SRQ.
3623 * @srq: The SRQ to destroy.
3624 * @udata: Valid user data or NULL for kernel objects
3625 */
3626int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);
3627
3628/**
3629 * ib_destroy_srq - Destroys the specified kernel SRQ.
3630 * @srq: The SRQ to destroy.
3631 *
3632 * NOTE: for user srq use ib_destroy_srq_user with valid udata!
3633 */
3634static inline void ib_destroy_srq(struct ib_srq *srq)
3635{
3636        int ret = ib_destroy_srq_user(srq, NULL);
3637
3638        WARN_ONCE(ret, "Destroy of kernel SRQ shouldn't fail");
3639}
3640
3641/**
3642 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
3643 * @srq: The SRQ to post the work request on.
3644 * @recv_wr: A list of work requests to post on the receive queue.
3645 * @bad_recv_wr: On an immediate failure, this parameter will reference
3646 *   the work request that failed to be posted on the SRQ.
3647 */
3648static inline int ib_post_srq_recv(struct ib_srq *srq,
3649                                   const struct ib_recv_wr *recv_wr,
3650                                   const struct ib_recv_wr **bad_recv_wr)
3651{
3652        const struct ib_recv_wr *dummy;
3653
3654        return srq->device->ops.post_srq_recv(srq, recv_wr,
3655                                              bad_recv_wr ? : &dummy);
3656}
3657
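/*
 * Illustrative sketch (not part of this header): post a single receive
 * buffer to an SRQ.  "dma_addr", "len" and "lkey" describe a buffer that is
 * already DMA mapped.
 */
static inline int example_post_one_srq_recv(struct ib_srq *srq, u64 dma_addr,
                                            u32 len, u32 lkey, u64 wr_id)
{
        struct ib_sge sge = {
                .addr   = dma_addr,
                .length = len,
                .lkey   = lkey,
        };
        struct ib_recv_wr wr = {
                .wr_id   = wr_id,
                .sg_list = &sge,
                .num_sge = 1,
        };

        return ib_post_srq_recv(srq, &wr, NULL);
}
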
3658struct ib_qp *ib_create_qp(struct ib_pd *pd,
3659                           struct ib_qp_init_attr *qp_init_attr);
3660
3661/**
3662 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
3663 * @qp: The QP to modify.
3664 * @attr: On input, specifies the QP attributes to modify.  On output,
3665 *   the current values of selected QP attributes are returned.
3666 * @attr_mask: A bit-mask used to specify which attributes of the QP
3667 *   are being modified.
3668 * @udata: pointer to user's input/output buffer information needed by
3669 *   the provider driver.
3670 * It returns 0 on success and returns appropriate error code on error.
3671 */
3672int ib_modify_qp_with_udata(struct ib_qp *qp,
3673                            struct ib_qp_attr *attr,
3674                            int attr_mask,
3675                            struct ib_udata *udata);
3676
3677/**
3678 * ib_modify_qp - Modifies the attributes for the specified QP and then
3679 *   transitions the QP to the given state.
3680 * @qp: The QP to modify.
3681 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
3682 *   the current values of selected QP attributes are returned.
3683 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
3684 *   are being modified.
3685 */
3686int ib_modify_qp(struct ib_qp *qp,
3687                 struct ib_qp_attr *qp_attr,
3688                 int qp_attr_mask);
3689
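/*
 * Illustrative sketch (not part of this header): move a QP to the error
 * state so outstanding work requests complete with a flush status, a common
 * teardown step.
 */
static inline int example_move_qp_to_err(struct ib_qp *qp)
{
        struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };

        return ib_modify_qp(qp, &attr, IB_QP_STATE);
}
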
3690/**
3691 * ib_query_qp - Returns the attribute list and current values for the
3692 *   specified QP.
3693 * @qp: The QP to query.
3694 * @qp_attr: The attributes of the specified QP.
3695 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
3696 * @qp_init_attr: Additional attributes of the selected QP.
3697 *
3698 * The qp_attr_mask may be used to limit the query to gathering only the
3699 * selected attributes.
3700 */
3701int ib_query_qp(struct ib_qp *qp,
3702                struct ib_qp_attr *qp_attr,
3703                int qp_attr_mask,
3704                struct ib_qp_init_attr *qp_init_attr);
3705
3706/**
3707 * ib_destroy_qp_user - Destroys the specified QP.
3708 * @qp: The QP to destroy.
3709 * @udata: Valid udata or NULL for kernel objects
3710 */
3711int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);
3712
3713/**
3714 * ib_destroy_qp - Destroys the specified kernel QP.
3715 * @qp: The QP to destroy.
3716 *
3717 * NOTE: for user qp use ib_destroy_qp_user with valid udata!
3718 */
3719static inline int ib_destroy_qp(struct ib_qp *qp)
3720{
3721        return ib_destroy_qp_user(qp, NULL);
3722}
3723
3724/**
3725 * ib_open_qp - Obtain a reference to an existing sharable QP.
3726 * @xrcd: XRC domain
3727 * @qp_open_attr: Attributes identifying the QP to open.
3728 *
3729 * Returns a reference to a sharable QP.
3730 */
3731struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3732                         struct ib_qp_open_attr *qp_open_attr);
3733
3734/**
3735 * ib_close_qp - Release an external reference to a QP.
3736 * @qp: The QP handle to release
3737 *
3738 * The opened QP handle is released by the caller.  The underlying
3739 * shared QP is not destroyed until all internal references are released.
3740 */
3741int ib_close_qp(struct ib_qp *qp);
3742
3743/**
3744 * ib_post_send - Posts a list of work requests to the send queue of
3745 *   the specified QP.
3746 * @qp: The QP to post the work request on.
3747 * @send_wr: A list of work requests to post on the send queue.
3748 * @bad_send_wr: On an immediate failure, this parameter will reference
3749 *   the work request that failed to be posted on the QP.
3750 *
3751 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
3752 * error is returned, the QP state shall not be affected,
3753 * ib_post_send() will return an immediate error after queueing any
3754 * earlier work requests in the list.
3755 */
3756static inline int ib_post_send(struct ib_qp *qp,
3757                               const struct ib_send_wr *send_wr,
3758                               const struct ib_send_wr **bad_send_wr)
3759{
3760        const struct ib_send_wr *dummy;
3761
3762        return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
3763}
3764
3765/**
3766 * ib_post_recv - Posts a list of work requests to the receive queue of
3767 *   the specified QP.
3768 * @qp: The QP to post the work request on.
3769 * @recv_wr: A list of work requests to post on the receive queue.
3770 * @bad_recv_wr: On an immediate failure, this parameter will reference
3771 *   the work request that failed to be posted on the QP.
3772 */
3773static inline int ib_post_recv(struct ib_qp *qp,
3774                               const struct ib_recv_wr *recv_wr,
3775                               const struct ib_recv_wr **bad_recv_wr)
3776{
3777        const struct ib_recv_wr *dummy;
3778
3779        return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
3780}
3781
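/*
 * Illustrative sketch (not part of this header): post one signalled SEND.
 * "sge" must describe a DMA-mapped buffer covered by a valid lkey.
 */
static inline int example_post_one_send(struct ib_qp *qp, struct ib_sge *sge,
                                        u64 wr_id)
{
        struct ib_send_wr wr = {
                .wr_id      = wr_id,
                .sg_list    = sge,
                .num_sge    = 1,
                .opcode     = IB_WR_SEND,
                .send_flags = IB_SEND_SIGNALED,
        };

        return ib_post_send(qp, &wr, NULL);
}
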
3782struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
3783                            int comp_vector, enum ib_poll_context poll_ctx,
3784                            const char *caller);
3785static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
3786                                        int nr_cqe, int comp_vector,
3787                                        enum ib_poll_context poll_ctx)
3788{
3789        return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
3790                             KBUILD_MODNAME);
3791}
3792
3793struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
3794                                int nr_cqe, enum ib_poll_context poll_ctx,
3795                                const char *caller);
3796
3797/**
3798 * ib_alloc_cq_any: Allocate kernel CQ
3799 * @dev: The IB device
3800 * @private: Private data attached to the CQE
3801 * @nr_cqe: Number of CQEs in the CQ
3802 * @poll_ctx: Context used for polling the CQ
3803 */
3804static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
3805                                            void *private, int nr_cqe,
3806                                            enum ib_poll_context poll_ctx)
3807{
3808        return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx,
3809                                 KBUILD_MODNAME);
3810}
3811
3812void ib_free_cq(struct ib_cq *cq);
3813int ib_process_cq_direct(struct ib_cq *cq, int budget);
3814
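/*
 * Illustrative sketch (not part of this header): allocate a kernel CQ
 * without caring about the completion vector, then release it.
 */
static inline int example_cq_lifecycle(struct ib_device *dev, int nr_cqe)
{
        struct ib_cq *cq = ib_alloc_cq_any(dev, NULL, nr_cqe, IB_POLL_SOFTIRQ);

        if (IS_ERR(cq))
                return PTR_ERR(cq);
        /* ... attach the CQ to QPs and run I/O ... */
        ib_free_cq(cq);
        return 0;
}
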
3815/**
3816 * ib_create_cq - Creates a CQ on the specified device.
3817 * @device: The device on which to create the CQ.
3818 * @comp_handler: A user-specified callback that is invoked when a
3819 *   completion event occurs on the CQ.
3820 * @event_handler: A user-specified callback that is invoked when an
3821 *   asynchronous event not associated with a completion occurs on the CQ.
3822 * @cq_context: Context associated with the CQ returned to the user via
3823 *   the associated completion and event handlers.
3824 * @cq_attr: The attributes the CQ should be created upon.
3825 *
3826 * Users can examine the cq structure to determine the actual CQ size.
3827 */
3828struct ib_cq *__ib_create_cq(struct ib_device *device,
3829                             ib_comp_handler comp_handler,
3830                             void (*event_handler)(struct ib_event *, void *),
3831                             void *cq_context,
3832                             const struct ib_cq_init_attr *cq_attr,
3833                             const char *caller);
3834#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
3835        __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
3836
3837/**
3838 * ib_resize_cq - Modifies the capacity of the CQ.
3839 * @cq: The CQ to resize.
3840 * @cqe: The minimum size of the CQ.
3841 *
3842 * Users can examine the cq structure to determine the actual CQ size.
3843 */
3844int ib_resize_cq(struct ib_cq *cq, int cqe);
3845
3846/**
3847 * rdma_set_cq_moderation - Modifies moderation params of the CQ
3848 * @cq: The CQ to modify.
3849 * @cq_count: number of CQEs that will trigger an event
3850 * @cq_period: max period of time in usec before triggering an event
3851 *
3852 */
3853int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
3854
3855/**
3856 * ib_destroy_cq_user - Destroys the specified CQ.
3857 * @cq: The CQ to destroy.
3858 * @udata: Valid user data or NULL for kernel objects
3859 */
3860int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
3861
3862/**
3863 * ib_destroy_cq - Destroys the specified kernel CQ.
3864 * @cq: The CQ to destroy.
3865 *
3866 * NOTE: for user cq use ib_destroy_cq_user with valid udata!
3867 */
3868static inline void ib_destroy_cq(struct ib_cq *cq)
3869{
3870        int ret = ib_destroy_cq_user(cq, NULL);
3871
3872        WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
3873}
3874
3875/**
3876 * ib_poll_cq - poll a CQ for completion(s)
3877 * @cq:the CQ being polled
3878 * @num_entries:maximum number of completions to return
3879 * @wc:array of at least @num_entries &struct ib_wc where completions
3880 *   will be returned
3881 *
3882 * Poll a CQ for (possibly multiple) completions.  If the return value
3883 * is < 0, an error occurred.  If the return value is >= 0, it is the
3884 * number of completions returned.  If the return value is
3885 * non-negative and < num_entries, then the CQ was emptied.
3886 */
3887static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
3888                             struct ib_wc *wc)
3889{
3890        return cq->device->ops.poll_cq(cq, num_entries, wc);
3891}
3892
3893/**
3894 * ib_req_notify_cq - Request completion notification on a CQ.
3895 * @cq: The CQ to generate an event for.
3896 * @flags:
3897 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
3898 *   to request an event on the next solicited event or next work
3899 *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
3900 *   may also be |ed in to request a hint about missed events, as
3901 *   described below.
3902 *
3903 * Return Value:
3904 *    < 0 means an error occurred while requesting notification
3905 *   == 0 means notification was requested successfully, and if
3906 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
3907 *        were missed and it is safe to wait for another event.  In
3908 *        this case it is guaranteed that any work completions added
3909 *        to the CQ since the last CQ poll will trigger a completion
3910 *        notification event.
3911 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
3912 *        in.  It means that the consumer must poll the CQ again to
3913 *        make sure it is empty to avoid missing an event because of a
3914 *        race between requesting notification and an entry being
3915 *        added to the CQ.  This return value means it is possible
3916 *        (but not guaranteed) that a work completion has been added
3917 *        to the CQ since the last poll without triggering a
3918 *        completion notification event.
3919 */
3920static inline int ib_req_notify_cq(struct ib_cq *cq,
3921                                   enum ib_cq_notify_flags flags)
3922{
3923        return cq->device->ops.req_notify_cq(cq, flags);
3924}
3925
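/*
 * Illustrative sketch (not part of this header) of the classic
 * drain-and-rearm loop built on the return value semantics above.
 * "handle" is a hypothetical consumer callback.
 */
static inline void example_drain_and_rearm(struct ib_cq *cq,
                                           void (*handle)(struct ib_wc *))
{
        struct ib_wc wc;

        do {
                while (ib_poll_cq(cq, 1, &wc) > 0)
                        handle(&wc);
        } while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
                                      IB_CQ_REPORT_MISSED_EVENTS) > 0);
}
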
3926struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
3927                             int comp_vector_hint,
3928                             enum ib_poll_context poll_ctx);
3929
3930void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe);
3931
3932/**
3933 * ib_req_ncomp_notif - Request completion notification when there are
3934 *   at least the specified number of unreaped completions on the CQ.
3935 * @cq: The CQ to generate an event for.
3936 * @wc_cnt: The number of unreaped completions that should be on the
3937 *   CQ before an event is generated.
3938 */
3939static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
3940{
3941        return cq->device->ops.req_ncomp_notif ?
3942                cq->device->ops.req_ncomp_notif(cq, wc_cnt) :
3943                -ENOSYS;
3944}
3945
3946/**
3947 * ib_dma_mapping_error - check a DMA addr for error
3948 * @dev: The device for which the dma_addr was created
3949 * @dma_addr: The DMA address to check
3950 */
3951static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
3952{
3953        return dma_mapping_error(dev->dma_device, dma_addr);
3954}
3955
3956/**
3957 * ib_dma_map_single - Map a kernel virtual address to DMA address
3958 * @dev: The device for which the dma_addr is to be created
3959 * @cpu_addr: The kernel virtual address
3960 * @size: The size of the region in bytes
3961 * @direction: The direction of the DMA
3962 */
3963static inline u64 ib_dma_map_single(struct ib_device *dev,
3964                                    void *cpu_addr, size_t size,
3965                                    enum dma_data_direction direction)
3966{
3967        return dma_map_single(dev->dma_device, cpu_addr, size, direction);
3968}
3969
3970/**
3971 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
3972 * @dev: The device for which the DMA address was created
3973 * @addr: The DMA address
3974 * @size: The size of the region in bytes
3975 * @direction: The direction of the DMA
3976 */
3977static inline void ib_dma_unmap_single(struct ib_device *dev,
3978                                       u64 addr, size_t size,
3979                                       enum dma_data_direction direction)
3980{
3981        dma_unmap_single(dev->dma_device, addr, size, direction);
3982}
3983
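/*
 * Illustrative sketch (not part of this header): map a kernel buffer for a
 * send and check the mapping before exposing the address in an ib_sge.
 */
static inline int example_map_for_send(struct ib_device *dev, void *buf,
                                       size_t len, u64 *dma_addr)
{
        u64 dma = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (ib_dma_mapping_error(dev, dma))
                return -ENOMEM;
        *dma_addr = dma;
        return 0;
}
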
3984/**
3985 * ib_dma_map_page - Map a physical page to DMA address
3986 * @dev: The device for which the dma_addr is to be created
3987 * @page: The page to be mapped
3988 * @offset: The offset within the page
3989 * @size: The size of the region in bytes
3990 * @direction: The direction of the DMA
3991 */
3992static inline u64 ib_dma_map_page(struct ib_device *dev,
3993                                  struct page *page,
3994                                  unsigned long offset,
3995                                  size_t size,
3996                                  enum dma_data_direction direction)
3997{
3998        return dma_map_page(dev->dma_device, page, offset, size, direction);
3999}
4000
4001/**
4002 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
4003 * @dev: The device for which the DMA address was created
4004 * @addr: The DMA address
4005 * @size: The size of the region in bytes
4006 * @direction: The direction of the DMA
4007 */
4008static inline void ib_dma_unmap_page(struct ib_device *dev,
4009                                     u64 addr, size_t size,
4010                                     enum dma_data_direction direction)
4011{
4012        dma_unmap_page(dev->dma_device, addr, size, direction);
4013}
4014
4015/**
4016 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
4017 * @dev: The device for which the DMA addresses are to be created
4018 * @sg: The array of scatter/gather entries
4019 * @nents: The number of scatter/gather entries
4020 * @direction: The direction of the DMA
4021 */
4022static inline int ib_dma_map_sg(struct ib_device *dev,
4023                                struct scatterlist *sg, int nents,
4024                                enum dma_data_direction direction)
4025{
4026        return dma_map_sg(dev->dma_device, sg, nents, direction);
4027}
4028
4029/**
4030 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
4031 * @dev: The device for which the DMA addresses were created
4032 * @sg: The array of scatter/gather entries
4033 * @nents: The number of scatter/gather entries
4034 * @direction: The direction of the DMA
4035 */
4036static inline void ib_dma_unmap_sg(struct ib_device *dev,
4037                                   struct scatterlist *sg, int nents,
4038                                   enum dma_data_direction direction)
4039{
4040        dma_unmap_sg(dev->dma_device, sg, nents, direction);
4041}
4042
4043static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
4044                                      struct scatterlist *sg, int nents,
4045                                      enum dma_data_direction direction,
4046                                      unsigned long dma_attrs)
4047{
4048        return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
4049                                dma_attrs);
4050}
4051
4052static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
4053                                         struct scatterlist *sg, int nents,
4054                                         enum dma_data_direction direction,
4055                                         unsigned long dma_attrs)
4056{
4057        dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
4058}
4059
4060/**
4061 * ib_dma_max_seg_size - Return the size limit of a single DMA transfer
4062 * @dev: The device to query
4063 *
4064 * The returned value represents a size in bytes.
4065 */
4066static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
4067{
4068        return dma_get_max_seg_size(dev->dma_device);
4069}
4070
4071/**
4072 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
4073 * @dev: The device for which the DMA address was created
4074 * @addr: The DMA address
4075 * @size: The size of the region in bytes
4076 * @dir: The direction of the DMA
4077 */
4078static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
4079                                              u64 addr,
4080                                              size_t size,
4081                                              enum dma_data_direction dir)
4082{
4083        dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
4084}
4085
4086/**
4087 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
4088 * @dev: The device for which the DMA address was created
4089 * @addr: The DMA address
4090 * @size: The size of the region in bytes
4091 * @dir: The direction of the DMA
4092 */
4093static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
4094                                                 u64 addr,
4095                                                 size_t size,
4096                                                 enum dma_data_direction dir)
4097{
4098        dma_sync_single_for_device(dev->dma_device, addr, size, dir);
4099}
4100
4101/**
4102 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
4103 * @dev: The device for which the DMA address is requested
4104 * @size: The size of the region to allocate in bytes
4105 * @dma_handle: A pointer for returning the DMA address of the region
4106 * @flag: memory allocator flags
4107 */
4108static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
4109                                           size_t size,
4110                                           dma_addr_t *dma_handle,
4111                                           gfp_t flag)
4112{
4113        return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
4114}
4115
4116/**
4117 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
4118 * @dev: The device for which the DMA addresses were allocated
4119 * @size: The size of the region
4120 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
4121 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
4122 */
4123static inline void ib_dma_free_coherent(struct ib_device *dev,
4124                                        size_t size, void *cpu_addr,
4125                                        dma_addr_t dma_handle)
4126{
4127        dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
4128}
4129
4130/* ib_reg_user_mr - register a memory region for virtual addresses from kernel
4131 * space. This function should be called when 'current' is the owning MM.
4132 */
4133struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
4134                             u64 virt_addr, int mr_access_flags);
4135
4136/* ib_advise_mr - give advice about an address range in a memory region */
4137int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
4138                 u32 flags, struct ib_sge *sg_list, u32 num_sge);
4139/**
4140 * ib_dereg_mr_user - Deregisters a memory region and removes it from the
4141 *   HCA translation table.
4142 * @mr: The memory region to deregister.
4143 * @udata: Valid user data or NULL for kernel object
4144 *
4145 * This function can fail if the memory region has memory windows bound to it.
4146 */
4147int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata);
4148
4149/**
4150 * ib_dereg_mr - Deregisters a kernel memory region and removes it from the
4151 *   HCA translation table.
4152 * @mr: The memory region to deregister.
4153 *
4154 * This function can fail if the memory region has memory windows bound to it.
4155 *
4156 * NOTE: for user mr use ib_dereg_mr_user with valid udata!
4157 */
4158static inline int ib_dereg_mr(struct ib_mr *mr)
4159{
4160        return ib_dereg_mr_user(mr, NULL);
4161}
4162
4163struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
4164                          u32 max_num_sg);
4165
4166struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
4167                                    u32 max_num_data_sg,
4168                                    u32 max_num_meta_sg);
4169
4170/**
4171 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
4172 *   R_Key and L_Key.
4173 * @mr: struct ib_mr pointer to be updated.
4174 * @newkey: new key to be used.
4175 */
4176static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
4177{
4178        mr->lkey = (mr->lkey & 0xffffff00) | newkey;
4179        mr->rkey = (mr->rkey & 0xffffff00) | newkey;
4180}
4181
4182/**
4183 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
4184 * for calculating a new rkey for type 2 memory windows.
4185 * @rkey: the rkey to increment.
4186 */
4187static inline u32 ib_inc_rkey(u32 rkey)
4188{
4189        const u32 mask = 0x000000ff;
4190        return ((rkey + 1) & mask) | (rkey & ~mask);
4191}
4192
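/*
 * Illustrative sketch (not part of this header): before reusing a
 * fast-registration MR, advance its key portion so stale remote references
 * cannot match, as several ULPs do.
 */
static inline void example_refresh_mr_key(struct ib_mr *mr)
{
        ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
}
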
4193/**
4194 * ib_attach_mcast - Attaches the specified QP to a multicast group.
4195 * @qp: QP to attach to the multicast group.  The QP must be type
4196 *   IB_QPT_UD.
4197 * @gid: Multicast group GID.
4198 * @lid: Multicast group LID in host byte order.
4199 *
4200 * In order to send and receive multicast packets, subnet
4201 * administration must have created the multicast group and configured
4202 * the fabric appropriately.  The port associated with the specified
4203 * QP must also be a member of the multicast group.
4204 */
4205int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4206
4207/**
4208 * ib_detach_mcast - Detaches the specified QP from a multicast group.
4209 * @qp: QP to detach from the multicast group.
4210 * @gid: Multicast group GID.
4211 * @lid: Multicast group LID in host byte order.
4212 */
4213int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4214
4215struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
4216                                   struct inode *inode, struct ib_udata *udata);
4217int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata);
4218
4219static inline int ib_check_mr_access(int flags)
4220{
4221        /*
4222         * Local write permission is required if remote write or
4223         * remote atomic permission is also requested.
4224         */
4225        if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
4226            !(flags & IB_ACCESS_LOCAL_WRITE))
4227                return -EINVAL;
4228
4229        if (flags & ~IB_ACCESS_SUPPORTED)
4230                return -EINVAL;
4231
4232        return 0;
4233}
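
/*
 * A minimal sketch: validate caller-supplied access flags before handing
 * them to ib_reg_user_mr(); example_reg_checked() is hypothetical, and
 * 'start' doubles as the requested virtual address of the mapping.
 */
static inline struct ib_mr *example_reg_checked(struct ib_pd *pd, u64 start,
                                                u64 length, int access)
{
        /* Reject flag combinations the core considers invalid. */
        if (ib_check_mr_access(access))
                return ERR_PTR(-EINVAL);

        return ib_reg_user_mr(pd, start, length, start, access);
}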
4234
4235static inline bool ib_access_writable(int access_flags)
4236{
4237        /*
4238         * We have writable memory backing the MR if any of the following
4239         * access flags are set.  "Local write" and "remote write" obviously
4240         * require write access.  "Remote atomic" can do things like fetch and
4241         * add, which will modify memory, and "MW bind" can change permissions
4242         * by binding a window.
4243         */
4244        return access_flags &
4245                (IB_ACCESS_LOCAL_WRITE   | IB_ACCESS_REMOTE_WRITE |
4246                 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
4247}
4248
4249/**
4250 * ib_check_mr_status: lightweight check of MR status.
4251 *     This routine may provide status checks on a selected
4252 *     ib_mr. The first use case is the signature status check.
4253 *
4254 * @mr: A memory region.
4255 * @check_mask: Bitmask of which checks to perform from
4256 *     ib_mr_status_check enumeration.
4257 * @mr_status: The container of relevant status checks.
4258 *     Failed checks will be indicated in the status bitmask
4259 *     and the relevant info will be in the error item.
4260 */
4261int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
4262                       struct ib_mr_status *mr_status);
4263
4264/**
4265 * ib_device_try_get: Hold a registration lock
4266 * @dev: The device to lock
4267 *
4268 * A device under an active registration lock cannot become unregistered. It
4269 * is only possible to obtain a registration lock on a device that is fully
4270 * registered, otherwise this function returns false.
4271 *
4272 * The registration lock is only necessary for actions which require the
4273 * device to still be registered. Uses that only require the device pointer to
4274 * be valid should use get_device(&ibdev->dev) to hold the memory.
4275 *
4276 */
4277static inline bool ib_device_try_get(struct ib_device *dev)
4278{
4279        return refcount_inc_not_zero(&dev->refcount);
4280}
4281
4282void ib_device_put(struct ib_device *device);
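
/*
 * A minimal sketch of the registration-lock pattern; example_with_device()
 * and its 'do_work' callback are hypothetical:
 */
static inline int example_with_device(struct ib_device *dev,
                                      int (*do_work)(struct ib_device *))
{
        int ret;

        /* Fails if the device is not (or no longer) fully registered. */
        if (!ib_device_try_get(dev))
                return -ENODEV;

        ret = do_work(dev);

        /* Drop the registration lock taken by ib_device_try_get(). */
        ib_device_put(dev);
        return ret;
}
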
4283struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
4284                                          enum rdma_driver_id driver_id);
4285struct ib_device *ib_device_get_by_name(const char *name,
4286                                        enum rdma_driver_id driver_id);
4287struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
4288                                            u16 pkey, const union ib_gid *gid,
4289                                            const struct sockaddr *addr);
4290int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
4291                         unsigned int port);
4292struct net_device *ib_device_netdev(struct ib_device *dev, u8 port);
4293
4294struct ib_wq *ib_create_wq(struct ib_pd *pd,
4295                           struct ib_wq_init_attr *init_attr);
4296int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
4297int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
4298                 u32 wq_attr_mask);
4299
4300int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4301                 unsigned int *sg_offset, unsigned int page_size);
4302int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
4303                    int data_sg_nents, unsigned int *data_sg_offset,
4304                    struct scatterlist *meta_sg, int meta_sg_nents,
4305                    unsigned int *meta_sg_offset, unsigned int page_size);
4306
4307static inline int
4308ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4309                  unsigned int *sg_offset, unsigned int page_size)
4310{
4311        int n;
4312
4313        n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
4314        mr->iova = 0;
4315
4316        return n;
4317}
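
/*
 * A minimal sketch of fast-registration mapping; example_map_mr() is
 * hypothetical and assumes 'sg' was already DMA-mapped by the caller:
 */
static inline int example_map_mr(struct ib_mr *mr, struct scatterlist *sg,
                                 int sg_nents)
{
        int n;

        /* Map the scatterlist into the MR using the CPU page size. */
        n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
        if (n < 0)
                return n;               /* mapping failed */
        if (n < sg_nents)
                return -EINVAL;         /* MR has too few pages */
        return 0;
}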
4318
4319int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
4320                unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
4321
4322void ib_drain_rq(struct ib_qp *qp);
4323void ib_drain_sq(struct ib_qp *qp);
4324void ib_drain_qp(struct ib_qp *qp);
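
/*
 * A minimal teardown sketch; example_teardown_qp() is hypothetical. Drain
 * outstanding work first so no completions race with the destroy.
 */
static inline void example_teardown_qp(struct ib_qp *qp)
{
        ib_drain_qp(qp);        /* flushes both the send and receive queues */
        ib_destroy_qp(qp);
}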
4325
4326int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u16 *speed, u8 *width);
4327
4328static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
4329{
4330        if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
4331                return attr->roce.dmac;
4332        return NULL;
4333}
4334
4335static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
4336{
4337        if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4338                attr->ib.dlid = (u16)dlid;
4339        else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4340                attr->opa.dlid = dlid;
4341}
4342
4343static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
4344{
4345        if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4346                return attr->ib.dlid;
4347        else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4348                return attr->opa.dlid;
4349        return 0;
4350}
4351
4352static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
4353{
4354        attr->sl = sl;
4355}
4356
4357static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
4358{
4359        return attr->sl;
4360}
4361
4362static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
4363                                         u8 src_path_bits)
4364{
4365        if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4366                attr->ib.src_path_bits = src_path_bits;
4367        else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4368                attr->opa.src_path_bits = src_path_bits;
4369}
4370
4371static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
4372{
4373        if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4374                return attr->ib.src_path_bits;
4375        else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4376                return attr->opa.src_path_bits;
4377        return 0;
4378}
4379
4380static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
4381                                        bool make_grd)
4382{
4383        if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4384                attr->opa.make_grd = make_grd;
4385}
4386
4387static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
4388{
4389        if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4390                return attr->opa.make_grd;
4391        return false;
4392}
4393
4394static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
4395{
4396        attr->port_num = port_num;
4397}
4398
4399static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
4400{
4401        return attr->port_num;
4402}
4403
4404static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
4405                                           u8 static_rate)
4406{
4407        attr->static_rate = static_rate;
4408}
4409
4410static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
4411{
4412        return attr->static_rate;
4413}
4414
4415static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
4416                                        enum ib_ah_flags flag)
4417{
4418        attr->ah_flags = flag;
4419}
4420
4421static inline enum ib_ah_flags
4422                rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
4423{
4424        return attr->ah_flags;
4425}
4426
4427static inline const struct ib_global_route
4428                *rdma_ah_read_grh(const struct rdma_ah_attr *attr)
4429{
4430        return &attr->grh;
4431}
4432
4433/* To retrieve and modify the GRH */
4434static inline struct ib_global_route
4435                *rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
4436{
4437        return &attr->grh;
4438}
4439
4440static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
4441{
4442        struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4443
4444        memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
4445}
4446
4447static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
4448                                             __be64 prefix)
4449{
4450        struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4451
4452        grh->dgid.global.subnet_prefix = prefix;
4453}
4454
4455static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
4456                                            __be64 if_id)
4457{
4458        struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4459
4460        grh->dgid.global.interface_id = if_id;
4461}
4462
4463static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
4464                                   union ib_gid *dgid, u32 flow_label,
4465                                   u8 sgid_index, u8 hop_limit,
4466                                   u8 traffic_class)
4467{
4468        struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4469
4470        attr->ah_flags = IB_AH_GRH;
4471        if (dgid)
4472                grh->dgid = *dgid;
4473        grh->flow_label = flow_label;
4474        grh->sgid_index = sgid_index;
4475        grh->hop_limit = hop_limit;
4476        grh->traffic_class = traffic_class;
4477        grh->sgid_attr = NULL;
4478}
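
/*
 * A minimal sketch: fill AH attributes for a RoCE destination;
 * example_fill_roce_ah() is hypothetical, and sgid_index 0 plus a hop
 * limit of 64 are only placeholder choices.
 */
static inline void example_fill_roce_ah(struct rdma_ah_attr *attr,
                                        union ib_gid *dgid, u32 flow_label,
                                        u8 port_num)
{
        memset(attr, 0, sizeof(*attr));
        attr->type = RDMA_AH_ATTR_TYPE_ROCE;
        rdma_ah_set_port_num(attr, port_num);
        rdma_ah_set_sl(attr, 0);
        /* A GRH is mandatory for RoCE; this also sets IB_AH_GRH. */
        rdma_ah_set_grh(attr, dgid, flow_label, 0, 64, 0);
}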
4479
4480void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);
4481void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
4482                             u32 flow_label, u8 hop_limit, u8 traffic_class,
4483                             const struct ib_gid_attr *sgid_attr);
4484void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
4485                       const struct rdma_ah_attr *src);
4486void rdma_replace_ah_attr(struct rdma_ah_attr *old,
4487                          const struct rdma_ah_attr *new);
4488void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);
4489
4490/**
4491 * rdma_ah_find_type - Return address handle type.
4492 *
4493 * @dev: Device to be checked
4494 * @port_num: Port number
4495 */
4496static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
4497                                                       u8 port_num)
4498{
4499        if (rdma_protocol_roce(dev, port_num))
4500                return RDMA_AH_ATTR_TYPE_ROCE;
4501        if (rdma_protocol_ib(dev, port_num)) {
4502                if (rdma_cap_opa_ah(dev, port_num))
4503                        return RDMA_AH_ATTR_TYPE_OPA;
4504                return RDMA_AH_ATTR_TYPE_IB;
4505        }
4506
4507        return RDMA_AH_ATTR_TYPE_UNDEFINED;
4508}
4509
4510/**
4511 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
4512 *     In the current implementation, the only way to get
4513 *     the 32-bit LID for OPA is from other sources.
4514 *     For IB, LIDs will always be 16 bits, so cast the
4515 *     value accordingly.
4516 *
4517 * @lid: A 32bit LID
4518 */
4519static inline u16 ib_lid_cpu16(u32 lid)
4520{
4521        WARN_ON_ONCE(lid & 0xFFFF0000);
4522        return (u16)lid;
4523}
4524
4525/**
4526 * ib_lid_be16 - Return lid in 16bit BE encoding.
4527 *
4528 * @lid: A 32bit LID
4529 */
4530static inline __be16 ib_lid_be16(u32 lid)
4531{
4532        WARN_ON_ONCE(lid & 0xFFFF0000);
4533        return cpu_to_be16((u16)lid);
4534}
4535
4536/**
4537 * ib_get_vector_affinity - Get the affinity mappings of a given completion
4538 *   vector
4539 * @device:         the rdma device
4540 * @comp_vector:    index of completion vector
4541 *
4542 * Returns NULL on failure, or if the device driver does not implement
4543 * get_vector_affinity; otherwise a corresponding cpu map of the
4544 * completion vector.
4545 */
4546static inline const struct cpumask *
4547ib_get_vector_affinity(struct ib_device *device, int comp_vector)
4548{
4549        if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
4550            !device->ops.get_vector_affinity)
4551                return NULL;
4552
4553        return device->ops.get_vector_affinity(device, comp_vector);
4555}
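
/*
 * A minimal sketch: test whether a CPU services a given completion
 * vector, e.g. when spreading queues across vectors;
 * example_vector_on_cpu() is hypothetical:
 */
static inline bool example_vector_on_cpu(struct ib_device *dev,
                                         int comp_vector, int cpu)
{
        const struct cpumask *mask = ib_get_vector_affinity(dev, comp_vector);

        return mask && cpumask_test_cpu(cpu, mask);
}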
4556
4557/**
4558 * rdma_roce_rescan_device - Rescan all of the network devices in the system
4559 * and add their gids, as needed, to the relevant RoCE devices.
4560 *
4561 * @ibdev:          the rdma device
4562 */
4563void rdma_roce_rescan_device(struct ib_device *ibdev);
4564
4565struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
4566
4567int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
4568
4569struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
4570                                     enum rdma_netdev_t type, const char *name,
4571                                     unsigned char name_assign_type,
4572                                     void (*setup)(struct net_device *));
4573
4574int rdma_init_netdev(struct ib_device *device, u8 port_num,
4575                     enum rdma_netdev_t type, const char *name,
4576                     unsigned char name_assign_type,
4577                     void (*setup)(struct net_device *),
4578                     struct net_device *netdev);
4579
4580/**
4581 * rdma_set_device_sysfs_group - Set the device attributes group to have
4582 *                               driver-specific sysfs entries for the
4583 *                               infiniband class.
4584 *
4585 * @device:     device pointer for which attributes are to be created
4586 * @group:      Pointer to the group which should be added when the device
4587 *              is registered with sysfs.
4588 * rdma_set_device_sysfs_group() allows existing drivers to expose one
4589 * group of sysfs attributes per device.
4590 *
4591 * NOTE: New drivers should not make use of this API; instead, new device
4592 * parameters should be exposed via the netlink command interface. This API
4593 * and mechanism exist only for existing drivers.
4594 */
4595static inline void
4596rdma_set_device_sysfs_group(struct ib_device *dev,
4597                            const struct attribute_group *group)
4598{
4599        dev->groups[1] = group;
4600}
4601
4602/**
4603 * rdma_device_to_ibdev - Get ib_device pointer from device pointer
4604 *
4605 * @device:     device pointer for which the ib_device pointer is retrieved
4606 *
4607 * rdma_device_to_ibdev() retrieves the ib_device pointer from a device.
4608 */
4610static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
4611{
4612        struct ib_core_device *coredev =
4613                container_of(device, struct ib_core_device, dev);
4614
4615        return coredev->owner;
4616}
4617
4618/**
4619 * rdma_device_to_drv_device - Helper macro to reach back to driver's
4620 *                             ib_device holder structure from device pointer.
4621 *
4622 * NOTE: New drivers should not make use of this API; this API is only for
4623 * existing drivers that have exposed sysfs entries using
4624 * rdma_set_device_sysfs_group().
4625 */
4626#define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member)           \
4627        container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)
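
/*
 * A minimal sketch of the intended pattern, using a hypothetical driver
 * structure that embeds its ib_device as 'ibdev':
 */
struct example_drv_dev {
        struct ib_device ibdev;
        int example_private_state;      /* hypothetical driver state */
};

static inline struct example_drv_dev *
example_to_drv_dev(struct device *dev)
{
        return rdma_device_to_drv_device(dev, struct example_drv_dev, ibdev);
}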
4628
4629bool rdma_dev_access_netns(const struct ib_device *device,
4630                           const struct net *net);
4631
4632#define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
4633#define IB_ROCE_UDP_ENCAP_VALID_PORT_MAX (0xFFFF)
4634#define IB_GRH_FLOWLABEL_MASK (0x000FFFFF)
4635
4636/**
4637 * rdma_flow_label_to_udp_sport - generate a RoCE v2 UDP src port value based
4638 *                               on the flow_label
4639 *
4640 * This function converts the 20-bit flow_label input to a valid 14-bit
4641 * RoCE v2 UDP source port value. All RoCE v2 drivers should use this same
4642 * convention.
4643 */
4644static inline u16 rdma_flow_label_to_udp_sport(u32 fl)
4645{
4646        u32 fl_low = fl & 0x03fff, fl_high = fl & 0xFC000;
4647
4648        fl_low ^= fl_high >> 14;
4649        return (u16)(fl_low | IB_ROCE_UDP_ENCAP_VALID_PORT_MIN);
4650}
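
/*
 * A worked example of this convention: fl = 0x12345 gives
 * fl_low = 0x2345 and fl_high >> 14 = 0x4, so the resulting source port
 * is (0x2345 ^ 0x4) | 0xC000 = 0xE341, inside the valid UDP encap range
 * [0xC000, 0xFFFF].
 */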
4651
4652/**
4653 * rdma_calc_flow_label - generate an RDMA symmetric flow label value based
4654 *                        on local and remote qpn values
4655 *
4656 * This function folds the multiplication result of the two QPNs, 24 bits
4657 * each, and converts it to a 20-bit result.
4658 *
4659 * Because the product is commutative, the resulting flow_label is
4660 * symmetric: both the requester and the responder calculate the same
4661 * flow_label for a given connection.
4662 *
4663 * Drivers should use this helper when the upper layer provides a zero
4664 * flow_label value, to improve the entropy of RDMA traffic in the network.
4666 */
4667static inline u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn)
4668{
4669        u64 v = (u64)lqpn * rqpn;
4670
4671        v ^= v >> 20;
4672        v ^= v >> 40;
4673
4674        return (u32)(v & IB_GRH_FLOWLABEL_MASK);
4675}
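
/*
 * Note on symmetry: since the product is commutative,
 * rdma_calc_flow_label(a, b) == rdma_calc_flow_label(b, a) for any QPN
 * pair, so the requester and responder derive an identical flow label
 * without any coordination.
 */
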
4676#endif /* IB_VERBS_H */
4677