linux/include/rdma/ib_verbs.h
/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/socket.h>
#include <uapi/linux/if_ether.h>

#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <asm/uaccess.h>

extern struct workqueue_struct *ib_wq;

union ib_gid {
        u8      raw[16];
        struct {
                __be64  subnet_prefix;
                __be64  interface_id;
        } global;
};
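
/*
 * Example helper (a sketch, not part of the original header): composing a
 * link-local GID from an EUI-64 interface identifier.  The fe80::/64
 * subnet prefix is the IBA default link-local prefix; the helper name is
 * hypothetical.
 */
static inline void ib_gid_make_link_local(union ib_gid *gid, u64 eui64)
{
        /* GIDs are stored in network byte order. */
        gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000ULL);
        gid->global.interface_id  = cpu_to_be64(eui64);
}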

extern union ib_gid zgid;

struct ib_gid_attr {
        struct net_device       *ndev;
};

enum rdma_node_type {
        /* IB values map to NodeInfo:NodeType. */
        RDMA_NODE_IB_CA         = 1,
        RDMA_NODE_IB_SWITCH,
        RDMA_NODE_IB_ROUTER,
        RDMA_NODE_RNIC,
        RDMA_NODE_USNIC,
        RDMA_NODE_USNIC_UDP,
};

enum rdma_transport_type {
        RDMA_TRANSPORT_IB,
        RDMA_TRANSPORT_IWARP,
        RDMA_TRANSPORT_USNIC,
        RDMA_TRANSPORT_USNIC_UDP
};

enum rdma_protocol_type {
        RDMA_PROTOCOL_IB,
        RDMA_PROTOCOL_IBOE,
        RDMA_PROTOCOL_IWARP,
        RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type);

enum rdma_link_layer {
        IB_LINK_LAYER_UNSPECIFIED,
        IB_LINK_LAYER_INFINIBAND,
        IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
        IB_DEVICE_RESIZE_MAX_WR         = 1,
        IB_DEVICE_BAD_PKEY_CNTR         = (1<<1),
        IB_DEVICE_BAD_QKEY_CNTR         = (1<<2),
        IB_DEVICE_RAW_MULTI             = (1<<3),
        IB_DEVICE_AUTO_PATH_MIG         = (1<<4),
        IB_DEVICE_CHANGE_PHY_PORT       = (1<<5),
        IB_DEVICE_UD_AV_PORT_ENFORCE    = (1<<6),
        IB_DEVICE_CURR_QP_STATE_MOD     = (1<<7),
        IB_DEVICE_SHUTDOWN_PORT         = (1<<8),
        IB_DEVICE_INIT_TYPE             = (1<<9),
        IB_DEVICE_PORT_ACTIVE_EVENT     = (1<<10),
        IB_DEVICE_SYS_IMAGE_GUID        = (1<<11),
        IB_DEVICE_RC_RNR_NAK_GEN        = (1<<12),
        IB_DEVICE_SRQ_RESIZE            = (1<<13),
        IB_DEVICE_N_NOTIFY_CQ           = (1<<14),
        IB_DEVICE_LOCAL_DMA_LKEY        = (1<<15),
        IB_DEVICE_RESERVED              = (1<<16), /* old SEND_W_INV */
        IB_DEVICE_MEM_WINDOW            = (1<<17),
        /*
         * Devices should set IB_DEVICE_UD_IP_CSUM if they support
         * insertion of UDP and TCP checksums on outgoing UD IPoIB
         * messages and can verify the validity of checksums for
         * incoming messages.  Setting this flag implies that the
         * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
         */
        IB_DEVICE_UD_IP_CSUM            = (1<<18),
        IB_DEVICE_UD_TSO                = (1<<19),
        IB_DEVICE_XRC                   = (1<<20),
        IB_DEVICE_MEM_MGT_EXTENSIONS    = (1<<21),
        IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
        IB_DEVICE_MEM_WINDOW_TYPE_2A    = (1<<23),
        IB_DEVICE_MEM_WINDOW_TYPE_2B    = (1<<24),
        IB_DEVICE_MANAGED_FLOW_STEERING = (1<<29),
        IB_DEVICE_SIGNATURE_HANDOVER    = (1<<30),
        IB_DEVICE_ON_DEMAND_PAGING      = (1<<31),
};

enum ib_signature_prot_cap {
        IB_PROT_T10DIF_TYPE_1 = 1,
        IB_PROT_T10DIF_TYPE_2 = 1 << 1,
        IB_PROT_T10DIF_TYPE_3 = 1 << 2,
};

enum ib_signature_guard_cap {
        IB_GUARD_T10DIF_CRC     = 1,
        IB_GUARD_T10DIF_CSUM    = 1 << 1,
};

enum ib_atomic_cap {
        IB_ATOMIC_NONE,
        IB_ATOMIC_HCA,
        IB_ATOMIC_GLOB
};

enum ib_odp_general_cap_bits {
        IB_ODP_SUPPORT = 1 << 0,
};

enum ib_odp_transport_cap_bits {
        IB_ODP_SUPPORT_SEND     = 1 << 0,
        IB_ODP_SUPPORT_RECV     = 1 << 1,
        IB_ODP_SUPPORT_WRITE    = 1 << 2,
        IB_ODP_SUPPORT_READ     = 1 << 3,
        IB_ODP_SUPPORT_ATOMIC   = 1 << 4,
};

struct ib_odp_caps {
        uint64_t general_caps;
        struct {
                uint32_t  rc_odp_caps;
                uint32_t  uc_odp_caps;
                uint32_t  ud_odp_caps;
        } per_transport_caps;
};

enum ib_cq_creation_flags {
        IB_CQ_FLAGS_TIMESTAMP_COMPLETION   = 1 << 0,
};

struct ib_cq_init_attr {
        unsigned int    cqe;
        int             comp_vector;
        u32             flags;
};
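
/*
 * Usage sketch (not part of the original header): allocating a CQ whose
 * completions carry timestamps.  Assumes ib_create_cq() as declared later
 * in this header; the handler and context names are hypothetical.
 *
 *      struct ib_cq_init_attr cq_attr = {
 *              .cqe         = 256,
 *              .comp_vector = 0,
 *              .flags       = IB_CQ_FLAGS_TIMESTAMP_COMPLETION,
 *      };
 *      struct ib_cq *cq = ib_create_cq(device, my_comp_handler, NULL,
 *                                      my_cq_context, &cq_attr);
 *      if (IS_ERR(cq))
 *              return PTR_ERR(cq);
 */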

struct ib_device_attr {
        u64                     fw_ver;
        __be64                  sys_image_guid;
        u64                     max_mr_size;
        u64                     page_size_cap;
        u32                     vendor_id;
        u32                     vendor_part_id;
        u32                     hw_ver;
        int                     max_qp;
        int                     max_qp_wr;
        int                     device_cap_flags;
        int                     max_sge;
        int                     max_sge_rd;
        int                     max_cq;
        int                     max_cqe;
        int                     max_mr;
        int                     max_pd;
        int                     max_qp_rd_atom;
        int                     max_ee_rd_atom;
        int                     max_res_rd_atom;
        int                     max_qp_init_rd_atom;
        int                     max_ee_init_rd_atom;
        enum ib_atomic_cap      atomic_cap;
        enum ib_atomic_cap      masked_atomic_cap;
        int                     max_ee;
        int                     max_rdd;
        int                     max_mw;
        int                     max_raw_ipv6_qp;
        int                     max_raw_ethy_qp;
        int                     max_mcast_grp;
        int                     max_mcast_qp_attach;
        int                     max_total_mcast_qp_attach;
        int                     max_ah;
        int                     max_fmr;
        int                     max_map_per_fmr;
        int                     max_srq;
        int                     max_srq_wr;
        int                     max_srq_sge;
        unsigned int            max_fast_reg_page_list_len;
        u16                     max_pkeys;
        u8                      local_ca_ack_delay;
        int                     sig_prot_cap;
        int                     sig_guard_cap;
        struct ib_odp_caps      odp_caps;
        uint64_t                timestamp_mask;
        uint64_t                hca_core_clock; /* in kHz */
};

enum ib_mtu {
        IB_MTU_256  = 1,
        IB_MTU_512  = 2,
        IB_MTU_1024 = 3,
        IB_MTU_2048 = 4,
        IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
        switch (mtu) {
        case IB_MTU_256:  return  256;
        case IB_MTU_512:  return  512;
        case IB_MTU_1024: return 1024;
        case IB_MTU_2048: return 2048;
        case IB_MTU_4096: return 4096;
        default:          return -1;
        }
}
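
/*
 * Sketch of the inverse mapping (not part of the original header): pick
 * the largest IB MTU enum value that does not exceed a byte count.  The
 * name mirrors ib_mtu_enum_to_int() but is hypothetical here.
 */
static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
        if (mtu >= 4096)
                return IB_MTU_4096;
        else if (mtu >= 2048)
                return IB_MTU_2048;
        else if (mtu >= 1024)
                return IB_MTU_1024;
        else if (mtu >= 512)
                return IB_MTU_512;
        else
                return IB_MTU_256;
}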

enum ib_port_state {
        IB_PORT_NOP             = 0,
        IB_PORT_DOWN            = 1,
        IB_PORT_INIT            = 2,
        IB_PORT_ARMED           = 3,
        IB_PORT_ACTIVE          = 4,
        IB_PORT_ACTIVE_DEFER    = 5
};

enum ib_port_cap_flags {
        IB_PORT_SM                              = 1 <<  1,
        IB_PORT_NOTICE_SUP                      = 1 <<  2,
        IB_PORT_TRAP_SUP                        = 1 <<  3,
        IB_PORT_OPT_IPD_SUP                     = 1 <<  4,
        IB_PORT_AUTO_MIGR_SUP                   = 1 <<  5,
        IB_PORT_SL_MAP_SUP                      = 1 <<  6,
        IB_PORT_MKEY_NVRAM                      = 1 <<  7,
        IB_PORT_PKEY_NVRAM                      = 1 <<  8,
        IB_PORT_LED_INFO_SUP                    = 1 <<  9,
        IB_PORT_SM_DISABLED                     = 1 << 10,
        IB_PORT_SYS_IMAGE_GUID_SUP              = 1 << 11,
        IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP       = 1 << 12,
        IB_PORT_EXTENDED_SPEEDS_SUP             = 1 << 14,
        IB_PORT_CM_SUP                          = 1 << 16,
        IB_PORT_SNMP_TUNNEL_SUP                 = 1 << 17,
        IB_PORT_REINIT_SUP                      = 1 << 18,
        IB_PORT_DEVICE_MGMT_SUP                 = 1 << 19,
        IB_PORT_VENDOR_CLASS_SUP                = 1 << 20,
        IB_PORT_DR_NOTICE_SUP                   = 1 << 21,
        IB_PORT_CAP_MASK_NOTICE_SUP             = 1 << 22,
        IB_PORT_BOOT_MGMT_SUP                   = 1 << 23,
        IB_PORT_LINK_LATENCY_SUP                = 1 << 24,
        IB_PORT_CLIENT_REG_SUP                  = 1 << 25,
        IB_PORT_IP_BASED_GIDS                   = 1 << 26,
};

enum ib_port_width {
        IB_WIDTH_1X     = 1,
        IB_WIDTH_4X     = 2,
        IB_WIDTH_8X     = 4,
        IB_WIDTH_12X    = 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
        switch (width) {
        case IB_WIDTH_1X:  return  1;
        case IB_WIDTH_4X:  return  4;
        case IB_WIDTH_8X:  return  8;
        case IB_WIDTH_12X: return 12;
        default:           return -1;
        }
}

enum ib_port_speed {
        IB_SPEED_SDR    = 1,
        IB_SPEED_DDR    = 2,
        IB_SPEED_QDR    = 4,
        IB_SPEED_FDR10  = 8,
        IB_SPEED_FDR    = 16,
        IB_SPEED_EDR    = 32
};
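
/*
 * Sketch (not part of the original header): approximate per-lane rate for
 * each speed enum, in tenths of a Gb/sec, so that e.g. active_speed
 * combined with ib_width_enum_to_int(active_width) yields a port rate.
 * The helper name is hypothetical; the values follow the IBA per-lane
 * data rates (2.5, 5, 10, 10.3125, 14.0625 and 25.78125 Gb/sec).
 */
static inline int ib_speed_enum_to_rate(enum ib_port_speed speed)
{
        switch (speed) {
        case IB_SPEED_SDR:   return  25;
        case IB_SPEED_DDR:   return  50;
        case IB_SPEED_QDR:   return 100;
        case IB_SPEED_FDR10: return 103;
        case IB_SPEED_FDR:   return 141;
        case IB_SPEED_EDR:   return 258;
        default:             return  -1;
        }
}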

struct ib_protocol_stats {
        /* TBD... */
};

struct iw_protocol_stats {
        u64     ipInReceives;
        u64     ipInHdrErrors;
        u64     ipInTooBigErrors;
        u64     ipInNoRoutes;
        u64     ipInAddrErrors;
        u64     ipInUnknownProtos;
        u64     ipInTruncatedPkts;
        u64     ipInDiscards;
        u64     ipInDelivers;
        u64     ipOutForwDatagrams;
        u64     ipOutRequests;
        u64     ipOutDiscards;
        u64     ipOutNoRoutes;
        u64     ipReasmTimeout;
        u64     ipReasmReqds;
        u64     ipReasmOKs;
        u64     ipReasmFails;
        u64     ipFragOKs;
        u64     ipFragFails;
        u64     ipFragCreates;
        u64     ipInMcastPkts;
        u64     ipOutMcastPkts;
        u64     ipInBcastPkts;
        u64     ipOutBcastPkts;

        u64     tcpRtoAlgorithm;
        u64     tcpRtoMin;
        u64     tcpRtoMax;
        u64     tcpMaxConn;
        u64     tcpActiveOpens;
        u64     tcpPassiveOpens;
        u64     tcpAttemptFails;
        u64     tcpEstabResets;
        u64     tcpCurrEstab;
        u64     tcpInSegs;
        u64     tcpOutSegs;
        u64     tcpRetransSegs;
        u64     tcpInErrs;
        u64     tcpOutRsts;
};

union rdma_protocol_stats {
        struct ib_protocol_stats        ib;
        struct iw_protocol_stats        iw;
};

/* Bits defining the functionality that the core must support
 * for this port.
 */
/* Management                           0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD            0x00000001
#define RDMA_CORE_CAP_IB_SMI            0x00000002
#define RDMA_CORE_CAP_IB_CM             0x00000004
#define RDMA_CORE_CAP_IW_CM             0x00000008
#define RDMA_CORE_CAP_IB_SA             0x00000010
#define RDMA_CORE_CAP_OPA_MAD           0x00000020

/* Address format                       0x000FF000 */
#define RDMA_CORE_CAP_AF_IB             0x00001000
#define RDMA_CORE_CAP_ETH_AH            0x00002000

/* Protocol                             0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB           0x00100000
#define RDMA_CORE_CAP_PROT_ROCE         0x00200000
#define RDMA_CORE_CAP_PROT_IWARP        0x00400000

#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
                                        | RDMA_CORE_CAP_IB_MAD \
                                        | RDMA_CORE_CAP_IB_SMI \
                                        | RDMA_CORE_CAP_IB_CM  \
                                        | RDMA_CORE_CAP_IB_SA  \
                                        | RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
                                        | RDMA_CORE_CAP_IB_MAD  \
                                        | RDMA_CORE_CAP_IB_CM   \
                                        | RDMA_CORE_CAP_AF_IB   \
                                        | RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
                                        | RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
                                        | RDMA_CORE_CAP_OPA_MAD)

struct ib_port_attr {
        enum ib_port_state      state;
        enum ib_mtu             max_mtu;
        enum ib_mtu             active_mtu;
        int                     gid_tbl_len;
        u32                     port_cap_flags;
        u32                     max_msg_sz;
        u32                     bad_pkey_cntr;
        u32                     qkey_viol_cntr;
        u16                     pkey_tbl_len;
        u16                     lid;
        u16                     sm_lid;
        u8                      lmc;
        u8                      max_vl_num;
        u8                      sm_sl;
        u8                      subnet_timeout;
        u8                      init_type_reply;
        u8                      active_width;
        u8                      active_speed;
        u8                      phys_state;
};

enum ib_device_modify_flags {
        IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
        IB_DEVICE_MODIFY_NODE_DESC      = 1 << 1
};

struct ib_device_modify {
        u64     sys_image_guid;
        char    node_desc[64];
};

enum ib_port_modify_flags {
        IB_PORT_SHUTDOWN                = 1,
        IB_PORT_INIT_TYPE               = (1<<2),
        IB_PORT_RESET_QKEY_CNTR         = (1<<3)
};

struct ib_port_modify {
        u32     set_port_cap_mask;
        u32     clr_port_cap_mask;
        u8      init_type;
};

enum ib_event_type {
        IB_EVENT_CQ_ERR,
        IB_EVENT_QP_FATAL,
        IB_EVENT_QP_REQ_ERR,
        IB_EVENT_QP_ACCESS_ERR,
        IB_EVENT_COMM_EST,
        IB_EVENT_SQ_DRAINED,
        IB_EVENT_PATH_MIG,
        IB_EVENT_PATH_MIG_ERR,
        IB_EVENT_DEVICE_FATAL,
        IB_EVENT_PORT_ACTIVE,
        IB_EVENT_PORT_ERR,
        IB_EVENT_LID_CHANGE,
        IB_EVENT_PKEY_CHANGE,
        IB_EVENT_SM_CHANGE,
        IB_EVENT_SRQ_ERR,
        IB_EVENT_SRQ_LIMIT_REACHED,
        IB_EVENT_QP_LAST_WQE_REACHED,
        IB_EVENT_CLIENT_REREGISTER,
        IB_EVENT_GID_CHANGE,
};

__attribute_const__ const char *ib_event_msg(enum ib_event_type event);

struct ib_event {
        struct ib_device        *device;
        union {
                struct ib_cq    *cq;
                struct ib_qp    *qp;
                struct ib_srq   *srq;
                u8              port_num;
        } element;
        enum ib_event_type      event;
};

struct ib_event_handler {
        struct ib_device *device;
        void            (*handler)(struct ib_event_handler *, struct ib_event *);
        struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)          \
        do {                                                    \
                (_ptr)->device  = _device;                      \
                (_ptr)->handler = _handler;                     \
                INIT_LIST_HEAD(&(_ptr)->list);                  \
        } while (0)
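
/*
 * Usage sketch (not part of the original header): a client installing an
 * asynchronous event handler.  Assumes ib_register_event_handler() as
 * declared later in this header; my_event_handler is hypothetical.
 *
 *      static void my_event_handler(struct ib_event_handler *handler,
 *                                   struct ib_event *event)
 *      {
 *              pr_info("async event %d on device %s\n",
 *                      event->event, event->device->name);
 *      }
 *
 *      struct ib_event_handler eh;
 *
 *      INIT_IB_EVENT_HANDLER(&eh, device, my_event_handler);
 *      ib_register_event_handler(&eh);
 */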

struct ib_global_route {
        union ib_gid    dgid;
        u32             flow_label;
        u8              sgid_index;
        u8              hop_limit;
        u8              traffic_class;
};

struct ib_grh {
        __be32          version_tclass_flow;
        __be16          paylen;
        u8              next_hdr;
        u8              hop_limit;
        union ib_gid    sgid;
        union ib_gid    dgid;
};

enum {
        IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE       cpu_to_be16(0xFFFF)

enum ib_ah_flags {
        IB_AH_GRH       = 1
};

enum ib_rate {
        IB_RATE_PORT_CURRENT = 0,
        IB_RATE_2_5_GBPS = 2,
        IB_RATE_5_GBPS   = 5,
        IB_RATE_10_GBPS  = 3,
        IB_RATE_20_GBPS  = 6,
        IB_RATE_30_GBPS  = 4,
        IB_RATE_40_GBPS  = 7,
        IB_RATE_60_GBPS  = 8,
        IB_RATE_80_GBPS  = 9,
        IB_RATE_120_GBPS = 10,
        IB_RATE_14_GBPS  = 11,
        IB_RATE_56_GBPS  = 12,
        IB_RATE_112_GBPS = 13,
        IB_RATE_168_GBPS = 14,
        IB_RATE_25_GBPS  = 15,
        IB_RATE_100_GBPS = 16,
        IB_RATE_200_GBPS = 17,
        IB_RATE_300_GBPS = 18
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
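
/*
 * Worked example (not part of the original header): for IB_RATE_40_GBPS
 * the two conversions agree, ib_rate_to_mult() returns 40 / 2.5 = 16 and
 * ib_rate_to_mbps() returns 40000.
 */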


/**
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG:       memory region that is used for
 *                            normal registration
 * @IB_MR_TYPE_SIGNATURE:     memory region that is used for
 *                            signature operations (data-integrity
 *                            capable regions)
 */
enum ib_mr_type {
        IB_MR_TYPE_MEM_REG,
        IB_MR_TYPE_SIGNATURE,
};

/**
 * Signature types
 * IB_SIG_TYPE_NONE: Unprotected.
 * IB_SIG_TYPE_T10_DIF: Type T10-DIF
 */
enum ib_signature_type {
        IB_SIG_TYPE_NONE,
        IB_SIG_TYPE_T10_DIF,
};

/**
 * Signature T10-DIF block-guard types
 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
 * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
 */
enum ib_t10_dif_bg_type {
        IB_T10DIF_CRC,
        IB_T10DIF_CSUM
};

/**
 * struct ib_t10_dif_domain - Parameters specific for T10-DIF
 *     domain.
 * @bg_type: T10-DIF block guard type (CRC|CSUM)
 * @pi_interval: protection information interval.
 * @bg: seed of guard computation.
 * @app_tag: application tag of the guard block
 * @ref_tag: initial guard block reference tag.
 * @ref_remap: Indicates whether the reference tag increments on each block
 * @app_escape: Indicates that the block check is skipped if apptag=0xffff
 * @ref_escape: Indicates that the block check is skipped if reftag=0xffffffff
 * @apptag_check_mask: check bitmask of application tag.
 */
struct ib_t10_dif_domain {
        enum ib_t10_dif_bg_type bg_type;
        u16                     pi_interval;
        u16                     bg;
        u16                     app_tag;
        u32                     ref_tag;
        bool                    ref_remap;
        bool                    app_escape;
        bool                    ref_escape;
        u16                     apptag_check_mask;
};

/**
 * struct ib_sig_domain - Parameters for signature domain
 * @sig_type: specific signature type
 * @sig: union of all signature domain attributes that may
 *     be used to set domain layout.
 */
struct ib_sig_domain {
        enum ib_signature_type sig_type;
        union {
                struct ib_t10_dif_domain dif;
        } sig;
};

/**
 * struct ib_sig_attrs - Parameters for signature handover operation
 * @check_mask: bitmask for signature byte check (8 bytes)
 * @mem: memory domain layout descriptor.
 * @wire: wire domain layout descriptor.
 */
struct ib_sig_attrs {
        u8                      check_mask;
        struct ib_sig_domain    mem;
        struct ib_sig_domain    wire;
};

enum ib_sig_err_type {
        IB_SIG_BAD_GUARD,
        IB_SIG_BAD_REFTAG,
        IB_SIG_BAD_APPTAG,
};

/**
 * struct ib_sig_err - signature error descriptor
 */
struct ib_sig_err {
        enum ib_sig_err_type    err_type;
        u32                     expected;
        u32                     actual;
        u64                     sig_err_offset;
        u32                     key;
};

enum ib_mr_status_check {
        IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
        u32                 fail_status;
        struct ib_sig_err   sig_err;
};

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

struct ib_ah_attr {
        struct ib_global_route  grh;
        u16                     dlid;
        u8                      sl;
        u8                      src_path_bits;
        u8                      static_rate;
        u8                      ah_flags;
        u8                      port_num;
        u8                      dmac[ETH_ALEN];
        u16                     vlan_id;
};

enum ib_wc_status {
        IB_WC_SUCCESS,
        IB_WC_LOC_LEN_ERR,
        IB_WC_LOC_QP_OP_ERR,
        IB_WC_LOC_EEC_OP_ERR,
        IB_WC_LOC_PROT_ERR,
        IB_WC_WR_FLUSH_ERR,
        IB_WC_MW_BIND_ERR,
        IB_WC_BAD_RESP_ERR,
        IB_WC_LOC_ACCESS_ERR,
        IB_WC_REM_INV_REQ_ERR,
        IB_WC_REM_ACCESS_ERR,
        IB_WC_REM_OP_ERR,
        IB_WC_RETRY_EXC_ERR,
        IB_WC_RNR_RETRY_EXC_ERR,
        IB_WC_LOC_RDD_VIOL_ERR,
        IB_WC_REM_INV_RD_REQ_ERR,
        IB_WC_REM_ABORT_ERR,
        IB_WC_INV_EECN_ERR,
        IB_WC_INV_EEC_STATE_ERR,
        IB_WC_FATAL_ERR,
        IB_WC_RESP_TIMEOUT_ERR,
        IB_WC_GENERAL_ERR
};

__attribute_const__ const char *ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
        IB_WC_SEND,
        IB_WC_RDMA_WRITE,
        IB_WC_RDMA_READ,
        IB_WC_COMP_SWAP,
        IB_WC_FETCH_ADD,
        IB_WC_BIND_MW,
        IB_WC_LSO,
        IB_WC_LOCAL_INV,
        IB_WC_FAST_REG_MR,
        IB_WC_MASKED_COMP_SWAP,
        IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
        IB_WC_RECV                      = 1 << 7,
        IB_WC_RECV_RDMA_WITH_IMM
};

enum ib_wc_flags {
        IB_WC_GRH               = 1,
        IB_WC_WITH_IMM          = (1<<1),
        IB_WC_WITH_INVALIDATE   = (1<<2),
        IB_WC_IP_CSUM_OK        = (1<<3),
        IB_WC_WITH_SMAC         = (1<<4),
        IB_WC_WITH_VLAN         = (1<<5),
};

struct ib_wc {
        u64                     wr_id;
        enum ib_wc_status       status;
        enum ib_wc_opcode       opcode;
        u32                     vendor_err;
        u32                     byte_len;
        struct ib_qp           *qp;
        union {
                __be32          imm_data;
                u32             invalidate_rkey;
        } ex;
        u32                     src_qp;
        int                     wc_flags;
        u16                     pkey_index;
        u16                     slid;
        u8                      sl;
        u8                      dlid_path_bits;
        u8                      port_num;       /* valid only for DR SMPs on switches */
        u8                      smac[ETH_ALEN];
        u16                     vlan_id;
};
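
/*
 * Usage sketch (not part of the original header): draining completions
 * from a CQ.  Assumes ib_poll_cq() as declared later in this header;
 * handle_recv() is hypothetical.
 *
 *      struct ib_wc wc;
 *
 *      while (ib_poll_cq(cq, 1, &wc) > 0) {
 *              if (wc.status != IB_WC_SUCCESS) {
 *                      pr_err("wr_id %llu failed: %s\n",
 *                             (unsigned long long)wc.wr_id,
 *                             ib_wc_status_msg(wc.status));
 *                      continue;
 *              }
 *              if (wc.opcode & IB_WC_RECV)
 *                      handle_recv(&wc);
 *      }
 */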

enum ib_cq_notify_flags {
        IB_CQ_SOLICITED                 = 1 << 0,
        IB_CQ_NEXT_COMP                 = 1 << 1,
        IB_CQ_SOLICITED_MASK            = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
        IB_CQ_REPORT_MISSED_EVENTS      = 1 << 2,
};

enum ib_srq_type {
        IB_SRQT_BASIC,
        IB_SRQT_XRC
};

enum ib_srq_attr_mask {
        IB_SRQ_MAX_WR   = 1 << 0,
        IB_SRQ_LIMIT    = 1 << 1,
};

struct ib_srq_attr {
        u32     max_wr;
        u32     max_sge;
        u32     srq_limit;
};

struct ib_srq_init_attr {
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *srq_context;
        struct ib_srq_attr      attr;
        enum ib_srq_type        srq_type;

        union {
                struct {
                        struct ib_xrcd *xrcd;
                        struct ib_cq   *cq;
                } xrc;
        } ext;
};

struct ib_qp_cap {
        u32     max_send_wr;
        u32     max_recv_wr;
        u32     max_send_sge;
        u32     max_recv_sge;
        u32     max_inline_data;
};

enum ib_sig_type {
        IB_SIGNAL_ALL_WR,
        IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
        /*
         * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
         * here (and in that order) since the MAD layer uses them as
         * indices into a 2-entry table.
         */
        IB_QPT_SMI,
        IB_QPT_GSI,

        IB_QPT_RC,
        IB_QPT_UC,
        IB_QPT_UD,
        IB_QPT_RAW_IPV6,
        IB_QPT_RAW_ETHERTYPE,
        IB_QPT_RAW_PACKET = 8,
        IB_QPT_XRC_INI = 9,
        IB_QPT_XRC_TGT,
        IB_QPT_MAX,
        /* Reserve a range for qp types internal to the low level driver.
         * These qp types will not be visible at the IB core layer, so the
         * IB_QPT_MAX usages should not be affected in the core layer
         */
        IB_QPT_RESERVED1 = 0x1000,
        IB_QPT_RESERVED2,
        IB_QPT_RESERVED3,
        IB_QPT_RESERVED4,
        IB_QPT_RESERVED5,
        IB_QPT_RESERVED6,
        IB_QPT_RESERVED7,
        IB_QPT_RESERVED8,
        IB_QPT_RESERVED9,
        IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
        IB_QP_CREATE_IPOIB_UD_LSO               = 1 << 0,
        IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK   = 1 << 1,
        IB_QP_CREATE_NETIF_QP                   = 1 << 5,
        IB_QP_CREATE_SIGNATURE_EN               = 1 << 6,
        IB_QP_CREATE_USE_GFP_NOIO               = 1 << 7,
        /* reserve bits 26-31 for low level drivers' internal use */
        IB_QP_CREATE_RESERVED_START             = 1 << 26,
        IB_QP_CREATE_RESERVED_END               = 1 << 31,
};


/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
 * callback to destroy the passed in QP.
 */

struct ib_qp_init_attr {
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *qp_context;
        struct ib_cq           *send_cq;
        struct ib_cq           *recv_cq;
        struct ib_srq          *srq;
        struct ib_xrcd         *xrcd;     /* XRC TGT QPs only */
        struct ib_qp_cap        cap;
        enum ib_sig_type        sq_sig_type;
        enum ib_qp_type         qp_type;
        enum ib_qp_create_flags create_flags;
        u8                      port_num; /* special QP types only */
};
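
/*
 * Usage sketch (not part of the original header): creating an RC QP.
 * Assumes ib_create_qp() as declared later in this header; the PD and CQ
 * variables are hypothetical.
 *
 *      struct ib_qp_init_attr init_attr = {
 *              .send_cq     = send_cq,
 *              .recv_cq     = recv_cq,
 *              .cap         = { .max_send_wr  = 64, .max_recv_wr  = 64,
 *                               .max_send_sge = 1,  .max_recv_sge = 1 },
 *              .sq_sig_type = IB_SIGNAL_REQ_WR,
 *              .qp_type     = IB_QPT_RC,
 *      };
 *      struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *      if (IS_ERR(qp))
 *              return PTR_ERR(qp);
 */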

struct ib_qp_open_attr {
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *qp_context;
        u32                     qp_num;
        enum ib_qp_type         qp_type;
};

enum ib_rnr_timeout {
        IB_RNR_TIMER_655_36 =  0,
        IB_RNR_TIMER_000_01 =  1,
        IB_RNR_TIMER_000_02 =  2,
        IB_RNR_TIMER_000_03 =  3,
        IB_RNR_TIMER_000_04 =  4,
        IB_RNR_TIMER_000_06 =  5,
        IB_RNR_TIMER_000_08 =  6,
        IB_RNR_TIMER_000_12 =  7,
        IB_RNR_TIMER_000_16 =  8,
        IB_RNR_TIMER_000_24 =  9,
        IB_RNR_TIMER_000_32 = 10,
        IB_RNR_TIMER_000_48 = 11,
        IB_RNR_TIMER_000_64 = 12,
        IB_RNR_TIMER_000_96 = 13,
        IB_RNR_TIMER_001_28 = 14,
        IB_RNR_TIMER_001_92 = 15,
        IB_RNR_TIMER_002_56 = 16,
        IB_RNR_TIMER_003_84 = 17,
        IB_RNR_TIMER_005_12 = 18,
        IB_RNR_TIMER_007_68 = 19,
        IB_RNR_TIMER_010_24 = 20,
        IB_RNR_TIMER_015_36 = 21,
        IB_RNR_TIMER_020_48 = 22,
        IB_RNR_TIMER_030_72 = 23,
        IB_RNR_TIMER_040_96 = 24,
        IB_RNR_TIMER_061_44 = 25,
        IB_RNR_TIMER_081_92 = 26,
        IB_RNR_TIMER_122_88 = 27,
        IB_RNR_TIMER_163_84 = 28,
        IB_RNR_TIMER_245_76 = 29,
        IB_RNR_TIMER_327_68 = 30,
        IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
        IB_QP_STATE                     = 1,
        IB_QP_CUR_STATE                 = (1<<1),
        IB_QP_EN_SQD_ASYNC_NOTIFY       = (1<<2),
        IB_QP_ACCESS_FLAGS              = (1<<3),
        IB_QP_PKEY_INDEX                = (1<<4),
        IB_QP_PORT                      = (1<<5),
        IB_QP_QKEY                      = (1<<6),
        IB_QP_AV                        = (1<<7),
        IB_QP_PATH_MTU                  = (1<<8),
        IB_QP_TIMEOUT                   = (1<<9),
        IB_QP_RETRY_CNT                 = (1<<10),
        IB_QP_RNR_RETRY                 = (1<<11),
        IB_QP_RQ_PSN                    = (1<<12),
        IB_QP_MAX_QP_RD_ATOMIC          = (1<<13),
        IB_QP_ALT_PATH                  = (1<<14),
        IB_QP_MIN_RNR_TIMER             = (1<<15),
        IB_QP_SQ_PSN                    = (1<<16),
        IB_QP_MAX_DEST_RD_ATOMIC        = (1<<17),
        IB_QP_PATH_MIG_STATE            = (1<<18),
        IB_QP_CAP                       = (1<<19),
        IB_QP_DEST_QPN                  = (1<<20),
        IB_QP_SMAC                      = (1<<21),
        IB_QP_ALT_SMAC                  = (1<<22),
        IB_QP_VID                       = (1<<23),
        IB_QP_ALT_VID                   = (1<<24),
};

enum ib_qp_state {
        IB_QPS_RESET,
        IB_QPS_INIT,
        IB_QPS_RTR,
        IB_QPS_RTS,
        IB_QPS_SQD,
        IB_QPS_SQE,
        IB_QPS_ERR
};

enum ib_mig_state {
        IB_MIG_MIGRATED,
        IB_MIG_REARM,
        IB_MIG_ARMED
};

enum ib_mw_type {
        IB_MW_TYPE_1 = 1,
        IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
        enum ib_qp_state        qp_state;
        enum ib_qp_state        cur_qp_state;
        enum ib_mtu             path_mtu;
        enum ib_mig_state       path_mig_state;
        u32                     qkey;
        u32                     rq_psn;
        u32                     sq_psn;
        u32                     dest_qp_num;
        int                     qp_access_flags;
        struct ib_qp_cap        cap;
        struct ib_ah_attr       ah_attr;
        struct ib_ah_attr       alt_ah_attr;
        u16                     pkey_index;
        u16                     alt_pkey_index;
        u8                      en_sqd_async_notify;
        u8                      sq_draining;
        u8                      max_rd_atomic;
        u8                      max_dest_rd_atomic;
        u8                      min_rnr_timer;
        u8                      port_num;
        u8                      timeout;
        u8                      retry_cnt;
        u8                      rnr_retry;
        u8                      alt_port_num;
        u8                      alt_timeout;
        u8                      smac[ETH_ALEN];
        u8                      alt_smac[ETH_ALEN];
        u16                     vlan_id;
        u16                     alt_vlan_id;
};
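
/*
 * Usage sketch (not part of the original header): the RESET -> INIT
 * transition of an RC QP.  Assumes ib_modify_qp() as declared later in
 * this header; IB_ACCESS_REMOTE_WRITE is defined further below.
 *
 *      struct ib_qp_attr attr = {
 *              .qp_state        = IB_QPS_INIT,
 *              .pkey_index      = 0,
 *              .port_num        = 1,
 *              .qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *      };
 *
 *      ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *                         IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */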

enum ib_wr_opcode {
        IB_WR_RDMA_WRITE,
        IB_WR_RDMA_WRITE_WITH_IMM,
        IB_WR_SEND,
        IB_WR_SEND_WITH_IMM,
        IB_WR_RDMA_READ,
        IB_WR_ATOMIC_CMP_AND_SWP,
        IB_WR_ATOMIC_FETCH_AND_ADD,
        IB_WR_LSO,
        IB_WR_SEND_WITH_INV,
        IB_WR_RDMA_READ_WITH_INV,
        IB_WR_LOCAL_INV,
        IB_WR_FAST_REG_MR,
        IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
        IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
        IB_WR_BIND_MW,
        IB_WR_REG_SIG_MR,
        /* reserve values for low level drivers' internal use.
         * These values will not be used at all in the ib core layer.
         */
        IB_WR_RESERVED1 = 0xf0,
        IB_WR_RESERVED2,
        IB_WR_RESERVED3,
        IB_WR_RESERVED4,
        IB_WR_RESERVED5,
        IB_WR_RESERVED6,
        IB_WR_RESERVED7,
        IB_WR_RESERVED8,
        IB_WR_RESERVED9,
        IB_WR_RESERVED10,
};

enum ib_send_flags {
        IB_SEND_FENCE           = 1,
        IB_SEND_SIGNALED        = (1<<1),
        IB_SEND_SOLICITED       = (1<<2),
        IB_SEND_INLINE          = (1<<3),
        IB_SEND_IP_CSUM         = (1<<4),

        /* reserve bits 26-31 for low level drivers' internal use */
        IB_SEND_RESERVED_START  = (1 << 26),
        IB_SEND_RESERVED_END    = (1 << 31),
};

struct ib_sge {
        u64     addr;
        u32     length;
        u32     lkey;
};

struct ib_fast_reg_page_list {
        struct ib_device       *device;
        u64                    *page_list;
        unsigned int            max_page_list_len;
};

/**
 * struct ib_mw_bind_info - Parameters for a memory window bind operation.
 * @mr: A memory region to bind the memory window to.
 * @addr: The address where the memory window should begin.
 * @length: The length of the memory window, in bytes.
 * @mw_access_flags: Access flags from enum ib_access_flags for the window.
 *
 * This struct contains the shared parameters for type 1 and type 2
 * memory window bind operations.
 */
struct ib_mw_bind_info {
        struct ib_mr   *mr;
        u64             addr;
        u64             length;
        int             mw_access_flags;
};

struct ib_send_wr {
        struct ib_send_wr      *next;
        u64                     wr_id;
        struct ib_sge          *sg_list;
        int                     num_sge;
        enum ib_wr_opcode       opcode;
        int                     send_flags;
        union {
                __be32          imm_data;
                u32             invalidate_rkey;
        } ex;
        union {
                struct {
                        u64     remote_addr;
                        u32     rkey;
                } rdma;
                struct {
                        u64     remote_addr;
                        u64     compare_add;
                        u64     swap;
                        u64     compare_add_mask;
                        u64     swap_mask;
                        u32     rkey;
                } atomic;
                struct {
                        struct ib_ah *ah;
                        void   *header;
                        int     hlen;
                        int     mss;
                        u32     remote_qpn;
                        u32     remote_qkey;
                        u16     pkey_index; /* valid for GSI only */
                        u8      port_num;   /* valid for DR SMPs on switch only */
                } ud;
                struct {
                        u64                             iova_start;
                        struct ib_fast_reg_page_list   *page_list;
                        unsigned int                    page_shift;
                        unsigned int                    page_list_len;
                        u32                             length;
                        int                             access_flags;
                        u32                             rkey;
                } fast_reg;
                struct {
                        struct ib_mw            *mw;
                        /* The new rkey for the memory window. */
                        u32                      rkey;
                        struct ib_mw_bind_info   bind_info;
                } bind_mw;
                struct {
                        struct ib_sig_attrs    *sig_attrs;
                        struct ib_mr           *sig_mr;
                        int                     access_flags;
                        struct ib_sge          *prot;
                } sig_handover;
        } wr;
        u32                     xrc_remote_srq_num;     /* XRC TGT QPs only */
};
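
/*
 * Usage sketch (not part of the original header): posting a signalled
 * RDMA WRITE.  Assumes ib_post_send() as declared later in this header;
 * the DMA address, length, keys and wr_id cookie are hypothetical.
 *
 *      struct ib_sge sge = {
 *              .addr   = dma_addr,
 *              .length = len,
 *              .lkey   = mr->lkey,
 *      };
 *      struct ib_send_wr wr = {
 *              .wr_id      = MY_WRITE_WRID,
 *              .sg_list    = &sge,
 *              .num_sge    = 1,
 *              .opcode     = IB_WR_RDMA_WRITE,
 *              .send_flags = IB_SEND_SIGNALED,
 *              .wr.rdma    = { .remote_addr = remote_addr, .rkey = rkey },
 *      };
 *      struct ib_send_wr *bad_wr;
 *
 *      ret = ib_post_send(qp, &wr, &bad_wr);
 */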

struct ib_recv_wr {
        struct ib_recv_wr      *next;
        u64                     wr_id;
        struct ib_sge          *sg_list;
        int                     num_sge;
};
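
/*
 * Usage sketch (not part of the original header): posting one receive
 * buffer.  Assumes ib_post_recv() as declared later in this header; the
 * buffer parameters are hypothetical.
 *
 *      struct ib_sge sge = { .addr = dma_addr, .length = len,
 *                            .lkey = mr->lkey };
 *      struct ib_recv_wr wr = { .wr_id = MY_RECV_WRID, .sg_list = &sge,
 *                               .num_sge = 1 };
 *      struct ib_recv_wr *bad_wr;
 *
 *      ret = ib_post_recv(qp, &wr, &bad_wr);
 */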

enum ib_access_flags {
        IB_ACCESS_LOCAL_WRITE   = 1,
        IB_ACCESS_REMOTE_WRITE  = (1<<1),
        IB_ACCESS_REMOTE_READ   = (1<<2),
        IB_ACCESS_REMOTE_ATOMIC = (1<<3),
        IB_ACCESS_MW_BIND       = (1<<4),
        IB_ZERO_BASED           = (1<<5),
        IB_ACCESS_ON_DEMAND     = (1<<6),
};

struct ib_phys_buf {
        u64      addr;
        u64      size;
};

struct ib_mr_attr {
        struct ib_pd    *pd;
        u64             device_virt_addr;
        u64             size;
        int             mr_access_flags;
        u32             lkey;
        u32             rkey;
};

enum ib_mr_rereg_flags {
        IB_MR_REREG_TRANS       = 1,
        IB_MR_REREG_PD          = (1<<1),
        IB_MR_REREG_ACCESS      = (1<<2),
        IB_MR_REREG_SUPPORTED   = ((IB_MR_REREG_ACCESS << 1) - 1)
};

/**
 * struct ib_mw_bind - Parameters for a type 1 memory window bind operation.
 * @wr_id:      Work request id.
 * @send_flags: Flags from ib_send_flags enum.
 * @bind_info:  More parameters of the bind operation.
 */
struct ib_mw_bind {
        u64                    wr_id;
        int                    send_flags;
        struct ib_mw_bind_info bind_info;
};

struct ib_fmr_attr {
        int     max_pages;
        int     max_maps;
        u8      page_shift;
};

struct ib_umem;

struct ib_ucontext {
        struct ib_device       *device;
        struct list_head        pd_list;
        struct list_head        mr_list;
        struct list_head        mw_list;
        struct list_head        cq_list;
        struct list_head        qp_list;
        struct list_head        srq_list;
        struct list_head        ah_list;
        struct list_head        xrcd_list;
        struct list_head        rule_list;
        int                     closing;

        struct pid             *tgid;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        struct rb_root      umem_tree;
        /*
         * Protects .umem_tree, as well as odp_mrs_count and
         * mmu notifiers registration.
         */
        struct rw_semaphore     umem_rwsem;
        void (*invalidate_range)(struct ib_umem *umem,
                                 unsigned long start, unsigned long end);

        struct mmu_notifier     mn;
        atomic_t                notifier_count;
        /* A list of umems that don't have private mmu notifier counters yet. */
        struct list_head        no_private_counters;
        int                     odp_mrs_count;
#endif
};

struct ib_uobject {
        u64                     user_handle;    /* handle given to us by userspace */
        struct ib_ucontext     *context;        /* associated user context */
        void                   *object;         /* containing object */
        struct list_head        list;           /* link to context's list */
        int                     id;             /* index into kernel idr */
        struct kref             ref;
        struct rw_semaphore     mutex;          /* protects .live */
        int                     live;
};

struct ib_udata {
        const void __user *inbuf;
        void __user *outbuf;
        size_t       inlen;
        size_t       outlen;
};

struct ib_pd {
        u32                     local_dma_lkey;
        struct ib_device       *device;
        struct ib_uobject      *uobject;
        atomic_t                usecnt; /* count all resources */
        struct ib_mr           *local_mr;
};

struct ib_xrcd {
        struct ib_device       *device;
        atomic_t                usecnt; /* count all exposed resources */
        struct inode           *inode;

        struct mutex            tgt_qp_mutex;
        struct list_head        tgt_qp_list;
};

struct ib_ah {
        struct ib_device        *device;
        struct ib_pd            *pd;
        struct ib_uobject       *uobject;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

struct ib_cq {
        struct ib_device       *device;
        struct ib_uobject      *uobject;
        ib_comp_handler         comp_handler;
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *cq_context;
        int                     cqe;
        atomic_t                usecnt; /* count number of work queues */
};

struct ib_srq {
        struct ib_device       *device;
        struct ib_pd           *pd;
        struct ib_uobject      *uobject;
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *srq_context;
        enum ib_srq_type        srq_type;
        atomic_t                usecnt;

        union {
                struct {
                        struct ib_xrcd *xrcd;
                        struct ib_cq   *cq;
                        u32             srq_num;
                } xrc;
        } ext;
};

struct ib_qp {
        struct ib_device       *device;
        struct ib_pd           *pd;
        struct ib_cq           *send_cq;
        struct ib_cq           *recv_cq;
        struct ib_srq          *srq;
        struct ib_xrcd         *xrcd; /* XRC TGT QPs only */
        struct list_head        xrcd_list;
        /* count times opened, mcast attaches, flow attaches */
        atomic_t                usecnt;
        struct list_head        open_list;
        struct ib_qp           *real_qp;
        struct ib_uobject      *uobject;
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *qp_context;
        u32                     qp_num;
        enum ib_qp_type         qp_type;
};

struct ib_mr {
        struct ib_device  *device;
        struct ib_pd      *pd;
        struct ib_uobject *uobject;
        u32                lkey;
        u32                rkey;
        atomic_t           usecnt; /* count number of MWs */
};

struct ib_mw {
        struct ib_device        *device;
        struct ib_pd            *pd;
        struct ib_uobject       *uobject;
        u32                     rkey;
        enum ib_mw_type         type;
};

struct ib_fmr {
        struct ib_device        *device;
        struct ib_pd            *pd;
        struct list_head        list;
        u32                     lkey;
        u32                     rkey;
};

/* Supported steering options */
enum ib_flow_attr_type {
        /* steering according to rule specifications */
        IB_FLOW_ATTR_NORMAL             = 0x0,
        /* default unicast and multicast rule -
         * receive all Eth traffic which isn't steered to any QP
         */
        IB_FLOW_ATTR_ALL_DEFAULT        = 0x1,
        /* default multicast rule -
         * receive all Eth multicast traffic which isn't steered to any QP
         */
        IB_FLOW_ATTR_MC_DEFAULT         = 0x2,
        /* sniffer rule - receive all port traffic */
        IB_FLOW_ATTR_SNIFFER            = 0x3
};

/* Supported steering header types */
enum ib_flow_spec_type {
        /* L2 headers */
        IB_FLOW_SPEC_ETH        = 0x20,
        IB_FLOW_SPEC_IB         = 0x22,
        /* L3 header */
        IB_FLOW_SPEC_IPV4       = 0x30,
        /* L4 headers */
        IB_FLOW_SPEC_TCP        = 0x40,
        IB_FLOW_SPEC_UDP        = 0x41
};
#define IB_FLOW_SPEC_LAYER_MASK 0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS 4

/* Flow steering rule priority is set according to its domain.
 * Lower domain value means higher priority.
 */
enum ib_flow_domain {
        IB_FLOW_DOMAIN_USER,
        IB_FLOW_DOMAIN_ETHTOOL,
        IB_FLOW_DOMAIN_RFS,
        IB_FLOW_DOMAIN_NIC,
        IB_FLOW_DOMAIN_NUM /* Must be last */
};

struct ib_flow_eth_filter {
        u8      dst_mac[6];
        u8      src_mac[6];
        __be16  ether_type;
        __be16  vlan_tag;
};

struct ib_flow_spec_eth {
        enum ib_flow_spec_type    type;
        u16                       size;
        struct ib_flow_eth_filter val;
        struct ib_flow_eth_filter mask;
};

struct ib_flow_ib_filter {
        __be16 dlid;
        __u8   sl;
};

struct ib_flow_spec_ib {
        enum ib_flow_spec_type   type;
        u16                      size;
        struct ib_flow_ib_filter val;
        struct ib_flow_ib_filter mask;
};

struct ib_flow_ipv4_filter {
        __be32  src_ip;
        __be32  dst_ip;
};

struct ib_flow_spec_ipv4 {
        enum ib_flow_spec_type     type;
        u16                        size;
        struct ib_flow_ipv4_filter val;
        struct ib_flow_ipv4_filter mask;
};

struct ib_flow_tcp_udp_filter {
        __be16  dst_port;
        __be16  src_port;
};

struct ib_flow_spec_tcp_udp {
        enum ib_flow_spec_type        type;
        u16                           size;
        struct ib_flow_tcp_udp_filter val;
        struct ib_flow_tcp_udp_filter mask;
};

union ib_flow_spec {
        struct {
                enum ib_flow_spec_type  type;
                u16                     size;
        };
        struct ib_flow_spec_eth         eth;
        struct ib_flow_spec_ib          ib;
        struct ib_flow_spec_ipv4        ipv4;
        struct ib_flow_spec_tcp_udp     tcp_udp;
};

struct ib_flow_attr {
        enum ib_flow_attr_type type;
        u16          size;
        u16          priority;
        u32          flags;
        u8           num_of_specs;
        u8           port;
        /* Following are the optional layers according to user request
         * struct ib_flow_spec_xxx
         * struct ib_flow_spec_yyy
         */
};
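
/*
 * Usage sketch (not part of the original header): attaching a single L2
 * steering rule.  The specs are laid out in memory directly after the
 * attr, as the comment in ib_flow_attr describes.  Assumes
 * ib_create_flow() as declared later in this header; val and mask are
 * left zeroed here, a real rule would fill in the fields to match.
 *
 *      struct {
 *              struct ib_flow_attr     attr;
 *              struct ib_flow_spec_eth eth;
 *      } rule = {
 *              .attr = {
 *                      .type         = IB_FLOW_ATTR_NORMAL,
 *                      .size         = sizeof(rule),
 *                      .num_of_specs = 1,
 *                      .port         = 1,
 *              },
 *              .eth = {
 *                      .type = IB_FLOW_SPEC_ETH,
 *                      .size = sizeof(struct ib_flow_spec_eth),
 *              },
 *      };
 *      struct ib_flow *flow = ib_create_flow(qp, &rule.attr,
 *                                            IB_FLOW_DOMAIN_USER);
 */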

struct ib_flow {
        struct ib_qp            *qp;
        struct ib_uobject       *uobject;
};

struct ib_mad_hdr;
struct ib_grh;

enum ib_process_mad_flags {
        IB_MAD_IGNORE_MKEY      = 1,
        IB_MAD_IGNORE_BKEY      = 2,
        IB_MAD_IGNORE_ALL       = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
        IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
        IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
        IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
        IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
        rwlock_t                lock;
        struct ib_event_handler event_handler;
        struct ib_pkey_cache  **pkey_cache;
        struct ib_gid_table   **gid_cache;
        u8                     *lmc_cache;
};

struct ib_dma_mapping_ops {
        int             (*mapping_error)(struct ib_device *dev,
                                         u64 dma_addr);
        u64             (*map_single)(struct ib_device *dev,
                                      void *ptr, size_t size,
                                      enum dma_data_direction direction);
        void            (*unmap_single)(struct ib_device *dev,
                                        u64 addr, size_t size,
                                        enum dma_data_direction direction);
        u64             (*map_page)(struct ib_device *dev,
                                    struct page *page, unsigned long offset,
                                    size_t size,
                                    enum dma_data_direction direction);
        void            (*unmap_page)(struct ib_device *dev,
                                      u64 addr, size_t size,
                                      enum dma_data_direction direction);
        int             (*map_sg)(struct ib_device *dev,
                                  struct scatterlist *sg, int nents,
                                  enum dma_data_direction direction);
        void            (*unmap_sg)(struct ib_device *dev,
                                    struct scatterlist *sg, int nents,
                                    enum dma_data_direction direction);
        void            (*sync_single_for_cpu)(struct ib_device *dev,
                                               u64 dma_handle,
                                               size_t size,
                                               enum dma_data_direction dir);
        void            (*sync_single_for_device)(struct ib_device *dev,
                                                  u64 dma_handle,
                                                  size_t size,
                                                  enum dma_data_direction dir);
        void            *(*alloc_coherent)(struct ib_device *dev,
                                           size_t size,
                                           u64 *dma_handle,
                                           gfp_t flag);
        void            (*free_coherent)(struct ib_device *dev,
                                         size_t size, void *cpu_addr,
                                         u64 dma_handle);
};

struct iw_cm_verbs;

struct ib_port_immutable {
        int                           pkey_tbl_len;
        int                           gid_tbl_len;
        u32                           core_cap_flags;
        u32                           max_mad_size;
};
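
/*
 * Sketch (not part of the original header): testing a port's protocol
 * from its immutable data, in the style of the rdma_protocol_*() helpers
 * the core builds on top of these flags.  The helper name is
 * hypothetical.
 */
static inline bool rdma_port_immutable_is_roce(const struct ib_port_immutable *pi)
{
        return pi->core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
}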

struct ib_device {
        struct device                *dma_device;

        char                          name[IB_DEVICE_NAME_MAX];

        struct list_head              event_handler_list;
        spinlock_t                    event_handler_lock;

        spinlock_t                    client_data_lock;
        struct list_head              core_list;
        /* Access to the client_data_list is protected by the client_data_lock
         * spinlock and the lists_rwsem read-write semaphore */
        struct list_head              client_data_list;

        struct ib_cache               cache;
        /**
         * port_immutable is indexed by port number
         */
        struct ib_port_immutable     *port_immutable;

        int                           num_comp_vectors;

        struct iw_cm_verbs           *iwcm;

        int                        (*get_protocol_stats)(struct ib_device *device,
                                                         union rdma_protocol_stats *stats);
        int                        (*query_device)(struct ib_device *device,
                                                   struct ib_device_attr *device_attr,
                                                   struct ib_udata *udata);
        int                        (*query_port)(struct ib_device *device,
                                                 u8 port_num,
                                                 struct ib_port_attr *port_attr);
        enum rdma_link_layer       (*get_link_layer)(struct ib_device *device,
                                                     u8 port_num);
        /* When calling get_netdev, the HW vendor's driver should return the
         * net device of device @device at port @port_num or NULL if such
         * a net device doesn't exist. The vendor driver should call dev_hold
         * on this net device. The HW vendor's device driver must guarantee
         * that this function returns NULL before the net device reaches
         * NETDEV_UNREGISTER_FINAL state.
         */
        struct net_device         *(*get_netdev)(struct ib_device *device,
                                                 u8 port_num);
        int                        (*query_gid)(struct ib_device *device,
                                                u8 port_num, int index,
                                                union ib_gid *gid);
1596        /* When calling add_gid, the HW vendor's driver should
1597         * add the gid of device @device at gid index @index of
1598         * port @port_num to be @gid. Meta-info of that gid (for example,
1599         * the network device related to this gid) is available
1600         * at @attr. @context allows the HW vendor driver to store extra
1601         * information together with a GID entry. The HW vendor may allocate
1602         * memory to contain this information and store it in @context when a
1603         * new GID entry is written. Params are consistent until the next
1604         * call of add_gid or del_gid. The function should return 0 on
1605         * success or an error otherwise. It may be called
1606         * concurrently for different ports. This function is only called
1607         * when roce_gid_table is used.
1608         */
1609        int                        (*add_gid)(struct ib_device *device,
1610                                              u8 port_num,
1611                                              unsigned int index,
1612                                              const union ib_gid *gid,
1613                                              const struct ib_gid_attr *attr,
1614                                              void **context);
1615        /* When calling del_gid, the HW vendor's driver should delete the
1616         * gid of device @device at gid index @index of port @port_num.
1617         * Upon the deletion of a GID entry, the HW vendor must free any
1618         * allocated memory. The caller will clear @context afterwards.
1619         * This function is only called when roce_gid_table is used.
1620         */
1621        int                        (*del_gid)(struct ib_device *device,
1622                                              u8 port_num,
1623                                              unsigned int index,
1624                                              void **context);
1625        int                        (*query_pkey)(struct ib_device *device,
1626                                                 u8 port_num, u16 index, u16 *pkey);
1627        int                        (*modify_device)(struct ib_device *device,
1628                                                    int device_modify_mask,
1629                                                    struct ib_device_modify *device_modify);
1630        int                        (*modify_port)(struct ib_device *device,
1631                                                  u8 port_num, int port_modify_mask,
1632                                                  struct ib_port_modify *port_modify);
1633        struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
1634                                                     struct ib_udata *udata);
1635        int                        (*dealloc_ucontext)(struct ib_ucontext *context);
1636        int                        (*mmap)(struct ib_ucontext *context,
1637                                           struct vm_area_struct *vma);
1638        struct ib_pd *             (*alloc_pd)(struct ib_device *device,
1639                                               struct ib_ucontext *context,
1640                                               struct ib_udata *udata);
1641        int                        (*dealloc_pd)(struct ib_pd *pd);
1642        struct ib_ah *             (*create_ah)(struct ib_pd *pd,
1643                                                struct ib_ah_attr *ah_attr);
1644        int                        (*modify_ah)(struct ib_ah *ah,
1645                                                struct ib_ah_attr *ah_attr);
1646        int                        (*query_ah)(struct ib_ah *ah,
1647                                               struct ib_ah_attr *ah_attr);
1648        int                        (*destroy_ah)(struct ib_ah *ah);
1649        struct ib_srq *            (*create_srq)(struct ib_pd *pd,
1650                                                 struct ib_srq_init_attr *srq_init_attr,
1651                                                 struct ib_udata *udata);
1652        int                        (*modify_srq)(struct ib_srq *srq,
1653                                                 struct ib_srq_attr *srq_attr,
1654                                                 enum ib_srq_attr_mask srq_attr_mask,
1655                                                 struct ib_udata *udata);
1656        int                        (*query_srq)(struct ib_srq *srq,
1657                                                struct ib_srq_attr *srq_attr);
1658        int                        (*destroy_srq)(struct ib_srq *srq);
1659        int                        (*post_srq_recv)(struct ib_srq *srq,
1660                                                    struct ib_recv_wr *recv_wr,
1661                                                    struct ib_recv_wr **bad_recv_wr);
1662        struct ib_qp *             (*create_qp)(struct ib_pd *pd,
1663                                                struct ib_qp_init_attr *qp_init_attr,
1664                                                struct ib_udata *udata);
1665        int                        (*modify_qp)(struct ib_qp *qp,
1666                                                struct ib_qp_attr *qp_attr,
1667                                                int qp_attr_mask,
1668                                                struct ib_udata *udata);
1669        int                        (*query_qp)(struct ib_qp *qp,
1670                                               struct ib_qp_attr *qp_attr,
1671                                               int qp_attr_mask,
1672                                               struct ib_qp_init_attr *qp_init_attr);
1673        int                        (*destroy_qp)(struct ib_qp *qp);
1674        int                        (*post_send)(struct ib_qp *qp,
1675                                                struct ib_send_wr *send_wr,
1676                                                struct ib_send_wr **bad_send_wr);
1677        int                        (*post_recv)(struct ib_qp *qp,
1678                                                struct ib_recv_wr *recv_wr,
1679                                                struct ib_recv_wr **bad_recv_wr);
1680        struct ib_cq *             (*create_cq)(struct ib_device *device,
1681                                                const struct ib_cq_init_attr *attr,
1682                                                struct ib_ucontext *context,
1683                                                struct ib_udata *udata);
1684        int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
1685                                                u16 cq_period);
1686        int                        (*destroy_cq)(struct ib_cq *cq);
1687        int                        (*resize_cq)(struct ib_cq *cq, int cqe,
1688                                                struct ib_udata *udata);
1689        int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
1690                                              struct ib_wc *wc);
1691        int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
1692        int                        (*req_notify_cq)(struct ib_cq *cq,
1693                                                    enum ib_cq_notify_flags flags);
1694        int                        (*req_ncomp_notif)(struct ib_cq *cq,
1695                                                      int wc_cnt);
1696        struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
1697                                                 int mr_access_flags);
1698        struct ib_mr *             (*reg_phys_mr)(struct ib_pd *pd,
1699                                                  struct ib_phys_buf *phys_buf_array,
1700                                                  int num_phys_buf,
1701                                                  int mr_access_flags,
1702                                                  u64 *iova_start);
1703        struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
1704                                                  u64 start, u64 length,
1705                                                  u64 virt_addr,
1706                                                  int mr_access_flags,
1707                                                  struct ib_udata *udata);
1708        int                        (*rereg_user_mr)(struct ib_mr *mr,
1709                                                    int flags,
1710                                                    u64 start, u64 length,
1711                                                    u64 virt_addr,
1712                                                    int mr_access_flags,
1713                                                    struct ib_pd *pd,
1714                                                    struct ib_udata *udata);
1715        int                        (*query_mr)(struct ib_mr *mr,
1716                                               struct ib_mr_attr *mr_attr);
1717        int                        (*dereg_mr)(struct ib_mr *mr);
1718        struct ib_mr *             (*alloc_mr)(struct ib_pd *pd,
1719                                               enum ib_mr_type mr_type,
1720                                               u32 max_num_sg);
1721        struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
1722                                                                   int page_list_len);
1723        void                       (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
1724        int                        (*rereg_phys_mr)(struct ib_mr *mr,
1725                                                    int mr_rereg_mask,
1726                                                    struct ib_pd *pd,
1727                                                    struct ib_phys_buf *phys_buf_array,
1728                                                    int num_phys_buf,
1729                                                    int mr_access_flags,
1730                                                    u64 *iova_start);
1731        struct ib_mw *             (*alloc_mw)(struct ib_pd *pd,
1732                                               enum ib_mw_type type);
1733        int                        (*bind_mw)(struct ib_qp *qp,
1734                                              struct ib_mw *mw,
1735                                              struct ib_mw_bind *mw_bind);
1736        int                        (*dealloc_mw)(struct ib_mw *mw);
1737        struct ib_fmr *            (*alloc_fmr)(struct ib_pd *pd,
1738                                                int mr_access_flags,
1739                                                struct ib_fmr_attr *fmr_attr);
1740        int                        (*map_phys_fmr)(struct ib_fmr *fmr,
1741                                                   u64 *page_list, int list_len,
1742                                                   u64 iova);
1743        int                        (*unmap_fmr)(struct list_head *fmr_list);
1744        int                        (*dealloc_fmr)(struct ib_fmr *fmr);
1745        int                        (*attach_mcast)(struct ib_qp *qp,
1746                                                   union ib_gid *gid,
1747                                                   u16 lid);
1748        int                        (*detach_mcast)(struct ib_qp *qp,
1749                                                   union ib_gid *gid,
1750                                                   u16 lid);
1751        int                        (*process_mad)(struct ib_device *device,
1752                                                  int process_mad_flags,
1753                                                  u8 port_num,
1754                                                  const struct ib_wc *in_wc,
1755                                                  const struct ib_grh *in_grh,
1756                                                  const struct ib_mad_hdr *in_mad,
1757                                                  size_t in_mad_size,
1758                                                  struct ib_mad_hdr *out_mad,
1759                                                  size_t *out_mad_size,
1760                                                  u16 *out_mad_pkey_index);
1761        struct ib_xrcd *           (*alloc_xrcd)(struct ib_device *device,
1762                                                 struct ib_ucontext *ucontext,
1763                                                 struct ib_udata *udata);
1764        int                        (*dealloc_xrcd)(struct ib_xrcd *xrcd);
1765        struct ib_flow *           (*create_flow)(struct ib_qp *qp,
1766                                                  struct ib_flow_attr
1767                                                  *flow_attr,
1768                                                  int domain);
1769        int                        (*destroy_flow)(struct ib_flow *flow_id);
1770        int                        (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
1771                                                      struct ib_mr_status *mr_status);
1772        void                       (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
1773
1774        struct ib_dma_mapping_ops   *dma_ops;
1775
1776        struct module               *owner;
1777        struct device                dev;
1778        struct kobject               *ports_parent;
1779        struct list_head             port_list;
1780
1781        enum {
1782                IB_DEV_UNINITIALIZED,
1783                IB_DEV_REGISTERED,
1784                IB_DEV_UNREGISTERED
1785        }                            reg_state;
1786
1787        int                          uverbs_abi_ver;
1788        u64                          uverbs_cmd_mask;
1789        u64                          uverbs_ex_cmd_mask;
1790
1791        char                         node_desc[64];
1792        __be64                       node_guid;
1793        u32                          local_dma_lkey;
1794        u16                          is_switch:1;
1795        u8                           node_type;
1796        u8                           phys_port_cnt;
1797
1798        /**
1799         * The following mandatory functions are used only at device
1800         * registration.  Keep functions such as these at the end of this
1801         * structure to avoid cache line misses when accessing struct ib_device
1802         * in fast paths.
1803         */
1804        int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
1805};
1806
1807struct ib_client {
1808        char  *name;
1809        void (*add)   (struct ib_device *);
1810        void (*remove)(struct ib_device *, void *client_data);
1811
1812        /* Returns the net_dev belonging to this ib_client and matching the
1813         * given parameters.
1814         * @dev:         An RDMA device that the net_dev uses for communication.
1815         * @port:        A physical port number on the RDMA device.
1816         * @pkey:        P_Key that the net_dev uses if applicable.
1817         * @gid:         A GID that the net_dev uses to communicate.
1818         * @addr:        An IP address the net_dev is configured with.
1819         * @client_data: The device's client data set by ib_set_client_data().
1820         *
1821         * An ib_client that implements a net_dev on top of RDMA devices
1822         * (such as IP over IB) should implement this callback, allowing the
1823         * rdma_cm module to find the right net_dev for a given request.
1824         *
1825         * The caller is responsible for calling dev_put on the returned
1826         * netdev. */
1827        struct net_device *(*get_net_dev_by_params)(
1828                        struct ib_device *dev,
1829                        u8 port,
1830                        u16 pkey,
1831                        const union ib_gid *gid,
1832                        const struct sockaddr *addr,
1833                        void *client_data);
1834        struct list_head list;
1835};
1836
1837struct ib_device *ib_alloc_device(size_t size);
1838void ib_dealloc_device(struct ib_device *device);
1839
1840int ib_register_device(struct ib_device *device,
1841                       int (*port_callback)(struct ib_device *,
1842                                            u8, struct kobject *));
1843void ib_unregister_device(struct ib_device *device);
1844
1845int ib_register_client   (struct ib_client *client);
1846void ib_unregister_client(struct ib_client *client);
1847
1848void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
1849void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
1850                         void *data);
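
/*
 * Usage sketch (illustrative only; "my_client", "my_add_one",
 * "my_remove_one" and struct my_state are placeholders, not part of this
 * header): a minimal client that stashes per-device state through the
 * client-data API above.
 *
 *	static struct ib_client my_client;
 *
 *	static void my_add_one(struct ib_device *device)
 *	{
 *		struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);
 *
 *		if (st)
 *			ib_set_client_data(device, &my_client, st);
 *	}
 *
 *	static void my_remove_one(struct ib_device *device, void *client_data)
 *	{
 *		kfree(client_data);
 *	}
 *
 *	static struct ib_client my_client = {
 *		.name   = "my_client",
 *		.add    = my_add_one,
 *		.remove = my_remove_one,
 *	};
 *
 * The client is then registered with ib_register_client(&my_client) at
 * module init and torn down with ib_unregister_client(&my_client).
 */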
1851
1852static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
1853{
1854        return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
1855}
1856
1857static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
1858{
1859        return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
1860}
1861
1862/**
1863 * ib_modify_qp_is_ok - Check that the supplied attribute mask
1864 * contains all required attributes and no attributes not allowed for
1865 * the given QP state transition.
1866 * @cur_state: Current QP state
1867 * @next_state: Next QP state
1868 * @type: QP type
1869 * @mask: Mask of supplied QP attributes
1870 * @ll : link layer of port
1871 *
1872 * This function is a helper function that a low-level driver's
1873 * modify_qp method can use to validate the consumer's input.  It
1874 * checks that cur_state and next_state are valid QP states, that a
1875 * transition from cur_state to next_state is allowed by the IB spec,
1876 * and that the attribute mask supplied is allowed for the transition.
1877 */
1878int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
1879                       enum ib_qp_type type, enum ib_qp_attr_mask mask,
1880                       enum rdma_link_layer ll);
1881
1882int ib_register_event_handler  (struct ib_event_handler *event_handler);
1883int ib_unregister_event_handler(struct ib_event_handler *event_handler);
1884void ib_dispatch_event(struct ib_event *event);
1885
1886int ib_query_device(struct ib_device *device,
1887                    struct ib_device_attr *device_attr);
1888
1889int ib_query_port(struct ib_device *device,
1890                  u8 port_num, struct ib_port_attr *port_attr);
1891
1892enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
1893                                               u8 port_num);
1894
1895/**
1896 * rdma_cap_ib_switch - Check if the device is an IB switch
1897 * @device: Device to check
1898 *
1899 * The device driver is responsible for setting the is_switch bit
1900 * in the ib_device structure at init time.
1901 *
1902 * Return: true if the device is IB switch.
1903 */
1904static inline bool rdma_cap_ib_switch(const struct ib_device *device)
1905{
1906        return device->is_switch;
1907}
1908
1909/**
1910 * rdma_start_port - Return the first valid port number for the device
1911 * specified
1912 *
1913 * @device: Device to be checked
1914 *
1915 * Return: the first valid port number
1916 */
1917static inline u8 rdma_start_port(const struct ib_device *device)
1918{
1919        return rdma_cap_ib_switch(device) ? 0 : 1;
1920}
1921
1922/**
1923 * rdma_end_port - Return the last valid port number for the device
1924 * specified
1925 *
1926 * @device: Device to be checked
1927 *
1928 * Return: the last valid port number
1929 */
1930static inline u8 rdma_end_port(const struct ib_device *device)
1931{
1932        return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
1933}
1934
1935static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
1936{
1937        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
1938}
1939
1940static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
1941{
1942        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
1943}
1944
1945static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
1946{
1947        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
1948}
1949
1950static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
1951{
1952        return device->port_immutable[port_num].core_cap_flags &
1953                (RDMA_CORE_CAP_PROT_IB | RDMA_CORE_CAP_PROT_ROCE);
1954}
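
/*
 * Usage sketch (illustrative only; the setup_*_port() callees are
 * placeholders): the standard way to walk every valid port on a device
 * and branch on its protocol, using the helpers above.  Switches expose
 * the single management port 0 while CAs number ports starting at 1, so
 * the loop bounds must come from rdma_start_port() and rdma_end_port().
 *
 *	u8 port;
 *
 *	for (port = rdma_start_port(device);
 *	     port <= rdma_end_port(device); port++) {
 *		if (rdma_protocol_ib(device, port))
 *			setup_ib_port(device, port);
 *		else if (rdma_protocol_iwarp(device, port))
 *			setup_iwarp_port(device, port);
 *	}
 */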
1955
1956/**
1957 * rdma_cap_ib_mad - Check if the port of a device supports InfiniBand
1958 * Management Datagrams.
1959 * @device: Device to check
1960 * @port_num: Port number to check
1961 *
1962 * Management Datagrams (MAD) are a required part of the InfiniBand
1963 * specification and are supported on all InfiniBand devices.  A slightly
1964 * extended version is also supported on OPA interfaces.
1965 *
1966 * Return: true if the port supports sending/receiving of MAD packets.
1967 */
1968static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
1969{
1970        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
1971}
1972
1973/**
1974 * rdma_cap_opa_mad - Check if the port of a device provides support for OPA
1975 * Management Datagrams.
1976 * @device: Device to check
1977 * @port_num: Port number to check
1978 *
1979 * Intel OmniPath devices extend and/or replace the InfiniBand Management
1980 * datagrams with their own versions.  These OPA MADs share many but not all of
1981 * the characteristics of InfiniBand MADs.
1982 *
1983 * OPA MADs differ in the following ways:
1984 *
1985 *    1) MADs are variable size up to 2K
1986 *       IBTA defined MADs remain fixed at 256 bytes
1987 *    2) OPA SMPs must carry valid PKeys
1988 *    3) OPA SMP packets are a different format
1989 *
1990 * Return: true if the port supports OPA MAD packet formats.
1991 */
1992static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
1993{
1994        return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
1995                == RDMA_CORE_CAP_OPA_MAD;
1996}
1997
1998/**
1999 * rdma_cap_ib_smi - Check if the port of a device provides an InfiniBand
2000 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
2001 * @device: Device to check
2002 * @port_num: Port number to check
2003 *
2004 * Each InfiniBand node is required to provide a Subnet Management Agent
2005 * that the subnet manager can access.  Prior to the fabric being fully
2006 * configured by the subnet manager, the SMA is accessed via a well-known
2007 * interface called the Subnet Management Interface (SMI).  This interface
2008 * uses directed route packets to communicate with the SM to get around the
2009 * chicken and egg problem of the SM needing to know what's on the fabric
2010 * in order to configure the fabric, and needing to configure the fabric in
2011 * order to send packets to the devices on the fabric.  These directed
2012 * route packets do not need the fabric fully configured in order to reach
2013 * their destination.  The SMI is the only method allowed to send
2014 * directed route packets on an InfiniBand fabric.
2015 *
2016 * Return: true if the port provides an SMI.
2017 */
2018static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
2019{
2020        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
2021}
2022
2023/**
2024 * rdma_cap_ib_cm - Check if the port of a device supports the InfiniBand
2025 * Communication Manager.
2026 * @device: Device to check
2027 * @port_num: Port number to check
2028 *
2029 * The InfiniBand Communication Manager is one of many pre-defined General
2030 * Service Agents (GSA) that are accessed via the General Service
2031 * Interface (GSI).  Its role is to facilitate the establishment of connections
2032 * between nodes as well as other management related tasks for established
2033 * connections.
2034 *
2035 * Return: true if the port supports an IB CM (this does not guarantee that
2036 * a CM is actually running however).
2037 */
2038static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
2039{
2040        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
2041}
2042
2043/**
2044 * rdma_cap_iw_cm - Check if the port of a device supports the iWARP
2045 * Communication Manager.
2046 * @device: Device to check
2047 * @port_num: Port number to check
2048 *
2049 * Similar to rdma_cap_ib_cm, but specific to iWARP connections, which use
2050 * a different management protocol than InfiniBand.
2051 *
2052 * Return: true if the port supports an iWARP CM (this does not guarantee that
2053 * a CM is actually running however).
2054 */
2055static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
2056{
2057        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
2058}
2059
2060/**
2061 * rdma_cap_ib_sa - Check if the port of a device supports InfiniBand
2062 * Subnet Administration.
2063 * @device: Device to check
2064 * @port_num: Port number to check
2065 *
2066 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
2067 * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
2068 * fabrics, devices should resolve routes to other hosts by contacting the
2069 * SA to query the proper route.
2070 *
2071 * Return: true if the port should act as a client to the fabric Subnet
2072 * Administration interface.  This does not imply that the SA service is
2073 * running locally.
2074 */
2075static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
2076{
2077        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
2078}
2079
2080/**
2081 * rdma_cap_ib_mcast - Check if the port of a device supports InfiniBand
2082 * multicast.
2083 * @device: Device to check
2084 * @port_num: Port number to check
2085 *
2086 * InfiniBand multicast registration is more complex than normal IPv4 or
2087 * IPv6 multicast registration.  Each Host Channel Adapter must register
2088 * with the Subnet Manager when it wishes to join a multicast group.  It
2089 * should do so only once, regardless of how many queue pairs it attaches
2090 * to the group, and it should leave the group only after all queue pairs
2091 * attached to the group have been detached.
2092 *
2093 * Return: true if the port must undertake the additional administrative
2094 * overhead of registering/unregistering with the SM and tracking the
2095 * total number of queue pairs attached to the multicast group.
2096 */
2097static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
2098{
2099        return rdma_cap_ib_sa(device, port_num);
2100}
2101
2102/**
2103 * rdma_cap_af_ib - Check if the port of a device supports
2104 * native InfiniBand addressing.
2105 * @device: Device to check
2106 * @port_num: Port number to check
2107 *
2108 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
2109 * GID.  RoCE uses a different mechanism, but still generates a GID via
2110 * a prescribed mechanism and port-specific data.
2111 *
2112 * Return: true if the port uses a GID address to identify devices on the
2113 * network.
2114 */
2115static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
2116{
2117        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
2118}
2119
2120/**
2121 * rdma_cap_eth_ah - Check if the port of a device supports
2122 * Ethernet Address Handles.
2123 * @device: Device to check
2124 * @port_num: Port number to check
2125 *
2126 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
2127 * to fabricate GIDs over Ethernet/IP specific addresses native to the
2128 * port.  Normally, packet headers are generated by the sending host
2129 * adapter, but when sending connectionless datagrams, we must manually
2130 * inject the proper headers for the fabric we are communicating over.
2131 *
2132 * Return: true if we are running as a RoCE port and must force the
2133 * addition of a Global Route Header built from our Ethernet Address
2134 * Handle into our header list for connectionless packets.
2135 */
2136static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
2137{
2138        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
2139}
2140
2141/**
2142 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
2143 *
2144 * @device: Device
2145 * @port_num: Port number
2146 *
2147 * This MAD size includes the MAD headers and MAD payload.  No other headers
2148 * are included.
2149 *
2150 * Return the max MAD size required by the port.  Will return 0 if the port
2151 * does not support MADs.
2152 */
2153static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
2154{
2155        return device->port_immutable[port_num].max_mad_size;
2156}
2157
2158/**
2159 * rdma_cap_roce_gid_table - Check if the port of a device uses roce_gid_table
2160 * @device: Device to check
2161 * @port_num: Port number to check
2162 *
2163 * The RoCE GID table mechanism manages the various GIDs for a device.
2164 *
2165 * NOTE: if allocating the port's GID table has failed, this call will still
2166 * return true, but any RoCE GID table API will fail.
2167 *
2168 * Return: true if the port uses RoCE GID table mechanism in order to manage
2169 * its GIDs.
2170 */
2171static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
2172                                           u8 port_num)
2173{
2174        return rdma_protocol_roce(device, port_num) &&
2175                device->add_gid && device->del_gid;
2176}
2177
2178int ib_query_gid(struct ib_device *device,
2179                 u8 port_num, int index, union ib_gid *gid);
2180
2181int ib_query_pkey(struct ib_device *device,
2182                  u8 port_num, u16 index, u16 *pkey);
2183
2184int ib_modify_device(struct ib_device *device,
2185                     int device_modify_mask,
2186                     struct ib_device_modify *device_modify);
2187
2188int ib_modify_port(struct ib_device *device,
2189                   u8 port_num, int port_modify_mask,
2190                   struct ib_port_modify *port_modify);
2191
2192int ib_find_gid(struct ib_device *device, union ib_gid *gid,
2193                u8 *port_num, u16 *index);
2194
2195int ib_find_pkey(struct ib_device *device,
2196                 u8 port_num, u16 pkey, u16 *index);
2197
2198struct ib_pd *ib_alloc_pd(struct ib_device *device);
2199
2200void ib_dealloc_pd(struct ib_pd *pd);
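
/*
 * Usage sketch (illustrative; error handling abbreviated): protection
 * domain setup and teardown.  ib_alloc_pd() returns an ERR_PTR-encoded
 * error on failure.
 *
 *	struct ib_pd *pd = ib_alloc_pd(device);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...
 *	ib_dealloc_pd(pd);
 */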
2201
2202/**
2203 * ib_create_ah - Creates an address handle for the given address vector.
2204 * @pd: The protection domain associated with the address handle.
2205 * @ah_attr: The attributes of the address vector.
2206 *
2207 * The address handle is used to reference a local or global destination
2208 * in all UD QP post sends.
2209 */
2210struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
2211
2212/**
2213 * ib_init_ah_from_wc - Initializes address handle attributes from a
2214 *   work completion.
2215 * @device: Device on which the received message arrived.
2216 * @port_num: Port on which the received message arrived.
2217 * @wc: Work completion associated with the received message.
2218 * @grh: References the received global route header.  This parameter is
2219 *   ignored unless the work completion indicates that the GRH is valid.
2220 * @ah_attr: Returned attributes that can be used when creating an address
2221 *   handle for replying to the message.
2222 */
2223int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
2224                       const struct ib_wc *wc, const struct ib_grh *grh,
2225                       struct ib_ah_attr *ah_attr);
2226
2227/**
2228 * ib_create_ah_from_wc - Creates an address handle associated with the
2229 *   sender of the specified work completion.
2230 * @pd: The protection domain associated with the address handle.
2231 * @wc: Work completion information associated with a received message.
2232 * @grh: References the received global route header.  This parameter is
2233 *   ignored unless the work completion indicates that the GRH is valid.
2234 * @port_num: The outbound port number to associate with the address.
2235 *
2236 * The address handle is used to reference a local or global destination
2237 * in all UD QP post sends.
2238 */
2239struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
2240                                   const struct ib_grh *grh, u8 port_num);
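
/*
 * Usage sketch (illustrative): turning a received UD datagram straight
 * into an address handle for the reply.  When the work completion has
 * IB_WC_GRH set, the GRH occupies the first bytes of the receive buffer.
 *
 *	struct ib_ah *ah = ib_create_ah_from_wc(pd, wc, grh, port_num);
 *
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	... post the reply on a UD QP using ah ...
 *	ib_destroy_ah(ah);
 */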
2241
2242/**
2243 * ib_modify_ah - Modifies the address vector associated with an address
2244 *   handle.
2245 * @ah: The address handle to modify.
2246 * @ah_attr: The new address vector attributes to associate with the
2247 *   address handle.
2248 */
2249int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
2250
2251/**
2252 * ib_query_ah - Queries the address vector associated with an address
2253 *   handle.
2254 * @ah: The address handle to query.
2255 * @ah_attr: The address vector attributes associated with the address
2256 *   handle.
2257 */
2258int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
2259
2260/**
2261 * ib_destroy_ah - Destroys an address handle.
2262 * @ah: The address handle to destroy.
2263 */
2264int ib_destroy_ah(struct ib_ah *ah);
2265
2266/**
2267 * ib_create_srq - Creates a SRQ associated with the specified protection
2268 *   domain.
2269 * @pd: The protection domain associated with the SRQ.
2270 * @srq_init_attr: A list of initial attributes required to create the
2271 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
2272 *   the actual capabilities of the created SRQ.
2273 *
2274 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
2275 * requested size of the SRQ, and set to the actual values allocated
2276 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
2277 * will always be at least as large as the requested values.
2278 */
2279struct ib_srq *ib_create_srq(struct ib_pd *pd,
2280                             struct ib_srq_init_attr *srq_init_attr);
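
/*
 * Usage sketch (illustrative; the sizes are arbitrary).  On success the
 * attr fields are updated to the actual capabilities, per the note above.
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.attr = { .max_wr = 256, .max_sge = 1 },
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &srq_attr);
 *
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 */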
2281
2282/**
2283 * ib_modify_srq - Modifies the attributes for the specified SRQ.
2284 * @srq: The SRQ to modify.
2285 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
2286 *   the current values of selected SRQ attributes are returned.
2287 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
2288 *   are being modified.
2289 *
2290 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
2291 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
2292 * the number of receives queued drops below the limit.
2293 */
2294int ib_modify_srq(struct ib_srq *srq,
2295                  struct ib_srq_attr *srq_attr,
2296                  enum ib_srq_attr_mask srq_attr_mask);
2297
2298/**
2299 * ib_query_srq - Returns the attribute list and current values for the
2300 *   specified SRQ.
2301 * @srq: The SRQ to query.
2302 * @srq_attr: The attributes of the specified SRQ.
2303 */
2304int ib_query_srq(struct ib_srq *srq,
2305                 struct ib_srq_attr *srq_attr);
2306
2307/**
2308 * ib_destroy_srq - Destroys the specified SRQ.
2309 * @srq: The SRQ to destroy.
2310 */
2311int ib_destroy_srq(struct ib_srq *srq);
2312
2313/**
2314 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
2315 * @srq: The SRQ to post the work request on.
2316 * @recv_wr: A list of work requests to post on the receive queue.
2317 * @bad_recv_wr: On an immediate failure, this parameter will reference
2318 *   the work request that failed to be posted on the SRQ.
2319 */
2320static inline int ib_post_srq_recv(struct ib_srq *srq,
2321                                   struct ib_recv_wr *recv_wr,
2322                                   struct ib_recv_wr **bad_recv_wr)
2323{
2324        return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
2325}
2326
2327/**
2328 * ib_create_qp - Creates a QP associated with the specified protection
2329 *   domain.
2330 * @pd: The protection domain associated with the QP.
2331 * @qp_init_attr: A list of initial attributes required to create the
2332 *   QP.  If QP creation succeeds, then the attributes are updated to
2333 *   the actual capabilities of the created QP.
2334 */
2335struct ib_qp *ib_create_qp(struct ib_pd *pd,
2336                           struct ib_qp_init_attr *qp_init_attr);
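
/*
 * Usage sketch (illustrative; capacities are arbitrary): a plain RC QP
 * with both work queues completing to the same CQ.
 *
 *	struct ib_qp_init_attr init = {
 *		.send_cq     = cq,
 *		.recv_cq     = cq,
 *		.cap         = { .max_send_wr  = 64, .max_recv_wr  = 64,
 *				 .max_send_sge = 1,  .max_recv_sge = 1 },
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */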
2337
2338/**
2339 * ib_modify_qp - Modifies the attributes for the specified QP and then
2340 *   transitions the QP to the given state.
2341 * @qp: The QP to modify.
2342 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
2343 *   the current values of selected QP attributes are returned.
2344 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
2345 *   are being modified.
2346 */
2347int ib_modify_qp(struct ib_qp *qp,
2348                 struct ib_qp_attr *qp_attr,
2349                 int qp_attr_mask);
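
/*
 * Usage sketch (illustrative): the first transition when bringing an RC
 * QP up, RESET -> INIT.  Each transition must supply exactly the
 * attributes required for that state change; drivers validate the mask
 * with ib_modify_qp_is_ok().
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state        = IB_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = port,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *	int ret = ib_modify_qp(qp, &attr,
 *			       IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			       IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */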
2350
2351/**
2352 * ib_query_qp - Returns the attribute list and current values for the
2353 *   specified QP.
2354 * @qp: The QP to query.
2355 * @qp_attr: The attributes of the specified QP.
2356 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
2357 * @qp_init_attr: Additional attributes of the selected QP.
2358 *
2359 * The qp_attr_mask may be used to limit the query to gathering only the
2360 * selected attributes.
2361 */
2362int ib_query_qp(struct ib_qp *qp,
2363                struct ib_qp_attr *qp_attr,
2364                int qp_attr_mask,
2365                struct ib_qp_init_attr *qp_init_attr);
2366
2367/**
2368 * ib_destroy_qp - Destroys the specified QP.
2369 * @qp: The QP to destroy.
2370 */
2371int ib_destroy_qp(struct ib_qp *qp);
2372
2373/**
2374 * ib_open_qp - Obtain a reference to an existing sharable QP.
2375 * @xrcd: XRC domain
2376 * @qp_open_attr: Attributes identifying the QP to open.
2377 *
2378 * Returns a reference to a sharable QP.
2379 */
2380struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
2381                         struct ib_qp_open_attr *qp_open_attr);
2382
2383/**
2384 * ib_close_qp - Release an external reference to a QP.
2385 * @qp: The QP handle to release
2386 *
2387 * The opened QP handle is released by the caller.  The underlying
2388 * shared QP is not destroyed until all internal references are released.
2389 */
2390int ib_close_qp(struct ib_qp *qp);
2391
2392/**
2393 * ib_post_send - Posts a list of work requests to the send queue of
2394 *   the specified QP.
2395 * @qp: The QP to post the work request on.
2396 * @send_wr: A list of work requests to post on the send queue.
2397 * @bad_send_wr: On an immediate failure, this parameter will reference
2398 *   the work request that failed to be posted on the QP.
2399 *
2400 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
2401 * error is returned, the QP state shall not be affected,
2402 * ib_post_send() will return an immediate error after queueing any
2403 * earlier work requests in the list.
2404 */
2405static inline int ib_post_send(struct ib_qp *qp,
2406                               struct ib_send_wr *send_wr,
2407                               struct ib_send_wr **bad_send_wr)
2408{
2409        return qp->device->post_send(qp, send_wr, bad_send_wr);
2410}
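
/*
 * Usage sketch (illustrative; "dma_addr", "len" and SEND_COOKIE are
 * placeholders): posting one signaled SEND of a single registered
 * buffer.
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id      = SEND_COOKIE,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	}, *bad_wr;
 *
 *	int ret = ib_post_send(qp, &wr, &bad_wr);
 */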
2411
2412/**
2413 * ib_post_recv - Posts a list of work requests to the receive queue of
2414 *   the specified QP.
2415 * @qp: The QP to post the work request on.
2416 * @recv_wr: A list of work requests to post on the receive queue.
2417 * @bad_recv_wr: On an immediate failure, this parameter will reference
2418 *   the work request that failed to be posted on the QP.
2419 */
2420static inline int ib_post_recv(struct ib_qp *qp,
2421                               struct ib_recv_wr *recv_wr,
2422                               struct ib_recv_wr **bad_recv_wr)
2423{
2424        return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
2425}
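
/*
 * Usage sketch (illustrative; RECV_COOKIE is a placeholder): the
 * receive-side counterpart.  The buffer must remain mapped until the
 * matching completion has been reaped.
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id   = RECV_COOKIE,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	}, *bad_wr;
 *
 *	int ret = ib_post_recv(qp, &wr, &bad_wr);
 */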
2426
2427/**
2428 * ib_create_cq - Creates a CQ on the specified device.
2429 * @device: The device on which to create the CQ.
2430 * @comp_handler: A user-specified callback that is invoked when a
2431 *   completion event occurs on the CQ.
2432 * @event_handler: A user-specified callback that is invoked when an
2433 *   asynchronous event not associated with a completion occurs on the CQ.
2434 * @cq_context: Context associated with the CQ returned to the user via
2435 *   the associated completion and event handlers.
2436 * @cq_attr: The attributes with which the CQ should be created.
2437 *
2438 * Users can examine the cq structure to determine the actual CQ size.
2439 */
2440struct ib_cq *ib_create_cq(struct ib_device *device,
2441                           ib_comp_handler comp_handler,
2442                           void (*event_handler)(struct ib_event *, void *),
2443                           void *cq_context,
2444                           const struct ib_cq_init_attr *cq_attr);
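
/*
 * Usage sketch (illustrative; "my_comp_handler" and "my_ctx" are
 * placeholders): a CQ of at least 128 entries on completion vector 0.
 *
 *	struct ib_cq_init_attr cq_attr = { .cqe = 128, .comp_vector = 0 };
 *	struct ib_cq *cq = ib_create_cq(device, my_comp_handler, NULL,
 *					my_ctx, &cq_attr);
 *
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */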
2445
2446/**
2447 * ib_resize_cq - Modifies the capacity of the CQ.
2448 * @cq: The CQ to resize.
2449 * @cqe: The minimum size of the CQ.
2450 *
2451 * Users can examine the cq structure to determine the actual CQ size.
2452 */
2453int ib_resize_cq(struct ib_cq *cq, int cqe);
2454
2455/**
2456 * ib_modify_cq - Modifies the moderation parameters of the CQ
2457 * @cq: The CQ to modify.
2458 * @cq_count: number of CQEs that will trigger an event
2459 * @cq_period: max period of time in usec before triggering an event
2460 *
2461 */
2462int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
2463
2464/**
2465 * ib_destroy_cq - Destroys the specified CQ.
2466 * @cq: The CQ to destroy.
2467 */
2468int ib_destroy_cq(struct ib_cq *cq);
2469
2470/**
2471 * ib_poll_cq - poll a CQ for completion(s)
2472 * @cq: the CQ being polled
2473 * @num_entries: maximum number of completions to return
2474 * @wc: array of at least @num_entries &struct ib_wc where completions
2475 *   will be returned
2476 *
2477 * Poll a CQ for (possibly multiple) completions.  If the return value
2478 * is < 0, an error occurred.  If the return value is >= 0, it is the
2479 * number of completions returned.  If the return value is
2480 * non-negative and < num_entries, then the CQ was emptied.
2481 */
2482static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
2483                             struct ib_wc *wc)
2484{
2485        return cq->device->poll_cq(cq, num_entries, wc);
2486}
2487
2488/**
2489 * ib_peek_cq - Returns the number of unreaped completions currently
2490 *   on the specified CQ.
2491 * @cq: The CQ to peek.
2492 * @wc_cnt: A minimum number of unreaped completions to check for.
2493 *
2494 * If the number of unreaped completions is greater than or equal to wc_cnt,
2495 * this function returns wc_cnt; otherwise, it returns the actual number of
2496 * unreaped completions.
2497 */
2498int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
2499
2500/**
2501 * ib_req_notify_cq - Request completion notification on a CQ.
2502 * @cq: The CQ to generate an event for.
2503 * @flags:
2504 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
2505 *   to request an event on the next solicited event or next work
2506 *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
2507 *   may also be |ed in to request a hint about missed events, as
2508 *   described below.
2509 *
2510 * Return Value:
2511 *    < 0 means an error occurred while requesting notification
2512 *   == 0 means notification was requested successfully, and if
2513 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
2514 *        were missed and it is safe to wait for another event.  In
2515 *        this case it is guaranteed that any work completions added
2516 *        to the CQ since the last CQ poll will trigger a completion
2517 *        notification event.
2518 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
2519 *        in.  It means that the consumer must poll the CQ again to
2520 *        make sure it is empty to avoid missing an event because of a
2521 *        race between requesting notification and an entry being
2522 *        added to the CQ.  This return value means it is possible
2523 *        (but not guaranteed) that a work completion has been added
2524 *        to the CQ since the last poll without triggering a
2525 *        completion notification event.
2526 */
2527static inline int ib_req_notify_cq(struct ib_cq *cq,
2528                                   enum ib_cq_notify_flags flags)
2529{
2530        return cq->device->req_notify_cq(cq, flags);
2531}
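
/*
 * Usage sketch (illustrative; "handle" is a placeholder): the
 * drain/re-arm idiom the return values above are designed for.
 * Re-polling whenever the re-arm reports possibly-missed events closes
 * the race between arming the CQ and a completion slipping in.
 *
 *	struct ib_wc wc;
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */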
2532
2533/**
2534 * ib_req_ncomp_notif - Request completion notification when there are
2535 *   at least the specified number of unreaped completions on the CQ.
2536 * @cq: The CQ to generate an event for.
2537 * @wc_cnt: The number of unreaped completions that should be on the
2538 *   CQ before an event is generated.
2539 */
2540static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
2541{
2542        return cq->device->req_ncomp_notif ?
2543                cq->device->req_ncomp_notif(cq, wc_cnt) :
2544                -ENOSYS;
2545}
2546
2547/**
2548 * ib_get_dma_mr - Returns a memory region for system memory that is
2549 *   usable for DMA.
2550 * @pd: The protection domain associated with the memory region.
2551 * @mr_access_flags: Specifies the memory access rights.
2552 *
2553 * Note that the ib_dma_*() functions defined below must be used
2554 * to create/destroy addresses used with the Lkey or Rkey returned
2555 * by ib_get_dma_mr().
2556 */
2557struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
2558
2559/**
2560 * ib_dma_mapping_error - check a DMA addr for error
2561 * @dev: The device for which the dma_addr was created
2562 * @dma_addr: The DMA address to check
2563 */
2564static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
2565{
2566        if (dev->dma_ops)
2567                return dev->dma_ops->mapping_error(dev, dma_addr);
2568        return dma_mapping_error(dev->dma_device, dma_addr);
2569}
2570
2571/**
2572 * ib_dma_map_single - Map a kernel virtual address to DMA address
2573 * @dev: The device for which the dma_addr is to be created
2574 * @cpu_addr: The kernel virtual address
2575 * @size: The size of the region in bytes
2576 * @direction: The direction of the DMA
2577 */
2578static inline u64 ib_dma_map_single(struct ib_device *dev,
2579                                    void *cpu_addr, size_t size,
2580                                    enum dma_data_direction direction)
2581{
2582        if (dev->dma_ops)
2583                return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
2584        return dma_map_single(dev->dma_device, cpu_addr, size, direction);
2585}
2586
2587/**
2588 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
2589 * @dev: The device for which the DMA address was created
2590 * @addr: The DMA address
2591 * @size: The size of the region in bytes
2592 * @direction: The direction of the DMA
2593 */
2594static inline void ib_dma_unmap_single(struct ib_device *dev,
2595                                       u64 addr, size_t size,
2596                                       enum dma_data_direction direction)
2597{
2598        if (dev->dma_ops)
2599                dev->dma_ops->unmap_single(dev, addr, size, direction);
2600        else
2601                dma_unmap_single(dev->dma_device, addr, size, direction);
2602}
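
/*
 * Usage sketch (illustrative; "buf" and "len" are placeholders): mapping
 * a kernel buffer for the device to read, always checking the mapping
 * before handing the address to the HCA.
 *
 *	u64 dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (ib_dma_mapping_error(dev, dma_addr))
 *		return -ENOMEM;
 *	... build an ib_sge from dma_addr and post the work request ...
 *	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
 */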
2603
2604static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
2605                                          void *cpu_addr, size_t size,
2606                                          enum dma_data_direction direction,
2607                                          struct dma_attrs *attrs)
2608{
2609        return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
2610                                    direction, attrs);
2611}
2612
2613static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
2614                                             u64 addr, size_t size,
2615                                             enum dma_data_direction direction,
2616                                             struct dma_attrs *attrs)
2617{
2618        return dma_unmap_single_attrs(dev->dma_device, addr, size,
2619                                      direction, attrs);
2620}
2621
2622/**
2623 * ib_dma_map_page - Map a physical page to DMA address
2624 * @dev: The device for which the dma_addr is to be created
2625 * @page: The page to be mapped
2626 * @offset: The offset within the page
2627 * @size: The size of the region in bytes
2628 * @direction: The direction of the DMA
2629 */
2630static inline u64 ib_dma_map_page(struct ib_device *dev,
2631                                  struct page *page,
2632                                  unsigned long offset,
2633                                  size_t size,
2634                                  enum dma_data_direction direction)
2635{
2636        if (dev->dma_ops)
2637                return dev->dma_ops->map_page(dev, page, offset, size, direction);
2638        return dma_map_page(dev->dma_device, page, offset, size, direction);
2639}
2640
2641/**
2642 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
2643 * @dev: The device for which the DMA address was created
2644 * @addr: The DMA address
2645 * @size: The size of the region in bytes
2646 * @direction: The direction of the DMA
2647 */
2648static inline void ib_dma_unmap_page(struct ib_device *dev,
2649                                     u64 addr, size_t size,
2650                                     enum dma_data_direction direction)
2651{
2652        if (dev->dma_ops)
2653                dev->dma_ops->unmap_page(dev, addr, size, direction);
2654        else
2655                dma_unmap_page(dev->dma_device, addr, size, direction);
2656}
2657
2658/**
2659 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
2660 * @dev: The device for which the DMA addresses are to be created
2661 * @sg: The array of scatter/gather entries
2662 * @nents: The number of scatter/gather entries
2663 * @direction: The direction of the DMA
2664 */
2665static inline int ib_dma_map_sg(struct ib_device *dev,
2666                                struct scatterlist *sg, int nents,
2667                                enum dma_data_direction direction)
2668{
2669        if (dev->dma_ops)
2670                return dev->dma_ops->map_sg(dev, sg, nents, direction);
2671        return dma_map_sg(dev->dma_device, sg, nents, direction);
2672}
2673
2674/**
2675 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
2676 * @dev: The device for which the DMA addresses were created
2677 * @sg: The array of scatter/gather entries
2678 * @nents: The number of scatter/gather entries
2679 * @direction: The direction of the DMA
2680 */
2681static inline void ib_dma_unmap_sg(struct ib_device *dev,
2682                                   struct scatterlist *sg, int nents,
2683                                   enum dma_data_direction direction)
2684{
2685        if (dev->dma_ops)
2686                dev->dma_ops->unmap_sg(dev, sg, nents, direction);
2687        else
2688                dma_unmap_sg(dev->dma_device, sg, nents, direction);
2689}
2690
2691static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
2692                                      struct scatterlist *sg, int nents,
2693                                      enum dma_data_direction direction,
2694                                      struct dma_attrs *attrs)
2695{
2696        return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
2697}
2698
2699static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
2700                                         struct scatterlist *sg, int nents,
2701                                         enum dma_data_direction direction,
2702                                         struct dma_attrs *attrs)
2703{
2704        dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
2705}
2706/**
2707 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
2708 * @dev: The device for which the DMA addresses were created
2709 * @sg: The scatter/gather entry
2710 *
2711 * Note: this function is obsolete. To do: change all occurrences of
2712 * ib_sg_dma_address() into sg_dma_address().
2713 */
2714static inline u64 ib_sg_dma_address(struct ib_device *dev,
2715                                    struct scatterlist *sg)
2716{
2717        return sg_dma_address(sg);
2718}
2719
2720/**
2721 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
2722 * @dev: The device for which the DMA addresses were created
2723 * @sg: The scatter/gather entry
2724 *
2725 * Note: this function is obsolete. To do: change all occurrences of
2726 * ib_sg_dma_len() into sg_dma_len().
2727 */
2728static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
2729                                         struct scatterlist *sg)
2730{
2731        return sg_dma_len(sg);
2732}
2733
2734/**
2735 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
2736 * @dev: The device for which the DMA address was created
2737 * @addr: The DMA address
2738 * @size: The size of the region in bytes
2739 * @dir: The direction of the DMA
2740 */
2741static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
2742                                              u64 addr,
2743                                              size_t size,
2744                                              enum dma_data_direction dir)
2745{
2746        if (dev->dma_ops)
2747                dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
2748        else
2749                dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
2750}
2751
2752/**
2753 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
2754 * @dev: The device for which the DMA address was created
2755 * @addr: The DMA address
2756 * @size: The size of the region in bytes
2757 * @dir: The direction of the DMA
2758 */
2759static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
2760                                                 u64 addr,
2761                                                 size_t size,
2762                                                 enum dma_data_direction dir)
2763{
2764        if (dev->dma_ops)
2765                dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
2766        else
2767                dma_sync_single_for_device(dev->dma_device, addr, size, dir);
2768}
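
/*
 * Example (sketch): bouncing ownership of a streaming DMA buffer
 * between CPU and device.  "ibdev", "dma_addr" and "len" are
 * placeholder names.
 *
 *	ib_dma_sync_single_for_cpu(ibdev, dma_addr, len, DMA_FROM_DEVICE);
 *	(the CPU may now read the received data)
 *	ib_dma_sync_single_for_device(ibdev, dma_addr, len, DMA_FROM_DEVICE);
 *	(the device may DMA into the buffer again)
 */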
2769
2770/**
2771 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
2772 * @dev: The device for which the DMA address is requested
2773 * @size: The size of the region to allocate in bytes
2774 * @dma_handle: A pointer for returning the DMA address of the region
2775 * @flag: memory allocator flags
2776 */
2777static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
2778                                           size_t size,
2779                                           u64 *dma_handle,
2780                                           gfp_t flag)
2781{
2782        if (dev->dma_ops)
2783                return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
2784        else {
2785                dma_addr_t handle;
2786                void *ret;
2787
2788                ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
2789                *dma_handle = handle;
2790                return ret;
2791        }
2792}
2793
2794/**
2795 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
2796 * @dev: The device for which the DMA addresses were allocated
2797 * @size: The size of the region
2798 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
2799 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
2800 */
2801static inline void ib_dma_free_coherent(struct ib_device *dev,
2802                                        size_t size, void *cpu_addr,
2803                                        u64 dma_handle)
2804{
2805        if (dev->dma_ops)
2806                dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
2807        else
2808                dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
2809}
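
/*
 * Example (sketch): allocating and freeing a coherent buffer.
 * "ibdev" and "buf_size" are placeholder names.
 *
 *	u64 dma_handle;
 *	void *buf;
 *
 *	buf = ib_dma_alloc_coherent(ibdev, buf_size, &dma_handle,
 *				    GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	(use buf from the CPU and dma_handle from the device)
 *	ib_dma_free_coherent(ibdev, buf_size, buf, dma_handle);
 */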
2810
2811/**
2812 * ib_query_mr - Retrieves information about a specific memory region.
2813 * @mr: The memory region to retrieve information about.
2814 * @mr_attr: The attributes of the specified memory region.
2815 */
2816int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
2817
2818/**
2819 * ib_dereg_mr - Deregisters a memory region and removes it from the
2820 *   HCA translation table.
2821 * @mr: The memory region to deregister.
2822 *
2823 * This function can fail if the memory region has memory windows bound to it.
2824 */
2825int ib_dereg_mr(struct ib_mr *mr);
2826
2827struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
2828                          enum ib_mr_type mr_type,
2829                          u32 max_num_sg);
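
/*
 * Example (sketch): allocating a memory-registration MR on a
 * protection domain and releasing it.  "pd" is a placeholder.
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);
 *
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *	(use the MR in registration work requests)
 *	ib_dereg_mr(mr);
 */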
2830
2831/**
2832 * ib_alloc_fast_reg_page_list - Allocates a page list array
2833 * @device: ib device pointer.
2834 * @page_list_len: size of the page list array to be allocated.
2835 *
2836 * This allocates and returns a struct ib_fast_reg_page_list * and a
2837 * page_list array that is at least page_list_len in size.  The actual
2838 * size is returned in max_page_list_len.  The caller is responsible
2839 * for initializing the contents of the page_list array before posting
2840 * a send work request with the IB_WR_FAST_REG_MR opcode.
2841 *
2842 * The page_list array entries must be translated using one of the
2843 * ib_dma_*() functions just like the addresses passed to
2844 * ib_map_phys_fmr().  Once the ib_post_send() is issued, the struct
2845 * ib_fast_reg_page_list must not be modified by the caller until the
2846 * IB_WC_FAST_REG_MR work request completes.
2847 */
2848struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
2849                                struct ib_device *device, int page_list_len);
2850
2851/**
2852 * ib_free_fast_reg_page_list - Deallocates a previously allocated
2853 *   page list array.
2854 * @page_list: struct ib_fast_reg_page_list pointer to be deallocated.
2855 */
2856void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
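
/*
 * Example (sketch): pairing the two calls above.  "ibdev" and
 * "npages" are placeholder names; error handling is abbreviated.
 *
 *	struct ib_fast_reg_page_list *frpl;
 *
 *	frpl = ib_alloc_fast_reg_page_list(ibdev, npages);
 *	if (IS_ERR(frpl))
 *		return PTR_ERR(frpl);
 *	(fill frpl->page_list[] with ib_dma_*()-translated addresses,
 *	 post the fast registration send, wait for its completion)
 *	ib_free_fast_reg_page_list(frpl);
 */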
2857
2858/**
2859 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
2860 *   R_Key and L_Key.
2861 * @mr: struct ib_mr pointer to be updated.
2862 * @newkey: new key to be used.
2863 */
2864static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
2865{
2866        mr->lkey = (mr->lkey & 0xffffff00) | newkey;
2867        mr->rkey = (mr->rkey & 0xffffff00) | newkey;
2868}
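
/*
 * Example (sketch): refreshing the key byte before reusing a
 * fast-register MR, a common consumer pattern.  "mr" is a
 * placeholder.
 *
 *	u8 key = (u8)(mr->rkey & 0x000000ff) + 1;
 *
 *	ib_update_fast_reg_key(mr, key);
 */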
2869
2870/**
2871 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
2872 * for calculating a new rkey for type 2 memory windows.
2873 * @rkey: the rkey to increment.
2874 */
2875static inline u32 ib_inc_rkey(u32 rkey)
2876{
2877        const u32 mask = 0x000000ff;
2878        return ((rkey + 1) & mask) | (rkey & ~mask);
2879}
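
/*
 * Example (sketch): computing the rkey to supply when re-binding a
 * type 2 memory window.  "mw" is a placeholder.
 *
 *	u32 new_rkey = ib_inc_rkey(mw->rkey);
 *
 *	(pass new_rkey in the bind work request)
 */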
2880
2881/**
2882 * ib_alloc_mw - Allocates a memory window.
2883 * @pd: The protection domain associated with the memory window.
2884 * @type: The type of the memory window (1 or 2).
2885 */
2886struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
2887
2888/**
2889 * ib_bind_mw - Posts a work request to the send queue of the specified
2890 *   QP, which binds the memory window to the given address range and
2891 *   remote access attributes.
2892 * @qp: QP to post the bind work request on.
2893 * @mw: The memory window to bind.
2894 * @mw_bind: Specifies information about the memory window, including
2895 *   its address range, remote access rights, and associated memory region.
2896 *
2897 * If there is no immediate error, the function will update the rkey member
2898 * of the mw parameter to its new value. The bind operation can still fail
2899 * asynchronously.
2900 */
2901static inline int ib_bind_mw(struct ib_qp *qp,
2902                             struct ib_mw *mw,
2903                             struct ib_mw_bind *mw_bind)
2904{
2905        /* XXX reference counting in corresponding MR? */
2906        return mw->device->bind_mw ?
2907                mw->device->bind_mw(qp, mw, mw_bind) :
2908                -ENOSYS;
2909}
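
/*
 * Example (sketch): binding a window and checking for an immediate
 * error.  "qp", "mw" and "bind_attr" are placeholder names, and
 * bind_attr is assumed to be fully initialized by the caller.
 *
 *	int ret = ib_bind_mw(qp, mw, &bind_attr);
 *
 *	if (ret)
 *		return ret;
 *	(mw->rkey now holds the new key; the bind may still fail
 *	 asynchronously, which the completion will report)
 */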
2910
2911/**
2912 * ib_dealloc_mw - Deallocates a memory window.
2913 * @mw: The memory window to deallocate.
2914 */
2915int ib_dealloc_mw(struct ib_mw *mw);
2916
2917/**
2918 * ib_alloc_fmr - Allocates an unmapped fast memory region.
2919 * @pd: The protection domain associated with the unmapped region.
2920 * @mr_access_flags: Specifies the memory access rights.
2921 * @fmr_attr: Attributes of the unmapped region.
2922 *
2923 * A fast memory region must be mapped before it can be used as part of
2924 * a work request.
2925 */
2926struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
2927                            int mr_access_flags,
2928                            struct ib_fmr_attr *fmr_attr);
2929
2930/**
2931 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
2932 * @fmr: The fast memory region to associate with the pages.
2933 * @page_list: An array of physical pages to map to the fast memory region.
2934 * @list_len: The number of pages in page_list.
2935 * @iova: The I/O virtual address to use with the mapped region.
2936 */
2937static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
2938                                  u64 *page_list, int list_len,
2939                                  u64 iova)
2940{
2941        return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
2942}
2943
2944/**
2945 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
2946 * @fmr_list: A linked list of fast memory regions to unmap.
2947 */
2948int ib_unmap_fmr(struct list_head *fmr_list);
2949
2950/**
2951 * ib_dealloc_fmr - Deallocates a fast memory region.
2952 * @fmr: The fast memory region to deallocate.
2953 */
2954int ib_dealloc_fmr(struct ib_fmr *fmr);
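
/*
 * Example (sketch) of the FMR life cycle.  "pd", "pages", "npages"
 * and "io_virt" are placeholder names; the attribute values are
 * arbitrary.
 *
 *	struct ib_fmr_attr attr = {
 *		.max_pages  = npages,
 *		.max_maps   = 32,
 *		.page_shift = PAGE_SHIFT,
 *	};
 *	struct ib_fmr *fmr;
 *	LIST_HEAD(fmr_list);
 *
 *	fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE, &attr);
 *	if (IS_ERR(fmr))
 *		return PTR_ERR(fmr);
 *	if (!ib_map_phys_fmr(fmr, pages, npages, io_virt)) {
 *		(use the mapped region)
 *		list_add_tail(&fmr->list, &fmr_list);
 *		ib_unmap_fmr(&fmr_list);
 *	}
 *	ib_dealloc_fmr(fmr);
 */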
2955
2956/**
2957 * ib_attach_mcast - Attaches the specified QP to a multicast group.
2958 * @qp: QP to attach to the multicast group.  The QP must be type
2959 *   IB_QPT_UD.
2960 * @gid: Multicast group GID.
2961 * @lid: Multicast group LID in host byte order.
2962 *
2963 * In order to send and receive multicast packets, subnet
2964 * administration must have created the multicast group and configured
2965 * the fabric appropriately.  The port associated with the specified
2966 * QP must also be a member of the multicast group.
2967 */
2968int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2969
2970/**
2971 * ib_detach_mcast - Detaches the specified QP from a multicast group.
2972 * @qp: QP to detach from the multicast group.
2973 * @gid: Multicast group GID.
2974 * @lid: Multicast group LID in host byte order.
2975 */
2976int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
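
/*
 * Example (sketch): joining and leaving a multicast group on a UD QP.
 * "qp", "mgid" and "mlid" are placeholders for values obtained via
 * subnet administration.
 *
 *	int ret = ib_attach_mcast(qp, &mgid, mlid);
 *
 *	if (ret)
 *		return ret;
 *	(send and receive multicast traffic)
 *	ib_detach_mcast(qp, &mgid, mlid);
 */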
2977
2978/**
2979 * ib_alloc_xrcd - Allocates an XRC domain.
2980 * @device: The device on which to allocate the XRC domain.
2981 */
2982struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);
2983
2984/**
2985 * ib_dealloc_xrcd - Deallocates an XRC domain.
2986 * @xrcd: The XRC domain to deallocate.
2987 */
2988int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
2989
2990struct ib_flow *ib_create_flow(struct ib_qp *qp,
2991                               struct ib_flow_attr *flow_attr, int domain);
2992int ib_destroy_flow(struct ib_flow *flow_id);
2993
2994static inline int ib_check_mr_access(int flags)
2995{
2996        /*
2997         * Local write permission is required if remote write or
2998         * remote atomic permission is also requested.
2999         */
3000        if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
3001            !(flags & IB_ACCESS_LOCAL_WRITE))
3002                return -EINVAL;
3003
3004        return 0;
3005}
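
/*
 * Example (sketch): validating caller-supplied access flags before
 * registering memory.  "access_flags" is a placeholder.
 *
 *	int ret = ib_check_mr_access(access_flags);
 *
 *	if (ret)
 *		return ret;
 */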
3006
3007/**
3008 * ib_check_mr_status: lightweight check of MR status.
3009 *     This routine may provide status checks on a selected
3010 *     ib_mr.  The first use is for the signature status check.
3011 *
3012 * @mr: A memory region.
3013 * @check_mask: Bitmask of which checks to perform from
3014 *     ib_mr_status_check enumeration.
3015 * @mr_status: The container of relevant status checks.
3016 *     Failed checks will be indicated in the status bitmask,
3017 *     and the relevant info will be in the error item.
3018 */
3019int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
3020                       struct ib_mr_status *mr_status);
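
/*
 * Example (sketch): checking signature status once the relevant work
 * request has completed.  "mr" is a placeholder for a
 * signature-enabled MR.
 *
 *	struct ib_mr_status mr_status;
 *	int ret;
 *
 *	ret = ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
 *	if (ret)
 *		return ret;
 *	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS)
 *		(inspect mr_status.sig_err for the failure details)
 */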
3021
3022struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
3023                                            u16 pkey, const union ib_gid *gid,
3024                                            const struct sockaddr *addr);
3025
3026#endif /* IB_VERBS_H */
3027