linux/include/rdma/ib_verbs.h
/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/uaccess.h>

extern struct workqueue_struct *ib_wq;

union ib_gid {
        u8      raw[16];
        struct {
                __be64  subnet_prefix;
                __be64  interface_id;
        } global;
};

enum rdma_node_type {
        /* IB values map to NodeInfo:NodeType. */
        RDMA_NODE_IB_CA         = 1,
        RDMA_NODE_IB_SWITCH,
        RDMA_NODE_IB_ROUTER,
        RDMA_NODE_RNIC
};

enum rdma_transport_type {
        RDMA_TRANSPORT_IB,
        RDMA_TRANSPORT_IWARP
};

enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;

enum rdma_link_layer {
        IB_LINK_LAYER_UNSPECIFIED,
        IB_LINK_LAYER_INFINIBAND,
        IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
        IB_DEVICE_RESIZE_MAX_WR         = 1,
        IB_DEVICE_BAD_PKEY_CNTR         = (1<<1),
        IB_DEVICE_BAD_QKEY_CNTR         = (1<<2),
        IB_DEVICE_RAW_MULTI             = (1<<3),
        IB_DEVICE_AUTO_PATH_MIG         = (1<<4),
        IB_DEVICE_CHANGE_PHY_PORT       = (1<<5),
        IB_DEVICE_UD_AV_PORT_ENFORCE    = (1<<6),
        IB_DEVICE_CURR_QP_STATE_MOD     = (1<<7),
        IB_DEVICE_SHUTDOWN_PORT         = (1<<8),
        IB_DEVICE_INIT_TYPE             = (1<<9),
        IB_DEVICE_PORT_ACTIVE_EVENT     = (1<<10),
        IB_DEVICE_SYS_IMAGE_GUID        = (1<<11),
        IB_DEVICE_RC_RNR_NAK_GEN        = (1<<12),
        IB_DEVICE_SRQ_RESIZE            = (1<<13),
        IB_DEVICE_N_NOTIFY_CQ           = (1<<14),
        IB_DEVICE_LOCAL_DMA_LKEY        = (1<<15),
        IB_DEVICE_RESERVED              = (1<<16), /* old SEND_W_INV */
        IB_DEVICE_MEM_WINDOW            = (1<<17),
        /*
         * Devices should set IB_DEVICE_UD_IP_CSUM if they support
         * insertion of UDP and TCP checksums on outgoing UD IPoIB
         * messages and can verify the validity of checksums for
         * incoming messages.  Setting this flag implies that the
         * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
         */
        IB_DEVICE_UD_IP_CSUM            = (1<<18),
        IB_DEVICE_UD_TSO                = (1<<19),
        IB_DEVICE_XRC                   = (1<<20),
        IB_DEVICE_MEM_MGT_EXTENSIONS    = (1<<21),
        IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
};

enum ib_atomic_cap {
        IB_ATOMIC_NONE,
        IB_ATOMIC_HCA,
        IB_ATOMIC_GLOB
};

struct ib_device_attr {
        u64                     fw_ver;
        __be64                  sys_image_guid;
        u64                     max_mr_size;
        u64                     page_size_cap;
        u32                     vendor_id;
        u32                     vendor_part_id;
        u32                     hw_ver;
        int                     max_qp;
        int                     max_qp_wr;
        int                     device_cap_flags;
        int                     max_sge;
        int                     max_sge_rd;
        int                     max_cq;
        int                     max_cqe;
        int                     max_mr;
        int                     max_pd;
        int                     max_qp_rd_atom;
        int                     max_ee_rd_atom;
        int                     max_res_rd_atom;
        int                     max_qp_init_rd_atom;
        int                     max_ee_init_rd_atom;
        enum ib_atomic_cap      atomic_cap;
        enum ib_atomic_cap      masked_atomic_cap;
        int                     max_ee;
        int                     max_rdd;
        int                     max_mw;
        int                     max_raw_ipv6_qp;
        int                     max_raw_ethy_qp;
        int                     max_mcast_grp;
        int                     max_mcast_qp_attach;
        int                     max_total_mcast_qp_attach;
        int                     max_ah;
        int                     max_fmr;
        int                     max_map_per_fmr;
        int                     max_srq;
        int                     max_srq_wr;
        int                     max_srq_sge;
        unsigned int            max_fast_reg_page_list_len;
        u16                     max_pkeys;
        u8                      local_ca_ack_delay;
};

enum ib_mtu {
        IB_MTU_256  = 1,
        IB_MTU_512  = 2,
        IB_MTU_1024 = 3,
        IB_MTU_2048 = 4,
        IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
        switch (mtu) {
        case IB_MTU_256:  return  256;
        case IB_MTU_512:  return  512;
        case IB_MTU_1024: return 1024;
        case IB_MTU_2048: return 2048;
        case IB_MTU_4096: return 4096;
        default:          return -1;
        }
}

enum ib_port_state {
        IB_PORT_NOP             = 0,
        IB_PORT_DOWN            = 1,
        IB_PORT_INIT            = 2,
        IB_PORT_ARMED           = 3,
        IB_PORT_ACTIVE          = 4,
        IB_PORT_ACTIVE_DEFER    = 5
};

enum ib_port_cap_flags {
        IB_PORT_SM                              = 1 <<  1,
        IB_PORT_NOTICE_SUP                      = 1 <<  2,
        IB_PORT_TRAP_SUP                        = 1 <<  3,
        IB_PORT_OPT_IPD_SUP                     = 1 <<  4,
        IB_PORT_AUTO_MIGR_SUP                   = 1 <<  5,
        IB_PORT_SL_MAP_SUP                      = 1 <<  6,
        IB_PORT_MKEY_NVRAM                      = 1 <<  7,
        IB_PORT_PKEY_NVRAM                      = 1 <<  8,
        IB_PORT_LED_INFO_SUP                    = 1 <<  9,
        IB_PORT_SM_DISABLED                     = 1 << 10,
        IB_PORT_SYS_IMAGE_GUID_SUP              = 1 << 11,
        IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP       = 1 << 12,
        IB_PORT_EXTENDED_SPEEDS_SUP             = 1 << 14,
        IB_PORT_CM_SUP                          = 1 << 16,
        IB_PORT_SNMP_TUNNEL_SUP                 = 1 << 17,
        IB_PORT_REINIT_SUP                      = 1 << 18,
        IB_PORT_DEVICE_MGMT_SUP                 = 1 << 19,
        IB_PORT_VENDOR_CLASS_SUP                = 1 << 20,
        IB_PORT_DR_NOTICE_SUP                   = 1 << 21,
        IB_PORT_CAP_MASK_NOTICE_SUP             = 1 << 22,
        IB_PORT_BOOT_MGMT_SUP                   = 1 << 23,
        IB_PORT_LINK_LATENCY_SUP                = 1 << 24,
        IB_PORT_CLIENT_REG_SUP                  = 1 << 25
};

enum ib_port_width {
        IB_WIDTH_1X     = 1,
        IB_WIDTH_4X     = 2,
        IB_WIDTH_8X     = 4,
        IB_WIDTH_12X    = 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
        switch (width) {
        case IB_WIDTH_1X:  return  1;
        case IB_WIDTH_4X:  return  4;
        case IB_WIDTH_8X:  return  8;
        case IB_WIDTH_12X: return 12;
        default:          return -1;
        }
}

struct ib_protocol_stats {
        /* TBD... */
};

struct iw_protocol_stats {
        u64     ipInReceives;
        u64     ipInHdrErrors;
        u64     ipInTooBigErrors;
        u64     ipInNoRoutes;
        u64     ipInAddrErrors;
        u64     ipInUnknownProtos;
        u64     ipInTruncatedPkts;
        u64     ipInDiscards;
        u64     ipInDelivers;
        u64     ipOutForwDatagrams;
        u64     ipOutRequests;
        u64     ipOutDiscards;
        u64     ipOutNoRoutes;
        u64     ipReasmTimeout;
        u64     ipReasmReqds;
        u64     ipReasmOKs;
        u64     ipReasmFails;
        u64     ipFragOKs;
        u64     ipFragFails;
        u64     ipFragCreates;
        u64     ipInMcastPkts;
        u64     ipOutMcastPkts;
        u64     ipInBcastPkts;
        u64     ipOutBcastPkts;

        u64     tcpRtoAlgorithm;
        u64     tcpRtoMin;
        u64     tcpRtoMax;
        u64     tcpMaxConn;
        u64     tcpActiveOpens;
        u64     tcpPassiveOpens;
        u64     tcpAttemptFails;
        u64     tcpEstabResets;
        u64     tcpCurrEstab;
        u64     tcpInSegs;
        u64     tcpOutSegs;
        u64     tcpRetransSegs;
        u64     tcpInErrs;
        u64     tcpOutRsts;
};

union rdma_protocol_stats {
        struct ib_protocol_stats        ib;
        struct iw_protocol_stats        iw;
};

struct ib_port_attr {
        enum ib_port_state      state;
        enum ib_mtu             max_mtu;
        enum ib_mtu             active_mtu;
        int                     gid_tbl_len;
        u32                     port_cap_flags;
        u32                     max_msg_sz;
        u32                     bad_pkey_cntr;
        u32                     qkey_viol_cntr;
        u16                     pkey_tbl_len;
        u16                     lid;
        u16                     sm_lid;
        u8                      lmc;
        u8                      max_vl_num;
        u8                      sm_sl;
        u8                      subnet_timeout;
        u8                      init_type_reply;
        u8                      active_width;
        u8                      active_speed;
        u8                      phys_state;
};

enum ib_device_modify_flags {
        IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
        IB_DEVICE_MODIFY_NODE_DESC      = 1 << 1
};

struct ib_device_modify {
        u64     sys_image_guid;
        char    node_desc[64];
};

enum ib_port_modify_flags {
        IB_PORT_SHUTDOWN                = 1,
        IB_PORT_INIT_TYPE               = (1<<2),
        IB_PORT_RESET_QKEY_CNTR         = (1<<3)
};

struct ib_port_modify {
        u32     set_port_cap_mask;
        u32     clr_port_cap_mask;
        u8      init_type;
};

enum ib_event_type {
        IB_EVENT_CQ_ERR,
        IB_EVENT_QP_FATAL,
        IB_EVENT_QP_REQ_ERR,
        IB_EVENT_QP_ACCESS_ERR,
        IB_EVENT_COMM_EST,
        IB_EVENT_SQ_DRAINED,
        IB_EVENT_PATH_MIG,
        IB_EVENT_PATH_MIG_ERR,
        IB_EVENT_DEVICE_FATAL,
        IB_EVENT_PORT_ACTIVE,
        IB_EVENT_PORT_ERR,
        IB_EVENT_LID_CHANGE,
        IB_EVENT_PKEY_CHANGE,
        IB_EVENT_SM_CHANGE,
        IB_EVENT_SRQ_ERR,
        IB_EVENT_SRQ_LIMIT_REACHED,
        IB_EVENT_QP_LAST_WQE_REACHED,
        IB_EVENT_CLIENT_REREGISTER,
        IB_EVENT_GID_CHANGE,
};

struct ib_event {
        struct ib_device        *device;
        union {
                struct ib_cq    *cq;
                struct ib_qp    *qp;
                struct ib_srq   *srq;
                u8              port_num;
        } element;
        enum ib_event_type      event;
};

struct ib_event_handler {
        struct ib_device *device;
        void            (*handler)(struct ib_event_handler *, struct ib_event *);
        struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)          \
        do {                                                    \
                (_ptr)->device  = _device;                      \
                (_ptr)->handler = _handler;                     \
                INIT_LIST_HEAD(&(_ptr)->list);                  \
        } while (0)
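
/*
 * Usage sketch (illustrative, not part of the original header): a
 * consumer registers for asynchronous events with a handler and this
 * macro.  The handler name and the pr_info() body are hypothetical.
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		if (event->event == IB_EVENT_PORT_ACTIVE)
 *			pr_info("port %d active\n", event->element.port_num);
 *	}
 *
 *	struct ib_event_handler eh;
 *
 *	INIT_IB_EVENT_HANDLER(&eh, device, my_event_handler);
 *	ib_register_event_handler(&eh);
 */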

struct ib_global_route {
        union ib_gid    dgid;
        u32             flow_label;
        u8              sgid_index;
        u8              hop_limit;
        u8              traffic_class;
};

struct ib_grh {
        __be32          version_tclass_flow;
        __be16          paylen;
        u8              next_hdr;
        u8              hop_limit;
        union ib_gid    sgid;
        union ib_gid    dgid;
};

enum {
        IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE       cpu_to_be16(0xFFFF)

enum ib_ah_flags {
        IB_AH_GRH       = 1
};

enum ib_rate {
        IB_RATE_PORT_CURRENT = 0,
        IB_RATE_2_5_GBPS = 2,
        IB_RATE_5_GBPS   = 5,
        IB_RATE_10_GBPS  = 3,
        IB_RATE_20_GBPS  = 6,
        IB_RATE_30_GBPS  = 4,
        IB_RATE_40_GBPS  = 7,
        IB_RATE_60_GBPS  = 8,
        IB_RATE_80_GBPS  = 9,
        IB_RATE_120_GBPS = 10,
        IB_RATE_14_GBPS  = 11,
        IB_RATE_56_GBPS  = 12,
        IB_RATE_112_GBPS = 13,
        IB_RATE_168_GBPS = 14,
        IB_RATE_25_GBPS  = 15,
        IB_RATE_100_GBPS = 16,
        IB_RATE_200_GBPS = 17,
        IB_RATE_300_GBPS = 18
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
int ib_rate_to_mbps(enum ib_rate rate) __attribute_const__;

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;
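
/*
 * Example (illustrative): converting between the rate enum, the base-rate
 * multiple, and Mbps.  Note the enum values are not ordered by speed.
 *
 *	int mult = ib_rate_to_mult(IB_RATE_10_GBPS);	- yields 4
 *	int mbps = ib_rate_to_mbps(IB_RATE_10_GBPS);	- yields 10000
 *	enum ib_rate r = mult_to_ib_rate(4);		- yields IB_RATE_10_GBPS
 */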

struct ib_ah_attr {
        struct ib_global_route  grh;
        u16                     dlid;
        u8                      sl;
        u8                      src_path_bits;
        u8                      static_rate;
        u8                      ah_flags;
        u8                      port_num;
};

enum ib_wc_status {
        IB_WC_SUCCESS,
        IB_WC_LOC_LEN_ERR,
        IB_WC_LOC_QP_OP_ERR,
        IB_WC_LOC_EEC_OP_ERR,
        IB_WC_LOC_PROT_ERR,
        IB_WC_WR_FLUSH_ERR,
        IB_WC_MW_BIND_ERR,
        IB_WC_BAD_RESP_ERR,
        IB_WC_LOC_ACCESS_ERR,
        IB_WC_REM_INV_REQ_ERR,
        IB_WC_REM_ACCESS_ERR,
        IB_WC_REM_OP_ERR,
        IB_WC_RETRY_EXC_ERR,
        IB_WC_RNR_RETRY_EXC_ERR,
        IB_WC_LOC_RDD_VIOL_ERR,
        IB_WC_REM_INV_RD_REQ_ERR,
        IB_WC_REM_ABORT_ERR,
        IB_WC_INV_EECN_ERR,
        IB_WC_INV_EEC_STATE_ERR,
        IB_WC_FATAL_ERR,
        IB_WC_RESP_TIMEOUT_ERR,
        IB_WC_GENERAL_ERR
};

enum ib_wc_opcode {
        IB_WC_SEND,
        IB_WC_RDMA_WRITE,
        IB_WC_RDMA_READ,
        IB_WC_COMP_SWAP,
        IB_WC_FETCH_ADD,
        IB_WC_BIND_MW,
        IB_WC_LSO,
        IB_WC_LOCAL_INV,
        IB_WC_FAST_REG_MR,
        IB_WC_MASKED_COMP_SWAP,
        IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
        IB_WC_RECV                      = 1 << 7,
        IB_WC_RECV_RDMA_WITH_IMM
};
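
/*
 * Example (illustrative): because IB_WC_RECV is bit 7, a completion can
 * be classified as a receive without enumerating every receive opcode.
 * The two handler names below are hypothetical consumer helpers.
 *
 *	if (wc->opcode & IB_WC_RECV)
 *		handle_recv(wc);
 *	else
 *		handle_send_completion(wc);
 */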

enum ib_wc_flags {
        IB_WC_GRH               = 1,
        IB_WC_WITH_IMM          = (1<<1),
        IB_WC_WITH_INVALIDATE   = (1<<2),
};

struct ib_wc {
        u64                     wr_id;
        enum ib_wc_status       status;
        enum ib_wc_opcode       opcode;
        u32                     vendor_err;
        u32                     byte_len;
        struct ib_qp           *qp;
        union {
                __be32          imm_data;
                u32             invalidate_rkey;
        } ex;
        u32                     src_qp;
        int                     wc_flags;
        u16                     pkey_index;
        u16                     slid;
        u8                      sl;
        u8                      dlid_path_bits;
        u8                      port_num;       /* valid only for DR SMPs on switches */
        int                     csum_ok;
};

enum ib_cq_notify_flags {
        IB_CQ_SOLICITED                 = 1 << 0,
        IB_CQ_NEXT_COMP                 = 1 << 1,
        IB_CQ_SOLICITED_MASK            = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
        IB_CQ_REPORT_MISSED_EVENTS      = 1 << 2,
};

enum ib_srq_type {
        IB_SRQT_BASIC,
        IB_SRQT_XRC
};

enum ib_srq_attr_mask {
        IB_SRQ_MAX_WR   = 1 << 0,
        IB_SRQ_LIMIT    = 1 << 1,
};

struct ib_srq_attr {
        u32     max_wr;
        u32     max_sge;
        u32     srq_limit;
};

struct ib_srq_init_attr {
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *srq_context;
        struct ib_srq_attr      attr;
        enum ib_srq_type        srq_type;

        union {
                struct {
                        struct ib_xrcd *xrcd;
                        struct ib_cq   *cq;
                } xrc;
        } ext;
};

struct ib_qp_cap {
        u32     max_send_wr;
        u32     max_recv_wr;
        u32     max_send_sge;
        u32     max_recv_sge;
        u32     max_inline_data;
};

enum ib_sig_type {
        IB_SIGNAL_ALL_WR,
        IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
        /*
         * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
         * here (and in that order) since the MAD layer uses them as
         * indices into a 2-entry table.
         */
        IB_QPT_SMI,
        IB_QPT_GSI,

        IB_QPT_RC,
        IB_QPT_UC,
        IB_QPT_UD,
        IB_QPT_RAW_IPV6,
        IB_QPT_RAW_ETHERTYPE,
        /* Save 8 for RAW_PACKET */
        IB_QPT_XRC_INI = 9,
        IB_QPT_XRC_TGT,
        IB_QPT_MAX
};

enum ib_qp_create_flags {
        IB_QP_CREATE_IPOIB_UD_LSO               = 1 << 0,
        IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK   = 1 << 1,
};

struct ib_qp_init_attr {
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *qp_context;
        struct ib_cq           *send_cq;
        struct ib_cq           *recv_cq;
        struct ib_srq          *srq;
        struct ib_xrcd         *xrcd;     /* XRC TGT QPs only */
        struct ib_qp_cap        cap;
        enum ib_sig_type        sq_sig_type;
        enum ib_qp_type         qp_type;
        enum ib_qp_create_flags create_flags;
        u8                      port_num; /* special QP types only */
};

struct ib_qp_open_attr {
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *qp_context;
        u32                     qp_num;
        enum ib_qp_type         qp_type;
};

enum ib_rnr_timeout {
        IB_RNR_TIMER_655_36 =  0,
        IB_RNR_TIMER_000_01 =  1,
        IB_RNR_TIMER_000_02 =  2,
        IB_RNR_TIMER_000_03 =  3,
        IB_RNR_TIMER_000_04 =  4,
        IB_RNR_TIMER_000_06 =  5,
        IB_RNR_TIMER_000_08 =  6,
        IB_RNR_TIMER_000_12 =  7,
        IB_RNR_TIMER_000_16 =  8,
        IB_RNR_TIMER_000_24 =  9,
        IB_RNR_TIMER_000_32 = 10,
        IB_RNR_TIMER_000_48 = 11,
        IB_RNR_TIMER_000_64 = 12,
        IB_RNR_TIMER_000_96 = 13,
        IB_RNR_TIMER_001_28 = 14,
        IB_RNR_TIMER_001_92 = 15,
        IB_RNR_TIMER_002_56 = 16,
        IB_RNR_TIMER_003_84 = 17,
        IB_RNR_TIMER_005_12 = 18,
        IB_RNR_TIMER_007_68 = 19,
        IB_RNR_TIMER_010_24 = 20,
        IB_RNR_TIMER_015_36 = 21,
        IB_RNR_TIMER_020_48 = 22,
        IB_RNR_TIMER_030_72 = 23,
        IB_RNR_TIMER_040_96 = 24,
        IB_RNR_TIMER_061_44 = 25,
        IB_RNR_TIMER_081_92 = 26,
        IB_RNR_TIMER_122_88 = 27,
        IB_RNR_TIMER_163_84 = 28,
        IB_RNR_TIMER_245_76 = 29,
        IB_RNR_TIMER_327_68 = 30,
        IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
        IB_QP_STATE                     = 1,
        IB_QP_CUR_STATE                 = (1<<1),
        IB_QP_EN_SQD_ASYNC_NOTIFY       = (1<<2),
        IB_QP_ACCESS_FLAGS              = (1<<3),
        IB_QP_PKEY_INDEX                = (1<<4),
        IB_QP_PORT                      = (1<<5),
        IB_QP_QKEY                      = (1<<6),
        IB_QP_AV                        = (1<<7),
        IB_QP_PATH_MTU                  = (1<<8),
        IB_QP_TIMEOUT                   = (1<<9),
        IB_QP_RETRY_CNT                 = (1<<10),
        IB_QP_RNR_RETRY                 = (1<<11),
        IB_QP_RQ_PSN                    = (1<<12),
        IB_QP_MAX_QP_RD_ATOMIC          = (1<<13),
        IB_QP_ALT_PATH                  = (1<<14),
        IB_QP_MIN_RNR_TIMER             = (1<<15),
        IB_QP_SQ_PSN                    = (1<<16),
        IB_QP_MAX_DEST_RD_ATOMIC        = (1<<17),
        IB_QP_PATH_MIG_STATE            = (1<<18),
        IB_QP_CAP                       = (1<<19),
        IB_QP_DEST_QPN                  = (1<<20)
};

enum ib_qp_state {
        IB_QPS_RESET,
        IB_QPS_INIT,
        IB_QPS_RTR,
        IB_QPS_RTS,
        IB_QPS_SQD,
        IB_QPS_SQE,
        IB_QPS_ERR
};

enum ib_mig_state {
        IB_MIG_MIGRATED,
        IB_MIG_REARM,
        IB_MIG_ARMED
};

struct ib_qp_attr {
        enum ib_qp_state        qp_state;
        enum ib_qp_state        cur_qp_state;
        enum ib_mtu             path_mtu;
        enum ib_mig_state       path_mig_state;
        u32                     qkey;
        u32                     rq_psn;
        u32                     sq_psn;
        u32                     dest_qp_num;
        int                     qp_access_flags;
        struct ib_qp_cap        cap;
        struct ib_ah_attr       ah_attr;
        struct ib_ah_attr       alt_ah_attr;
        u16                     pkey_index;
        u16                     alt_pkey_index;
        u8                      en_sqd_async_notify;
        u8                      sq_draining;
        u8                      max_rd_atomic;
        u8                      max_dest_rd_atomic;
        u8                      min_rnr_timer;
        u8                      port_num;
        u8                      timeout;
        u8                      retry_cnt;
        u8                      rnr_retry;
        u8                      alt_port_num;
        u8                      alt_timeout;
};

enum ib_wr_opcode {
        IB_WR_RDMA_WRITE,
        IB_WR_RDMA_WRITE_WITH_IMM,
        IB_WR_SEND,
        IB_WR_SEND_WITH_IMM,
        IB_WR_RDMA_READ,
        IB_WR_ATOMIC_CMP_AND_SWP,
        IB_WR_ATOMIC_FETCH_AND_ADD,
        IB_WR_LSO,
        IB_WR_SEND_WITH_INV,
        IB_WR_RDMA_READ_WITH_INV,
        IB_WR_LOCAL_INV,
        IB_WR_FAST_REG_MR,
        IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
        IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
};

enum ib_send_flags {
        IB_SEND_FENCE           = 1,
        IB_SEND_SIGNALED        = (1<<1),
        IB_SEND_SOLICITED       = (1<<2),
        IB_SEND_INLINE          = (1<<3),
        IB_SEND_IP_CSUM         = (1<<4)
};

struct ib_sge {
        u64     addr;
        u32     length;
        u32     lkey;
};

struct ib_fast_reg_page_list {
        struct ib_device       *device;
        u64                    *page_list;
        unsigned int            max_page_list_len;
};

struct ib_send_wr {
        struct ib_send_wr      *next;
        u64                     wr_id;
        struct ib_sge          *sg_list;
        int                     num_sge;
        enum ib_wr_opcode       opcode;
        int                     send_flags;
        union {
                __be32          imm_data;
                u32             invalidate_rkey;
        } ex;
        union {
                struct {
                        u64     remote_addr;
                        u32     rkey;
                } rdma;
                struct {
                        u64     remote_addr;
                        u64     compare_add;
                        u64     swap;
                        u64     compare_add_mask;
                        u64     swap_mask;
                        u32     rkey;
                } atomic;
                struct {
                        struct ib_ah *ah;
                        void   *header;
                        int     hlen;
                        int     mss;
                        u32     remote_qpn;
                        u32     remote_qkey;
                        u16     pkey_index; /* valid for GSI only */
                        u8      port_num;   /* valid for DR SMPs on switch only */
                } ud;
                struct {
                        u64                             iova_start;
                        struct ib_fast_reg_page_list   *page_list;
                        unsigned int                    page_shift;
                        unsigned int                    page_list_len;
                        u32                             length;
                        int                             access_flags;
                        u32                             rkey;
                } fast_reg;
        } wr;
        u32                     xrc_remote_srq_num;     /* XRC TGT QPs only */
};

struct ib_recv_wr {
        struct ib_recv_wr      *next;
        u64                     wr_id;
        struct ib_sge          *sg_list;
        int                     num_sge;
};

enum ib_access_flags {
        IB_ACCESS_LOCAL_WRITE   = 1,
        IB_ACCESS_REMOTE_WRITE  = (1<<1),
        IB_ACCESS_REMOTE_READ   = (1<<2),
        IB_ACCESS_REMOTE_ATOMIC = (1<<3),
        IB_ACCESS_MW_BIND       = (1<<4)
};

struct ib_phys_buf {
        u64      addr;
        u64      size;
};

struct ib_mr_attr {
        struct ib_pd    *pd;
        u64             device_virt_addr;
        u64             size;
        int             mr_access_flags;
        u32             lkey;
        u32             rkey;
};

enum ib_mr_rereg_flags {
        IB_MR_REREG_TRANS       = 1,
        IB_MR_REREG_PD          = (1<<1),
        IB_MR_REREG_ACCESS      = (1<<2)
};

struct ib_mw_bind {
        struct ib_mr   *mr;
        u64             wr_id;
        u64             addr;
        u32             length;
        int             send_flags;
        int             mw_access_flags;
};

struct ib_fmr_attr {
        int     max_pages;
        int     max_maps;
        u8      page_shift;
};

struct ib_ucontext {
        struct ib_device       *device;
        struct list_head        pd_list;
        struct list_head        mr_list;
        struct list_head        mw_list;
        struct list_head        cq_list;
        struct list_head        qp_list;
        struct list_head        srq_list;
        struct list_head        ah_list;
        struct list_head        xrcd_list;
        int                     closing;
};

struct ib_uobject {
        u64                     user_handle;    /* handle given to us by userspace */
        struct ib_ucontext     *context;        /* associated user context */
        void                   *object;         /* containing object */
        struct list_head        list;           /* link to context's list */
        int                     id;             /* index into kernel idr */
        struct kref             ref;
        struct rw_semaphore     mutex;          /* protects .live */
        int                     live;
};

struct ib_udata {
        void __user *inbuf;
        void __user *outbuf;
        size_t       inlen;
        size_t       outlen;
};

struct ib_pd {
        struct ib_device       *device;
        struct ib_uobject      *uobject;
        atomic_t                usecnt; /* count all resources */
};

struct ib_xrcd {
        struct ib_device       *device;
        atomic_t                usecnt; /* count all exposed resources */
        struct inode           *inode;

        struct mutex            tgt_qp_mutex;
        struct list_head        tgt_qp_list;
};

struct ib_ah {
        struct ib_device        *device;
        struct ib_pd            *pd;
        struct ib_uobject       *uobject;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

struct ib_cq {
        struct ib_device       *device;
        struct ib_uobject      *uobject;
        ib_comp_handler         comp_handler;
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *cq_context;
        int                     cqe;
        atomic_t                usecnt; /* count number of work queues */
};

struct ib_srq {
        struct ib_device       *device;
        struct ib_pd           *pd;
        struct ib_uobject      *uobject;
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *srq_context;
        enum ib_srq_type        srq_type;
        atomic_t                usecnt;

        union {
                struct {
                        struct ib_xrcd *xrcd;
                        struct ib_cq   *cq;
                        u32             srq_num;
                } xrc;
        } ext;
};

struct ib_qp {
        struct ib_device       *device;
        struct ib_pd           *pd;
        struct ib_cq           *send_cq;
        struct ib_cq           *recv_cq;
        struct ib_srq          *srq;
        struct ib_xrcd         *xrcd; /* XRC TGT QPs only */
        struct list_head        xrcd_list;
        atomic_t                usecnt; /* count times opened */
        struct list_head        open_list;
        struct ib_qp           *real_qp;
        struct ib_uobject      *uobject;
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *qp_context;
        u32                     qp_num;
        enum ib_qp_type         qp_type;
};

struct ib_mr {
        struct ib_device  *device;
        struct ib_pd      *pd;
        struct ib_uobject *uobject;
        u32                lkey;
        u32                rkey;
        atomic_t           usecnt; /* count number of MWs */
};

struct ib_mw {
        struct ib_device        *device;
        struct ib_pd            *pd;
        struct ib_uobject       *uobject;
        u32                     rkey;
};

struct ib_fmr {
        struct ib_device        *device;
        struct ib_pd            *pd;
        struct list_head        list;
        u32                     lkey;
        u32                     rkey;
};

struct ib_mad;
struct ib_grh;

enum ib_process_mad_flags {
        IB_MAD_IGNORE_MKEY      = 1,
        IB_MAD_IGNORE_BKEY      = 2,
        IB_MAD_IGNORE_ALL       = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
        IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
        IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
        IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
        IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
        rwlock_t                lock;
        struct ib_event_handler event_handler;
        struct ib_pkey_cache  **pkey_cache;
        struct ib_gid_cache   **gid_cache;
        u8                     *lmc_cache;
};

struct ib_dma_mapping_ops {
        int             (*mapping_error)(struct ib_device *dev,
                                         u64 dma_addr);
        u64             (*map_single)(struct ib_device *dev,
                                      void *ptr, size_t size,
                                      enum dma_data_direction direction);
        void            (*unmap_single)(struct ib_device *dev,
                                        u64 addr, size_t size,
                                        enum dma_data_direction direction);
        u64             (*map_page)(struct ib_device *dev,
                                    struct page *page, unsigned long offset,
                                    size_t size,
                                    enum dma_data_direction direction);
        void            (*unmap_page)(struct ib_device *dev,
                                      u64 addr, size_t size,
                                      enum dma_data_direction direction);
        int             (*map_sg)(struct ib_device *dev,
                                  struct scatterlist *sg, int nents,
                                  enum dma_data_direction direction);
        void            (*unmap_sg)(struct ib_device *dev,
                                    struct scatterlist *sg, int nents,
                                    enum dma_data_direction direction);
        u64             (*dma_address)(struct ib_device *dev,
                                       struct scatterlist *sg);
        unsigned int    (*dma_len)(struct ib_device *dev,
                                   struct scatterlist *sg);
        void            (*sync_single_for_cpu)(struct ib_device *dev,
                                               u64 dma_handle,
                                               size_t size,
                                               enum dma_data_direction dir);
        void            (*sync_single_for_device)(struct ib_device *dev,
                                                  u64 dma_handle,
                                                  size_t size,
                                                  enum dma_data_direction dir);
        void            *(*alloc_coherent)(struct ib_device *dev,
                                           size_t size,
                                           u64 *dma_handle,
                                           gfp_t flag);
        void            (*free_coherent)(struct ib_device *dev,
                                         size_t size, void *cpu_addr,
                                         u64 dma_handle);
};

struct iw_cm_verbs;

struct ib_device {
        struct device                *dma_device;

        char                          name[IB_DEVICE_NAME_MAX];

        struct list_head              event_handler_list;
        spinlock_t                    event_handler_lock;

        spinlock_t                    client_data_lock;
        struct list_head              core_list;
        struct list_head              client_data_list;

        struct ib_cache               cache;
        int                          *pkey_tbl_len;
        int                          *gid_tbl_len;

        int                           num_comp_vectors;

        struct iw_cm_verbs           *iwcm;

        int                        (*get_protocol_stats)(struct ib_device *device,
                                                         union rdma_protocol_stats *stats);
        int                        (*query_device)(struct ib_device *device,
                                                   struct ib_device_attr *device_attr);
        int                        (*query_port)(struct ib_device *device,
                                                 u8 port_num,
                                                 struct ib_port_attr *port_attr);
        enum rdma_link_layer       (*get_link_layer)(struct ib_device *device,
                                                     u8 port_num);
        int                        (*query_gid)(struct ib_device *device,
                                                u8 port_num, int index,
                                                union ib_gid *gid);
        int                        (*query_pkey)(struct ib_device *device,
                                                 u8 port_num, u16 index, u16 *pkey);
        int                        (*modify_device)(struct ib_device *device,
                                                    int device_modify_mask,
                                                    struct ib_device_modify *device_modify);
        int                        (*modify_port)(struct ib_device *device,
                                                  u8 port_num, int port_modify_mask,
                                                  struct ib_port_modify *port_modify);
        struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
                                                     struct ib_udata *udata);
        int                        (*dealloc_ucontext)(struct ib_ucontext *context);
        int                        (*mmap)(struct ib_ucontext *context,
                                           struct vm_area_struct *vma);
        struct ib_pd *             (*alloc_pd)(struct ib_device *device,
                                               struct ib_ucontext *context,
                                               struct ib_udata *udata);
        int                        (*dealloc_pd)(struct ib_pd *pd);
        struct ib_ah *             (*create_ah)(struct ib_pd *pd,
                                                struct ib_ah_attr *ah_attr);
        int                        (*modify_ah)(struct ib_ah *ah,
                                                struct ib_ah_attr *ah_attr);
        int                        (*query_ah)(struct ib_ah *ah,
                                               struct ib_ah_attr *ah_attr);
        int                        (*destroy_ah)(struct ib_ah *ah);
        struct ib_srq *            (*create_srq)(struct ib_pd *pd,
                                                 struct ib_srq_init_attr *srq_init_attr,
                                                 struct ib_udata *udata);
        int                        (*modify_srq)(struct ib_srq *srq,
                                                 struct ib_srq_attr *srq_attr,
                                                 enum ib_srq_attr_mask srq_attr_mask,
                                                 struct ib_udata *udata);
        int                        (*query_srq)(struct ib_srq *srq,
                                                struct ib_srq_attr *srq_attr);
        int                        (*destroy_srq)(struct ib_srq *srq);
        int                        (*post_srq_recv)(struct ib_srq *srq,
                                                    struct ib_recv_wr *recv_wr,
                                                    struct ib_recv_wr **bad_recv_wr);
        struct ib_qp *             (*create_qp)(struct ib_pd *pd,
                                                struct ib_qp_init_attr *qp_init_attr,
                                                struct ib_udata *udata);
        int                        (*modify_qp)(struct ib_qp *qp,
                                                struct ib_qp_attr *qp_attr,
                                                int qp_attr_mask,
                                                struct ib_udata *udata);
        int                        (*query_qp)(struct ib_qp *qp,
                                               struct ib_qp_attr *qp_attr,
                                               int qp_attr_mask,
                                               struct ib_qp_init_attr *qp_init_attr);
        int                        (*destroy_qp)(struct ib_qp *qp);
        int                        (*post_send)(struct ib_qp *qp,
                                                struct ib_send_wr *send_wr,
                                                struct ib_send_wr **bad_send_wr);
        int                        (*post_recv)(struct ib_qp *qp,
                                                struct ib_recv_wr *recv_wr,
                                                struct ib_recv_wr **bad_recv_wr);
        struct ib_cq *             (*create_cq)(struct ib_device *device, int cqe,
                                                int comp_vector,
                                                struct ib_ucontext *context,
                                                struct ib_udata *udata);
        int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
                                                u16 cq_period);
        int                        (*destroy_cq)(struct ib_cq *cq);
        int                        (*resize_cq)(struct ib_cq *cq, int cqe,
                                                struct ib_udata *udata);
        int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
                                              struct ib_wc *wc);
        int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
        int                        (*req_notify_cq)(struct ib_cq *cq,
                                                    enum ib_cq_notify_flags flags);
        int                        (*req_ncomp_notif)(struct ib_cq *cq,
                                                      int wc_cnt);
        struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
                                                 int mr_access_flags);
        struct ib_mr *             (*reg_phys_mr)(struct ib_pd *pd,
                                                  struct ib_phys_buf *phys_buf_array,
                                                  int num_phys_buf,
                                                  int mr_access_flags,
                                                  u64 *iova_start);
        struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
                                                  u64 start, u64 length,
                                                  u64 virt_addr,
                                                  int mr_access_flags,
                                                  struct ib_udata *udata);
        int                        (*query_mr)(struct ib_mr *mr,
                                               struct ib_mr_attr *mr_attr);
        int                        (*dereg_mr)(struct ib_mr *mr);
        struct ib_mr *             (*alloc_fast_reg_mr)(struct ib_pd *pd,
                                               int max_page_list_len);
        struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
                                                                   int page_list_len);
        void                       (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
        int                        (*rereg_phys_mr)(struct ib_mr *mr,
                                                    int mr_rereg_mask,
                                                    struct ib_pd *pd,
                                                    struct ib_phys_buf *phys_buf_array,
                                                    int num_phys_buf,
                                                    int mr_access_flags,
                                                    u64 *iova_start);
        struct ib_mw *             (*alloc_mw)(struct ib_pd *pd);
        int                        (*bind_mw)(struct ib_qp *qp,
                                              struct ib_mw *mw,
                                              struct ib_mw_bind *mw_bind);
        int                        (*dealloc_mw)(struct ib_mw *mw);
        struct ib_fmr *            (*alloc_fmr)(struct ib_pd *pd,
                                                int mr_access_flags,
                                                struct ib_fmr_attr *fmr_attr);
        int                        (*map_phys_fmr)(struct ib_fmr *fmr,
                                                   u64 *page_list, int list_len,
                                                   u64 iova);
        int                        (*unmap_fmr)(struct list_head *fmr_list);
        int                        (*dealloc_fmr)(struct ib_fmr *fmr);
        int                        (*attach_mcast)(struct ib_qp *qp,
                                                   union ib_gid *gid,
                                                   u16 lid);
        int                        (*detach_mcast)(struct ib_qp *qp,
                                                   union ib_gid *gid,
                                                   u16 lid);
        int                        (*process_mad)(struct ib_device *device,
                                                  int process_mad_flags,
                                                  u8 port_num,
                                                  struct ib_wc *in_wc,
                                                  struct ib_grh *in_grh,
                                                  struct ib_mad *in_mad,
                                                  struct ib_mad *out_mad);
        struct ib_xrcd *           (*alloc_xrcd)(struct ib_device *device,
                                                 struct ib_ucontext *ucontext,
                                                 struct ib_udata *udata);
        int                        (*dealloc_xrcd)(struct ib_xrcd *xrcd);

        struct ib_dma_mapping_ops   *dma_ops;

        struct module               *owner;
        struct device                dev;
        struct kobject               *ports_parent;
        struct list_head             port_list;

        enum {
                IB_DEV_UNINITIALIZED,
                IB_DEV_REGISTERED,
                IB_DEV_UNREGISTERED
        }                            reg_state;

        int                          uverbs_abi_ver;
        u64                          uverbs_cmd_mask;

        char                         node_desc[64];
        __be64                       node_guid;
        u32                          local_dma_lkey;
        u8                           node_type;
        u8                           phys_port_cnt;
};

struct ib_client {
        char  *name;
        void (*add)   (struct ib_device *);
        void (*remove)(struct ib_device *);

        struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

int ib_register_device(struct ib_device *device,
                       int (*port_callback)(struct ib_device *,
                                            u8, struct kobject *));
void ib_unregister_device(struct ib_device *device);
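
/*
 * Typical provider flow (illustrative sketch, not from the original
 * source): a hardware driver embeds struct ib_device as the first member
 * of its own device structure, fills in the verbs methods, and registers
 * it.  "struct my_hca" and its "ibdev" member are hypothetical.
 *
 *	struct my_hca *hca =
 *		(struct my_hca *) ib_alloc_device(sizeof(*hca));
 *
 *	(fill in hca->ibdev.name, node_type, verbs methods, ...)
 *	ib_register_device(&hca->ibdev, NULL);
 *	...
 *	ib_unregister_device(&hca->ibdev);
 *	ib_dealloc_device(&hca->ibdev);
 */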

int ib_register_client   (struct ib_client *client);
void ib_unregister_client(struct ib_client *client);
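
/*
 * Example (illustrative sketch): a minimal client.  Once registered, the
 * core invokes ->add() for every existing and future device; the names
 * "my_client", my_add_one() and my_remove_one() are hypothetical.
 *
 *	static void my_add_one(struct ib_device *device)    { ... }
 *	static void my_remove_one(struct ib_device *device) { ... }
 *
 *	static struct ib_client my_client = {
 *		.name   = "my_client",
 *		.add    = my_add_one,
 *		.remove = my_remove_one,
 *	};
 *
 *	ib_register_client(&my_client);
 */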

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
                         void *data);

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
        return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
        return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}
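
/*
 * Example (illustrative): a driver verb can use these helpers to exchange
 * a private command/response pair with userspace via the udata passed in
 * from uverbs.  The request/response structure types are hypothetical.
 *
 *	struct my_create_cq_req  req;
 *	struct my_create_cq_resp resp;
 *
 *	if (ib_copy_from_udata(&req, udata, sizeof(req)))
 *		return ERR_PTR(-EFAULT);
 *	...
 *	if (ib_copy_to_udata(udata, &resp, sizeof(resp)))
 *		goto err_free;
 */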

/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
                       enum ib_qp_type type, enum ib_qp_attr_mask mask);
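
/*
 * Example (illustrative): a driver's modify_qp method validating the
 * requested transition before touching hardware.  "qp->state" stands for
 * the driver's cached QP state, a driver-internal field that is not part
 * of this header.
 *
 *	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state :
 *						   qp->state;
 *	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 *
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
 *				attr_mask))
 *		return -EINVAL;
 */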

int ib_register_event_handler  (struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_device(struct ib_device *device,
                    struct ib_device_attr *device_attr);

int ib_query_port(struct ib_device *device,
                  u8 port_num, struct ib_port_attr *port_attr);
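
/*
 * Example (illustrative): checking whether a port has come up.
 *
 *	struct ib_port_attr attr;
 *
 *	if (!ib_query_port(device, port_num, &attr) &&
 *	    attr.state == IB_PORT_ACTIVE)
 *		pr_info("%s port %d active, LID 0x%x\n",
 *			device->name, port_num, attr.lid);
 */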

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
                                               u8 port_num);

int ib_query_gid(struct ib_device *device,
                 u8 port_num, int index, union ib_gid *gid);

int ib_query_pkey(struct ib_device *device,
                  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
                     int device_modify_mask,
                     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
                   u8 port_num, int port_modify_mask,
                   struct ib_port_modify *port_modify);

int ib_find_gid(struct ib_device *device, union ib_gid *gid,
                u8 *port_num, u16 *index);

int ib_find_pkey(struct ib_device *device,
                 u8 port_num, u16 pkey, u16 *index);

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device);
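
/*
 * Example (illustrative): allocating a PD is typically the first step a
 * kernel consumer takes on a new device.
 *
 *	struct ib_pd *pd = ib_alloc_pd(device);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...
 *	ib_dealloc_pd(pd);
 */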

/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 */
int ib_dealloc_pd(struct ib_pd *pd);

/**
 * ib_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);

/**
 * ib_init_ah_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
                       struct ib_grh *grh, struct ib_ah_attr *ah_attr);

/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
                                   struct ib_grh *grh, u8 port_num);
1384
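/*
 * Illustrative sketch (not part of the original header): preparing a UD
 * reply to a received message.  The reply_wr variable and my_qkey value
 * are hypothetical; src_qp and wc_flags are real struct ib_wc fields.
 */
#if 0	/* example only */
        struct ib_ah *ah;

        /* grh is only examined when wc->wc_flags has IB_WC_GRH set */
        ah = ib_create_ah_from_wc(pd, wc, grh, port_num);
        if (IS_ERR(ah))
                return PTR_ERR(ah);
        reply_wr.wr.ud.ah          = ah;
        reply_wr.wr.ud.remote_qpn  = wc->src_qp;
        reply_wr.wr.ud.remote_qkey = my_qkey;
#endif
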
/**
 * ib_modify_ah - Modifies the address vector associated with an address
 *   handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 *   address handle.
 */
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_query_ah - Queries the address vector associated with an address
 *   handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 *   handle.
 */
int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_destroy_ah - Destroys an address handle.
 * @ah: The address handle to destroy.
 */
int ib_destroy_ah(struct ib_ah *ah);

/**
 * ib_create_srq - Creates an SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 *
 * srq_init_attr->attr.max_wr and srq_init_attr->attr.max_sge are read
 * to determine the requested size of the SRQ, and set to the actual
 * values allocated on return.  If ib_create_srq() succeeds, then max_wr
 * and max_sge will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
                             struct ib_srq_init_attr *srq_init_attr);

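/*
 * Illustrative sketch (not part of the original header): creating an SRQ
 * and reading back the actual capacities.  The sizes are example values.
 */
#if 0	/* example only */
        struct ib_srq_init_attr srq_init_attr = {
                .attr = {
                        .max_wr  = 256,
                        .max_sge = 1,
                },
        };
        struct ib_srq *srq = ib_create_srq(pd, &srq_init_attr);

        if (IS_ERR(srq))
                return PTR_ERR(srq);
        /* srq_init_attr.attr.max_wr/max_sge now hold the granted values */
#endif
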
/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
                  struct ib_srq_attr *srq_attr,
                  enum ib_srq_attr_mask srq_attr_mask);

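/*
 * Illustrative sketch (not part of the original header): arming the SRQ
 * limit so an IB_EVENT_SRQ_LIMIT_REACHED event fires when the queue runs
 * low.  The threshold is an example value.
 */
#if 0	/* example only */
        struct ib_srq_attr attr = {
                .srq_limit = 16,        /* event when queued recvs drop below 16 */
        };
        int err = ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);
#endif
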
/**
 * ib_query_srq - Returns the attribute list and current values for the
 *   specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
                 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ib_destroy_srq(struct ib_srq *srq);

/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the SRQ.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
                                   struct ib_recv_wr *recv_wr,
                                   struct ib_recv_wr **bad_recv_wr)
{
        return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}

/**
 * ib_create_qp - Creates a QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
                           struct ib_qp_init_attr *qp_init_attr);

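/*
 * Illustrative sketch (not part of the original header): creating an RC QP.
 * send_cq/recv_cq are previously created CQs; the capacities are example
 * values and my_qp_event_handler is hypothetical.
 */
#if 0	/* example only */
        struct ib_qp_init_attr qp_init_attr = {
                .event_handler = my_qp_event_handler,
                .send_cq       = send_cq,
                .recv_cq       = recv_cq,
                .cap = {
                        .max_send_wr  = 16,
                        .max_recv_wr  = 16,
                        .max_send_sge = 1,
                        .max_recv_sge = 1,
                },
                .sq_sig_type   = IB_SIGNAL_REQ_WR,
                .qp_type       = IB_QPT_RC,
        };
        struct ib_qp *qp = ib_create_qp(pd, &qp_init_attr);

        if (IS_ERR(qp))
                return PTR_ERR(qp);
#endif
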
/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 *   transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
                 struct ib_qp_attr *qp_attr,
                 int qp_attr_mask);

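/*
 * Illustrative sketch (not part of the original header): the RESET->INIT
 * transition.  The pkey index, port, and access flags are example values;
 * the mask bits and ib_qp_attr fields are defined earlier in this header.
 */
#if 0	/* example only */
        struct ib_qp_attr qp_attr = {
                .qp_state        = IB_QPS_INIT,
                .pkey_index      = 0,
                .port_num        = 1,
                .qp_access_flags = IB_ACCESS_REMOTE_WRITE,
        };
        int err = ib_modify_qp(qp, &qp_attr,
                               IB_QP_STATE | IB_QP_PKEY_INDEX |
                               IB_QP_PORT | IB_QP_ACCESS_FLAGS);
#endif
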
/**
 * ib_query_qp - Returns the attribute list and current values for the
 *   specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
                struct ib_qp_attr *qp_attr,
                int qp_attr_mask,
                struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp - Destroys the specified QP.
 * @qp: The QP to destroy.
 */
int ib_destroy_qp(struct ib_qp *qp);

/**
 * ib_open_qp - Obtains a reference to an existing sharable QP.
 * @xrcd: XRC domain
 * @qp_open_attr: Attributes identifying the QP to open.
 *
 * Returns a reference to a sharable QP.
 */
struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
                         struct ib_qp_open_attr *qp_open_attr);

/**
 * ib_close_qp - Release an external reference to a QP.
 * @qp: The QP handle to release.
 *
 * The opened QP handle is released by the caller.  The underlying
 * shared QP is not destroyed until all internal references are released.
 */
int ib_close_qp(struct ib_qp *qp);

/**
 * ib_post_send - Posts a list of work requests to the send queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 *
 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
 * error is returned, the QP state shall not be affected,
 * ib_post_send() will return an immediate error after queueing any
 * earlier work requests in the list.
 */
static inline int ib_post_send(struct ib_qp *qp,
                               struct ib_send_wr *send_wr,
                               struct ib_send_wr **bad_send_wr)
{
        return qp->device->post_send(qp, send_wr, bad_send_wr);
}

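/*
 * Illustrative sketch (not part of the original header): posting one
 * signaled SEND of a single registered buffer.  The DMA address and lkey
 * are assumed to come from the DMA-mapping and MR calls declared below.
 */
#if 0	/* example only */
static int example_post_send(struct ib_qp *qp, u64 dma_addr, u32 len, u32 lkey)
{
        struct ib_sge sge = {
                .addr   = dma_addr,
                .length = len,
                .lkey   = lkey,
        };
        struct ib_send_wr wr = {
                .wr_id      = 1,                /* returned in the completion */
                .sg_list    = &sge,
                .num_sge    = 1,
                .opcode     = IB_WR_SEND,
                .send_flags = IB_SEND_SIGNALED, /* request a completion */
        };
        struct ib_send_wr *bad_wr;

        return ib_post_send(qp, &wr, &bad_wr);
}
#endif
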
/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
                               struct ib_recv_wr *recv_wr,
                               struct ib_recv_wr **bad_recv_wr)
{
        return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}

/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cqe: The minimum size of the CQ.
 * @comp_vector: Completion vector used to signal completion events.
 *   Must be >= 0 and < context->num_comp_vectors.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *ib_create_cq(struct ib_device *device,
                           ib_comp_handler comp_handler,
                           void (*event_handler)(struct ib_event *, void *),
                           void *cq_context, int cqe, int comp_vector);

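/*
 * Illustrative sketch (not part of the original header): creating a CQ
 * sized for at least 128 entries on completion vector 0.  The
 * my_comp_handler callback and my_context pointer are hypothetical;
 * cq->cqe reports the actual size granted.
 */
#if 0	/* example only */
        struct ib_cq *cq;

        cq = ib_create_cq(device, my_comp_handler, NULL, my_context, 128, 0);
        if (IS_ERR(cq))
                return PTR_ERR(cq);
        /* cq->cqe is now >= 128 */
#endif
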
/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * ib_modify_cq - Modifies moderation params of the CQ
 * @cq: The CQ to modify.
 * @cq_count: number of CQEs that will trigger an event
 * @cq_period: max period of time in usec before triggering an event
 */
int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);

/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
                             struct ib_wc *wc)
{
        return cq->device->poll_cq(cq, num_entries, wc);
}

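/*
 * Illustrative sketch (not part of the original header): batch polling.
 * handle_wc() is a hypothetical consumer of successful completions.
 */
#if 0	/* example only */
        struct ib_wc wc[8];
        int i, n;

        n = ib_poll_cq(cq, 8, wc);      /* n < 0: error; 0 <= n < 8: CQ drained */
        for (i = 0; i < n; i++) {
                if (wc[i].status != IB_WC_SUCCESS)
                        pr_err("wr_id %llu failed: %d\n",
                               (unsigned long long)wc[i].wr_id, wc[i].status);
                else
                        handle_wc(&wc[i]);
        }
#endif
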
/**
 * ib_peek_cq - Returns the number of unreaped completions currently
 *   on the specified CQ.
 * @cq: The CQ to peek.
 * @wc_cnt: A minimum number of unreaped completions to check for.
 *
 * If the number of unreaped completions is greater than or equal to wc_cnt,
 * this function returns wc_cnt, otherwise, it returns the actual number of
 * unreaped completions.
 */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event.  In
 *        this case it is guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in.  It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ.  This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
                                   enum ib_cq_notify_flags flags)
{
        return cq->device->req_notify_cq(cq, flags);
}

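/*
 * Illustrative sketch (not part of the original header): the drain/re-arm
 * idiom built on the return-value contract above.  handle_wc() is
 * hypothetical.
 */
#if 0	/* example only */
static void example_drain_and_rearm(struct ib_cq *cq)
{
        struct ib_wc wc;

        do {
                while (ib_poll_cq(cq, 1, &wc) > 0)
                        handle_wc(&wc);
                /*
                 * A return > 0 here means a completion may have slipped in
                 * between the final poll and the rearm, so poll again.
                 */
        } while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
                                      IB_CQ_REPORT_MISSED_EVENTS) > 0);
}
#endif
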
/**
 * ib_req_ncomp_notif - Request completion notification when there are
 *   at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 *   CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
        return cq->device->req_ncomp_notif ?
                cq->device->req_ncomp_notif(cq, wc_cnt) :
                -ENOSYS;
}

/**
 * ib_get_dma_mr - Returns a memory region for system memory that is
 *   usable for DMA.
 * @pd: The protection domain associated with the memory region.
 * @mr_access_flags: Specifies the memory access rights.
 *
 * Note that the ib_dma_*() functions defined below must be used
 * to create/destroy addresses used with the Lkey or Rkey returned
 * by ib_get_dma_mr().
 */
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);

/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
        if (dev->dma_ops)
                return dev->dma_ops->mapping_error(dev, dma_addr);
        return dma_mapping_error(dev->dma_device, dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
                                    void *cpu_addr, size_t size,
                                    enum dma_data_direction direction)
{
        if (dev->dma_ops)
                return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
        return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

/**
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_single(struct ib_device *dev,
                                       u64 addr, size_t size,
                                       enum dma_data_direction direction)
{
        if (dev->dma_ops)
                dev->dma_ops->unmap_single(dev, addr, size, direction);
        else
                dma_unmap_single(dev->dma_device, addr, size, direction);
}

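/*
 * Illustrative sketch (not part of the original header): mapping a kernel
 * buffer for a send, with the mandatory error check.  buf/len are assumed
 * to describe a kmalloc'ed buffer.
 */
#if 0	/* example only */
        u64 dma_addr;

        dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(dev, dma_addr))
                return -ENOMEM;
        /* ... post a work request carrying dma_addr ... */
        ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
#endif
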
static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
                                          void *cpu_addr, size_t size,
                                          enum dma_data_direction direction,
                                          struct dma_attrs *attrs)
{
        return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
                                    direction, attrs);
}

static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
                                             u64 addr, size_t size,
                                             enum dma_data_direction direction,
                                             struct dma_attrs *attrs)
{
        dma_unmap_single_attrs(dev->dma_device, addr, size,
                               direction, attrs);
}

/**
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_page(struct ib_device *dev,
                                  struct page *page,
                                  unsigned long offset,
                                  size_t size,
                                  enum dma_data_direction direction)
{
        if (dev->dma_ops)
                return dev->dma_ops->map_page(dev, page, offset, size, direction);
        return dma_map_page(dev->dma_device, page, offset, size, direction);
}

/**
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_page(struct ib_device *dev,
                                     u64 addr, size_t size,
                                     enum dma_data_direction direction)
{
        if (dev->dma_ops)
                dev->dma_ops->unmap_page(dev, addr, size, direction);
        else
                dma_unmap_page(dev->dma_device, addr, size, direction);
}

/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
                                struct scatterlist *sg, int nents,
                                enum dma_data_direction direction)
{
        if (dev->dma_ops)
                return dev->dma_ops->map_sg(dev, sg, nents, direction);
        return dma_map_sg(dev->dma_device, sg, nents, direction);
}

/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
                                   struct scatterlist *sg, int nents,
                                   enum dma_data_direction direction)
{
        if (dev->dma_ops)
                dev->dma_ops->unmap_sg(dev, sg, nents, direction);
        else
                dma_unmap_sg(dev->dma_device, sg, nents, direction);
}

static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
                                      struct scatterlist *sg, int nents,
                                      enum dma_data_direction direction,
                                      struct dma_attrs *attrs)
{
        return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}

static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
                                         struct scatterlist *sg, int nents,
                                         enum dma_data_direction direction,
                                         struct dma_attrs *attrs)
{
        dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
}

/**
 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 */
static inline u64 ib_sg_dma_address(struct ib_device *dev,
                                    struct scatterlist *sg)
{
        if (dev->dma_ops)
                return dev->dma_ops->dma_address(dev, sg);
        return sg_dma_address(sg);
}

/**
 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 */
static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
                                         struct scatterlist *sg)
{
        if (dev->dma_ops)
                return dev->dma_ops->dma_len(dev, sg);
        return sg_dma_len(sg);
}

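/*
 * Illustrative sketch (not part of the original header): mapping a
 * scatterlist and walking the mapped entries.  for_each_sg() comes from
 * <linux/scatterlist.h>; build_sge() is hypothetical.
 */
#if 0	/* example only */
        struct scatterlist *s;
        int i, n;

        n = ib_dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
        if (!n)
                return -ENOMEM;
        for_each_sg(sglist, s, n, i)
                build_sge(i, ib_sg_dma_address(dev, s),
                             ib_sg_dma_len(dev, s));
        /* ... later ... */
        ib_dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
#endif
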
/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
                                              u64 addr,
                                              size_t size,
                                              enum dma_data_direction dir)
{
        if (dev->dma_ops)
                dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
        else
                dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
                                                 u64 addr,
                                                 size_t size,
                                                 enum dma_data_direction dir)
{
        if (dev->dma_ops)
                dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
        else
                dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 * @dev: The device for which the DMA address is requested
 * @size: The size of the region to allocate in bytes
 * @dma_handle: A pointer for returning the DMA address of the region
 * @flag: memory allocator flags
 */
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
                                          size_t size,
                                          u64 *dma_handle,
                                          gfp_t flag)
{
        if (dev->dma_ops)
                return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
        else {
                dma_addr_t handle;
                void *ret;

                ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
                *dma_handle = handle;
                return ret;
        }
}

/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
                                        size_t size, void *cpu_addr,
                                        u64 dma_handle)
{
        if (dev->dma_ops)
                dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
        else
                dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}

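/*
 * Illustrative sketch (not part of the original header): a coherent
 * allocation whose DMA handle is handed to the HCA.  The 4 KB size is an
 * example value.
 */
#if 0	/* example only */
        u64 dma_handle;
        void *buf;

        buf = ib_dma_alloc_coherent(dev, 4096, &dma_handle, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        /* ... give dma_handle to the device ... */
        ib_dma_free_coherent(dev, 4096, buf, dma_handle);
#endif
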
/**
 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
 *   by an HCA.
 * @pd: The protection domain assigned to the registered region.
 * @phys_buf_array: Specifies a list of physical buffers to use in the
 *   memory region.
 * @num_phys_buf: Specifies the size of the phys_buf_array.
 * @mr_access_flags: Specifies the memory access rights.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
                             struct ib_phys_buf *phys_buf_array,
                             int num_phys_buf,
                             int mr_access_flags,
                             u64 *iova_start);

/**
 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
 *   Conceptually, this call deregisters the memory region and then
 *   registers the physical memory region anew.  Where possible,
 *   resources are reused instead of deallocated and reallocated.
 * @mr: The memory region to modify.
 * @mr_rereg_mask: A bit-mask used to indicate which of the following
 *   properties of the memory region are being modified.
 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
 *   the new protection domain to associate with the memory region,
 *   otherwise, this parameter is ignored.
 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies a list of physical buffers to use in the new
 *   translation, otherwise, this parameter is ignored.
 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies the size of the phys_buf_array, otherwise, this
 *   parameter is ignored.
 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
 *   field specifies the new memory access rights, otherwise, this
 *   parameter is ignored.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
int ib_rereg_phys_mr(struct ib_mr *mr,
                     int mr_rereg_mask,
                     struct ib_pd *pd,
                     struct ib_phys_buf *phys_buf_array,
                     int num_phys_buf,
                     int mr_access_flags,
                     u64 *iova_start);

/**
 * ib_query_mr - Retrieves information about a specific memory region.
 * @mr: The memory region to retrieve information about.
 * @mr_attr: The attributes of the specified memory region.
 */
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);

/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 */
int ib_dereg_mr(struct ib_mr *mr);

/**
 * ib_alloc_fast_reg_mr - Allocates a memory region usable with the
 *   IB_WR_FAST_REG_MR send work request.
 * @pd: The protection domain associated with the region.
 * @max_page_list_len: requested max physical buffer list length to be
 *   used with fast register work requests for this MR.
 */
struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);

/**
 * ib_alloc_fast_reg_page_list - Allocates a page list array
 * @device: ib device pointer.
 * @page_list_len: size of the page list array to be allocated.
 *
 * This allocates and returns a struct ib_fast_reg_page_list * and a
 * page_list array that is at least page_list_len in size.  The actual
 * size is returned in max_page_list_len.  The caller is responsible
 * for initializing the contents of the page_list array before posting
 * a send work request with the IB_WR_FAST_REG_MR opcode.
 *
 * The page_list array entries must be translated using one of the
 * ib_dma_*() functions just like the addresses passed to
 * ib_map_phys_fmr().  Once the ib_post_send() is issued, the struct
 * ib_fast_reg_page_list must not be modified by the caller until the
 * IB_WC_FAST_REG_MR work request completes.
 */
struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
                                struct ib_device *device, int page_list_len);

/**
 * ib_free_fast_reg_page_list - Deallocates a previously allocated
 *   page list array.
 * @page_list: struct ib_fast_reg_page_list pointer to be deallocated.
 */
void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);

/**
 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
 *   R_Key and L_Key.
 * @mr: struct ib_mr pointer to be updated.
 * @newkey: new key to be used.
 */
static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
{
        mr->lkey = (mr->lkey & 0xffffff00) | newkey;
        mr->rkey = (mr->rkey & 0xffffff00) | newkey;
}

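/*
 * Illustrative sketch (not part of the original header): building a
 * fast-register work request.  The wr.fast_reg field names follow the
 * ib_send_wr definition earlier in this header; npages, iova, and the
 * next_key counter are hypothetical, and page_list->page_list[] is
 * assumed to already hold ib_dma_*()-translated addresses.
 */
#if 0	/* example only */
        struct ib_send_wr fr_wr = {}, *bad_wr;

        ib_update_fast_reg_key(mr, next_key++);
        fr_wr.opcode                    = IB_WR_FAST_REG_MR;
        fr_wr.send_flags                = IB_SEND_SIGNALED;
        fr_wr.wr.fast_reg.iova_start    = iova;
        fr_wr.wr.fast_reg.page_list     = page_list;
        fr_wr.wr.fast_reg.page_list_len = npages;
        fr_wr.wr.fast_reg.page_shift    = PAGE_SHIFT;
        fr_wr.wr.fast_reg.length        = npages * PAGE_SIZE;
        fr_wr.wr.fast_reg.access_flags  = IB_ACCESS_REMOTE_READ;
        fr_wr.wr.fast_reg.rkey          = mr->rkey;
        err = ib_post_send(qp, &fr_wr, &bad_wr);
#endif
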
/**
 * ib_alloc_mw - Allocates a memory window.
 * @pd: The protection domain associated with the memory window.
 */
struct ib_mw *ib_alloc_mw(struct ib_pd *pd);

/**
 * ib_bind_mw - Posts a work request to the send queue of the specified
 *   QP, which binds the memory window to the given address range and
 *   remote access attributes.
 * @qp: QP to post the bind work request on.
 * @mw: The memory window to bind.
 * @mw_bind: Specifies information about the memory window, including
 *   its address range, remote access rights, and associated memory region.
 */
static inline int ib_bind_mw(struct ib_qp *qp,
                             struct ib_mw *mw,
                             struct ib_mw_bind *mw_bind)
{
        /* XXX reference counting in corresponding MR? */
        return mw->device->bind_mw ?
                mw->device->bind_mw(qp, mw, mw_bind) :
                -ENOSYS;
}

/**
 * ib_dealloc_mw - Deallocates a memory window.
 * @mw: The memory window to deallocate.
 */
int ib_dealloc_mw(struct ib_mw *mw);

/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
                            int mr_access_flags,
                            struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
                                  u64 *page_list, int list_len,
                                  u64 iova)
{
        return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}

/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A linked list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

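/*
 * Illustrative sketch (not part of the original header): the FMR
 * map/use/unmap cycle.  The pages[] contents and iova are hypothetical
 * and must be ib_dma_*()-translated addresses; fmr->list is the struct
 * ib_fmr list head consumed by ib_unmap_fmr().
 */
#if 0	/* example only */
        u64 pages[8];
        LIST_HEAD(fmr_list);
        int err;

        err = ib_map_phys_fmr(fmr, pages, 8, iova);
        if (err)
                return err;
        /* ... use fmr->lkey / fmr->rkey in work requests ... */
        list_add_tail(&fmr->list, &fmr_list);
        err = ib_unmap_fmr(&fmr_list);
#endif
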
/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);

/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_alloc_xrcd - Allocates an XRC domain.
 * @device: The device on which to allocate the XRC domain.
 */
struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);

/**
 * ib_dealloc_xrcd - Deallocates an XRC domain.
 * @xrcd: The XRC domain to deallocate.
 */
int ib_dealloc_xrcd(struct ib_xrcd *xrcd);

#endif /* IB_VERBS_H */