linux/include/rdma/ib_verbs.h
/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>

#include <asm/atomic.h>
#include <asm/uaccess.h>

union ib_gid {
        u8      raw[16];
        struct {
                __be64  subnet_prefix;
                __be64  interface_id;
        } global;
};

enum rdma_node_type {
        /* IB values map to NodeInfo:NodeType. */
        RDMA_NODE_IB_CA         = 1,
        RDMA_NODE_IB_SWITCH,
        RDMA_NODE_IB_ROUTER,
        RDMA_NODE_RNIC
};

enum rdma_transport_type {
        RDMA_TRANSPORT_IB,
        RDMA_TRANSPORT_IWARP
};

enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;

enum ib_device_cap_flags {
        IB_DEVICE_RESIZE_MAX_WR         = 1,
        IB_DEVICE_BAD_PKEY_CNTR         = (1<<1),
        IB_DEVICE_BAD_QKEY_CNTR         = (1<<2),
        IB_DEVICE_RAW_MULTI             = (1<<3),
        IB_DEVICE_AUTO_PATH_MIG         = (1<<4),
        IB_DEVICE_CHANGE_PHY_PORT       = (1<<5),
        IB_DEVICE_UD_AV_PORT_ENFORCE    = (1<<6),
        IB_DEVICE_CURR_QP_STATE_MOD     = (1<<7),
        IB_DEVICE_SHUTDOWN_PORT         = (1<<8),
        IB_DEVICE_INIT_TYPE             = (1<<9),
        IB_DEVICE_PORT_ACTIVE_EVENT     = (1<<10),
        IB_DEVICE_SYS_IMAGE_GUID        = (1<<11),
        IB_DEVICE_RC_RNR_NAK_GEN        = (1<<12),
        IB_DEVICE_SRQ_RESIZE            = (1<<13),
        IB_DEVICE_N_NOTIFY_CQ           = (1<<14),
        IB_DEVICE_LOCAL_DMA_LKEY        = (1<<15),
        IB_DEVICE_RESERVED              = (1<<16), /* old SEND_W_INV */
        IB_DEVICE_MEM_WINDOW            = (1<<17),
        /*
         * Devices should set IB_DEVICE_UD_IP_CSUM if they support
         * insertion of UDP and TCP checksum on outgoing UD IPoIB
         * messages and can verify the validity of checksum for
         * incoming messages.  Setting this flag implies that the
         * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
         */
        IB_DEVICE_UD_IP_CSUM            = (1<<18),
        IB_DEVICE_UD_TSO                = (1<<19),
        IB_DEVICE_MEM_MGT_EXTENSIONS    = (1<<21),
        IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
};

enum ib_atomic_cap {
        IB_ATOMIC_NONE,
        IB_ATOMIC_HCA,
        IB_ATOMIC_GLOB
};

struct ib_device_attr {
        u64                     fw_ver;
        __be64                  sys_image_guid;
        u64                     max_mr_size;
        u64                     page_size_cap;
        u32                     vendor_id;
        u32                     vendor_part_id;
        u32                     hw_ver;
        int                     max_qp;
        int                     max_qp_wr;
        int                     device_cap_flags;
        int                     max_sge;
        int                     max_sge_rd;
        int                     max_cq;
        int                     max_cqe;
        int                     max_mr;
        int                     max_pd;
        int                     max_qp_rd_atom;
        int                     max_ee_rd_atom;
        int                     max_res_rd_atom;
        int                     max_qp_init_rd_atom;
        int                     max_ee_init_rd_atom;
        enum ib_atomic_cap      atomic_cap;
        int                     max_ee;
        int                     max_rdd;
        int                     max_mw;
        int                     max_raw_ipv6_qp;
        int                     max_raw_ethy_qp;
        int                     max_mcast_grp;
        int                     max_mcast_qp_attach;
        int                     max_total_mcast_qp_attach;
        int                     max_ah;
        int                     max_fmr;
        int                     max_map_per_fmr;
        int                     max_srq;
        int                     max_srq_wr;
        int                     max_srq_sge;
        unsigned int            max_fast_reg_page_list_len;
        u16                     max_pkeys;
        u8                      local_ca_ack_delay;
};

enum ib_mtu {
        IB_MTU_256  = 1,
        IB_MTU_512  = 2,
        IB_MTU_1024 = 3,
        IB_MTU_2048 = 4,
        IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
        switch (mtu) {
        case IB_MTU_256:  return  256;
        case IB_MTU_512:  return  512;
        case IB_MTU_1024: return 1024;
        case IB_MTU_2048: return 2048;
        case IB_MTU_4096: return 4096;
        default:          return -1;
        }
}

enum ib_port_state {
        IB_PORT_NOP             = 0,
        IB_PORT_DOWN            = 1,
        IB_PORT_INIT            = 2,
        IB_PORT_ARMED           = 3,
        IB_PORT_ACTIVE          = 4,
        IB_PORT_ACTIVE_DEFER    = 5
};

enum ib_port_cap_flags {
        IB_PORT_SM                              = 1 <<  1,
        IB_PORT_NOTICE_SUP                      = 1 <<  2,
        IB_PORT_TRAP_SUP                        = 1 <<  3,
        IB_PORT_OPT_IPD_SUP                     = 1 <<  4,
        IB_PORT_AUTO_MIGR_SUP                   = 1 <<  5,
        IB_PORT_SL_MAP_SUP                      = 1 <<  6,
        IB_PORT_MKEY_NVRAM                      = 1 <<  7,
        IB_PORT_PKEY_NVRAM                      = 1 <<  8,
        IB_PORT_LED_INFO_SUP                    = 1 <<  9,
        IB_PORT_SM_DISABLED                     = 1 << 10,
        IB_PORT_SYS_IMAGE_GUID_SUP              = 1 << 11,
        IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP       = 1 << 12,
        IB_PORT_CM_SUP                          = 1 << 16,
        IB_PORT_SNMP_TUNNEL_SUP                 = 1 << 17,
        IB_PORT_REINIT_SUP                      = 1 << 18,
        IB_PORT_DEVICE_MGMT_SUP                 = 1 << 19,
        IB_PORT_VENDOR_CLASS_SUP                = 1 << 20,
        IB_PORT_DR_NOTICE_SUP                   = 1 << 21,
        IB_PORT_CAP_MASK_NOTICE_SUP             = 1 << 22,
        IB_PORT_BOOT_MGMT_SUP                   = 1 << 23,
        IB_PORT_LINK_LATENCY_SUP                = 1 << 24,
        IB_PORT_CLIENT_REG_SUP                  = 1 << 25
};

enum ib_port_width {
        IB_WIDTH_1X     = 1,
        IB_WIDTH_4X     = 2,
        IB_WIDTH_8X     = 4,
        IB_WIDTH_12X    = 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
        switch (width) {
        case IB_WIDTH_1X:  return  1;
        case IB_WIDTH_4X:  return  4;
        case IB_WIDTH_8X:  return  8;
        case IB_WIDTH_12X: return 12;
        default:          return -1;
        }
}

struct ib_protocol_stats {
        /* TBD... */
};

struct iw_protocol_stats {
        u64     ipInReceives;
        u64     ipInHdrErrors;
        u64     ipInTooBigErrors;
        u64     ipInNoRoutes;
        u64     ipInAddrErrors;
        u64     ipInUnknownProtos;
        u64     ipInTruncatedPkts;
        u64     ipInDiscards;
        u64     ipInDelivers;
        u64     ipOutForwDatagrams;
        u64     ipOutRequests;
        u64     ipOutDiscards;
        u64     ipOutNoRoutes;
        u64     ipReasmTimeout;
        u64     ipReasmReqds;
        u64     ipReasmOKs;
        u64     ipReasmFails;
        u64     ipFragOKs;
        u64     ipFragFails;
        u64     ipFragCreates;
        u64     ipInMcastPkts;
        u64     ipOutMcastPkts;
        u64     ipInBcastPkts;
        u64     ipOutBcastPkts;

        u64     tcpRtoAlgorithm;
        u64     tcpRtoMin;
        u64     tcpRtoMax;
        u64     tcpMaxConn;
        u64     tcpActiveOpens;
        u64     tcpPassiveOpens;
        u64     tcpAttemptFails;
        u64     tcpEstabResets;
        u64     tcpCurrEstab;
        u64     tcpInSegs;
        u64     tcpOutSegs;
        u64     tcpRetransSegs;
        u64     tcpInErrs;
        u64     tcpOutRsts;
};

union rdma_protocol_stats {
        struct ib_protocol_stats        ib;
        struct iw_protocol_stats        iw;
};

struct ib_port_attr {
        enum ib_port_state      state;
        enum ib_mtu             max_mtu;
        enum ib_mtu             active_mtu;
        int                     gid_tbl_len;
        u32                     port_cap_flags;
        u32                     max_msg_sz;
        u32                     bad_pkey_cntr;
        u32                     qkey_viol_cntr;
        u16                     pkey_tbl_len;
        u16                     lid;
        u16                     sm_lid;
        u8                      lmc;
        u8                      max_vl_num;
        u8                      sm_sl;
        u8                      subnet_timeout;
        u8                      init_type_reply;
        u8                      active_width;
        u8                      active_speed;
        u8                      phys_state;
};

enum ib_device_modify_flags {
        IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
        IB_DEVICE_MODIFY_NODE_DESC      = 1 << 1
};

struct ib_device_modify {
        u64     sys_image_guid;
        char    node_desc[64];
};

enum ib_port_modify_flags {
        IB_PORT_SHUTDOWN                = 1,
        IB_PORT_INIT_TYPE               = (1<<2),
        IB_PORT_RESET_QKEY_CNTR         = (1<<3)
};

struct ib_port_modify {
        u32     set_port_cap_mask;
        u32     clr_port_cap_mask;
        u8      init_type;
};

enum ib_event_type {
        IB_EVENT_CQ_ERR,
        IB_EVENT_QP_FATAL,
        IB_EVENT_QP_REQ_ERR,
        IB_EVENT_QP_ACCESS_ERR,
        IB_EVENT_COMM_EST,
        IB_EVENT_SQ_DRAINED,
        IB_EVENT_PATH_MIG,
        IB_EVENT_PATH_MIG_ERR,
        IB_EVENT_DEVICE_FATAL,
        IB_EVENT_PORT_ACTIVE,
        IB_EVENT_PORT_ERR,
        IB_EVENT_LID_CHANGE,
        IB_EVENT_PKEY_CHANGE,
        IB_EVENT_SM_CHANGE,
        IB_EVENT_SRQ_ERR,
        IB_EVENT_SRQ_LIMIT_REACHED,
        IB_EVENT_QP_LAST_WQE_REACHED,
        IB_EVENT_CLIENT_REREGISTER
};

struct ib_event {
        struct ib_device        *device;
        union {
                struct ib_cq    *cq;
                struct ib_qp    *qp;
                struct ib_srq   *srq;
                u8              port_num;
        } element;
        enum ib_event_type      event;
};

struct ib_event_handler {
        struct ib_device *device;
        void            (*handler)(struct ib_event_handler *, struct ib_event *);
        struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)          \
        do {                                                    \
                (_ptr)->device  = _device;                      \
                (_ptr)->handler = _handler;                     \
                INIT_LIST_HEAD(&(_ptr)->list);                  \
        } while (0)
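
/*
 * Example (illustrative sketch, compiled out): a consumer typically
 * embeds a struct ib_event_handler, initializes it with
 * INIT_IB_EVENT_HANDLER(), and registers it with
 * ib_register_event_handler(), declared later in this file.  The
 * names below are hypothetical; printk() assumes <linux/kernel.h>.
 */
#if 0
static struct ib_event_handler example_handler;

static void example_event_handler(struct ib_event_handler *handler,
                                  struct ib_event *event)
{
        if (event->event == IB_EVENT_PORT_ACTIVE)
                printk(KERN_INFO "%s: port %d is active\n",
                       event->device->name, event->element.port_num);
}

static int example_register(struct ib_device *device)
{
        INIT_IB_EVENT_HANDLER(&example_handler, device, example_event_handler);
        return ib_register_event_handler(&example_handler);
}
#endif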

struct ib_global_route {
        union ib_gid    dgid;
        u32             flow_label;
        u8              sgid_index;
        u8              hop_limit;
        u8              traffic_class;
};

struct ib_grh {
        __be32          version_tclass_flow;
        __be16          paylen;
        u8              next_hdr;
        u8              hop_limit;
        union ib_gid    sgid;
        union ib_gid    dgid;
};

enum {
        IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE       cpu_to_be16(0xFFFF)

enum ib_ah_flags {
        IB_AH_GRH       = 1
};

enum ib_rate {
        IB_RATE_PORT_CURRENT = 0,
        IB_RATE_2_5_GBPS = 2,
        IB_RATE_5_GBPS   = 5,
        IB_RATE_10_GBPS  = 3,
        IB_RATE_20_GBPS  = 6,
        IB_RATE_30_GBPS  = 4,
        IB_RATE_40_GBPS  = 7,
        IB_RATE_60_GBPS  = 8,
        IB_RATE_80_GBPS  = 9,
        IB_RATE_120_GBPS = 10
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;

struct ib_ah_attr {
        struct ib_global_route  grh;
        u16                     dlid;
        u8                      sl;
        u8                      src_path_bits;
        u8                      static_rate;
        u8                      ah_flags;
        u8                      port_num;
};

enum ib_wc_status {
        IB_WC_SUCCESS,
        IB_WC_LOC_LEN_ERR,
        IB_WC_LOC_QP_OP_ERR,
        IB_WC_LOC_EEC_OP_ERR,
        IB_WC_LOC_PROT_ERR,
        IB_WC_WR_FLUSH_ERR,
        IB_WC_MW_BIND_ERR,
        IB_WC_BAD_RESP_ERR,
        IB_WC_LOC_ACCESS_ERR,
        IB_WC_REM_INV_REQ_ERR,
        IB_WC_REM_ACCESS_ERR,
        IB_WC_REM_OP_ERR,
        IB_WC_RETRY_EXC_ERR,
        IB_WC_RNR_RETRY_EXC_ERR,
        IB_WC_LOC_RDD_VIOL_ERR,
        IB_WC_REM_INV_RD_REQ_ERR,
        IB_WC_REM_ABORT_ERR,
        IB_WC_INV_EECN_ERR,
        IB_WC_INV_EEC_STATE_ERR,
        IB_WC_FATAL_ERR,
        IB_WC_RESP_TIMEOUT_ERR,
        IB_WC_GENERAL_ERR
};

enum ib_wc_opcode {
        IB_WC_SEND,
        IB_WC_RDMA_WRITE,
        IB_WC_RDMA_READ,
        IB_WC_COMP_SWAP,
        IB_WC_FETCH_ADD,
        IB_WC_BIND_MW,
        IB_WC_LSO,
        IB_WC_LOCAL_INV,
        IB_WC_FAST_REG_MR,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
        IB_WC_RECV                      = 1 << 7,
        IB_WC_RECV_RDMA_WITH_IMM
};
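
/*
 * Example (illustrative sketch, compiled out): because IB_WC_RECV is a
 * single bit, a completion loop can classify receive completions with
 * one mask test instead of enumerating every receive opcode.
 */
#if 0
static int example_is_recv_completion(enum ib_wc_opcode opcode)
{
        return opcode & IB_WC_RECV;
}
#endif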

enum ib_wc_flags {
        IB_WC_GRH               = 1,
        IB_WC_WITH_IMM          = (1<<1),
        IB_WC_WITH_INVALIDATE   = (1<<2),
};

struct ib_wc {
        u64                     wr_id;
        enum ib_wc_status       status;
        enum ib_wc_opcode       opcode;
        u32                     vendor_err;
        u32                     byte_len;
        struct ib_qp           *qp;
        union {
                __be32          imm_data;
                u32             invalidate_rkey;
        } ex;
        u32                     src_qp;
        int                     wc_flags;
        u16                     pkey_index;
        u16                     slid;
        u8                      sl;
        u8                      dlid_path_bits;
        u8                      port_num;       /* valid only for DR SMPs on switches */
        int                     csum_ok;
};

enum ib_cq_notify_flags {
        IB_CQ_SOLICITED                 = 1 << 0,
        IB_CQ_NEXT_COMP                 = 1 << 1,
        IB_CQ_SOLICITED_MASK            = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
        IB_CQ_REPORT_MISSED_EVENTS      = 1 << 2,
};

enum ib_srq_attr_mask {
        IB_SRQ_MAX_WR   = 1 << 0,
        IB_SRQ_LIMIT    = 1 << 1,
};

struct ib_srq_attr {
        u32     max_wr;
        u32     max_sge;
        u32     srq_limit;
};

struct ib_srq_init_attr {
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *srq_context;
        struct ib_srq_attr      attr;
};

struct ib_qp_cap {
        u32     max_send_wr;
        u32     max_recv_wr;
        u32     max_send_sge;
        u32     max_recv_sge;
        u32     max_inline_data;
};

enum ib_sig_type {
        IB_SIGNAL_ALL_WR,
        IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
        /*
         * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
         * here (and in that order) since the MAD layer uses them as
         * indices into a 2-entry table.
         */
        IB_QPT_SMI,
        IB_QPT_GSI,

        IB_QPT_RC,
        IB_QPT_UC,
        IB_QPT_UD,
        IB_QPT_RAW_IPV6,
        IB_QPT_RAW_ETY
};

enum ib_qp_create_flags {
        IB_QP_CREATE_IPOIB_UD_LSO               = 1 << 0,
        IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK   = 1 << 1,
};

struct ib_qp_init_attr {
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *qp_context;
        struct ib_cq           *send_cq;
        struct ib_cq           *recv_cq;
        struct ib_srq          *srq;
        struct ib_qp_cap        cap;
        enum ib_sig_type        sq_sig_type;
        enum ib_qp_type         qp_type;
        enum ib_qp_create_flags create_flags;
        u8                      port_num; /* special QP types only */
};

enum ib_rnr_timeout {
        IB_RNR_TIMER_655_36 =  0,
        IB_RNR_TIMER_000_01 =  1,
        IB_RNR_TIMER_000_02 =  2,
        IB_RNR_TIMER_000_03 =  3,
        IB_RNR_TIMER_000_04 =  4,
        IB_RNR_TIMER_000_06 =  5,
        IB_RNR_TIMER_000_08 =  6,
        IB_RNR_TIMER_000_12 =  7,
        IB_RNR_TIMER_000_16 =  8,
        IB_RNR_TIMER_000_24 =  9,
        IB_RNR_TIMER_000_32 = 10,
        IB_RNR_TIMER_000_48 = 11,
        IB_RNR_TIMER_000_64 = 12,
        IB_RNR_TIMER_000_96 = 13,
        IB_RNR_TIMER_001_28 = 14,
        IB_RNR_TIMER_001_92 = 15,
        IB_RNR_TIMER_002_56 = 16,
        IB_RNR_TIMER_003_84 = 17,
        IB_RNR_TIMER_005_12 = 18,
        IB_RNR_TIMER_007_68 = 19,
        IB_RNR_TIMER_010_24 = 20,
        IB_RNR_TIMER_015_36 = 21,
        IB_RNR_TIMER_020_48 = 22,
        IB_RNR_TIMER_030_72 = 23,
        IB_RNR_TIMER_040_96 = 24,
        IB_RNR_TIMER_061_44 = 25,
        IB_RNR_TIMER_081_92 = 26,
        IB_RNR_TIMER_122_88 = 27,
        IB_RNR_TIMER_163_84 = 28,
        IB_RNR_TIMER_245_76 = 29,
        IB_RNR_TIMER_327_68 = 30,
        IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
        IB_QP_STATE                     = 1,
        IB_QP_CUR_STATE                 = (1<<1),
        IB_QP_EN_SQD_ASYNC_NOTIFY       = (1<<2),
        IB_QP_ACCESS_FLAGS              = (1<<3),
        IB_QP_PKEY_INDEX                = (1<<4),
        IB_QP_PORT                      = (1<<5),
        IB_QP_QKEY                      = (1<<6),
        IB_QP_AV                        = (1<<7),
        IB_QP_PATH_MTU                  = (1<<8),
        IB_QP_TIMEOUT                   = (1<<9),
        IB_QP_RETRY_CNT                 = (1<<10),
        IB_QP_RNR_RETRY                 = (1<<11),
        IB_QP_RQ_PSN                    = (1<<12),
        IB_QP_MAX_QP_RD_ATOMIC          = (1<<13),
        IB_QP_ALT_PATH                  = (1<<14),
        IB_QP_MIN_RNR_TIMER             = (1<<15),
        IB_QP_SQ_PSN                    = (1<<16),
        IB_QP_MAX_DEST_RD_ATOMIC        = (1<<17),
        IB_QP_PATH_MIG_STATE            = (1<<18),
        IB_QP_CAP                       = (1<<19),
        IB_QP_DEST_QPN                  = (1<<20)
};

enum ib_qp_state {
        IB_QPS_RESET,
        IB_QPS_INIT,
        IB_QPS_RTR,
        IB_QPS_RTS,
        IB_QPS_SQD,
        IB_QPS_SQE,
        IB_QPS_ERR
};

enum ib_mig_state {
        IB_MIG_MIGRATED,
        IB_MIG_REARM,
        IB_MIG_ARMED
};

struct ib_qp_attr {
        enum ib_qp_state        qp_state;
        enum ib_qp_state        cur_qp_state;
        enum ib_mtu             path_mtu;
        enum ib_mig_state       path_mig_state;
        u32                     qkey;
        u32                     rq_psn;
        u32                     sq_psn;
        u32                     dest_qp_num;
        int                     qp_access_flags;
        struct ib_qp_cap        cap;
        struct ib_ah_attr       ah_attr;
        struct ib_ah_attr       alt_ah_attr;
        u16                     pkey_index;
        u16                     alt_pkey_index;
        u8                      en_sqd_async_notify;
        u8                      sq_draining;
        u8                      max_rd_atomic;
        u8                      max_dest_rd_atomic;
        u8                      min_rnr_timer;
        u8                      port_num;
        u8                      timeout;
        u8                      retry_cnt;
        u8                      rnr_retry;
        u8                      alt_port_num;
        u8                      alt_timeout;
};

enum ib_wr_opcode {
        IB_WR_RDMA_WRITE,
        IB_WR_RDMA_WRITE_WITH_IMM,
        IB_WR_SEND,
        IB_WR_SEND_WITH_IMM,
        IB_WR_RDMA_READ,
        IB_WR_ATOMIC_CMP_AND_SWP,
        IB_WR_ATOMIC_FETCH_AND_ADD,
        IB_WR_LSO,
        IB_WR_SEND_WITH_INV,
        IB_WR_RDMA_READ_WITH_INV,
        IB_WR_LOCAL_INV,
        IB_WR_FAST_REG_MR,
};

enum ib_send_flags {
        IB_SEND_FENCE           = 1,
        IB_SEND_SIGNALED        = (1<<1),
        IB_SEND_SOLICITED       = (1<<2),
        IB_SEND_INLINE          = (1<<3),
        IB_SEND_IP_CSUM         = (1<<4)
};

struct ib_sge {
        u64     addr;
        u32     length;
        u32     lkey;
};

struct ib_fast_reg_page_list {
        struct ib_device       *device;
        u64                    *page_list;
        unsigned int            max_page_list_len;
};

struct ib_send_wr {
        struct ib_send_wr      *next;
        u64                     wr_id;
        struct ib_sge          *sg_list;
        int                     num_sge;
        enum ib_wr_opcode       opcode;
        int                     send_flags;
        union {
                __be32          imm_data;
                u32             invalidate_rkey;
        } ex;
        union {
                struct {
                        u64     remote_addr;
                        u32     rkey;
                } rdma;
                struct {
                        u64     remote_addr;
                        u64     compare_add;
                        u64     swap;
                        u32     rkey;
                } atomic;
                struct {
                        struct ib_ah *ah;
                        void   *header;
                        int     hlen;
                        int     mss;
                        u32     remote_qpn;
                        u32     remote_qkey;
                        u16     pkey_index; /* valid for GSI only */
                        u8      port_num;   /* valid for DR SMPs on switch only */
                } ud;
                struct {
                        u64                             iova_start;
                        struct ib_fast_reg_page_list   *page_list;
                        unsigned int                    page_shift;
                        unsigned int                    page_list_len;
                        u32                             length;
                        int                             access_flags;
                        u32                             rkey;
                } fast_reg;
        } wr;
};
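
/*
 * Example (illustrative sketch, compiled out): building a signaled RDMA
 * WRITE work request.  All addresses, keys, and lengths are placeholder
 * parameters; memset() assumes <linux/string.h>.
 */
#if 0
static void example_build_rdma_write(struct ib_send_wr *wr,
                                     struct ib_sge *sge,
                                     u64 local_addr, u32 lkey,
                                     u64 remote_addr, u32 rkey, u32 len)
{
        sge->addr   = local_addr;
        sge->length = len;
        sge->lkey   = lkey;

        memset(wr, 0, sizeof *wr);
        wr->wr_id               = 1;    /* returned in the completion */
        wr->sg_list             = sge;
        wr->num_sge             = 1;
        wr->opcode              = IB_WR_RDMA_WRITE;
        wr->send_flags          = IB_SEND_SIGNALED;
        wr->wr.rdma.remote_addr = remote_addr;
        wr->wr.rdma.rkey        = rkey;
}
#endif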

struct ib_recv_wr {
        struct ib_recv_wr      *next;
        u64                     wr_id;
        struct ib_sge          *sg_list;
        int                     num_sge;
};

enum ib_access_flags {
        IB_ACCESS_LOCAL_WRITE   = 1,
        IB_ACCESS_REMOTE_WRITE  = (1<<1),
        IB_ACCESS_REMOTE_READ   = (1<<2),
        IB_ACCESS_REMOTE_ATOMIC = (1<<3),
        IB_ACCESS_MW_BIND       = (1<<4)
};

struct ib_phys_buf {
        u64      addr;
        u64      size;
};

struct ib_mr_attr {
        struct ib_pd    *pd;
        u64             device_virt_addr;
        u64             size;
        int             mr_access_flags;
        u32             lkey;
        u32             rkey;
};

enum ib_mr_rereg_flags {
        IB_MR_REREG_TRANS       = 1,
        IB_MR_REREG_PD          = (1<<1),
        IB_MR_REREG_ACCESS      = (1<<2)
};

struct ib_mw_bind {
        struct ib_mr   *mr;
        u64             wr_id;
        u64             addr;
        u32             length;
        int             send_flags;
        int             mw_access_flags;
};

struct ib_fmr_attr {
        int     max_pages;
        int     max_maps;
        u8      page_shift;
};

struct ib_ucontext {
        struct ib_device       *device;
        struct list_head        pd_list;
        struct list_head        mr_list;
        struct list_head        mw_list;
        struct list_head        cq_list;
        struct list_head        qp_list;
        struct list_head        srq_list;
        struct list_head        ah_list;
        int                     closing;
};

struct ib_uobject {
        u64                     user_handle;    /* handle given to us by userspace */
        struct ib_ucontext     *context;        /* associated user context */
        void                   *object;         /* containing object */
        struct list_head        list;           /* link to context's list */
        int                     id;             /* index into kernel idr */
        struct kref             ref;
        struct rw_semaphore     mutex;          /* protects .live */
        int                     live;
};

struct ib_udata {
        void __user *inbuf;
        void __user *outbuf;
        size_t       inlen;
        size_t       outlen;
};

struct ib_pd {
        struct ib_device       *device;
        struct ib_uobject      *uobject;
        atomic_t                usecnt; /* count all resources */
};

struct ib_ah {
        struct ib_device        *device;
        struct ib_pd            *pd;
        struct ib_uobject       *uobject;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

struct ib_cq {
        struct ib_device       *device;
        struct ib_uobject      *uobject;
        ib_comp_handler         comp_handler;
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *cq_context;
        int                     cqe;
        atomic_t                usecnt; /* count number of work queues */
};

struct ib_srq {
        struct ib_device       *device;
        struct ib_pd           *pd;
        struct ib_uobject      *uobject;
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *srq_context;
        atomic_t                usecnt;
};

struct ib_qp {
        struct ib_device       *device;
        struct ib_pd           *pd;
        struct ib_cq           *send_cq;
        struct ib_cq           *recv_cq;
        struct ib_srq          *srq;
        struct ib_uobject      *uobject;
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *qp_context;
        u32                     qp_num;
        enum ib_qp_type         qp_type;
};

struct ib_mr {
        struct ib_device  *device;
        struct ib_pd      *pd;
        struct ib_uobject *uobject;
        u32                lkey;
        u32                rkey;
        atomic_t           usecnt; /* count number of MWs */
};

struct ib_mw {
        struct ib_device        *device;
        struct ib_pd            *pd;
        struct ib_uobject       *uobject;
        u32                     rkey;
};

struct ib_fmr {
        struct ib_device        *device;
        struct ib_pd            *pd;
        struct list_head        list;
        u32                     lkey;
        u32                     rkey;
};

struct ib_mad;
struct ib_grh;

enum ib_process_mad_flags {
        IB_MAD_IGNORE_MKEY      = 1,
        IB_MAD_IGNORE_BKEY      = 2,
        IB_MAD_IGNORE_ALL       = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
        IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
        IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
        IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
        IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
        rwlock_t                lock;
        struct ib_event_handler event_handler;
        struct ib_pkey_cache  **pkey_cache;
        struct ib_gid_cache   **gid_cache;
        u8                     *lmc_cache;
};

struct ib_dma_mapping_ops {
        int             (*mapping_error)(struct ib_device *dev,
                                         u64 dma_addr);
        u64             (*map_single)(struct ib_device *dev,
                                      void *ptr, size_t size,
                                      enum dma_data_direction direction);
        void            (*unmap_single)(struct ib_device *dev,
                                        u64 addr, size_t size,
                                        enum dma_data_direction direction);
        u64             (*map_page)(struct ib_device *dev,
                                    struct page *page, unsigned long offset,
                                    size_t size,
                                    enum dma_data_direction direction);
        void            (*unmap_page)(struct ib_device *dev,
                                      u64 addr, size_t size,
                                      enum dma_data_direction direction);
        int             (*map_sg)(struct ib_device *dev,
                                  struct scatterlist *sg, int nents,
                                  enum dma_data_direction direction);
        void            (*unmap_sg)(struct ib_device *dev,
                                    struct scatterlist *sg, int nents,
                                    enum dma_data_direction direction);
        u64             (*dma_address)(struct ib_device *dev,
                                       struct scatterlist *sg);
        unsigned int    (*dma_len)(struct ib_device *dev,
                                   struct scatterlist *sg);
        void            (*sync_single_for_cpu)(struct ib_device *dev,
                                               u64 dma_handle,
                                               size_t size,
                                               enum dma_data_direction dir);
        void            (*sync_single_for_device)(struct ib_device *dev,
                                                  u64 dma_handle,
                                                  size_t size,
                                                  enum dma_data_direction dir);
        void            *(*alloc_coherent)(struct ib_device *dev,
                                           size_t size,
                                           u64 *dma_handle,
                                           gfp_t flag);
        void            (*free_coherent)(struct ib_device *dev,
                                         size_t size, void *cpu_addr,
                                         u64 dma_handle);
};

struct iw_cm_verbs;

struct ib_device {
        struct device                *dma_device;

        char                          name[IB_DEVICE_NAME_MAX];

        struct list_head              event_handler_list;
        spinlock_t                    event_handler_lock;

        struct list_head              core_list;
        struct list_head              client_data_list;
        spinlock_t                    client_data_lock;

        struct ib_cache               cache;
        int                          *pkey_tbl_len;
        int                          *gid_tbl_len;

        int                           num_comp_vectors;

        struct iw_cm_verbs           *iwcm;

        int                        (*get_protocol_stats)(struct ib_device *device,
                                                         union rdma_protocol_stats *stats);
        int                        (*query_device)(struct ib_device *device,
                                                   struct ib_device_attr *device_attr);
        int                        (*query_port)(struct ib_device *device,
                                                 u8 port_num,
                                                 struct ib_port_attr *port_attr);
        int                        (*query_gid)(struct ib_device *device,
                                                u8 port_num, int index,
                                                union ib_gid *gid);
        int                        (*query_pkey)(struct ib_device *device,
                                                 u8 port_num, u16 index, u16 *pkey);
        int                        (*modify_device)(struct ib_device *device,
                                                    int device_modify_mask,
                                                    struct ib_device_modify *device_modify);
        int                        (*modify_port)(struct ib_device *device,
                                                  u8 port_num, int port_modify_mask,
                                                  struct ib_port_modify *port_modify);
        struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
                                                     struct ib_udata *udata);
        int                        (*dealloc_ucontext)(struct ib_ucontext *context);
        int                        (*mmap)(struct ib_ucontext *context,
                                           struct vm_area_struct *vma);
        struct ib_pd *             (*alloc_pd)(struct ib_device *device,
                                               struct ib_ucontext *context,
                                               struct ib_udata *udata);
        int                        (*dealloc_pd)(struct ib_pd *pd);
        struct ib_ah *             (*create_ah)(struct ib_pd *pd,
                                                struct ib_ah_attr *ah_attr);
        int                        (*modify_ah)(struct ib_ah *ah,
                                                struct ib_ah_attr *ah_attr);
        int                        (*query_ah)(struct ib_ah *ah,
                                               struct ib_ah_attr *ah_attr);
        int                        (*destroy_ah)(struct ib_ah *ah);
        struct ib_srq *            (*create_srq)(struct ib_pd *pd,
                                                 struct ib_srq_init_attr *srq_init_attr,
                                                 struct ib_udata *udata);
        int                        (*modify_srq)(struct ib_srq *srq,
                                                 struct ib_srq_attr *srq_attr,
                                                 enum ib_srq_attr_mask srq_attr_mask,
                                                 struct ib_udata *udata);
        int                        (*query_srq)(struct ib_srq *srq,
                                                struct ib_srq_attr *srq_attr);
        int                        (*destroy_srq)(struct ib_srq *srq);
        int                        (*post_srq_recv)(struct ib_srq *srq,
                                                    struct ib_recv_wr *recv_wr,
                                                    struct ib_recv_wr **bad_recv_wr);
        struct ib_qp *             (*create_qp)(struct ib_pd *pd,
                                                struct ib_qp_init_attr *qp_init_attr,
                                                struct ib_udata *udata);
        int                        (*modify_qp)(struct ib_qp *qp,
                                                struct ib_qp_attr *qp_attr,
                                                int qp_attr_mask,
                                                struct ib_udata *udata);
        int                        (*query_qp)(struct ib_qp *qp,
                                               struct ib_qp_attr *qp_attr,
                                               int qp_attr_mask,
                                               struct ib_qp_init_attr *qp_init_attr);
        int                        (*destroy_qp)(struct ib_qp *qp);
        int                        (*post_send)(struct ib_qp *qp,
                                                struct ib_send_wr *send_wr,
                                                struct ib_send_wr **bad_send_wr);
        int                        (*post_recv)(struct ib_qp *qp,
                                                struct ib_recv_wr *recv_wr,
                                                struct ib_recv_wr **bad_recv_wr);
        struct ib_cq *             (*create_cq)(struct ib_device *device, int cqe,
                                                int comp_vector,
                                                struct ib_ucontext *context,
                                                struct ib_udata *udata);
        int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
                                                u16 cq_period);
        int                        (*destroy_cq)(struct ib_cq *cq);
        int                        (*resize_cq)(struct ib_cq *cq, int cqe,
                                                struct ib_udata *udata);
        int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
                                              struct ib_wc *wc);
        int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
        int                        (*req_notify_cq)(struct ib_cq *cq,
                                                    enum ib_cq_notify_flags flags);
        int                        (*req_ncomp_notif)(struct ib_cq *cq,
                                                      int wc_cnt);
        struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
                                                 int mr_access_flags);
        struct ib_mr *             (*reg_phys_mr)(struct ib_pd *pd,
                                                  struct ib_phys_buf *phys_buf_array,
                                                  int num_phys_buf,
                                                  int mr_access_flags,
                                                  u64 *iova_start);
        struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
                                                  u64 start, u64 length,
                                                  u64 virt_addr,
                                                  int mr_access_flags,
                                                  struct ib_udata *udata);
        int                        (*query_mr)(struct ib_mr *mr,
                                               struct ib_mr_attr *mr_attr);
        int                        (*dereg_mr)(struct ib_mr *mr);
        struct ib_mr *             (*alloc_fast_reg_mr)(struct ib_pd *pd,
                                               int max_page_list_len);
        struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
                                                                   int page_list_len);
        void                       (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
        int                        (*rereg_phys_mr)(struct ib_mr *mr,
                                                    int mr_rereg_mask,
                                                    struct ib_pd *pd,
                                                    struct ib_phys_buf *phys_buf_array,
                                                    int num_phys_buf,
                                                    int mr_access_flags,
                                                    u64 *iova_start);
        struct ib_mw *             (*alloc_mw)(struct ib_pd *pd);
        int                        (*bind_mw)(struct ib_qp *qp,
                                              struct ib_mw *mw,
                                              struct ib_mw_bind *mw_bind);
        int                        (*dealloc_mw)(struct ib_mw *mw);
        struct ib_fmr *            (*alloc_fmr)(struct ib_pd *pd,
                                                int mr_access_flags,
                                                struct ib_fmr_attr *fmr_attr);
        int                        (*map_phys_fmr)(struct ib_fmr *fmr,
                                                   u64 *page_list, int list_len,
                                                   u64 iova);
        int                        (*unmap_fmr)(struct list_head *fmr_list);
        int                        (*dealloc_fmr)(struct ib_fmr *fmr);
        int                        (*attach_mcast)(struct ib_qp *qp,
                                                   union ib_gid *gid,
                                                   u16 lid);
        int                        (*detach_mcast)(struct ib_qp *qp,
                                                   union ib_gid *gid,
                                                   u16 lid);
        int                        (*process_mad)(struct ib_device *device,
                                                  int process_mad_flags,
                                                  u8 port_num,
                                                  struct ib_wc *in_wc,
                                                  struct ib_grh *in_grh,
                                                  struct ib_mad *in_mad,
                                                  struct ib_mad *out_mad);

        struct ib_dma_mapping_ops   *dma_ops;

        struct module               *owner;
        struct device                dev;
        struct kobject               *ports_parent;
        struct list_head             port_list;

        enum {
                IB_DEV_UNINITIALIZED,
                IB_DEV_REGISTERED,
                IB_DEV_UNREGISTERED
        }                            reg_state;

        u64                          uverbs_cmd_mask;
        int                          uverbs_abi_ver;

        char                         node_desc[64];
        __be64                       node_guid;
        u32                          local_dma_lkey;
        u8                           node_type;
        u8                           phys_port_cnt;
};
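
/*
 * Example (illustrative sketch, compiled out): the ib_dma_* wrappers
 * defined further down in the full header dispatch to device->dma_ops
 * when a driver supplies one and fall back to the generic DMA API
 * otherwise.  The helper name below is hypothetical; it only shows the
 * dispatch pattern.
 */
#if 0
static u64 example_map_single(struct ib_device *dev, void *ptr, size_t size,
                              enum dma_data_direction direction)
{
        if (dev->dma_ops)
                return dev->dma_ops->map_single(dev, ptr, size, direction);
        return dma_map_single(dev->dma_device, ptr, size, direction);
}
#endif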

struct ib_client {
        char  *name;
        void (*add)   (struct ib_device *);
        void (*remove)(struct ib_device *);

        struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

int ib_register_device   (struct ib_device *device);
void ib_unregister_device(struct ib_device *device);

int ib_register_client   (struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
                         void *data);
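
/*
 * Example (illustrative sketch, compiled out): a minimal IB client.
 * Once ib_register_client() is called, the core invokes ->add() for
 * every existing and future device; per-device state is stashed with
 * ib_set_client_data().  Names are hypothetical; kzalloc()/kfree()
 * assume <linux/slab.h>.
 */
#if 0
static struct ib_client example_client;

static void example_add_one(struct ib_device *device)
{
        ib_set_client_data(device, &example_client,
                           kzalloc(64, GFP_KERNEL));   /* placeholder state */
}

static void example_remove_one(struct ib_device *device)
{
        kfree(ib_get_client_data(device, &example_client));
}

static struct ib_client example_client = {
        .name   = "example",
        .add    = example_add_one,
        .remove = example_remove_one,
};
#endif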

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
        return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
        return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}
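
/*
 * Example (illustrative sketch, compiled out): a driver verb returning
 * a response to userspace through the udata buffers.  The response
 * layout is invented for illustration.
 */
#if 0
static int example_return_handle(struct ib_udata *udata, u32 handle)
{
        struct { u32 handle; } resp = { .handle = handle };

        if (udata && udata->outlen >= sizeof resp)
                return ib_copy_to_udata(udata, &resp, sizeof resp);
        return 0;
}
#endif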

/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 *
 * This is a helper that a low-level driver's modify_qp method can use
 * to validate the consumer's input.  It checks that cur_state and
 * next_state are valid QP states, that a transition from cur_state to
 * next_state is allowed by the IB spec, and that the attribute mask
 * supplied is allowed for the transition.
 */
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
                       enum ib_qp_type type, enum ib_qp_attr_mask mask);
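
/*
 * Example (illustrative sketch, compiled out): a driver's modify_qp
 * method validating a transition before touching hardware.  Real
 * drivers take the current state from their own tracking; the
 * IB_QP_CUR_STATE fallback here is a simplification, and -EINVAL
 * assumes <linux/errno.h>.
 */
#if 0
static int example_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                             int attr_mask, struct ib_udata *udata)
{
        enum ib_qp_state cur_state = (attr_mask & IB_QP_CUR_STATE) ?
                attr->cur_qp_state : IB_QPS_RESET;
        enum ib_qp_state new_state = (attr_mask & IB_QP_STATE) ?
                attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
                                attr_mask))
                return -EINVAL;

        /* ... program the new attributes into the hardware ... */
        return 0;
}
#endif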

int ib_register_event_handler  (struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_device(struct ib_device *device,
                    struct ib_device_attr *device_attr);

int ib_query_port(struct ib_device *device,
                  u8 port_num, struct ib_port_attr *port_attr);

int ib_query_gid(struct ib_device *device,
                 u8 port_num, int index, union ib_gid *gid);

int ib_query_pkey(struct ib_device *device,
                  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
                     int device_modify_mask,
                     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
                   u8 port_num, int port_modify_mask,
                   struct ib_port_modify *port_modify);

int ib_find_gid(struct ib_device *device, union ib_gid *gid,
                u8 *port_num, u16 *index);

int ib_find_pkey(struct ib_device *device,
                 u8 port_num, u16 pkey, u16 *index);

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device);

/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 */
int ib_dealloc_pd(struct ib_pd *pd);
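
/*
 * Example (illustrative sketch, compiled out): allocating and releasing
 * a protection domain.  IS_ERR()/PTR_ERR() assume <linux/err.h>.
 */
#if 0
static int example_use_pd(struct ib_device *device)
{
        struct ib_pd *pd = ib_alloc_pd(device);

        if (IS_ERR(pd))
                return PTR_ERR(pd);

        /* ... create CQs, QPs, and MRs under this PD ... */

        return ib_dealloc_pd(pd);
}
#endif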

/**
 * ib_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);

/**
 * ib_init_ah_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
                       struct ib_grh *grh, struct ib_ah_attr *ah_attr);

/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
                                   struct ib_grh *grh, u8 port_num);

/**
 * ib_modify_ah - Modifies the address vector associated with an address
 *   handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 *   address handle.
 */
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_query_ah - Queries the address vector associated with an address
 *   handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 *   handle.
 */
int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_destroy_ah - Destroys an address handle.
 * @ah: The address handle to destroy.
 */
int ib_destroy_ah(struct ib_ah *ah);

/**
 * ib_create_srq - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
                             struct ib_srq_init_attr *srq_init_attr);
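
/*
 * Example (illustrative sketch, compiled out): creating an SRQ with
 * placeholder capacities.  On success, attr.max_wr and attr.max_sge
 * report what was actually allocated, which may exceed the request.
 */
#if 0
static struct ib_srq *example_create_srq(struct ib_pd *pd)
{
        struct ib_srq_init_attr init_attr = {
                .attr = {
                        .max_wr  = 128,         /* requested queue depth */
                        .max_sge = 1,
                },
        };

        return ib_create_srq(pd, &init_attr);
}
#endif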
1329
1330/**
1331 * ib_modify_srq - Modifies the attributes for the specified SRQ.
1332 * @srq: The SRQ to modify.
1333 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
1334 *   the current values of selected SRQ attributes are returned.
1335 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
1336 *   are being modified.
1337 *
1338 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
1339 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
1340 * the number of receives queued drops below the limit.
1341 */
1342int ib_modify_srq(struct ib_srq *srq,
1343                  struct ib_srq_attr *srq_attr,
1344                  enum ib_srq_attr_mask srq_attr_mask);
1345
1346/**
1347 * ib_query_srq - Returns the attribute list and current values for the
1348 *   specified SRQ.
1349 * @srq: The SRQ to query.
1350 * @srq_attr: The attributes of the specified SRQ.
1351 */
1352int ib_query_srq(struct ib_srq *srq,
1353                 struct ib_srq_attr *srq_attr);
1354
1355/**
1356 * ib_destroy_srq - Destroys the specified SRQ.
1357 * @srq: The SRQ to destroy.
1358 */
1359int ib_destroy_srq(struct ib_srq *srq);
1360
1361/**
1362 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
1363 * @srq: The SRQ to post the work request on.
1364 * @recv_wr: A list of work requests to post on the receive queue.
1365 * @bad_recv_wr: On an immediate failure, this parameter will reference
1366 *   the work request that failed to be posted on the SRQ.
1367 */
1368static inline int ib_post_srq_recv(struct ib_srq *srq,
1369                                   struct ib_recv_wr *recv_wr,
1370                                   struct ib_recv_wr **bad_recv_wr)
1371{
1372        return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
1373}
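
/*
 * Usage sketch (illustrative): posting a single receive to the SRQ.
 * "dma_addr", "len", "lkey" and "cookie" are caller-provided; the buffer
 * must already be DMA-mapped (see the ib_dma_*() helpers below).
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = lkey,
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id   = cookie,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *	struct ib_recv_wr *bad_wr;
 *	int ret = ib_post_srq_recv(srq, &wr, &bad_wr);
 */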
1374
1375/**
1376 * ib_create_qp - Creates a QP associated with the specified protection
1377 *   domain.
1378 * @pd: The protection domain associated with the QP.
1379 * @qp_init_attr: A list of initial attributes required to create the
1380 *   QP.  If QP creation succeeds, then the attributes are updated to
1381 *   the actual capabilities of the created QP.
1382 */
1383struct ib_qp *ib_create_qp(struct ib_pd *pd,
1384                           struct ib_qp_init_attr *qp_init_attr);
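
/*
 * Usage sketch (illustrative): creating an RC QP.  The CQ pointers and
 * queue sizes are assumptions for the example only.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq = send_cq,
 *		.recv_cq = recv_cq,
 *		.cap = {
 *			.max_send_wr  = 256,
 *			.max_recv_wr  = 256,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp;
 *
 *	qp = ib_create_qp(pd, &init_attr);
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */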
1385
1386/**
1387 * ib_modify_qp - Modifies the attributes for the specified QP and then
1388 *   transitions the QP to the given state.
1389 * @qp: The QP to modify.
1390 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
1391 *   the current values of selected QP attributes are returned.
1392 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
1393 *   are being modified.
1394 */
1395int ib_modify_qp(struct ib_qp *qp,
1396                 struct ib_qp_attr *qp_attr,
1397                 int qp_attr_mask);
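
/*
 * Usage sketch (illustrative): the RESET->INIT transition for an RC QP.
 * The P_Key index, port and access flags are assumptions; the later
 * transitions to RTR and RTS require additional attributes.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state        = IB_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *	int ret = ib_modify_qp(qp, &attr,
 *			       IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			       IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */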
1398
1399/**
1400 * ib_query_qp - Returns the attribute list and current values for the
1401 *   specified QP.
1402 * @qp: The QP to query.
1403 * @qp_attr: The attributes of the specified QP.
1404 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
1405 * @qp_init_attr: Additional attributes of the selected QP.
1406 *
1407 * The qp_attr_mask may be used to limit the query to gathering only the
1408 * selected attributes.
1409 */
1410int ib_query_qp(struct ib_qp *qp,
1411                struct ib_qp_attr *qp_attr,
1412                int qp_attr_mask,
1413                struct ib_qp_init_attr *qp_init_attr);
1414
1415/**
1416 * ib_destroy_qp - Destroys the specified QP.
1417 * @qp: The QP to destroy.
1418 */
1419int ib_destroy_qp(struct ib_qp *qp);
1420
1421/**
1422 * ib_post_send - Posts a list of work requests to the send queue of
1423 *   the specified QP.
1424 * @qp: The QP to post the work request on.
1425 * @send_wr: A list of work requests to post on the send queue.
1426 * @bad_send_wr: On an immediate failure, this parameter will reference
1427 *   the work request that failed to be posted on the QP.
1428 */
1429static inline int ib_post_send(struct ib_qp *qp,
1430                               struct ib_send_wr *send_wr,
1431                               struct ib_send_wr **bad_send_wr)
1432{
1433        return qp->device->post_send(qp, send_wr, bad_send_wr);
1434}
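
/*
 * Usage sketch (illustrative): posting a single signaled SEND of one
 * gather entry.  "dma_addr", "len", "lkey" and "cookie" are
 * caller-provided values.
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id      = cookie,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	struct ib_send_wr *bad_wr;
 *	int ret = ib_post_send(qp, &wr, &bad_wr);
 */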
1435
1436/**
1437 * ib_post_recv - Posts a list of work requests to the receive queue of
1438 *   the specified QP.
1439 * @qp: The QP to post the work request on.
1440 * @recv_wr: A list of work requests to post on the receive queue.
1441 * @bad_recv_wr: On an immediate failure, this parameter will reference
1442 *   the work request that failed to be posted on the QP.
1443 */
1444static inline int ib_post_recv(struct ib_qp *qp,
1445                               struct ib_recv_wr *recv_wr,
1446                               struct ib_recv_wr **bad_recv_wr)
1447{
1448        return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
1449}
1450
1451/**
1452 * ib_create_cq - Creates a CQ on the specified device.
1453 * @device: The device on which to create the CQ.
1454 * @comp_handler: A user-specified callback that is invoked when a
1455 *   completion event occurs on the CQ.
1456 * @event_handler: A user-specified callback that is invoked when an
1457 *   asynchronous event not associated with a completion occurs on the CQ.
1458 * @cq_context: Context associated with the CQ returned to the user via
1459 *   the associated completion and event handlers.
1460 * @cqe: The minimum size of the CQ.
1461 * @comp_vector: Completion vector used to signal completion events.
1462 *     Must be >= 0 and < device->num_comp_vectors.
1463 *
1464 * Users can examine the cq structure to determine the actual CQ size.
1465 */
1466struct ib_cq *ib_create_cq(struct ib_device *device,
1467                           ib_comp_handler comp_handler,
1468                           void (*event_handler)(struct ib_event *, void *),
1469                           void *cq_context, int cqe, int comp_vector);
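
/*
 * Usage sketch (illustrative): "my_comp_handler" and "my_context" are
 * hypothetical caller-defined names; 256 entries and vector 0 are
 * assumptions for the example.  The completion handler must match
 * ib_comp_handler, i.e. void my_comp_handler(struct ib_cq *cq, void *ctx).
 *
 *	struct ib_cq *cq;
 *
 *	cq = ib_create_cq(device, my_comp_handler, NULL,
 *			  my_context, 256, 0);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */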
1470
1471/**
1472 * ib_resize_cq - Modifies the capacity of the CQ.
1473 * @cq: The CQ to resize.
1474 * @cqe: The minimum size of the CQ.
1475 *
1476 * Users can examine the cq structure to determine the actual CQ size.
1477 */
1478int ib_resize_cq(struct ib_cq *cq, int cqe);
1479
1480/**
1481 * ib_modify_cq - Modifies the moderation parameters of the CQ.
1482 * @cq: The CQ to modify.
1483 * @cq_count: Number of completions that will trigger an event.
1484 * @cq_period: Maximum period, in microseconds, before an event is
1485 *   triggered.
1486 */
1487int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
1488
1489/**
1490 * ib_destroy_cq - Destroys the specified CQ.
1491 * @cq: The CQ to destroy.
1492 */
1493int ib_destroy_cq(struct ib_cq *cq);
1494
1495/**
1496 * ib_poll_cq - poll a CQ for completion(s)
1497 * @cq: the CQ being polled
1498 * @num_entries: maximum number of completions to return
1499 * @wc: array of at least @num_entries &struct ib_wc where completions
1500 *   will be returned
1501 *
1502 * Poll a CQ for (possibly multiple) completions.  If the return value
1503 * is < 0, an error occurred.  If the return value is >= 0, it is the
1504 * number of completions returned.  If the return value is
1505 * non-negative and < num_entries, then the CQ was emptied.
1506 */
1507static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
1508                             struct ib_wc *wc)
1509{
1510        return cq->device->poll_cq(cq, num_entries, wc);
1511}
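
/*
 * Usage sketch (illustrative): draining a CQ in batches and checking each
 * completion's status.  "handle_error" is a hypothetical caller-defined
 * function.
 *
 *	struct ib_wc wc[4];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(cq, 4, wc)) > 0)
 *		for (i = 0; i < n; ++i)
 *			if (wc[i].status != IB_WC_SUCCESS)
 *				handle_error(&wc[i]);
 */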
1512
1513/**
1514 * ib_peek_cq - Returns the number of unreaped completions currently
1515 *   on the specified CQ.
1516 * @cq: The CQ to peek.
1517 * @wc_cnt: A minimum number of unreaped completions to check for.
1518 *
1519 * If the number of unreaped completions is greater than or equal to wc_cnt,
1520 * this function returns wc_cnt; otherwise, it returns the actual number of
1521 * unreaped completions.
1522 */
1523int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
1524
1525/**
1526 * ib_req_notify_cq - Request completion notification on a CQ.
1527 * @cq: The CQ to generate an event for.
1528 * @flags:
1529 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
1530 *   to request an event on the next solicited event or next work
1531 *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
1532 *   may also be |ed in to request a hint about missed events, as
1533 *   described below.
1534 *
1535 * Return Value:
1536 *    < 0 means an error occurred while requesting notification
1537 *   == 0 means notification was requested successfully, and if
1538 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
1539 *        were missed and it is safe to wait for another event.  In
1540 *        this case it is guaranteed that any work completions added
1541 *        to the CQ since the last CQ poll will trigger a completion
1542 *        notification event.
1543 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
1544 *        in.  It means that the consumer must poll the CQ again to
1545 *        make sure it is empty to avoid missing an event because of a
1546 *        race between requesting notification and an entry being
1547 *        added to the CQ.  This return value means it is possible
1548 *        (but not guaranteed) that a work completion has been added
1549 *        to the CQ since the last poll without triggering a
1550 *        completion notification event.
1551 */
1552static inline int ib_req_notify_cq(struct ib_cq *cq,
1553                                   enum ib_cq_notify_flags flags)
1554{
1555        return cq->device->req_notify_cq(cq, flags);
1556}
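
/*
 * Usage sketch (illustrative): the standard poll/rearm loop that closes
 * the race described above.  "process" is a hypothetical caller-defined
 * function.
 *
 *	struct ib_wc wc;
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			process(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */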
1557
1558/**
1559 * ib_req_ncomp_notif - Request completion notification when there are
1560 *   at least the specified number of unreaped completions on the CQ.
1561 * @cq: The CQ to generate an event for.
1562 * @wc_cnt: The number of unreaped completions that should be on the
1563 *   CQ before an event is generated.
1564 */
1565static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
1566{
1567        return cq->device->req_ncomp_notif ?
1568                cq->device->req_ncomp_notif(cq, wc_cnt) :
1569                -ENOSYS;
1570}
1571
1572/**
1573 * ib_get_dma_mr - Returns a memory region for system memory that is
1574 *   usable for DMA.
1575 * @pd: The protection domain associated with the memory region.
1576 * @mr_access_flags: Specifies the memory access rights.
1577 *
1578 * Note that the ib_dma_*() functions defined below must be used
1579 * to create/destroy addresses used with the Lkey or Rkey returned
1580 * by ib_get_dma_mr().
1581 */
1582struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
1583
1584/**
1585 * ib_dma_mapping_error - check a DMA addr for error
1586 * @dev: The device for which the dma_addr was created
1587 * @dma_addr: The DMA address to check
1588 */
1589static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
1590{
1591        if (dev->dma_ops)
1592                return dev->dma_ops->mapping_error(dev, dma_addr);
1593        return dma_mapping_error(dev->dma_device, dma_addr);
1594}
1595
1596/**
1597 * ib_dma_map_single - Map a kernel virtual address to DMA address
1598 * @dev: The device for which the dma_addr is to be created
1599 * @cpu_addr: The kernel virtual address
1600 * @size: The size of the region in bytes
1601 * @direction: The direction of the DMA
1602 */
1603static inline u64 ib_dma_map_single(struct ib_device *dev,
1604                                    void *cpu_addr, size_t size,
1605                                    enum dma_data_direction direction)
1606{
1607        if (dev->dma_ops)
1608                return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
1609        return dma_map_single(dev->dma_device, cpu_addr, size, direction);
1610}
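
/*
 * Usage sketch (illustrative): mapping a buffer for a send, with the
 * mandatory error check.  "buf" and "len" are caller-provided.
 *
 *	u64 dma_addr;
 *
 *	dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(dev, dma_addr))
 *		return -ENOMEM;
 *	(post the work request and wait for its completion, then)
 *	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
 */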
1611
1612/**
1613 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
1614 * @dev: The device for which the DMA address was created
1615 * @addr: The DMA address
1616 * @size: The size of the region in bytes
1617 * @direction: The direction of the DMA
1618 */
1619static inline void ib_dma_unmap_single(struct ib_device *dev,
1620                                       u64 addr, size_t size,
1621                                       enum dma_data_direction direction)
1622{
1623        if (dev->dma_ops)
1624                dev->dma_ops->unmap_single(dev, addr, size, direction);
1625        else
1626                dma_unmap_single(dev->dma_device, addr, size, direction);
1627}
1628
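/**
 * ib_dma_map_single_attrs - Map a kernel virtual address to DMA address
 *   using the given DMA attributes
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 * @attrs: The DMA attributes to apply to the mapping
 *
 * Note: unlike ib_dma_map_single(), this helper does not consult
 * dev->dma_ops; it always maps through the underlying dma_device.
 */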
1629static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
1630                                          void *cpu_addr, size_t size,
1631                                          enum dma_data_direction direction,
1632                                          struct dma_attrs *attrs)
1633{
1634        return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
1635                                    direction, attrs);
1636}
1637
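/**
 * ib_dma_unmap_single_attrs - Destroy a mapping created by
 *   ib_dma_map_single_attrs()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 * @attrs: The DMA attributes that were used for the mapping
 */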
1638static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
1639                                             u64 addr, size_t size,
1640                                             enum dma_data_direction direction,
1641                                             struct dma_attrs *attrs)
1642{
1643        dma_unmap_single_attrs(dev->dma_device, addr, size,
1644                               direction, attrs);
1645}
1646
1647/**
1648 * ib_dma_map_page - Map a physical page to DMA address
1649 * @dev: The device for which the dma_addr is to be created
1650 * @page: The page to be mapped
1651 * @offset: The offset within the page
1652 * @size: The size of the region in bytes
1653 * @direction: The direction of the DMA
1654 */
1655static inline u64 ib_dma_map_page(struct ib_device *dev,
1656                                  struct page *page,
1657                                  unsigned long offset,
1658                                  size_t size,
1659                                  enum dma_data_direction direction)
1660{
1661        if (dev->dma_ops)
1662                return dev->dma_ops->map_page(dev, page, offset, size, direction);
1663        return dma_map_page(dev->dma_device, page, offset, size, direction);
1664}
1665
1666/**
1667 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
1668 * @dev: The device for which the DMA address was created
1669 * @addr: The DMA address
1670 * @size: The size of the region in bytes
1671 * @direction: The direction of the DMA
1672 */
1673static inline void ib_dma_unmap_page(struct ib_device *dev,
1674                                     u64 addr, size_t size,
1675                                     enum dma_data_direction direction)
1676{
1677        if (dev->dma_ops)
1678                dev->dma_ops->unmap_page(dev, addr, size, direction);
1679        else
1680                dma_unmap_page(dev->dma_device, addr, size, direction);
1681}
1682
1683/**
1684 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
1685 * @dev: The device for which the DMA addresses are to be created
1686 * @sg: The array of scatter/gather entries
1687 * @nents: The number of scatter/gather entries
1688 * @direction: The direction of the DMA
1689 */
1690static inline int ib_dma_map_sg(struct ib_device *dev,
1691                                struct scatterlist *sg, int nents,
1692                                enum dma_data_direction direction)
1693{
1694        if (dev->dma_ops)
1695                return dev->dma_ops->map_sg(dev, sg, nents, direction);
1696        return dma_map_sg(dev->dma_device, sg, nents, direction);
1697}
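
/*
 * Usage sketch (illustrative): mapping a scatterlist and walking the
 * mapped entries with the ib_sg_dma_*() accessors defined below.
 * "setup_sge" is a hypothetical caller-defined function; note that the
 * loop runs over the mapped count, not the original nents.
 *
 *	struct scatterlist *s;
 *	int i, n;
 *
 *	n = ib_dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (!n)
 *		return -ENOMEM;
 *	for_each_sg(sgl, s, n, i)
 *		setup_sge(ib_sg_dma_address(dev, s),
 *			  ib_sg_dma_len(dev, s));
 */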
1698
1699/**
1700 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
1701 * @dev: The device for which the DMA addresses were created
1702 * @sg: The array of scatter/gather entries
1703 * @nents: The number of scatter/gather entries
1704 * @direction: The direction of the DMA
1705 */
1706static inline void ib_dma_unmap_sg(struct ib_device *dev,
1707                                   struct scatterlist *sg, int nents,
1708                                   enum dma_data_direction direction)
1709{
1710        if (dev->dma_ops)
1711                dev->dma_ops->unmap_sg(dev, sg, nents, direction);
1712        else
1713                dma_unmap_sg(dev->dma_device, sg, nents, direction);
1714}
1715
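/**
 * ib_dma_map_sg_attrs - Map a scatter/gather list to DMA addresses
 *   using the given DMA attributes
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 * @attrs: The DMA attributes to apply to the mapping
 *
 * Note: unlike ib_dma_map_sg(), this helper does not consult
 * dev->dma_ops; it always maps through the underlying dma_device.
 */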
1716static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
1717                                      struct scatterlist *sg, int nents,
1718                                      enum dma_data_direction direction,
1719                                      struct dma_attrs *attrs)
1720{
1721        return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
1722}
1723
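/**
 * ib_dma_unmap_sg_attrs - Unmap a scatter/gather list mapped by
 *   ib_dma_map_sg_attrs()
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 * @attrs: The DMA attributes that were used for the mapping
 */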
1724static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
1725                                         struct scatterlist *sg, int nents,
1726                                         enum dma_data_direction direction,
1727                                         struct dma_attrs *attrs)
1728{
1729        dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
1730}

1731/**
1732 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
1733 * @dev: The device for which the DMA addresses were created
1734 * @sg: The scatter/gather entry
1735 */
1736static inline u64 ib_sg_dma_address(struct ib_device *dev,
1737                                    struct scatterlist *sg)
1738{
1739        if (dev->dma_ops)
1740                return dev->dma_ops->dma_address(dev, sg);
1741        return sg_dma_address(sg);
1742}
1743
1744/**
1745 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
1746 * @dev: The device for which the DMA addresses were created
1747 * @sg: The scatter/gather entry
1748 */
1749static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
1750                                         struct scatterlist *sg)
1751{
1752        if (dev->dma_ops)
1753                return dev->dma_ops->dma_len(dev, sg);
1754        return sg_dma_len(sg);
1755}
1756
1757/**
1758 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
1759 * @dev: The device for which the DMA address was created
1760 * @addr: The DMA address
1761 * @size: The size of the region in bytes
1762 * @dir: The direction of the DMA
1763 */
1764static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
1765                                              u64 addr,
1766                                              size_t size,
1767                                              enum dma_data_direction dir)
1768{
1769        if (dev->dma_ops)
1770                dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
1771        else
1772                dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
1773}
1774
1775/**
1776 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
1777 * @dev: The device for which the DMA address was created
1778 * @addr: The DMA address
1779 * @size: The size of the region in bytes
1780 * @dir: The direction of the DMA
1781 */
1782static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
1783                                                 u64 addr,
1784                                                 size_t size,
1785                                                 enum dma_data_direction dir)
1786{
1787        if (dev->dma_ops)
1788                dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
1789        else
1790                dma_sync_single_for_device(dev->dma_device, addr, size, dir);
1791}
1792
1793/**
1794 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
1795 * @dev: The device for which the DMA address is requested
1796 * @size: The size of the region to allocate in bytes
1797 * @dma_handle: A pointer for returning the DMA address of the region
1798 * @flag: memory allocator flags
1799 */
1800static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
1801                                           size_t size,
1802                                           u64 *dma_handle,
1803                                           gfp_t flag)
1804{
1805        if (dev->dma_ops)
1806                return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
1807        else {
1808                dma_addr_t handle;
1809                void *ret;
1810
1811                ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
1812                *dma_handle = handle;
1813                return ret;
1814        }
1815}
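
/*
 * Usage sketch (illustrative): allocating a coherent region and releasing
 * it with the matching free.  The 4096-byte size is an assumption.
 *
 *	u64 dma_handle;
 *	void *buf;
 *
 *	buf = ib_dma_alloc_coherent(dev, 4096, &dma_handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	(... use buf and dma_handle ...)
 *	ib_dma_free_coherent(dev, 4096, buf, dma_handle);
 */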
1816
1817/**
1818 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
1819 * @dev: The device for which the DMA addresses were allocated
1820 * @size: The size of the region
1821 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
1822 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
1823 */
1824static inline void ib_dma_free_coherent(struct ib_device *dev,
1825                                        size_t size, void *cpu_addr,
1826                                        u64 dma_handle)
1827{
1828        if (dev->dma_ops)
1829                dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
1830        else
1831                dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
1832}
1833
1834/**
1835 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
1836 *   by an HCA.
1837 * @pd: The protection domain assigned to the registered region.
1838 * @phys_buf_array: Specifies a list of physical buffers to use in the
1839 *   memory region.
1840 * @num_phys_buf: Specifies the size of the phys_buf_array.
1841 * @mr_access_flags: Specifies the memory access rights.
1842 * @iova_start: The offset of the region's starting I/O virtual address.
1843 */
1844struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
1845                             struct ib_phys_buf *phys_buf_array,
1846                             int num_phys_buf,
1847                             int mr_access_flags,
1848                             u64 *iova_start);
1849
1850/**
1851 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
1852 *   Conceptually, this call deregisters the memory region and then
1853 *   registers the new physical memory in its place.  Where possible,
1854 *   resources are reused instead of deallocated and reallocated.
1855 * @mr: The memory region to modify.
1856 * @mr_rereg_mask: A bit-mask used to indicate which of the following
1857 *   properties of the memory region are being modified.
1858 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
1859 *   the new protection domain to associate with the memory region,
1860 *   otherwise, this parameter is ignored.
1861 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
1862 *   field specifies a list of physical buffers to use in the new
1863 *   translation, otherwise, this parameter is ignored.
1864 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
1865 *   field specifies the size of the phys_buf_array, otherwise, this
1866 *   parameter is ignored.
1867 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
1868 *   field specifies the new memory access rights, otherwise, this
1869 *   parameter is ignored.
1870 * @iova_start: The offset of the region's starting I/O virtual address.
1871 */
1872int ib_rereg_phys_mr(struct ib_mr *mr,
1873                     int mr_rereg_mask,
1874                     struct ib_pd *pd,
1875                     struct ib_phys_buf *phys_buf_array,
1876                     int num_phys_buf,
1877                     int mr_access_flags,
1878                     u64 *iova_start);
1879
1880/**
1881 * ib_query_mr - Retrieves information about a specific memory region.
1882 * @mr: The memory region to retrieve information about.
1883 * @mr_attr: The attributes of the specified memory region.
1884 */
1885int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
1886
1887/**
1888 * ib_dereg_mr - Deregisters a memory region and removes it from the
1889 *   HCA translation table.
1890 * @mr: The memory region to deregister.
1891 */
1892int ib_dereg_mr(struct ib_mr *mr);
1893
1894/**
1895 * ib_alloc_fast_reg_mr - Allocates memory region usable with the
1896 *   IB_WR_FAST_REG_MR send work request.
1897 * @pd: The protection domain associated with the region.
1898 * @max_page_list_len: requested max physical buffer list length to be
1899 *   used with fast register work requests for this MR.
1900 */
1901struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
1902
1903/**
1904 * ib_alloc_fast_reg_page_list - Allocates a page list array
1905 * @device: ib device pointer.
1906 * @page_list_len: size of the page list array to be allocated.
1907 *
1908 * This allocates and returns a struct ib_fast_reg_page_list * and a
1909 * page_list array that is at least page_list_len in size.  The actual
1910 * size is returned in max_page_list_len.  The caller is responsible
1911 * for initializing the contents of the page_list array before posting
1912 * a send work request with the IB_WR_FAST_REG_MR opcode.
1913 *
1914 * The page_list array entries must be translated using one of the
1915 * ib_dma_*() functions just like the addresses passed to
1916 * ib_map_phys_fmr().  Once the ib_post_send() is issued, the struct
1917 * ib_fast_reg_page_list must not be modified by the caller until the
1918 * IB_WR_FAST_REG_MR work request completes.
1919 */
1920struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
1921                                struct ib_device *device, int page_list_len);
1922
1923/**
1924 * ib_free_fast_reg_page_list - Deallocates a previously allocated
1925 *   page list array.
1926 * @page_list: struct ib_fast_reg_page_list pointer to be deallocated.
1927 */
1928void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
1929
1930/**
1931 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
1932 *   R_Key and L_Key.
1933 * @mr: struct ib_mr pointer to be updated.
1934 * @newkey: new key to be used.
1935 */
1936static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
1937{
1938        mr->lkey = (mr->lkey & 0xffffff00) | newkey;
1939        mr->rkey = (mr->rkey & 0xffffff00) | newkey;
1940}
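
/*
 * Usage sketch (illustrative): consumers typically bump the 8-bit key
 * each time the MR is reused, so that stale remote references to the old
 * R_Key no longer match.  "key" is a counter kept by the caller.
 *
 *	ib_update_fast_reg_key(mr, ++key);
 */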
1941
1942/**
1943 * ib_alloc_mw - Allocates a memory window.
1944 * @pd: The protection domain associated with the memory window.
1945 */
1946struct ib_mw *ib_alloc_mw(struct ib_pd *pd);
1947
1948/**
1949 * ib_bind_mw - Posts a work request to the send queue of the specified
1950 *   QP, which binds the memory window to the given address range and
1951 *   remote access attributes.
1952 * @qp: QP to post the bind work request on.
1953 * @mw: The memory window to bind.
1954 * @mw_bind: Specifies information about the memory window, including
1955 *   its address range, remote access rights, and associated memory region.
1956 */
1957static inline int ib_bind_mw(struct ib_qp *qp,
1958                             struct ib_mw *mw,
1959                             struct ib_mw_bind *mw_bind)
1960{
1961        /* XXX reference counting in corresponding MR? */
1962        return mw->device->bind_mw ?
1963                mw->device->bind_mw(qp, mw, mw_bind) :
1964                -ENOSYS;
1965}
1966
1967/**
1968 * ib_dealloc_mw - Deallocates a memory window.
1969 * @mw: The memory window to deallocate.
1970 */
1971int ib_dealloc_mw(struct ib_mw *mw);
1972
1973/**
1974 * ib_alloc_fmr - Allocates an unmapped fast memory region.
1975 * @pd: The protection domain associated with the unmapped region.
1976 * @mr_access_flags: Specifies the memory access rights.
1977 * @fmr_attr: Attributes of the unmapped region.
1978 *
1979 * A fast memory region must be mapped before it can be used as part of
1980 * a work request.
1981 */
1982struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
1983                            int mr_access_flags,
1984                            struct ib_fmr_attr *fmr_attr);
1985
1986/**
1987 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
1988 * @fmr: The fast memory region to associate with the pages.
1989 * @page_list: An array of physical pages to map to the fast memory region.
1990 * @list_len: The number of pages in page_list.
1991 * @iova: The I/O virtual address to use with the mapped region.
1992 */
1993static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
1994                                  u64 *page_list, int list_len,
1995                                  u64 iova)
1996{
1997        return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
1998}
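
/*
 * Usage sketch (illustrative): "pages" holds page-aligned DMA addresses
 * produced by the ib_dma_*() helpers; "npages" and "iova" are
 * caller-provided.
 *
 *	int ret = ib_map_phys_fmr(fmr, pages, npages, iova);
 *
 * On success, the region is addressable through fmr->lkey and fmr->rkey
 * at "iova" until it is unmapped with ib_unmap_fmr().
 */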
1999
2000/**
2001 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
2002 * @fmr_list: A linked list of fast memory regions to unmap.
2003 */
2004int ib_unmap_fmr(struct list_head *fmr_list);
2005
2006/**
2007 * ib_dealloc_fmr - Deallocates a fast memory region.
2008 * @fmr: The fast memory region to deallocate.
2009 */
2010int ib_dealloc_fmr(struct ib_fmr *fmr);
2011
2012/**
2013 * ib_attach_mcast - Attaches the specified QP to a multicast group.
2014 * @qp: QP to attach to the multicast group.  The QP must be type
2015 *   IB_QPT_UD.
2016 * @gid: Multicast group GID.
2017 * @lid: Multicast group LID in host byte order.
2018 *
2019 * In order to send and receive multicast packets, subnet
2020 * administration must have created the multicast group and configured
2021 * the fabric appropriately.  The port associated with the specified
2022 * QP must also be a member of the multicast group.
2023 */
2024int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2025
2026/**
2027 * ib_detach_mcast - Detaches the specified QP from a multicast group.
2028 * @qp: QP to detach from the multicast group.
2029 * @gid: Multicast group GID.
2030 * @lid: Multicast group LID in host byte order.
2031 */
2032int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2033
2034#endif /* IB_VERBS_H */
2035