linux/drivers/infiniband/hw/qib/qib_verbs.h
/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef QIB_VERBS_H
#define QIB_VERBS_H

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>

struct qib_ctxtdata;
struct qib_pportdata;
struct qib_devdata;
struct qib_verbs_txreq;

#define QIB_MAX_RDMA_ATOMIC     16
#define QIB_GUIDS_PER_PORT      5

#define QPN_MAX                 (1 << 24)
#define QPNMAP_ENTRIES          (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
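/*
 * Worked example (illustrative only, assuming 4 KiB pages): QPN_MAX is
 * 2^24 QP numbers, each QPN needs one bit in the allocation bitmap, and
 * each bitmap page holds PAGE_SIZE * BITS_PER_BYTE = 4096 * 8 = 32768
 * bits, so QPNMAP_ENTRIES = 2^24 / 4096 / 8 = 512 map pages cover the
 * whole QPN space (see struct qib_qpn_table below).
 */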

/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define QIB_UVERBS_ABI_VERSION       2

/*
 * Define an ib_cq_notify value that is not valid so we know when CQ
 * notifications are armed.
 */
#define IB_CQ_NONE      (IB_CQ_NEXT_COMP + 1)

#define IB_SEQ_NAK      (3 << 29)

/* AETH NAK opcode values */
#define IB_RNR_NAK                      0x20
#define IB_NAK_PSN_ERROR                0x60
#define IB_NAK_INVALID_REQUEST          0x61
#define IB_NAK_REMOTE_ACCESS_ERROR      0x62
#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
#define IB_NAK_INVALID_RD_REQUEST       0x64

/* Flags for checking QP state (see ib_qib_state_ops[]) */
#define QIB_POST_SEND_OK                0x01
#define QIB_POST_RECV_OK                0x02
#define QIB_PROCESS_RECV_OK             0x04
#define QIB_PROCESS_SEND_OK             0x08
#define QIB_PROCESS_NEXT_SEND_OK        0x10
#define QIB_FLUSH_SEND                  0x20
#define QIB_FLUSH_RECV                  0x40
#define QIB_PROCESS_OR_FLUSH_SEND \
        (QIB_PROCESS_SEND_OK | QIB_FLUSH_SEND)

/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE       0x00
#define IB_PMA_SAMPLE_STATUS_STARTED    0x01
#define IB_PMA_SAMPLE_STATUS_RUNNING    0x02

/* Mandatory IB performance counter select values. */
#define IB_PMA_PORT_XMIT_DATA   cpu_to_be16(0x0001)
#define IB_PMA_PORT_RCV_DATA    cpu_to_be16(0x0002)
#define IB_PMA_PORT_XMIT_PKTS   cpu_to_be16(0x0003)
#define IB_PMA_PORT_RCV_PKTS    cpu_to_be16(0x0004)
#define IB_PMA_PORT_XMIT_WAIT   cpu_to_be16(0x0005)

#define QIB_VENDOR_IPG          cpu_to_be16(0xFFA0)

#define IB_BTH_REQ_ACK          (1 << 31)
#define IB_BTH_SOLICITED        (1 << 23)
#define IB_BTH_MIG_REQ          (1 << 22)

/* XXX Should be defined in ib_verbs.h enum ib_port_cap_flags */
#define IB_PORT_OTHER_LOCAL_CHANGES_SUP (1 << 26)

#define IB_GRH_VERSION          6
#define IB_GRH_VERSION_MASK     0xF
#define IB_GRH_VERSION_SHIFT    28
#define IB_GRH_TCLASS_MASK      0xFF
#define IB_GRH_TCLASS_SHIFT     20
#define IB_GRH_FLOW_MASK        0xFFFFF
#define IB_GRH_FLOW_SHIFT       0
#define IB_GRH_NEXT_HDR         0x1B
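/*
 * Illustrative sketch (not part of the driver): the IB_GRH_* mask/shift
 * pairs above describe the layout of the first GRH word, so decoding a
 * received struct ib_grh (from <rdma/ib_verbs.h>) might look like:
 *
 *      u32 vtf    = be32_to_cpu(grh->version_tclass_flow);
 *      u8  ver    = (vtf >> IB_GRH_VERSION_SHIFT) & IB_GRH_VERSION_MASK;
 *      u8  tclass = (vtf >> IB_GRH_TCLASS_SHIFT) & IB_GRH_TCLASS_MASK;
 *      u32 flow   = (vtf >> IB_GRH_FLOW_SHIFT) & IB_GRH_FLOW_MASK;
 */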

#define IB_DEFAULT_GID_PREFIX   cpu_to_be64(0xfe80000000000000ULL)

/* Values for set/get portinfo VLCap OperationalVLs */
#define IB_VL_VL0       1
#define IB_VL_VL0_1     2
#define IB_VL_VL0_3     3
#define IB_VL_VL0_7     4
#define IB_VL_VL0_14    5

static inline int qib_num_vls(int vls)
{
        switch (vls) {
        default:
        case IB_VL_VL0:
                return 1;
        case IB_VL_VL0_1:
                return 2;
        case IB_VL_VL0_3:
                return 4;
        case IB_VL_VL0_7:
                return 8;
        case IB_VL_VL0_14:
                return 15;
        }
}

struct ib_reth {
        __be64 vaddr;
        __be32 rkey;
        __be32 length;
} __attribute__ ((packed));

struct ib_atomic_eth {
        __be32 vaddr[2];        /* unaligned so access as 2 32-bit words */
        __be32 rkey;
        __be64 swap_data;
        __be64 compare_data;
} __attribute__ ((packed));

struct qib_other_headers {
        __be32 bth[3];
        union {
                struct {
                        __be32 deth[2];
                        __be32 imm_data;
                } ud;
                struct {
                        struct ib_reth reth;
                        __be32 imm_data;
                } rc;
                struct {
                        __be32 aeth;
                        __be32 atomic_ack_eth[2];
                } at;
                __be32 imm_data;
                __be32 aeth;
                struct ib_atomic_eth atomic_eth;
        } u;
} __attribute__ ((packed));

/*
 * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
 * long (72 w/ imm_data).  Only the first 56 bytes of the IB header
 * will be in the eager header buffer.  The remaining 12 or 16 bytes
 * are in the data buffer.
 */
struct qib_ib_header {
        __be16 lrh[4];
        union {
                struct {
                        struct ib_grh grh;
                        struct qib_other_headers oth;
                } l;
                struct qib_other_headers oth;
        } u;
} __attribute__ ((packed));
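/*
 * Illustrative sketch (assumptions noted, not the driver's receive path):
 * the low two bits of lrh[0] carry the IBA Link Next Header field, so a
 * receiver picks the right member of the union above roughly as follows,
 * where 0x3 means a GRH plus BTH follow and 0x2 means BTH only, per the
 * IBA spec, and hdr is a received struct qib_ib_header pointer:
 *
 *      struct qib_other_headers *ohdr;
 *      int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
 *
 *      if (lnh == 0x3)
 *              ohdr = &hdr->u.l.oth;
 *      else
 *              ohdr = &hdr->u.oth;
 */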

struct qib_pio_header {
        __le32 pbc[2];
        struct qib_ib_header hdr;
} __attribute__ ((packed));

/*
 * There is one struct qib_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct qib_mcast_qp.
 */
struct qib_mcast_qp {
        struct list_head list;
        struct qib_qp *qp;
};

struct qib_mcast {
        struct rb_node rb_node;
        union ib_gid mgid;
        struct list_head qp_list;
        wait_queue_head_t wait;
        atomic_t refcount;
        int n_attached;
};

/* Protection domain */
struct qib_pd {
        struct ib_pd ibpd;
        int user;               /* non-zero if created from user space */
};

/* Address Handle */
struct qib_ah {
        struct ib_ah ibah;
        struct ib_ah_attr attr;
        atomic_t refcount;
};

/*
 * This structure is used by qib_mmap() to validate an offset
 * when an mmap() request is made.  The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct qib_mmap_info {
        struct list_head pending_mmaps;
        struct ib_ucontext *context;
        void *obj;
        __u64 offset;
        struct kref ref;
        unsigned size;
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and completion queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 */
struct qib_cq_wc {
        u32 head;               /* index of next entry to fill */
        u32 tail;               /* index of next ib_poll_cq() entry */
        union {
                /* these are actually size ibcq.cqe + 1 */
                struct ib_uverbs_wc uqueue[0];
                struct ib_wc kqueue[0];
        };
};
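/*
 * Illustrative consumer loop (a sketch only; the real logic lives in
 * qib_poll_cq() and the userspace library): valid entries sit between
 * tail and head, and the ring holds ibcq.cqe + 1 slots, so draining the
 * kernel-side queue might look like the following, where wc points at
 * the qib_cq_wc, cq at the owning qib_cq (defined below), and
 * process_entry() stands in for whatever the caller does with a
 * completion:
 *
 *      u32 tail = wc->tail;
 *
 *      while (tail != wc->head) {
 *              process_entry(&wc->kqueue[tail]);
 *              if (tail >= cq->ibcq.cqe)
 *                      tail = 0;
 *              else
 *                      tail++;
 *      }
 *      wc->tail = tail;
 */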

/*
 * The completion queue structure.
 */
struct qib_cq {
        struct ib_cq ibcq;
        struct kthread_work comptask;
        struct qib_devdata *dd;
        spinlock_t lock; /* protect changes in this struct */
        u8 notify;
        u8 triggered;
        struct qib_cq_wc *queue;
        struct qib_mmap_info *ip;
};

/*
 * A segment is a linear region of low physical memory.
 * XXX Maybe we should use phys addr here and kmap()/kunmap().
 * Used by the verbs layer.
 */
struct qib_seg {
        void *vaddr;
        size_t length;
};

/* The number of qib_segs that fit in a page. */
#define QIB_SEGSZ     (PAGE_SIZE / sizeof(struct qib_seg))

struct qib_segarray {
        struct qib_seg segs[QIB_SEGSZ];
};
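/*
 * Worked example (illustrative, assuming a 64-bit kernel with 4 KiB
 * pages): struct qib_seg is a pointer plus a size_t, i.e. 16 bytes, so
 * QIB_SEGSZ = 4096 / 16 = 256 segments per qib_segarray page.
 */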

struct qib_mregion {
        struct ib_pd *pd;       /* shares refcnt of ibmr.pd */
        u64 user_base;          /* User's address for this region */
        u64 iova;               /* IB start address of this region */
        size_t length;
        u32 lkey;
        u32 offset;             /* offset (bytes) to start of region */
        int access_flags;
        u32 max_segs;           /* number of qib_segs in all the arrays */
        u32 mapsz;              /* size of the map array */
        u8  page_shift;         /* 0 - non-uniform or non power-of-2 sizes */
        u8  lkey_published;     /* in global table */
        struct completion comp; /* complete when refcount goes to zero */
        struct rcu_head list;
        atomic_t refcount;
        struct qib_segarray *map[0];    /* the segments */
};

/*
 * These keep track of the copy progress within a memory region.
 * Used by the verbs layer.
 */
struct qib_sge {
        struct qib_mregion *mr;
        void *vaddr;            /* kernel virtual address of segment */
        u32 sge_length;         /* length of the SGE */
        u32 length;             /* remaining length of the segment */
        u16 m;                  /* current index: mr->map[m] */
        u16 n;                  /* current index: mr->map[m]->segs[n] */
};
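/*
 * Illustrative sketch (not the driver's copy loop): when the current
 * segment is consumed, the (m, n) cursor advances through the two-level
 * segment map of the region, wrapping n at QIB_SEGSZ entries per
 * qib_segarray:
 *
 *      if (++sge->n >= QIB_SEGSZ) {
 *              sge->m++;
 *              sge->n = 0;
 *      }
 *      sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
 *      sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
 */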

/* Memory region */
struct qib_mr {
        struct ib_mr ibmr;
        struct ib_umem *umem;
        struct qib_mregion mr;  /* must be last */
};

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct qib_swqe {
        struct ib_send_wr wr;   /* don't use wr.sg_list */
        u32 psn;                /* first packet sequence number */
        u32 lpsn;               /* last packet sequence number */
        u32 ssn;                /* send sequence number */
        u32 length;             /* total length of data in sg_list */
        struct qib_sge sg_list[0];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct qib_rwqe {
        u64 wr_id;
        u8 num_sge;
        struct ib_sge sg_list[0];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use get_rwqe_ptr() instead.
 */
struct qib_rwq {
        u32 head;               /* new work requests posted to the head */
        u32 tail;               /* receives pull requests from here. */
        struct qib_rwqe wq[0];
};

struct qib_rq {
        struct qib_rwq *wq;
        u32 size;               /* size of RWQE array */
        u8 max_sge;
        spinlock_t lock /* protect changes in this struct */
                ____cacheline_aligned_in_smp;
};

struct qib_srq {
        struct ib_srq ibsrq;
        struct qib_rq rq;
        struct qib_mmap_info *ip;
        /* send signal when number of RWQEs < limit */
        u32 limit;
};

struct qib_sge_state {
        struct qib_sge *sg_list;      /* next SGE to be used if any */
        struct qib_sge sge;   /* progress state for the current SGE */
        u32 total_len;
        u8 num_sge;
};

/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct qib_ack_entry {
        u8 opcode;
        u8 sent;
        u32 psn;
        u32 lpsn;
        union {
                struct qib_sge rdma_sge;
                u64 atomic_data;
        };
};

/*
 * Variables prefixed with s_ are for the requester (sender).
 * Variables prefixed with r_ are for the responder (receiver).
 * Variables prefixed with ack_ are for responder replies.
 *
 * Common variables are protected by both r_rq.lock and s_lock in that order
 * which only happens in modify_qp() or changing the QP 'state'.
 */
struct qib_qp {
        struct ib_qp ibqp;
        /* read mostly fields above and below */
        struct ib_ah_attr remote_ah_attr;
        struct ib_ah_attr alt_ah_attr;
        struct qib_qp __rcu *next;            /* linked list for QPN hash table */
        struct qib_swqe *s_wq;  /* send work queue */
        struct qib_mmap_info *ip;
        struct qib_ib_header *s_hdr;     /* next packet header to send */
        unsigned long timeout_jiffies;  /* computed from timeout */

        enum ib_mtu path_mtu;
        u32 remote_qpn;
        u32 pmtu;               /* decoded from path_mtu */
        u32 qkey;               /* QKEY for this QP (for UD or RD) */
        u32 s_size;             /* send work queue size */
        u32 s_rnr_timeout;      /* number of milliseconds for RNR timeout */

        u8 state;               /* QP state */
        u8 qp_access_flags;
        u8 alt_timeout;         /* Alternate path timeout for this QP */
        u8 timeout;             /* Timeout for this QP */
        u8 s_srate;
        u8 s_mig_state;
        u8 port_num;
        u8 s_pkey_index;        /* PKEY index to use */
        u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
        u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
        u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
        u8 s_retry_cnt;         /* number of times to retry */
        u8 s_rnr_retry_cnt;
        u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
        u8 s_max_sge;           /* size of s_wq->sg_list */
        u8 s_draining;

        /* start of read/write fields */

        atomic_t refcount ____cacheline_aligned_in_smp;
        wait_queue_head_t wait;


        struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1]
                ____cacheline_aligned_in_smp;
        struct qib_sge_state s_rdma_read_sge;

        spinlock_t r_lock ____cacheline_aligned_in_smp;      /* used for APM */
        unsigned long r_aflags;
        u64 r_wr_id;            /* ID for current receive WQE */
        u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
        u32 r_len;              /* total length of r_sge */
        u32 r_rcv_len;          /* receive data len processed */
        u32 r_psn;              /* expected rcv packet sequence number */
        u32 r_msn;              /* message sequence number */

        u8 r_state;             /* opcode of last packet received */
        u8 r_flags;
        u8 r_head_ack_queue;    /* index into s_ack_queue[] */

        struct list_head rspwait;       /* link for waiting to respond */

        struct qib_sge_state r_sge;     /* current receive data */
        struct qib_rq r_rq;             /* receive work queue */

        spinlock_t s_lock ____cacheline_aligned_in_smp;
        struct qib_sge_state *s_cur_sge;
        u32 s_flags;
        struct qib_verbs_txreq *s_tx;
        struct qib_swqe *s_wqe;
        struct qib_sge_state s_sge;     /* current send request data */
        struct qib_mregion *s_rdma_mr;
        atomic_t s_dma_busy;
        u32 s_cur_size;         /* size of send packet in bytes */
        u32 s_len;              /* total length of s_sge */
        u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
        u32 s_next_psn;         /* PSN for next request */
        u32 s_last_psn;         /* last response PSN processed */
        u32 s_sending_psn;      /* lowest PSN that is being sent */
        u32 s_sending_hpsn;     /* highest PSN that is being sent */
        u32 s_psn;              /* current packet sequence number */
        u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
        u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
        u32 s_head;             /* new entries added here */
        u32 s_tail;             /* next entry to process */
        u32 s_cur;              /* current work queue entry */
        u32 s_acked;            /* last un-ACK'ed entry */
        u32 s_last;             /* last completed entry */
        u32 s_ssn;              /* SSN of tail entry */
        u32 s_lsn;              /* limit sequence number (credit) */
        u16 s_hdrwords;         /* size of s_hdr in 32 bit words */
        u16 s_rdma_ack_cnt;
        u8 s_state;             /* opcode of last packet sent */
        u8 s_ack_state;         /* opcode of packet to ACK */
        u8 s_nak_state;         /* non-zero if NAK is pending */
        u8 r_nak_state;         /* non-zero if NAK is pending */
        u8 s_retry;             /* requester retry counter */
        u8 s_rnr_retry;         /* requester RNR retry counter */
        u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
        u8 s_tail_ack_queue;    /* index into s_ack_queue[] */

        struct qib_sge_state s_ack_rdma_sge;
        struct timer_list s_timer;
        struct list_head iowait;        /* link for wait PIO buf */

        struct work_struct s_work;

        wait_queue_head_t wait_dma;

        struct qib_sge r_sg_list[0] /* verified SGEs */
                ____cacheline_aligned_in_smp;
};
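/*
 * Illustrative sketch of the locking rule described in the comment above
 * struct qib_qp (not actual driver code): when both locks are needed,
 * r_rq.lock is taken before s_lock and released in the reverse order:
 *
 *      spin_lock_irq(&qp->r_rq.lock);
 *      spin_lock(&qp->s_lock);
 *      ... update common QP state ...
 *      spin_unlock(&qp->s_lock);
 *      spin_unlock_irq(&qp->r_rq.lock);
 */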

/*
 * Atomic bit definitions for r_aflags.
 */
#define QIB_R_WRID_VALID        0
#define QIB_R_REWIND_SGE        1

/*
 * Bit definitions for r_flags.
 */
#define QIB_R_REUSE_SGE 0x01
#define QIB_R_RDMAR_SEQ 0x02
#define QIB_R_RSP_NAK   0x04
#define QIB_R_RSP_SEND  0x08
#define QIB_R_COMM_EST  0x10

/*
 * Bit definitions for s_flags.
 *
 * QIB_S_SIGNAL_REQ_WR - set if send WRs only signal completion when requested
 * QIB_S_BUSY - send tasklet is processing the QP
 * QIB_S_TIMER - the RC retry timer is active
 * QIB_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * QIB_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                         before processing the next SWQE
 * QIB_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 *                         before processing the next SWQE
 * QIB_S_WAIT_RNR - waiting for RNR timeout
 * QIB_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * QIB_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *                  next send completion entry not via send DMA
 * QIB_S_WAIT_PIO - waiting for a send buffer to be available
 * QIB_S_WAIT_TX - waiting for a struct qib_verbs_txreq to be available
 * QIB_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * QIB_S_WAIT_KMEM - waiting for kernel memory to be available
 * QIB_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * QIB_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * QIB_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 */
#define QIB_S_SIGNAL_REQ_WR     0x0001
#define QIB_S_BUSY              0x0002
#define QIB_S_TIMER             0x0004
#define QIB_S_RESP_PENDING      0x0008
#define QIB_S_ACK_PENDING       0x0010
#define QIB_S_WAIT_FENCE        0x0020
#define QIB_S_WAIT_RDMAR        0x0040
#define QIB_S_WAIT_RNR          0x0080
#define QIB_S_WAIT_SSN_CREDIT   0x0100
#define QIB_S_WAIT_DMA          0x0200
#define QIB_S_WAIT_PIO          0x0400
#define QIB_S_WAIT_TX           0x0800
#define QIB_S_WAIT_DMA_DESC     0x1000
#define QIB_S_WAIT_KMEM         0x2000
#define QIB_S_WAIT_PSN          0x4000
#define QIB_S_WAIT_ACK          0x8000
#define QIB_S_SEND_ONE          0x10000
#define QIB_S_UNLIMITED_CREDIT  0x20000

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define QIB_S_ANY_WAIT_IO (QIB_S_WAIT_PIO | QIB_S_WAIT_TX | \
        QIB_S_WAIT_DMA_DESC | QIB_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define QIB_S_ANY_WAIT_SEND (QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | \
        QIB_S_WAIT_RNR | QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_DMA | \
        QIB_S_WAIT_PSN | QIB_S_WAIT_ACK)

#define QIB_S_ANY_WAIT (QIB_S_ANY_WAIT_IO | QIB_S_ANY_WAIT_SEND)

#define QIB_PSN_CREDIT  16

/*
 * Since struct qib_swqe is not a fixed size, we can't simply index into
 * struct qib_qp.s_wq.  This function does the array index computation.
 */
static inline struct qib_swqe *get_swqe_ptr(struct qib_qp *qp,
                                              unsigned n)
{
        return (struct qib_swqe *)((char *)qp->s_wq +
                                     (sizeof(struct qib_swqe) +
                                      qp->s_max_sge *
                                      sizeof(struct qib_sge)) * n);
}

/*
 * Since struct qib_rwqe is not a fixed size, we can't simply index into
 * struct qib_rwq.wq.  This function does the array index computation.
 */
static inline struct qib_rwqe *get_rwqe_ptr(struct qib_rq *rq, unsigned n)
{
        return (struct qib_rwqe *)
                ((char *) rq->wq->wq +
                 (sizeof(struct qib_rwqe) +
                  rq->max_sge * sizeof(struct ib_sge)) * n);
}

/*
 * QPN-map pages start out as NULL; they are allocated on first use and
 * never deallocated, so large bitmaps only exist when large numbers of
 * QPs are actually in use.
 */
struct qpn_map {
        void *page;
};

struct qib_qpn_table {
        spinlock_t lock; /* protect changes in this struct */
        unsigned flags;         /* flags for QP0/1 allocated for each port */
        u32 last;               /* last QP number allocated */
        u32 nmaps;              /* size of the map table */
        u16 limit;
        u16 mask;
        /* bit map of free QP numbers other than 0/1 */
        struct qpn_map map[QPNMAP_ENTRIES];
};
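/*
 * Illustrative sketch (the real allocator lives in qib_qp.c): a QPN maps
 * to a bitmap page and a bit within that page roughly as follows, where
 * each page covers PAGE_SIZE * BITS_PER_BYTE QP numbers and in_use is a
 * hypothetical local used only for this example:
 *
 *      struct qpn_map *map = &qpt->map[qpn / (PAGE_SIZE * BITS_PER_BYTE)];
 *      unsigned bit = qpn % (PAGE_SIZE * BITS_PER_BYTE);
 *      int in_use = map->page ? test_bit(bit, map->page) : 0;
 */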

struct qib_lkey_table {
        spinlock_t lock; /* protect changes in this struct */
        u32 next;               /* next unused index (speeds search) */
        u32 gen;                /* generation count */
        u32 max;                /* size of the table */
        struct qib_mregion __rcu **table;
};

struct qib_opcode_stats {
        u64 n_packets;          /* number of packets */
        u64 n_bytes;            /* total number of bytes */
};

struct qib_opcode_stats_perctx {
        struct qib_opcode_stats stats[128];
};

struct qib_ibport {
        struct qib_qp __rcu *qp0;
        struct qib_qp __rcu *qp1;
        struct ib_mad_agent *send_agent;        /* agent for SMI (traps) */
        struct qib_ah *sm_ah;
        struct qib_ah *smi_ah;
        struct rb_root mcast_tree;
        spinlock_t lock;                /* protect changes in this struct */

        /* non-zero when timer is set */
        unsigned long mkey_lease_timeout;
        unsigned long trap_timeout;
        __be64 gid_prefix;      /* in network order */
        __be64 mkey;
        __be64 guids[QIB_GUIDS_PER_PORT - 1];   /* writable GUIDs */
        u64 tid;                /* TID for traps */
        u64 n_unicast_xmit;     /* total unicast packets sent */
        u64 n_unicast_rcv;      /* total unicast packets received */
        u64 n_multicast_xmit;   /* total multicast packets sent */
        u64 n_multicast_rcv;    /* total multicast packets received */
        u64 z_symbol_error_counter;             /* starting count for PMA */
        u64 z_link_error_recovery_counter;      /* starting count for PMA */
        u64 z_link_downed_counter;              /* starting count for PMA */
        u64 z_port_rcv_errors;                  /* starting count for PMA */
        u64 z_port_rcv_remphys_errors;          /* starting count for PMA */
        u64 z_port_xmit_discards;               /* starting count for PMA */
        u64 z_port_xmit_data;                   /* starting count for PMA */
        u64 z_port_rcv_data;                    /* starting count for PMA */
        u64 z_port_xmit_packets;                /* starting count for PMA */
        u64 z_port_rcv_packets;                 /* starting count for PMA */
        u32 z_local_link_integrity_errors;      /* starting count for PMA */
        u32 z_excessive_buffer_overrun_errors;  /* starting count for PMA */
        u32 z_vl15_dropped;                     /* starting count for PMA */
        u32 n_rc_resends;
        u32 n_rc_acks;
        u32 n_rc_qacks;
        u32 n_rc_delayed_comp;
        u32 n_seq_naks;
        u32 n_rdma_seq;
        u32 n_rnr_naks;
        u32 n_other_naks;
        u32 n_loop_pkts;
        u32 n_pkt_drops;
        u32 n_vl15_dropped;
        u32 n_rc_timeouts;
        u32 n_dmawait;
        u32 n_unaligned;
        u32 n_rc_dupreq;
        u32 n_rc_seqnak;
        u32 port_cap_flags;
        u32 pma_sample_start;
        u32 pma_sample_interval;
        __be16 pma_counter_select[5];
        u16 pma_tag;
        u16 pkey_violations;
        u16 qkey_violations;
        u16 mkey_violations;
        u16 mkey_lease_period;
        u16 sm_lid;
        u16 repress_traps;
        u8 sm_sl;
        u8 mkeyprot;
        u8 subnet_timeout;
        u8 vl_high_limit;
        u8 sl_to_vl[16];

};


struct qib_ibdev {
        struct ib_device ibdev;
        struct list_head pending_mmaps;
        spinlock_t mmap_offset_lock; /* protect mmap_offset */
        u32 mmap_offset;
        struct qib_mregion __rcu *dma_mr;

        /* QP numbers are shared by all IB ports */
        struct qib_qpn_table qpn_table;
        struct qib_lkey_table lk_table;
        struct list_head piowait;       /* list for wait PIO buf */
        struct list_head dmawait;       /* list for wait DMA */
        struct list_head txwait;        /* list for wait qib_verbs_txreq */
        struct list_head memwait;       /* list for wait kernel memory */
        struct list_head txreq_free;
        struct timer_list mem_timer;
        struct qib_qp __rcu **qp_table;
        struct qib_pio_header *pio_hdrs;
        dma_addr_t pio_hdrs_phys;
        /* list of QPs waiting for RNR timer */
        spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */
        u32 qp_table_size; /* size of the hash table */
        u32 qp_rnd; /* random bytes for hash */
        spinlock_t qpt_lock;

        u32 n_piowait;
        u32 n_txwait;

        u32 n_pds_allocated;    /* number of PDs allocated for device */
        spinlock_t n_pds_lock;
        u32 n_ahs_allocated;    /* number of AHs allocated for device */
        spinlock_t n_ahs_lock;
        u32 n_cqs_allocated;    /* number of CQs allocated for device */
        spinlock_t n_cqs_lock;
        u32 n_qps_allocated;    /* number of QPs allocated for device */
        spinlock_t n_qps_lock;
        u32 n_srqs_allocated;   /* number of SRQs allocated for device */
        spinlock_t n_srqs_lock;
        u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
        spinlock_t n_mcast_grps_lock;
#ifdef CONFIG_DEBUG_FS
        /* per HCA debugfs */
        struct dentry *qib_ibdev_dbg;
#endif
};

struct qib_verbs_counters {
        u64 symbol_error_counter;
        u64 link_error_recovery_counter;
        u64 link_downed_counter;
        u64 port_rcv_errors;
        u64 port_rcv_remphys_errors;
        u64 port_xmit_discards;
        u64 port_xmit_data;
        u64 port_rcv_data;
        u64 port_xmit_packets;
        u64 port_rcv_packets;
        u32 local_link_integrity_errors;
        u32 excessive_buffer_overrun_errors;
        u32 vl15_dropped;
};

static inline struct qib_mr *to_imr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct qib_mr, ibmr);
}

static inline struct qib_pd *to_ipd(struct ib_pd *ibpd)
{
        return container_of(ibpd, struct qib_pd, ibpd);
}

static inline struct qib_ah *to_iah(struct ib_ah *ibah)
{
        return container_of(ibah, struct qib_ah, ibah);
}

static inline struct qib_cq *to_icq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct qib_cq, ibcq);
}

static inline struct qib_srq *to_isrq(struct ib_srq *ibsrq)
{
        return container_of(ibsrq, struct qib_srq, ibsrq);
}

static inline struct qib_qp *to_iqp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct qib_qp, ibqp);
}

static inline struct qib_ibdev *to_idev(struct ib_device *ibdev)
{
        return container_of(ibdev, struct qib_ibdev, ibdev);
}

/*
 * Send if not busy or waiting for I/O and either
 * an RC response is pending or we can process send work requests.
 */
static inline int qib_send_ok(struct qib_qp *qp)
{
        return !(qp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT_IO)) &&
                (qp->s_hdrwords || (qp->s_flags & QIB_S_RESP_PENDING) ||
                 !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
}

/*
 * This must be called with s_lock held.
 */
void qib_schedule_send(struct qib_qp *qp);

static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
{
        u16 p1 = pkey1 & 0x7FFF;
        u16 p2 = pkey2 & 0x7FFF;

        /*
         * Low 15 bits must be non-zero and match, and
         * one of the two must be a full member.
         */
        return p1 && p1 == p2 && ((__s16)pkey1 < 0 || (__s16)pkey2 < 0);
}
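/*
 * Worked example: bit 15 of a PKey marks full membership, so
 * qib_pkey_ok(0xFFFF, 0x7FFF) is true (same low 15 bits, one full
 * member), while qib_pkey_ok(0x7FFF, 0x7FFF) is false because both
 * partners are limited members.
 */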

void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
                   u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
void qib_cap_mask_chg(struct qib_ibport *ibp);
void qib_sys_guid_chg(struct qib_ibport *ibp);
void qib_node_desc_chg(struct qib_ibport *ibp);
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                    struct ib_wc *in_wc, struct ib_grh *in_grh,
                    struct ib_mad *in_mad, struct ib_mad *out_mad);
int qib_create_agents(struct qib_ibdev *dev);
void qib_free_agents(struct qib_ibdev *dev);

/*
 * Compare the lower 24 bits of the two values.
 * Returns an integer less than, equal to, or greater than zero.
 */
static inline int qib_cmp24(u32 a, u32 b)
{
        return (((int) a) - ((int) b)) << 8;
}
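/*
 * Worked example: PSNs are 24-bit values that wrap, and the left shift
 * discards the upper 8 bits of the difference while putting the sign of
 * the 24-bit difference into the sign bit, so qib_cmp24(1, 0xFFFFFF) > 0:
 * PSN 1 is "after" PSN 0xFFFFFF across the wrap, even though a plain
 * 32-bit comparison would say otherwise.
 */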

struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid);

int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
                          u64 *rwords, u64 *spkts, u64 *rpkts,
                          u64 *xmit_wait);

int qib_get_counters(struct qib_pportdata *ppd,
                     struct qib_verbs_counters *cntrs);

int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int qib_mcast_tree_empty(struct qib_ibport *ibp);

__be32 qib_compute_aeth(struct qib_qp *qp);

struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn);

struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
                            struct ib_qp_init_attr *init_attr,
                            struct ib_udata *udata);

int qib_destroy_qp(struct ib_qp *ibqp);

int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err);

int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                  int attr_mask, struct ib_udata *udata);

int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                 int attr_mask, struct ib_qp_init_attr *init_attr);

unsigned qib_free_all_qps(struct qib_devdata *dd);

void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt);

void qib_free_qpn_table(struct qib_qpn_table *qpt);

#ifdef CONFIG_DEBUG_FS

struct qib_qp_iter;

struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev);

int qib_qp_iter_next(struct qib_qp_iter *iter);

void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter);

#endif

void qib_get_credit(struct qib_qp *qp, u32 aeth);

unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult);

void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail);

void qib_put_txreq(struct qib_verbs_txreq *tx);

int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
                   u32 hdrwords, struct qib_sge_state *ss, u32 len);

void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length,
                  int release);

void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release);

void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
                int has_grh, void *data, u32 tlen, struct qib_qp *qp);

void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
                int has_grh, void *data, u32 tlen, struct qib_qp *qp);

int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);

struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid);

void qib_rc_rnr_retry(unsigned long arg);

void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr);

void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err);

int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr);

void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
                int has_grh, void *data, u32 tlen, struct qib_qp *qp);

int qib_alloc_lkey(struct qib_mregion *mr, int dma_region);

void qib_free_lkey(struct qib_mregion *mr);

int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
                struct qib_sge *isge, struct ib_sge *sge, int acc);

int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
                u32 len, u64 vaddr, u32 rkey, int acc);

int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                         struct ib_recv_wr **bad_wr);

struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
                              struct ib_srq_init_attr *srq_init_attr,
                              struct ib_udata *udata);

int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                   enum ib_srq_attr_mask attr_mask,
                   struct ib_udata *udata);

int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);

int qib_destroy_srq(struct ib_srq *ibsrq);

int qib_cq_init(struct qib_devdata *dd);

void qib_cq_exit(struct qib_devdata *dd);

void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig);

int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);

struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries,
                            int comp_vector, struct ib_ucontext *context,
                            struct ib_udata *udata);

int qib_destroy_cq(struct ib_cq *ibcq);

int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);

int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);

struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc);

struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
                              struct ib_phys_buf *buffer_list,
                              int num_phys_buf, int acc, u64 *iova_start);

struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                              u64 virt_addr, int mr_access_flags,
                              struct ib_udata *udata);

int qib_dereg_mr(struct ib_mr *ibmr);

struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);

struct ib_fast_reg_page_list *qib_alloc_fast_reg_page_list(
                                struct ib_device *ibdev, int page_list_len);

void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl);

int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr);

struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
                             struct ib_fmr_attr *fmr_attr);

int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
                     int list_len, u64 iova);

int qib_unmap_fmr(struct list_head *fmr_list);

int qib_dealloc_fmr(struct ib_fmr *ibfmr);

static inline void qib_get_mr(struct qib_mregion *mr)
{
        atomic_inc(&mr->refcount);
}

void mr_rcu_callback(struct rcu_head *list);

static inline void qib_put_mr(struct qib_mregion *mr)
{
        if (unlikely(atomic_dec_and_test(&mr->refcount)))
                call_rcu(&mr->list, mr_rcu_callback);
}

static inline void qib_put_ss(struct qib_sge_state *ss)
{
        while (ss->num_sge) {
                qib_put_mr(ss->sge.mr);
                if (--ss->num_sge)
                        ss->sge = *ss->sg_list++;
        }
}


void qib_release_mmap_info(struct kref *ref);

struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size,
                                           struct ib_ucontext *context,
                                           void *obj);

void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
                          u32 size, void *obj);

int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

int qib_get_rwqe(struct qib_qp *qp, int wr_id_only);

void qib_migrate_qp(struct qib_qp *qp);

int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
                      int has_grh, struct qib_qp *qp, u32 bth0);

u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
                 struct ib_global_route *grh, u32 hwords, u32 nwords);

void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
                         u32 bth0, u32 bth2);

void qib_do_send(struct work_struct *work);

void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
                       enum ib_wc_status status);

void qib_send_rc_ack(struct qib_qp *qp);

int qib_make_rc_req(struct qib_qp *qp);

int qib_make_uc_req(struct qib_qp *qp);

int qib_make_ud_req(struct qib_qp *qp);

int qib_register_ib_device(struct qib_devdata *);

void qib_unregister_ib_device(struct qib_devdata *);

void qib_ib_rcv(struct qib_ctxtdata *, void *, void *, u32);

void qib_ib_piobufavail(struct qib_devdata *);

unsigned qib_get_npkeys(struct qib_devdata *);

unsigned qib_get_pkey(struct qib_ibport *, unsigned);

extern const enum ib_wc_opcode ib_qib_wc_opcode[];

/*
 * Below are the HCA-independent IB PhysPortState values, returned
 * by the f_ibphys_portstate() routine.
 */
#define IB_PHYSPORTSTATE_SLEEP 1
#define IB_PHYSPORTSTATE_POLL 2
#define IB_PHYSPORTSTATE_DISABLED 3
#define IB_PHYSPORTSTATE_CFG_TRAIN 4
#define IB_PHYSPORTSTATE_LINKUP 5
#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
#define IB_PHYSPORTSTATE_CFG_DEBOUNCE 8
#define IB_PHYSPORTSTATE_CFG_IDLE 0xB
#define IB_PHYSPORTSTATE_RECOVERY_RETRAIN 0xC
#define IB_PHYSPORTSTATE_RECOVERY_WAITRMT 0xE
#define IB_PHYSPORTSTATE_RECOVERY_IDLE 0xF
#define IB_PHYSPORTSTATE_CFG_ENH 0x10
#define IB_PHYSPORTSTATE_CFG_WAIT_ENH 0x13

extern const int ib_qib_state_ops[];

extern __be64 ib_qib_sys_image_guid;    /* in network order */

extern unsigned int ib_qib_lkey_table_size;

extern unsigned int ib_qib_max_cqes;

extern unsigned int ib_qib_max_cqs;

extern unsigned int ib_qib_max_qp_wrs;

extern unsigned int ib_qib_max_qps;

extern unsigned int ib_qib_max_sges;

extern unsigned int ib_qib_max_mcast_grps;

extern unsigned int ib_qib_max_mcast_qp_attached;

extern unsigned int ib_qib_max_srqs;

extern unsigned int ib_qib_max_srq_sges;

extern unsigned int ib_qib_max_srq_wrs;

extern const u32 ib_qib_rnr_table[];

extern struct ib_dma_mapping_ops qib_dma_mapping_ops;

#endif                          /* QIB_VERBS_H */