linux/drivers/infiniband/hw/qib/qib_verbs.h
/*
 * Copyright (c) 2012 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef QIB_VERBS_H
#define QIB_VERBS_H

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>

struct qib_ctxtdata;
struct qib_pportdata;
struct qib_devdata;
struct qib_verbs_txreq;
#define QIB_MAX_RDMA_ATOMIC     16
#define QIB_GUIDS_PER_PORT      5

#define QPN_MAX                 (1 << 24)
#define QPNMAP_ENTRIES          (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)

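/*
 * Sizing sketch (illustrative, assuming 4 KiB pages): QPN_MAX is 2^24
 * possible QP numbers, i.e. 2^24 bits of free-QPN bitmap.  Each
 * qpn_map page covers PAGE_SIZE * BITS_PER_BYTE = 32768 QPNs, so
 * QPNMAP_ENTRIES works out to 2^24 / 32768 = 512 map entries.
 */
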
/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define QIB_UVERBS_ABI_VERSION       2

/*
 * Define an ib_cq_notify value that is not valid so we know when CQ
 * notifications are armed.
 */
#define IB_CQ_NONE      (IB_CQ_NEXT_COMP + 1)

#define IB_SEQ_NAK      (3 << 29)

/* AETH NAK opcode values */
#define IB_RNR_NAK                      0x20
#define IB_NAK_PSN_ERROR                0x60
#define IB_NAK_INVALID_REQUEST          0x61
#define IB_NAK_REMOTE_ACCESS_ERROR      0x62
#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
#define IB_NAK_INVALID_RD_REQUEST       0x64

/* Flags for checking QP state (see ib_qib_state_ops[]) */
#define QIB_POST_SEND_OK                0x01
#define QIB_POST_RECV_OK                0x02
#define QIB_PROCESS_RECV_OK             0x04
#define QIB_PROCESS_SEND_OK             0x08
#define QIB_PROCESS_NEXT_SEND_OK        0x10
#define QIB_FLUSH_SEND                  0x20
#define QIB_FLUSH_RECV                  0x40
#define QIB_PROCESS_OR_FLUSH_SEND \
        (QIB_PROCESS_SEND_OK | QIB_FLUSH_SEND)

/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE       0x00
#define IB_PMA_SAMPLE_STATUS_STARTED    0x01
#define IB_PMA_SAMPLE_STATUS_RUNNING    0x02

/* Mandatory IB performance counter select values. */
#define IB_PMA_PORT_XMIT_DATA   cpu_to_be16(0x0001)
#define IB_PMA_PORT_RCV_DATA    cpu_to_be16(0x0002)
#define IB_PMA_PORT_XMIT_PKTS   cpu_to_be16(0x0003)
#define IB_PMA_PORT_RCV_PKTS    cpu_to_be16(0x0004)
#define IB_PMA_PORT_XMIT_WAIT   cpu_to_be16(0x0005)

#define QIB_VENDOR_IPG          cpu_to_be16(0xFFA0)

#define IB_BTH_REQ_ACK          (1 << 31)
#define IB_BTH_SOLICITED        (1 << 23)
#define IB_BTH_MIG_REQ          (1 << 22)

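/*
 * Placement sketch (illustrative, not part of the original header):
 * IB_BTH_SOLICITED and IB_BTH_MIG_REQ are OR'ed into the first BTH
 * dword (bth[0], alongside the opcode), while IB_BTH_REQ_ACK (the
 * AckReq bit) is OR'ed into the third dword (bth[2], with the PSN):
 *
 *      ohdr->bth[0] = cpu_to_be32(bth0 | IB_BTH_SOLICITED);
 *      ohdr->bth[2] = cpu_to_be32(bth2 | IB_BTH_REQ_ACK);
 */
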
/* XXX Should be defined in ib_verbs.h enum ib_port_cap_flags */
#define IB_PORT_OTHER_LOCAL_CHANGES_SUP (1 << 26)

#define IB_GRH_VERSION          6
#define IB_GRH_VERSION_MASK     0xF
#define IB_GRH_VERSION_SHIFT    28
#define IB_GRH_TCLASS_MASK      0xFF
#define IB_GRH_TCLASS_SHIFT     20
#define IB_GRH_FLOW_MASK        0xFFFFF
#define IB_GRH_FLOW_SHIFT       0
#define IB_GRH_NEXT_HDR         0x1B

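/*
 * Assembly sketch (illustrative, not part of the original header):
 * the masks/shifts above pack the first GRH dword as
 * version:4 | tclass:8 | flow_label:20, e.g.
 *
 *      hdr->version_tclass_flow =
 *              cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
 *                          (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
 *                          (grh->flow_label << IB_GRH_FLOW_SHIFT));
 */
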
#define IB_DEFAULT_GID_PREFIX   cpu_to_be64(0xfe80000000000000ULL)

/* Values for set/get portinfo VLCap OperationalVLs */
#define IB_VL_VL0       1
#define IB_VL_VL0_1     2
#define IB_VL_VL0_3     3
#define IB_VL_VL0_7     4
#define IB_VL_VL0_14    5

static inline int qib_num_vls(int vls)
{
        switch (vls) {
        default:
        case IB_VL_VL0:
                return 1;
        case IB_VL_VL0_1:
                return 2;
        case IB_VL_VL0_3:
                return 4;
        case IB_VL_VL0_7:
                return 8;
        case IB_VL_VL0_14:
                return 15;
        }
}

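/*
 * Example (illustrative): these are the PortInfo VLCap/OperationalVLs
 * encodings, so qib_num_vls(IB_VL_VL0_3) == 4 and
 * qib_num_vls(IB_VL_VL0_14) == 15; unknown encodings fall back to a
 * single VL.
 */
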
struct ib_reth {
        __be64 vaddr;
        __be32 rkey;
        __be32 length;
} __attribute__ ((packed));

struct ib_atomic_eth {
        __be32 vaddr[2];        /* unaligned so access as 2 32-bit words */
        __be32 rkey;
        __be64 swap_data;
        __be64 compare_data;
} __attribute__ ((packed));

struct qib_other_headers {
        __be32 bth[3];
        union {
                struct {
                        __be32 deth[2];
                        __be32 imm_data;
                } ud;
                struct {
                        struct ib_reth reth;
                        __be32 imm_data;
                } rc;
                struct {
                        __be32 aeth;
                        __be32 atomic_ack_eth[2];
                } at;
                __be32 imm_data;
                __be32 aeth;
                struct ib_atomic_eth atomic_eth;
        } u;
} __attribute__ ((packed));

/*
 * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
 * long (72 with imm_data), i.e. LRH (8) + GRH (40) + BTH (12) +
 * DETH (8).  Only the first 56 bytes of the IB header will be in
 * the eager header buffer.  The remaining 12 or 16 bytes are in
 * the data buffer.
 */
struct qib_ib_header {
        __be16 lrh[4];
        union {
                struct {
                        struct ib_grh grh;
                        struct qib_other_headers oth;
                } l;
                struct qib_other_headers oth;
        } u;
} __attribute__ ((packed));

struct qib_pio_header {
        __le32 pbc[2];
        struct qib_ib_header hdr;
} __attribute__ ((packed));

/*
 * There is one struct qib_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct qib_mcast_qp.
 */
struct qib_mcast_qp {
        struct list_head list;
        struct qib_qp *qp;
};

struct qib_mcast {
        struct rb_node rb_node;
        union ib_gid mgid;
        struct list_head qp_list;
        wait_queue_head_t wait;
        atomic_t refcount;
        int n_attached;
};

/* Protection domain */
struct qib_pd {
        struct ib_pd ibpd;
        int user;               /* non-zero if created from user space */
};

/* Address Handle */
struct qib_ah {
        struct ib_ah ibah;
        struct ib_ah_attr attr;
        atomic_t refcount;
};

/*
 * This structure is used by qib_mmap() to validate an offset
 * when an mmap() request is made.  The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct qib_mmap_info {
        struct list_head pending_mmaps;
        struct ib_ucontext *context;
        void *obj;
        __u64 offset;
        struct kref ref;
        unsigned size;
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and completion queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 */
struct qib_cq_wc {
        u32 head;               /* index of next entry to fill */
        u32 tail;               /* index of next ib_poll_cq() entry */
        union {
                /* these are actually size ibcq.cqe + 1 */
                struct ib_uverbs_wc uqueue[0];
                struct ib_wc kqueue[0];
        };
};

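/*
 * Polling sketch (illustrative, not part of the original header): a
 * kernel-side consumer advances tail through a ring of ibcq.cqe + 1
 * entries, wrapping explicitly rather than with a modulus:
 *
 *      struct qib_cq_wc *wc = cq->queue;
 *      u32 tail = wc->tail;
 *
 *      if (tail != wc->head) {
 *              *entry = wc->kqueue[tail];
 *              wc->tail = (tail >= cq->ibcq.cqe) ? 0 : tail + 1;
 *      }
 */
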
/*
 * The completion queue structure.
 */
struct qib_cq {
        struct ib_cq ibcq;
        struct work_struct comptask;
        spinlock_t lock; /* protect changes in this struct */
        u8 notify;
        u8 triggered;
        struct qib_cq_wc *queue;
        struct qib_mmap_info *ip;
};

/*
 * A segment is a linear region of low physical memory.
 * XXX Maybe we should use phys addr here and kmap()/kunmap().
 * Used by the verbs layer.
 */
struct qib_seg {
        void *vaddr;
        size_t length;
};

/* The number of qib_segs that fit in a page. */
#define QIB_SEGSZ     (PAGE_SIZE / sizeof(struct qib_seg))

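/*
 * Sizing sketch (illustrative): on a 64-bit kernel with 4 KiB pages,
 * sizeof(struct qib_seg) is 16 bytes, so QIB_SEGSZ = 4096 / 16 = 256
 * segments per map page.
 */
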
struct qib_segarray {
        struct qib_seg segs[QIB_SEGSZ];
};

struct qib_mregion {
        struct ib_pd *pd;       /* shares refcnt of ibmr.pd */
        u64 user_base;          /* User's address for this region */
        u64 iova;               /* IB start address of this region */
        size_t length;
        u32 lkey;
        u32 offset;             /* offset (bytes) to start of region */
        int access_flags;
        u32 max_segs;           /* number of qib_segs in all the arrays */
        u32 mapsz;              /* size of the map array */
        u8  page_shift;         /* 0 - non-uniform or non-power-of-2 sizes */
        u8  lkey_published;     /* in global table */
        struct completion comp; /* complete when refcount goes to zero */
        struct rcu_head list;
        atomic_t refcount;
        struct qib_segarray *map[0];    /* the segments */
};

/*
 * These keep track of the copy progress within a memory region.
 * Used by the verbs layer.
 */
struct qib_sge {
        struct qib_mregion *mr;
        void *vaddr;            /* kernel virtual address of segment */
        u32 sge_length;         /* length of the SGE */
        u32 length;             /* remaining length of the segment */
        u16 m;                  /* current index: mr->map[m] */
        u16 n;                  /* current index: mr->map[m]->segs[n] */
};

/* Memory region */
struct qib_mr {
        struct ib_mr ibmr;
        struct ib_umem *umem;
        struct qib_mregion mr;  /* must be last */
};

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct qib_swqe {
        struct ib_send_wr wr;   /* don't use wr.sg_list */
        u32 psn;                /* first packet sequence number */
        u32 lpsn;               /* last packet sequence number */
        u32 ssn;                /* send sequence number */
        u32 length;             /* total length of data in sg_list */
        struct qib_sge sg_list[0];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct qib_rwqe {
        u64 wr_id;
        u8 num_sge;
        struct ib_sge sg_list[0];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use get_rwqe_ptr() instead.
 */
struct qib_rwq {
        u32 head;               /* new work requests posted to the head */
        u32 tail;               /* entries are pulled from here */
        struct qib_rwqe wq[0];
};

struct qib_rq {
        struct qib_rwq *wq;
        u32 size;               /* size of RWQE array */
        u8 max_sge;
        spinlock_t lock /* protect changes in this struct */
                ____cacheline_aligned_in_smp;
};

struct qib_srq {
        struct ib_srq ibsrq;
        struct qib_rq rq;
        struct qib_mmap_info *ip;
        /* send signal when number of RWQEs < limit */
        u32 limit;
};

struct qib_sge_state {
        struct qib_sge *sg_list;      /* next SGE to be used if any */
        struct qib_sge sge;   /* progress state for the current SGE */
        u32 total_len;
        u8 num_sge;
};

/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct qib_ack_entry {
        u8 opcode;
        u8 sent;
        u32 psn;
        u32 lpsn;
        union {
                struct qib_sge rdma_sge;
                u64 atomic_data;
        };
};

/*
 * Variables prefixed with s_ are for the requester (sender).
 * Variables prefixed with r_ are for the responder (receiver).
 * Variables prefixed with ack_ are for responder replies.
 *
 * Common variables are protected by both r_rq.lock and s_lock in that
 * order, which only happens in modify_qp() or when changing the QP
 * 'state'.
 */
struct qib_qp {
        struct ib_qp ibqp;
        /* read mostly fields above and below */
        struct ib_ah_attr remote_ah_attr;
        struct ib_ah_attr alt_ah_attr;
        struct qib_qp __rcu *next;            /* linked list for QPN hash table */
        struct qib_swqe *s_wq;  /* send work queue */
        struct qib_mmap_info *ip;
        struct qib_ib_header *s_hdr;     /* next packet header to send */
        unsigned long timeout_jiffies;  /* computed from timeout */

        enum ib_mtu path_mtu;
        u32 remote_qpn;
        u32 pmtu;               /* decoded from path_mtu */
        u32 qkey;               /* QKEY for this QP (for UD or RD) */
        u32 s_size;             /* send work queue size */
        u32 s_rnr_timeout;      /* number of milliseconds for RNR timeout */

        u8 state;               /* QP state */
        u8 qp_access_flags;
        u8 alt_timeout;         /* Alternate path timeout for this QP */
        u8 timeout;             /* Timeout for this QP */
        u8 s_srate;
        u8 s_mig_state;
        u8 port_num;
        u8 s_pkey_index;        /* PKEY index to use */
        u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
        u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
        u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
        u8 s_retry_cnt;         /* number of times to retry */
        u8 s_rnr_retry_cnt;
        u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
        u8 s_max_sge;           /* size of s_wq->sg_list */
        u8 s_draining;

        /* start of read/write fields */

        atomic_t refcount ____cacheline_aligned_in_smp;
        wait_queue_head_t wait;

        struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1]
                ____cacheline_aligned_in_smp;
        struct qib_sge_state s_rdma_read_sge;

        spinlock_t r_lock ____cacheline_aligned_in_smp;      /* used for APM */
        unsigned long r_aflags;
        u64 r_wr_id;            /* ID for current receive WQE */
        u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
        u32 r_len;              /* total length of r_sge */
        u32 r_rcv_len;          /* receive data len processed */
        u32 r_psn;              /* expected rcv packet sequence number */
        u32 r_msn;              /* message sequence number */

        u8 r_state;             /* opcode of last packet received */
        u8 r_flags;
        u8 r_head_ack_queue;    /* index into s_ack_queue[] */

        struct list_head rspwait;       /* link for waiting to respond */

        struct qib_sge_state r_sge;     /* current receive data */
        struct qib_rq r_rq;             /* receive work queue */

        spinlock_t s_lock ____cacheline_aligned_in_smp;
        struct qib_sge_state *s_cur_sge;
        u32 s_flags;
        struct qib_verbs_txreq *s_tx;
        struct qib_swqe *s_wqe;
        struct qib_sge_state s_sge;     /* current send request data */
        struct qib_mregion *s_rdma_mr;
        atomic_t s_dma_busy;
        u32 s_cur_size;         /* size of send packet in bytes */
        u32 s_len;              /* total length of s_sge */
        u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
        u32 s_next_psn;         /* PSN for next request */
        u32 s_last_psn;         /* last response PSN processed */
        u32 s_sending_psn;      /* lowest PSN that is being sent */
        u32 s_sending_hpsn;     /* highest PSN that is being sent */
        u32 s_psn;              /* current packet sequence number */
        u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
        u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
        u32 s_head;             /* new entries added here */
        u32 s_tail;             /* next entry to process */
        u32 s_cur;              /* current work queue entry */
        u32 s_acked;            /* last un-ACK'ed entry */
        u32 s_last;             /* last completed entry */
        u32 s_ssn;              /* SSN of tail entry */
        u32 s_lsn;              /* limit sequence number (credit) */
        u16 s_hdrwords;         /* size of s_hdr in 32 bit words */
        u16 s_rdma_ack_cnt;
        u8 s_state;             /* opcode of last packet sent */
        u8 s_ack_state;         /* opcode of packet to ACK */
        u8 s_nak_state;         /* non-zero if NAK is pending */
        u8 r_nak_state;         /* non-zero if NAK is pending */
        u8 s_retry;             /* requester retry counter */
        u8 s_rnr_retry;         /* requester RNR retry counter */
        u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
        u8 s_tail_ack_queue;    /* index into s_ack_queue[] */

        struct qib_sge_state s_ack_rdma_sge;
        struct timer_list s_timer;
        struct list_head iowait;        /* link for wait PIO buf */

        struct work_struct s_work;

        wait_queue_head_t wait_dma;

        struct qib_sge r_sg_list[0] /* verified SGEs */
                ____cacheline_aligned_in_smp;
};

/*
 * Atomic bit definitions for r_aflags.
 */
#define QIB_R_WRID_VALID        0
#define QIB_R_REWIND_SGE        1

/*
 * Bit definitions for r_flags.
 */
#define QIB_R_REUSE_SGE 0x01
#define QIB_R_RDMAR_SEQ 0x02
#define QIB_R_RSP_NAK   0x04
#define QIB_R_RSP_SEND  0x08
#define QIB_R_COMM_EST  0x10

/*
 * Bit definitions for s_flags.
 *
 * QIB_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
 * QIB_S_BUSY - send tasklet is processing the QP
 * QIB_S_TIMER - the RC retry timer is active
 * QIB_S_RESP_PENDING - an RC response (e.g. RDMA read data) is pending
 * QIB_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * QIB_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                         before processing the next SWQE
 * QIB_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 *                         before processing the next SWQE
 * QIB_S_WAIT_RNR - waiting for RNR timeout
 * QIB_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * QIB_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *                  next send completion entry not via send DMA
 * QIB_S_WAIT_PIO - waiting for a send buffer to be available
 * QIB_S_WAIT_TX - waiting for a struct qib_verbs_txreq to be available
 * QIB_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * QIB_S_WAIT_KMEM - waiting for kernel memory to be available
 * QIB_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * QIB_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * QIB_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 * QIB_S_UNLIMITED_CREDIT - SSN credit checking against s_lsn is disabled
 */
#define QIB_S_SIGNAL_REQ_WR     0x0001
#define QIB_S_BUSY              0x0002
#define QIB_S_TIMER             0x0004
#define QIB_S_RESP_PENDING      0x0008
#define QIB_S_ACK_PENDING       0x0010
#define QIB_S_WAIT_FENCE        0x0020
#define QIB_S_WAIT_RDMAR        0x0040
#define QIB_S_WAIT_RNR          0x0080
#define QIB_S_WAIT_SSN_CREDIT   0x0100
#define QIB_S_WAIT_DMA          0x0200
#define QIB_S_WAIT_PIO          0x0400
#define QIB_S_WAIT_TX           0x0800
#define QIB_S_WAIT_DMA_DESC     0x1000
#define QIB_S_WAIT_KMEM         0x2000
#define QIB_S_WAIT_PSN          0x4000
#define QIB_S_WAIT_ACK          0x8000
#define QIB_S_SEND_ONE          0x10000
#define QIB_S_UNLIMITED_CREDIT  0x20000

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define QIB_S_ANY_WAIT_IO (QIB_S_WAIT_PIO | QIB_S_WAIT_TX | \
        QIB_S_WAIT_DMA_DESC | QIB_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define QIB_S_ANY_WAIT_SEND (QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | \
        QIB_S_WAIT_RNR | QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_DMA | \
        QIB_S_WAIT_PSN | QIB_S_WAIT_ACK)

#define QIB_S_ANY_WAIT (QIB_S_ANY_WAIT_IO | QIB_S_ANY_WAIT_SEND)

#define QIB_PSN_CREDIT  16

/*
 * Since struct qib_swqe is not a fixed size, we can't simply index into
 * struct qib_qp.s_wq.  This function does the array index computation.
 */
static inline struct qib_swqe *get_swqe_ptr(struct qib_qp *qp,
                                              unsigned n)
{
        return (struct qib_swqe *)((char *)qp->s_wq +
                                     (sizeof(struct qib_swqe) +
                                      qp->s_max_sge *
                                      sizeof(struct qib_sge)) * n);
}

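/*
 * Usage sketch (illustrative, not part of the original header): each
 * entry occupies sizeof(struct qib_swqe) plus room for s_max_sge
 * SGEs, so walking the send queue looks like
 *
 *      u32 i;
 *
 *      for (i = 0; i < qp->s_size; i++) {
 *              struct qib_swqe *wqe = get_swqe_ptr(qp, i);
 *              ...
 *      }
 */
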
/*
 * Since struct qib_rwqe is not a fixed size, we can't simply index into
 * struct qib_rwq.wq.  This function does the array index computation.
 */
static inline struct qib_rwqe *get_rwqe_ptr(struct qib_rq *rq, unsigned n)
{
        return (struct qib_rwqe *)
                ((char *) rq->wq->wq +
                 (sizeof(struct qib_rwqe) +
                  rq->max_sge * sizeof(struct ib_sge)) * n);
}

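/*
 * Usage sketch (illustrative): the consumer reads the entry at the
 * ring's tail index, e.g.
 *
 *      struct qib_rwqe *wqe = get_rwqe_ptr(&qp->r_rq, qp->r_rq.wq->tail);
 */
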
/*
 * QPN-map pages start out as NULL; they are allocated upon
 * first use and are never deallocated.  This way, large bitmaps
 * are not allocated unless large numbers of QPs are used.
 */
struct qpn_map {
        void *page;
};

struct qib_qpn_table {
        spinlock_t lock; /* protect changes in this struct */
        unsigned flags;         /* flags for QP0/1 allocated for each port */
        u32 last;               /* last QP number allocated */
        u32 nmaps;              /* size of the map table */
        u16 limit;
        u16 mask;
        /* bitmap of free QP numbers other than 0/1 */
        struct qpn_map map[QPNMAP_ENTRIES];
};

struct qib_lkey_table {
        spinlock_t lock; /* protect changes in this struct */
        u32 next;               /* next unused index (speeds search) */
        u32 gen;                /* generation count */
        u32 max;                /* size of the table */
        struct qib_mregion __rcu **table;
};

struct qib_opcode_stats {
        u64 n_packets;          /* number of packets */
        u64 n_bytes;            /* total number of bytes */
};

struct qib_ibport {
        struct qib_qp __rcu *qp0;
        struct qib_qp __rcu *qp1;
        struct ib_mad_agent *send_agent;        /* agent for SMI (traps) */
        struct qib_ah *sm_ah;
        struct qib_ah *smi_ah;
        struct rb_root mcast_tree;
        spinlock_t lock;                /* protect changes in this struct */

        /* non-zero when timer is set */
        unsigned long mkey_lease_timeout;
        unsigned long trap_timeout;
        __be64 gid_prefix;      /* in network order */
        __be64 mkey;
        __be64 guids[QIB_GUIDS_PER_PORT - 1];   /* writable GUIDs */
        u64 tid;                /* TID for traps */
        u64 n_unicast_xmit;     /* total unicast packets sent */
        u64 n_unicast_rcv;      /* total unicast packets received */
        u64 n_multicast_xmit;   /* total multicast packets sent */
        u64 n_multicast_rcv;    /* total multicast packets received */
        u64 z_symbol_error_counter;             /* starting count for PMA */
        u64 z_link_error_recovery_counter;      /* starting count for PMA */
        u64 z_link_downed_counter;              /* starting count for PMA */
        u64 z_port_rcv_errors;                  /* starting count for PMA */
        u64 z_port_rcv_remphys_errors;          /* starting count for PMA */
        u64 z_port_xmit_discards;               /* starting count for PMA */
        u64 z_port_xmit_data;                   /* starting count for PMA */
        u64 z_port_rcv_data;                    /* starting count for PMA */
        u64 z_port_xmit_packets;                /* starting count for PMA */
        u64 z_port_rcv_packets;                 /* starting count for PMA */
        u32 z_local_link_integrity_errors;      /* starting count for PMA */
        u32 z_excessive_buffer_overrun_errors;  /* starting count for PMA */
        u32 z_vl15_dropped;                     /* starting count for PMA */
        u32 n_rc_resends;
        u32 n_rc_acks;
        u32 n_rc_qacks;
        u32 n_rc_delayed_comp;
        u32 n_seq_naks;
        u32 n_rdma_seq;
        u32 n_rnr_naks;
        u32 n_other_naks;
        u32 n_loop_pkts;
        u32 n_pkt_drops;
        u32 n_vl15_dropped;
        u32 n_rc_timeouts;
        u32 n_dmawait;
        u32 n_unaligned;
        u32 n_rc_dupreq;
        u32 n_rc_seqnak;
        u32 port_cap_flags;
        u32 pma_sample_start;
        u32 pma_sample_interval;
        __be16 pma_counter_select[5];
        u16 pma_tag;
        u16 pkey_violations;
        u16 qkey_violations;
        u16 mkey_violations;
        u16 mkey_lease_period;
        u16 sm_lid;
        u16 repress_traps;
        u8 sm_sl;
        u8 mkeyprot;
        u8 subnet_timeout;
        u8 vl_high_limit;
        u8 sl_to_vl[16];

        struct qib_opcode_stats opstats[128];
};

struct qib_ibdev {
        struct ib_device ibdev;
        struct list_head pending_mmaps;
        spinlock_t mmap_offset_lock; /* protect mmap_offset */
        u32 mmap_offset;
        struct qib_mregion __rcu *dma_mr;

        /* QP numbers are shared by all IB ports */
        struct qib_qpn_table qpn_table;
        struct qib_lkey_table lk_table;
        struct list_head piowait;       /* list for wait PIO buf */
        struct list_head dmawait;       /* list for wait DMA */
        struct list_head txwait;        /* list for wait qib_verbs_txreq */
        struct list_head memwait;       /* list for wait kernel memory */
        struct list_head txreq_free;
        struct timer_list mem_timer;
        struct qib_qp __rcu **qp_table;
        struct qib_pio_header *pio_hdrs;
        dma_addr_t pio_hdrs_phys;
        /* list of QPs waiting for RNR timer */
        spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */
        u32 qp_table_size; /* size of the hash table */
        u32 qp_rnd; /* random bytes for hash */
        spinlock_t qpt_lock;

        u32 n_piowait;
        u32 n_txwait;

        u32 n_pds_allocated;    /* number of PDs allocated for device */
        spinlock_t n_pds_lock;
        u32 n_ahs_allocated;    /* number of AHs allocated for device */
        spinlock_t n_ahs_lock;
        u32 n_cqs_allocated;    /* number of CQs allocated for device */
        spinlock_t n_cqs_lock;
        u32 n_qps_allocated;    /* number of QPs allocated for device */
        spinlock_t n_qps_lock;
        u32 n_srqs_allocated;   /* number of SRQs allocated for device */
        spinlock_t n_srqs_lock;
        u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
        spinlock_t n_mcast_grps_lock;
};

struct qib_verbs_counters {
        u64 symbol_error_counter;
        u64 link_error_recovery_counter;
        u64 link_downed_counter;
        u64 port_rcv_errors;
        u64 port_rcv_remphys_errors;
        u64 port_xmit_discards;
        u64 port_xmit_data;
        u64 port_rcv_data;
        u64 port_xmit_packets;
        u64 port_rcv_packets;
        u32 local_link_integrity_errors;
        u32 excessive_buffer_overrun_errors;
        u32 vl15_dropped;
};

static inline struct qib_mr *to_imr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct qib_mr, ibmr);
}

static inline struct qib_pd *to_ipd(struct ib_pd *ibpd)
{
        return container_of(ibpd, struct qib_pd, ibpd);
}

static inline struct qib_ah *to_iah(struct ib_ah *ibah)
{
        return container_of(ibah, struct qib_ah, ibah);
}

static inline struct qib_cq *to_icq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct qib_cq, ibcq);
}

static inline struct qib_srq *to_isrq(struct ib_srq *ibsrq)
{
        return container_of(ibsrq, struct qib_srq, ibsrq);
}

static inline struct qib_qp *to_iqp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct qib_qp, ibqp);
}

static inline struct qib_ibdev *to_idev(struct ib_device *ibdev)
{
        return container_of(ibdev, struct qib_ibdev, ibdev);
}

/*
 * Send if not busy or waiting for I/O and either
 * an RC response is pending or we can process send work requests.
 */
static inline int qib_send_ok(struct qib_qp *qp)
{
        return !(qp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT_IO)) &&
                (qp->s_hdrwords || (qp->s_flags & QIB_S_RESP_PENDING) ||
                 !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
}

extern struct workqueue_struct *qib_cq_wq;

/*
 * This must be called with s_lock held.
 */
void qib_schedule_send(struct qib_qp *qp);

static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
{
        u16 p1 = pkey1 & 0x7FFF;
        u16 p2 = pkey2 & 0x7FFF;

        /*
         * Low 15 bits must be non-zero and match, and
         * one of the two must be a full member.
         */
        return p1 && p1 == p2 && ((__s16)pkey1 < 0 || (__s16)pkey2 < 0);
}

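/*
 * Examples (illustrative): the high bit marks full membership, so
 * qib_pkey_ok(0x8001, 0x0001) and qib_pkey_ok(0x8001, 0x8001) are
 * true, while qib_pkey_ok(0x0001, 0x0001) (two limited members) and
 * qib_pkey_ok(0x8000, 0x8000) (zero base key) are false.
 */
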
void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
                   u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
void qib_cap_mask_chg(struct qib_ibport *ibp);
void qib_sys_guid_chg(struct qib_ibport *ibp);
void qib_node_desc_chg(struct qib_ibport *ibp);
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                    struct ib_wc *in_wc, struct ib_grh *in_grh,
                    struct ib_mad *in_mad, struct ib_mad *out_mad);
int qib_create_agents(struct qib_ibdev *dev);
void qib_free_agents(struct qib_ibdev *dev);

/*
 * Compare the lower 24 bits of the two values.
 * Returns an integer less than, equal to, or greater than zero.
 */
static inline int qib_cmp24(u32 a, u32 b)
{
        return (((int) a) - ((int) b)) << 8;
}

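/*
 * Example (illustrative): shifting the difference left by 8 puts
 * bit 23 into the sign bit, so the comparison wraps modulo 2^24:
 * qib_cmp24(0x000001, 0xffffff) > 0, i.e. PSN 1 is "after" PSN
 * 0xffffff.
 */
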
struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid);

int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
                          u64 *rwords, u64 *spkts, u64 *rpkts,
                          u64 *xmit_wait);

int qib_get_counters(struct qib_pportdata *ppd,
                     struct qib_verbs_counters *cntrs);

int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int qib_mcast_tree_empty(struct qib_ibport *ibp);

__be32 qib_compute_aeth(struct qib_qp *qp);

struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn);

struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
                            struct ib_qp_init_attr *init_attr,
                            struct ib_udata *udata);

int qib_destroy_qp(struct ib_qp *ibqp);

int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err);

int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                  int attr_mask, struct ib_udata *udata);

int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                 int attr_mask, struct ib_qp_init_attr *init_attr);

unsigned qib_free_all_qps(struct qib_devdata *dd);

void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt);

void qib_free_qpn_table(struct qib_qpn_table *qpt);

void qib_get_credit(struct qib_qp *qp, u32 aeth);

unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult);

void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail);

void qib_put_txreq(struct qib_verbs_txreq *tx);

int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
                   u32 hdrwords, struct qib_sge_state *ss, u32 len);

void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length,
                  int release);

void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release);

void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
                int has_grh, void *data, u32 tlen, struct qib_qp *qp);

void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
                int has_grh, void *data, u32 tlen, struct qib_qp *qp);

int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);

struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid);

void qib_rc_rnr_retry(unsigned long arg);

void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr);

void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err);

int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr);

void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
                int has_grh, void *data, u32 tlen, struct qib_qp *qp);

int qib_alloc_lkey(struct qib_mregion *mr, int dma_region);

void qib_free_lkey(struct qib_mregion *mr);

int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
                struct qib_sge *isge, struct ib_sge *sge, int acc);

int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
                u32 len, u64 vaddr, u32 rkey, int acc);

int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                         struct ib_recv_wr **bad_wr);

struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
                              struct ib_srq_init_attr *srq_init_attr,
                              struct ib_udata *udata);

int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                   enum ib_srq_attr_mask attr_mask,
                   struct ib_udata *udata);

int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);

int qib_destroy_srq(struct ib_srq *ibsrq);

void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig);

int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);

struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries,
                            int comp_vector, struct ib_ucontext *context,
                            struct ib_udata *udata);

int qib_destroy_cq(struct ib_cq *ibcq);

int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);

int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);

struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc);

struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
                              struct ib_phys_buf *buffer_list,
                              int num_phys_buf, int acc, u64 *iova_start);

struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                              u64 virt_addr, int mr_access_flags,
                              struct ib_udata *udata);

int qib_dereg_mr(struct ib_mr *ibmr);

struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);

struct ib_fast_reg_page_list *qib_alloc_fast_reg_page_list(
                                struct ib_device *ibdev, int page_list_len);

void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl);

int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr);

struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
                             struct ib_fmr_attr *fmr_attr);

int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
                     int list_len, u64 iova);

int qib_unmap_fmr(struct list_head *fmr_list);

int qib_dealloc_fmr(struct ib_fmr *ibfmr);

static inline void qib_get_mr(struct qib_mregion *mr)
{
        atomic_inc(&mr->refcount);
}

void mr_rcu_callback(struct rcu_head *list);

static inline void qib_put_mr(struct qib_mregion *mr)
{
        if (unlikely(atomic_dec_and_test(&mr->refcount)))
                call_rcu(&mr->list, mr_rcu_callback);
}

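/*
 * Reference pairing sketch (illustrative, not part of the original
 * header): hold a reference across any use of the region; the final
 * qib_put_mr() frees it after an RCU grace period via
 * mr_rcu_callback().
 *
 *      qib_get_mr(mr);
 *      ... access mr->map[...] ...
 *      qib_put_mr(mr);
 */

/* Drop the MR references still held by a (possibly partial) SGE walk. */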
static inline void qib_put_ss(struct qib_sge_state *ss)
{
        while (ss->num_sge) {
                qib_put_mr(ss->sge.mr);
                if (--ss->num_sge)
                        ss->sge = *ss->sg_list++;
        }
}

void qib_release_mmap_info(struct kref *ref);

struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size,
                                           struct ib_ucontext *context,
                                           void *obj);

void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
                          u32 size, void *obj);

int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

int qib_get_rwqe(struct qib_qp *qp, int wr_id_only);

void qib_migrate_qp(struct qib_qp *qp);

int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
                      int has_grh, struct qib_qp *qp, u32 bth0);

u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
                 struct ib_global_route *grh, u32 hwords, u32 nwords);

void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
                         u32 bth0, u32 bth2);

void qib_do_send(struct work_struct *work);

void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
                       enum ib_wc_status status);

void qib_send_rc_ack(struct qib_qp *qp);

int qib_make_rc_req(struct qib_qp *qp);

int qib_make_uc_req(struct qib_qp *qp);

int qib_make_ud_req(struct qib_qp *qp);

int qib_register_ib_device(struct qib_devdata *);

void qib_unregister_ib_device(struct qib_devdata *);

void qib_ib_rcv(struct qib_ctxtdata *, void *, void *, u32);

void qib_ib_piobufavail(struct qib_devdata *);

unsigned qib_get_npkeys(struct qib_devdata *);

unsigned qib_get_pkey(struct qib_ibport *, unsigned);

extern const enum ib_wc_opcode ib_qib_wc_opcode[];

/*
 * HCA-independent IB PhysPortState values, returned by the
 * f_ibphys_portstate() routine.
 */
#define IB_PHYSPORTSTATE_SLEEP 1
#define IB_PHYSPORTSTATE_POLL 2
#define IB_PHYSPORTSTATE_DISABLED 3
#define IB_PHYSPORTSTATE_CFG_TRAIN 4
#define IB_PHYSPORTSTATE_LINKUP 5
#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
#define IB_PHYSPORTSTATE_CFG_DEBOUNCE 8
#define IB_PHYSPORTSTATE_CFG_IDLE 0xB
#define IB_PHYSPORTSTATE_RECOVERY_RETRAIN 0xC
#define IB_PHYSPORTSTATE_RECOVERY_WAITRMT 0xE
#define IB_PHYSPORTSTATE_RECOVERY_IDLE 0xF
#define IB_PHYSPORTSTATE_CFG_ENH 0x10
#define IB_PHYSPORTSTATE_CFG_WAIT_ENH 0x13

extern const int ib_qib_state_ops[];

extern __be64 ib_qib_sys_image_guid;    /* in network order */

extern unsigned int ib_qib_lkey_table_size;

extern unsigned int ib_qib_max_cqes;

extern unsigned int ib_qib_max_cqs;

extern unsigned int ib_qib_max_qp_wrs;

extern unsigned int ib_qib_max_qps;

extern unsigned int ib_qib_max_sges;

extern unsigned int ib_qib_max_mcast_grps;

extern unsigned int ib_qib_max_mcast_qp_attached;

extern unsigned int ib_qib_max_srqs;

extern unsigned int ib_qib_max_srq_sges;

extern unsigned int ib_qib_max_srq_wrs;

extern const u32 ib_qib_rnr_table[];

extern struct ib_dma_mapping_ops qib_dma_mapping_ops;

#endif                          /* QIB_VERBS_H */