linux/include/rdma/rdmavt_qp.h
#ifndef DEF_RDMAVT_INCQP_H
#define DEF_RDMAVT_INCQP_H

/*
 * Copyright(c) 2016 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <rdma/rdma_vt.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdmavt_cq.h>
/*
 * Atomic bit definitions for r_aflags.
 */
#define RVT_R_WRID_VALID        0
#define RVT_R_REWIND_SGE        1

/*
 * Bit definitions for r_flags.
 */
#define RVT_R_REUSE_SGE 0x01
#define RVT_R_RDMAR_SEQ 0x02
#define RVT_R_RSP_NAK   0x04
#define RVT_R_RSP_SEND  0x08
#define RVT_R_COMM_EST  0x10

/*
 * Bit definitions for s_flags.
 *
 * RVT_S_SIGNAL_REQ_WR - set if only signaled send WRs should generate
 *                       completions (sq_sig_type == IB_SIGNAL_REQ_WR)
 * RVT_S_BUSY - send tasklet is processing the QP
 * RVT_S_TIMER - the RC retry timer is active
 * RVT_S_RESP_PENDING - a response (RDMA read data or atomic result) is
 *                      waiting to be sent
 * RVT_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * RVT_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                         before processing the next SWQE
 * RVT_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 *                         before processing the next SWQE
 * RVT_S_WAIT_RNR - waiting for RNR timeout
 * RVT_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * RVT_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *                  the next send completion entry not via send DMA
 * RVT_S_WAIT_PIO - waiting for a send buffer to be available
 * RVT_S_WAIT_PIO_DRAIN - waiting for a QP to drain pio packets
 * RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available
 * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
 * RVT_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * RVT_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * RVT_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 * RVT_S_UNLIMITED_CREDIT - the peer granted unlimited credit, so SSN
 *                          credit limiting is disabled
 * RVT_S_ECN - a BECN was queued to the send engine
 * RVT_S_MAX_BIT_MASK - the highest s_flags bit that rdmavt itself may use
 */
#define RVT_S_SIGNAL_REQ_WR     0x0001
#define RVT_S_BUSY              0x0002
#define RVT_S_TIMER             0x0004
#define RVT_S_RESP_PENDING      0x0008
#define RVT_S_ACK_PENDING       0x0010
#define RVT_S_WAIT_FENCE        0x0020
#define RVT_S_WAIT_RDMAR        0x0040
#define RVT_S_WAIT_RNR          0x0080
#define RVT_S_WAIT_SSN_CREDIT   0x0100
#define RVT_S_WAIT_DMA          0x0200
#define RVT_S_WAIT_PIO          0x0400
#define RVT_S_WAIT_TX           0x0800
#define RVT_S_WAIT_DMA_DESC     0x1000
#define RVT_S_WAIT_KMEM         0x2000
#define RVT_S_WAIT_PSN          0x4000
#define RVT_S_WAIT_ACK          0x8000
#define RVT_S_SEND_ONE          0x10000
#define RVT_S_UNLIMITED_CREDIT  0x20000
#define RVT_S_ECN               0x40000
#define RVT_S_MAX_BIT_MASK      0x800000

/*
 * Drivers should use s_flags starting with bit 31 down to the bit next to
 * RVT_S_MAX_BIT_MASK
 */
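
/*
 * A hypothetical driver-private layout consistent with the rule above
 * (the names are illustrative only and not part of rdmavt):
 *
 *	#define MYDRV_S_WAIT_HALT	0x80000000
 *	#define MYDRV_S_AHG_VALID	0x40000000
 *
 * Both values sit above RVT_S_MAX_BIT_MASK, so they cannot collide
 * with current or future rdmavt-owned bits.
 */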

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define RVT_S_ANY_WAIT_IO \
	(RVT_S_WAIT_PIO | RVT_S_WAIT_TX | \
	 RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define RVT_S_ANY_WAIT_SEND (RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR | \
	RVT_S_WAIT_RNR | RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_DMA | \
	RVT_S_WAIT_PSN | RVT_S_WAIT_ACK)

#define RVT_S_ANY_WAIT (RVT_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)

/* Number of bits to pay attention to in the opcode for checking qp type */
#define RVT_OPCODE_QP_MASK 0xE0

/* Flags for checking QP state (see ib_rvt_state_ops[]) */
#define RVT_POST_SEND_OK                0x01
#define RVT_POST_RECV_OK                0x02
#define RVT_PROCESS_RECV_OK             0x04
#define RVT_PROCESS_SEND_OK             0x08
#define RVT_PROCESS_NEXT_SEND_OK        0x10
#define RVT_FLUSH_SEND                  0x20
#define RVT_FLUSH_RECV                  0x40
#define RVT_PROCESS_OR_FLUSH_SEND \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)
#define RVT_SEND_OR_FLUSH_OR_RECV_OK \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND | RVT_PROCESS_RECV_OK)
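
/*
 * A sketch of the intended use (mirroring rdmavt's post-send path, not
 * a verbatim excerpt):
 *
 *	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
 *		return -EINVAL;
 *
 * Likewise, a send engine should keep draining a flushing QP only when
 * RVT_FLUSH_SEND is set, which is what RVT_PROCESS_OR_FLUSH_SEND tests
 * in combination with RVT_PROCESS_SEND_OK.
 */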

/*
 * Internal send flags
 */
#define RVT_SEND_RESERVE_USED           IB_SEND_RESERVED_START
#define RVT_SEND_COMPLETION_ONLY        (IB_SEND_RESERVED_START << 1)

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct rvt_swqe {
	union {
		struct ib_send_wr wr;   /* don't use wr.sg_list */
		struct ib_ud_wr ud_wr;
		struct ib_reg_wr reg_wr;
		struct ib_rdma_wr rdma_wr;
		struct ib_atomic_wr atomic_wr;
	};
	u32 psn;                /* first packet sequence number */
	u32 lpsn;               /* last packet sequence number */
	u32 ssn;                /* send sequence number */
	u32 length;             /* total length of data in sg_list */
	struct rvt_sge sg_list[0];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct rvt_rwqe {
	u64 wr_id;
	u8 num_sge;
	struct ib_sge sg_list[0];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use get_rwqe_ptr() instead.
 */
struct rvt_rwq {
	u32 head;               /* new work requests posted to the head */
	u32 tail;               /* receives pull requests from here. */
	struct rvt_rwqe wq[0];
};

struct rvt_rq {
	struct rvt_rwq *wq;
	u32 size;               /* size of RWQE array */
	u8 max_sge;
	/* protect changes in this struct */
	spinlock_t lock ____cacheline_aligned_in_smp;
};

/*
 * This structure is used by rvt_mmap() to validate an offset
 * when an mmap() request is made.  The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct rvt_mmap_info {
	struct list_head pending_mmaps;
	struct ib_ucontext *context;
	void *obj;
	__u64 offset;
	struct kref ref;
	unsigned size;
};

/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct rvt_ack_entry {
	struct rvt_sge rdma_sge;
	u64 atomic_data;
	u32 psn;
	u32 lpsn;
	u8 opcode;
	u8 sent;
};

#define RC_QP_SCALING_INTERVAL  5

#define RVT_OPERATION_PRIV        0x00000001
#define RVT_OPERATION_ATOMIC      0x00000002
#define RVT_OPERATION_ATOMIC_SGE  0x00000004
#define RVT_OPERATION_LOCAL       0x00000008
#define RVT_OPERATION_USE_RESERVE 0x00000010

#define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)

/**
 * rvt_operation_params - op table entry
 * @length - the length to copy into the swqe entry
 * @qpt_support - a bit mask indicating QP type support
 * @flags - RVT_OPERATION flags (see above)
 *
 * This supports table-driven post send so that each
 * driver can support its own, potentially different,
 * set of operations.
 */
struct rvt_operation_params {
	size_t length;
	u32 qpt_support;
	u32 flags;
};
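
/*
 * A sketch of one entry in a driver's operation table (shape modeled on
 * the hfi1/qib post_parms tables; the values are illustrative only):
 *
 *	[IB_WR_RDMA_WRITE] = {
 *		.length = sizeof(struct ib_rdma_wr),
 *		.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
 *	},
 *
 * Post send looks up the entry by wr->opcode, validates qpt_support
 * against the QP type, and copies 'length' bytes into the SWQE.
 */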

/*
 * Common variables are protected by taking both r_rq.lock and s_lock,
 * in that order; this only happens in modify_qp() or when changing the
 * QP 'state'.
 */
struct rvt_qp {
	struct ib_qp ibqp;
	void *priv; /* Driver private data */
	/* read mostly fields above and below */
	struct rdma_ah_attr remote_ah_attr;
	struct rdma_ah_attr alt_ah_attr;
	struct rvt_qp __rcu *next;           /* link list for QPN hash table */
	struct rvt_swqe *s_wq;  /* send work queue */
	struct rvt_mmap_info *ip;

	unsigned long timeout_jiffies;  /* computed from timeout */

	int srate_mbps;         /* s_srate (below) converted to Mbit/s */
	pid_t pid;              /* pid for user mode QPs */
	u32 remote_qpn;
	u32 qkey;               /* QKEY for this QP (for UD or RD) */
	u32 s_size;             /* send work queue size */

	u16 pmtu;               /* decoded from path_mtu */
	u8 log_pmtu;            /* shift for pmtu */
	u8 state;               /* QP state */
	u8 allowed_ops;         /* high order bits of allowed opcodes */
	u8 qp_access_flags;
	u8 alt_timeout;         /* Alternate path timeout for this QP */
	u8 timeout;             /* Timeout for this QP */
	u8 s_srate;
	u8 s_mig_state;
	u8 port_num;
	u8 s_pkey_index;        /* PKEY index to use */
	u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
	u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
	u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
	u8 s_retry_cnt;         /* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
	u8 s_max_sge;           /* size of s_wq->sg_list */
	u8 s_draining;

	/* start of read/write fields */
	atomic_t refcount ____cacheline_aligned_in_smp;
	wait_queue_head_t wait;

	struct rvt_ack_entry *s_ack_queue;
	struct rvt_sge_state s_rdma_read_sge;

	spinlock_t r_lock ____cacheline_aligned_in_smp;      /* used for APM */
	u32 r_psn;              /* expected rcv packet sequence number */
	unsigned long r_aflags;
	u64 r_wr_id;            /* ID for current receive WQE */
	u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
	u32 r_len;              /* total length of r_sge */
	u32 r_rcv_len;          /* receive data len processed */
	u32 r_msn;              /* message sequence number */

	u8 r_state;             /* opcode of last packet received */
	u8 r_flags;
	u8 r_head_ack_queue;    /* index into s_ack_queue[] */
	u8 r_adefered;          /* deferred ack count */

	struct list_head rspwait;       /* link for waiting to respond */

	struct rvt_sge_state r_sge;     /* current receive data */
	struct rvt_rq r_rq;             /* receive work queue */

	/* post send line */
	spinlock_t s_hlock ____cacheline_aligned_in_smp;
	u32 s_head;             /* new entries added here */
	u32 s_next_psn;         /* PSN for next request */
	u32 s_avail;            /* number of entries avail */
	u32 s_ssn;              /* SSN of tail entry */
	atomic_t s_reserved_used; /* reserved entries in use */

	spinlock_t s_lock ____cacheline_aligned_in_smp;
	u32 s_flags;
	struct rvt_sge_state *s_cur_sge;
	struct rvt_swqe *s_wqe;
	struct rvt_sge_state s_sge;     /* current send request data */
	struct rvt_mregion *s_rdma_mr;
	u32 s_len;              /* total length of s_sge */
	u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
	u32 s_last_psn;         /* last response PSN processed */
	u32 s_sending_psn;      /* lowest PSN that is being sent */
	u32 s_sending_hpsn;     /* highest PSN that is being sent */
	u32 s_psn;              /* current packet sequence number */
	u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
	u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
	u32 s_tail;             /* next entry to process */
	u32 s_cur;              /* current work queue entry */
	u32 s_acked;            /* last un-ACK'ed entry */
	u32 s_last;             /* last completed entry */
	u32 s_lsn;              /* limit sequence number (credit) */
	u32 s_ahgpsn;           /* set to the psn in the copy of the header */
	u16 s_cur_size;         /* size of send packet in bytes */
	u16 s_rdma_ack_cnt;
	u8 s_hdrwords;          /* size of s_hdr in 32 bit words */
	s8 s_ahgidx;
	u8 s_state;             /* opcode of last packet sent */
	u8 s_ack_state;         /* opcode of packet to ACK */
	u8 s_nak_state;         /* non-zero if NAK is pending */
	u8 r_nak_state;         /* non-zero if NAK is pending */
	u8 s_retry;             /* requester retry counter */
	u8 s_rnr_retry;         /* requester RNR retry counter */
	u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;    /* index into s_ack_queue[] */

	struct rvt_sge_state s_ack_rdma_sge;
	struct timer_list s_timer;
	struct hrtimer s_rnr_timer;

	atomic_t local_ops_pending; /* number of fast_reg/local_inv reqs */

	/*
	 * This sge list MUST be last. Do not add anything below here.
	 */
	struct rvt_sge r_sg_list[0] /* verified SGEs */
		____cacheline_aligned_in_smp;
};

struct rvt_srq {
	struct ib_srq ibsrq;
	struct rvt_rq rq;
	struct rvt_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

#define RVT_QPN_MAX                 BIT(24)
#define RVT_QPNMAP_ENTRIES          (RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE           (PAGE_SIZE * BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE_MASK      (RVT_BITS_PER_PAGE - 1)
#define RVT_QPN_MASK                IB_QPN_MASK

/*
 * QPN-map pages start out as NULL; they get allocated upon
 * first use and are never deallocated. This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct rvt_qpn_map {
	void *page;
};
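
/*
 * Sketch of the lazy allocation this enables; alloc_map_page() stands
 * in for a hypothetical helper that installs one zeroed bitmap page on
 * demand. A QPN is claimed only if its bit was previously clear:
 *
 *	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
 *	if (unlikely(!map->page))
 *		alloc_map_page(map);
 *	if (map->page &&
 *	    !test_and_set_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page))
 *		return qpn;
 */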

struct rvt_qpn_table {
	spinlock_t lock; /* protect changes to the qp table */
	unsigned flags;         /* flags for QP0/1 allocated for each port */
	u32 last;               /* last QP number allocated */
	u32 nmaps;              /* size of the map table */
	u16 limit;
	u8  incr;
	/* bit map of free QP numbers other than 0/1 */
	struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES];
};

struct rvt_qp_ibdev {
	u32 qp_table_size;
	u32 qp_table_bits;
	struct rvt_qp __rcu **qp_table;
	spinlock_t qpt_lock; /* qptable lock */
	struct rvt_qpn_table qpn_table;
};

/*
 * There is one struct rvt_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct rvt_mcast_qp.
 */
struct rvt_mcast_qp {
	struct list_head list;
	struct rvt_qp *qp;
};

struct rvt_mcast_addr {
	union ib_gid mgid;
	u16 lid;
};

struct rvt_mcast {
	struct rb_node rb_node;
	struct rvt_mcast_addr mcast_addr;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};

/*
 * Since struct rvt_swqe is not a fixed size, we can't simply index into
 * struct rvt_qp.s_wq.  This function does the array index computation.
 */
static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
						unsigned n)
{
	return (struct rvt_swqe *)((char *)qp->s_wq +
				     (sizeof(struct rvt_swqe) +
				      qp->s_max_sge *
				      sizeof(struct rvt_sge)) * n);
}
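
/*
 * A minimal walk of the pending send queue with the accessor above,
 * assuming the appropriate QP locks are held (s_lock, and s_hlock for
 * the s_head side):
 *
 *	u32 i = qp->s_last;
 *
 *	while (i != qp->s_head) {
 *		struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i);
 *
 *		inspect(wqe);
 *		if (++i == qp->s_size)
 *			i = 0;
 *	}
 *
 * inspect() stands in for driver-specific processing.
 */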

/*
 * Since struct rvt_rwqe is not a fixed size, we can't simply index into
 * struct rvt_rwq.wq.  This function does the array index computation.
 */
static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
{
	return (struct rvt_rwqe *)
		((char *)rq->wq->wq +
		 (sizeof(struct rvt_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}
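
/*
 * Sketch of consuming one receive WQE with this accessor, assuming
 * r_rq.lock (or the SRQ's rq.lock) is held and head != tail:
 *
 *	u32 tail = rq->wq->tail;
 *	struct rvt_rwqe *rwqe = rvt_get_rwqe_ptr(rq, tail);
 *
 *	consume(rwqe);
 *	if (++tail >= rq->size)
 *		tail = 0;
 *	rq->wq->tail = tail;
 *
 * consume() stands in for building qp->r_sge from rwqe->sg_list.
 */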

/**
 * rvt_is_user_qp - return true if this is a user mode QP
 * @qp - the target QP
 */
static inline bool rvt_is_user_qp(struct rvt_qp *qp)
{
	return !!qp->pid;
}

/**
 * rvt_get_qp - get a QP reference
 * @qp - the QP to hold
 */
static inline void rvt_get_qp(struct rvt_qp *qp)
{
	atomic_inc(&qp->refcount);
}

/**
 * rvt_put_qp - release a QP reference
 * @qp - the QP to release
 */
static inline void rvt_put_qp(struct rvt_qp *qp)
{
	if (qp && atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
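
/*
 * Typical hold pattern around an RCU lookup; rvt_lookup_qpn() is
 * provided elsewhere by rdmavt and is shown only as an illustration:
 *
 *	rcu_read_lock();
 *	qp = rvt_lookup_qpn(rdi, rvp, qpn);
 *	if (qp)
 *		rvt_get_qp(qp);
 *	rcu_read_unlock();
 *	...
 *	rvt_put_qp(qp);
 */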

/**
 * rvt_put_swqe - drop mr refs held by swqe
 * @wqe - the send wqe
 *
 * This drops any mr references held by the swqe
 */
static inline void rvt_put_swqe(struct rvt_swqe *wqe)
{
	int i;

	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct rvt_sge *sge = &wqe->sg_list[i];

		rvt_put_mr(sge->mr);
	}
}

/**
 * rvt_qp_wqe_reserve - reserve operation
 * @qp - the rvt qp
 * @wqe - the send wqe
 *
 * This routine is used in post send to record the
 * use of a reserved operation by the given wqe.
 */
static inline void rvt_qp_wqe_reserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{
	atomic_inc(&qp->s_reserved_used);
}

/**
 * rvt_qp_wqe_unreserve - clean reserved operation
 * @qp - the rvt qp
 * @wqe - the send wqe
 *
 * This decrements the reserve use count.
 *
 * This call MUST precede the change to
 * s_last to ensure that post send sees a stable
 * s_avail.
 *
 * An smp_mb__after_atomic() is used to ensure
 * the compiler does not reorder the s_last
 * ring index update and the decrement of s_reserved_used.
 */
static inline void rvt_qp_wqe_unreserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{
	if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
		atomic_dec(&qp->s_reserved_used);
		/* ensure no compiler reordering up to the s_last change */
		smp_mb__after_atomic();
	}
}
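
/*
 * The resulting order in a driver's send-completion path looks roughly
 * like this (a condensed sketch, not a verbatim excerpt):
 *
 *	rvt_qp_wqe_unreserve(qp, wqe);
 *	rvt_put_swqe(wqe);
 *	last = qp->s_last;
 *	if (++last >= qp->s_size)
 *		last = 0;
 *	qp->s_last = last;
 *	rvt_qp_swqe_complete(qp, wqe, opcode, status);
 *
 * i.e. the unreserve (and its barrier) strictly precedes the s_last
 * update that republishes the slot to the post-send side.
 */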

extern const enum ib_wc_opcode ib_rvt_wc_opcode[];

/**
 * rvt_qp_swqe_complete() - insert send completion
 * @qp - the qp
 * @wqe - the send wqe
 * @opcode - the work completion opcode (see ib_rvt_wc_opcode[])
 * @status - completion status
 *
 * Insert a send completion into the completion
 * queue if the qp indicates it should be done.
 *
 * See IBTA 10.7.3.1 for info on completion
 * control.
 */
static inline void rvt_qp_swqe_complete(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe,
	enum ib_wc_opcode opcode,
	enum ib_wc_status status)
{
	if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED))
		return;
	if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	     status != IB_WC_SUCCESS) {
		struct ib_wc wc;

		memset(&wc, 0, sizeof(wc));
		wc.wr_id = wqe->wr.wr_id;
		wc.status = status;
		wc.opcode = opcode;
		wc.qp = &qp->ibqp;
		wc.byte_len = wqe->length;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
			     status != IB_WC_SUCCESS);
	}
}

/*
 * Compare the lower 24 bits of the msn values.
 * Returns an integer less than, equal to, or greater than zero.
 */
static inline int rvt_cmp_msn(u32 a, u32 b)
{
	return (((int)a) - ((int)b)) << 8;
}
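
/*
 * The shift makes the comparison wrap correctly at 24 bits. For
 * example, rvt_cmp_msn(0x000002, 0xfffffe) is positive: the 32-bit
 * difference is 0xff000004, and shifting out the top byte leaves
 * 0x00000400, so MSN 0x000002 is "newer" than 0xfffffe.
 */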

/**
 * rvt_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 rvt_compute_aeth(struct rvt_qp *qp);

/**
 * rvt_get_credit - process the credit information in an AETH
 * @qp: the QP whose send work queue credit state to update
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void rvt_get_credit(struct rvt_qp *qp, u32 aeth);

/**
 * rvt_div_round_up_mtu - perform a shift-based mtu round-up divide
 * @qp - the queue pair
 * @len - the length
 */
static inline u32 rvt_div_round_up_mtu(struct rvt_qp *qp, u32 len)
{
	return (len + qp->pmtu - 1) >> qp->log_pmtu;
}

/**
 * rvt_div_mtu - perform a shift-based mtu divide
 * @qp - the queue pair
 * @len - the length
 */
static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len)
{
	return len >> qp->log_pmtu;
}
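
/*
 * Worked example for both helpers: with pmtu = 4096 (log_pmtu = 12)
 * and len = 9000,
 *
 *	rvt_div_round_up_mtu(qp, 9000) = (9000 + 4095) >> 12 = 3
 *	rvt_div_mtu(qp, 9000)          =  9000 >> 12         = 2
 *
 * i.e. the payload needs three packets, two of them full-MTU.
 */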

/**
 * rvt_timeout_to_jiffies - Convert a ULP timeout input into jiffies
 * @timeout - timeout input (0 - 31).
 *
 * Return a timeout value in jiffies.
 */
static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
{
	if (timeout > 31)
		timeout = 31;

	return usecs_to_jiffies(1U << timeout) * 4096UL / 1000UL;
}
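
/*
 * This implements the IBTA Local ACK Timeout encoding of
 * 4.096 usec * 2^timeout. For example, timeout = 14 gives
 *
 *	4.096 usec * 16384 ~= 67.1 msec
 *
 * before conversion to jiffies.
 */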

extern const int ib_rvt_state_ops[];

struct rvt_dev_info;
int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only);
void rvt_comm_est(struct rvt_qp *qp);
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
unsigned long rvt_rnr_tbl_to_usec(u32 index);
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t);
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth);
void rvt_del_timers_sync(struct rvt_qp *qp);
void rvt_stop_rc_timers(struct rvt_qp *qp);
void rvt_add_retry_timer(struct rvt_qp *qp);

/**
 * struct rvt_qp_iter - the iterator for QPs
 * @qp - the current QP
 *
 * This structure defines the current iterator
 * state for sequenced access to all QPs relative
 * to an rvt_dev_info.
 */
struct rvt_qp_iter {
	struct rvt_qp *qp;
	/* private: backpointer */
	struct rvt_dev_info *rdi;
	/* private: callback routine */
	void (*cb)(struct rvt_qp *qp, u64 v);
	/* private: for arg to callback routine */
	u64 v;
	/* private: number of SMI/GSI QPs for the device */
	int specials;
	/* private: current iterator index */
	int n;
};

struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
				     u64 v,
				     void (*cb)(struct rvt_qp *qp, u64 v));
int rvt_qp_iter_next(struct rvt_qp_iter *iter);
void rvt_qp_iter(struct rvt_dev_info *rdi,
		 u64 v,
		 void (*cb)(struct rvt_qp *qp, u64 v));
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey);
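
/*
 * A minimal iteration sketch; dump_qp() is a hypothetical callback:
 *
 *	static void dump_qp(struct rvt_qp *qp, u64 v)
 *	{
 *		pr_info("qpn 0x%x state %u\n", qp->ibqp.qp_num, qp->state);
 *	}
 *
 *	rvt_qp_iter(rdi, 0, dump_qp);
 *
 * rvt_qp_iter() visits every QP on the device, including the special
 * SMI/GSI QPs, and invokes the callback on each one.
 */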
#endif          /* DEF_RDMAVT_INCQP_H */