linux/include/rdma/rdmavt_qp.h
#ifndef DEF_RDMAVT_INCQP_H
#define DEF_RDMAVT_INCQP_H

/*
 * Copyright(c) 2016 - 2019 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <rdma/rdma_vt.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdmavt_cq.h>
#include <rdma/rvt-abi.h>
/*
 * Atomic bit definitions for r_aflags.
 */
#define RVT_R_WRID_VALID        0
#define RVT_R_REWIND_SGE        1

/*
 * Bit definitions for r_flags.
 */
#define RVT_R_REUSE_SGE 0x01
#define RVT_R_RDMAR_SEQ 0x02
#define RVT_R_RSP_NAK   0x04
#define RVT_R_RSP_SEND  0x08
#define RVT_R_COMM_EST  0x10

/*
 * Bit definitions for s_flags.
 *
 * RVT_S_SIGNAL_REQ_WR - set if send WRs must individually request a
 *                       completion (QP created with IB_SIGNAL_REQ_WR)
 * RVT_S_BUSY - send tasklet is processing the QP
 * RVT_S_TIMER - the RC retry timer is active
 * RVT_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * RVT_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                         before processing the next SWQE
 * RVT_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 *                         before processing the next SWQE
 * RVT_S_WAIT_RNR - waiting for RNR timeout
 * RVT_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * RVT_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *                  next send completion entry not via send DMA
 * RVT_S_WAIT_PIO - waiting for a send buffer to be available
 * RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available
 * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
 * RVT_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * RVT_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * RVT_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 * RVT_S_ECN - a BECN was queued to the send engine
 * RVT_S_MAX_BIT_MASK - the highest bit that can be used by rdmavt
 */
#define RVT_S_SIGNAL_REQ_WR     0x0001
#define RVT_S_BUSY              0x0002
#define RVT_S_TIMER             0x0004
#define RVT_S_RESP_PENDING      0x0008
#define RVT_S_ACK_PENDING       0x0010
#define RVT_S_WAIT_FENCE        0x0020
#define RVT_S_WAIT_RDMAR        0x0040
#define RVT_S_WAIT_RNR          0x0080
#define RVT_S_WAIT_SSN_CREDIT   0x0100
#define RVT_S_WAIT_DMA          0x0200
#define RVT_S_WAIT_PIO          0x0400
#define RVT_S_WAIT_TX           0x0800
#define RVT_S_WAIT_DMA_DESC     0x1000
#define RVT_S_WAIT_KMEM         0x2000
#define RVT_S_WAIT_PSN          0x4000
#define RVT_S_WAIT_ACK          0x8000
#define RVT_S_SEND_ONE          0x10000
#define RVT_S_UNLIMITED_CREDIT  0x20000
#define RVT_S_ECN               0x40000
#define RVT_S_MAX_BIT_MASK      0x800000

/*
 * Drivers should use s_flags starting with bit 31 down to the bit next to
 * RVT_S_MAX_BIT_MASK.
 */

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define RVT_S_ANY_WAIT_IO \
        (RVT_S_WAIT_PIO | RVT_S_WAIT_TX | \
         RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define RVT_S_ANY_WAIT_SEND (RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR | \
        RVT_S_WAIT_RNR | RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_DMA | \
        RVT_S_WAIT_PSN | RVT_S_WAIT_ACK)

#define RVT_S_ANY_WAIT (RVT_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)

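/*
 * A minimal usage sketch (illustrative, not part of this header): a
 * driver's send engine typically declines to schedule a QP whose
 * s_flags carry RVT_S_BUSY or any wait bit; drv_schedule_send() below
 * is a hypothetical driver hook:
 *
 *        if (!(qp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)))
 *                drv_schedule_send(qp);
 */
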
/* Number of bits to pay attention to in the opcode for checking qp type */
#define RVT_OPCODE_QP_MASK 0xE0

/* Flags for checking QP state (see ib_rvt_state_ops[]) */
#define RVT_POST_SEND_OK                0x01
#define RVT_POST_RECV_OK                0x02
#define RVT_PROCESS_RECV_OK             0x04
#define RVT_PROCESS_SEND_OK             0x08
#define RVT_PROCESS_NEXT_SEND_OK        0x10
#define RVT_FLUSH_SEND                  0x20
#define RVT_FLUSH_RECV                  0x40
#define RVT_PROCESS_OR_FLUSH_SEND \
        (RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)
#define RVT_SEND_OR_FLUSH_OR_RECV_OK \
        (RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND | RVT_PROCESS_RECV_OK)

/*
 * Internal send flags
 */
#define RVT_SEND_RESERVE_USED           IB_SEND_RESERVED_START
#define RVT_SEND_COMPLETION_ONLY        (IB_SEND_RESERVED_START << 1)

/**
 * rvt_ud_wr - IB UD work plus AH cache
 * @wr: valid IB work request
 * @attr: pointer to an allocated AH attribute
 *
 * Special case the UD WR so we can keep track of the AH attributes.
 *
 * NOTE: This data structure is strictly ordered wr then attr, i.e. the
 * attr MUST come after wr.  The ib_ud_wr is sized and copied in
 * rvt_post_one_wr.  The copy assumes that wr is first.
 */
struct rvt_ud_wr {
        struct ib_ud_wr wr;
        struct rdma_ah_attr *attr;
};

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct rvt_swqe {
        union {
                struct ib_send_wr wr;   /* don't use wr.sg_list */
                struct rvt_ud_wr ud_wr;
                struct ib_reg_wr reg_wr;
                struct ib_rdma_wr rdma_wr;
                struct ib_atomic_wr atomic_wr;
        };
        u32 psn;                /* first packet sequence number */
        u32 lpsn;               /* last packet sequence number */
        u32 ssn;                /* send sequence number */
        u32 length;             /* total length of data in sg_list */
        void *priv;             /* driver dependent field */
        struct rvt_sge sg_list[];
};

/**
 * struct rvt_krwq - kernel struct receive work request
 * @p_lock: lock to protect producer of the kernel buffer
 * @head: index of next entry to fill
 * @c_lock: lock to protect consumer of the kernel buffer
 * @tail: index of next entry to pull
 * @count: approximate count of total receive entries posted
 * @rvt_rwqe: struct of receive work request queue entry
 *
 * This structure is used to contain the head pointer,
 * tail pointer and receive work queue entries for kernel
 * mode users.
 */
struct rvt_krwq {
        spinlock_t p_lock;      /* protect producer */
        u32 head;               /* new work requests posted to the head */

        /* protect consumer */
        spinlock_t c_lock ____cacheline_aligned_in_smp;
        u32 tail;               /* receives pull requests from here. */
        u32 count;              /* approx count of receive entries posted */
        struct rvt_rwqe *curr_wq;
        struct rvt_rwqe wq[];
};
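
/*
 * A rough sketch of the producer/consumer split (illustrative, not the
 * verbatim rdmavt implementation): the poster fills the entry at head
 * under p_lock and then publishes the new head, while the consumer
 * advances tail under c_lock:
 *
 *        spin_lock_irq(&rq->kwq->p_lock);
 *        next = rq->kwq->head + 1;
 *        if (next >= rq->size)
 *                next = 0;
 *        wqe = rvt_get_rwqe_ptr(rq, rq->kwq->head);
 *        ... fill *wqe from the posted ib_recv_wr ...
 *        smp_store_release(&rq->kwq->head, next);
 *        spin_unlock_irq(&rq->kwq->p_lock);
 */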

/**
 * rvt_get_swqe_ah - Return the pointer to the struct rvt_ah
 * @swqe: valid Send WQE
 *
 */
static inline struct rvt_ah *rvt_get_swqe_ah(struct rvt_swqe *swqe)
{
        return ibah_to_rvtah(swqe->ud_wr.wr.ah);
}

/**
 * rvt_get_swqe_ah_attr - Return the cached ah attribute information
 * @swqe: valid Send WQE
 *
 */
static inline struct rdma_ah_attr *rvt_get_swqe_ah_attr(struct rvt_swqe *swqe)
{
        return swqe->ud_wr.attr;
}

/**
 * rvt_get_swqe_remote_qpn - Access the remote QPN value
 * @swqe: valid Send WQE
 *
 */
static inline u32 rvt_get_swqe_remote_qpn(struct rvt_swqe *swqe)
{
        return swqe->ud_wr.wr.remote_qpn;
}

/**
 * rvt_get_swqe_remote_qkey - Access the remote qkey value
 * @swqe: valid Send WQE
 *
 */
static inline u32 rvt_get_swqe_remote_qkey(struct rvt_swqe *swqe)
{
        return swqe->ud_wr.wr.remote_qkey;
}

/**
 * rvt_get_swqe_pkey_index - Access the pkey index
 * @swqe: valid Send WQE
 *
 */
static inline u16 rvt_get_swqe_pkey_index(struct rvt_swqe *swqe)
{
        return swqe->ud_wr.wr.pkey_index;
}

struct rvt_rq {
        struct rvt_rwq *wq;
        struct rvt_krwq *kwq;
        u32 size;               /* size of RWQE array */
        u8 max_sge;
        /* protect changes in this struct */
        spinlock_t lock ____cacheline_aligned_in_smp;
};

/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct rvt_ack_entry {
        struct rvt_sge rdma_sge;
        u64 atomic_data;
        u32 psn;
        u32 lpsn;
        u8 opcode;
        u8 sent;
        void *priv;
};

#define RC_QP_SCALING_INTERVAL  5

#define RVT_OPERATION_PRIV        0x00000001
#define RVT_OPERATION_ATOMIC      0x00000002
#define RVT_OPERATION_ATOMIC_SGE  0x00000004
#define RVT_OPERATION_LOCAL       0x00000008
#define RVT_OPERATION_USE_RESERVE 0x00000010
#define RVT_OPERATION_IGN_RNR_CNT 0x00000020

#define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)

/**
 * rvt_operation_params - op table entry
 * @length - the length to copy into the swqe entry
 * @qpt_support - a bit mask indicating QP type support
 * @flags - RVT_OPERATION flags (see above)
 *
 * This supports table driven post send so that
 * drivers can support differing and potentially
 * driver-specific sets of operations.
 */
struct rvt_operation_params {
        size_t length;
        u32 qpt_support;
        u32 flags;
};

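/*
 * A sketch of one table entry as a driver might supply it (illustrative;
 * the operation array itself lives in the driver, not in this header):
 *
 *        static const struct rvt_operation_params
 *        drv_post_parms[RVT_OPERATION_MAX] = {
 *                [IB_WR_RDMA_WRITE] = {
 *                        .length = sizeof(struct ib_rdma_wr),
 *                        .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
 *                },
 *        };
 */
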
/*
 * Common variables are protected by both r_rq.lock and s_lock in that
 * order, which only happens in modify_qp() or when changing the QP
 * 'state'.
 */
struct rvt_qp {
        struct ib_qp ibqp;
        void *priv; /* Driver private data */
        /* read mostly fields above and below */
        struct rdma_ah_attr remote_ah_attr;
        struct rdma_ah_attr alt_ah_attr;
        struct rvt_qp __rcu *next;           /* link list for QPN hash table */
        struct rvt_swqe *s_wq;  /* send work queue */
        struct rvt_mmap_info *ip;

        unsigned long timeout_jiffies;  /* computed from timeout */

        int srate_mbps;         /* s_srate (below) converted to Mbit/s */
        pid_t pid;              /* pid for user mode QPs */
        u32 remote_qpn;
        u32 qkey;               /* QKEY for this QP (for UD or RD) */
        u32 s_size;             /* send work queue size */

        u16 pmtu;               /* decoded from path_mtu */
        u8 log_pmtu;            /* shift for pmtu */
        u8 state;               /* QP state */
        u8 allowed_ops;         /* high order bits of allowed opcodes */
        u8 qp_access_flags;
        u8 alt_timeout;         /* Alternate path timeout for this QP */
        u8 timeout;             /* Timeout for this QP */
        u8 s_srate;
        u8 s_mig_state;
        u8 port_num;
        u8 s_pkey_index;        /* PKEY index to use */
        u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
        u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
        u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
        u8 s_retry_cnt;         /* number of times to retry */
        u8 s_rnr_retry_cnt;
        u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
        u8 s_max_sge;           /* size of s_wq->sg_list */
        u8 s_draining;

        /* start of read/write fields */
        atomic_t refcount ____cacheline_aligned_in_smp;
        wait_queue_head_t wait;

        struct rvt_ack_entry *s_ack_queue;
        struct rvt_sge_state s_rdma_read_sge;

        spinlock_t r_lock ____cacheline_aligned_in_smp;      /* used for APM */
        u32 r_psn;              /* expected rcv packet sequence number */
        unsigned long r_aflags;
        u64 r_wr_id;            /* ID for current receive WQE */
        u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
        u32 r_len;              /* total length of r_sge */
        u32 r_rcv_len;          /* receive data len processed */
        u32 r_msn;              /* message sequence number */

        u8 r_state;             /* opcode of last packet received */
        u8 r_flags;
        u8 r_head_ack_queue;    /* index into s_ack_queue[] */
        u8 r_adefered;          /* deferred ack count */

        struct list_head rspwait;       /* link for waiting to respond */

        struct rvt_sge_state r_sge;     /* current receive data */
        struct rvt_rq r_rq;             /* receive work queue */

        /* post send line */
        spinlock_t s_hlock ____cacheline_aligned_in_smp;
        u32 s_head;             /* new entries added here */
        u32 s_next_psn;         /* PSN for next request */
        u32 s_avail;            /* number of entries avail */
        u32 s_ssn;              /* SSN of tail entry */
        atomic_t s_reserved_used; /* reserved entries in use */

        spinlock_t s_lock ____cacheline_aligned_in_smp;
        u32 s_flags;
        struct rvt_sge_state *s_cur_sge;
        struct rvt_swqe *s_wqe;
        struct rvt_sge_state s_sge;     /* current send request data */
        struct rvt_mregion *s_rdma_mr;
        u32 s_len;              /* total length of s_sge */
        u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
        u32 s_last_psn;         /* last response PSN processed */
        u32 s_sending_psn;      /* lowest PSN that is being sent */
        u32 s_sending_hpsn;     /* highest PSN that is being sent */
        u32 s_psn;              /* current packet sequence number */
        u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
        u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
        u32 s_tail;             /* next entry to process */
        u32 s_cur;              /* current work queue entry */
        u32 s_acked;            /* last un-ACK'ed entry */
        u32 s_last;             /* last completed entry */
        u32 s_lsn;              /* limit sequence number (credit) */
        u32 s_ahgpsn;           /* set to the psn in the copy of the header */
        u16 s_cur_size;         /* size of send packet in bytes */
        u16 s_rdma_ack_cnt;
        u8 s_hdrwords;          /* size of s_hdr in 32 bit words */
        s8 s_ahgidx;
        u8 s_state;             /* opcode of last packet sent */
        u8 s_ack_state;         /* opcode of packet to ACK */
        u8 s_nak_state;         /* non-zero if NAK is pending */
        u8 r_nak_state;         /* non-zero if NAK is pending */
        u8 s_retry;             /* requester retry counter */
        u8 s_rnr_retry;         /* requester RNR retry counter */
        u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
        u8 s_tail_ack_queue;    /* index into s_ack_queue[] */
        u8 s_acked_ack_queue;   /* index into s_ack_queue[] */

        struct rvt_sge_state s_ack_rdma_sge;
        struct timer_list s_timer;
        struct hrtimer s_rnr_timer;

        atomic_t local_ops_pending; /* number of fast_reg/local_inv reqs */

        /*
         * This sge list MUST be last. Do not add anything below here.
         */
        struct rvt_sge r_sg_list[] /* verified SGEs */
                ____cacheline_aligned_in_smp;
};

struct rvt_srq {
        struct ib_srq ibsrq;
        struct rvt_rq rq;
        struct rvt_mmap_info *ip;
        /* send signal when number of RWQEs < limit */
        u32 limit;
};

static inline struct rvt_srq *ibsrq_to_rvtsrq(struct ib_srq *ibsrq)
{
        return container_of(ibsrq, struct rvt_srq, ibsrq);
}

static inline struct rvt_qp *ibqp_to_rvtqp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct rvt_qp, ibqp);
}

#define RVT_QPN_MAX                 BIT(24)
#define RVT_QPNMAP_ENTRIES          (RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE           (PAGE_SIZE * BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE_MASK      (RVT_BITS_PER_PAGE - 1)
#define RVT_QPN_MASK                IB_QPN_MASK

/*
 * QPN-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct rvt_qpn_map {
        void *page;
};

struct rvt_qpn_table {
        spinlock_t lock; /* protect changes to the qp table */
        unsigned flags;         /* flags for QP0/1 allocated for each port */
        u32 last;               /* last QP number allocated */
        u32 nmaps;              /* size of the map table */
        u16 limit;
        u8  incr;
        /* bit map of free QP numbers other than 0/1 */
        struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES];
};

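/*
 * A worked example of the map indexing implied by the constants above
 * (illustrative; the QPN allocator itself lives in rdmavt's qp.c):
 * with 4 KiB pages, RVT_BITS_PER_PAGE is 32768, so QPN 100000 lands in
 * map[100000 / 32768] = map[3], at bit 100000 & RVT_BITS_PER_PAGE_MASK
 * = 1696 of that page:
 *
 *        map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
 *        busy = test_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
 */
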
struct rvt_qp_ibdev {
        u32 qp_table_size;
        u32 qp_table_bits;
        struct rvt_qp __rcu **qp_table;
        spinlock_t qpt_lock; /* qptable lock */
        struct rvt_qpn_table qpn_table;
};

/*
 * There is one struct rvt_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct rvt_mcast_qp.
 */
struct rvt_mcast_qp {
        struct list_head list;
        struct rvt_qp *qp;
};

struct rvt_mcast_addr {
        union ib_gid mgid;
        u16 lid;
};

struct rvt_mcast {
        struct rb_node rb_node;
        struct rvt_mcast_addr mcast_addr;
        struct list_head qp_list;
        wait_queue_head_t wait;
        atomic_t refcount;
        int n_attached;
};

/*
 * Since struct rvt_swqe is not a fixed size, we can't simply index into
 * struct rvt_qp.s_wq.  This function does the array index computation.
 */
static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
                                                unsigned n)
{
        return (struct rvt_swqe *)((char *)qp->s_wq +
                                     (sizeof(struct rvt_swqe) +
                                      qp->s_max_sge *
                                      sizeof(struct rvt_sge)) * n);
}

/*
 * Since struct rvt_rwqe is not a fixed size, we can't simply index into
 * struct rvt_rwq.wq.  This function does the array index computation.
 */
static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
{
        return (struct rvt_rwqe *)
                ((char *)rq->kwq->curr_wq +
                 (sizeof(struct rvt_rwqe) +
                  rq->max_sge * sizeof(struct ib_sge)) * n);
}

/**
 * rvt_is_user_qp - return true if this is a user mode QP
 * @qp - the target QP
 */
static inline bool rvt_is_user_qp(struct rvt_qp *qp)
{
        return !!qp->pid;
}

/**
 * rvt_get_qp - get a QP reference
 * @qp - the QP to hold
 */
static inline void rvt_get_qp(struct rvt_qp *qp)
{
        atomic_inc(&qp->refcount);
}

/**
 * rvt_put_qp - release a QP reference
 * @qp - the QP to release
 */
static inline void rvt_put_qp(struct rvt_qp *qp)
{
        if (qp && atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
}

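/*
 * A pairing sketch (illustrative, not part of this header): code that
 * hands a QP to deferred work takes a reference first and drops it when
 * the work is done; the final rvt_put_qp() wakes anyone sleeping on
 * qp->wait, e.g. a destroyer waiting for the refcount to drain.
 * drv_wq and drv_work are hypothetical:
 *
 *        rvt_get_qp(qp);
 *        queue_work(drv_wq, &drv_work);
 *        ...later, in the work handler...
 *        rvt_put_qp(qp);
 */
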
/**
 * rvt_put_swqe - drop mr refs held by swqe
 * @wqe - the send wqe
 *
 * This drops any mr references held by the swqe
 */
static inline void rvt_put_swqe(struct rvt_swqe *wqe)
{
        int i;

        for (i = 0; i < wqe->wr.num_sge; i++) {
                struct rvt_sge *sge = &wqe->sg_list[i];

                rvt_put_mr(sge->mr);
        }
}

/**
 * rvt_qp_wqe_reserve - reserve operation
 * @qp - the rvt qp
 * @wqe - the send wqe
 *
 * This routine is used in post send to record
 * a wqe relative reserved operation use.
 */
static inline void rvt_qp_wqe_reserve(
        struct rvt_qp *qp,
        struct rvt_swqe *wqe)
{
        atomic_inc(&qp->s_reserved_used);
}

/**
 * rvt_qp_wqe_unreserve - clean reserved operation
 * @qp - the rvt qp
 * @flags - send wqe flags
 *
 * This decrements the reserve use count.
 *
 * This call MUST precede the change to
 * s_last to ensure that post send sees a stable
 * s_avail.
 *
 * An smp_mb__after_atomic() is used to ensure
 * the compiler does not juggle the order of the s_last
 * ring index and the decrementing of s_reserved_used.
 */
static inline void rvt_qp_wqe_unreserve(struct rvt_qp *qp, int flags)
{
        if (unlikely(flags & RVT_SEND_RESERVE_USED)) {
                atomic_dec(&qp->s_reserved_used);
                /* ensure no compiler re-order up to s_last change */
                smp_mb__after_atomic();
        }
}

extern const enum ib_wc_opcode ib_rvt_wc_opcode[];

/*
 * Compare the lower 24 bits of the msn values.
 * Returns an integer less than, equal to, or greater than zero.
 */
static inline int rvt_cmp_msn(u32 a, u32 b)
{
        return (((int)a) - ((int)b)) << 8;
}

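/*
 * The shift discards the upper 8 bits so that only the 24-bit sequence
 * numbers participate, and the sign of the result gives their order
 * modulo 2^24.  A worked example (values chosen for illustration):
 * rvt_cmp_msn(0x000001, 0xffffff) evaluates (1 - 0xffffff) << 8 =
 * 0x00000200 > 0, correctly placing 0x000001 "after" 0xffffff across
 * the 24-bit wrap.
 */
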
__be32 rvt_compute_aeth(struct rvt_qp *qp);

void rvt_get_credit(struct rvt_qp *qp, u32 aeth);

u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len);

/**
 * rvt_div_round_up_mtu - round up divide
 * @qp - the qp pair
 * @len - the length
 *
 * Perform a shift based mtu round up divide
 */
static inline u32 rvt_div_round_up_mtu(struct rvt_qp *qp, u32 len)
{
        return (len + qp->pmtu - 1) >> qp->log_pmtu;
}

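/*
 * A worked example (values chosen for illustration): with a 4096-byte
 * pmtu, log_pmtu is 12, so rvt_div_round_up_mtu(qp, 9000) =
 * (9000 + 4095) >> 12 = 3 packets for a 9000-byte request.
 */
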
/**
 * rvt_div_mtu - mtu based divide
 * @qp - the qp pair
 * @len - the length
 *
 * Perform a shift based mtu divide
 */
static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len)
{
        return len >> qp->log_pmtu;
}

/**
 * rvt_timeout_to_jiffies - Convert a ULP timeout input into jiffies
 * @timeout - timeout input(0 - 31).
 *
 * Return a timeout value in jiffies.
 */
static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
{
        if (timeout > 31)
                timeout = 31;

        return usecs_to_jiffies(1U << timeout) * 4096UL / 1000UL;
}

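/*
 * A worked example (values chosen for illustration): the IBTA encoding
 * is 4.096 usec * 2^timeout, so timeout = 14 yields roughly 67 msec of
 * jiffies and the maximum encoding of 31 roughly 8800 seconds.
 */
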
/**
 * rvt_lookup_qpn - return the QP with the given QPN
 * @rdi: the rvt device info
 * @rvp: the ibport
 * @qpn: the QP number to look up
 *
 * The caller must hold the rcu_read_lock(), and keep the lock until
 * the returned qp is no longer in use.
 */
static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
                                            struct rvt_ibport *rvp,
                                            u32 qpn) __must_hold(RCU)
{
        struct rvt_qp *qp = NULL;

        if (unlikely(qpn <= 1)) {
                qp = rcu_dereference(rvp->qp[qpn]);
        } else {
                u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);

                for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
                        qp = rcu_dereference(qp->next))
                        if (qp->ibqp.qp_num == qpn)
                                break;
        }
        return qp;
}

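/*
 * A caller sketch (illustrative, not part of this header): take a
 * reference before leaving the RCU read-side critical section if the
 * QP will be used afterwards:
 *
 *        rcu_read_lock();
 *        qp = rvt_lookup_qpn(rdi, rvp, qpn);
 *        if (qp)
 *                rvt_get_qp(qp);
 *        rcu_read_unlock();
 */
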
/**
 * rvt_mod_retry_timer_ext - mod a retry timer
 * @qp - the QP
 * @shift - timeout shift to wait for multiple packets
 *
 * Modify a potentially already running retry timer.
 */
static inline void rvt_mod_retry_timer_ext(struct rvt_qp *qp, u8 shift)
{
        struct ib_qp *ibqp = &qp->ibqp;
        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

        lockdep_assert_held(&qp->s_lock);
        qp->s_flags |= RVT_S_TIMER;
        /* 4.096 usec. * (1 << qp->timeout) */
        mod_timer(&qp->s_timer, jiffies + rdi->busy_jiffies +
                  (qp->timeout_jiffies << shift));
}

static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
{
        return rvt_mod_retry_timer_ext(qp, 0);
}

/**
 * rvt_put_qp_swqe - drop refs held by swqe
 * @qp: the send qp
 * @wqe: the send wqe
 *
 * This drops any references held by the swqe
 */
static inline void rvt_put_qp_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
        rvt_put_swqe(wqe);
        if (qp->allowed_ops == IB_OPCODE_UD)
                rdma_destroy_ah_attr(wqe->ud_wr.attr);
}

/**
 * rvt_qp_swqe_incr - increment ring index
 * @qp: the qp
 * @val: the starting value
 *
 * Return: the new value wrapping as appropriate
 */
static inline u32
rvt_qp_swqe_incr(struct rvt_qp *qp, u32 val)
{
        if (++val >= qp->s_size)
                val = 0;
        return val;
}

int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);

/**
 * rvt_recv_cq - add a new entry to the completion queue
 *               on behalf of the receive queue
 * @qp: the QP owning the receive queue
 * @wc: work completion entry to add
 * @solicited: true if @wc is solicited
 *
 * This is a wrapper for rvt_cq_enter() called on behalf of the
 * receive queue.  If rvt_cq_enter() returns false, the cq is full
 * and the qp is put into the error state.
 */
static inline void rvt_recv_cq(struct rvt_qp *qp, struct ib_wc *wc,
                               bool solicited)
{
        struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.recv_cq);

        if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
                rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
}

/**
 * rvt_send_cq - add a new entry to the completion queue
 *               on behalf of the send queue
 * @qp: the QP owning the send queue
 * @wc: work completion entry to add
 * @solicited: true if @wc is solicited
 *
 * This is a wrapper for rvt_cq_enter() called on behalf of the
 * send queue.  If rvt_cq_enter() returns false, the cq is full
 * and the qp is put into the error state.
 */
static inline void rvt_send_cq(struct rvt_qp *qp, struct ib_wc *wc,
                               bool solicited)
{
        struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.send_cq);

        if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
                rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
}

/**
 * rvt_qp_complete_swqe - insert send completion
 * @qp - the qp
 * @wqe - the send wqe
 * @opcode - wc operation (driver dependent)
 * @status - completion status
 *
 * Update the s_last information, and then insert a send
 * completion into the completion queue if the qp indicates
 * it should be done.
 *
 * See IBTA 10.7.3.1 for info on completion
 * control.
 *
 * Return: new last
 */
static inline u32
rvt_qp_complete_swqe(struct rvt_qp *qp,
                     struct rvt_swqe *wqe,
                     enum ib_wc_opcode opcode,
                     enum ib_wc_status status)
{
        bool need_completion;
        u64 wr_id;
        u32 byte_len, last;
        int flags = wqe->wr.send_flags;

        rvt_qp_wqe_unreserve(qp, flags);
        rvt_put_qp_swqe(qp, wqe);

        need_completion =
                !(flags & RVT_SEND_RESERVE_USED) &&
                (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
                (flags & IB_SEND_SIGNALED) ||
                status != IB_WC_SUCCESS);
        if (need_completion) {
                wr_id = wqe->wr.wr_id;
                byte_len = wqe->length;
                /* above fields required before writing s_last */
        }
        last = rvt_qp_swqe_incr(qp, qp->s_last);
        /* see rvt_qp_is_avail() */
        smp_store_release(&qp->s_last, last);
        if (need_completion) {
                struct ib_wc w = {
                        .wr_id = wr_id,
                        .status = status,
                        .opcode = opcode,
                        .qp = &qp->ibqp,
                        .byte_len = byte_len,
                };
                rvt_send_cq(qp, &w, status != IB_WC_SUCCESS);
        }
        return last;
}

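/*
 * A caller sketch (illustrative): a driver completing the oldest send
 * WQE under the s_lock would translate the IB opcode through
 * ib_rvt_wc_opcode[] and let this helper advance s_last:
 *
 *        last = rvt_qp_complete_swqe(qp, wqe,
 *                                    ib_rvt_wc_opcode[wqe->wr.opcode],
 *                                    status);
 */
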
extern const int ib_rvt_state_ops[];

struct rvt_dev_info;
int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only);
void rvt_comm_est(struct rvt_qp *qp);
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
unsigned long rvt_rnr_tbl_to_usec(u32 index);
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t);
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth);
void rvt_del_timers_sync(struct rvt_qp *qp);
void rvt_stop_rc_timers(struct rvt_qp *qp);
void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift);
static inline void rvt_add_retry_timer(struct rvt_qp *qp)
{
        rvt_add_retry_timer_ext(qp, 0);
}

void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
                  void *data, u32 length,
                  bool release, bool copy_last);
void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
                       enum ib_wc_status status);
void rvt_ruc_loopback(struct rvt_qp *qp);

/**
 * struct rvt_qp_iter - the iterator for QPs
 * @qp - the current QP
 *
 * This structure defines the current iterator
 * state for sequenced access to all QPs relative
 * to an rvt_dev_info.
 */
struct rvt_qp_iter {
        struct rvt_qp *qp;
        /* private: backpointer */
        struct rvt_dev_info *rdi;
        /* private: callback routine */
        void (*cb)(struct rvt_qp *qp, u64 v);
        /* private: for arg to callback routine */
        u64 v;
        /* private: number of SMI, GSI QPs for device */
        int specials;
        /* private: current iterator index */
        int n;
};

/**
 * ib_cq_tail - Return tail index of cq buffer
 * @send_cq - The cq for send
 *
 * This is called in qp_iter_print to get the tail
 * of the cq buffer.
 */
static inline u32 ib_cq_tail(struct ib_cq *send_cq)
{
        struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);

        return cq->ip ?
               RDMA_READ_UAPI_ATOMIC(cq->queue->tail) :
               cq->kqueue->tail;
}

/**
 * ib_cq_head - Return head index of cq buffer
 * @send_cq - The cq for send
 *
 * This is called in qp_iter_print to get the head
 * of the cq buffer.
 */
static inline u32 ib_cq_head(struct ib_cq *send_cq)
{
        struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);

        return cq->ip ?
               RDMA_READ_UAPI_ATOMIC(cq->queue->head) :
               cq->kqueue->head;
}

/**
 * rvt_free_rq - free memory allocated for rvt_rq struct
 * @rq: request queue data structure
 *
 * This function should only be called if the rvt_mmap_info()
 * has not succeeded.
 */
static inline void rvt_free_rq(struct rvt_rq *rq)
{
        kvfree(rq->kwq);
        rq->kwq = NULL;
        vfree(rq->wq);
        rq->wq = NULL;
}

/**
 * rvt_to_iport - Get the ibport pointer
 * @qp: the qp pointer
 *
 * This function returns the ibport pointer from the qp pointer.
 */
static inline struct rvt_ibport *rvt_to_iport(struct rvt_qp *qp)
{
        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

        return rdi->ports[qp->port_num - 1];
}

/**
 * rvt_rc_credit_avail - Check if there are enough RC credits for the request
 * @qp: the qp
 * @wqe: the request
 *
 * This function returns false when there are not enough credits for the given
 * request and true otherwise.
 */
static inline bool rvt_rc_credit_avail(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
        lockdep_assert_held(&qp->s_lock);
        if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
            rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
                struct rvt_ibport *rvp = rvt_to_iport(qp);

                qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
                rvp->n_rc_crwaits++;
                return false;
        }
        return true;
}

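/*
 * A caller sketch (illustrative): an RC send engine checks credits
 * before committing to a request and stalls the QP otherwise; the
 * bail label is hypothetical:
 *
 *        if (!rvt_rc_credit_avail(qp, wqe))
 *                goto bail;
 */
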
struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
                                     u64 v,
                                     void (*cb)(struct rvt_qp *qp, u64 v));
int rvt_qp_iter_next(struct rvt_qp_iter *iter);
void rvt_qp_iter(struct rvt_dev_info *rdi,
                 u64 v,
                 void (*cb)(struct rvt_qp *qp, u64 v));
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey);
#endif          /* DEF_RDMAVT_INCQP_H */