linux/include/rdma/rdmavt_qp.h
#ifndef DEF_RDMAVT_INCQP_H
#define DEF_RDMAVT_INCQP_H

/*
 * Copyright(c) 2016 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <rdma/rdma_vt.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdmavt_cq.h>
/*
 * Atomic bit definitions for r_aflags.
 */
#define RVT_R_WRID_VALID        0
#define RVT_R_REWIND_SGE        1

/*
 * Bit definitions for r_flags.
 */
#define RVT_R_REUSE_SGE 0x01
#define RVT_R_RDMAR_SEQ 0x02
#define RVT_R_RSP_NAK   0x04
#define RVT_R_RSP_SEND  0x08
#define RVT_R_COMM_EST  0x10

/*
 * Bit definitions for s_flags.
 *
 * RVT_S_SIGNAL_REQ_WR - set if only explicitly signaled send WRs should
 *                       generate completions (see rvt_qp_swqe_complete())
 * RVT_S_BUSY - send tasklet is processing the QP
 * RVT_S_TIMER - the RC retry timer is active
 * RVT_S_RESP_PENDING - a responder reply (ACK or RDMA read response) is
 *                      pending
 * RVT_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * RVT_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                         before processing the next SWQE
 * RVT_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 *                         before processing the next SWQE
 * RVT_S_WAIT_RNR - waiting for RNR timeout
 * RVT_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * RVT_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *                  next send completion entry not via send DMA
 * RVT_S_WAIT_PIO - waiting for a send buffer to be available
 * RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available
 * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
 * RVT_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * RVT_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * RVT_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 * RVT_S_UNLIMITED_CREDIT - the peer has granted unlimited credits, so
 *                          sends are never SSN credit limited
 * RVT_S_ECN - a BECN was queued to the send engine
 * RVT_S_MAX_BIT_MASK - the max bit that can be used by rdmavt
 */
#define RVT_S_SIGNAL_REQ_WR     0x0001
#define RVT_S_BUSY              0x0002
#define RVT_S_TIMER             0x0004
#define RVT_S_RESP_PENDING      0x0008
#define RVT_S_ACK_PENDING       0x0010
#define RVT_S_WAIT_FENCE        0x0020
#define RVT_S_WAIT_RDMAR        0x0040
#define RVT_S_WAIT_RNR          0x0080
#define RVT_S_WAIT_SSN_CREDIT   0x0100
#define RVT_S_WAIT_DMA          0x0200
#define RVT_S_WAIT_PIO          0x0400
#define RVT_S_WAIT_TX           0x0800
#define RVT_S_WAIT_DMA_DESC     0x1000
#define RVT_S_WAIT_KMEM         0x2000
#define RVT_S_WAIT_PSN          0x4000
#define RVT_S_WAIT_ACK          0x8000
#define RVT_S_SEND_ONE          0x10000
#define RVT_S_UNLIMITED_CREDIT  0x20000
#define RVT_S_ECN               0x40000
#define RVT_S_MAX_BIT_MASK      0x800000

/*
 * Drivers should use s_flags starting with bit 31 down to the bit next to
 * RVT_S_MAX_BIT_MASK.
 */

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define RVT_S_ANY_WAIT_IO \
        (RVT_S_WAIT_PIO | RVT_S_WAIT_TX | \
         RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define RVT_S_ANY_WAIT_SEND (RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR | \
        RVT_S_WAIT_RNR | RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_DMA | \
        RVT_S_WAIT_PSN | RVT_S_WAIT_ACK)

#define RVT_S_ANY_WAIT (RVT_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)
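
/*
 * Illustrative sketch (not part of the original header): a driver's send
 * engine typically bails out while any wait bit is set, e.g.:
 *
 *        if (qp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT))
 *                goto bail;
 */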

/* Number of bits to pay attention to in the opcode for checking qp type */
#define RVT_OPCODE_QP_MASK 0xE0

/* Flags for checking QP state (see ib_rvt_state_ops[]) */
#define RVT_POST_SEND_OK                0x01
#define RVT_POST_RECV_OK                0x02
#define RVT_PROCESS_RECV_OK             0x04
#define RVT_PROCESS_SEND_OK             0x08
#define RVT_PROCESS_NEXT_SEND_OK        0x10
#define RVT_FLUSH_SEND                  0x20
#define RVT_FLUSH_RECV                  0x40
#define RVT_PROCESS_OR_FLUSH_SEND \
        (RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)
#define RVT_SEND_OR_FLUSH_OR_RECV_OK \
        (RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND | RVT_PROCESS_RECV_OK)
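
/*
 * Illustrative sketch (not part of the original header): post-send paths
 * gate on the QP state table declared later in this file, e.g.:
 *
 *        if (!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
 *                return -EINVAL;
 */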

/*
 * Internal send flags
 */
#define RVT_SEND_RESERVE_USED           IB_SEND_RESERVED_START
#define RVT_SEND_COMPLETION_ONLY        (IB_SEND_RESERVED_START << 1)

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct rvt_swqe {
        union {
                struct ib_send_wr wr;   /* don't use wr.sg_list */
                struct ib_ud_wr ud_wr;
                struct ib_reg_wr reg_wr;
                struct ib_rdma_wr rdma_wr;
                struct ib_atomic_wr atomic_wr;
        };
        u32 psn;                /* first packet sequence number */
        u32 lpsn;               /* last packet sequence number */
        u32 ssn;                /* send sequence number */
        u32 length;             /* total length of data in sg_list */
        void *priv;             /* driver dependent field */
        struct rvt_sge sg_list[];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct rvt_rwqe {
        u64 wr_id;
        u8 num_sge;
        struct ib_sge sg_list[];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use get_rwqe_ptr() instead.
 */
struct rvt_rwq {
        u32 head;               /* new work requests posted to the head */
        u32 tail;               /* receives pull requests from here. */
        struct rvt_rwqe wq[];
};
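
/*
 * Illustrative sketch (not part of the original header): the ring is full
 * when advancing head would catch up with tail, e.g. in a hypothetical
 * post-receive path:
 *
 *        u32 next = wq->head + 1;
 *
 *        if (next >= rq->size)
 *                next = 0;
 *        if (next == wq->tail)
 *                return -ENOMEM;  (ring is full)
 */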

struct rvt_rq {
        struct rvt_rwq *wq;
        u32 size;               /* size of RWQE array */
        u8 max_sge;
        /* protect changes in this struct */
        spinlock_t lock ____cacheline_aligned_in_smp;
};

/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct rvt_ack_entry {
        struct rvt_sge rdma_sge;
        u64 atomic_data;
        u32 psn;
        u32 lpsn;
        u8 opcode;
        u8 sent;
        void *priv;
};

#define RC_QP_SCALING_INTERVAL  5

#define RVT_OPERATION_PRIV        0x00000001
#define RVT_OPERATION_ATOMIC      0x00000002
#define RVT_OPERATION_ATOMIC_SGE  0x00000004
#define RVT_OPERATION_LOCAL       0x00000008
#define RVT_OPERATION_USE_RESERVE 0x00000010
#define RVT_OPERATION_IGN_RNR_CNT 0x00000020

#define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)

/**
 * rvt_operation_params - op table entry
 * @length - the length to copy into the swqe entry
 * @qpt_support - a bit mask indicating QP type support
 * @flags - RVT_OPERATION flags (see above)
 *
 * This supports table driven post send so that
 * each driver can support a potentially different
 * set of operations.
 */
struct rvt_operation_params {
        size_t length;
        u32 qpt_support;
        u32 flags;
};
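
/*
 * Illustrative sketch (not part of the original header): a driver's
 * operation table might contain entries along these lines (the table name
 * and exact values here are hypothetical):
 *
 *        static const struct rvt_operation_params example_post_parms[RVT_OPERATION_MAX] = {
 *        [IB_WR_RDMA_WRITE] = {
 *                .length = sizeof(struct ib_rdma_wr),
 *                .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
 *        },
 *        [IB_WR_ATOMIC_CMP_AND_SWP] = {
 *                .length = sizeof(struct ib_atomic_wr),
 *                .qpt_support = BIT(IB_QPT_RC),
 *                .flags = RVT_OPERATION_ATOMIC,
 *        },
 *        };
 */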

/*
 * Common variables are protected by both r_rq.lock and s_lock in that
 * order, which only happens in modify_qp() or when changing the QP
 * 'state'.
 */
struct rvt_qp {
        struct ib_qp ibqp;
        void *priv; /* Driver private data */
        /* read mostly fields above and below */
        struct rdma_ah_attr remote_ah_attr;
        struct rdma_ah_attr alt_ah_attr;
        struct rvt_qp __rcu *next;           /* link list for QPN hash table */
        struct rvt_swqe *s_wq;  /* send work queue */
        struct rvt_mmap_info *ip;

        unsigned long timeout_jiffies;  /* computed from timeout */

        int srate_mbps;         /* s_srate (below) converted to Mbit/s */
        pid_t pid;              /* pid for user mode QPs */
        u32 remote_qpn;
        u32 qkey;               /* QKEY for this QP (for UD or RD) */
        u32 s_size;             /* send work queue size */

        u16 pmtu;               /* decoded from path_mtu */
        u8 log_pmtu;            /* shift for pmtu */
        u8 state;               /* QP state */
        u8 allowed_ops;         /* high order bits of allowed opcodes */
        u8 qp_access_flags;
        u8 alt_timeout;         /* Alternate path timeout for this QP */
        u8 timeout;             /* Timeout for this QP */
        u8 s_srate;
        u8 s_mig_state;
        u8 port_num;
        u8 s_pkey_index;        /* PKEY index to use */
        u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
        u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
        u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
        u8 s_retry_cnt;         /* number of times to retry */
        u8 s_rnr_retry_cnt;
        u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
        u8 s_max_sge;           /* size of s_wq->sg_list */
        u8 s_draining;

        /* start of read/write fields */
        atomic_t refcount ____cacheline_aligned_in_smp;
        wait_queue_head_t wait;

        struct rvt_ack_entry *s_ack_queue;
        struct rvt_sge_state s_rdma_read_sge;

        spinlock_t r_lock ____cacheline_aligned_in_smp;      /* used for APM */
        u32 r_psn;              /* expected rcv packet sequence number */
        unsigned long r_aflags;
        u64 r_wr_id;            /* ID for current receive WQE */
        u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
        u32 r_len;              /* total length of r_sge */
        u32 r_rcv_len;          /* receive data len processed */
        u32 r_msn;              /* message sequence number */

        u8 r_state;             /* opcode of last packet received */
        u8 r_flags;
        u8 r_head_ack_queue;    /* index into s_ack_queue[] */
        u8 r_adefered;          /* deferred ACK count */

        struct list_head rspwait;       /* link for waiting to respond */

        struct rvt_sge_state r_sge;     /* current receive data */
        struct rvt_rq r_rq;             /* receive work queue */

        /* post send line */
        spinlock_t s_hlock ____cacheline_aligned_in_smp;
        u32 s_head;             /* new entries added here */
        u32 s_next_psn;         /* PSN for next request */
        u32 s_avail;            /* number of entries avail */
        u32 s_ssn;              /* SSN of tail entry */
        atomic_t s_reserved_used; /* reserved entries in use */

        spinlock_t s_lock ____cacheline_aligned_in_smp;
        u32 s_flags;
        struct rvt_sge_state *s_cur_sge;
        struct rvt_swqe *s_wqe;
        struct rvt_sge_state s_sge;     /* current send request data */
        struct rvt_mregion *s_rdma_mr;
        u32 s_len;              /* total length of s_sge */
        u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
        u32 s_last_psn;         /* last response PSN processed */
        u32 s_sending_psn;      /* lowest PSN that is being sent */
        u32 s_sending_hpsn;     /* highest PSN that is being sent */
        u32 s_psn;              /* current packet sequence number */
        u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
        u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
        u32 s_tail;             /* next entry to process */
        u32 s_cur;              /* current work queue entry */
        u32 s_acked;            /* last un-ACK'ed entry */
        u32 s_last;             /* last completed entry */
        u32 s_lsn;              /* limit sequence number (credit) */
        u32 s_ahgpsn;           /* set to the psn in the copy of the header */
        u16 s_cur_size;         /* size of send packet in bytes */
        u16 s_rdma_ack_cnt;
        u8 s_hdrwords;          /* size of s_hdr in 32 bit words */
        s8 s_ahgidx;
        u8 s_state;             /* opcode of last packet sent */
        u8 s_ack_state;         /* opcode of packet to ACK */
        u8 s_nak_state;         /* non-zero if NAK is pending */
        u8 r_nak_state;         /* non-zero if NAK is pending */
        u8 s_retry;             /* requester retry counter */
        u8 s_rnr_retry;         /* requester RNR retry counter */
        u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
        u8 s_tail_ack_queue;    /* index into s_ack_queue[] */
        u8 s_acked_ack_queue;   /* index into s_ack_queue[] */

        struct rvt_sge_state s_ack_rdma_sge;
        struct timer_list s_timer;
        struct hrtimer s_rnr_timer;

        atomic_t local_ops_pending; /* number of fast_reg/local_inv reqs */

        /*
         * This sge list MUST be last. Do not add anything below here.
         */
        struct rvt_sge r_sg_list[] /* verified SGEs */
                ____cacheline_aligned_in_smp;
};
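
/*
 * Illustrative sketch (not part of the original header): when both the
 * receive and send side must be quiesced (a modify_qp-like path), the
 * lock order is r_lock, then s_hlock, then s_lock:
 *
 *        spin_lock_irq(&qp->r_lock);
 *        spin_lock(&qp->s_hlock);
 *        spin_lock(&qp->s_lock);
 *        ... change QP state ...
 *        spin_unlock(&qp->s_lock);
 *        spin_unlock(&qp->s_hlock);
 *        spin_unlock_irq(&qp->r_lock);
 */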

struct rvt_srq {
        struct ib_srq ibsrq;
        struct rvt_rq rq;
        struct rvt_mmap_info *ip;
        /* send signal when number of RWQEs < limit */
        u32 limit;
};

static inline struct rvt_srq *ibsrq_to_rvtsrq(struct ib_srq *ibsrq)
{
        return container_of(ibsrq, struct rvt_srq, ibsrq);
}

static inline struct rvt_qp *ibqp_to_rvtqp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct rvt_qp, ibqp);
}

#define RVT_QPN_MAX                 BIT(24)
#define RVT_QPNMAP_ENTRIES          (RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE           (PAGE_SIZE * BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE_MASK      (RVT_BITS_PER_PAGE - 1)
#define RVT_QPN_MASK                IB_QPN_MASK
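
/*
 * Illustrative sketch (not part of the original header): a QPN selects a
 * bitmap page and a bit offset within it, e.g.:
 *
 *        map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
 *        off = qpn & RVT_BITS_PER_PAGE_MASK;
 */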

/*
 * QPN-map pages start out as NULL; they get allocated upon
 * first use and are never deallocated. This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct rvt_qpn_map {
        void *page;
};

struct rvt_qpn_table {
        spinlock_t lock; /* protect changes to the qp table */
        unsigned flags;         /* flags for QP0/1 allocated for each port */
        u32 last;               /* last QP number allocated */
        u32 nmaps;              /* size of the map table */
        u16 limit;
        u8  incr;
        /* bit map of free QP numbers other than 0/1 */
        struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES];
};

struct rvt_qp_ibdev {
        u32 qp_table_size;
        u32 qp_table_bits;
        struct rvt_qp __rcu **qp_table;
        spinlock_t qpt_lock; /* qptable lock */
        struct rvt_qpn_table qpn_table;
};

/*
 * There is one struct rvt_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct rvt_mcast_qp.
 */
struct rvt_mcast_qp {
        struct list_head list;
        struct rvt_qp *qp;
};

struct rvt_mcast_addr {
        union ib_gid mgid;
        u16 lid;
};

struct rvt_mcast {
        struct rb_node rb_node;
        struct rvt_mcast_addr mcast_addr;
        struct list_head qp_list;
        wait_queue_head_t wait;
        atomic_t refcount;
        int n_attached;
};

/*
 * Since struct rvt_swqe is not a fixed size, we can't simply index into
 * struct rvt_qp.s_wq.  This function does the array index computation.
 */
static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
                                                unsigned n)
{
        return (struct rvt_swqe *)((char *)qp->s_wq +
                                     (sizeof(struct rvt_swqe) +
                                      qp->s_max_sge *
                                      sizeof(struct rvt_sge)) * n);
}
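
/*
 * Illustrative sketch (not part of the original header): send-side code
 * walks the queue with this helper rather than array indexing, e.g.:
 *
 *        struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
 */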

/*
 * Since struct rvt_rwqe is not a fixed size, we can't simply index into
 * struct rvt_rwq.wq.  This function does the array index computation.
 */
static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
{
        return (struct rvt_rwqe *)
                ((char *)rq->wq->wq +
                 (sizeof(struct rvt_rwqe) +
                  rq->max_sge * sizeof(struct ib_sge)) * n);
}

/**
 * rvt_is_user_qp - return true if this is a user mode QP
 * @qp - the target QP
 */
static inline bool rvt_is_user_qp(struct rvt_qp *qp)
{
        return !!qp->pid;
}

/**
 * rvt_get_qp - get a QP reference
 * @qp - the QP to hold
 */
static inline void rvt_get_qp(struct rvt_qp *qp)
{
        atomic_inc(&qp->refcount);
}

/**
 * rvt_put_qp - release a QP reference
 * @qp - the QP to release
 */
static inline void rvt_put_qp(struct rvt_qp *qp)
{
        if (qp && atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
}

/**
 * rvt_put_swqe - drop mr refs held by swqe
 * @wqe - the send wqe
 *
 * This drops any mr references held by the swqe
 */
static inline void rvt_put_swqe(struct rvt_swqe *wqe)
{
        int i;

        for (i = 0; i < wqe->wr.num_sge; i++) {
                struct rvt_sge *sge = &wqe->sg_list[i];

                rvt_put_mr(sge->mr);
        }
}

/**
 * rvt_qp_wqe_reserve - reserve operation
 * @qp - the rvt qp
 * @wqe - the send wqe
 *
 * This routine is used in the post send path to record
 * that a WQE consumed a reserved operation slot.
 */
static inline void rvt_qp_wqe_reserve(
        struct rvt_qp *qp,
        struct rvt_swqe *wqe)
{
        atomic_inc(&qp->s_reserved_used);
}

/**
 * rvt_qp_wqe_unreserve - clean reserved operation
 * @qp - the rvt qp
 * @wqe - the send wqe
 *
 * This decrements the reserve use count.
 *
 * This call MUST precede the change to
 * s_last to ensure that post send sees a stable
 * s_avail.
 *
 * An smp_mb__after_atomic() is used to ensure
 * the compiler does not juggle the order of the s_last
 * ring index and the decrementing of s_reserved_used.
 */
static inline void rvt_qp_wqe_unreserve(
        struct rvt_qp *qp,
        struct rvt_swqe *wqe)
{
        if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
                atomic_dec(&qp->s_reserved_used);
                /* ensure no compiler re-order up to s_last change */
                smp_mb__after_atomic();
        }
}
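
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * post-send path pairs the reserve with a marked send flag so the
 * completion path can unreserve (op_params is a placeholder name):
 *
 *        if (op_params[wr->opcode].flags & RVT_OPERATION_USE_RESERVE) {
 *                rvt_qp_wqe_reserve(qp, wqe);
 *                wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
 *        }
 */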

extern const enum ib_wc_opcode ib_rvt_wc_opcode[];

/**
 * rvt_qp_swqe_complete() - insert send completion
 * @qp - the qp
 * @wqe - the send wqe
 * @opcode - the completion opcode
 * @status - completion status
 *
 * Insert a send completion into the completion
 * queue if the qp indicates it should be done.
 *
 * See IBTA 10.7.3.1 for info on completion
 * control.
 */
static inline void rvt_qp_swqe_complete(
        struct rvt_qp *qp,
        struct rvt_swqe *wqe,
        enum ib_wc_opcode opcode,
        enum ib_wc_status status)
{
        if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED))
                return;
        if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
            (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
             status != IB_WC_SUCCESS) {
                struct ib_wc wc;

                memset(&wc, 0, sizeof(wc));
                wc.wr_id = wqe->wr.wr_id;
                wc.status = status;
                wc.opcode = opcode;
                wc.qp = &qp->ibqp;
                wc.byte_len = wqe->length;
                rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
                             status != IB_WC_SUCCESS);
        }
}
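
/*
 * Illustrative sketch (not part of the original header): drivers map the
 * WR opcode to a WC opcode via the ib_rvt_wc_opcode[] table, e.g.:
 *
 *        rvt_qp_swqe_complete(qp, wqe,
 *                             ib_rvt_wc_opcode[wqe->wr.opcode],
 *                             IB_WC_SUCCESS);
 */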

/*
 * Compare the lower 24 bits of the msn values.
 * Returns an integer less than, equal to, or greater than zero.
 */
static inline int rvt_cmp_msn(u32 a, u32 b)
{
        return (((int)a) - ((int)b)) << 8;
}
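
/*
 * Illustrative example (not part of the original header): the left shift
 * discards the upper 8 bits, so 24-bit wraparound compares correctly;
 * e.g. rvt_cmp_msn(0x000001, 0xffffff) yields 0x200 (positive), treating
 * MSN 0x000001 as newer than 0xffffff.
 */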

/**
 * rvt_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 rvt_compute_aeth(struct rvt_qp *qp);

/**
 * rvt_get_credit - process the credit field of an incoming AETH
 * @qp: the qp whose send work queue may be restarted
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void rvt_get_credit(struct rvt_qp *qp, u32 aeth);

/**
 * rvt_restart_sge - rewind the sge state for a wqe
 * @ss: the sge state pointer
 * @wqe: the wqe to rewind
 * @len: the data length from the start of the wqe in bytes
 *
 * Returns the remaining data length.
 */
u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len);

/**
 * rvt_div_round_up_mtu - perform a shift-based MTU round-up divide
 * @qp - the queue pair
 * @len - the length
 */
static inline u32 rvt_div_round_up_mtu(struct rvt_qp *qp, u32 len)
{
        return (len + qp->pmtu - 1) >> qp->log_pmtu;
}
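
/*
 * Illustrative example (not part of the original header): with a 4096
 * byte PMTU (log_pmtu == 12), an 8193 byte message needs
 * rvt_div_round_up_mtu(qp, 8193) == 3 packets.
 */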

/**
 * rvt_div_mtu - perform a shift-based MTU divide
 * @qp - the queue pair
 * @len - the length
 */
static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len)
{
        return len >> qp->log_pmtu;
}

/**
 * rvt_timeout_to_jiffies - Convert a ULP timeout input into jiffies
 * @timeout - timeout input (0 - 31).
 *
 * Return a timeout value in jiffies.
 */
static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
{
        if (timeout > 31)
                timeout = 31;

        return usecs_to_jiffies(1U << timeout) * 4096UL / 1000UL;
}
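
/*
 * Illustrative example (not part of the original header): per IBTA the
 * timeout is 4.096 usec * 2^timeout, so timeout == 14 corresponds to
 * roughly 4.096 usec * 16384 ~= 67 msec worth of jiffies.
 */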

/**
 * rvt_lookup_qpn - return the QP with the given QPN
 * @rdi: the rvt device info
 * @rvp: the ibport
 * @qpn: the QP number to look up
 *
 * The caller must hold the rcu_read_lock(), and keep the lock until
 * the returned qp is no longer in use.
 */
static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
                                            struct rvt_ibport *rvp,
                                            u32 qpn) __must_hold(RCU)
{
        struct rvt_qp *qp = NULL;

        if (unlikely(qpn <= 1)) {
                qp = rcu_dereference(rvp->qp[qpn]);
        } else {
                u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);

                for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
                        qp = rcu_dereference(qp->next))
                        if (qp->ibqp.qp_num == qpn)
                                break;
        }
        return qp;
}
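
/*
 * Illustrative sketch (not part of the original header): a typical lookup
 * takes a reference before dropping the RCU read lock:
 *
 *        rcu_read_lock();
 *        qp = rvt_lookup_qpn(rdi, rvp, qpn);
 *        if (qp)
 *                rvt_get_qp(qp);
 *        rcu_read_unlock();
 *        ... use qp ...
 *        rvt_put_qp(qp);
 */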

/**
 * rvt_mod_retry_timer_ext - mod a retry timer
 * @qp - the QP
 * @shift - timeout shift to wait for multiple packets
 *
 * Modify a potentially already running retry timer.
 */
static inline void rvt_mod_retry_timer_ext(struct rvt_qp *qp, u8 shift)
{
        struct ib_qp *ibqp = &qp->ibqp;
        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

        lockdep_assert_held(&qp->s_lock);
        qp->s_flags |= RVT_S_TIMER;
        /* 4.096 usec. * (1 << qp->timeout) */
        mod_timer(&qp->s_timer, jiffies + rdi->busy_jiffies +
                  (qp->timeout_jiffies << shift));
}

static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
{
        rvt_mod_retry_timer_ext(qp, 0);
}

/**
 * rvt_put_qp_swqe - drop refs held by swqe
 * @qp: the send qp
 * @wqe: the send wqe
 *
 * This drops any references held by the swqe
 */
static inline void rvt_put_qp_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
        rvt_put_swqe(wqe);
        if (qp->allowed_ops == IB_OPCODE_UD)
                atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
}

extern const int ib_rvt_state_ops[];

struct rvt_dev_info;
int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only);
void rvt_comm_est(struct rvt_qp *qp);
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
unsigned long rvt_rnr_tbl_to_usec(u32 index);
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t);
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth);
void rvt_del_timers_sync(struct rvt_qp *qp);
void rvt_stop_rc_timers(struct rvt_qp *qp);
void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift);
static inline void rvt_add_retry_timer(struct rvt_qp *qp)
{
        rvt_add_retry_timer_ext(qp, 0);
}

void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
                  void *data, u32 length,
                  bool release, bool copy_last);
void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
                       enum ib_wc_status status);
void rvt_ruc_loopback(struct rvt_qp *qp);

/**
 * struct rvt_qp_iter - the iterator for QPs
 * @qp - the current QP
 *
 * This structure defines the current iterator
 * state for sequenced access to all QPs relative
 * to an rvt_dev_info.
 */
struct rvt_qp_iter {
        struct rvt_qp *qp;
        /* private: backpointer */
        struct rvt_dev_info *rdi;
        /* private: callback routine */
        void (*cb)(struct rvt_qp *qp, u64 v);
        /* private: for arg to callback routine */
        u64 v;
        /* private: number of SMI,GSI QPs for device */
        int specials;
        /* private: current iterator index */
        int n;
};

struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
                                     u64 v,
                                     void (*cb)(struct rvt_qp *qp, u64 v));
int rvt_qp_iter_next(struct rvt_qp_iter *iter);
void rvt_qp_iter(struct rvt_dev_info *rdi,
                 u64 v,
                 void (*cb)(struct rvt_qp *qp, u64 v));
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey);
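
/*
 * Illustrative sketch (not part of the original header): a debugfs-style
 * walk over all QPs using the iterator API (error handling elided):
 *
 *        struct rvt_qp_iter *iter = rvt_qp_iter_init(rdi, 0, NULL);
 *
 *        while (iter && !rvt_qp_iter_next(iter)) {
 *                struct rvt_qp *qp = iter->qp;
 *                ... inspect qp ...
 *        }
 *        kfree(iter);
 */
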
#endif          /* DEF_RDMAVT_INCQP_H */