linux/drivers/infiniband/sw/rdmavt/qp.c
/*
 * Copyright(c) 2016 - 2020 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/hash.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_hdrs.h>
#include <rdma/opa_addr.h>
#include <rdma/uverbs_ioctl.h>
#include "qp.h"
#include "vt.h"
#include "trace.h"

#define RVT_RWQ_COUNT_THRESHOLD 16

static void rvt_rc_timeout(struct timer_list *t);
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                         enum ib_qp_type type);

/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 */
static const u32 ib_rvt_rnr_table[32] = {
        655360, /* 00: 655.36 */
        10,     /* 01:    .01 */
        20,     /* 02:    .02 */
        30,     /* 03:    .03 */
        40,     /* 04:    .04 */
        60,     /* 05:    .06 */
        80,     /* 06:    .08 */
        120,    /* 07:    .12 */
        160,    /* 08:    .16 */
        240,    /* 09:    .24 */
        320,    /* 0A:    .32 */
        480,    /* 0B:    .48 */
        640,    /* 0C:    .64 */
        960,    /* 0D:    .96 */
        1280,   /* 0E:   1.28 */
        1920,   /* 0F:   1.92 */
        2560,   /* 10:   2.56 */
        3840,   /* 11:   3.84 */
        5120,   /* 12:   5.12 */
        7680,   /* 13:   7.68 */
        10240,  /* 14:  10.24 */
        15360,  /* 15:  15.36 */
        20480,  /* 16:  20.48 */
        30720,  /* 17:  30.72 */
        40960,  /* 18:  40.96 */
        61440,  /* 19:  61.44 */
        81920,  /* 1A:  81.92 */
        122880, /* 1B: 122.88 */
        163840, /* 1C: 163.84 */
        245760, /* 1D: 245.76 */
        327680, /* 1E: 327.68 */
        491520  /* 1F: 491.52 */
};
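
/*
 * Worked example (editor's addition, illustrative only): the RNR NAK
 * timer field in an AETH is a 5-bit code, so a lookup needs only the
 * low five bits; e.g. code 0x0C indexes entry 12 above and yields
 * 640 us (0.64 ms).  The helper name below is hypothetical.
 */
#if 0   /* illustrative sketch, not driver code */
static u32 example_rnr_code_to_usec(u8 aeth_rnr_code)
{
        /* mask down to the 5-bit code space before indexing */
        return ib_rvt_rnr_table[aeth_rnr_code & 0x1f];
}
#endif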

/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; rvt_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
        [IB_QPS_RESET] = 0,
        [IB_QPS_INIT] = RVT_POST_RECV_OK,
        [IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
        [IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
            RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
            RVT_PROCESS_NEXT_SEND_OK,
        [IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
            RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
        [IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
            RVT_POST_SEND_OK | RVT_FLUSH_SEND,
        [IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
            RVT_POST_SEND_OK | RVT_FLUSH_SEND,
};
EXPORT_SYMBOL(ib_rvt_state_ops);
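
/*
 * Illustrative sketch (editor's addition): callers are expected to
 * test these per-state flags before queuing work, along the lines of
 * the checks in rvt_post_send()/rvt_post_recv().  The helper below is
 * hypothetical.
 */
#if 0   /* illustrative sketch, not driver code */
static bool example_can_post_send(struct rvt_qp *qp)
{
        /* SQE and ERR still allow posting; the WQE is flushed later */
        return !!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK);
}
#endif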

/* platform specific: return the last level cache (llc) size, in KiB */
static int rvt_wss_llc_size(void)
{
        /* assume that the boot CPU value is universal for all CPUs */
        return boot_cpu_data.x86_cache_size;
}

/* platform specific: cacheless copy */
static void cacheless_memcpy(void *dst, void *src, size_t n)
{
        /*
         * Use the only available X64 cacheless copy.  Add a __user cast
         * to quiet sparse.  The src argument is already in the kernel so
         * there are no security issues.  The extra fault recovery machinery
         * is not invoked.
         */
        __copy_user_nocache(dst, (void __user *)src, n, 0);
}

void rvt_wss_exit(struct rvt_dev_info *rdi)
{
        struct rvt_wss *wss = rdi->wss;

        if (!wss)
                return;

        /* coded to handle partially initialized and repeat callers */
        kfree(wss->entries);
        wss->entries = NULL;
        kfree(rdi->wss);
        rdi->wss = NULL;
}

/*
 * rvt_wss_init - Init wss data structures
 *
 * Return: 0 on success
 */
int rvt_wss_init(struct rvt_dev_info *rdi)
{
        unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
        unsigned int wss_threshold = rdi->dparms.wss_threshold;
        unsigned int wss_clean_period = rdi->dparms.wss_clean_period;
        long llc_size;
        long llc_bits;
        long table_size;
        long table_bits;
        struct rvt_wss *wss;
        int node = rdi->dparms.node;

        if (sge_copy_mode != RVT_SGE_COPY_ADAPTIVE) {
                rdi->wss = NULL;
                return 0;
        }

        rdi->wss = kzalloc_node(sizeof(*rdi->wss), GFP_KERNEL, node);
        if (!rdi->wss)
                return -ENOMEM;
        wss = rdi->wss;

        /* check for a valid percent range - default to 80 if none or invalid */
        if (wss_threshold < 1 || wss_threshold > 100)
                wss_threshold = 80;

        /* reject a wildly large period */
        if (wss_clean_period > 1000000)
                wss_clean_period = 256;

        /* reject a zero period */
        if (wss_clean_period == 0)
                wss_clean_period = 1;

        /*
         * Calculate the table size - the next power of 2 larger than the
         * LLC size.  LLC size is in KiB.
         */
        llc_size = rvt_wss_llc_size() * 1024;
        table_size = roundup_pow_of_two(llc_size);

        /* one bit per page in rounded up table */
        llc_bits = llc_size / PAGE_SIZE;
        table_bits = table_size / PAGE_SIZE;
        wss->pages_mask = table_bits - 1;
        wss->num_entries = table_bits / BITS_PER_LONG;

        wss->threshold = (llc_bits * wss_threshold) / 100;
        if (wss->threshold == 0)
                wss->threshold = 1;

        wss->clean_period = wss_clean_period;
        atomic_set(&wss->clean_counter, wss_clean_period);

        wss->entries = kcalloc_node(wss->num_entries, sizeof(*wss->entries),
                                    GFP_KERNEL, node);
        if (!wss->entries) {
                rvt_wss_exit(rdi);
                return -ENOMEM;
        }

        return 0;
}
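
/*
 * Worked example (editor's addition) of the sizing above, assuming a
 * 32 MiB LLC and 4 KiB pages: llc_size = 32768 KiB * 1024 = 32 MiB is
 * already a power of two, so table_size == llc_size.  Then llc_bits =
 * table_bits = 32 MiB / 4 KiB = 8192 page bits, pages_mask = 8191,
 * num_entries = 8192 / 64 = 128 longs, and with the default 80%
 * threshold wss->threshold = 8192 * 80 / 100 = 6553 pages.
 */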

/*
 * Advance the clean counter.  When the clean period has expired,
 * clean an entry.
 *
 * This is implemented in atomics to avoid locking.  Because multiple
 * variables are involved, it can be racy, which can lead to slightly
 * inaccurate information.  Since this is only a heuristic, this is
 * OK.  Any inaccuracies will clean themselves out as the counter
 * advances.  That said, it is unlikely the entry clean operation will
 * race - the next possible racer will not start until the next clean
 * period.
 *
 * The clean counter is implemented as a decrement to zero.  When zero
 * is reached an entry is cleaned.
 */
static void wss_advance_clean_counter(struct rvt_wss *wss)
{
        int entry;
        int weight;
        unsigned long bits;

        /* become the cleaner if we decrement the counter to zero */
        if (atomic_dec_and_test(&wss->clean_counter)) {
                /*
                 * Set, not add, the clean period.  This avoids an issue
                 * where the counter could decrement below the clean period.
                 * Doing a set can result in lost decrements, slowing the
                 * clean advance.  Since this is a heuristic, this possible
                 * slowdown is OK.
                 *
                 * An alternative is to loop, advancing the counter by a
                 * clean period until the result is > 0. However, this could
                 * lead to several threads keeping another in the clean loop.
                 * This could be mitigated by limiting the number of times
                 * we stay in the loop.
                 */
                atomic_set(&wss->clean_counter, wss->clean_period);

                /*
                 * Uniquely grab the entry to clean and move to next.
                 * The current entry is always the lower bits of
                 * wss.clean_entry.  The table size, wss.num_entries,
                 * is always a power-of-2.
                 */
                entry = (atomic_inc_return(&wss->clean_entry) - 1)
                        & (wss->num_entries - 1);

                /* clear the entry and count the bits */
                bits = xchg(&wss->entries[entry], 0);
                weight = hweight64((u64)bits);
                /* only adjust the contended total count if needed */
                if (weight)
                        atomic_sub(weight, &wss->total_count);
        }
}

/*
 * Insert the given address into the working set array.
 */
static void wss_insert(struct rvt_wss *wss, void *address)
{
        u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss->pages_mask;
        u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
        u32 nr = page & (BITS_PER_LONG - 1);

        if (!test_and_set_bit(nr, &wss->entries[entry]))
                atomic_inc(&wss->total_count);

        wss_advance_clean_counter(wss);
}

/*
 * Is the working set larger than the threshold?
 */
static inline bool wss_exceeds_threshold(struct rvt_wss *wss)
{
        return atomic_read(&wss->total_count) >= wss->threshold;
}
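
/*
 * Illustrative sketch (editor's addition): how the working-set state
 * above feeds the adaptive SGE copy decision.  Once the tracked set
 * exceeds the LLC-based threshold, a cacheless copy avoids evicting
 * hot cache lines.  The helper is hypothetical; the real decision is
 * made in the rvt_copy_sge() path.
 */
#if 0   /* illustrative sketch, not driver code */
static void example_adaptive_copy(struct rvt_wss *wss,
                                  void *dst, void *src, size_t n)
{
        wss_insert(wss, dst);
        if (wss_exceeds_threshold(wss))
                cacheless_memcpy(dst, src, n);
        else
                memcpy(dst, src, n);
}
#endif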

static void get_map_page(struct rvt_qpn_table *qpt,
                         struct rvt_qpn_map *map)
{
        unsigned long page = get_zeroed_page(GFP_KERNEL);

        /*
         * Free the page if someone raced with us installing it.
         */

        spin_lock(&qpt->lock);
        if (map->page)
                free_page(page);
        else
                map->page = (void *)page;
        spin_unlock(&qpt->lock);
}

/**
 * init_qpn_table - initialize the QP number table for a device
 * @rdi: rvt dev struct
 * @qpt: the QPN table
 */
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
{
        u32 offset, i;
        struct rvt_qpn_map *map;
        int ret = 0;

        if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
                return -EINVAL;

        spin_lock_init(&qpt->lock);

        qpt->last = rdi->dparms.qpn_start;
        qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;

        /*
         * Drivers may want some QPs beyond what we need for verbs; let them
         * use our qpn table, no need for two. Let's go ahead and mark the
         * bitmaps for those. The reserved range must be *after* the range
         * which verbs will pick from.
         */

        /* Figure out number of bit maps needed before reserved range */
        qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;

        /* This should always be zero */
        offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;

        /* Starting with the first reserved bit map */
        map = &qpt->map[qpt->nmaps];

        rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
                    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
        for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
                if (!map->page) {
                        get_map_page(qpt, map);
                        if (!map->page) {
                                ret = -ENOMEM;
                                break;
                        }
                }
                set_bit(offset, map->page);
                offset++;
                if (offset == RVT_BITS_PER_PAGE) {
                        /* next page */
                        qpt->nmaps++;
                        map++;
                        offset = 0;
                }
        }
        return ret;
}
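
/*
 * Worked example (editor's addition) of the reserved-range setup
 * above, assuming 4 KiB pages (RVT_BITS_PER_PAGE = 32768) and a
 * hypothetical qpn_res_start of 0x10000: the loop starts at map index
 * 0x10000 / 32768 = 2 with offset 0, sets one bit per reserved QPN,
 * and advances to the next map page each time offset reaches 32768.
 */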

/**
 * free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
static void free_qpn_table(struct rvt_qpn_table *qpt)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
                free_page((unsigned long)qpt->map[i].page);
}

/**
 * rvt_driver_qp_init - Init driver qp resources
 * @rdi: rvt dev structure
 *
 * Return: 0 on success
 */
int rvt_driver_qp_init(struct rvt_dev_info *rdi)
{
        int i;
        int ret = -ENOMEM;

        if (!rdi->dparms.qp_table_size)
                return -EINVAL;

        /*
         * If driver is not doing any QP allocation then make sure it is
         * providing the necessary QP functions.
         */
        if (!rdi->driver_f.free_all_qps ||
            !rdi->driver_f.qp_priv_alloc ||
            !rdi->driver_f.qp_priv_free ||
            !rdi->driver_f.notify_qp_reset ||
            !rdi->driver_f.notify_restart_rc)
                return -EINVAL;

        /* allocate parent object */
        rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
                                   rdi->dparms.node);
        if (!rdi->qp_dev)
                return -ENOMEM;

        /* allocate hash table */
        rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
        rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
        rdi->qp_dev->qp_table =
                kmalloc_array_node(rdi->qp_dev->qp_table_size,
                             sizeof(*rdi->qp_dev->qp_table),
                             GFP_KERNEL, rdi->dparms.node);
        if (!rdi->qp_dev->qp_table)
                goto no_qp_table;

        for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
                RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);

        spin_lock_init(&rdi->qp_dev->qpt_lock);

        /* initialize qpn map */
        if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
                goto fail_table;

        spin_lock_init(&rdi->n_qps_lock);

        return 0;

fail_table:
        kfree(rdi->qp_dev->qp_table);
        free_qpn_table(&rdi->qp_dev->qpn_table);

no_qp_table:
        kfree(rdi->qp_dev);

        return ret;
}

/**
 * rvt_free_qp_cb - callback function to reset a qp
 * @qp: the qp to reset
 * @v: a 64-bit value
 *
 * This function resets the qp and removes it from the
 * qp hash table.
 */
static void rvt_free_qp_cb(struct rvt_qp *qp, u64 v)
{
        unsigned int *qp_inuse = (unsigned int *)v;
        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

        /* Reset the qp and remove it from the qp hash list */
        rvt_reset_qp(rdi, qp, qp->ibqp.qp_type);

        /* Increment the qp_inuse count */
        (*qp_inuse)++;
}

/**
 * rvt_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 * Return the number of QPs still in use.
 */
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
{
        unsigned int qp_inuse = 0;

        qp_inuse += rvt_mcast_tree_empty(rdi);

        rvt_qp_iter(rdi, (u64)&qp_inuse, rvt_free_qp_cb);

        return qp_inuse;
}

/**
 * rvt_qp_exit - clean up qps on device exit
 * @rdi: rvt dev structure
 *
 * Check for qp leaks and free resources.
 */
void rvt_qp_exit(struct rvt_dev_info *rdi)
{
        u32 qps_inuse = rvt_free_all_qps(rdi);

        if (qps_inuse)
                rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
                           qps_inuse);
        if (!rdi->qp_dev)
                return;

        kfree(rdi->qp_dev->qp_table);
        free_qpn_table(&rdi->qp_dev->qpn_table);
        kfree(rdi->qp_dev);
}

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
                              struct rvt_qpn_map *map, unsigned off)
{
        return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}
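
/*
 * Worked example (editor's addition): mk_qpn() is the inverse of the
 * map/offset decomposition used by alloc_qpn() below.  With 4 KiB
 * pages (RVT_BITS_PER_PAGE = 32768), QPN 70000 decomposes into map
 * index 70000 / 32768 = 2 and offset 70000 - 65536 = 4464, and
 * mk_qpn(qpt, &qpt->map[2], 4464) reassembles 2 * 32768 + 4464 = 70000.
 */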

/**
 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
 *             IB_QPT_SMI/IB_QPT_GSI
 * @rdi: rvt device info structure
 * @qpt: queue pair number table pointer
 * @type: the QP type
 * @port_num: IB port number, 1 based, comes from core
 * @exclude_prefix: prefix of special queue pair number being allocated
 *
 * Return: The queue pair number on success, otherwise a negative errno
 */
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
                     enum ib_qp_type type, u8 port_num, u8 exclude_prefix)
{
        u32 i, offset, max_scan, qpn;
        struct rvt_qpn_map *map;
        u32 ret;
        u32 max_qpn = exclude_prefix == RVT_AIP_QP_PREFIX ?
                RVT_AIP_QPN_MAX : RVT_QPN_MAX;

        if (rdi->driver_f.alloc_qpn)
                return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);

        if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
                unsigned n;

                ret = type == IB_QPT_GSI;
                n = 1 << (ret + 2 * (port_num - 1));
                spin_lock(&qpt->lock);
                if (qpt->flags & n)
                        ret = -EINVAL;
                else
                        qpt->flags |= n;
                spin_unlock(&qpt->lock);
                goto bail;
        }

        qpn = qpt->last + qpt->incr;
        if (qpn >= max_qpn)
                qpn = qpt->incr | ((qpt->last & 1) ^ 1);
        /* offset carries bit 0 */
        offset = qpn & RVT_BITS_PER_PAGE_MASK;
        map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
        max_scan = qpt->nmaps - !offset;
        for (i = 0;;) {
                if (unlikely(!map->page)) {
                        get_map_page(qpt, map);
                        if (unlikely(!map->page))
                                break;
                }
                do {
                        if (!test_and_set_bit(offset, map->page)) {
                                qpt->last = qpn;
                                ret = qpn;
                                goto bail;
                        }
                        offset += qpt->incr;
                        /*
                         * This qpn might be bogus if offset >= BITS_PER_PAGE.
                         * That is OK.  It gets re-assigned below.
                         */
                        qpn = mk_qpn(qpt, map, offset);
                } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
                /*
                 * In order to keep the number of pages allocated to a
                 * minimum, we scan all the existing pages before increasing
                 * the size of the bitmap table.
                 */
                if (++i > max_scan) {
                        if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
                                break;
                        map = &qpt->map[qpt->nmaps++];
                        /* start at incr with current bit 0 */
                        offset = qpt->incr | (offset & 1);
                } else if (map < &qpt->map[qpt->nmaps]) {
                        ++map;
                        /* start at incr with current bit 0 */
                        offset = qpt->incr | (offset & 1);
                } else {
                        map = &qpt->map[0];
                        /* wrap to first map page, invert bit 0 */
                        offset = qpt->incr | ((offset & 1) ^ 1);
                }
                /* there can be no set bits in low-order QoS bits */
                WARN_ON(rdi->dparms.qos_shift > 1 &&
                        offset & ((BIT(rdi->dparms.qos_shift - 1) - 1) << 1));
                qpn = mk_qpn(qpt, map, offset);
        }

        ret = -ENOMEM;

bail:
        return ret;
}
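
/*
 * Illustrative sketch (editor's addition): the search stride above is
 * qpt->incr = qpn_inc << qos_shift, so candidate offsets keep the
 * low-order QoS bits clear while bit 0 carries the parity handling.
 * A hypothetical restatement of the WARN_ON() invariant:
 */
#if 0   /* illustrative sketch, not driver code */
static bool example_qos_bits_clear(u32 offset, u32 qos_shift)
{
        /* bits 1 .. qos_shift-1 must stay clear; bit 0 is parity */
        return !(offset & ((BIT(qos_shift - 1) - 1) << 1));
}
#endif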

/**
 * rvt_clear_mr_refs - Drop held mr refs
 * @qp: rvt qp data structure
 * @clr_sends: Whether to clear the send side or not
 */
static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
        unsigned n;
        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

        if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
                rvt_put_ss(&qp->s_rdma_read_sge);

        rvt_put_ss(&qp->r_sge);

        if (clr_sends) {
                while (qp->s_last != qp->s_head) {
                        struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);

                        rvt_put_qp_swqe(qp, wqe);
                        if (++qp->s_last >= qp->s_size)
                                qp->s_last = 0;
                        smp_wmb(); /* see qp_set_savail */
                }
                if (qp->s_rdma_mr) {
                        rvt_put_mr(qp->s_rdma_mr);
                        qp->s_rdma_mr = NULL;
                }
        }

        for (n = 0; qp->s_ack_queue && n < rvt_max_atomic(rdi); n++) {
                struct rvt_ack_entry *e = &qp->s_ack_queue[n];

                if (e->rdma_sge.mr) {
                        rvt_put_mr(e->rdma_sge.mr);
                        e->rdma_sge.mr = NULL;
                }
        }
}

/**
 * rvt_swqe_has_lkey - return true if lkey is used by swqe
 * @wqe: the send wqe
 * @lkey: the lkey
 *
 * Test the swqe for using lkey
 */
static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey)
{
        int i;

        for (i = 0; i < wqe->wr.num_sge; i++) {
                struct rvt_sge *sge = &wqe->sg_list[i];

                if (rvt_mr_has_lkey(sge->mr, lkey))
                        return true;
        }
        return false;
}

/**
 * rvt_qp_sends_has_lkey - return true if qp sends use lkey
 * @qp: the rvt_qp
 * @lkey: the lkey
 */
static bool rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey)
{
        u32 s_last = qp->s_last;

        while (s_last != qp->s_head) {
                struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, s_last);

                if (rvt_swqe_has_lkey(wqe, lkey))
                        return true;

                if (++s_last >= qp->s_size)
                        s_last = 0;
        }
        if (qp->s_rdma_mr)
                if (rvt_mr_has_lkey(qp->s_rdma_mr, lkey))
                        return true;
        return false;
}

/**
 * rvt_qp_acks_has_lkey - return true if acks have lkey
 * @qp: the qp
 * @lkey: the lkey
 */
static bool rvt_qp_acks_has_lkey(struct rvt_qp *qp, u32 lkey)
{
        int i;
        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

        for (i = 0; qp->s_ack_queue && i < rvt_max_atomic(rdi); i++) {
                struct rvt_ack_entry *e = &qp->s_ack_queue[i];

                if (rvt_mr_has_lkey(e->rdma_sge.mr, lkey))
                        return true;
        }
        return false;
}

/**
 * rvt_qp_mr_clean - clean up remote ops for lkey
 * @qp: the qp
 * @lkey: the lkey that is being de-registered
 *
 * This routine checks if the lkey is being used by
 * the qp.
 *
 * If so, the qp is put into an error state to eliminate
 * any references from the qp.
 */
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey)
{
        bool lastwqe = false;

        if (qp->ibqp.qp_type == IB_QPT_SMI ||
            qp->ibqp.qp_type == IB_QPT_GSI)
                /* avoid special QPs */
                return;
        spin_lock_irq(&qp->r_lock);
        spin_lock(&qp->s_hlock);
        spin_lock(&qp->s_lock);

        if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
                goto check_lwqe;

        if (rvt_ss_has_lkey(&qp->r_sge, lkey) ||
            rvt_qp_sends_has_lkey(qp, lkey) ||
            rvt_qp_acks_has_lkey(qp, lkey))
                lastwqe = rvt_error_qp(qp, IB_WC_LOC_PROT_ERR);
check_lwqe:
        spin_unlock(&qp->s_lock);
        spin_unlock(&qp->s_hlock);
        spin_unlock_irq(&qp->r_lock);
        if (lastwqe) {
                struct ib_event ev;

                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
}

/**
 * rvt_remove_qp - remove qp from table
 * @rdi: rvt dev struct
 * @qp: qp to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive routine.
 */
static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
        struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
        u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
        unsigned long flags;
        int removed = 1;

        spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

        if (rcu_dereference_protected(rvp->qp[0],
                        lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
                RCU_INIT_POINTER(rvp->qp[0], NULL);
        } else if (rcu_dereference_protected(rvp->qp[1],
                        lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
                RCU_INIT_POINTER(rvp->qp[1], NULL);
        } else {
                struct rvt_qp *q;
                struct rvt_qp __rcu **qpp;

                removed = 0;
                qpp = &rdi->qp_dev->qp_table[n];
                for (; (q = rcu_dereference_protected(*qpp,
                        lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
                        qpp = &q->next) {
                        if (q == qp) {
                                RCU_INIT_POINTER(*qpp,
                                     rcu_dereference_protected(qp->next,
                                     lockdep_is_held(&rdi->qp_dev->qpt_lock)));
                                removed = 1;
                                trace_rvt_qpremove(qp, n);
                                break;
                        }
                }
        }

        spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
        if (removed) {
                synchronize_rcu();
                rvt_put_qp(qp);
        }
}

/**
 * rvt_alloc_rq - allocate memory for user or kernel buffer
 * @rq: receive queue data structure
 * @size: number of request queue entries
 * @node: The NUMA node
 * @udata: True if user data is available, false otherwise
 *
 * Return: 0 on success; -ENOMEM if memory allocation failed.
 * This function is used by both shared receive
 * queues and non-shared receive queues to allocate
 * memory.
 */
int rvt_alloc_rq(struct rvt_rq *rq, u32 size, int node,
                 struct ib_udata *udata)
{
        if (udata) {
                rq->wq = vmalloc_user(sizeof(struct rvt_rwq) + size);
                if (!rq->wq)
                        goto bail;
                /* need kwq with no buffers */
                rq->kwq = kzalloc_node(sizeof(*rq->kwq), GFP_KERNEL, node);
                if (!rq->kwq)
                        goto bail;
                rq->kwq->curr_wq = rq->wq->wq;
        } else {
                /* need kwq with buffers */
                rq->kwq =
                        vzalloc_node(sizeof(struct rvt_krwq) + size, node);
                if (!rq->kwq)
                        goto bail;
                rq->kwq->curr_wq = rq->kwq->wq;
        }

        spin_lock_init(&rq->kwq->p_lock);
        spin_lock_init(&rq->kwq->c_lock);
        return 0;
bail:
        rvt_free_rq(rq);
        return -ENOMEM;
}
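
/*
 * Illustrative sketch (editor's addition): how a caller sizes the
 * buffer handed to rvt_alloc_rq().  This mirrors the arithmetic in
 * rvt_create_qp() below; the WR and SGE counts are hypothetical.
 */
#if 0   /* illustrative sketch, not driver code */
static int example_alloc_recv_queue(struct rvt_rq *rq, int node)
{
        u32 wqe_sz = sizeof(struct ib_sge) * 4 +        /* 4 recv SGEs */
                     sizeof(struct rvt_rwqe);
        u32 size = 64 + 1;                              /* 64 recv WRs */

        /* a NULL udata selects the kernel (vzalloc'ed kwq) path */
        return rvt_alloc_rq(rq, size * wqe_sz, node, NULL);
}
#endif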

/**
 * rvt_init_qp - initialize the QP state to the reset state
 * @rdi: rvt dev struct
 * @qp: the QP to init or reinit
 * @type: the QP type
 *
 * This function is called from both rvt_create_qp() and
 * rvt_reset_qp().  The difference is that the reset path
 * takes the necessary locks to protect against concurrent
 * access.
 */
static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                        enum ib_qp_type type)
{
        qp->remote_qpn = 0;
        qp->qkey = 0;
        qp->qp_access_flags = 0;
        qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
        qp->s_hdrwords = 0;
        qp->s_wqe = NULL;
        qp->s_draining = 0;
        qp->s_next_psn = 0;
        qp->s_last_psn = 0;
        qp->s_sending_psn = 0;
        qp->s_sending_hpsn = 0;
        qp->s_psn = 0;
        qp->r_psn = 0;
        qp->r_msn = 0;
        if (type == IB_QPT_RC) {
                qp->s_state = IB_OPCODE_RC_SEND_LAST;
                qp->r_state = IB_OPCODE_RC_SEND_LAST;
        } else {
                qp->s_state = IB_OPCODE_UC_SEND_LAST;
                qp->r_state = IB_OPCODE_UC_SEND_LAST;
        }
        qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
        qp->r_nak_state = 0;
        qp->r_aflags = 0;
        qp->r_flags = 0;
        qp->s_head = 0;
        qp->s_tail = 0;
        qp->s_cur = 0;
        qp->s_acked = 0;
        qp->s_last = 0;
        qp->s_ssn = 1;
        qp->s_lsn = 0;
        qp->s_mig_state = IB_MIG_MIGRATED;
        qp->r_head_ack_queue = 0;
        qp->s_tail_ack_queue = 0;
        qp->s_acked_ack_queue = 0;
        qp->s_num_rd_atomic = 0;
        qp->r_sge.num_sge = 0;
        atomic_set(&qp->s_reserved_used, 0);
}

/**
 * _rvt_reset_qp - initialize the QP state to the reset state
 * @rdi: rvt dev struct
 * @qp: the QP to reset
 * @type: the QP type
 *
 * r_lock, s_hlock, and s_lock are required to be held by the caller
 */
static void _rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                          enum ib_qp_type type)
        __must_hold(&qp->s_lock)
        __must_hold(&qp->s_hlock)
        __must_hold(&qp->r_lock)
{
        lockdep_assert_held(&qp->r_lock);
        lockdep_assert_held(&qp->s_hlock);
        lockdep_assert_held(&qp->s_lock);
        if (qp->state != IB_QPS_RESET) {
                qp->state = IB_QPS_RESET;

                /* Let drivers flush their waitlist */
                rdi->driver_f.flush_qp_waiters(qp);
                rvt_stop_rc_timers(qp);
                qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
                spin_unlock(&qp->s_lock);
                spin_unlock(&qp->s_hlock);
                spin_unlock_irq(&qp->r_lock);

                /* Stop the send queue and the retry timer */
                rdi->driver_f.stop_send_queue(qp);
                rvt_del_timers_sync(qp);
                /* Wait for things to stop */
                rdi->driver_f.quiesce_qp(qp);

                /* take qp out the hash and wait for it to be unused */
                rvt_remove_qp(rdi, qp);

                /* grab the lock b/c it was locked at call time */
                spin_lock_irq(&qp->r_lock);
                spin_lock(&qp->s_hlock);
                spin_lock(&qp->s_lock);

                rvt_clear_mr_refs(qp, 1);
                /*
                 * Let the driver do any tear down or re-init it needs to for
                 * a qp that has been reset
                 */
                rdi->driver_f.notify_qp_reset(qp);
        }
        rvt_init_qp(rdi, qp, type);
        lockdep_assert_held(&qp->r_lock);
        lockdep_assert_held(&qp->s_hlock);
        lockdep_assert_held(&qp->s_lock);
}

/**
 * rvt_reset_qp - initialize the QP state to the reset state
 * @rdi: the device info
 * @qp: the QP to reset
 * @type: the QP type
 *
 * This is the wrapper function to acquire the r_lock, s_hlock, and s_lock
 * before calling _rvt_reset_qp().
 */
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                         enum ib_qp_type type)
{
        spin_lock_irq(&qp->r_lock);
        spin_lock(&qp->s_hlock);
        spin_lock(&qp->s_lock);
        _rvt_reset_qp(rdi, qp, type);
        spin_unlock(&qp->s_lock);
        spin_unlock(&qp->s_hlock);
        spin_unlock_irq(&qp->r_lock);
}

/**
 * rvt_free_qpn - Free a qpn from the bit map
 * @qpt: QP table
 * @qpn: queue pair number to free
 */
static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
        struct rvt_qpn_map *map;

        if ((qpn & RVT_AIP_QP_PREFIX_MASK) == RVT_AIP_QP_BASE)
                qpn &= RVT_AIP_QP_SUFFIX;

        map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE;
        if (map->page)
                clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}
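
/*
 * Editor's note: the prefix stripping above mirrors rvt_create_qp(),
 * which ORs RVT_AIP_QP_BASE into the allocated suffix for netdev
 * (AIP) QPs.  Freeing strips the prefix back off first, so the bitmap
 * bit cleared is the same one alloc_qpn() originally set.
 */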

/**
 * get_allowed_ops - Given a QP type return the appropriate allowed OP
 * @type: valid, supported, QP type
 */
static u8 get_allowed_ops(enum ib_qp_type type)
{
        return type == IB_QPT_RC ? IB_OPCODE_RC : type == IB_QPT_UC ?
                IB_OPCODE_UC : IB_OPCODE_UD;
}
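
/*
 * Illustrative sketch (editor's addition): allowed_ops holds the IB
 * transport opcode class (IB_OPCODE_RC/UC/UD), which occupies the top
 * three bits of a wire opcode.  A hypothetical membership check:
 */
#if 0   /* illustrative sketch, not driver code */
static bool example_opcode_matches_qp(struct rvt_qp *qp, u8 wire_opcode)
{
        /* the top three bits of an IB opcode select the transport */
        return (wire_opcode & 0xe0) == qp->allowed_ops;
}
#endif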

/**
 * free_ud_wq_attr - Clean up AH attribute cache for UD QPs
 * @qp: Valid QP with allowed_ops set
 *
 * The rvt_swqe data structure being used is a union, so this is
 * only valid for UD QPs.
 */
static void free_ud_wq_attr(struct rvt_qp *qp)
{
        struct rvt_swqe *wqe;
        int i;

        for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
                wqe = rvt_get_swqe_ptr(qp, i);
                kfree(wqe->ud_wr.attr);
                wqe->ud_wr.attr = NULL;
        }
}

/**
 * alloc_ud_wq_attr - AH attribute cache for UD QPs
 * @qp: Valid QP with allowed_ops set
 * @node: Numa node for allocation
 *
 * The rvt_swqe data structure being used is a union, so this is
 * only valid for UD QPs.
 */
static int alloc_ud_wq_attr(struct rvt_qp *qp, int node)
{
        struct rvt_swqe *wqe;
        int i;

        for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
                wqe = rvt_get_swqe_ptr(qp, i);
                wqe->ud_wr.attr = kzalloc_node(sizeof(*wqe->ud_wr.attr),
                                               GFP_KERNEL, node);
                if (!wqe->ud_wr.attr) {
                        free_ud_wq_attr(qp);
                        return -ENOMEM;
                }
        }

        return 0;
}

/**
 * rvt_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Queue pair creation is mostly an rvt issue. However, drivers have their own
 * unique idea of what queue pair numbers mean. For instance there is a reserved
 * range for PSM.
 *
 * Return: the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
                            struct ib_qp_init_attr *init_attr,
                            struct ib_udata *udata)
{
        struct rvt_qp *qp;
        int err;
        struct rvt_swqe *swq = NULL;
        size_t sz;
        size_t sg_list_sz;
        struct ib_qp *ret = ERR_PTR(-ENOMEM);
        struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
        void *priv = NULL;
        size_t sqsize;
        u8 exclude_prefix = 0;

        if (!rdi)
                return ERR_PTR(-EINVAL);

        if (init_attr->create_flags & ~IB_QP_CREATE_NETDEV_USE)
                return ERR_PTR(-EOPNOTSUPP);

        if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
            init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr)
                return ERR_PTR(-EINVAL);

        /* Check receive queue parameters if no SRQ is specified. */
        if (!init_attr->srq) {
                if (init_attr->cap.max_recv_sge >
                    rdi->dparms.props.max_recv_sge ||
                    init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
                        return ERR_PTR(-EINVAL);

                if (init_attr->cap.max_send_sge +
                    init_attr->cap.max_send_wr +
                    init_attr->cap.max_recv_sge +
                    init_attr->cap.max_recv_wr == 0)
                        return ERR_PTR(-EINVAL);
        }
        sqsize =
                init_attr->cap.max_send_wr + 1 +
                rdi->dparms.reserved_operations;
        switch (init_attr->qp_type) {
        case IB_QPT_SMI:
        case IB_QPT_GSI:
                if (init_attr->port_num == 0 ||
                    init_attr->port_num > ibpd->device->phys_port_cnt)
                        return ERR_PTR(-EINVAL);
                fallthrough;
        case IB_QPT_UC:
        case IB_QPT_RC:
        case IB_QPT_UD:
                sz = struct_size(swq, sg_list, init_attr->cap.max_send_sge);
                swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node);
                if (!swq)
                        return ERR_PTR(-ENOMEM);

                sz = sizeof(*qp);
                sg_list_sz = 0;
                if (init_attr->srq) {
                        struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);

                        if (srq->rq.max_sge > 1)
                                sg_list_sz = sizeof(*qp->r_sg_list) *
                                        (srq->rq.max_sge - 1);
                } else if (init_attr->cap.max_recv_sge > 1)
                        sg_list_sz = sizeof(*qp->r_sg_list) *
                                (init_attr->cap.max_recv_sge - 1);
                qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL,
                                  rdi->dparms.node);
                if (!qp)
                        goto bail_swq;
                qp->allowed_ops = get_allowed_ops(init_attr->qp_type);

                RCU_INIT_POINTER(qp->next, NULL);
                if (init_attr->qp_type == IB_QPT_RC) {
                        qp->s_ack_queue =
                                kcalloc_node(rvt_max_atomic(rdi),
                                             sizeof(*qp->s_ack_queue),
                                             GFP_KERNEL,
                                             rdi->dparms.node);
                        if (!qp->s_ack_queue)
                                goto bail_qp;
                }
                /* initialize timers needed for rc qp */
                timer_setup(&qp->s_timer, rvt_rc_timeout, 0);
                hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC,
                             HRTIMER_MODE_REL);
                qp->s_rnr_timer.function = rvt_rc_rnr_retry;

                /*
                 * Driver needs to set up its private QP structure and do any
                 * initialization that is needed.
                 */
                priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
                if (IS_ERR(priv)) {
                        ret = priv;
                        goto bail_qp;
                }
                qp->priv = priv;
                qp->timeout_jiffies =
                        usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
                                1000UL);
                if (init_attr->srq) {
                        sz = 0;
                } else {
                        qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
                        qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
                        sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
                                sizeof(struct rvt_rwqe);
                        err = rvt_alloc_rq(&qp->r_rq, qp->r_rq.size * sz,
                                           rdi->dparms.node, udata);
                        if (err) {
                                ret = ERR_PTR(err);
                                goto bail_driver_priv;
                        }
                }

                /*
                 * ib_create_qp() will initialize qp->ibqp
                 * except for qp->ibqp.qp_num.
                 */
                spin_lock_init(&qp->r_lock);
                spin_lock_init(&qp->s_hlock);
                spin_lock_init(&qp->s_lock);
                atomic_set(&qp->refcount, 0);
                atomic_set(&qp->local_ops_pending, 0);
                init_waitqueue_head(&qp->wait);
                INIT_LIST_HEAD(&qp->rspwait);
                qp->state = IB_QPS_RESET;
                qp->s_wq = swq;
                qp->s_size = sqsize;
                qp->s_avail = init_attr->cap.max_send_wr;
                qp->s_max_sge = init_attr->cap.max_send_sge;
                if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
                        qp->s_flags = RVT_S_SIGNAL_REQ_WR;
                err = alloc_ud_wq_attr(qp, rdi->dparms.node);
                if (err) {
                        ret = ERR_PTR(err);
                        goto bail_rq_rvt;
                }

                if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
                        exclude_prefix = RVT_AIP_QP_PREFIX;

                err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
                                init_attr->qp_type,
                                init_attr->port_num,
                                exclude_prefix);
                if (err < 0) {
                        ret = ERR_PTR(err);
                        goto bail_rq_wq;
                }
                qp->ibqp.qp_num = err;
                if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
                        qp->ibqp.qp_num |= RVT_AIP_QP_BASE;
                qp->port_num = init_attr->port_num;
                rvt_init_qp(rdi, qp, init_attr->qp_type);
                if (rdi->driver_f.qp_priv_init) {
                        err = rdi->driver_f.qp_priv_init(rdi, qp, init_attr);
                        if (err) {
                                ret = ERR_PTR(err);
                                goto bail_rq_wq;
                        }
                }
                break;

        default:
                /* Don't support raw QPs */
                return ERR_PTR(-EOPNOTSUPP);
        }

        init_attr->cap.max_inline_data = 0;

        /*
         * Return the address of the RWQ as the offset to mmap.
         * See rvt_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                if (!qp->r_rq.wq) {
                        __u64 offset = 0;

                        err = ib_copy_to_udata(udata, &offset,
                                               sizeof(offset));
                        if (err) {
                                ret = ERR_PTR(err);
                                goto bail_qpn;
                        }
                } else {
                        u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

                        qp->ip = rvt_create_mmap_info(rdi, s, udata,
                                                      qp->r_rq.wq);
                        if (IS_ERR(qp->ip)) {
                                ret = ERR_CAST(qp->ip);
                                goto bail_qpn;
                        }

                        err = ib_copy_to_udata(udata, &qp->ip->offset,
                                               sizeof(qp->ip->offset));
                        if (err) {
                                ret = ERR_PTR(err);
                                goto bail_ip;
                        }
                }
                qp->pid = current->pid;
        }

        spin_lock(&rdi->n_qps_lock);
        if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
                spin_unlock(&rdi->n_qps_lock);
                ret = ERR_PTR(-ENOMEM);
                goto bail_ip;
        }

        rdi->n_qps_allocated++;
        /*
         * Maintain a busy_jiffies variable that will be added to the timeout
         * period in mod_retry_timer and add_retry_timer. This busy jiffies
         * is scaled by the number of rc qps created for the device to reduce
         * the number of timeouts occurring when there is a large number of
         * qps. busy_jiffies is incremented every rc qp scaling interval.
         * The scaling interval is selected based on extensive performance
         * evaluation of targeted workloads.
         */
        if (init_attr->qp_type == IB_QPT_RC) {
                rdi->n_rc_qps++;
                rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
        }
        spin_unlock(&rdi->n_qps_lock);

        if (qp->ip) {
                spin_lock_irq(&rdi->pending_lock);
                list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
                spin_unlock_irq(&rdi->pending_lock);
        }

        ret = &qp->ibqp;

        return ret;

bail_ip:
        if (qp->ip)
                kref_put(&qp->ip->ref, rvt_release_mmap_info);

bail_qpn:
        rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

bail_rq_wq:
        free_ud_wq_attr(qp);

bail_rq_rvt:
        rvt_free_rq(&qp->r_rq);

bail_driver_priv:
        rdi->driver_f.qp_priv_free(rdi, qp);

bail_qp:
        kfree(qp->s_ack_queue);
        kfree(qp);

bail_swq:
        vfree(swq);

        return ret;
}

/**
 * rvt_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 *
 * Return: true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
{
        struct ib_wc wc;
        int ret = 0;
        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

        lockdep_assert_held(&qp->r_lock);
        lockdep_assert_held(&qp->s_lock);
        if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
                goto bail;

        qp->state = IB_QPS_ERR;

        if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
                qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
                del_timer(&qp->s_timer);
        }

        if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
                qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;

        rdi->driver_f.notify_error_qp(qp);

        /* Schedule the sending tasklet to drain the send work queue. */
        if (READ_ONCE(qp->s_last) != qp->s_head)
                rdi->driver_f.schedule_send(qp);

        rvt_clear_mr_refs(qp, 0);

        memset(&wc, 0, sizeof(wc));
        wc.qp = &qp->ibqp;
        wc.opcode = IB_WC_RECV;

        if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
                wc.wr_id = qp->r_wr_id;
                wc.status = err;
                rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
        }
        wc.status = IB_WC_WR_FLUSH_ERR;

        if (qp->r_rq.kwq) {
                u32 head;
                u32 tail;
                struct rvt_rwq *wq = NULL;
                struct rvt_krwq *kwq = NULL;

                spin_lock(&qp->r_rq.kwq->c_lock);
                /* qp->ip is used to validate if there is a user buffer mmapped */
                if (qp->ip) {
                        wq = qp->r_rq.wq;
                        head = RDMA_READ_UAPI_ATOMIC(wq->head);
                        tail = RDMA_READ_UAPI_ATOMIC(wq->tail);
                } else {
                        kwq = qp->r_rq.kwq;
                        head = kwq->head;
                        tail = kwq->tail;
                }
                /* sanity check pointers before trusting them */
                if (head >= qp->r_rq.size)
                        head = 0;
                if (tail >= qp->r_rq.size)
                        tail = 0;
                while (tail != head) {
                        wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
                        if (++tail >= qp->r_rq.size)
                                tail = 0;
                        rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
                }
                if (qp->ip)
                        RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail);
                else
                        kwq->tail = tail;
                spin_unlock(&qp->r_rq.kwq->c_lock);
        } else if (qp->ibqp.event_handler) {
                ret = 1;
        }

bail:
        return ret;
}
EXPORT_SYMBOL(rvt_error_qp);
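
/*
 * Illustrative sketch (editor's addition): after rvt_error_qp() the
 * posted receive WQEs complete with IB_WC_WR_FLUSH_ERR, one flush
 * completion per WR.  A hypothetical consumer-side drain loop:
 */
#if 0   /* illustrative sketch, not driver code */
static void example_drain_cq(struct ib_cq *cq)
{
        struct ib_wc wc;

        while (ib_poll_cq(cq, 1, &wc) > 0)
                if (wc.status == IB_WC_WR_FLUSH_ERR)
                        pr_debug("flushed wr_id %llu\n", wc.wr_id);
}
#endif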

/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
        struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
        unsigned long flags;

        rvt_get_qp(qp);
        spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

        if (qp->ibqp.qp_num <= 1) {
                rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
        } else {
                u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);

                qp->next = rdi->qp_dev->qp_table[n];
                rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
                trace_rvt_qpinsert(qp, n);
        }

        spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
}
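
/*
 * Illustrative sketch (editor's addition): the lookup counterpart of
 * the insert above is rvt_lookup_qpn(), which walks the same hash
 * chain under RCU.  A simplified, hypothetical form:
 */
#if 0   /* illustrative sketch, not driver code */
static struct rvt_qp *example_lookup_qpn(struct rvt_dev_info *rdi, u32 qpn)
{
        u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);
        struct rvt_qp *qp;

        rcu_read_lock();
        for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
             qp = rcu_dereference(qp->next))
                if (qp->ibqp.qp_num == qpn)
                        break;
        rcu_read_unlock();      /* a real caller holds a qp reference */
        return qp;
}
#endif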
1456
1457/**
1458 * rvt_modify_qp - modify the attributes of a queue pair
1459 * @ibqp: the queue pair who's attributes we're modifying
1460 * @attr: the new attributes
1461 * @attr_mask: the mask of attributes to modify
1462 * @udata: user data for libibverbs.so
1463 *
1464 * Return: 0 on success, otherwise returns an errno.
1465 */
1466int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1467                  int attr_mask, struct ib_udata *udata)
1468{
1469        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1470        struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1471        enum ib_qp_state cur_state, new_state;
1472        struct ib_event ev;
1473        int lastwqe = 0;
1474        int mig = 0;
1475        int pmtu = 0; /* for gcc warning only */
1476        int opa_ah;
1477
1478        if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1479                return -EOPNOTSUPP;
1480
1481        spin_lock_irq(&qp->r_lock);
1482        spin_lock(&qp->s_hlock);
1483        spin_lock(&qp->s_lock);
1484
1485        cur_state = attr_mask & IB_QP_CUR_STATE ?
1486                attr->cur_qp_state : qp->state;
1487        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
1488        opa_ah = rdma_cap_opa_ah(ibqp->device, qp->port_num);
1489
1490        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
1491                                attr_mask))
1492                goto inval;
1493
1494        if (rdi->driver_f.check_modify_qp &&
1495            rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
1496                goto inval;
1497
1498        if (attr_mask & IB_QP_AV) {
1499                if (opa_ah) {
1500                        if (rdma_ah_get_dlid(&attr->ah_attr) >=
1501                                opa_get_mcast_base(OPA_MCAST_NR))
1502                                goto inval;
1503                } else {
1504                        if (rdma_ah_get_dlid(&attr->ah_attr) >=
1505                                be16_to_cpu(IB_MULTICAST_LID_BASE))
1506                                goto inval;
1507                }
1508
1509                if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
1510                        goto inval;
1511        }
1512
1513        if (attr_mask & IB_QP_ALT_PATH) {
1514                if (opa_ah) {
1515                        if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
1516                                opa_get_mcast_base(OPA_MCAST_NR))
1517                                goto inval;
1518                } else {
1519                        if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
1520                                be16_to_cpu(IB_MULTICAST_LID_BASE))
1521                                goto inval;
1522                }
1523
1524                if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
1525                        goto inval;
1526                if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
1527                        goto inval;
1528        }
1529
1530        if (attr_mask & IB_QP_PKEY_INDEX)
1531                if (attr->pkey_index >= rvt_get_npkeys(rdi))
1532                        goto inval;
1533
1534        if (attr_mask & IB_QP_MIN_RNR_TIMER)
1535                if (attr->min_rnr_timer > 31)
1536                        goto inval;
1537
1538        if (attr_mask & IB_QP_PORT)
1539                if (qp->ibqp.qp_type == IB_QPT_SMI ||
1540                    qp->ibqp.qp_type == IB_QPT_GSI ||
1541                    attr->port_num == 0 ||
1542                    attr->port_num > ibqp->device->phys_port_cnt)
1543                        goto inval;
1544
1545        if (attr_mask & IB_QP_DEST_QPN)
1546                if (attr->dest_qp_num > RVT_QPN_MASK)
1547                        goto inval;
1548
1549        if (attr_mask & IB_QP_RETRY_CNT)
1550                if (attr->retry_cnt > 7)
1551                        goto inval;
1552
1553        if (attr_mask & IB_QP_RNR_RETRY)
1554                if (attr->rnr_retry > 7)
1555                        goto inval;
1556
1557         /*
1558          * Don't allow invalid path_mtu values.  It is OK to set it
1559          * greater than the active mtu (or even the max_cap, if we have
1560          * tuned that to a small mtu).  We'll set qp->path_mtu
1561          * to the lesser of the requested attribute mtu and the active
1562          * mtu, for packetizing messages.
1563          * Note that the QP port has to be set in INIT and the MTU in RTR.
1564          */
1565        if (attr_mask & IB_QP_PATH_MTU) {
1566                pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
1567                if (pmtu < 0)
1568                        goto inval;
1569        }
1570
1571        if (attr_mask & IB_QP_PATH_MIG_STATE) {
1572                if (attr->path_mig_state == IB_MIG_REARM) {
1573                        if (qp->s_mig_state == IB_MIG_ARMED)
1574                                goto inval;
1575                        if (new_state != IB_QPS_RTS)
1576                                goto inval;
1577                } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
1578                        if (qp->s_mig_state == IB_MIG_REARM)
1579                                goto inval;
1580                        if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
1581                                goto inval;
1582                        if (qp->s_mig_state == IB_MIG_ARMED)
1583                                mig = 1;
1584                } else {
1585                        goto inval;
1586                }
1587        }
1588
1589        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1590                if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
1591                        goto inval;
1592
1593        switch (new_state) {
1594        case IB_QPS_RESET:
1595                if (qp->state != IB_QPS_RESET)
1596                        _rvt_reset_qp(rdi, qp, ibqp->qp_type);
1597                break;
1598
1599        case IB_QPS_RTR:
1600                /* Allow event to re-trigger if QP set to RTR more than once */
1601                qp->r_flags &= ~RVT_R_COMM_EST;
1602                qp->state = new_state;
1603                break;
1604
1605        case IB_QPS_SQD:
1606                qp->s_draining = qp->s_last != qp->s_cur;
1607                qp->state = new_state;
1608                break;
1609
1610        case IB_QPS_SQE:
1611                if (qp->ibqp.qp_type == IB_QPT_RC)
1612                        goto inval;
1613                qp->state = new_state;
1614                break;
1615
1616        case IB_QPS_ERR:
1617                lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1618                break;
1619
1620        default:
1621                qp->state = new_state;
1622                break;
1623        }
1624
1625        if (attr_mask & IB_QP_PKEY_INDEX)
1626                qp->s_pkey_index = attr->pkey_index;
1627
1628        if (attr_mask & IB_QP_PORT)
1629                qp->port_num = attr->port_num;
1630
1631        if (attr_mask & IB_QP_DEST_QPN)
1632                qp->remote_qpn = attr->dest_qp_num;
1633
1634        if (attr_mask & IB_QP_SQ_PSN) {
1635                qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
1636                qp->s_psn = qp->s_next_psn;
1637                qp->s_sending_psn = qp->s_next_psn;
1638                qp->s_last_psn = qp->s_next_psn - 1;
1639                qp->s_sending_hpsn = qp->s_last_psn;
1640        }
1641
1642        if (attr_mask & IB_QP_RQ_PSN)
1643                qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;
1644
1645        if (attr_mask & IB_QP_ACCESS_FLAGS)
1646                qp->qp_access_flags = attr->qp_access_flags;
1647
1648        if (attr_mask & IB_QP_AV) {
1649                rdma_replace_ah_attr(&qp->remote_ah_attr, &attr->ah_attr);
1650                qp->s_srate = rdma_ah_get_static_rate(&attr->ah_attr);
1651                qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
1652        }
1653
1654        if (attr_mask & IB_QP_ALT_PATH) {
1655                rdma_replace_ah_attr(&qp->alt_ah_attr, &attr->alt_ah_attr);
1656                qp->s_alt_pkey_index = attr->alt_pkey_index;
1657        }
1658
1659        if (attr_mask & IB_QP_PATH_MIG_STATE) {
1660                qp->s_mig_state = attr->path_mig_state;
1661                if (mig) {
1662                        qp->remote_ah_attr = qp->alt_ah_attr;
1663                        qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
1664                        qp->s_pkey_index = qp->s_alt_pkey_index;
1665                }
1666        }
1667
1668        if (attr_mask & IB_QP_PATH_MTU) {
1669                qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
1670                qp->log_pmtu = ilog2(qp->pmtu);
1671        }
1672
1673        if (attr_mask & IB_QP_RETRY_CNT) {
1674                qp->s_retry_cnt = attr->retry_cnt;
1675                qp->s_retry = attr->retry_cnt;
1676        }
1677
1678        if (attr_mask & IB_QP_RNR_RETRY) {
1679                qp->s_rnr_retry_cnt = attr->rnr_retry;
1680                qp->s_rnr_retry = attr->rnr_retry;
1681        }
1682
1683        if (attr_mask & IB_QP_MIN_RNR_TIMER)
1684                qp->r_min_rnr_timer = attr->min_rnr_timer;
1685
1686        if (attr_mask & IB_QP_TIMEOUT) {
1687                qp->timeout = attr->timeout;
1688                qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout);
1689        }
1690
1691        if (attr_mask & IB_QP_QKEY)
1692                qp->qkey = attr->qkey;
1693
1694        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1695                qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
1696
1697        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
1698                qp->s_max_rd_atomic = attr->max_rd_atomic;
1699
1700        if (rdi->driver_f.modify_qp)
1701                rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);
1702
1703        spin_unlock(&qp->s_lock);
1704        spin_unlock(&qp->s_hlock);
1705        spin_unlock_irq(&qp->r_lock);
1706
1707        if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1708                rvt_insert_qp(rdi, qp);
1709
1710        if (lastwqe) {
1711                ev.device = qp->ibqp.device;
1712                ev.element.qp = &qp->ibqp;
1713                ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
1714                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1715        }
1716        if (mig) {
1717                ev.device = qp->ibqp.device;
1718                ev.element.qp = &qp->ibqp;
1719                ev.event = IB_EVENT_PATH_MIG;
1720                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1721        }
1722        return 0;
1723
1724inval:
1725        spin_unlock(&qp->s_lock);
1726        spin_unlock(&qp->s_hlock);
1727        spin_unlock_irq(&qp->r_lock);
1728        return -EINVAL;
1729}
1730
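/*
 * Editorial example (not part of the original file): a minimal sketch of
 * how a kernel ULP drives rvt_modify_qp() through the ib_modify_qp() verb
 * for the RESET->INIT transition.  The helper name is hypothetical.
 */
static inline int example_qp_to_init(struct ib_qp *qp, u8 port, u16 pkey_index)
{
        struct ib_qp_attr attr = {
                .qp_state = IB_QPS_INIT,
                .port_num = port,
                .pkey_index = pkey_index,
                .qp_access_flags = IB_ACCESS_REMOTE_WRITE,
        };

        /* The port must be set in INIT; the MTU later, in RTR. */
        return ib_modify_qp(qp, &attr,
                            IB_QP_STATE | IB_QP_PORT | IB_QP_PKEY_INDEX |
                            IB_QP_ACCESS_FLAGS);
}
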
1731/**
1732 * rvt_destroy_qp - destroy a queue pair
1733 * @ibqp: the queue pair to destroy
1734 * @udata: unused by the driver
1735 *
1736 * Note that this can be called while the QP is actively sending or
1737 * receiving!
1738 *
1739 * Return: 0 on success.
1740 */
1741int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
1742{
1743        struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1744        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1745
1746        rvt_reset_qp(rdi, qp, ibqp->qp_type);
1747
1748        wait_event(qp->wait, !atomic_read(&qp->refcount));
1749        /* qpn is now available for use again */
1750        rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
1751
1752        spin_lock(&rdi->n_qps_lock);
1753        rdi->n_qps_allocated--;
1754        if (qp->ibqp.qp_type == IB_QPT_RC) {
1755                rdi->n_rc_qps--;
1756                rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
1757        }
1758        spin_unlock(&rdi->n_qps_lock);
1759
1760        if (qp->ip)
1761                kref_put(&qp->ip->ref, rvt_release_mmap_info);
1762        kvfree(qp->r_rq.kwq);
1763        rdi->driver_f.qp_priv_free(rdi, qp);
1764        kfree(qp->s_ack_queue);
1765        rdma_destroy_ah_attr(&qp->remote_ah_attr);
1766        rdma_destroy_ah_attr(&qp->alt_ah_attr);
1767        free_ud_wq_attr(qp);
1768        vfree(qp->s_wq);
1769        kfree(qp);
1770        return 0;
1771}
1772
1773/**
1774 * rvt_query_qp - query the attributes of a QP
1775 * @ibqp: IB qp to query
1776 * @attr: attr struct to fill in
1777 * @attr_mask: attr mask ignored
1778 * @init_attr: struct to fill in
1779 *
1780 * Return: always 0
1781 */
1782int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1783                 int attr_mask, struct ib_qp_init_attr *init_attr)
1784{
1785        struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1786        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1787
1788        attr->qp_state = qp->state;
1789        attr->cur_qp_state = attr->qp_state;
1790        attr->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
1791        attr->path_mig_state = qp->s_mig_state;
1792        attr->qkey = qp->qkey;
1793        attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
1794        attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
1795        attr->dest_qp_num = qp->remote_qpn;
1796        attr->qp_access_flags = qp->qp_access_flags;
1797        attr->cap.max_send_wr = qp->s_size - 1 -
1798                rdi->dparms.reserved_operations;
1799        attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
1800        attr->cap.max_send_sge = qp->s_max_sge;
1801        attr->cap.max_recv_sge = qp->r_rq.max_sge;
1802        attr->cap.max_inline_data = 0;
1803        attr->ah_attr = qp->remote_ah_attr;
1804        attr->alt_ah_attr = qp->alt_ah_attr;
1805        attr->pkey_index = qp->s_pkey_index;
1806        attr->alt_pkey_index = qp->s_alt_pkey_index;
1807        attr->en_sqd_async_notify = 0;
1808        attr->sq_draining = qp->s_draining;
1809        attr->max_rd_atomic = qp->s_max_rd_atomic;
1810        attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
1811        attr->min_rnr_timer = qp->r_min_rnr_timer;
1812        attr->port_num = qp->port_num;
1813        attr->timeout = qp->timeout;
1814        attr->retry_cnt = qp->s_retry_cnt;
1815        attr->rnr_retry = qp->s_rnr_retry_cnt;
1816        attr->alt_port_num =
1817                rdma_ah_get_port_num(&qp->alt_ah_attr);
1818        attr->alt_timeout = qp->alt_timeout;
1819
1820        init_attr->event_handler = qp->ibqp.event_handler;
1821        init_attr->qp_context = qp->ibqp.qp_context;
1822        init_attr->send_cq = qp->ibqp.send_cq;
1823        init_attr->recv_cq = qp->ibqp.recv_cq;
1824        init_attr->srq = qp->ibqp.srq;
1825        init_attr->cap = attr->cap;
1826        if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
1827                init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
1828        else
1829                init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
1830        init_attr->qp_type = qp->ibqp.qp_type;
1831        init_attr->port_num = qp->port_num;
1832        return 0;
1833}
1834
1835/**
1836 * rvt_post_recv - post a receive on a QP
1837 * @ibqp: the QP to post the receive on
1838 * @wr: the WR to post
1839 * @bad_wr: the first bad WR is put here
1840 *
1841 * This may be called from interrupt context.
1842 *
1843 * Return: 0 on success otherwise errno
1844 */
1845int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
1846                  const struct ib_recv_wr **bad_wr)
1847{
1848        struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1849        struct rvt_krwq *wq = qp->r_rq.kwq;
1850        unsigned long flags;
1851        int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
1852                                !qp->ibqp.srq;
1853
1854        /* Check that state is OK to post receive. */
1855        if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
1856                *bad_wr = wr;
1857                return -EINVAL;
1858        }
1859
1860        for (; wr; wr = wr->next) {
1861                struct rvt_rwqe *wqe;
1862                u32 next;
1863                int i;
1864
1865                if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
1866                        *bad_wr = wr;
1867                        return -EINVAL;
1868                }
1869
1870                spin_lock_irqsave(&qp->r_rq.kwq->p_lock, flags);
1871                next = wq->head + 1;
1872                if (next >= qp->r_rq.size)
1873                        next = 0;
1874                if (next == READ_ONCE(wq->tail)) {
1875                        spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags);
1876                        *bad_wr = wr;
1877                        return -ENOMEM;
1878                }
1879                if (unlikely(qp_err_flush)) {
1880                        struct ib_wc wc;
1881
1882                        memset(&wc, 0, sizeof(wc));
1883                        wc.qp = &qp->ibqp;
1884                        wc.opcode = IB_WC_RECV;
1885                        wc.wr_id = wr->wr_id;
1886                        wc.status = IB_WC_WR_FLUSH_ERR;
1887                        rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1888                } else {
1889                        wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
1890                        wqe->wr_id = wr->wr_id;
1891                        wqe->num_sge = wr->num_sge;
1892                        for (i = 0; i < wr->num_sge; i++) {
1893                                wqe->sg_list[i].addr = wr->sg_list[i].addr;
1894                                wqe->sg_list[i].length = wr->sg_list[i].length;
1895                                wqe->sg_list[i].lkey = wr->sg_list[i].lkey;
1896                        }
1897                        /*
1898                         * Make sure queue entry is written
1899                         * before the head index.
1900                         */
1901                        smp_store_release(&wq->head, next);
1902                }
1903                spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags);
1904        }
1905        return 0;
1906}
1907
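/*
 * Editorial example (not part of the original file): posting a single
 * receive through the ib_post_recv() verb.  Note the queue always leaves
 * one slot empty, so -ENOMEM is returned when head + 1 would meet tail.
 * The helper name is hypothetical.
 */
static inline int example_post_one_recv(struct ib_qp *qp, struct ib_sge *sge,
                                        u64 wr_id)
{
        struct ib_recv_wr wr = {
                .wr_id = wr_id,
                .sg_list = sge,
                .num_sge = 1,
        };
        const struct ib_recv_wr *bad_wr;

        return ib_post_recv(qp, &wr, &bad_wr);
}
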
1908/**
1909 * rvt_qp_valid_operation - validate post send wr request
1910 * @qp: the qp
1911 * @post_parms: the post send table for the driver
1912 * @wr: the work request
1913 *
1914 * The routine validates the operation based on the
1915 * validation table and returns the length of the operation,
1916 * which can extend beyond the ib_send_wr.  Operation
1917 * dependent flags key atomic operation validation.
1918 *
1919 * There is an exception for UD qps that validates the pd and
1920 * overrides the length to include the additional UD specific
1921 * length.
1922 *
1923 * Returns a negative error or the length of the work request
1924 * for building the swqe.
1925 */
1926static inline int rvt_qp_valid_operation(
1927        struct rvt_qp *qp,
1928        const struct rvt_operation_params *post_parms,
1929        const struct ib_send_wr *wr)
1930{
1931        int len;
1932
1933        if (wr->opcode >= RVT_OPERATION_MAX || !post_parms[wr->opcode].length)
1934                return -EINVAL;
1935        if (!(post_parms[wr->opcode].qpt_support & BIT(qp->ibqp.qp_type)))
1936                return -EINVAL;
1937        if ((post_parms[wr->opcode].flags & RVT_OPERATION_PRIV) &&
1938            ibpd_to_rvtpd(qp->ibqp.pd)->user)
1939                return -EINVAL;
1940        if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC_SGE &&
1941            (wr->num_sge == 0 ||
1942             wr->sg_list[0].length < sizeof(u64) ||
1943             wr->sg_list[0].addr & (sizeof(u64) - 1)))
1944                return -EINVAL;
1945        if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC &&
1946            !qp->s_max_rd_atomic)
1947                return -EINVAL;
1948        len = post_parms[wr->opcode].length;
1949        /* UD specific */
1950        if (qp->ibqp.qp_type != IB_QPT_UC &&
1951            qp->ibqp.qp_type != IB_QPT_RC) {
1952                if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
1953                        return -EINVAL;
1954                len = sizeof(struct ib_ud_wr);
1955        }
1956        return len;
1957}
1958
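/*
 * Editorial example (not part of the original file): the shape of a
 * driver's post send table, modeled loosely on the hfi1/qib tables.
 * Entries with .length == 0 are rejected above as unsupported opcodes.
 */
static const struct rvt_operation_params example_post_parms[RVT_OPERATION_MAX] = {
        [IB_WR_RDMA_WRITE] = {
                .length = sizeof(struct ib_rdma_wr),
                .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
        },
        [IB_WR_ATOMIC_CMP_AND_SWP] = {
                .length = sizeof(struct ib_atomic_wr),
                .qpt_support = BIT(IB_QPT_RC),
                .flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
        },
};
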
1959/**
1960 * rvt_qp_is_avail - determine queue capacity
1961 * @qp: the qp
1962 * @rdi: the rdmavt device
1963 * @reserved_op: true if this is a reserved operation
1964 *
1965 * This assumes the s_hlock is held but that the s_last
1966 * qp variable may change under us (it is uncontrolled).
1967 *
1968 * For non reserved operations, the qp->s_avail
1969 * may be changed.
1970 *
1971 * Return: 0 on success, otherwise -ENOMEM.
1972 */
1973static inline int rvt_qp_is_avail(
1974        struct rvt_qp *qp,
1975        struct rvt_dev_info *rdi,
1976        bool reserved_op)
1977{
1978        u32 slast;
1979        u32 avail;
1980        u32 reserved_used;
1981
1982        /* see rvt_qp_wqe_unreserve() */
1983        smp_mb__before_atomic();
1984        if (unlikely(reserved_op)) {
1985                /* see rvt_qp_wqe_unreserve() */
1986                reserved_used = atomic_read(&qp->s_reserved_used);
1987                if (reserved_used >= rdi->dparms.reserved_operations)
1988                        return -ENOMEM;
1989                return 0;
1990        }
1991        /* non-reserved operations */
1992        if (likely(qp->s_avail))
1993                return 0;
1994        /* See rvt_qp_complete_swqe() */
1995        slast = smp_load_acquire(&qp->s_last);
1996        if (qp->s_head >= slast)
1997                avail = qp->s_size - (qp->s_head - slast);
1998        else
1999                avail = slast - qp->s_head;
2000
2001        reserved_used = atomic_read(&qp->s_reserved_used);
2002        avail = avail - 1 -
2003                (rdi->dparms.reserved_operations - reserved_used);
2004        /* ensure we don't assign a negative s_avail */
2005        if ((s32)avail <= 0)
2006                return -ENOMEM;
2007        qp->s_avail = avail;
2008        if (WARN_ON(qp->s_avail >
2009                    (qp->s_size - 1 - rdi->dparms.reserved_operations)))
2010                rvt_pr_err(rdi,
2011                           "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
2012                           qp->ibqp.qp_num, qp->s_size, qp->s_avail,
2013                           qp->s_head, qp->s_tail, qp->s_cur,
2014                           qp->s_acked, qp->s_last);
2015        return 0;
2016}
2017
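/*
 * Worked example (editorial): with s_size = 32, s_head = 30, s_last = 5
 * and reserved_operations = 2 of which 1 is in use:
 *        head >= last, so avail = 32 - (30 - 5) = 7
 *        avail = 7 - 1 - (2 - 1) = 5 usable non-reserved slots,
 * one slot staying empty to distinguish a full ring from an empty one.
 */
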
2018/**
2019 * rvt_post_one_wr - post one RC, UC, or UD send work request
2020 * @qp: the QP to post on
2021 * @wr: the work request to send
2022 * @call_send: kick the send engine into gear
2023 */
2024static int rvt_post_one_wr(struct rvt_qp *qp,
2025                           const struct ib_send_wr *wr,
2026                           bool *call_send)
2027{
2028        struct rvt_swqe *wqe;
2029        u32 next;
2030        int i;
2031        int j;
2032        int acc;
2033        struct rvt_lkey_table *rkt;
2034        struct rvt_pd *pd;
2035        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2036        u8 log_pmtu;
2037        int ret;
2038        size_t cplen;
2039        bool reserved_op;
2040        int local_ops_delayed = 0;
2041
2042        BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE));
2043
2044        /* IB spec says that num_sge == 0 is OK. */
2045        if (unlikely(wr->num_sge > qp->s_max_sge))
2046                return -EINVAL;
2047
2048        ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr);
2049        if (ret < 0)
2050                return ret;
2051        cplen = ret;
2052
2053        /*
2054         * Local operations include fast register and local invalidate.
2055         * Fast register needs to be processed immediately because the
2056         * registered lkey may be used by following work requests and the
2057         * lkey needs to be valid at the time those requests are posted.
2058         * Local invalidate can be processed immediately if fencing is
2059         * not required and no previous local invalidate ops are pending.
2060         * Signaled local operations that have been processed immediately
2061         * need to have requests with "completion only" flags set posted
2062         * to the send queue in order to generate completions.
2063         */
2064        if ((rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) {
2065                switch (wr->opcode) {
2066                case IB_WR_REG_MR:
2067                        ret = rvt_fast_reg_mr(qp,
2068                                              reg_wr(wr)->mr,
2069                                              reg_wr(wr)->key,
2070                                              reg_wr(wr)->access);
2071                        if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
2072                                return ret;
2073                        break;
2074                case IB_WR_LOCAL_INV:
2075                        if ((wr->send_flags & IB_SEND_FENCE) ||
2076                            atomic_read(&qp->local_ops_pending)) {
2077                                local_ops_delayed = 1;
2078                        } else {
2079                                ret = rvt_invalidate_rkey(
2080                                        qp, wr->ex.invalidate_rkey);
2081                                if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
2082                                        return ret;
2083                        }
2084                        break;
2085                default:
2086                        return -EINVAL;
2087                }
2088        }
2089
2090        reserved_op = rdi->post_parms[wr->opcode].flags &
2091                        RVT_OPERATION_USE_RESERVE;
2092        /* check for avail */
2093        ret = rvt_qp_is_avail(qp, rdi, reserved_op);
2094        if (ret)
2095                return ret;
2096        next = qp->s_head + 1;
2097        if (next >= qp->s_size)
2098                next = 0;
2099
2100        rkt = &rdi->lkey_table;
2101        pd = ibpd_to_rvtpd(qp->ibqp.pd);
2102        wqe = rvt_get_swqe_ptr(qp, qp->s_head);
2103
2104        /* cplen has length from above */
2105        memcpy(&wqe->wr, wr, cplen);
2106
2107        wqe->length = 0;
2108        j = 0;
2109        if (wr->num_sge) {
2110                struct rvt_sge *last_sge = NULL;
2111
2112                acc = wr->opcode >= IB_WR_RDMA_READ ?
2113                        IB_ACCESS_LOCAL_WRITE : 0;
2114                for (i = 0; i < wr->num_sge; i++) {
2115                        u32 length = wr->sg_list[i].length;
2116
2117                        if (length == 0)
2118                                continue;
2119                        ret = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j], last_sge,
2120                                          &wr->sg_list[i], acc);
2121                        if (unlikely(ret < 0))
2122                                goto bail_inval_free;
2123                        wqe->length += length;
2124                        if (ret)
2125                                last_sge = &wqe->sg_list[j];
2126                        j += ret;
2127                }
2128                wqe->wr.num_sge = j;
2129        }
2130
2131        /*
2132         * Calculate and set SWQE PSN values prior to handing it off
2133         * to the driver's check routine. This gives the driver the
2134         * opportunity to adjust PSN values based on internal checks.
2135         */
2136        log_pmtu = qp->log_pmtu;
2137        if (qp->allowed_ops == IB_OPCODE_UD) {
2138                struct rvt_ah *ah = rvt_get_swqe_ah(wqe);
2139
2140                log_pmtu = ah->log_pmtu;
2141                rdma_copy_ah_attr(wqe->ud_wr.attr, &ah->attr);
2142        }
2143
2144        if (rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) {
2145                if (local_ops_delayed)
2146                        atomic_inc(&qp->local_ops_pending);
2147                else
2148                        wqe->wr.send_flags |= RVT_SEND_COMPLETION_ONLY;
2149                wqe->ssn = 0;
2150                wqe->psn = 0;
2151                wqe->lpsn = 0;
2152        } else {
2153                wqe->ssn = qp->s_ssn++;
2154                wqe->psn = qp->s_next_psn;
2155                wqe->lpsn = wqe->psn +
2156                                (wqe->length ?
2157                                        ((wqe->length - 1) >> log_pmtu) :
2158                                        0);
2159        }
2160
2161        /* general part of wqe valid - allow for driver checks */
2162        if (rdi->driver_f.setup_wqe) {
2163                ret = rdi->driver_f.setup_wqe(qp, wqe, call_send);
2164                if (ret < 0)
2165                        goto bail_inval_free_ref;
2166        }
2167
2168        if (!(rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL))
2169                qp->s_next_psn = wqe->lpsn + 1;
2170
2171        if (unlikely(reserved_op)) {
2172                wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
2173                rvt_qp_wqe_reserve(qp, wqe);
2174        } else {
2175                wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
2176                qp->s_avail--;
2177        }
2178        trace_rvt_post_one_wr(qp, wqe, wr->num_sge);
2179        smp_wmb(); /* see request builders */
2180        qp->s_head = next;
2181
2182        return 0;
2183
2184bail_inval_free_ref:
2185        if (qp->allowed_ops == IB_OPCODE_UD)
2186                rdma_destroy_ah_attr(wqe->ud_wr.attr);
2187bail_inval_free:
2188        /* release mr holds */
2189        while (j) {
2190                struct rvt_sge *sge = &wqe->sg_list[--j];
2191
2192                rvt_put_mr(sge->mr);
2193        }
2194        return ret;
2195}
2196
2197/**
2198 * rvt_post_send - post a send on a QP
2199 * @ibqp: the QP to post the send on
2200 * @wr: the list of work requests to post
2201 * @bad_wr: the first bad WR is put here
2202 *
2203 * This may be called from interrupt context.
2204 *
2205 * Return: 0 on success else errno
2206 */
2207int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
2208                  const struct ib_send_wr **bad_wr)
2209{
2210        struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
2211        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
2212        unsigned long flags = 0;
2213        bool call_send;
2214        unsigned nreq = 0;
2215        int err = 0;
2216
2217        spin_lock_irqsave(&qp->s_hlock, flags);
2218
2219        /*
2220         * Ensure QP state is such that we can send. If not bail out early,
2221         * there is no need to do this every time we post a send.
2222         */
2223        if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
2224                spin_unlock_irqrestore(&qp->s_hlock, flags);
2225                return -EINVAL;
2226        }
2227
2228        /*
2229         * If the send queue is empty, and we only have a single WR then just go
2230         * ahead and kick the send engine into gear. Otherwise we will always
2231         * just schedule the send to happen later.
2232         */
2233        call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next;
2234
2235        for (; wr; wr = wr->next) {
2236                err = rvt_post_one_wr(qp, wr, &call_send);
2237                if (unlikely(err)) {
2238                        *bad_wr = wr;
2239                        goto bail;
2240                }
2241                nreq++;
2242        }
2243bail:
2244        spin_unlock_irqrestore(&qp->s_hlock, flags);
2245        if (nreq) {
2246                /*
2247                 * Only call do_send if there is exactly one packet, and the
2248                 * driver said it was ok.
2249                 */
2250                if (nreq == 1 && call_send)
2251                        rdi->driver_f.do_send(qp);
2252                else
2253                        rdi->driver_f.schedule_send_no_lock(qp);
2254        }
2255        return err;
2256}
2257
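/*
 * Editorial example (not part of the original file): a hedged sketch of
 * how a ULP posts a chain through the ib_post_send() verb; on failure,
 * *bad_wr names the first WR that was not queued.  The helper name is
 * hypothetical.
 */
static inline int example_post_send_chain(struct ib_qp *qp,
                                          struct ib_send_wr *first)
{
        const struct ib_send_wr *bad_wr;
        int ret;

        ret = ib_post_send(qp, first, &bad_wr);
        if (ret)
                pr_debug("post_send failed at wr_id %llu: %d\n",
                         bad_wr->wr_id, ret);
        return ret;
}
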
2258/**
2259 * rvt_post_srq_recv - post a receive on a shared receive queue
2260 * @ibsrq: the SRQ to post the receive on
2261 * @wr: the list of work requests to post
2262 * @bad_wr: A pointer to the first WR to cause a problem is put here
2263 *
2264 * This may be called from interrupt context.
2265 *
2266 * Return: 0 on success else errno
2267 */
2268int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
2269                      const struct ib_recv_wr **bad_wr)
2270{
2271        struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
2272        struct rvt_krwq *wq;
2273        unsigned long flags;
2274
2275        for (; wr; wr = wr->next) {
2276                struct rvt_rwqe *wqe;
2277                u32 next;
2278                int i;
2279
2280                if ((unsigned)wr->num_sge > srq->rq.max_sge) {
2281                        *bad_wr = wr;
2282                        return -EINVAL;
2283                }
2284
2285                spin_lock_irqsave(&srq->rq.kwq->p_lock, flags);
2286                wq = srq->rq.kwq;
2287                next = wq->head + 1;
2288                if (next >= srq->rq.size)
2289                        next = 0;
2290                if (next == READ_ONCE(wq->tail)) {
2291                        spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
2292                        *bad_wr = wr;
2293                        return -ENOMEM;
2294                }
2295
2296                wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
2297                wqe->wr_id = wr->wr_id;
2298                wqe->num_sge = wr->num_sge;
2299                for (i = 0; i < wr->num_sge; i++) {
2300                        wqe->sg_list[i].addr = wr->sg_list[i].addr;
2301                        wqe->sg_list[i].length = wr->sg_list[i].length;
2302                        wqe->sg_list[i].lkey = wr->sg_list[i].lkey;
2303                }
2304                /* Make sure queue entry is written before the head index. */
2305                smp_store_release(&wq->head, next);
2306                spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
2307        }
2308        return 0;
2309}
2310
2311/*
2312 * rvt used the internal kernel struct as part of its ABI; for now, make sure
2313 * the kernel struct does not change layout. FIXME: rvt should never cast the
2314 * user struct to a kernel struct.
2315 */
2316static struct ib_sge *rvt_cast_sge(struct rvt_wqe_sge *sge)
2317{
2318        BUILD_BUG_ON(offsetof(struct ib_sge, addr) !=
2319                     offsetof(struct rvt_wqe_sge, addr));
2320        BUILD_BUG_ON(offsetof(struct ib_sge, length) !=
2321                     offsetof(struct rvt_wqe_sge, length));
2322        BUILD_BUG_ON(offsetof(struct ib_sge, lkey) !=
2323                     offsetof(struct rvt_wqe_sge, lkey));
2324        return (struct ib_sge *)sge;
2325}
2326
2327/*
2328 * Validate a RWQE and fill in the SGE state.
2329 * Return 1 if OK.
2330 */
2331static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
2332{
2333        int i, j, ret;
2334        struct ib_wc wc;
2335        struct rvt_lkey_table *rkt;
2336        struct rvt_pd *pd;
2337        struct rvt_sge_state *ss;
2338        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2339
2340        rkt = &rdi->lkey_table;
2341        pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
2342        ss = &qp->r_sge;
2343        ss->sg_list = qp->r_sg_list;
2344        qp->r_len = 0;
2345        for (i = j = 0; i < wqe->num_sge; i++) {
2346                if (wqe->sg_list[i].length == 0)
2347                        continue;
2348                /* Check LKEY */
2349                ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
2350                                  NULL, rvt_cast_sge(&wqe->sg_list[i]),
2351                                  IB_ACCESS_LOCAL_WRITE);
2352                if (unlikely(ret <= 0))
2353                        goto bad_lkey;
2354                qp->r_len += wqe->sg_list[i].length;
2355                j++;
2356        }
2357        ss->num_sge = j;
2358        ss->total_len = qp->r_len;
2359        return 1;
2360
2361bad_lkey:
2362        while (j) {
2363                struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
2364
2365                rvt_put_mr(sge->mr);
2366        }
2367        ss->num_sge = 0;
2368        memset(&wc, 0, sizeof(wc));
2369        wc.wr_id = wqe->wr_id;
2370        wc.status = IB_WC_LOC_PROT_ERR;
2371        wc.opcode = IB_WC_RECV;
2372        wc.qp = &qp->ibqp;
2373        /* Signal solicited completion event. */
2374        rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
2375        return 0;
2376}
2377
2378/**
2379 * get_rvt_head - get the head index of the circular buffer
2380 * @rq: data structure for the request queue entry
2381 * @ip: the mmap info pointer; non-NULL when the queue is user-mapped
2382 *
2383 * Return: head index value
2384 */
2385static inline u32 get_rvt_head(struct rvt_rq *rq, void *ip)
2386{
2387        u32 head;
2388
2389        if (ip)
2390                head = RDMA_READ_UAPI_ATOMIC(rq->wq->head);
2391        else
2392                head = rq->kwq->head;
2393
2394        return head;
2395}
2396
2397/**
2398 * rvt_get_rwqe - copy the next RWQE into the QP's RWQE
2399 * @qp: the QP
2400 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
2401 *
2402 * Return -1 if there is a local error, 0 if no RWQE is available,
2403 * otherwise return 1.
2404 *
2405 * Can be called from interrupt level.
2406 */
2407int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
2408{
2409        unsigned long flags;
2410        struct rvt_rq *rq;
2411        struct rvt_krwq *kwq = NULL;
2412        struct rvt_rwq *wq;
2413        struct rvt_srq *srq;
2414        struct rvt_rwqe *wqe;
2415        void (*handler)(struct ib_event *, void *);
2416        u32 tail;
2417        u32 head;
2418        int ret;
2419        void *ip = NULL;
2420
2421        if (qp->ibqp.srq) {
2422                srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
2423                handler = srq->ibsrq.event_handler;
2424                rq = &srq->rq;
2425                ip = srq->ip;
2426        } else {
2427                srq = NULL;
2428                handler = NULL;
2429                rq = &qp->r_rq;
2430                ip = qp->ip;
2431        }
2432
2433        spin_lock_irqsave(&rq->kwq->c_lock, flags);
2434        if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
2435                ret = 0;
2436                goto unlock;
2437        }
2438        kwq = rq->kwq;
2439        if (ip) {
2440                wq = rq->wq;
2441                tail = RDMA_READ_UAPI_ATOMIC(wq->tail);
2442        } else {
2443                tail = kwq->tail;
2444        }
2445
2446        /* Validate tail before using it since it is user writable. */
2447        if (tail >= rq->size)
2448                tail = 0;
2449
2450        if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) {
2451                head = get_rvt_head(rq, ip);
2452                kwq->count = rvt_get_rq_count(rq, head, tail);
2453        }
2454        if (unlikely(kwq->count == 0)) {
2455                ret = 0;
2456                goto unlock;
2457        }
2458        /* Make sure entry is read after the count is read. */
2459        smp_rmb();
2460        wqe = rvt_get_rwqe_ptr(rq, tail);
2461        /*
2462         * Even though we update the tail index in memory, the verbs
2463         * consumer is not supposed to post more entries until a
2464         * completion is generated.
2465         */
2466        if (++tail >= rq->size)
2467                tail = 0;
2468        if (ip)
2469                RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail);
2470        else
2471                kwq->tail = tail;
2472        if (!wr_id_only && !init_sge(qp, wqe)) {
2473                ret = -1;
2474                goto unlock;
2475        }
2476        qp->r_wr_id = wqe->wr_id;
2477
2478        kwq->count--;
2479        ret = 1;
2480        set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
2481        if (handler) {
2482                /*
2483                 * Validate head pointer value and compute
2484                 * the number of remaining WQEs.
2485                 */
2486                if (kwq->count < srq->limit) {
2487                        kwq->count =
2488                                rvt_get_rq_count(rq,
2489                                                 get_rvt_head(rq, ip), tail);
2490                        if (kwq->count < srq->limit) {
2491                                struct ib_event ev;
2492
2493                                srq->limit = 0;
2494                                spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
2495                                ev.device = qp->ibqp.device;
2496                                ev.element.srq = qp->ibqp.srq;
2497                                ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
2498                                handler(&ev, srq->ibsrq.srq_context);
2499                                goto bail;
2500                        }
2501                }
2502        }
2503unlock:
2504        spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
2505bail:
2506        return ret;
2507}
2508EXPORT_SYMBOL(rvt_get_rwqe);
2509
2510/**
2511 * rvt_comm_est - handle trap with QP established
2512 * @qp: the QP
2513 */
2514void rvt_comm_est(struct rvt_qp *qp)
2515{
2516        qp->r_flags |= RVT_R_COMM_EST;
2517        if (qp->ibqp.event_handler) {
2518                struct ib_event ev;
2519
2520                ev.device = qp->ibqp.device;
2521                ev.element.qp = &qp->ibqp;
2522                ev.event = IB_EVENT_COMM_EST;
2523                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
2524        }
2525}
2526EXPORT_SYMBOL(rvt_comm_est);
2527
2528void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
2529{
2530        unsigned long flags;
2531        int lastwqe;
2532
2533        spin_lock_irqsave(&qp->s_lock, flags);
2534        lastwqe = rvt_error_qp(qp, err);
2535        spin_unlock_irqrestore(&qp->s_lock, flags);
2536
2537        if (lastwqe) {
2538                struct ib_event ev;
2539
2540                ev.device = qp->ibqp.device;
2541                ev.element.qp = &qp->ibqp;
2542                ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
2543                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
2544        }
2545}
2546EXPORT_SYMBOL(rvt_rc_error);
2547
2548/*
2549 *  rvt_rnr_tbl_to_usec - convert an ib_rvt_rnr_table index to usec
2550 *  @index - the index into ib_rvt_rnr_table
2551 *  Return: the RNR timeout in usec for the given index
2552 */
2553unsigned long rvt_rnr_tbl_to_usec(u32 index)
2554{
2555        return ib_rvt_rnr_table[(index & IB_AETH_CREDIT_MASK)];
2556}
2557EXPORT_SYMBOL(rvt_rnr_tbl_to_usec);
2558
2559static inline unsigned long rvt_aeth_to_usec(u32 aeth)
2560{
2561        return ib_rvt_rnr_table[(aeth >> IB_AETH_CREDIT_SHIFT) &
2562                                  IB_AETH_CREDIT_MASK];
2563}
2564
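/*
 * Worked example (editorial): the AETH credit field occupies bits 28:24,
 * so (aeth >> IB_AETH_CREDIT_SHIFT) & IB_AETH_CREDIT_MASK yields a 5-bit
 * code.  Code 0x01 selects ib_rvt_rnr_table[1] = 10 (a 10 usec RNR wait);
 * code 0x00 selects the maximum entry, 655360 usec (655.36 msec).
 */
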
2565/*
2566 *  rvt_add_retry_timer_ext - add/start a retry timer
2567 *  @qp - the QP
2568 *  @shift - timeout shift to wait for multiple packets
2569 *  add a retry timer on the QP
2570 */
2571void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift)
2572{
2573        struct ib_qp *ibqp = &qp->ibqp;
2574        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
2575
2576        lockdep_assert_held(&qp->s_lock);
2577        qp->s_flags |= RVT_S_TIMER;
2578        /* 4.096 usec. * (1 << qp->timeout) */
2579        qp->s_timer.expires = jiffies + rdi->busy_jiffies +
2580                              (qp->timeout_jiffies << shift);
2581        add_timer(&qp->s_timer);
2582}
2583EXPORT_SYMBOL(rvt_add_retry_timer_ext);
2584
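/*
 * Worked example (editorial): timeout_jiffies encodes the IB formula
 * 4.096 usec * (1 << qp->timeout).  For qp->timeout = 14 that is
 * 4.096 usec * 16384 ~= 67 msec per retry; a nonzero @shift doubles
 * the wait per unit to cover multi-packet responses.
 */
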
2585/**
2586 * rvt_add_rnr_timer - add/start an rnr timer on the QP
2587 * @qp: the QP
2588 * @aeth: aeth of RNR timeout, simulated aeth for loopback
2589 */
2590void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
2591{
2592        u32 to;
2593
2594        lockdep_assert_held(&qp->s_lock);
2595        qp->s_flags |= RVT_S_WAIT_RNR;
2596        to = rvt_aeth_to_usec(aeth);
2597        trace_rvt_rnrnak_add(qp, to);
2598        hrtimer_start(&qp->s_rnr_timer,
2599                      ns_to_ktime(1000 * to), HRTIMER_MODE_REL_PINNED);
2600}
2601EXPORT_SYMBOL(rvt_add_rnr_timer);
2602
2603/**
2604 * rvt_stop_rc_timers - stop all timers
2605 * @qp: the QP
2606 * stop any pending timers
2607 */
2608void rvt_stop_rc_timers(struct rvt_qp *qp)
2609{
2610        lockdep_assert_held(&qp->s_lock);
2611        /* Remove QP from all timers */
2612        if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
2613                qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
2614                del_timer(&qp->s_timer);
2615                hrtimer_try_to_cancel(&qp->s_rnr_timer);
2616        }
2617}
2618EXPORT_SYMBOL(rvt_stop_rc_timers);
2619
2620/**
2621 * rvt_stop_rnr_timer - stop an rnr timer
2622 * @qp: the QP
2623 *
2624 * stop an rnr timer if one is currently
2625 * pending.
2626 */
2627static void rvt_stop_rnr_timer(struct rvt_qp *qp)
2628{
2629        lockdep_assert_held(&qp->s_lock);
2630        /* Remove QP from rnr timer */
2631        if (qp->s_flags & RVT_S_WAIT_RNR) {
2632                qp->s_flags &= ~RVT_S_WAIT_RNR;
2633                trace_rvt_rnrnak_stop(qp, 0);
2634        }
2635}
2636
2637/**
2638 * rvt_del_timers_sync - wait for any timeout routines to exit
2639 * @qp: the QP
2640 */
2641void rvt_del_timers_sync(struct rvt_qp *qp)
2642{
2643        del_timer_sync(&qp->s_timer);
2644        hrtimer_cancel(&qp->s_rnr_timer);
2645}
2646EXPORT_SYMBOL(rvt_del_timers_sync);
2647
2648/*
2649 * This is called from s_timer for missing responses.
2650 */
2651static void rvt_rc_timeout(struct timer_list *t)
2652{
2653        struct rvt_qp *qp = from_timer(qp, t, s_timer);
2654        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2655        unsigned long flags;
2656
2657        spin_lock_irqsave(&qp->r_lock, flags);
2658        spin_lock(&qp->s_lock);
2659        if (qp->s_flags & RVT_S_TIMER) {
2660                struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
2661
2662                qp->s_flags &= ~RVT_S_TIMER;
2663                rvp->n_rc_timeouts++;
2664                del_timer(&qp->s_timer);
2665                trace_rvt_rc_timeout(qp, qp->s_last_psn + 1);
2666                if (rdi->driver_f.notify_restart_rc)
2667                        rdi->driver_f.notify_restart_rc(qp,
2668                                                        qp->s_last_psn + 1,
2669                                                        1);
2670                rdi->driver_f.schedule_send(qp);
2671        }
2672        spin_unlock(&qp->s_lock);
2673        spin_unlock_irqrestore(&qp->r_lock, flags);
2674}
2675
2676/*
2677 * This is called from s_timer for RNR timeouts.
2678 */
2679enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t)
2680{
2681        struct rvt_qp *qp = container_of(t, struct rvt_qp, s_rnr_timer);
2682        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2683        unsigned long flags;
2684
2685        spin_lock_irqsave(&qp->s_lock, flags);
2686        rvt_stop_rnr_timer(qp);
2687        trace_rvt_rnrnak_timeout(qp, 0);
2688        rdi->driver_f.schedule_send(qp);
2689        spin_unlock_irqrestore(&qp->s_lock, flags);
2690        return HRTIMER_NORESTART;
2691}
2692EXPORT_SYMBOL(rvt_rc_rnr_retry);
2693
2694/**
2695 * rvt_qp_iter_init - initialize an iterator for QPs
2696 * @rdi: rvt devinfo
2697 * @v: u64 value
2698 * @cb: user-defined callback
2699 *
2700 * This returns an iterator suitable for iterating QPs
2701 * in the system.
2702 *
2703 * The @cb is a user-defined callback and @v is a 64-bit
2704 * value passed to and relevant for processing in the
2705 * @cb.  An example use case would be to alter QP processing
2706 * based on criteria not part of the rvt_qp.
2707 *
2708 * Use cases that require memory allocation to succeed
2709 * must preallocate appropriately.
2710 *
2711 * Return: a pointer to an rvt_qp_iter or NULL
2712 */
2713struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
2714                                     u64 v,
2715                                     void (*cb)(struct rvt_qp *qp, u64 v))
2716{
2717        struct rvt_qp_iter *i;
2718
2719        i = kzalloc(sizeof(*i), GFP_KERNEL);
2720        if (!i)
2721                return NULL;
2722
2723        i->rdi = rdi;
2724        /* number of special QPs (SMI/GSI) for device */
2725        i->specials = rdi->ibdev.phys_port_cnt * 2;
2726        i->v = v;
2727        i->cb = cb;
2728
2729        return i;
2730}
2731EXPORT_SYMBOL(rvt_qp_iter_init);
2732
2733/**
2734 * rvt_qp_iter_next - return the next QP in iter
2735 * @iter: the iterator
2736 *
2737 * Fine grained QP iterator suitable for use
2738 * with debugfs seq_file mechanisms.
2739 *
2740 * Updates iter->qp with the current QP when the return
2741 * value is 0.
2742 *
2743 * Return: 0 - iter->qp is valid; 1 - no more QPs
2744 */
2745int rvt_qp_iter_next(struct rvt_qp_iter *iter)
2746        __must_hold(RCU)
2747{
2748        int n = iter->n;
2749        int ret = 1;
2750        struct rvt_qp *pqp = iter->qp;
2751        struct rvt_qp *qp;
2752        struct rvt_dev_info *rdi = iter->rdi;
2753
2754        /*
2755         * The approach is to consider the special qps
2756         * as additional table entries before the
2757         * real hash table.  Since the qp code sets
2758         * the qp->next hash link to NULL, this works just fine.
2759         *
2760         * iter->specials is 2 * # ports
2761         *
2762         * n = 0..iter->specials is the special qp indices
2763         *
2764         * n = iter->specials..rdi->qp_dev->qp_table_size+iter->specials are
2765         * the potential hash bucket entries
2766         *
2767         */
2768        for (; n < rdi->qp_dev->qp_table_size + iter->specials; n++) {
2769                if (pqp) {
2770                        qp = rcu_dereference(pqp->next);
2771                } else {
2772                        if (n < iter->specials) {
2773                                struct rvt_ibport *rvp;
2774                                int pidx;
2775
2776                                pidx = n % rdi->ibdev.phys_port_cnt;
2777                                rvp = rdi->ports[pidx];
2778                                qp = rcu_dereference(rvp->qp[n & 1]);
2779                        } else {
2780                                qp = rcu_dereference(
2781                                        rdi->qp_dev->qp_table[
2782                                                (n - iter->specials)]);
2783                        }
2784                }
2785                pqp = qp;
2786                if (qp) {
2787                        iter->qp = qp;
2788                        iter->n = n;
2789                        return 0;
2790                }
2791        }
2792        return ret;
2793}
2794EXPORT_SYMBOL(rvt_qp_iter_next);
2795
2796/**
2797 * rvt_qp_iter - iterate all QPs
2798 * @rdi: rvt devinfo
2799 * @v: a 64-bit value
2800 * @cb: a callback
2801 *
2802 * This provides a way for iterating all QPs.
2803 *
2804 * The @cb is a user-defined callback and @v is a 64-bit
2805 * value passed to and relevant for processing in the
2806 * cb.  An example use case would be to alter QP processing
2807 * based on criteria not part of the rvt_qp.
2808 *
2809 * The code has an internal iterator to simplify
2810 * non seq_file use cases.
2811 */
2812void rvt_qp_iter(struct rvt_dev_info *rdi,
2813                 u64 v,
2814                 void (*cb)(struct rvt_qp *qp, u64 v))
2815{
2816        int ret;
2817        struct rvt_qp_iter i = {
2818                .rdi = rdi,
2819                .specials = rdi->ibdev.phys_port_cnt * 2,
2820                .v = v,
2821                .cb = cb
2822        };
2823
2824        rcu_read_lock();
2825        do {
2826                ret = rvt_qp_iter_next(&i);
2827                if (!ret) {
2828                        rvt_get_qp(i.qp);
2829                        rcu_read_unlock();
2830                        i.cb(i.qp, i.v);
2831                        rcu_read_lock();
2832                        rvt_put_qp(i.qp);
2833                }
2834        } while (!ret);
2835        rcu_read_unlock();
2836}
2837EXPORT_SYMBOL(rvt_qp_iter);
2838
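/*
 * Editorial example (not part of the original file): a hypothetical
 * rvt_qp_iter() callback that counts QPs in a given state, with the
 * state smuggled through the u64 argument.
 */
static atomic_t example_qp_count = ATOMIC_INIT(0);

static void example_count_qps(struct rvt_qp *qp, u64 v)
{
        if (qp->state == (enum ib_qp_state)v)
                atomic_inc(&example_qp_count);
}

/* usage: rvt_qp_iter(rdi, IB_QPS_ERR, example_count_qps); */
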
2839/*
2840 * This should be called with s_lock held.
2841 */
2842void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
2843                       enum ib_wc_status status)
2844{
2845        u32 old_last, last;
2846        struct rvt_dev_info *rdi;
2847
2848        if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
2849                return;
2850        rdi = ib_to_rvt(qp->ibqp.device);
2851
2852        old_last = qp->s_last;
2853        trace_rvt_qp_send_completion(qp, wqe, old_last);
2854        last = rvt_qp_complete_swqe(qp, wqe, rdi->wc_opcode[wqe->wr.opcode],
2855                                    status);
2856        if (qp->s_acked == old_last)
2857                qp->s_acked = last;
2858        if (qp->s_cur == old_last)
2859                qp->s_cur = last;
2860        if (qp->s_tail == old_last)
2861                qp->s_tail = last;
2862        if (qp->state == IB_QPS_SQD && last == qp->s_cur)
2863                qp->s_draining = 0;
2864}
2865EXPORT_SYMBOL(rvt_send_complete);
2866
2867/**
2868 * rvt_copy_sge - copy data to SGE memory
2869 * @qp: associated QP
2870 * @ss: the SGE state
2871 * @data: the data to copy
2872 * @length: the length of the data
2873 * @release: boolean to release MR
2874 * @copy_last: do a separate copy of the last 8 bytes
2875 */
2876void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
2877                  void *data, u32 length,
2878                  bool release, bool copy_last)
2879{
2880        struct rvt_sge *sge = &ss->sge;
2881        int i;
2882        bool in_last = false;
2883        bool cacheless_copy = false;
2884        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2885        struct rvt_wss *wss = rdi->wss;
2886        unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
2887
2888        if (sge_copy_mode == RVT_SGE_COPY_CACHELESS) {
2889                cacheless_copy = length >= PAGE_SIZE;
2890        } else if (sge_copy_mode == RVT_SGE_COPY_ADAPTIVE) {
2891                if (length >= PAGE_SIZE) {
2892                        /*
2893                         * NOTE: this *assumes*:
2894                         * o The first vaddr is the dest.
2895                         * o If multiple pages, then vaddr is sequential.
2896                         */
2897                        wss_insert(wss, sge->vaddr);
2898                        if (length >= (2 * PAGE_SIZE))
2899                                wss_insert(wss, (sge->vaddr + PAGE_SIZE));
2900
2901                        cacheless_copy = wss_exceeds_threshold(wss);
2902                } else {
2903                        wss_advance_clean_counter(wss);
2904                }
2905        }
2906
2907        if (copy_last) {
2908                if (length > 8) {
2909                        length -= 8;
2910                } else {
2911                        copy_last = false;
2912                        in_last = true;
2913                }
2914        }
2915
2916again:
2917        while (length) {
2918                u32 len = rvt_get_sge_length(sge, length);
2919
2920                WARN_ON_ONCE(len == 0);
2921                if (unlikely(in_last)) {
2922                        /* enforce byte transfer ordering */
2923                        for (i = 0; i < len; i++)
2924                                ((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
2925                } else if (cacheless_copy) {
2926                        cacheless_memcpy(sge->vaddr, data, len);
2927                } else {
2928                        memcpy(sge->vaddr, data, len);
2929                }
2930                rvt_update_sge(ss, len, release);
2931                data += len;
2932                length -= len;
2933        }
2934
2935        if (copy_last) {
2936                copy_last = false;
2937                in_last = true;
2938                length = 8;
2939                goto again;
2940        }
2941}
2942EXPORT_SYMBOL(rvt_copy_sge);
2943
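/*
 * Editorial note: deferring the final 8 bytes and copying them byte by
 * byte keeps the tail of an RDMA WRITE payload from becoming visible
 * before the rest of the data, which matters to receivers that poll the
 * last word of a buffer to detect completion.
 */
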
2944static enum ib_wc_status loopback_qp_drop(struct rvt_ibport *rvp,
2945                                          struct rvt_qp *sqp)
2946{
2947        rvp->n_pkt_drops++;
2948        /*
2949         * For RC, the requester would timeout and retry so
2950         * shortcut the timeouts and just signal too many retries.
2951         */
2952        return sqp->ibqp.qp_type == IB_QPT_RC ?
2953                IB_WC_RETRY_EXC_ERR : IB_WC_SUCCESS;
2954}
2955
2956/**
2957 * rvt_ruc_loopback - handle UC and RC loopback requests
2958 * @sqp: the sending QP
2959 *
2960 * This is called from rvt_do_send() to forward a WQE addressed to the same HFI.
2961 * Note that although we are single threaded due to the send engine, we still
2962 * have to protect against post_send().  We don't have to worry about
2963 * receive interrupts since this is a connected protocol and all packets
2964 * will pass through here.
2965 */
2966void rvt_ruc_loopback(struct rvt_qp *sqp)
2967{
2968        struct rvt_ibport *rvp = NULL;
2969        struct rvt_dev_info *rdi = ib_to_rvt(sqp->ibqp.device);
2970        struct rvt_qp *qp;
2971        struct rvt_swqe *wqe;
2972        struct rvt_sge *sge;
2973        unsigned long flags;
2974        struct ib_wc wc;
2975        u64 sdata;
2976        atomic64_t *maddr;
2977        enum ib_wc_status send_status;
2978        bool release;
2979        int ret;
2980        bool copy_last = false;
2981        int local_ops = 0;
2982
2983        rcu_read_lock();
2984        rvp = rdi->ports[sqp->port_num - 1];
2985
2986        /*
2987         * Note that we check the responder QP state after
2988         * checking the requester's state.
2989         */
2990
2991        qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), rvp,
2992                            sqp->remote_qpn);
2993
2994        spin_lock_irqsave(&sqp->s_lock, flags);
2995
2996        /* Return if we are already busy processing a work request. */
2997        if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
2998            !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
2999                goto unlock;
3000
3001        sqp->s_flags |= RVT_S_BUSY;
3002
3003again:
3004        if (sqp->s_last == READ_ONCE(sqp->s_head))
3005                goto clr_busy;
3006        wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
3007
3008        /* Return if it is not OK to start a new work request. */
3009        if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
3010                if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
3011                        goto clr_busy;
3012                /* We are in the error state, flush the work request. */
3013                send_status = IB_WC_WR_FLUSH_ERR;
3014                goto flush_send;
3015        }
3016
3017        /*
3018         * The entry cannot change out from under us, even without the
3019         * s_lock held, until we update s_last.
3020         * We increment s_cur to indicate s_last is in progress.
3021         */
3022        if (sqp->s_last == sqp->s_cur) {
3023                if (++sqp->s_cur >= sqp->s_size)
3024                        sqp->s_cur = 0;
3025        }
3026        spin_unlock_irqrestore(&sqp->s_lock, flags);
3027
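        /*
         * qp was looked up under RCU above; a NULL here means the
         * destination QPN no longer exists, so treat the WQE like a
         * packet dropped on the wire.
         */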
3028        if (!qp) {
3029                send_status = loopback_qp_drop(rvp, sqp);
3030                goto serr_no_r_lock;
3031        }
3032        spin_lock_irqsave(&qp->r_lock, flags);
3033        if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
3034            qp->ibqp.qp_type != sqp->ibqp.qp_type) {
3035                send_status = loopback_qp_drop(rvp, sqp);
3036                goto serr;
3037        }
3038
3039        memset(&wc, 0, sizeof(wc));
3040        send_status = IB_WC_SUCCESS;
3041
3042        release = true;
3043        sqp->s_sge.sge = wqe->sg_list[0];
3044        sqp->s_sge.sg_list = wqe->sg_list + 1;
3045        sqp->s_sge.num_sge = wqe->wr.num_sge;
3046        sqp->s_len = wqe->length;
3047        switch (wqe->wr.opcode) {
3048        case IB_WR_REG_MR:
3049                goto send_comp;
3050
3051        case IB_WR_LOCAL_INV:
3052                if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
3053                        if (rvt_invalidate_rkey(sqp,
3054                                                wqe->wr.ex.invalidate_rkey))
3055                                send_status = IB_WC_LOC_PROT_ERR;
3056                        local_ops = 1;
3057                }
3058                goto send_comp;
3059
3060        case IB_WR_SEND_WITH_INV:
3061        case IB_WR_SEND_WITH_IMM:
3062        case IB_WR_SEND:
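                /*
                 * rvt_get_rwqe() returns < 0 on error, 0 when no receive
                 * WQE is available (leading to an RNR NAK), and > 0 when
                 * a receive WQE has been consumed.
                 */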
3063                ret = rvt_get_rwqe(qp, false);
3064                if (ret < 0)
3065                        goto op_err;
3066                if (!ret)
3067                        goto rnr_nak;
3068                if (wqe->length > qp->r_len)
3069                        goto inv_err;
3070                switch (wqe->wr.opcode) {
3071                case IB_WR_SEND_WITH_INV:
3072                        if (!rvt_invalidate_rkey(qp,
3073                                                 wqe->wr.ex.invalidate_rkey)) {
3074                                wc.wc_flags = IB_WC_WITH_INVALIDATE;
3075                                wc.ex.invalidate_rkey =
3076                                        wqe->wr.ex.invalidate_rkey;
3077                        }
3078                        break;
3079                case IB_WR_SEND_WITH_IMM:
3080                        wc.wc_flags = IB_WC_WITH_IMM;
3081                        wc.ex.imm_data = wqe->wr.ex.imm_data;
3082                        break;
3083                default:
3084                        break;
3085                }
3086                break;
3087
3088        case IB_WR_RDMA_WRITE_WITH_IMM:
3089                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
3090                        goto inv_err;
3091                wc.wc_flags = IB_WC_WITH_IMM;
3092                wc.ex.imm_data = wqe->wr.ex.imm_data;
3093                ret = rvt_get_rwqe(qp, true);
3094                if (ret < 0)
3095                        goto op_err;
3096                if (!ret)
3097                        goto rnr_nak;
3098                /* skip the copy_last setup and qp_access_flags recheck */
3099                goto do_write;
3100        case IB_WR_RDMA_WRITE:
3101                copy_last = rvt_is_user_qp(qp);
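                /*
                 * Only user QPs need the ordered tail copy; a user-space
                 * consumer may poll the last word of the payload to
                 * detect completion.
                 */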
3102                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
3103                        goto inv_err;
3104do_write:
3105                if (wqe->length == 0)
3106                        break;
3107                if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
3108                                          wqe->rdma_wr.remote_addr,
3109                                          wqe->rdma_wr.rkey,
3110                                          IB_ACCESS_REMOTE_WRITE)))
3111                        goto acc_err;
3112                qp->r_sge.sg_list = NULL;
3113                qp->r_sge.num_sge = 1;
3114                qp->r_sge.total_len = wqe->length;
3115                break;
3116
3117        case IB_WR_RDMA_READ:
3118                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
3119                        goto inv_err;
3120                if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
3121                                          wqe->rdma_wr.remote_addr,
3122                                          wqe->rdma_wr.rkey,
3123                                          IB_ACCESS_REMOTE_READ)))
3124                        goto acc_err;
3125                release = false;
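                /*
                 * Roles reverse for a READ: sqp->s_sge now maps the
                 * responder's memory (the source) while qp->r_sge maps
                 * the requester's local sink, so the MR references are
                 * dropped on the s_sge side instead.
                 */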
3126                sqp->s_sge.sg_list = NULL;
3127                sqp->s_sge.num_sge = 1;
3128                qp->r_sge.sge = wqe->sg_list[0];
3129                qp->r_sge.sg_list = wqe->sg_list + 1;
3130                qp->r_sge.num_sge = wqe->wr.num_sge;
3131                qp->r_sge.total_len = wqe->length;
3132                break;
3133
3134        case IB_WR_ATOMIC_CMP_AND_SWP:
3135        case IB_WR_ATOMIC_FETCH_AND_ADD:
3136                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
3137                        goto inv_err;
3138                if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
3139                                          wqe->atomic_wr.remote_addr,
3140                                          wqe->atomic_wr.rkey,
3141                                          IB_ACCESS_REMOTE_ATOMIC)))
3142                        goto acc_err;
3143                /* Perform atomic OP and save result. */
3144                maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
3145                sdata = wqe->atomic_wr.compare_add;
3146                *(u64 *)sqp->s_sge.sge.vaddr =
3147                        (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
3148                        (u64)atomic64_add_return(sdata, maddr) - sdata :
3149                        (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
3150                                      sdata, wqe->atomic_wr.swap);
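                /*
                 * Either way the requester receives the value that was at
                 * the target before the operation: atomic64_add_return()
                 * yields the post-add value, so sdata is subtracted back
                 * out, while cmpxchg() returns the prior contents directly.
                 */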
3151                rvt_put_mr(qp->r_sge.sge.mr);
3152                qp->r_sge.num_sge = 0;
3153                goto send_comp;
3154
3155        default:
3156                send_status = IB_WC_LOC_QP_OP_ERR;
3157                goto serr;
3158        }
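        /*
         * Common data motion: walk the sender's SGE list and deposit the
         * payload through the responder's r_sge mapping built above.
         */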
3159
3160        sge = &sqp->s_sge.sge;
3161        while (sqp->s_len) {
3162                u32 len = rvt_get_sge_length(sge, sqp->s_len);
3163
3164                WARN_ON_ONCE(len == 0);
3165                rvt_copy_sge(qp, &qp->r_sge, sge->vaddr,
3166                             len, release, copy_last);
3167                rvt_update_sge(&sqp->s_sge, len, !release);
3168                sqp->s_len -= len;
3169        }
3170        if (release)
3171                rvt_put_ss(&qp->r_sge);
3172
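        /*
         * Only opcodes that consumed a receive WQE (the sends and RDMA
         * WRITE with immediate) leave RVT_R_WRID_VALID set and generate
         * a receive completion below.
         */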
3173        if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
3174                goto send_comp;
3175
3176        if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
3177                wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
3178        else
3179                wc.opcode = IB_WC_RECV;
3180        wc.wr_id = qp->r_wr_id;
3181        wc.status = IB_WC_SUCCESS;
3182        wc.byte_len = wqe->length;
3183        wc.qp = &qp->ibqp;
3184        wc.src_qp = qp->remote_qpn;
3185        wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
3186        wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
3187        wc.port_num = 1;
3188        /* Signal completion event if the solicited bit is set. */
3189        rvt_recv_cq(qp, &wc, wqe->wr.send_flags & IB_SEND_SOLICITED);
3190
3191send_comp:
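        /*
         * The responder's r_lock and the requester's s_lock are never
         * held at the same time, avoiding AB-BA deadlock when two QPs
         * loop packets back at each other.
         */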
3192        spin_unlock_irqrestore(&qp->r_lock, flags);
3193        spin_lock_irqsave(&sqp->s_lock, flags);
3194        rvp->n_loop_pkts++;
3195flush_send:
3196        sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
3197        rvt_send_complete(sqp, wqe, send_status);
3198        if (local_ops) {
3199                atomic_dec(&sqp->local_ops_pending);
3200                local_ops = 0;
3201        }
3202        goto again;
3203
3204rnr_nak:
3205        /* Handle RNR NAK */
3206        if (qp->ibqp.qp_type == IB_QPT_UC)
3207                goto send_comp;
3208        rvp->n_rnr_naks++;
3209        /*
3210         * Note: we don't need the s_lock held since the BUSY flag
3211         * makes this single-threaded.
3212         */
3213        if (sqp->s_rnr_retry == 0) {
3214                send_status = IB_WC_RNR_RETRY_EXC_ERR;
3215                goto serr;
3216        }
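        /*
         * An RNR retry count of 7 means "retry indefinitely" in IBTA
         * terms, so the counter is only decremented for finite counts.
         */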
3217        if (sqp->s_rnr_retry_cnt < 7)
3218                sqp->s_rnr_retry--;
3219        spin_unlock_irqrestore(&qp->r_lock, flags);
3220        spin_lock_irqsave(&sqp->s_lock, flags);
3221        if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
3222                goto clr_busy;
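        /*
         * Build a synthetic AETH value: the 5-bit RNR timeout code is
         * placed in the credit field, which rvt_add_rnr_timer() decodes
         * back into a delay.
         */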
3223        rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
3224                                IB_AETH_CREDIT_SHIFT);
3225        goto clr_busy;
3226
3227op_err:
3228        send_status = IB_WC_REM_OP_ERR;
3229        wc.status = IB_WC_LOC_QP_OP_ERR;
3230        goto err;
3231
3232inv_err:
3233        send_status =
3234                sqp->ibqp.qp_type == IB_QPT_RC ?
3235                        IB_WC_REM_INV_REQ_ERR :
3236                        IB_WC_SUCCESS;
3237        wc.status = IB_WC_LOC_QP_OP_ERR;
3238        goto err;
3239
3240acc_err:
3241        send_status = IB_WC_REM_ACCESS_ERR;
3242        wc.status = IB_WC_LOC_PROT_ERR;
3243err:
3244        /* the responder QP goes to the error state */
3245        rvt_rc_error(qp, wc.status);
3246
3247serr:
3248        spin_unlock_irqrestore(&qp->r_lock, flags);
3249serr_no_r_lock:
3250        spin_lock_irqsave(&sqp->s_lock, flags);
3251        rvt_send_complete(sqp, wqe, send_status);
3252        if (sqp->ibqp.qp_type == IB_QPT_RC) {
3253                int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
3254
3255                sqp->s_flags &= ~RVT_S_BUSY;
3256                spin_unlock_irqrestore(&sqp->s_lock, flags);
3257                if (lastwqe) {
3258                        struct ib_event ev;
3259
3260                        ev.device = sqp->ibqp.device;
3261                        ev.element.qp = &sqp->ibqp;
3262                        ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
3263                        sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
3264                }
3265                goto done;
3266        }
3267clr_busy:
3268        sqp->s_flags &= ~RVT_S_BUSY;
3269unlock:
3270        spin_unlock_irqrestore(&sqp->s_lock, flags);
3271done:
3272        rcu_read_unlock();
3273}
3274EXPORT_SYMBOL(rvt_ruc_loopback);
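/*
 * Illustrative sketch, hypothetical and not part of rdmavt: how a
 * driver's send engine might route a WQE to rvt_ruc_loopback().  Real
 * drivers (e.g. hfi1) compare the AH's destination LID against the
 * local port's LID before building a packet; sketch_dlid_is_local()
 * stands in for that driver-specific check.
 */
static bool sketch_dlid_is_local(struct rvt_qp *qp)
{
        /* driver-specific: does the DLID resolve to this port? */
        return false;
}

static void sketch_do_send(struct rvt_qp *qp)
{
        if (sketch_dlid_is_local(qp)) {
                /* same device: skip header building and DMA entirely */
                rvt_ruc_loopback(qp);
                return;
        }
        /* otherwise build packet headers and hand off to the hardware */
}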
3275