linux/drivers/infiniband/sw/rdmavt/srq.c
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2016 Intel Corporation.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "srq.h"
#include "vt.h"
#include "qp.h"

/**
 * rvt_driver_srq_init - init srq resources on a per driver basis
 * @rdi: rvt dev structure
 *
 * Do any initialization needed when a driver registers with rdmavt.
 */
void rvt_driver_srq_init(struct rvt_dev_info *rdi)
{
        spin_lock_init(&rdi->n_srqs_lock);
        rdi->n_srqs_allocated = 0;
}

/**
 * rvt_create_srq - create a shared receive queue
 * @ibsrq: the SRQ to create
 * @srq_init_attr: the attributes of the SRQ
 * @udata: data from libibverbs when creating a user SRQ
 *
 * Return: 0 on success
 */
int rvt_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *srq_init_attr,
                   struct ib_udata *udata)
{
        struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
        struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
        u32 sz;
        int ret;

        if (srq_init_attr->srq_type != IB_SRQT_BASIC)
                return -EOPNOTSUPP;

        if (srq_init_attr->attr.max_sge == 0 ||
            srq_init_attr->attr.max_sge > dev->dparms.props.max_srq_sge ||
            srq_init_attr->attr.max_wr == 0 ||
            srq_init_attr->attr.max_wr > dev->dparms.props.max_srq_wr)
                return -EINVAL;

        /*
         * Need to use vmalloc() if we want to support large #s of entries.
         */
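        /*
         * One extra slot is allocated so that a full ring can be told
         * apart from an empty one (head == tail means empty).  Each
         * RWQE is a struct rvt_rwqe followed by max_sge SGEs, which is
         * what the per-entry size below accounts for.
         */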
        srq->rq.size = srq_init_attr->attr.max_wr + 1;
        srq->rq.max_sge = srq_init_attr->attr.max_sge;
        sz = sizeof(struct ib_sge) * srq->rq.max_sge +
                sizeof(struct rvt_rwqe);
        if (rvt_alloc_rq(&srq->rq, srq->rq.size * sz,
                         dev->dparms.node, udata)) {
                ret = -ENOMEM;
                goto bail_srq;
        }

        /*
         * Return the address of the RWQ as the offset to mmap.
         * See rvt_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;

                srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq);
                if (IS_ERR(srq->ip)) {
                        ret = PTR_ERR(srq->ip);
                        goto bail_wq;
                }

                ret = ib_copy_to_udata(udata, &srq->ip->offset,
                                       sizeof(srq->ip->offset));
                if (ret)
                        goto bail_ip;
        }

        /*
         * ib_create_srq() will initialize srq->ibsrq.
         */
        spin_lock_init(&srq->rq.lock);
        srq->limit = srq_init_attr->attr.srq_limit;

        spin_lock(&dev->n_srqs_lock);
        if (dev->n_srqs_allocated == dev->dparms.props.max_srq) {
                spin_unlock(&dev->n_srqs_lock);
                ret = -ENOMEM;
                goto bail_ip;
        }

        dev->n_srqs_allocated++;
        spin_unlock(&dev->n_srqs_lock);

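        /*
         * Queue the mapping info so rvt_mmap() can find it when user
         * space mmaps the offset returned above.
         */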
        if (srq->ip) {
                spin_lock_irq(&dev->pending_lock);
                list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
                spin_unlock_irq(&dev->pending_lock);
        }

        return 0;

bail_ip:
        kfree(srq->ip);
bail_wq:
        rvt_free_rq(&srq->rq);
bail_srq:
        return ret;
}

/**
 * rvt_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Return: 0 on success
 */
int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                   enum ib_srq_attr_mask attr_mask,
                   struct ib_udata *udata)
{
        struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
        struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
        struct rvt_rq tmp_rq = {};
        int ret = 0;

        if (attr_mask & IB_SRQ_MAX_WR) {
                struct rvt_krwq *okwq = NULL;
                struct rvt_rwq *owq = NULL;
                struct rvt_rwqe *p;
                u32 sz, size, n, head, tail;

                /* Check that the requested sizes are below the limits. */
                if ((attr->max_wr > dev->dparms.props.max_srq_wr) ||
                    ((attr_mask & IB_SRQ_LIMIT) ?
                     attr->srq_limit : srq->limit) > attr->max_wr)
                        return -EINVAL;
                sz = sizeof(struct rvt_rwqe) +
                        srq->rq.max_sge * sizeof(struct ib_sge);
                size = attr->max_wr + 1;
                if (rvt_alloc_rq(&tmp_rq, size * sz, dev->dparms.node,
                                 udata))
                        return -ENOMEM;
                /* Check that we can write the offset to mmap. */
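                /*
                 * The caller passes the user address at which the new
                 * mmap offset should be returned; redirect udata->outbuf
                 * to it and write a zero now so a bad address is caught
                 * before the queue is resized.
                 */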
                if (udata && udata->inlen >= sizeof(__u64)) {
                        __u64 offset_addr;
                        __u64 offset = 0;

                        ret = ib_copy_from_udata(&offset_addr, udata,
                                                 sizeof(offset_addr));
                        if (ret)
                                goto bail_free;
                        udata->outbuf = (void __user *)
                                        (unsigned long)offset_addr;
                        ret = ib_copy_to_udata(udata, &offset,
                                               sizeof(offset));
                        if (ret)
                                goto bail_free;
                }

                spin_lock_irq(&srq->rq.kwq->c_lock);
                /*
                 * validate head and tail pointer values and compute
                 * the number of remaining WQEs.
                 */
                if (udata) {
                        owq = srq->rq.wq;
                        head = RDMA_READ_UAPI_ATOMIC(owq->head);
                        tail = RDMA_READ_UAPI_ATOMIC(owq->tail);
                } else {
                        okwq = srq->rq.kwq;
                        head = okwq->head;
                        tail = okwq->tail;
                }
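                /*
                 * For user SRQs head/tail live in memory shared with
                 * user space, so sanity check them before use.
                 */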
                if (head >= srq->rq.size || tail >= srq->rq.size) {
                        ret = -EINVAL;
                        goto bail_unlock;
                }
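                /*
                 * n ends up as the number of WQEs still outstanding; the
                 * new ring must be strictly larger since one slot always
                 * stays unused.
                 */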
                n = head;
                if (n < tail)
                        n += srq->rq.size - tail;
                else
                        n -= tail;
                if (size <= n) {
                        ret = -EINVAL;
                        goto bail_unlock;
                }
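                /* Copy the outstanding WQEs into the start of the new ring. */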
                n = 0;
                p = tmp_rq.kwq->curr_wq;
                while (tail != head) {
                        struct rvt_rwqe *wqe;
                        int i;

                        wqe = rvt_get_rwqe_ptr(&srq->rq, tail);
                        p->wr_id = wqe->wr_id;
                        p->num_sge = wqe->num_sge;
                        for (i = 0; i < wqe->num_sge; i++)
                                p->sg_list[i] = wqe->sg_list[i];
                        n++;
                        p = (struct rvt_rwqe *)((char *)p + sz);
                        if (++tail >= srq->rq.size)
                                tail = 0;
                }
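                /*
                 * Install the new ring and publish its indices: head is
                 * the number of copied WQEs, tail restarts at zero.
                 */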
                srq->rq.kwq = tmp_rq.kwq;
                if (udata) {
                        srq->rq.wq = tmp_rq.wq;
                        RDMA_WRITE_UAPI_ATOMIC(tmp_rq.wq->head, n);
                        RDMA_WRITE_UAPI_ATOMIC(tmp_rq.wq->tail, 0);
                } else {
                        tmp_rq.kwq->head = n;
                        tmp_rq.kwq->tail = 0;
                }
                srq->rq.size = size;
                if (attr_mask & IB_SRQ_LIMIT)
                        srq->limit = attr->srq_limit;
                spin_unlock_irq(&srq->rq.kwq->c_lock);

                vfree(owq);
                kvfree(okwq);

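                /*
                 * For a user SRQ the mmap info must be updated to
                 * describe the new, resized buffer.
                 */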
                if (srq->ip) {
                        struct rvt_mmap_info *ip = srq->ip;
                        struct rvt_dev_info *dev = ib_to_rvt(srq->ibsrq.device);
                        u32 s = sizeof(struct rvt_rwq) + size * sz;

                        rvt_update_mmap_info(dev, ip, s, tmp_rq.wq);

                        /*
                         * Return the offset to mmap.
                         * See rvt_mmap() for details.
                         */
                        if (udata && udata->inlen >= sizeof(__u64)) {
                                ret = ib_copy_to_udata(udata, &ip->offset,
                                                       sizeof(ip->offset));
                                if (ret)
                                        return ret;
                        }

                        /*
                         * Put user mapping info onto the pending list
                         * unless it already is on the list.
                         */
                        spin_lock_irq(&dev->pending_lock);
                        if (list_empty(&ip->pending_mmaps))
                                list_add(&ip->pending_mmaps,
                                         &dev->pending_mmaps);
                        spin_unlock_irq(&dev->pending_lock);
                }
        } else if (attr_mask & IB_SRQ_LIMIT) {
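                /*
                 * Only the limit is changing; it arms the SRQ
                 * low-watermark event and must fit within the ring.
                 */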
                spin_lock_irq(&srq->rq.kwq->c_lock);
                if (attr->srq_limit >= srq->rq.size)
                        ret = -EINVAL;
                else
                        srq->limit = attr->srq_limit;
                spin_unlock_irq(&srq->rq.kwq->c_lock);
        }
        return ret;

bail_unlock:
        spin_unlock_irq(&srq->rq.kwq->c_lock);
bail_free:
        rvt_free_rq(&tmp_rq);
        return ret;
}

/**
 * rvt_query_srq - query srq data
 * @ibsrq: srq to query
 * @attr: return info in attr
 *
 * Return: always 0
 */
int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
        struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);

        attr->max_wr = srq->rq.size - 1;
        attr->max_sge = srq->rq.max_sge;
        attr->srq_limit = srq->limit;
        return 0;
}

/**
 * rvt_destroy_srq - destroy an srq
 * @ibsrq: srq object to destroy
 * @udata: user data for libibverbs.so
 */
int rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
        struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
        struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);

        spin_lock(&dev->n_srqs_lock);
        dev->n_srqs_allocated--;
        spin_unlock(&dev->n_srqs_lock);
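        /*
         * Drop our reference on the user mapping, if any; the final
         * kref_put() frees the mmap info along with the RWQ buffer.
         */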
        if (srq->ip)
                kref_put(&srq->ip->ref, rvt_release_mmap_info);
        kvfree(srq->rq.kwq);
        return 0;
}