linux/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
/*
 * Copyright (c) 2016-2017 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm/page.h>
#include <linux/io.h>
#include <linux/wait.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>

#include "pvrdma.h"

/**
 * pvrdma_query_srq - query shared receive queue
 * @ibsrq: the shared receive queue to query
 * @srq_attr: attributes to query and return to client
 *
 * @return: 0 for success, otherwise returns an errno.
 */
int pvrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
        struct pvrdma_dev *dev = to_vdev(ibsrq->device);
        struct pvrdma_srq *srq = to_vsrq(ibsrq);
        union pvrdma_cmd_req req;
        union pvrdma_cmd_resp rsp;
        struct pvrdma_cmd_query_srq *cmd = &req.query_srq;
        struct pvrdma_cmd_query_srq_resp *resp = &rsp.query_srq_resp;
        int ret;

        memset(cmd, 0, sizeof(*cmd));
        cmd->hdr.cmd = PVRDMA_CMD_QUERY_SRQ;
        cmd->srq_handle = srq->srq_handle;

        ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_SRQ_RESP);
        if (ret < 0) {
                dev_warn(&dev->pdev->dev,
                         "could not query shared receive queue, error: %d\n",
                         ret);
                return -EINVAL;
        }

        srq_attr->srq_limit = resp->attrs.srq_limit;
        srq_attr->max_wr = resp->attrs.max_wr;
        srq_attr->max_sge = resp->attrs.max_sge;

        return 0;
}

/**
 * pvrdma_create_srq - create shared receive queue
 * @ibsrq: the IB shared receive queue to create
 * @init_attr: shared receive queue attributes
 * @udata: user data
 *
 * @return: 0 on success, otherwise returns an errno.
 */
int pvrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
                      struct ib_udata *udata)
{
        struct pvrdma_srq *srq = to_vsrq(ibsrq);
        struct pvrdma_dev *dev = to_vdev(ibsrq->device);
        union pvrdma_cmd_req req;
        union pvrdma_cmd_resp rsp;
        struct pvrdma_cmd_create_srq *cmd = &req.create_srq;
        struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp;
        struct pvrdma_create_srq_resp srq_resp = {};
        struct pvrdma_create_srq ucmd;
        unsigned long flags;
        int ret;

        if (!udata) {
                /* No support for kernel clients. */
                dev_warn(&dev->pdev->dev,
                         "no shared receive queue support for kernel client\n");
                return -EOPNOTSUPP;
        }

        if (init_attr->srq_type != IB_SRQT_BASIC) {
                dev_warn(&dev->pdev->dev,
                         "shared receive queue type %d not supported\n",
                         init_attr->srq_type);
                return -EINVAL;
        }

        if (init_attr->attr.max_wr  > dev->dsr->caps.max_srq_wr ||
            init_attr->attr.max_sge > dev->dsr->caps.max_srq_sge) {
                dev_warn(&dev->pdev->dev,
                         "shared receive queue size invalid\n");
                return -EINVAL;
        }

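        /* Reserve an SRQ slot, bounded by the device's max_srq capability. */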
        if (!atomic_add_unless(&dev->num_srqs, 1, dev->dsr->caps.max_srq))
                return -ENOMEM;

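        /*
         * The SRQ starts with one reference held for its lifetime; the
         * device's asynchronous event path takes its own while dispatching,
         * and pvrdma_free_srq() waits on srq->free until all are dropped.
         */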
        spin_lock_init(&srq->lock);
        refcount_set(&srq->refcnt, 1);
        init_completion(&srq->free);

        dev_dbg(&dev->pdev->dev,
                "create shared receive queue from user space\n");

        if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
                ret = -EFAULT;
                goto err_srq;
        }

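        /* Pin and map the userspace buffer backing the SRQ ring. */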
        srq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size, 0, 0);
        if (IS_ERR(srq->umem)) {
                ret = PTR_ERR(srq->umem);
                goto err_srq;
        }

        srq->npages = ib_umem_page_count(srq->umem);

        if (srq->npages < 0 || srq->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
                dev_warn(&dev->pdev->dev,
                         "overflow pages in shared receive queue\n");
                ret = -EINVAL;
                goto err_umem;
        }

        ret = pvrdma_page_dir_init(dev, &srq->pdir, srq->npages, false);
        if (ret) {
                dev_warn(&dev->pdev->dev,
                         "could not allocate page directory\n");
                goto err_umem;
        }

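        /*
         * Write the DMA addresses of the pinned pages into the page
         * directory so the device can reach the ring buffer.
         */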
        pvrdma_page_dir_insert_umem(&srq->pdir, srq->umem, 0);

        memset(cmd, 0, sizeof(*cmd));
        cmd->hdr.cmd = PVRDMA_CMD_CREATE_SRQ;
        cmd->srq_type = init_attr->srq_type;
        cmd->nchunks = srq->npages;
        cmd->pd_handle = to_vpd(ibsrq->pd)->pd_handle;
        cmd->attrs.max_wr = init_attr->attr.max_wr;
        cmd->attrs.max_sge = init_attr->attr.max_sge;
        cmd->attrs.srq_limit = init_attr->attr.srq_limit;
        cmd->pdir_dma = srq->pdir.dir_dma;

        ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_SRQ_RESP);
        if (ret < 0) {
                dev_warn(&dev->pdev->dev,
                         "could not create shared receive queue, error: %d\n",
                         ret);
                goto err_page_dir;
        }

        srq->srq_handle = resp->srqn;
        srq_resp.srqn = resp->srqn;
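        /*
         * Publish the new SRQ in the handle table used by the event
         * path; entries are indexed by handle modulo max_srq.
         */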
        spin_lock_irqsave(&dev->srq_tbl_lock, flags);
        dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq;
        spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

        /* Copy udata back. */
        if (ib_copy_to_udata(udata, &srq_resp, sizeof(srq_resp))) {
                dev_warn(&dev->pdev->dev, "failed to copy back udata\n");
                pvrdma_destroy_srq(&srq->ibsrq, udata);
                return -EINVAL;
        }

        return 0;

err_page_dir:
        pvrdma_page_dir_cleanup(dev, &srq->pdir);
err_umem:
        ib_umem_release(srq->umem);
err_srq:
        atomic_dec(&dev->num_srqs);

        return ret;
}

static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->srq_tbl_lock, flags);
        dev->srq_tbl[srq->srq_handle] = NULL;
        spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

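        /*
         * Drop the initial reference and wait for any concurrent event
         * handlers to release theirs before freeing the SRQ.
         */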
        if (refcount_dec_and_test(&srq->refcnt))
                complete(&srq->free);
        wait_for_completion(&srq->free);

        /* There is no support for kernel clients, so this is safe. */
        ib_umem_release(srq->umem);

        pvrdma_page_dir_cleanup(dev, &srq->pdir);

        kfree(srq);

        atomic_dec(&dev->num_srqs);
}

/**
 * pvrdma_destroy_srq - destroy shared receive queue
 * @srq: the shared receive queue to destroy
 * @udata: user data or null for kernel object
 */
void pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
        struct pvrdma_srq *vsrq = to_vsrq(srq);
        union pvrdma_cmd_req req;
        struct pvrdma_cmd_destroy_srq *cmd = &req.destroy_srq;
        struct pvrdma_dev *dev = to_vdev(srq->device);
        int ret;

        memset(cmd, 0, sizeof(*cmd));
        cmd->hdr.cmd = PVRDMA_CMD_DESTROY_SRQ;
        cmd->srq_handle = vsrq->srq_handle;

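        /* Ask the device to destroy the SRQ before releasing host state. */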
        ret = pvrdma_cmd_post(dev, &req, NULL, 0);
        if (ret < 0)
                dev_warn(&dev->pdev->dev,
                         "destroy shared receive queue failed, error: %d\n",
                         ret);

        pvrdma_free_srq(dev, vsrq);
}

/**
 * pvrdma_modify_srq - modify shared receive queue attributes
 * @ibsrq: the shared receive queue to modify
 * @attr: the shared receive queue's new attributes
 * @attr_mask: attributes mask
 * @udata: user data
 *
 * @return: 0 on success, otherwise returns an errno.
 */
int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                      enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
        struct pvrdma_srq *vsrq = to_vsrq(ibsrq);
        union pvrdma_cmd_req req;
        struct pvrdma_cmd_modify_srq *cmd = &req.modify_srq;
        struct pvrdma_dev *dev = to_vdev(ibsrq->device);
        int ret;

        /* Only support SRQ limit. */
        if (!(attr_mask & IB_SRQ_LIMIT))
                return -EINVAL;

        memset(cmd, 0, sizeof(*cmd));
        cmd->hdr.cmd = PVRDMA_CMD_MODIFY_SRQ;
        cmd->srq_handle = vsrq->srq_handle;
        cmd->attrs.srq_limit = attr->srq_limit;
        cmd->attr_mask = attr_mask;

        ret = pvrdma_cmd_post(dev, &req, NULL, 0);
        if (ret < 0) {
                dev_warn(&dev->pdev->dev,
                         "could not modify shared receive queue, error: %d\n",
                         ret);

                return -EINVAL;
        }

        return ret;
}