linux/drivers/infiniband/core/uverbs_cmd.c
/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <linux/uaccess.h>

#include <rdma/uverbs_types.h>
#include <rdma/uverbs_std_types.h>
#include "rdma_core.h"

#include "uverbs.h"
#include "core_priv.h"

/*
 * Copy a response to userspace. If the provided 'resp' is larger than the
 * user buffer it is silently truncated. If the user provided a larger buffer
 * then the trailing portion is zero filled.
 *
 * These semantics are intended to support future extension of the output
 * structures.
 */
static int uverbs_response(struct uverbs_attr_bundle *attrs, const void *resp,
                           size_t resp_len)
{
        int ret;

        if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
                return uverbs_copy_to_struct_or_zero(
                        attrs, UVERBS_ATTR_CORE_OUT, resp, resp_len);

        if (copy_to_user(attrs->ucore.outbuf, resp,
                         min(attrs->ucore.outlen, resp_len)))
                return -EFAULT;

        if (resp_len < attrs->ucore.outlen) {
                /*
                 * Zero fill any extra memory that user
                 * space might have provided.
                 */
                ret = clear_user(attrs->ucore.outbuf + resp_len,
                                 attrs->ucore.outlen - resp_len);
                if (ret)
                        return -EFAULT;
        }

        return 0;
}

/*
 * Copy a request from userspace. If the provided 'req' is larger than the
 * user buffer then the user buffer is zero extended into the 'req'. If 'req'
 * is smaller than the user buffer then the uncopied bytes in the user buffer
 * must be zero.
 */
static int uverbs_request(struct uverbs_attr_bundle *attrs, void *req,
                          size_t req_len)
{
        if (copy_from_user(req, attrs->ucore.inbuf,
                           min(attrs->ucore.inlen, req_len)))
                return -EFAULT;

        if (attrs->ucore.inlen < req_len) {
                memset(req + attrs->ucore.inlen, 0,
                       req_len - attrs->ucore.inlen);
        } else if (attrs->ucore.inlen > req_len) {
                if (!ib_is_buffer_cleared(attrs->ucore.inbuf + req_len,
                                          attrs->ucore.inlen - req_len))
                        return -EOPNOTSUPP;
        }
        return 0;
}

/*
 * Generate the value for the 'response_length' protocol used by write_ex.
 * This is the number of bytes the kernel actually wrote. Userspace can use
 * this to detect what structure members in the response the kernel
 * understood.
 */
static u32 uverbs_response_length(struct uverbs_attr_bundle *attrs,
                                  size_t resp_len)
{
        return min_t(size_t, attrs->ucore.outlen, resp_len);
}

/*
 * The iterator version of the request interface is for handlers that need to
 * step over a flex array at the end of a command header.
 */
struct uverbs_req_iter {
        const void __user *cur;
        const void __user *end;
};

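/*
 * Copy the fixed command header and position the iterator at the start of
 * the trailing flex array. Fails with -ENOSPC if the user buffer cannot
 * hold even the fixed portion.
 */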
static int uverbs_request_start(struct uverbs_attr_bundle *attrs,
                                struct uverbs_req_iter *iter,
                                void *req,
                                size_t req_len)
{
        if (attrs->ucore.inlen < req_len)
                return -ENOSPC;

        if (copy_from_user(req, attrs->ucore.inbuf, req_len))
                return -EFAULT;

        iter->cur = attrs->ucore.inbuf + req_len;
        iter->end = attrs->ucore.inbuf + attrs->ucore.inlen;
        return 0;
}

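/* Copy the next 'len' bytes of the flex array and advance the iterator. */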
static int uverbs_request_next(struct uverbs_req_iter *iter, void *val,
                               size_t len)
{
        if (iter->cur + len > iter->end)
                return -ENOSPC;

        if (copy_from_user(val, iter->cur, len))
                return -EFAULT;

        iter->cur += len;
        return 0;
}

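/*
 * Reserve the next 'len' bytes without copying them and return the user
 * pointer so the caller can copy from it later. The error is encoded in a
 * __user pointer, so callers must test the result with IS_ERR()/PTR_ERR().
 */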
static const void __user *uverbs_request_next_ptr(struct uverbs_req_iter *iter,
                                                  size_t len)
{
        const void __user *res = iter->cur;

        if (iter->cur + len > iter->end)
                return (void __force __user *)ERR_PTR(-ENOSPC);
        iter->cur += len;
        return res;
}

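/*
 * Any trailing bytes the handler did not consume must be zero, mirroring
 * the check done for oversized buffers in uverbs_request().
 */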
static int uverbs_request_finish(struct uverbs_req_iter *iter)
{
        if (!ib_is_buffer_cleared(iter->cur, iter->end - iter->cur))
                return -EOPNOTSUPP;
        return 0;
}

/*
 * When calling a destroy function during an error unwind we need to pass in
 * the udata that is sanitized of all user arguments, i.e. from the driver's
 * perspective it looks like no udata was passed.
 */
struct ib_udata *uverbs_get_cleared_udata(struct uverbs_attr_bundle *attrs)
{
        attrs->driver_udata = (struct ib_udata){};
        return &attrs->driver_udata;
}

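/*
 * Resolve a completion channel fd to its event file. The uobject read lock
 * taken by ufd_get_read() is exchanged for a plain reference so the file
 * can outlive this lookup; the caller must drop that reference.
 */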
static struct ib_uverbs_completion_event_file *
_ib_uverbs_lookup_comp_file(s32 fd, struct uverbs_attr_bundle *attrs)
{
        struct ib_uobject *uobj = ufd_get_read(UVERBS_OBJECT_COMP_CHANNEL,
                                               fd, attrs);

        if (IS_ERR(uobj))
                return (void *)uobj;

        uverbs_uobject_get(uobj);
        uobj_put_read(uobj);

        return container_of(uobj, struct ib_uverbs_completion_event_file,
                            uobj);
}
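/*
 * The typecheck() factor always evaluates to 1; it exists only to force a
 * compile error if the caller passes anything other than an s32 fd.
 */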
#define ib_uverbs_lookup_comp_file(_fd, _ufile)                                \
        _ib_uverbs_lookup_comp_file((_fd)*typecheck(s32, _fd), _ufile)

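/*
 * Allocate the driver's ucontext object and attach it to the attrs bundle.
 * The context is not visible to other threads until ib_init_ucontext()
 * publishes it.
 */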
int ib_alloc_ucontext(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_file *ufile = attrs->ufile;
        struct ib_ucontext *ucontext;
        struct ib_device *ib_dev;

        ib_dev = srcu_dereference(ufile->device->ib_dev,
                                  &ufile->device->disassociate_srcu);
        if (!ib_dev)
                return -EIO;

        ucontext = rdma_zalloc_drv_obj(ib_dev, ib_ucontext);
        if (!ucontext)
                return -ENOMEM;

        ucontext->device = ib_dev;
        ucontext->ufile = ufile;
        xa_init_flags(&ucontext->mmap_xa, XA_FLAGS_ALLOC);

        rdma_restrack_new(&ucontext->res, RDMA_RESTRACK_CTX);
        rdma_restrack_set_name(&ucontext->res, NULL);
        attrs->context = ucontext;
        return 0;
}

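/*
 * Charge the rdma cgroup, let the driver initialize its part of the
 * ucontext, and publish the pointer. Only one ucontext may exist per
 * ib_uverbs_file; a second call fails with -EINVAL.
 */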
int ib_init_ucontext(struct uverbs_attr_bundle *attrs)
{
        struct ib_ucontext *ucontext = attrs->context;
        struct ib_uverbs_file *file = attrs->ufile;
        int ret;

        if (!down_read_trylock(&file->hw_destroy_rwsem))
                return -EIO;
        mutex_lock(&file->ucontext_lock);
        if (file->ucontext) {
                ret = -EINVAL;
                goto err;
        }

        ret = ib_rdmacg_try_charge(&ucontext->cg_obj, ucontext->device,
                                   RDMACG_RESOURCE_HCA_HANDLE);
        if (ret)
                goto err;

        ret = ucontext->device->ops.alloc_ucontext(ucontext,
                                                   &attrs->driver_udata);
        if (ret)
                goto err_uncharge;

        rdma_restrack_add(&ucontext->res);

        /*
         * Make sure that ib_uverbs_get_ucontext() sees the pointer update
         * only after all writes to setup the ucontext have completed
         */
        smp_store_release(&file->ucontext, ucontext);

        mutex_unlock(&file->ucontext_lock);
        up_read(&file->hw_destroy_rwsem);
        return 0;

err_uncharge:
        ib_rdmacg_uncharge(&ucontext->cg_obj, ucontext->device,
                           RDMACG_RESOURCE_HCA_HANDLE);
err:
        mutex_unlock(&file->ucontext_lock);
        up_read(&file->hw_destroy_rwsem);
        return ret;
}

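/*
 * Handler for IB_USER_VERBS_CMD_GET_CONTEXT: create the ucontext together
 * with its async event fd and return both to userspace. The response is
 * written before ib_init_ucontext() so a failed copy can still be fully
 * unwound.
 */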
static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_get_context_resp resp;
        struct ib_uverbs_get_context cmd;
        struct ib_device *ib_dev;
        struct ib_uobject *uobj;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        ret = ib_alloc_ucontext(attrs);
        if (ret)
                return ret;

        uobj = uobj_alloc(UVERBS_OBJECT_ASYNC_EVENT, attrs, &ib_dev);
        if (IS_ERR(uobj)) {
                ret = PTR_ERR(uobj);
                goto err_ucontext;
        }

        resp = (struct ib_uverbs_get_context_resp){
                .num_comp_vectors = attrs->ufile->device->num_comp_vectors,
                .async_fd = uobj->id,
        };
        ret = uverbs_response(attrs, &resp, sizeof(resp));
        if (ret)
                goto err_uobj;

        ret = ib_init_ucontext(attrs);
        if (ret)
                goto err_uobj;

        ib_uverbs_init_async_event_file(
                container_of(uobj, struct ib_uverbs_async_event_file, uobj));
        rdma_alloc_commit_uobject(uobj, attrs);
        return 0;

err_uobj:
        rdma_alloc_abort_uobject(uobj, attrs, false);
err_ucontext:
        rdma_restrack_put(&attrs->context->res);
        kfree(attrs->context);
        attrs->context = NULL;
        return ret;
}

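/*
 * Fill the legacy query_device response from ib_device_attr. Fields wider
 * than the ABI allows are clamped (device_cap_flags to 32 bits,
 * phys_port_cnt to U8_MAX).
 */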
static void copy_query_dev_fields(struct ib_ucontext *ucontext,
                                  struct ib_uverbs_query_device_resp *resp,
                                  struct ib_device_attr *attr)
{
        struct ib_device *ib_dev = ucontext->device;

        resp->fw_ver            = attr->fw_ver;
        resp->node_guid         = ib_dev->node_guid;
        resp->sys_image_guid    = attr->sys_image_guid;
        resp->max_mr_size       = attr->max_mr_size;
        resp->page_size_cap     = attr->page_size_cap;
        resp->vendor_id         = attr->vendor_id;
        resp->vendor_part_id    = attr->vendor_part_id;
        resp->hw_ver            = attr->hw_ver;
        resp->max_qp            = attr->max_qp;
        resp->max_qp_wr         = attr->max_qp_wr;
        resp->device_cap_flags  = lower_32_bits(attr->device_cap_flags);
        resp->max_sge           = min(attr->max_send_sge, attr->max_recv_sge);
        resp->max_sge_rd        = attr->max_sge_rd;
        resp->max_cq            = attr->max_cq;
        resp->max_cqe           = attr->max_cqe;
        resp->max_mr            = attr->max_mr;
        resp->max_pd            = attr->max_pd;
        resp->max_qp_rd_atom    = attr->max_qp_rd_atom;
        resp->max_ee_rd_atom    = attr->max_ee_rd_atom;
        resp->max_res_rd_atom   = attr->max_res_rd_atom;
        resp->max_qp_init_rd_atom       = attr->max_qp_init_rd_atom;
        resp->max_ee_init_rd_atom       = attr->max_ee_init_rd_atom;
        resp->atomic_cap                = attr->atomic_cap;
        resp->max_ee                    = attr->max_ee;
        resp->max_rdd                   = attr->max_rdd;
        resp->max_mw                    = attr->max_mw;
        resp->max_raw_ipv6_qp           = attr->max_raw_ipv6_qp;
        resp->max_raw_ethy_qp           = attr->max_raw_ethy_qp;
        resp->max_mcast_grp             = attr->max_mcast_grp;
        resp->max_mcast_qp_attach       = attr->max_mcast_qp_attach;
        resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
        resp->max_ah                    = attr->max_ah;
        resp->max_srq                   = attr->max_srq;
        resp->max_srq_wr                = attr->max_srq_wr;
        resp->max_srq_sge               = attr->max_srq_sge;
        resp->max_pkeys                 = attr->max_pkeys;
        resp->local_ca_ack_delay        = attr->local_ca_ack_delay;
        resp->phys_port_cnt = min_t(u32, ib_dev->phys_port_cnt, U8_MAX);
}

static int ib_uverbs_query_device(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_query_device      cmd;
        struct ib_uverbs_query_device_resp resp;
        struct ib_ucontext *ucontext;
        int ret;

        ucontext = ib_uverbs_get_ucontext(attrs);
        if (IS_ERR(ucontext))
                return PTR_ERR(ucontext);

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        memset(&resp, 0, sizeof resp);
        copy_query_dev_fields(ucontext, &resp, &ucontext->device->attrs);

        return uverbs_response(attrs, &resp, sizeof(resp));
}

static int ib_uverbs_query_port(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_query_port      cmd;
        struct ib_uverbs_query_port_resp resp;
        struct ib_port_attr              attr;
        int                              ret;
        struct ib_ucontext *ucontext;
        struct ib_device *ib_dev;

        ucontext = ib_uverbs_get_ucontext(attrs);
        if (IS_ERR(ucontext))
                return PTR_ERR(ucontext);
        ib_dev = ucontext->device;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        ret = ib_query_port(ib_dev, cmd.port_num, &attr);
        if (ret)
                return ret;

        memset(&resp, 0, sizeof resp);
        copy_port_attr_to_resp(&attr, &resp, ib_dev, cmd.port_num);

        return uverbs_response(attrs, &resp, sizeof(resp));
}

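/*
 * Handler for IB_USER_VERBS_CMD_ALLOC_PD: allocate a protection domain,
 * register it with restrack, and return the new handle to userspace.
 */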
static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_alloc_pd_resp resp = {};
        struct ib_uverbs_alloc_pd      cmd;
        struct ib_uobject             *uobj;
        struct ib_pd                  *pd;
        int                            ret;
        struct ib_device *ib_dev;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        uobj = uobj_alloc(UVERBS_OBJECT_PD, attrs, &ib_dev);
        if (IS_ERR(uobj))
                return PTR_ERR(uobj);

        pd = rdma_zalloc_drv_obj(ib_dev, ib_pd);
        if (!pd) {
                ret = -ENOMEM;
                goto err;
        }

        pd->device  = ib_dev;
        pd->uobject = uobj;
        atomic_set(&pd->usecnt, 0);

        rdma_restrack_new(&pd->res, RDMA_RESTRACK_PD);
        rdma_restrack_set_name(&pd->res, NULL);

        ret = ib_dev->ops.alloc_pd(pd, &attrs->driver_udata);
        if (ret)
                goto err_alloc;
        rdma_restrack_add(&pd->res);

        uobj->object = pd;
        uobj_finalize_uobj_create(uobj, attrs);

        resp.pd_handle = uobj->id;
        return uverbs_response(attrs, &resp, sizeof(resp));

err_alloc:
        rdma_restrack_put(&pd->res);
        kfree(pd);
err:
        uobj_alloc_abort(uobj, attrs);
        return ret;
}

static int ib_uverbs_dealloc_pd(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_dealloc_pd cmd;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        return uobj_perform_destroy(UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
}

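/*
 * XRC domains opened through a file descriptor are shared between
 * processes. Each uverbs device keeps an rb-tree, keyed by inode, mapping
 * the backing file to the single ib_xrcd instance for that file.
 */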
struct xrcd_table_entry {
        struct rb_node  node;
        struct ib_xrcd *xrcd;
        struct inode   *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
                            struct inode *inode,
                            struct ib_xrcd *xrcd)
{
        struct xrcd_table_entry *entry, *scan;
        struct rb_node **p = &dev->xrcd_tree.rb_node;
        struct rb_node *parent = NULL;

        entry = kmalloc(sizeof *entry, GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        entry->xrcd  = xrcd;
        entry->inode = inode;

        while (*p) {
                parent = *p;
                scan = rb_entry(parent, struct xrcd_table_entry, node);

                if (inode < scan->inode) {
                        p = &(*p)->rb_left;
                } else if (inode > scan->inode) {
                        p = &(*p)->rb_right;
                } else {
                        kfree(entry);
                        return -EEXIST;
                }
        }

        rb_link_node(&entry->node, parent, p);
        rb_insert_color(&entry->node, &dev->xrcd_tree);
        igrab(inode);
        return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
                                                  struct inode *inode)
{
        struct xrcd_table_entry *entry;
        struct rb_node *p = dev->xrcd_tree.rb_node;

        while (p) {
                entry = rb_entry(p, struct xrcd_table_entry, node);

                if (inode < entry->inode)
                        p = p->rb_left;
                else if (inode > entry->inode)
                        p = p->rb_right;
                else
                        return entry;
        }

        return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
        struct xrcd_table_entry *entry;

        entry = xrcd_table_search(dev, inode);
        if (!entry)
                return NULL;

        return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
                              struct inode *inode)
{
        struct xrcd_table_entry *entry;

        entry = xrcd_table_search(dev, inode);
        if (entry) {
                iput(inode);
                rb_erase(&entry->node, &dev->xrcd_tree);
                kfree(entry);
        }
}

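/*
 * Handler for IB_USER_VERBS_CMD_OPEN_XRCD: open or create an XRC domain.
 * With a valid fd the XRCD is shared via the inode table above, honouring
 * O_CREAT/O_EXCL semantics; with fd == -1 a private XRCD is created.
 */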
static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_device *ibudev = attrs->ufile->device;
        struct ib_uverbs_open_xrcd_resp resp = {};
        struct ib_uverbs_open_xrcd      cmd;
        struct ib_uxrcd_object         *obj;
        struct ib_xrcd                 *xrcd = NULL;
        struct inode                   *inode = NULL;
        int                             new_xrcd = 0;
        struct ib_device *ib_dev;
        struct fd f = {};
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        mutex_lock(&ibudev->xrcd_tree_mutex);

        if (cmd.fd != -1) {
                /* search for file descriptor */
                f = fdget(cmd.fd);
                if (!f.file) {
                        ret = -EBADF;
                        goto err_tree_mutex_unlock;
                }

                inode = file_inode(f.file);
                xrcd = find_xrcd(ibudev, inode);
                if (!xrcd && !(cmd.oflags & O_CREAT)) {
                        /* no file descriptor. Need CREATE flag */
                        ret = -EAGAIN;
                        goto err_tree_mutex_unlock;
                }

                if (xrcd && cmd.oflags & O_EXCL) {
                        ret = -EINVAL;
                        goto err_tree_mutex_unlock;
                }
        }

        obj = (struct ib_uxrcd_object *)uobj_alloc(UVERBS_OBJECT_XRCD, attrs,
                                                   &ib_dev);
        if (IS_ERR(obj)) {
                ret = PTR_ERR(obj);
                goto err_tree_mutex_unlock;
        }

        if (!xrcd) {
                xrcd = ib_alloc_xrcd_user(ib_dev, inode, &attrs->driver_udata);
                if (IS_ERR(xrcd)) {
                        ret = PTR_ERR(xrcd);
                        goto err;
                }
                new_xrcd = 1;
        }

        atomic_set(&obj->refcnt, 0);
        obj->uobject.object = xrcd;

        if (inode) {
                if (new_xrcd) {
                        /* create new inode/xrcd table entry */
                        ret = xrcd_table_insert(ibudev, inode, xrcd);
                        if (ret)
                                goto err_dealloc_xrcd;
                }
                atomic_inc(&xrcd->usecnt);
        }

        if (f.file)
                fdput(f);

        mutex_unlock(&ibudev->xrcd_tree_mutex);
        uobj_finalize_uobj_create(&obj->uobject, attrs);

        resp.xrcd_handle = obj->uobject.id;
        return uverbs_response(attrs, &resp, sizeof(resp));

err_dealloc_xrcd:
        ib_dealloc_xrcd_user(xrcd, uverbs_get_cleared_udata(attrs));

err:
        uobj_alloc_abort(&obj->uobject, attrs);

err_tree_mutex_unlock:
        if (f.file)
                fdput(f);

        mutex_unlock(&ibudev->xrcd_tree_mutex);

        return ret;
}

static int ib_uverbs_close_xrcd(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_close_xrcd cmd;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        return uobj_perform_destroy(UVERBS_OBJECT_XRCD, cmd.xrcd_handle, attrs);
}

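/*
 * Tear down an XRCD once the last opener drops it. For inode-backed XRCDs
 * the usecnt tracks the number of openers; the hardware object and the
 * table entry are only released when the count reaches zero.
 */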
int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd,
                           enum rdma_remove_reason why,
                           struct uverbs_attr_bundle *attrs)
{
        struct inode *inode;
        int ret;
        struct ib_uverbs_device *dev = attrs->ufile->device;

        inode = xrcd->inode;
        if (inode && !atomic_dec_and_test(&xrcd->usecnt))
                return 0;

        ret = ib_dealloc_xrcd_user(xrcd, &attrs->driver_udata);
        if (ret) {
                atomic_inc(&xrcd->usecnt);
                return ret;
        }

        if (inode)
                xrcd_table_delete(dev, inode);

        return 0;
}

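/*
 * Handler for IB_USER_VERBS_CMD_REG_MR: register a user memory region.
 * The virtual address and the requested HCA address must share the same
 * offset within a page.
 */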
static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_reg_mr_resp resp = {};
        struct ib_uverbs_reg_mr      cmd;
        struct ib_uobject           *uobj;
        struct ib_pd                *pd;
        struct ib_mr                *mr;
        int                          ret;
        struct ib_device *ib_dev;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
                return -EINVAL;

        uobj = uobj_alloc(UVERBS_OBJECT_MR, attrs, &ib_dev);
        if (IS_ERR(uobj))
                return PTR_ERR(uobj);

        ret = ib_check_mr_access(ib_dev, cmd.access_flags);
        if (ret)
                goto err_free;

        pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
        if (!pd) {
                ret = -EINVAL;
                goto err_free;
        }

        mr = pd->device->ops.reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
                                         cmd.access_flags,
                                         &attrs->driver_udata);
        if (IS_ERR(mr)) {
                ret = PTR_ERR(mr);
                goto err_put;
        }

        mr->device  = pd->device;
        mr->pd      = pd;
        mr->type    = IB_MR_TYPE_USER;
        mr->dm      = NULL;
        mr->sig_attrs = NULL;
        mr->uobject = uobj;
        atomic_inc(&pd->usecnt);
        mr->iova = cmd.hca_va;

        rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
        rdma_restrack_set_name(&mr->res, NULL);
        rdma_restrack_add(&mr->res);

        uobj->object = mr;
        uobj_put_obj_read(pd);
        uobj_finalize_uobj_create(uobj, attrs);

        resp.lkey = mr->lkey;
        resp.rkey = mr->rkey;
        resp.mr_handle = uobj->id;
        return uverbs_response(attrs, &resp, sizeof(resp));

err_put:
        uobj_put_obj_read(pd);
err_free:
        uobj_alloc_abort(uobj, attrs);
        return ret;
}

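/*
 * Handler for IB_USER_VERBS_CMD_REREG_MR: change the translation, PD
 * and/or access flags of an existing MR. A driver may perform the rereg by
 * returning a brand new MR, in which case the new object replaces the old
 * one under the same user handle.
 */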
static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_rereg_mr      cmd;
        struct ib_uverbs_rereg_mr_resp resp;
        struct ib_mr                *mr;
        int                          ret;
        struct ib_uobject           *uobj;
        struct ib_uobject *new_uobj;
        struct ib_device *ib_dev;
        struct ib_pd *orig_pd;
        struct ib_pd *new_pd;
        struct ib_mr *new_mr;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        if (!cmd.flags)
                return -EINVAL;

        if (cmd.flags & ~IB_MR_REREG_SUPPORTED)
                return -EOPNOTSUPP;

        if ((cmd.flags & IB_MR_REREG_TRANS) &&
            (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
                return -EINVAL;

        uobj = uobj_get_write(UVERBS_OBJECT_MR, cmd.mr_handle, attrs);
        if (IS_ERR(uobj))
                return PTR_ERR(uobj);

        mr = uobj->object;

        if (mr->dm) {
                ret = -EINVAL;
                goto put_uobjs;
        }

        if (cmd.flags & IB_MR_REREG_ACCESS) {
                ret = ib_check_mr_access(mr->device, cmd.access_flags);
                if (ret)
                        goto put_uobjs;
        }

        orig_pd = mr->pd;
        if (cmd.flags & IB_MR_REREG_PD) {
                new_pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle,
                                           attrs);
                if (!new_pd) {
                        ret = -EINVAL;
                        goto put_uobjs;
                }
        } else {
                new_pd = mr->pd;
        }

        /*
         * The driver might create a new HW object as part of the rereg, we need
         * to have a uobject ready to hold it.
         */
        new_uobj = uobj_alloc(UVERBS_OBJECT_MR, attrs, &ib_dev);
        if (IS_ERR(new_uobj)) {
                ret = PTR_ERR(new_uobj);
                goto put_uobj_pd;
        }

        new_mr = ib_dev->ops.rereg_user_mr(mr, cmd.flags, cmd.start, cmd.length,
                                           cmd.hca_va, cmd.access_flags, new_pd,
                                           &attrs->driver_udata);
        if (IS_ERR(new_mr)) {
                ret = PTR_ERR(new_mr);
                goto put_new_uobj;
        }
        if (new_mr) {
                new_mr->device = new_pd->device;
                new_mr->pd = new_pd;
                new_mr->type = IB_MR_TYPE_USER;
                new_mr->dm = NULL;
                new_mr->sig_attrs = NULL;
                new_mr->uobject = uobj;
                atomic_inc(&new_pd->usecnt);
                new_mr->iova = cmd.hca_va;
                new_uobj->object = new_mr;

                rdma_restrack_new(&new_mr->res, RDMA_RESTRACK_MR);
                rdma_restrack_set_name(&new_mr->res, NULL);
                rdma_restrack_add(&new_mr->res);

                /*
                 * The new uobj for the new HW object is put into the same spot
                 * in the IDR and the old uobj & HW object is deleted.
                 */
                rdma_assign_uobject(uobj, new_uobj, attrs);
                rdma_alloc_commit_uobject(new_uobj, attrs);
                uobj_put_destroy(uobj);
                new_uobj = NULL;
                uobj = NULL;
                mr = new_mr;
        } else {
                if (cmd.flags & IB_MR_REREG_PD) {
                        atomic_dec(&orig_pd->usecnt);
                        mr->pd = new_pd;
                        atomic_inc(&new_pd->usecnt);
                }
                if (cmd.flags & IB_MR_REREG_TRANS)
                        mr->iova = cmd.hca_va;
        }

        memset(&resp, 0, sizeof(resp));
        resp.lkey      = mr->lkey;
        resp.rkey      = mr->rkey;

        ret = uverbs_response(attrs, &resp, sizeof(resp));

put_new_uobj:
        if (new_uobj)
                uobj_alloc_abort(new_uobj, attrs);
put_uobj_pd:
        if (cmd.flags & IB_MR_REREG_PD)
                uobj_put_obj_read(new_pd);

put_uobjs:
        if (uobj)
                uobj_put_write(uobj);

        return ret;
}

static int ib_uverbs_dereg_mr(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_dereg_mr cmd;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        return uobj_perform_destroy(UVERBS_OBJECT_MR, cmd.mr_handle, attrs);
}

static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_alloc_mw      cmd;
        struct ib_uverbs_alloc_mw_resp resp = {};
        struct ib_uobject             *uobj;
        struct ib_pd                  *pd;
        struct ib_mw                  *mw;
        int                            ret;
        struct ib_device *ib_dev;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        uobj = uobj_alloc(UVERBS_OBJECT_MW, attrs, &ib_dev);
        if (IS_ERR(uobj))
                return PTR_ERR(uobj);

        pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
        if (!pd) {
                ret = -EINVAL;
                goto err_free;
        }

        if (cmd.mw_type != IB_MW_TYPE_1 && cmd.mw_type != IB_MW_TYPE_2) {
                ret = -EINVAL;
                goto err_put;
        }

        mw = rdma_zalloc_drv_obj(ib_dev, ib_mw);
        if (!mw) {
                ret = -ENOMEM;
                goto err_put;
        }

        mw->device = ib_dev;
        mw->pd = pd;
        mw->uobject = uobj;
        mw->type = cmd.mw_type;

        ret = pd->device->ops.alloc_mw(mw, &attrs->driver_udata);
        if (ret)
                goto err_alloc;

        atomic_inc(&pd->usecnt);

        uobj->object = mw;
        uobj_put_obj_read(pd);
        uobj_finalize_uobj_create(uobj, attrs);

        resp.rkey = mw->rkey;
        resp.mw_handle = uobj->id;
        return uverbs_response(attrs, &resp, sizeof(resp));

err_alloc:
        kfree(mw);
err_put:
        uobj_put_obj_read(pd);
err_free:
        uobj_alloc_abort(uobj, attrs);
        return ret;
}

static int ib_uverbs_dealloc_mw(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_dealloc_mw cmd;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        return uobj_perform_destroy(UVERBS_OBJECT_MW, cmd.mw_handle, attrs);
}

static int ib_uverbs_create_comp_channel(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_create_comp_channel       cmd;
        struct ib_uverbs_create_comp_channel_resp  resp;
        struct ib_uobject                         *uobj;
        struct ib_uverbs_completion_event_file    *ev_file;
        struct ib_device *ib_dev;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        uobj = uobj_alloc(UVERBS_OBJECT_COMP_CHANNEL, attrs, &ib_dev);
        if (IS_ERR(uobj))
                return PTR_ERR(uobj);

        ev_file = container_of(uobj, struct ib_uverbs_completion_event_file,
                               uobj);
        ib_uverbs_init_event_queue(&ev_file->ev_queue);
        uobj_finalize_uobj_create(uobj, attrs);

        resp.fd = uobj->id;
        return uverbs_response(attrs, &resp, sizeof(resp));
}

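/*
 * Common CQ creation path shared by the plain and extended create_cq
 * commands. A non-negative comp_channel binds the CQ's completion events
 * to that channel's event queue.
 */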
static int create_cq(struct uverbs_attr_bundle *attrs,
                     struct ib_uverbs_ex_create_cq *cmd)
{
        struct ib_ucq_object           *obj;
        struct ib_uverbs_completion_event_file    *ev_file = NULL;
        struct ib_cq                   *cq;
        int                             ret;
        struct ib_uverbs_ex_create_cq_resp resp = {};
        struct ib_cq_init_attr attr = {};
        struct ib_device *ib_dev;

        if (cmd->comp_vector >= attrs->ufile->device->num_comp_vectors)
                return -EINVAL;

        obj = (struct ib_ucq_object *)uobj_alloc(UVERBS_OBJECT_CQ, attrs,
                                                 &ib_dev);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        if (cmd->comp_channel >= 0) {
                ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel, attrs);
                if (IS_ERR(ev_file)) {
                        ret = PTR_ERR(ev_file);
                        goto err;
                }
        }

        obj->uevent.uobject.user_handle = cmd->user_handle;
        INIT_LIST_HEAD(&obj->comp_list);
        INIT_LIST_HEAD(&obj->uevent.event_list);

        attr.cqe = cmd->cqe;
        attr.comp_vector = cmd->comp_vector;
        attr.flags = cmd->flags;

        cq = rdma_zalloc_drv_obj(ib_dev, ib_cq);
        if (!cq) {
                ret = -ENOMEM;
                goto err_file;
        }
        cq->device        = ib_dev;
        cq->uobject       = obj;
        cq->comp_handler  = ib_uverbs_comp_handler;
        cq->event_handler = ib_uverbs_cq_event_handler;
        cq->cq_context    = ev_file ? &ev_file->ev_queue : NULL;
        atomic_set(&cq->usecnt, 0);

        rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ);
        rdma_restrack_set_name(&cq->res, NULL);

        ret = ib_dev->ops.create_cq(cq, &attr, &attrs->driver_udata);
        if (ret)
                goto err_free;
        rdma_restrack_add(&cq->res);

        obj->uevent.uobject.object = cq;
        obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
        if (obj->uevent.event_file)
                uverbs_uobject_get(&obj->uevent.event_file->uobj);
        uobj_finalize_uobj_create(&obj->uevent.uobject, attrs);

        resp.base.cq_handle = obj->uevent.uobject.id;
        resp.base.cqe = cq->cqe;
        resp.response_length = uverbs_response_length(attrs, sizeof(resp));
        return uverbs_response(attrs, &resp, sizeof(resp));

err_free:
        rdma_restrack_put(&cq->res);
        kfree(cq);
err_file:
        if (ev_file)
                ib_uverbs_release_ucq(ev_file, obj);
err:
        uobj_alloc_abort(&obj->uevent.uobject, attrs);
        return ret;
}

static int ib_uverbs_create_cq(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_create_cq      cmd;
        struct ib_uverbs_ex_create_cq   cmd_ex;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        memset(&cmd_ex, 0, sizeof(cmd_ex));
        cmd_ex.user_handle = cmd.user_handle;
        cmd_ex.cqe = cmd.cqe;
        cmd_ex.comp_vector = cmd.comp_vector;
        cmd_ex.comp_channel = cmd.comp_channel;

        return create_cq(attrs, &cmd_ex);
}

static int ib_uverbs_ex_create_cq(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_ex_create_cq  cmd;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        if (cmd.comp_mask)
                return -EINVAL;

        if (cmd.reserved)
                return -EINVAL;

        return create_cq(attrs, &cmd);
}

static int ib_uverbs_resize_cq(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_resize_cq      cmd;
        struct ib_uverbs_resize_cq_resp resp = {};
        struct ib_cq                    *cq;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
        if (!cq)
                return -EINVAL;

        ret = cq->device->ops.resize_cq(cq, cmd.cqe, &attrs->driver_udata);
        if (ret)
                goto out;

        resp.cqe = cq->cqe;

        ret = uverbs_response(attrs, &resp, sizeof(resp));
out:
        rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
                                UVERBS_LOOKUP_READ);

        return ret;
}

static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest,
                           struct ib_wc *wc)
{
        struct ib_uverbs_wc tmp;

        tmp.wr_id               = wc->wr_id;
        tmp.status              = wc->status;
        tmp.opcode              = wc->opcode;
        tmp.vendor_err          = wc->vendor_err;
        tmp.byte_len            = wc->byte_len;
        tmp.ex.imm_data         = wc->ex.imm_data;
        tmp.qp_num              = wc->qp->qp_num;
        tmp.src_qp              = wc->src_qp;
        tmp.wc_flags            = wc->wc_flags;
        tmp.pkey_index          = wc->pkey_index;
        if (rdma_cap_opa_ah(ib_dev, wc->port_num))
                tmp.slid        = OPA_TO_IB_UCAST_LID(wc->slid);
        else
                tmp.slid        = ib_lid_cpu16(wc->slid);
        tmp.sl                  = wc->sl;
        tmp.dlid_path_bits      = wc->dlid_path_bits;
        tmp.port_num            = wc->port_num;
        tmp.reserved            = 0;

        if (copy_to_user(dest, &tmp, sizeof tmp))
                return -EFAULT;

        return 0;
}

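/*
 * Handler for IB_USER_VERBS_CMD_POLL_CQ: drain up to 'ne' completions into
 * the user buffer directly behind the response header, then write the
 * header with the number of completions actually copied.
 */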
static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_poll_cq       cmd;
        struct ib_uverbs_poll_cq_resp  resp;
        u8 __user                     *header_ptr;
        u8 __user                     *data_ptr;
        struct ib_cq                  *cq;
        struct ib_wc                   wc;
        int                            ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
        if (!cq)
                return -EINVAL;

        /* we copy a struct ib_uverbs_poll_cq_resp to user space */
        header_ptr = attrs->ucore.outbuf;
        data_ptr = header_ptr + sizeof resp;

        memset(&resp, 0, sizeof resp);
        while (resp.count < cmd.ne) {
                ret = ib_poll_cq(cq, 1, &wc);
                if (ret < 0)
                        goto out_put;
                if (!ret)
                        break;

                ret = copy_wc_to_user(cq->device, data_ptr, &wc);
                if (ret)
                        goto out_put;

                data_ptr += sizeof(struct ib_uverbs_wc);
                ++resp.count;
        }

        if (copy_to_user(header_ptr, &resp, sizeof resp)) {
                ret = -EFAULT;
                goto out_put;
        }
        ret = 0;

        if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
                ret = uverbs_output_written(attrs, UVERBS_ATTR_CORE_OUT);

out_put:
        rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
                                UVERBS_LOOKUP_READ);
        return ret;
}

static int ib_uverbs_req_notify_cq(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_req_notify_cq cmd;
        struct ib_cq                  *cq;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
        if (!cq)
                return -EINVAL;

        ib_req_notify_cq(cq, cmd.solicited_only ?
                         IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

        rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
                                UVERBS_LOOKUP_READ);
        return 0;
}

static int ib_uverbs_destroy_cq(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_destroy_cq      cmd;
        struct ib_uverbs_destroy_cq_resp resp;
        struct ib_uobject               *uobj;
        struct ib_ucq_object            *obj;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        uobj = uobj_get_destroy(UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
        if (IS_ERR(uobj))
                return PTR_ERR(uobj);

        obj = container_of(uobj, struct ib_ucq_object, uevent.uobject);
        memset(&resp, 0, sizeof(resp));
        resp.comp_events_reported  = obj->comp_events_reported;
        resp.async_events_reported = obj->uevent.events_reported;

        uobj_put_destroy(uobj);

        return uverbs_response(attrs, &resp, sizeof(resp));
}

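/*
 * Common QP creation path shared by the plain and extended create_qp
 * commands. XRC_TGT QPs take their device from the XRCD and use no PD or
 * CQs; QPs on an indirection table have no receive queue of their own.
 */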
1278static int create_qp(struct uverbs_attr_bundle *attrs,
1279                     struct ib_uverbs_ex_create_qp *cmd)
1280{
1281        struct ib_uqp_object            *obj;
1282        struct ib_device                *device;
1283        struct ib_pd                    *pd = NULL;
1284        struct ib_xrcd                  *xrcd = NULL;
1285        struct ib_uobject               *xrcd_uobj = ERR_PTR(-ENOENT);
1286        struct ib_cq                    *scq = NULL, *rcq = NULL;
1287        struct ib_srq                   *srq = NULL;
1288        struct ib_qp                    *qp;
1289        struct ib_qp_init_attr          attr = {};
1290        struct ib_uverbs_ex_create_qp_resp resp = {};
1291        int                             ret;
1292        struct ib_rwq_ind_table *ind_tbl = NULL;
1293        bool has_sq = true;
1294        struct ib_device *ib_dev;
1295
1296        switch (cmd->qp_type) {
1297        case IB_QPT_RAW_PACKET:
1298                if (!capable(CAP_NET_RAW))
1299                        return -EPERM;
1300                break;
1301        case IB_QPT_RC:
1302        case IB_QPT_UC:
1303        case IB_QPT_UD:
1304        case IB_QPT_XRC_INI:
1305        case IB_QPT_XRC_TGT:
1306        case IB_QPT_DRIVER:
1307                break;
1308        default:
1309                return -EINVAL;
1310        }
1311
1312        obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, attrs,
1313                                                 &ib_dev);
1314        if (IS_ERR(obj))
1315                return PTR_ERR(obj);
1316        obj->uxrcd = NULL;
1317        obj->uevent.uobject.user_handle = cmd->user_handle;
1318        mutex_init(&obj->mcast_lock);
1319
1320        if (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE) {
1321                ind_tbl = uobj_get_obj_read(rwq_ind_table,
1322                                            UVERBS_OBJECT_RWQ_IND_TBL,
1323                                            cmd->rwq_ind_tbl_handle, attrs);
1324                if (!ind_tbl) {
1325                        ret = -EINVAL;
1326                        goto err_put;
1327                }
1328
1329                attr.rwq_ind_tbl = ind_tbl;
1330        }
1331
1332        if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
1333                ret = -EINVAL;
1334                goto err_put;
1335        }
1336
1337        if (ind_tbl && !cmd->max_send_wr)
1338                has_sq = false;
1339
1340        if (cmd->qp_type == IB_QPT_XRC_TGT) {
1341                xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->pd_handle,
1342                                          attrs);
1343
1344                if (IS_ERR(xrcd_uobj)) {
1345                        ret = -EINVAL;
1346                        goto err_put;
1347                }
1348
1349                xrcd = (struct ib_xrcd *)xrcd_uobj->object;
1350                if (!xrcd) {
1351                        ret = -EINVAL;
1352                        goto err_put;
1353                }
1354                device = xrcd->device;
1355        } else {
1356                if (cmd->qp_type == IB_QPT_XRC_INI) {
1357                        cmd->max_recv_wr = 0;
1358                        cmd->max_recv_sge = 0;
1359                } else {
1360                        if (cmd->is_srq) {
1361                                srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ,
1362                                                        cmd->srq_handle, attrs);
1363                                if (!srq || srq->srq_type == IB_SRQT_XRC) {
1364                                        ret = -EINVAL;
1365                                        goto err_put;
1366                                }
1367                        }
1368
1369                        if (!ind_tbl) {
1370                                if (cmd->recv_cq_handle != cmd->send_cq_handle) {
1371                                        rcq = uobj_get_obj_read(
1372                                                cq, UVERBS_OBJECT_CQ,
1373                                                cmd->recv_cq_handle, attrs);
1374                                        if (!rcq) {
1375                                                ret = -EINVAL;
1376                                                goto err_put;
1377                                        }
1378                                }
1379                        }
1380                }
1381
1382                if (has_sq)
1383                        scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
1384                                                cmd->send_cq_handle, attrs);
1385                if (!ind_tbl && cmd->qp_type != IB_QPT_XRC_INI)
1386                        rcq = rcq ?: scq;
1387                pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle,
1388                                       attrs);
1389                if (!pd || (!scq && has_sq)) {
1390                        ret = -EINVAL;
1391                        goto err_put;
1392                }
1393
1394                device = pd->device;
1395        }
1396
1397        attr.event_handler = ib_uverbs_qp_event_handler;
1398        attr.send_cq       = scq;
1399        attr.recv_cq       = rcq;
1400        attr.srq           = srq;
1401        attr.xrcd          = xrcd;
1402        attr.sq_sig_type   = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
1403                                              IB_SIGNAL_REQ_WR;
1404        attr.qp_type       = cmd->qp_type;
1405        attr.create_flags  = 0;
1406
1407        attr.cap.max_send_wr     = cmd->max_send_wr;
1408        attr.cap.max_recv_wr     = cmd->max_recv_wr;
1409        attr.cap.max_send_sge    = cmd->max_send_sge;
1410        attr.cap.max_recv_sge    = cmd->max_recv_sge;
1411        attr.cap.max_inline_data = cmd->max_inline_data;
1412
1413        INIT_LIST_HEAD(&obj->uevent.event_list);
1414        INIT_LIST_HEAD(&obj->mcast_list);
1415
1416        attr.create_flags = cmd->create_flags;
1417        if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
1418                                IB_QP_CREATE_CROSS_CHANNEL |
1419                                IB_QP_CREATE_MANAGED_SEND |
1420                                IB_QP_CREATE_MANAGED_RECV |
1421                                IB_QP_CREATE_SCATTER_FCS |
1422                                IB_QP_CREATE_CVLAN_STRIPPING |
1423                                IB_QP_CREATE_SOURCE_QPN |
1424                                IB_QP_CREATE_PCI_WRITE_END_PADDING)) {
1425                ret = -EINVAL;
1426                goto err_put;
1427        }
1428
1429        if (attr.create_flags & IB_QP_CREATE_SOURCE_QPN) {
1430                if (!capable(CAP_NET_RAW)) {
1431                        ret = -EPERM;
1432                        goto err_put;
1433                }
1434
1435                attr.source_qpn = cmd->source_qpn;
1436        }
1437
1438        qp = ib_create_qp_user(device, pd, &attr, &attrs->driver_udata, obj,
1439                               KBUILD_MODNAME);
1440        if (IS_ERR(qp)) {
1441                ret = PTR_ERR(qp);
1442                goto err_put;
1443        }
1444        ib_qp_usecnt_inc(qp);
1445
1446        obj->uevent.uobject.object = qp;
1447        obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
1448        if (obj->uevent.event_file)
1449                uverbs_uobject_get(&obj->uevent.event_file->uobj);
1450
1451        if (xrcd) {
1452                obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
1453                                          uobject);
1454                atomic_inc(&obj->uxrcd->refcnt);
1455                uobj_put_read(xrcd_uobj);
1456        }
1457
1458        if (pd)
1459                uobj_put_obj_read(pd);
1460        if (scq)
1461                rdma_lookup_put_uobject(&scq->uobject->uevent.uobject,
1462                                        UVERBS_LOOKUP_READ);
1463        if (rcq && rcq != scq)
1464                rdma_lookup_put_uobject(&rcq->uobject->uevent.uobject,
1465                                        UVERBS_LOOKUP_READ);
1466        if (srq)
1467                rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
1468                                        UVERBS_LOOKUP_READ);
1469        if (ind_tbl)
1470                uobj_put_obj_read(ind_tbl);
1471        uobj_finalize_uobj_create(&obj->uevent.uobject, attrs);
1472
1473        resp.base.qpn             = qp->qp_num;
1474        resp.base.qp_handle       = obj->uevent.uobject.id;
1475        resp.base.max_recv_sge    = attr.cap.max_recv_sge;
1476        resp.base.max_send_sge    = attr.cap.max_send_sge;
1477        resp.base.max_recv_wr     = attr.cap.max_recv_wr;
1478        resp.base.max_send_wr     = attr.cap.max_send_wr;
1479        resp.base.max_inline_data = attr.cap.max_inline_data;
1480        resp.response_length = uverbs_response_length(attrs, sizeof(resp));
1481        return uverbs_response(attrs, &resp, sizeof(resp));
1482
1483err_put:
1484        if (!IS_ERR(xrcd_uobj))
1485                uobj_put_read(xrcd_uobj);
1486        if (pd)
1487                uobj_put_obj_read(pd);
1488        if (scq)
1489                rdma_lookup_put_uobject(&scq->uobject->uevent.uobject,
1490                                        UVERBS_LOOKUP_READ);
1491        if (rcq && rcq != scq)
1492                rdma_lookup_put_uobject(&rcq->uobject->uevent.uobject,
1493                                        UVERBS_LOOKUP_READ);
1494        if (srq)
1495                rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
1496                                        UVERBS_LOOKUP_READ);
1497        if (ind_tbl)
1498                uobj_put_obj_read(ind_tbl);
1499
1500        uobj_alloc_abort(&obj->uevent.uobject, attrs);
1501        return ret;
1502}
1503
1504static int ib_uverbs_create_qp(struct uverbs_attr_bundle *attrs)
1505{
1506        struct ib_uverbs_create_qp      cmd;
1507        struct ib_uverbs_ex_create_qp   cmd_ex;
1508        int ret;
1509
1510        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
1511        if (ret)
1512                return ret;
1513
1514        memset(&cmd_ex, 0, sizeof(cmd_ex));
1515        cmd_ex.user_handle = cmd.user_handle;
1516        cmd_ex.pd_handle = cmd.pd_handle;
1517        cmd_ex.send_cq_handle = cmd.send_cq_handle;
1518        cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
1519        cmd_ex.srq_handle = cmd.srq_handle;
1520        cmd_ex.max_send_wr = cmd.max_send_wr;
1521        cmd_ex.max_recv_wr = cmd.max_recv_wr;
1522        cmd_ex.max_send_sge = cmd.max_send_sge;
1523        cmd_ex.max_recv_sge = cmd.max_recv_sge;
1524        cmd_ex.max_inline_data = cmd.max_inline_data;
1525        cmd_ex.sq_sig_all = cmd.sq_sig_all;
1526        cmd_ex.qp_type = cmd.qp_type;
1527        cmd_ex.is_srq = cmd.is_srq;
1528
1529        return create_qp(attrs, &cmd_ex);
1530}
1531
1532static int ib_uverbs_ex_create_qp(struct uverbs_attr_bundle *attrs)
1533{
1534        struct ib_uverbs_ex_create_qp cmd;
1535        int ret;
1536
1537        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
1538        if (ret)
1539                return ret;
1540
1541        if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
1542                return -EINVAL;
1543
1544        if (cmd.reserved)
1545                return -EINVAL;
1546
1547        return create_qp(attrs, &cmd);
1548}
1549
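/*
 * Open an additional handle to an existing shareable XRC QP. Note that the
 * wire field is named pd_handle but carries the XRCD handle here, and the
 * uxrcd refcount keeps the XRCD alive for the lifetime of the opened QP.
 */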
1550static int ib_uverbs_open_qp(struct uverbs_attr_bundle *attrs)
1551{
1552        struct ib_uverbs_create_qp_resp resp = {};
1553        struct ib_uverbs_open_qp        cmd;
1554        struct ib_uqp_object           *obj;
1555        struct ib_xrcd                 *xrcd;
1556        struct ib_qp                   *qp;
1557        struct ib_qp_open_attr          attr = {};
1558        int ret;
1559        struct ib_uobject *xrcd_uobj;
1560        struct ib_device *ib_dev;
1561
1562        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
1563        if (ret)
1564                return ret;
1565
1566        obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, attrs,
1567                                                 &ib_dev);
1568        if (IS_ERR(obj))
1569                return PTR_ERR(obj);
1570
1571        xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd.pd_handle, attrs);
1572        if (IS_ERR(xrcd_uobj)) {
1573                ret = -EINVAL;
1574                goto err_put;
1575        }
1576
1577        xrcd = (struct ib_xrcd *)xrcd_uobj->object;
1578        if (!xrcd) {
1579                ret = -EINVAL;
1580                goto err_xrcd;
1581        }
1582
1583        attr.event_handler = ib_uverbs_qp_event_handler;
1584        attr.qp_num        = cmd.qpn;
1585        attr.qp_type       = cmd.qp_type;
1586
1587        INIT_LIST_HEAD(&obj->uevent.event_list);
1588        INIT_LIST_HEAD(&obj->mcast_list);
1589
1590        qp = ib_open_qp(xrcd, &attr);
1591        if (IS_ERR(qp)) {
1592                ret = PTR_ERR(qp);
1593                goto err_xrcd;
1594        }
1595
1596        obj->uevent.uobject.object = qp;
1597        obj->uevent.uobject.user_handle = cmd.user_handle;
1598
1599        obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
1600        atomic_inc(&obj->uxrcd->refcnt);
1601        qp->uobject = obj;
1602        uobj_put_read(xrcd_uobj);
1603        uobj_finalize_uobj_create(&obj->uevent.uobject, attrs);
1604
1605        resp.qpn = qp->qp_num;
1606        resp.qp_handle = obj->uevent.uobject.id;
1607        return uverbs_response(attrs, &resp, sizeof(resp));
1608
1609err_xrcd:
1610        uobj_put_read(xrcd_uobj);
1611err_put:
1612        uobj_alloc_abort(&obj->uevent.uobject, attrs);
1613        return ret;
1614}
1615
1616static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr,
1617                                   struct rdma_ah_attr *rdma_attr)
1618{
1619        const struct ib_global_route   *grh;
1620
1621        uverb_attr->dlid              = rdma_ah_get_dlid(rdma_attr);
1622        uverb_attr->sl                = rdma_ah_get_sl(rdma_attr);
1623        uverb_attr->src_path_bits     = rdma_ah_get_path_bits(rdma_attr);
1624        uverb_attr->static_rate       = rdma_ah_get_static_rate(rdma_attr);
1625        uverb_attr->is_global         = !!(rdma_ah_get_ah_flags(rdma_attr) &
1626                                         IB_AH_GRH);
1627        if (uverb_attr->is_global) {
1628                grh = rdma_ah_read_grh(rdma_attr);
1629                memcpy(uverb_attr->dgid, grh->dgid.raw, 16);
1630                uverb_attr->flow_label        = grh->flow_label;
1631                uverb_attr->sgid_index        = grh->sgid_index;
1632                uverb_attr->hop_limit         = grh->hop_limit;
1633                uverb_attr->traffic_class     = grh->traffic_class;
1634        }
1635        uverb_attr->port_num          = rdma_ah_get_port_num(rdma_attr);
1636}
1637
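/*
 * Query the QP's current attributes and mirror them into the wire response.
 * attr and init_attr live on the heap since struct ib_qp_attr is too large
 * to sit comfortably on the kernel stack.
 */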
1638static int ib_uverbs_query_qp(struct uverbs_attr_bundle *attrs)
1639{
1640        struct ib_uverbs_query_qp      cmd;
1641        struct ib_uverbs_query_qp_resp resp;
1642        struct ib_qp                   *qp;
1643        struct ib_qp_attr              *attr;
1644        struct ib_qp_init_attr         *init_attr;
1645        int                            ret;
1646
1647        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
1648        if (ret)
1649                return ret;
1650
1651        attr      = kmalloc(sizeof *attr, GFP_KERNEL);
1652        init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
1653        if (!attr || !init_attr) {
1654                ret = -ENOMEM;
1655                goto out;
1656        }
1657
1658        qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
1659        if (!qp) {
1660                ret = -EINVAL;
1661                goto out;
1662        }
1663
1664        ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);
1665
1666        rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
1667                                UVERBS_LOOKUP_READ);
1668
1669        if (ret)
1670                goto out;
1671
1672        memset(&resp, 0, sizeof resp);
1673
1674        resp.qp_state               = attr->qp_state;
1675        resp.cur_qp_state           = attr->cur_qp_state;
1676        resp.path_mtu               = attr->path_mtu;
1677        resp.path_mig_state         = attr->path_mig_state;
1678        resp.qkey                   = attr->qkey;
1679        resp.rq_psn                 = attr->rq_psn;
1680        resp.sq_psn                 = attr->sq_psn;
1681        resp.dest_qp_num            = attr->dest_qp_num;
1682        resp.qp_access_flags        = attr->qp_access_flags;
1683        resp.pkey_index             = attr->pkey_index;
1684        resp.alt_pkey_index         = attr->alt_pkey_index;
1685        resp.sq_draining            = attr->sq_draining;
1686        resp.max_rd_atomic          = attr->max_rd_atomic;
1687        resp.max_dest_rd_atomic     = attr->max_dest_rd_atomic;
1688        resp.min_rnr_timer          = attr->min_rnr_timer;
1689        resp.port_num               = attr->port_num;
1690        resp.timeout                = attr->timeout;
1691        resp.retry_cnt              = attr->retry_cnt;
1692        resp.rnr_retry              = attr->rnr_retry;
1693        resp.alt_port_num           = attr->alt_port_num;
1694        resp.alt_timeout            = attr->alt_timeout;
1695
1696        copy_ah_attr_to_uverbs(&resp.dest, &attr->ah_attr);
1697        copy_ah_attr_to_uverbs(&resp.alt_dest, &attr->alt_ah_attr);
1698
1699        resp.max_send_wr            = init_attr->cap.max_send_wr;
1700        resp.max_recv_wr            = init_attr->cap.max_recv_wr;
1701        resp.max_send_sge           = init_attr->cap.max_send_sge;
1702        resp.max_recv_sge           = init_attr->cap.max_recv_sge;
1703        resp.max_inline_data        = init_attr->cap.max_inline_data;
1704        resp.sq_sig_all             = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
1705
1706        ret = uverbs_response(attrs, &resp, sizeof(resp));
1707
1708out:
1709        kfree(attr);
1710        kfree(init_attr);
1711
1712        return ret;
1713}
1714
1715/* Strip attr_mask bits that are ignored for the given QP type */
1716static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
1717{
1718        switch (qp_type) {
1719        case IB_QPT_XRC_INI:
1720                return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
1721        case IB_QPT_XRC_TGT:
1722                return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
1723                                IB_QP_RNR_RETRY);
1724        default:
1725                return mask;
1726        }
1727}
1728
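/*
 * Build a kernel rdma_ah_attr from the wire description. A GRH is attached
 * only when is_global is set; the matching sgid_attr is left for the core
 * to resolve when the modify is actually executed.
 */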
1729static void copy_ah_attr_from_uverbs(struct ib_device *dev,
1730                                     struct rdma_ah_attr *rdma_attr,
1731                                     struct ib_uverbs_qp_dest *uverb_attr)
1732{
1733        rdma_attr->type = rdma_ah_find_type(dev, uverb_attr->port_num);
1734        if (uverb_attr->is_global) {
1735                rdma_ah_set_grh(rdma_attr, NULL,
1736                                uverb_attr->flow_label,
1737                                uverb_attr->sgid_index,
1738                                uverb_attr->hop_limit,
1739                                uverb_attr->traffic_class);
1740                rdma_ah_set_dgid_raw(rdma_attr, uverb_attr->dgid);
1741        } else {
1742                rdma_ah_set_ah_flags(rdma_attr, 0);
1743        }
1744        rdma_ah_set_dlid(rdma_attr, uverb_attr->dlid);
1745        rdma_ah_set_sl(rdma_attr, uverb_attr->sl);
1746        rdma_ah_set_path_bits(rdma_attr, uverb_attr->src_path_bits);
1747        rdma_ah_set_static_rate(rdma_attr, uverb_attr->static_rate);
1748        rdma_ah_set_port_num(rdma_attr, uverb_attr->port_num);
1749        rdma_ah_set_make_grd(rdma_attr, false);
1750}
1751
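/*
 * Common worker for modify_qp and ex_modify_qp: sanity-check the port, AV
 * and state combinations up front, then copy only the fields selected by
 * attr_mask into a zeroed ib_qp_attr before calling into the core.
 */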
1752static int modify_qp(struct uverbs_attr_bundle *attrs,
1753                     struct ib_uverbs_ex_modify_qp *cmd)
1754{
1755        struct ib_qp_attr *attr;
1756        struct ib_qp *qp;
1757        int ret;
1758
1759        attr = kzalloc(sizeof(*attr), GFP_KERNEL);
1760        if (!attr)
1761                return -ENOMEM;
1762
1763        qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd->base.qp_handle,
1764                               attrs);
1765        if (!qp) {
1766                ret = -EINVAL;
1767                goto out;
1768        }
1769
1770        if ((cmd->base.attr_mask & IB_QP_PORT) &&
1771            !rdma_is_port_valid(qp->device, cmd->base.port_num)) {
1772                ret = -EINVAL;
1773                goto release_qp;
1774        }
1775
1776        if ((cmd->base.attr_mask & IB_QP_AV)) {
1777                if (!rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
1778                        ret = -EINVAL;
1779                        goto release_qp;
1780                }
1781
1782                if (cmd->base.attr_mask & IB_QP_STATE &&
1783                    cmd->base.qp_state == IB_QPS_RTR) {
1784                /* We are in the INIT->RTR transition (if we are not,
1785                 * this transition will be rejected in subsequent checks).
1786                 * In the INIT->RTR transition, we cannot have IB_QP_PORT set,
1787                 * but the IB_QP_STATE flag is required.
1788                 *
1789                 * Since kernel 3.14 (commit dbf727de7440), the uverbs driver,
1790                 * when IB_QP_AV is set, has required inclusion of a valid
1791                 * port number in the primary AV. (AVs are created and handled
1792                 * differently for InfiniBand and Ethernet (RoCE) ports.)
1793                 *
1794                 * Check the port number included in the primary AV against
1795                 * the port number in the qp struct, which was set (and saved)
1796                 * in the RST->INIT transition.
1797                 */
1798                        if (cmd->base.dest.port_num != qp->real_qp->port) {
1799                                ret = -EINVAL;
1800                                goto release_qp;
1801                        }
1802                } else {
1803                /* We are in the SQD->SQD transition. (If we are not, this
1804                 * transition will be rejected later in the verbs layer checks.)
1805                 * Check for both IB_QP_PORT and IB_QP_AV, which may be set
1806                 * together in the SQD->SQD transition.
1807                 *
1808                 * If only IB_QP_AV was set, add in IB_QP_PORT as well (the
1809                 * verbs layer driver does not track primary port changes
1810                 * resulting from path migration; thus, in SQD, if the primary
1811                 * AV is modified, the primary port should also be modified).
1812                 *
1813                 * Note that in this transition, the IB_QP_STATE flag
1814                 * is not allowed.
1815                 */
1816                        if (((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
1817                             == (IB_QP_AV | IB_QP_PORT)) &&
1818                            cmd->base.port_num != cmd->base.dest.port_num) {
1819                                ret = -EINVAL;
1820                                goto release_qp;
1821                        }
1822                        if ((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
1823                            == IB_QP_AV) {
1824                                cmd->base.attr_mask |= IB_QP_PORT;
1825                                cmd->base.port_num = cmd->base.dest.port_num;
1826                        }
1827                }
1828        }
1829
1830        if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
1831            (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) ||
1832            !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num) ||
1833            cmd->base.alt_port_num != cmd->base.alt_dest.port_num)) {
1834                ret = -EINVAL;
1835                goto release_qp;
1836        }
1837
1838        if ((cmd->base.attr_mask & IB_QP_CUR_STATE &&
1839            cmd->base.cur_qp_state > IB_QPS_ERR) ||
1840            (cmd->base.attr_mask & IB_QP_STATE &&
1841            cmd->base.qp_state > IB_QPS_ERR)) {
1842                ret = -EINVAL;
1843                goto release_qp;
1844        }
1845
1846        if (cmd->base.attr_mask & IB_QP_STATE)
1847                attr->qp_state = cmd->base.qp_state;
1848        if (cmd->base.attr_mask & IB_QP_CUR_STATE)
1849                attr->cur_qp_state = cmd->base.cur_qp_state;
1850        if (cmd->base.attr_mask & IB_QP_PATH_MTU)
1851                attr->path_mtu = cmd->base.path_mtu;
1852        if (cmd->base.attr_mask & IB_QP_PATH_MIG_STATE)
1853                attr->path_mig_state = cmd->base.path_mig_state;
1854        if (cmd->base.attr_mask & IB_QP_QKEY)
1855                attr->qkey = cmd->base.qkey;
1856        if (cmd->base.attr_mask & IB_QP_RQ_PSN)
1857                attr->rq_psn = cmd->base.rq_psn;
1858        if (cmd->base.attr_mask & IB_QP_SQ_PSN)
1859                attr->sq_psn = cmd->base.sq_psn;
1860        if (cmd->base.attr_mask & IB_QP_DEST_QPN)
1861                attr->dest_qp_num = cmd->base.dest_qp_num;
1862        if (cmd->base.attr_mask & IB_QP_ACCESS_FLAGS)
1863                attr->qp_access_flags = cmd->base.qp_access_flags;
1864        if (cmd->base.attr_mask & IB_QP_PKEY_INDEX)
1865                attr->pkey_index = cmd->base.pkey_index;
1866        if (cmd->base.attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
1867                attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
1868        if (cmd->base.attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
1869                attr->max_rd_atomic = cmd->base.max_rd_atomic;
1870        if (cmd->base.attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1871                attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
1872        if (cmd->base.attr_mask & IB_QP_MIN_RNR_TIMER)
1873                attr->min_rnr_timer = cmd->base.min_rnr_timer;
1874        if (cmd->base.attr_mask & IB_QP_PORT)
1875                attr->port_num = cmd->base.port_num;
1876        if (cmd->base.attr_mask & IB_QP_TIMEOUT)
1877                attr->timeout = cmd->base.timeout;
1878        if (cmd->base.attr_mask & IB_QP_RETRY_CNT)
1879                attr->retry_cnt = cmd->base.retry_cnt;
1880        if (cmd->base.attr_mask & IB_QP_RNR_RETRY)
1881                attr->rnr_retry = cmd->base.rnr_retry;
1882        if (cmd->base.attr_mask & IB_QP_ALT_PATH) {
1883                attr->alt_port_num = cmd->base.alt_port_num;
1884                attr->alt_timeout = cmd->base.alt_timeout;
1885                attr->alt_pkey_index = cmd->base.alt_pkey_index;
1886        }
1887        if (cmd->base.attr_mask & IB_QP_RATE_LIMIT)
1888                attr->rate_limit = cmd->rate_limit;
1889
1890        if (cmd->base.attr_mask & IB_QP_AV)
1891                copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
1892                                         &cmd->base.dest);
1893
1894        if (cmd->base.attr_mask & IB_QP_ALT_PATH)
1895                copy_ah_attr_from_uverbs(qp->device, &attr->alt_ah_attr,
1896                                         &cmd->base.alt_dest);
1897
1898        ret = ib_modify_qp_with_udata(qp, attr,
1899                                      modify_qp_mask(qp->qp_type,
1900                                                     cmd->base.attr_mask),
1901                                      &attrs->driver_udata);
1902
1903release_qp:
1904        rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
1905                                UVERBS_LOOKUP_READ);
1906out:
1907        kfree(attr);
1908
1909        return ret;
1910}
1911
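/* Legacy entry point: only the standard attribute mask bits are accepted. */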
1912static int ib_uverbs_modify_qp(struct uverbs_attr_bundle *attrs)
1913{
1914        struct ib_uverbs_ex_modify_qp cmd;
1915        int ret;
1916
1917        ret = uverbs_request(attrs, &cmd.base, sizeof(cmd.base));
1918        if (ret)
1919                return ret;
1920
1921        if (cmd.base.attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1922                return -EOPNOTSUPP;
1923
1924        return modify_qp(attrs, &cmd);
1925}
1926
1927static int ib_uverbs_ex_modify_qp(struct uverbs_attr_bundle *attrs)
1928{
1929        struct ib_uverbs_ex_modify_qp cmd;
1930        struct ib_uverbs_ex_modify_qp_resp resp = {
1931                .response_length = uverbs_response_length(attrs, sizeof(resp))
1932        };
1933        int ret;
1934
1935        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
1936        if (ret)
1937                return ret;
1938
1939        /*
1940         * Last bit is reserved for extending the attr_mask by
1941         * using another field.
1942         */
1943        if (cmd.base.attr_mask & ~(IB_QP_ATTR_STANDARD_BITS | IB_QP_RATE_LIMIT))
1944                return -EOPNOTSUPP;
1945
1946        ret = modify_qp(attrs, &cmd);
1947        if (ret)
1948                return ret;
1949
1950        return uverbs_response(attrs, &resp, sizeof(resp));
1951}
1952
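/*
 * Destroy a QP and report how many asynchronous events were raised on it,
 * so userspace can reconcile the count against the events it has read.
 */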
1953static int ib_uverbs_destroy_qp(struct uverbs_attr_bundle *attrs)
1954{
1955        struct ib_uverbs_destroy_qp      cmd;
1956        struct ib_uverbs_destroy_qp_resp resp;
1957        struct ib_uobject               *uobj;
1958        struct ib_uqp_object            *obj;
1959        int ret;
1960
1961        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
1962        if (ret)
1963                return ret;
1964
1965        uobj = uobj_get_destroy(UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
1966        if (IS_ERR(uobj))
1967                return PTR_ERR(uobj);
1968
1969        obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
1970        memset(&resp, 0, sizeof(resp));
1971        resp.events_reported = obj->uevent.events_reported;
1972
1973        uobj_put_destroy(uobj);
1974
1975        return uverbs_response(attrs, &resp, sizeof(resp));
1976}
1977
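/*
 * Allocate a work request with room for its scatter/gather list in the same
 * allocation:
 *
 *   | wr struct, padded to ib_sge alignment | num_sge * struct ib_sge |
 *
 * The guard rejects any num_sge that would overflow the size computation.
 */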
1978static void *alloc_wr(size_t wr_size, __u32 num_sge)
1979{
1980        if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof(struct ib_sge))) /
1981                               sizeof(struct ib_sge))
1982                return NULL;
1983
1984        return kmalloc(ALIGN(wr_size, sizeof(struct ib_sge)) +
1985                               num_sge * sizeof(struct ib_sge),
1986                       GFP_KERNEL);
1987}
1988
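/*
 * Parse a chain of wire-format send work requests into kernel ib_send_wr
 * structures (with the opcode-specific UD/RDMA/atomic wrappers) and post
 * them to the driver. For UD QPs each WR pins an AH reference that is only
 * dropped after the post has completed. On failure, resp.bad_wr reports
 * the 1-based index of the WR the driver rejected.
 */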
1989static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
1990{
1991        struct ib_uverbs_post_send      cmd;
1992        struct ib_uverbs_post_send_resp resp;
1993        struct ib_uverbs_send_wr       *user_wr;
1994        struct ib_send_wr              *wr = NULL, *last, *next;
1995        const struct ib_send_wr        *bad_wr;
1996        struct ib_qp                   *qp;
1997        int                             i, sg_ind;
1998        int                             is_ud;
1999        int ret, ret2;
2000        size_t                          next_size;
2001        const struct ib_sge __user *sgls;
2002        const void __user *wqes;
2003        struct uverbs_req_iter iter;
2004
2005        ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
2006        if (ret)
2007                return ret;

        /*
         * The copy loop below reads struct ib_uverbs_send_wr fields out of a
         * buffer only cmd.wqe_size bytes long, so a wqe_size smaller than
         * that struct would allow an out-of-bounds read; reject it, mirroring
         * the check in ib_uverbs_unmarshall_recv().
         */
        if (cmd.wqe_size < sizeof(struct ib_uverbs_send_wr))
                return -EINVAL;
2008        wqes = uverbs_request_next_ptr(&iter, cmd.wqe_size * cmd.wr_count);
2009        if (IS_ERR(wqes))
2010                return PTR_ERR(wqes);
2011        sgls = uverbs_request_next_ptr(
2012                &iter, cmd.sge_count * sizeof(struct ib_uverbs_sge));
2013        if (IS_ERR(sgls))
2014                return PTR_ERR(sgls);
2015        ret = uverbs_request_finish(&iter);
2016        if (ret)
2017                return ret;
2018
2019        user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
2020        if (!user_wr)
2021                return -ENOMEM;
2022
2023        qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
2024        if (!qp) {
2025                ret = -EINVAL;
2026                goto out;
2027        }
2028
2029        is_ud = qp->qp_type == IB_QPT_UD;
2030        sg_ind = 0;
2031        last = NULL;
2032        for (i = 0; i < cmd.wr_count; ++i) {
2033                if (copy_from_user(user_wr, wqes + i * cmd.wqe_size,
2034                                   cmd.wqe_size)) {
2035                        ret = -EFAULT;
2036                        goto out_put;
2037                }
2038
2039                if (user_wr->num_sge + sg_ind > cmd.sge_count) {
2040                        ret = -EINVAL;
2041                        goto out_put;
2042                }
2043
2044                if (is_ud) {
2045                        struct ib_ud_wr *ud;
2046
2047                        if (user_wr->opcode != IB_WR_SEND &&
2048                            user_wr->opcode != IB_WR_SEND_WITH_IMM) {
2049                                ret = -EINVAL;
2050                                goto out_put;
2051                        }
2052
2053                        next_size = sizeof(*ud);
2054                        ud = alloc_wr(next_size, user_wr->num_sge);
2055                        if (!ud) {
2056                                ret = -ENOMEM;
2057                                goto out_put;
2058                        }
2059
2060                        ud->ah = uobj_get_obj_read(ah, UVERBS_OBJECT_AH,
2061                                                   user_wr->wr.ud.ah, attrs);
2062                        if (!ud->ah) {
2063                                kfree(ud);
2064                                ret = -EINVAL;
2065                                goto out_put;
2066                        }
2067                        ud->remote_qpn = user_wr->wr.ud.remote_qpn;
2068                        ud->remote_qkey = user_wr->wr.ud.remote_qkey;
2069
2070                        next = &ud->wr;
2071                } else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
2072                           user_wr->opcode == IB_WR_RDMA_WRITE ||
2073                           user_wr->opcode == IB_WR_RDMA_READ) {
2074                        struct ib_rdma_wr *rdma;
2075
2076                        next_size = sizeof(*rdma);
2077                        rdma = alloc_wr(next_size, user_wr->num_sge);
2078                        if (!rdma) {
2079                                ret = -ENOMEM;
2080                                goto out_put;
2081                        }
2082
2083                        rdma->remote_addr = user_wr->wr.rdma.remote_addr;
2084                        rdma->rkey = user_wr->wr.rdma.rkey;
2085
2086                        next = &rdma->wr;
2087                } else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
2088                           user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2089                        struct ib_atomic_wr *atomic;
2090
2091                        next_size = sizeof(*atomic);
2092                        atomic = alloc_wr(next_size, user_wr->num_sge);
2093                        if (!atomic) {
2094                                ret = -ENOMEM;
2095                                goto out_put;
2096                        }
2097
2098                        atomic->remote_addr = user_wr->wr.atomic.remote_addr;
2099                        atomic->compare_add = user_wr->wr.atomic.compare_add;
2100                        atomic->swap = user_wr->wr.atomic.swap;
2101                        atomic->rkey = user_wr->wr.atomic.rkey;
2102
2103                        next = &atomic->wr;
2104                } else if (user_wr->opcode == IB_WR_SEND ||
2105                           user_wr->opcode == IB_WR_SEND_WITH_IMM ||
2106                           user_wr->opcode == IB_WR_SEND_WITH_INV) {
2107                        next_size = sizeof(*next);
2108                        next = alloc_wr(next_size, user_wr->num_sge);
2109                        if (!next) {
2110                                ret = -ENOMEM;
2111                                goto out_put;
2112                        }
2113                } else {
2114                        ret = -EINVAL;
2115                        goto out_put;
2116                }
2117
2118                if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
2119                    user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
2120                        next->ex.imm_data =
2121                                        (__be32 __force) user_wr->ex.imm_data;
2122                } else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
2123                        next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
2124                }
2125
2126                if (!last)
2127                        wr = next;
2128                else
2129                        last->next = next;
2130                last = next;
2131
2132                next->next       = NULL;
2133                next->wr_id      = user_wr->wr_id;
2134                next->num_sge    = user_wr->num_sge;
2135                next->opcode     = user_wr->opcode;
2136                next->send_flags = user_wr->send_flags;
2137
2138                if (next->num_sge) {
2139                        next->sg_list = (void *) next +
2140                                ALIGN(next_size, sizeof(struct ib_sge));
2141                        if (copy_from_user(next->sg_list, sgls + sg_ind,
2142                                           next->num_sge *
2143                                                   sizeof(struct ib_sge))) {
2144                                ret = -EFAULT;
2145                                goto out_put;
2146                        }
2147                        sg_ind += next->num_sge;
2148                } else
2149                        next->sg_list = NULL;
2150        }
2151
2152        resp.bad_wr = 0;
2153        ret = qp->device->ops.post_send(qp->real_qp, wr, &bad_wr);
2154        if (ret)
2155                for (next = wr; next; next = next->next) {
2156                        ++resp.bad_wr;
2157                        if (next == bad_wr)
2158                                break;
2159                }
2160
2161        ret2 = uverbs_response(attrs, &resp, sizeof(resp));
2162        if (ret2)
2163                ret = ret2;
2164
2165out_put:
2166        rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
2167                                UVERBS_LOOKUP_READ);
2168
2169        while (wr) {
2170                if (is_ud && ud_wr(wr)->ah)
2171                        uobj_put_obj_read(ud_wr(wr)->ah);
2172                next = wr->next;
2173                kfree(wr);
2174                wr = next;
2175        }
2176
2177out:
2178        kfree(user_wr);
2179
2180        return ret;
2181}
2182
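/*
 * Convert an array of wire-format receive WRs plus a shared SGE array into
 * a kmalloc'ed ib_recv_wr chain; each node carries its SG list inline,
 * using the same layout as alloc_wr(). Returns an ERR_PTR on any
 * validation or copy failure.
 */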
2183static struct ib_recv_wr *
2184ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
2185                          u32 wqe_size, u32 sge_count)
2186{
2187        struct ib_uverbs_recv_wr *user_wr;
2188        struct ib_recv_wr        *wr = NULL, *last, *next;
2189        int                       sg_ind;
2190        int                       i;
2191        int                       ret;
2192        const struct ib_sge __user *sgls;
2193        const void __user *wqes;
2194
2195        if (wqe_size < sizeof(struct ib_uverbs_recv_wr))
2196                return ERR_PTR(-EINVAL);
2197
2198        wqes = uverbs_request_next_ptr(iter, wqe_size * wr_count);
2199        if (IS_ERR(wqes))
2200                return ERR_CAST(wqes);
2201        sgls = uverbs_request_next_ptr(
2202                iter, sge_count * sizeof(struct ib_uverbs_sge));
2203        if (IS_ERR(sgls))
2204                return ERR_CAST(sgls);
2205        ret = uverbs_request_finish(iter);
2206        if (ret)
2207                return ERR_PTR(ret);
2208
2209        user_wr = kmalloc(wqe_size, GFP_KERNEL);
2210        if (!user_wr)
2211                return ERR_PTR(-ENOMEM);
2212
2213        sg_ind = 0;
2214        last = NULL;
2215        for (i = 0; i < wr_count; ++i) {
2216                if (copy_from_user(user_wr, wqes + i * wqe_size,
2217                                   wqe_size)) {
2218                        ret = -EFAULT;
2219                        goto err;
2220                }
2221
2222                if (user_wr->num_sge + sg_ind > sge_count) {
2223                        ret = -EINVAL;
2224                        goto err;
2225                }
2226
2227                if (user_wr->num_sge >=
2228                    (U32_MAX - ALIGN(sizeof(*next), sizeof(struct ib_sge))) /
2229                            sizeof(struct ib_sge)) {
2230                        ret = -EINVAL;
2231                        goto err;
2232                }
2233
2234                next = kmalloc(ALIGN(sizeof(*next), sizeof(struct ib_sge)) +
2235                                       user_wr->num_sge * sizeof(struct ib_sge),
2236                               GFP_KERNEL);
2237                if (!next) {
2238                        ret = -ENOMEM;
2239                        goto err;
2240                }
2241
2242                if (!last)
2243                        wr = next;
2244                else
2245                        last->next = next;
2246                last = next;
2247
2248                next->next       = NULL;
2249                next->wr_id      = user_wr->wr_id;
2250                next->num_sge    = user_wr->num_sge;
2251
2252                if (next->num_sge) {
2253                        next->sg_list = (void *)next +
2254                                ALIGN(sizeof(*next), sizeof(struct ib_sge));
2255                        if (copy_from_user(next->sg_list, sgls + sg_ind,
2256                                           next->num_sge *
2257                                                   sizeof(struct ib_sge))) {
2258                                ret = -EFAULT;
2259                                goto err;
2260                        }
2261                        sg_ind += next->num_sge;
2262                } else
2263                        next->sg_list = NULL;
2264        }
2265
2266        kfree(user_wr);
2267        return wr;
2268
2269err:
2270        kfree(user_wr);
2271
2272        while (wr) {
2273                next = wr->next;
2274                kfree(wr);
2275                wr = next;
2276        }
2277
2278        return ERR_PTR(ret);
2279}
2280
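/* Post receive WRs unmarshalled from userspace directly to the driver. */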
2281static int ib_uverbs_post_recv(struct uverbs_attr_bundle *attrs)
2282{
2283        struct ib_uverbs_post_recv      cmd;
2284        struct ib_uverbs_post_recv_resp resp;
2285        struct ib_recv_wr              *wr, *next;
2286        const struct ib_recv_wr        *bad_wr;
2287        struct ib_qp                   *qp;
2288        int ret, ret2;
2289        struct uverbs_req_iter iter;
2290
2291        ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
2292        if (ret)
2293                return ret;
2294
2295        wr = ib_uverbs_unmarshall_recv(&iter, cmd.wr_count, cmd.wqe_size,
2296                                       cmd.sge_count);
2297        if (IS_ERR(wr))
2298                return PTR_ERR(wr);
2299
2300        qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
2301        if (!qp) {
2302                ret = -EINVAL;
2303                goto out;
2304        }
2305
2306        resp.bad_wr = 0;
2307        ret = qp->device->ops.post_recv(qp->real_qp, wr, &bad_wr);
2308
2309        rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
2310                                UVERBS_LOOKUP_READ);
2311        if (ret) {
2312                for (next = wr; next; next = next->next) {
2313                        ++resp.bad_wr;
2314                        if (next == bad_wr)
2315                                break;
2316                }
2317        }
2318
2319        ret2 = uverbs_response(attrs, &resp, sizeof(resp));
2320        if (ret2)
2321                ret = ret2;
2322out:
2323        while (wr) {
2324                next = wr->next;
2325                kfree(wr);
2326                wr = next;
2327        }
2328
2329        return ret;
2330}
2331
2332static int ib_uverbs_post_srq_recv(struct uverbs_attr_bundle *attrs)
2333{
2334        struct ib_uverbs_post_srq_recv      cmd;
2335        struct ib_uverbs_post_srq_recv_resp resp;
2336        struct ib_recv_wr                  *wr, *next;
2337        const struct ib_recv_wr            *bad_wr;
2338        struct ib_srq                      *srq;
2339        int ret, ret2;
2340        struct uverbs_req_iter iter;
2341
2342        ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
2343        if (ret)
2344                return ret;
2345
2346        wr = ib_uverbs_unmarshall_recv(&iter, cmd.wr_count, cmd.wqe_size,
2347                                       cmd.sge_count);
2348        if (IS_ERR(wr))
2349                return PTR_ERR(wr);
2350
2351        srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
2352        if (!srq) {
2353                ret = -EINVAL;
2354                goto out;
2355        }
2356
2357        resp.bad_wr = 0;
2358        ret = srq->device->ops.post_srq_recv(srq, wr, &bad_wr);
2359
2360        rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
2361                                UVERBS_LOOKUP_READ);
2362
2363        if (ret)
2364                for (next = wr; next; next = next->next) {
2365                        ++resp.bad_wr;
2366                        if (next == bad_wr)
2367                                break;
2368                }
2369
2370        ret2 = uverbs_response(attrs, &resp, sizeof(resp));
2371        if (ret2)
2372                ret = ret2;
2373
2374out:
2375        while (wr) {
2376                next = wr->next;
2377                kfree(wr);
2378                wr = next;
2379        }
2380
2381        return ret;
2382}
2383
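/*
 * Create an address handle from the wire attributes. As in
 * copy_ah_attr_from_uverbs(), the GRH fields are honoured only when
 * is_global is set.
 */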
2384static int ib_uverbs_create_ah(struct uverbs_attr_bundle *attrs)
2385{
2386        struct ib_uverbs_create_ah       cmd;
2387        struct ib_uverbs_create_ah_resp  resp;
2388        struct ib_uobject               *uobj;
2389        struct ib_pd                    *pd;
2390        struct ib_ah                    *ah;
2391        struct rdma_ah_attr             attr = {};
2392        int ret;
2393        struct ib_device *ib_dev;
2394
2395        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
2396        if (ret)
2397                return ret;
2398
2399        uobj = uobj_alloc(UVERBS_OBJECT_AH, attrs, &ib_dev);
2400        if (IS_ERR(uobj))
2401                return PTR_ERR(uobj);
2402
2403        if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num)) {
2404                ret = -EINVAL;
2405                goto err;
2406        }
2407
2408        pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
2409        if (!pd) {
2410                ret = -EINVAL;
2411                goto err;
2412        }
2413
2414        attr.type = rdma_ah_find_type(ib_dev, cmd.attr.port_num);
2415        rdma_ah_set_make_grd(&attr, false);
2416        rdma_ah_set_dlid(&attr, cmd.attr.dlid);
2417        rdma_ah_set_sl(&attr, cmd.attr.sl);
2418        rdma_ah_set_path_bits(&attr, cmd.attr.src_path_bits);
2419        rdma_ah_set_static_rate(&attr, cmd.attr.static_rate);
2420        rdma_ah_set_port_num(&attr, cmd.attr.port_num);
2421
2422        if (cmd.attr.is_global) {
2423                rdma_ah_set_grh(&attr, NULL, cmd.attr.grh.flow_label,
2424                                cmd.attr.grh.sgid_index,
2425                                cmd.attr.grh.hop_limit,
2426                                cmd.attr.grh.traffic_class);
2427                rdma_ah_set_dgid_raw(&attr, cmd.attr.grh.dgid);
2428        } else {
2429                rdma_ah_set_ah_flags(&attr, 0);
2430        }
2431
2432        ah = rdma_create_user_ah(pd, &attr, &attrs->driver_udata);
2433        if (IS_ERR(ah)) {
2434                ret = PTR_ERR(ah);
2435                goto err_put;
2436        }
2437
2438        ah->uobject  = uobj;
2439        uobj->user_handle = cmd.user_handle;
2440        uobj->object = ah;
2441        uobj_put_obj_read(pd);
2442        uobj_finalize_uobj_create(uobj, attrs);
2443
2444        resp.ah_handle = uobj->id;
2445        return uverbs_response(attrs, &resp, sizeof(resp));
2446
2447err_put:
2448        uobj_put_obj_read(pd);
2449err:
2450        uobj_alloc_abort(uobj, attrs);
2451        return ret;
2452}
2453
2454static int ib_uverbs_destroy_ah(struct uverbs_attr_bundle *attrs)
2455{
2456        struct ib_uverbs_destroy_ah cmd;
2457        int ret;
2458
2459        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
2460        if (ret)
2461                return ret;
2462
2463        return uobj_perform_destroy(UVERBS_OBJECT_AH, cmd.ah_handle, attrs);
2464}
2465
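/*
 * Attach a QP to a multicast group. Attachments are tracked on the per-QP
 * mcast_list under mcast_lock, so a duplicate attach is a silent no-op and
 * detach (and teardown) can find the entry later.
 */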
2466static int ib_uverbs_attach_mcast(struct uverbs_attr_bundle *attrs)
2467{
2468        struct ib_uverbs_attach_mcast cmd;
2469        struct ib_qp                 *qp;
2470        struct ib_uqp_object         *obj;
2471        struct ib_uverbs_mcast_entry *mcast;
2472        int                           ret;
2473
2474        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
2475        if (ret)
2476                return ret;
2477
2478        qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
2479        if (!qp)
2480                return -EINVAL;
2481
2482        obj = qp->uobject;
2483
2484        mutex_lock(&obj->mcast_lock);
2485        list_for_each_entry(mcast, &obj->mcast_list, list)
2486                if (cmd.mlid == mcast->lid &&
2487                    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2488                        ret = 0;
2489                        goto out_put;
2490                }
2491
2492        mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
2493        if (!mcast) {
2494                ret = -ENOMEM;
2495                goto out_put;
2496        }
2497
2498        mcast->lid = cmd.mlid;
2499        memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);
2500
2501        ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
2502        if (!ret)
2503                list_add_tail(&mcast->list, &obj->mcast_list);
2504        else
2505                kfree(mcast);
2506
2507out_put:
2508        mutex_unlock(&obj->mcast_lock);
2509        rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
2510                                UVERBS_LOOKUP_READ);
2511
2512        return ret;
2513}
2514
2515static int ib_uverbs_detach_mcast(struct uverbs_attr_bundle *attrs)
2516{
2517        struct ib_uverbs_detach_mcast cmd;
2518        struct ib_uqp_object         *obj;
2519        struct ib_qp                 *qp;
2520        struct ib_uverbs_mcast_entry *mcast;
2521        int                           ret;
2522        bool                          found = false;
2523
2524        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
2525        if (ret)
2526                return ret;
2527
2528        qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
2529        if (!qp)
2530                return -EINVAL;
2531
2532        obj = qp->uobject;
2533        mutex_lock(&obj->mcast_lock);
2534
2535        list_for_each_entry(mcast, &obj->mcast_list, list)
2536                if (cmd.mlid == mcast->lid &&
2537                    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2538                        list_del(&mcast->list);
2539                        kfree(mcast);
2540                        found = true;
2541                        break;
2542                }
2543
2544        if (!found) {
2545                ret = -EINVAL;
2546                goto out_put;
2547        }
2548
2549        ret = ib_detach_mcast(qp, (union ib_gid *)cmd.gid, cmd.mlid);
2550
2551out_put:
2552        mutex_unlock(&obj->mcast_lock);
2553        rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
2554                                UVERBS_LOOKUP_READ);
2555        return ret;
2556}
2557
2558struct ib_uflow_resources *flow_resources_alloc(size_t num_specs)
2559{
2560        struct ib_uflow_resources *resources;
2561
2562        resources = kzalloc(sizeof(*resources), GFP_KERNEL);
2563
2564        if (!resources)
2565                return NULL;
2566
2567        if (!num_specs)
2568                goto out;
2569
2570        resources->counters =
2571                kcalloc(num_specs, sizeof(*resources->counters), GFP_KERNEL);
2572        resources->collection =
2573                kcalloc(num_specs, sizeof(*resources->collection), GFP_KERNEL);
2574
2575        if (!resources->counters || !resources->collection)
2576                goto err;
2577
2578out:
2579        resources->max = num_specs;
2580        return resources;
2581
2582err:
2583        kfree(resources->counters);
        kfree(resources->collection); /* either kcalloc() above can fail alone */
2584        kfree(resources);
2585
2586        return NULL;
2587}
2588EXPORT_SYMBOL(flow_resources_alloc);
2589
2590void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res)
2591{
2592        unsigned int i;
2593
2594        if (!uflow_res)
2595                return;
2596
2597        for (i = 0; i < uflow_res->collection_num; i++)
2598                atomic_dec(&uflow_res->collection[i]->usecnt);
2599
2600        for (i = 0; i < uflow_res->counters_num; i++)
2601                atomic_dec(&uflow_res->counters[i]->usecnt);
2602
2603        kfree(uflow_res->collection);
2604        kfree(uflow_res->counters);
2605        kfree(uflow_res);
2606}
2607EXPORT_SYMBOL(ib_uverbs_flow_resources_free);
2608
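/*
 * Record a flow-spec action object in uflow_res and take a usecnt
 * reference on it; the reference is dropped again by
 * ib_uverbs_flow_resources_free() when the flow is torn down.
 */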
2609void flow_resources_add(struct ib_uflow_resources *uflow_res,
2610                        enum ib_flow_spec_type type,
2611                        void *ibobj)
2612{
2613        WARN_ON(uflow_res->num >= uflow_res->max);
2614
2615        switch (type) {
2616        case IB_FLOW_SPEC_ACTION_HANDLE:
2617                atomic_inc(&((struct ib_flow_action *)ibobj)->usecnt);
2618                uflow_res->collection[uflow_res->collection_num++] =
2619                        (struct ib_flow_action *)ibobj;
2620                break;
2621        case IB_FLOW_SPEC_ACTION_COUNT:
2622                atomic_inc(&((struct ib_counters *)ibobj)->usecnt);
2623                uflow_res->counters[uflow_res->counters_num++] =
2624                        (struct ib_counters *)ibobj;
2625                break;
2626        default:
2627                WARN_ON(1);
2628        }
2629
2630        uflow_res->num++;
2631}
2632EXPORT_SYMBOL(flow_resources_add);
2633
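/*
 * Translate action-type flow specs. Referenced uobjects (flow actions and
 * counters) are held only long enough to stash the pointer; the lasting
 * reference is the usecnt taken via flow_resources_add().
 */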
2634static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs,
2635                                       struct ib_uverbs_flow_spec *kern_spec,
2636                                       union ib_flow_spec *ib_spec,
2637                                       struct ib_uflow_resources *uflow_res)
2638{
2639        ib_spec->type = kern_spec->type;
2640        switch (ib_spec->type) {
2641        case IB_FLOW_SPEC_ACTION_TAG:
2642                if (kern_spec->flow_tag.size !=
2643                    sizeof(struct ib_uverbs_flow_spec_action_tag))
2644                        return -EINVAL;
2645
2646                ib_spec->flow_tag.size = sizeof(struct ib_flow_spec_action_tag);
2647                ib_spec->flow_tag.tag_id = kern_spec->flow_tag.tag_id;
2648                break;
2649        case IB_FLOW_SPEC_ACTION_DROP:
2650                if (kern_spec->drop.size !=
2651                    sizeof(struct ib_uverbs_flow_spec_action_drop))
2652                        return -EINVAL;
2653
2654                ib_spec->drop.size = sizeof(struct ib_flow_spec_action_drop);
2655                break;
2656        case IB_FLOW_SPEC_ACTION_HANDLE:
2657                if (kern_spec->action.size !=
2658                    sizeof(struct ib_uverbs_flow_spec_action_handle))
2659                        return -EOPNOTSUPP;
2660                ib_spec->action.act = uobj_get_obj_read(flow_action,
2661                                                        UVERBS_OBJECT_FLOW_ACTION,
2662                                                        kern_spec->action.handle,
2663                                                        attrs);
2664                if (!ib_spec->action.act)
2665                        return -EINVAL;
2666                ib_spec->action.size =
2667                        sizeof(struct ib_flow_spec_action_handle);
2668                flow_resources_add(uflow_res,
2669                                   IB_FLOW_SPEC_ACTION_HANDLE,
2670                                   ib_spec->action.act);
2671                uobj_put_obj_read(ib_spec->action.act);
2672                break;
2673        case IB_FLOW_SPEC_ACTION_COUNT:
2674                if (kern_spec->flow_count.size !=
2675                        sizeof(struct ib_uverbs_flow_spec_action_count))
2676                        return -EINVAL;
2677                ib_spec->flow_count.counters =
2678                        uobj_get_obj_read(counters,
2679                                          UVERBS_OBJECT_COUNTERS,
2680                                          kern_spec->flow_count.handle,
2681                                          attrs);
2682                if (!ib_spec->flow_count.counters)
2683                        return -EINVAL;
2684                ib_spec->flow_count.size =
2685                                sizeof(struct ib_flow_spec_action_count);
2686                flow_resources_add(uflow_res,
2687                                   IB_FLOW_SPEC_ACTION_COUNT,
2688                                   ib_spec->flow_count.counters);
2689                uobj_put_obj_read(ib_spec->flow_count.counters);
2690                break;
2691        default:
2692                return -EINVAL;
2693        }
2694        return 0;
2695}
2696
2697static ssize_t spec_filter_size(const void *kern_spec_filter, u16 kern_filter_size,
2698                                u16 ib_real_filter_sz)
2699{
2700        /*
2701         * User space filter structures must be 64 bit aligned; otherwise this
2702         * check may pass, but we won't handle any additional new attributes.
2703         */
2704
2705        if (kern_filter_size > ib_real_filter_sz) {
2706                if (memchr_inv(kern_spec_filter +
2707                               ib_real_filter_sz, 0,
2708                               kern_filter_size - ib_real_filter_sz))
2709                        return -EINVAL;
2710                return ib_real_filter_sz;
2711        }
2712        return kern_filter_size;
2713}
2714
2715int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
2716                                          const void *kern_spec_mask,
2717                                          const void *kern_spec_val,
2718                                          size_t kern_filter_sz,
2719                                          union ib_flow_spec *ib_spec)
2720{
2721        ssize_t actual_filter_sz;
2722        ssize_t ib_filter_sz;
2723
2724        /* User flow spec size must be aligned to 4 bytes */
2725        if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
2726                return -EINVAL;
2727
2728        ib_spec->type = type;
2729
2730        if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
2731                return -EINVAL;
2732
2733        switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
2734        case IB_FLOW_SPEC_ETH:
2735                ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
2736                actual_filter_sz = spec_filter_size(kern_spec_mask,
2737                                                    kern_filter_sz,
2738                                                    ib_filter_sz);
2739                if (actual_filter_sz <= 0)
2740                        return -EINVAL;
2741                ib_spec->size = sizeof(struct ib_flow_spec_eth);
2742                memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
2743                memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
2744                break;
2745        case IB_FLOW_SPEC_IPV4:
2746                ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
2747                actual_filter_sz = spec_filter_size(kern_spec_mask,
2748                                                    kern_filter_sz,
2749                                                    ib_filter_sz);
2750                if (actual_filter_sz <= 0)
2751                        return -EINVAL;
2752                ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
2753                memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
2754                memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
2755                break;
2756        case IB_FLOW_SPEC_IPV6:
2757                ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
2758                actual_filter_sz = spec_filter_size(kern_spec_mask,
2759                                                    kern_filter_sz,
2760                                                    ib_filter_sz);
2761                if (actual_filter_sz <= 0)
2762                        return -EINVAL;
2763                ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
2764                memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
2765                memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);
2766
2767                if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
2768                    (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
2769                        return -EINVAL;
2770                break;
2771        case IB_FLOW_SPEC_TCP:
2772        case IB_FLOW_SPEC_UDP:
2773                ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
2774                actual_filter_sz = spec_filter_size(kern_spec_mask,
2775                                                    kern_filter_sz,
2776                                                    ib_filter_sz);
2777                if (actual_filter_sz <= 0)
2778                        return -EINVAL;
2779                ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
2780                memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
2781                memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
2782                break;
2783        case IB_FLOW_SPEC_VXLAN_TUNNEL:
2784                ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
2785                actual_filter_sz = spec_filter_size(kern_spec_mask,
2786                                                    kern_filter_sz,
2787                                                    ib_filter_sz);
2788                if (actual_filter_sz <= 0)
2789                        return -EINVAL;
2790                ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
2791                memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
2792                memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);
2793
2794                if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
2795                    (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
2796                        return -EINVAL;
2797                break;
2798        case IB_FLOW_SPEC_ESP:
2799                ib_filter_sz = offsetof(struct ib_flow_esp_filter, real_sz);
2800                actual_filter_sz = spec_filter_size(kern_spec_mask,
2801                                                    kern_filter_sz,
2802                                                    ib_filter_sz);
2803                if (actual_filter_sz <= 0)
2804                        return -EINVAL;
2805                ib_spec->esp.size = sizeof(struct ib_flow_spec_esp);
2806                memcpy(&ib_spec->esp.val, kern_spec_val, actual_filter_sz);
2807                memcpy(&ib_spec->esp.mask, kern_spec_mask, actual_filter_sz);
2808                break;
2809        case IB_FLOW_SPEC_GRE:
2810                ib_filter_sz = offsetof(struct ib_flow_gre_filter, real_sz);
2811                actual_filter_sz = spec_filter_size(kern_spec_mask,
2812                                                    kern_filter_sz,
2813                                                    ib_filter_sz);
2814                if (actual_filter_sz <= 0)
2815                        return -EINVAL;
2816                ib_spec->gre.size = sizeof(struct ib_flow_spec_gre);
2817                memcpy(&ib_spec->gre.val, kern_spec_val, actual_filter_sz);
2818                memcpy(&ib_spec->gre.mask, kern_spec_mask, actual_filter_sz);
2819                break;
2820        case IB_FLOW_SPEC_MPLS:
2821                ib_filter_sz = offsetof(struct ib_flow_mpls_filter, real_sz);
2822                actual_filter_sz = spec_filter_size(kern_spec_mask,
2823                                                    kern_filter_sz,
2824                                                    ib_filter_sz);
2825                if (actual_filter_sz <= 0)
2826                        return -EINVAL;
2827                ib_spec->mpls.size = sizeof(struct ib_flow_spec_mpls);
2828                memcpy(&ib_spec->mpls.val, kern_spec_val, actual_filter_sz);
2829                memcpy(&ib_spec->mpls.mask, kern_spec_mask, actual_filter_sz);
2830                break;
2831        default:
2832                return -EINVAL;
2833        }
2834        return 0;
2835}
2836
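/*
 * A wire filter spec is laid out as header | value | mask, with value and
 * mask of equal size, hence the division by two once the header has been
 * subtracted (with an overflow check on the subtraction).
 */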
2837static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
2838                                       union ib_flow_spec *ib_spec)
2839{
2840        size_t kern_filter_sz;
2841        void *kern_spec_mask;
2842        void *kern_spec_val;
2843
2844        if (check_sub_overflow((size_t)kern_spec->hdr.size,
2845                               sizeof(struct ib_uverbs_flow_spec_hdr),
2846                               &kern_filter_sz))
2847                return -EINVAL;
2848
2849        kern_filter_sz /= 2;
2850
2851        kern_spec_val = (void *)kern_spec +
2852                sizeof(struct ib_uverbs_flow_spec_hdr);
2853        kern_spec_mask = kern_spec_val + kern_filter_sz;
2854
2855        return ib_uverbs_kern_spec_to_ib_spec_filter(kern_spec->type,
2856                                                     kern_spec_mask,
2857                                                     kern_spec_val,
2858                                                     kern_filter_sz, ib_spec);
2859}
2860
2861static int kern_spec_to_ib_spec(struct uverbs_attr_bundle *attrs,
2862                                struct ib_uverbs_flow_spec *kern_spec,
2863                                union ib_flow_spec *ib_spec,
2864                                struct ib_uflow_resources *uflow_res)
2865{
2866        if (kern_spec->reserved)
2867                return -EINVAL;
2868
2869        if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG)
2870                return kern_spec_to_ib_spec_action(attrs, kern_spec, ib_spec,
2871                                                   uflow_res);
2872        else
2873                return kern_spec_to_ib_spec_filter(kern_spec, ib_spec);
2874}
2875
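/*
 * Create a work queue (typically for RSS). The WQ takes usecnt references
 * on both its PD and CQ and is wired to the process's default async event
 * file for WQ events.
 */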
2876static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
2877{
2878        struct ib_uverbs_ex_create_wq cmd;
2879        struct ib_uverbs_ex_create_wq_resp resp = {};
2880        struct ib_uwq_object           *obj;
2881        int err = 0;
2882        struct ib_cq *cq;
2883        struct ib_pd *pd;
2884        struct ib_wq *wq;
2885        struct ib_wq_init_attr wq_init_attr = {};
2886        struct ib_device *ib_dev;
2887
2888        err = uverbs_request(attrs, &cmd, sizeof(cmd));
2889        if (err)
2890                return err;
2891
2892        if (cmd.comp_mask)
2893                return -EOPNOTSUPP;
2894
2895        obj = (struct ib_uwq_object *)uobj_alloc(UVERBS_OBJECT_WQ, attrs,
2896                                                 &ib_dev);
2897        if (IS_ERR(obj))
2898                return PTR_ERR(obj);
2899
2900        pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
2901        if (!pd) {
2902                err = -EINVAL;
2903                goto err_uobj;
2904        }
2905
2906        cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
2907        if (!cq) {
2908                err = -EINVAL;
2909                goto err_put_pd;
2910        }
2911
2912        wq_init_attr.cq = cq;
2913        wq_init_attr.max_sge = cmd.max_sge;
2914        wq_init_attr.max_wr = cmd.max_wr;
2915        wq_init_attr.wq_type = cmd.wq_type;
2916        wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
2917        wq_init_attr.create_flags = cmd.create_flags;
2918        INIT_LIST_HEAD(&obj->uevent.event_list);
2919        obj->uevent.uobject.user_handle = cmd.user_handle;
2920
2921        wq = pd->device->ops.create_wq(pd, &wq_init_attr, &attrs->driver_udata);
2922        if (IS_ERR(wq)) {
2923                err = PTR_ERR(wq);
2924                goto err_put_cq;
2925        }
2926
2927        wq->uobject = obj;
2928        obj->uevent.uobject.object = wq;
2929        wq->wq_type = wq_init_attr.wq_type;
2930        wq->cq = cq;
2931        wq->pd = pd;
2932        wq->device = pd->device;
2933        atomic_set(&wq->usecnt, 0);
2934        atomic_inc(&pd->usecnt);
2935        atomic_inc(&cq->usecnt);
2936        obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
2937        if (obj->uevent.event_file)
2938                uverbs_uobject_get(&obj->uevent.event_file->uobj);
2939
2940        uobj_put_obj_read(pd);
2941        rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
2942                                UVERBS_LOOKUP_READ);
2943        uobj_finalize_uobj_create(&obj->uevent.uobject, attrs);
2944
2945        resp.wq_handle = obj->uevent.uobject.id;
2946        resp.max_sge = wq_init_attr.max_sge;
2947        resp.max_wr = wq_init_attr.max_wr;
2948        resp.wqn = wq->wq_num;
2949        resp.response_length = uverbs_response_length(attrs, sizeof(resp));
2950        return uverbs_response(attrs, &resp, sizeof(resp));
2951
2952err_put_cq:
2953        rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
2954                                UVERBS_LOOKUP_READ);
2955err_put_pd:
2956        uobj_put_obj_read(pd);
2957err_uobj:
2958        uobj_alloc_abort(&obj->uevent.uobject, attrs);
2959
2960        return err;
2961}
2962
2963static int ib_uverbs_ex_destroy_wq(struct uverbs_attr_bundle *attrs)
2964{
2965        struct ib_uverbs_ex_destroy_wq  cmd;
2966        struct ib_uverbs_ex_destroy_wq_resp     resp = {};
2967        struct ib_uobject               *uobj;
2968        struct ib_uwq_object            *obj;
2969        int                             ret;
2970
2971        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
2972        if (ret)
2973                return ret;
2974
2975        if (cmd.comp_mask)
2976                return -EOPNOTSUPP;
2977
2978        resp.response_length = uverbs_response_length(attrs, sizeof(resp));
2979        uobj = uobj_get_destroy(UVERBS_OBJECT_WQ, cmd.wq_handle, attrs);
2980        if (IS_ERR(uobj))
2981                return PTR_ERR(uobj);
2982
2983        obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
2984        resp.events_reported = obj->uevent.events_reported;
2985
2986        uobj_put_destroy(uobj);
2987
2988        return uverbs_response(attrs, &resp, sizeof(resp));
2989}
2990
2991static int ib_uverbs_ex_modify_wq(struct uverbs_attr_bundle *attrs)
2992{
2993        struct ib_uverbs_ex_modify_wq cmd;
2994        struct ib_wq *wq;
2995        struct ib_wq_attr wq_attr = {};
2996        int ret;
2997
2998        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
2999        if (ret)
3000                return ret;
3001
3002        if (!cmd.attr_mask)
3003                return -EINVAL;
3004
3005        if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS))
3006                return -EINVAL;
3007
3008        /* Validate the states up front; an early return after
3009         * uobj_get_obj_read() below would leak the WQ read lock. */
3010        if ((cmd.attr_mask & IB_WQ_CUR_STATE) &&
3011            cmd.curr_wq_state > IB_WQS_ERR)
3012                return -EINVAL;
3013        if ((cmd.attr_mask & IB_WQ_STATE) && cmd.wq_state > IB_WQS_ERR)
3014                return -EINVAL;
3015
3016        wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, cmd.wq_handle, attrs);
3017        if (!wq)
3018                return -EINVAL;
3019
3020        if (cmd.attr_mask & IB_WQ_FLAGS) {
3021                wq_attr.flags = cmd.flags;
3022                wq_attr.flags_mask = cmd.flags_mask;
3023        }
3024
3025        if (cmd.attr_mask & IB_WQ_CUR_STATE)
3026                wq_attr.curr_wq_state = cmd.curr_wq_state;
3027        else
3028                wq_attr.curr_wq_state = wq->state;
3029
3030        if (cmd.attr_mask & IB_WQ_STATE)
3031                wq_attr.wq_state = cmd.wq_state;
3032        else
3033                wq_attr.wq_state = wq_attr.curr_wq_state;
3034
3035        ret = wq->device->ops.modify_wq(wq, &wq_attr, cmd.attr_mask,
3036                                        &attrs->driver_udata);
3037        rdma_lookup_put_uobject(&wq->uobject->uevent.uobject,
3038                                UVERBS_LOOKUP_READ);
3039        return ret;
3040}
3041
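    /*
     * The command arrives in two parts: the fixed header, then an array of
     * 1 << log_ind_tbl_size WQ handles. On success the indirection table
     * takes over the wqs array, so only wqs_handles is freed here.
     */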
3042static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
3043{
3044        struct ib_uverbs_ex_create_rwq_ind_table cmd;
3045        struct ib_uverbs_ex_create_rwq_ind_table_resp  resp = {};
3046        struct ib_uobject *uobj;
3047        int err;
3048        struct ib_rwq_ind_table_init_attr init_attr = {};
3049        struct ib_rwq_ind_table *rwq_ind_tbl;
3050        struct ib_wq **wqs = NULL;
3051        u32 *wqs_handles = NULL;
3052        struct ib_wq    *wq = NULL;
3053        int i, num_read_wqs;
3054        u32 num_wq_handles;
3055        struct uverbs_req_iter iter;
3056        struct ib_device *ib_dev;
3057
3058        err = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
3059        if (err)
3060                return err;
3061
3062        if (cmd.comp_mask)
3063                return -EOPNOTSUPP;
3064
3065        if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
3066                return -EINVAL;
3067
3068        num_wq_handles = 1 << cmd.log_ind_tbl_size;
3069        wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
3070                              GFP_KERNEL);
3071        if (!wqs_handles)
3072                return -ENOMEM;
3073
3074        err = uverbs_request_next(&iter, wqs_handles,
3075                                  num_wq_handles * sizeof(__u32));
3076        if (err)
3077                goto err_free;
3078
3079        err = uverbs_request_finish(&iter);
3080        if (err)
3081                goto err_free;
3082
3083        wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
3084        if (!wqs) {
3085                err = -ENOMEM;
3086                goto err_free;
3087        }
3088
3089        for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
3090                        num_read_wqs++) {
3091                wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ,
3092                                       wqs_handles[num_read_wqs], attrs);
3093                if (!wq) {
3094                        err = -EINVAL;
3095                        goto put_wqs;
3096                }
3097
3098                wqs[num_read_wqs] = wq;
3099                atomic_inc(&wqs[num_read_wqs]->usecnt);
3100        }
3101
3102        uobj = uobj_alloc(UVERBS_OBJECT_RWQ_IND_TBL, attrs, &ib_dev);
3103        if (IS_ERR(uobj)) {
3104                err = PTR_ERR(uobj);
3105                goto put_wqs;
3106        }
3107
3108        rwq_ind_tbl = rdma_zalloc_drv_obj(ib_dev, ib_rwq_ind_table);
3109        if (!rwq_ind_tbl) {
3110                err = -ENOMEM;
3111                goto err_uobj;
3112        }
3113
3114        init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
3115        init_attr.ind_tbl = wqs;
3116
3117        rwq_ind_tbl->ind_tbl = wqs;
3118        rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
3119        rwq_ind_tbl->uobject = uobj;
3120        uobj->object = rwq_ind_tbl;
3121        rwq_ind_tbl->device = ib_dev;
3122        atomic_set(&rwq_ind_tbl->usecnt, 0);
3123
3124        err = ib_dev->ops.create_rwq_ind_table(rwq_ind_tbl, &init_attr,
3125                                               &attrs->driver_udata);
3126        if (err)
3127                goto err_create;
3128
3129        for (i = 0; i < num_wq_handles; i++)
3130                rdma_lookup_put_uobject(&wqs[i]->uobject->uevent.uobject,
3131                                        UVERBS_LOOKUP_READ);
3132        kfree(wqs_handles);
3133        uobj_finalize_uobj_create(uobj, attrs);
3134
3135        resp.ind_tbl_handle = uobj->id;
3136        resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
3137        resp.response_length = uverbs_response_length(attrs, sizeof(resp));
3138        return uverbs_response(attrs, &resp, sizeof(resp));
3139
3140err_create:
3141        kfree(rwq_ind_tbl);
3142err_uobj:
3143        uobj_alloc_abort(uobj, attrs);
3144put_wqs:
3145        for (i = 0; i < num_read_wqs; i++) {
3146                rdma_lookup_put_uobject(&wqs[i]->uobject->uevent.uobject,
3147                                        UVERBS_LOOKUP_READ);
3148                atomic_dec(&wqs[i]->usecnt);
3149        }
3150err_free:
3151        kfree(wqs_handles);
3152        kfree(wqs);
3153        return err;
3154}
3155
3156static int ib_uverbs_ex_destroy_rwq_ind_table(struct uverbs_attr_bundle *attrs)
3157{
3158        struct ib_uverbs_ex_destroy_rwq_ind_table cmd;
3159        int ret;
3160
3161        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3162        if (ret)
3163                return ret;
3164
3165        if (cmd.comp_mask)
3166                return -EOPNOTSUPP;
3167
3168        return uobj_perform_destroy(UVERBS_OBJECT_RWQ_IND_TBL,
3169                                    cmd.ind_tbl_handle, attrs);
3170}
3171
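    /*
     * CREATE_FLOW is also a two-part command: the fixed header, then (when
     * specs are present) cmd.flow_attr.size bytes of flow specs that are
     * converted one at a time into the driver-visible ib_flow_attr.
     */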
3172static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
3173{
3174        struct ib_uverbs_create_flow      cmd;
3175        struct ib_uverbs_create_flow_resp resp = {};
3176        struct ib_uobject                 *uobj;
3177        struct ib_flow                    *flow_id;
3178        struct ib_uverbs_flow_attr        *kern_flow_attr;
3179        struct ib_flow_attr               *flow_attr;
3180        struct ib_qp                      *qp;
3181        struct ib_uflow_resources         *uflow_res;
3182        struct ib_uverbs_flow_spec_hdr    *kern_spec;
3183        struct uverbs_req_iter iter;
3184        int err;
3185        void *ib_spec;
3186        int i;
3187        struct ib_device *ib_dev;
3188
3189        err = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
3190        if (err)
3191                return err;
3192
3193        if (cmd.comp_mask)
3194                return -EINVAL;
3195
3196        if (!capable(CAP_NET_RAW))
3197                return -EPERM;
3198
3199        if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
3200                return -EINVAL;
3201
3202        if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
3203            ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
3204             (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
3205                return -EINVAL;
3206
3207        if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
3208                return -EINVAL;
3209
3210        if (cmd.flow_attr.size >
3211            (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
3212                return -EINVAL;
3213
3214        if (cmd.flow_attr.reserved[0] ||
3215            cmd.flow_attr.reserved[1])
3216                return -EINVAL;
3217
3218        if (cmd.flow_attr.num_of_specs) {
3219                kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
3220                                         GFP_KERNEL);
3221                if (!kern_flow_attr)
3222                        return -ENOMEM;
3223
3224                *kern_flow_attr = cmd.flow_attr;
3225                err = uverbs_request_next(&iter, &kern_flow_attr->flow_specs,
3226                                          cmd.flow_attr.size);
3227                if (err)
3228                        goto err_free_attr;
3229        } else {
3230                kern_flow_attr = &cmd.flow_attr;
3231        }
3232
3233        err = uverbs_request_finish(&iter);
3234        if (err)
3235                goto err_free_attr;
3236
3237        uobj = uobj_alloc(UVERBS_OBJECT_FLOW, attrs, &ib_dev);
3238        if (IS_ERR(uobj)) {
3239                err = PTR_ERR(uobj);
3240                goto err_free_attr;
3241        }
3242
3243        if (!rdma_is_port_valid(uobj->context->device, cmd.flow_attr.port)) {
3244                err = -EINVAL;
3245                goto err_uobj;
3246        }
3247
3248        qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
3249        if (!qp) {
3250                err = -EINVAL;
3251                goto err_uobj;
3252        }
3253
3254        if (qp->qp_type != IB_QPT_UD && qp->qp_type != IB_QPT_RAW_PACKET) {
3255                err = -EINVAL;
3256                goto err_put;
3257        }
3258
3259        flow_attr = kzalloc(struct_size(flow_attr, flows,
3260                                cmd.flow_attr.num_of_specs), GFP_KERNEL);
3261        if (!flow_attr) {
3262                err = -ENOMEM;
3263                goto err_put;
3264        }
3265        uflow_res = flow_resources_alloc(cmd.flow_attr.num_of_specs);
3266        if (!uflow_res) {
3267                err = -ENOMEM;
3268                goto err_free_flow_attr;
3269        }
3270
3271        flow_attr->type = kern_flow_attr->type;
3272        flow_attr->priority = kern_flow_attr->priority;
3273        flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
3274        flow_attr->port = kern_flow_attr->port;
3275        flow_attr->flags = kern_flow_attr->flags;
3276        flow_attr->size = sizeof(*flow_attr);
3277
3278        kern_spec = kern_flow_attr->flow_specs;
3279        ib_spec = flow_attr + 1;
3280        for (i = 0; i < flow_attr->num_of_specs &&
3281                        cmd.flow_attr.size >= sizeof(*kern_spec) &&
3282                        cmd.flow_attr.size >= kern_spec->size;
3283             i++) {
3284                err = kern_spec_to_ib_spec(
3285                                attrs, (struct ib_uverbs_flow_spec *)kern_spec,
3286                                ib_spec, uflow_res);
3287                if (err)
3288                        goto err_free;
3289
3290                flow_attr->size +=
3291                        ((union ib_flow_spec *) ib_spec)->size;
3292                cmd.flow_attr.size -= kern_spec->size;
3293                kern_spec = ((void *)kern_spec) + kern_spec->size;
3294                ib_spec += ((union ib_flow_spec *) ib_spec)->size;
3295        }
3296        if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
3297                pr_warn("create flow failed, flow %d: %u bytes left from uverbs cmd\n",
3298                        i, cmd.flow_attr.size);
3299                err = -EINVAL;
3300                goto err_free;
3301        }
3302
3303        flow_id = qp->device->ops.create_flow(qp, flow_attr,
3304                                              &attrs->driver_udata);
3305
3306        if (IS_ERR(flow_id)) {
3307                err = PTR_ERR(flow_id);
3308                goto err_free;
3309        }
3310
3311        ib_set_flow(uobj, flow_id, qp, qp->device, uflow_res);
3312
3313        rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
3314                                UVERBS_LOOKUP_READ);
3315        kfree(flow_attr);
3316
3317        if (cmd.flow_attr.num_of_specs)
3318                kfree(kern_flow_attr);
3319        uobj_finalize_uobj_create(uobj, attrs);
3320
3321        resp.flow_handle = uobj->id;
3322        return uverbs_response(attrs, &resp, sizeof(resp));
3323
3324err_free:
3325        ib_uverbs_flow_resources_free(uflow_res);
3326err_free_flow_attr:
3327        kfree(flow_attr);
3328err_put:
3329        rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
3330                                UVERBS_LOOKUP_READ);
3331err_uobj:
3332        uobj_alloc_abort(uobj, attrs);
3333err_free_attr:
3334        if (cmd.flow_attr.num_of_specs)
3335                kfree(kern_flow_attr);
3336        return err;
3337}
3338
3339static int ib_uverbs_ex_destroy_flow(struct uverbs_attr_bundle *attrs)
3340{
3341        struct ib_uverbs_destroy_flow   cmd;
3342        int                             ret;
3343
3344        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3345        if (ret)
3346                return ret;
3347
3348        if (cmd.comp_mask)
3349                return -EINVAL;
3350
3351        return uobj_perform_destroy(UVERBS_OBJECT_FLOW, cmd.flow_handle, attrs);
3352}
3353
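    /*
     * Common helper behind CREATE_SRQ and CREATE_XSRQ; only the type
     * specific attributes differ (XRCD for XRC, a CQ for XRC/TM, the tag
     * matching limits for TM).
     */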
3354static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
3355                                struct ib_uverbs_create_xsrq *cmd,
3356                                struct ib_udata *udata)
3357{
3358        struct ib_uverbs_create_srq_resp resp = {};
3359        struct ib_usrq_object           *obj;
3360        struct ib_pd                    *pd;
3361        struct ib_srq                   *srq;
3362        struct ib_srq_init_attr          attr;
3363        int ret;
3364        struct ib_uobject *xrcd_uobj;
3365        struct ib_device *ib_dev;
3366
3367        obj = (struct ib_usrq_object *)uobj_alloc(UVERBS_OBJECT_SRQ, attrs,
3368                                                  &ib_dev);
3369        if (IS_ERR(obj))
3370                return PTR_ERR(obj);
3371
3372        if (cmd->srq_type == IB_SRQT_TM)
3373                attr.ext.tag_matching.max_num_tags = cmd->max_num_tags;
3374
3375        if (cmd->srq_type == IB_SRQT_XRC) {
3376                xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->xrcd_handle,
3377                                          attrs);
3378                if (IS_ERR(xrcd_uobj)) {
3379                        ret = -EINVAL;
3380                        goto err;
3381                }
3382
3383                attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object;
3384                if (!attr.ext.xrc.xrcd) {
3385                        ret = -EINVAL;
3386                        goto err_put_xrcd;
3387                }
3388
3389                obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
3390                atomic_inc(&obj->uxrcd->refcnt);
3391        }
3392
3393        if (ib_srq_has_cq(cmd->srq_type)) {
3394                attr.ext.cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
3395                                                cmd->cq_handle, attrs);
3396                if (!attr.ext.cq) {
3397                        ret = -EINVAL;
3398                        goto err_put_xrcd;
3399                }
3400        }
3401
3402        pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, attrs);
3403        if (!pd) {
3404                ret = -EINVAL;
3405                goto err_put_cq;
3406        }
3407
3408        attr.event_handler  = ib_uverbs_srq_event_handler;
3409        attr.srq_type       = cmd->srq_type;
3410        attr.attr.max_wr    = cmd->max_wr;
3411        attr.attr.max_sge   = cmd->max_sge;
3412        attr.attr.srq_limit = cmd->srq_limit;
3413
3414        INIT_LIST_HEAD(&obj->uevent.event_list);
3415        obj->uevent.uobject.user_handle = cmd->user_handle;
3416
3417        srq = ib_create_srq_user(pd, &attr, obj, udata);
3418        if (IS_ERR(srq)) {
3419                ret = PTR_ERR(srq);
3420                goto err_put_pd;
3421        }
3422
3423        obj->uevent.uobject.object = srq;
3424        obj->uevent.uobject.user_handle = cmd->user_handle;
3425        obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
3426        if (obj->uevent.event_file)
3427                uverbs_uobject_get(&obj->uevent.event_file->uobj);
3428
3429        if (cmd->srq_type == IB_SRQT_XRC) {
3430                resp.srqn = srq->ext.xrc.srq_num;
3431                uobj_put_read(xrcd_uobj);
3432        }
3434
3435        if (ib_srq_has_cq(cmd->srq_type))
3436                rdma_lookup_put_uobject(&attr.ext.cq->uobject->uevent.uobject,
3437                                        UVERBS_LOOKUP_READ);
3438
3439        uobj_put_obj_read(pd);
3440        uobj_finalize_uobj_create(&obj->uevent.uobject, attrs);
3441
3442        resp.srq_handle = obj->uevent.uobject.id;
3443        resp.max_wr = attr.attr.max_wr;
3444        resp.max_sge = attr.attr.max_sge;
3445        return uverbs_response(attrs, &resp, sizeof(resp));
3446
3447err_put_pd:
3448        uobj_put_obj_read(pd);
3449err_put_cq:
3450        if (ib_srq_has_cq(cmd->srq_type))
3451                rdma_lookup_put_uobject(&attr.ext.cq->uobject->uevent.uobject,
3452                                        UVERBS_LOOKUP_READ);
3453
3454err_put_xrcd:
3455        if (cmd->srq_type == IB_SRQT_XRC) {
3456                atomic_dec(&obj->uxrcd->refcnt);
3457                uobj_put_read(xrcd_uobj);
3458        }
3459
3460err:
3461        uobj_alloc_abort(&obj->uevent.uobject, attrs);
3462        return ret;
3463}
3464
3465static int ib_uverbs_create_srq(struct uverbs_attr_bundle *attrs)
3466{
3467        struct ib_uverbs_create_srq      cmd;
3468        struct ib_uverbs_create_xsrq     xcmd;
3469        int ret;
3470
3471        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3472        if (ret)
3473                return ret;
3474
3475        memset(&xcmd, 0, sizeof(xcmd));
3476        xcmd.response    = cmd.response;
3477        xcmd.user_handle = cmd.user_handle;
3478        xcmd.srq_type    = IB_SRQT_BASIC;
3479        xcmd.pd_handle   = cmd.pd_handle;
3480        xcmd.max_wr      = cmd.max_wr;
3481        xcmd.max_sge     = cmd.max_sge;
3482        xcmd.srq_limit   = cmd.srq_limit;
3483
3484        return __uverbs_create_xsrq(attrs, &xcmd, &attrs->driver_udata);
3485}
3486
3487static int ib_uverbs_create_xsrq(struct uverbs_attr_bundle *attrs)
3488{
3489        struct ib_uverbs_create_xsrq     cmd;
3490        int ret;
3491
3492        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3493        if (ret)
3494                return ret;
3495
3496        return __uverbs_create_xsrq(attrs, &cmd, &attrs->driver_udata);
3497}
3498
3499static int ib_uverbs_modify_srq(struct uverbs_attr_bundle *attrs)
3500{
3501        struct ib_uverbs_modify_srq cmd;
3502        struct ib_srq              *srq;
3503        struct ib_srq_attr          attr;
3504        int                         ret;
3505
3506        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3507        if (ret)
3508                return ret;
3509
3510        srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
3511        if (!srq)
3512                return -EINVAL;
3513
3514        attr.max_wr    = cmd.max_wr;
3515        attr.srq_limit = cmd.srq_limit;
3516
3517        ret = srq->device->ops.modify_srq(srq, &attr, cmd.attr_mask,
3518                                          &attrs->driver_udata);
3519
3520        rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
3521                                UVERBS_LOOKUP_READ);
3522
3523        return ret;
3524}
3525
3526static int ib_uverbs_query_srq(struct uverbs_attr_bundle *attrs)
3527{
3528        struct ib_uverbs_query_srq      cmd;
3529        struct ib_uverbs_query_srq_resp resp;
3530        struct ib_srq_attr              attr;
3531        struct ib_srq                   *srq;
3532        int                             ret;
3533
3534        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3535        if (ret)
3536                return ret;
3537
3538        srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
3539        if (!srq)
3540                return -EINVAL;
3541
3542        ret = ib_query_srq(srq, &attr);
3543
3544        rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
3545                                UVERBS_LOOKUP_READ);
3546
3547        if (ret)
3548                return ret;
3549
3550        memset(&resp, 0, sizeof(resp));
3551
3552        resp.max_wr    = attr.max_wr;
3553        resp.max_sge   = attr.max_sge;
3554        resp.srq_limit = attr.srq_limit;
3555
3556        return uverbs_response(attrs, &resp, sizeof(resp));
3557}
3558
3559static int ib_uverbs_destroy_srq(struct uverbs_attr_bundle *attrs)
3560{
3561        struct ib_uverbs_destroy_srq      cmd;
3562        struct ib_uverbs_destroy_srq_resp resp;
3563        struct ib_uobject                *uobj;
3564        struct ib_uevent_object          *obj;
3565        int ret;
3566
3567        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3568        if (ret)
3569                return ret;
3570
3571        uobj = uobj_get_destroy(UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
3572        if (IS_ERR(uobj))
3573                return PTR_ERR(uobj);
3574
3575        obj = container_of(uobj, struct ib_uevent_object, uobject);
3576        memset(&resp, 0, sizeof(resp));
3577        resp.events_reported = obj->events_reported;
3578
3579        uobj_put_destroy(uobj);
3580
3581        return uverbs_response(attrs, &resp, sizeof(resp));
3582}
3583
3584static int ib_uverbs_ex_query_device(struct uverbs_attr_bundle *attrs)
3585{
3586        struct ib_uverbs_ex_query_device_resp resp = {};
3587        struct ib_uverbs_ex_query_device  cmd;
3588        struct ib_device_attr attr = {0};
3589        struct ib_ucontext *ucontext;
3590        struct ib_device *ib_dev;
3591        int err;
3592
3593        ucontext = ib_uverbs_get_ucontext(attrs);
3594        if (IS_ERR(ucontext))
3595                return PTR_ERR(ucontext);
3596        ib_dev = ucontext->device;
3597
3598        err = uverbs_request(attrs, &cmd, sizeof(cmd));
3599        if (err)
3600                return err;
3601
3602        if (cmd.comp_mask)
3603                return -EINVAL;
3604
3605        if (cmd.reserved)
3606                return -EINVAL;
3607
3608        err = ib_dev->ops.query_device(ib_dev, &attr, &attrs->driver_udata);
3609        if (err)
3610                return err;
3611
3612        copy_query_dev_fields(ucontext, &resp.base, &attr);
3613
3614        resp.odp_caps.general_caps = attr.odp_caps.general_caps;
3615        resp.odp_caps.per_transport_caps.rc_odp_caps =
3616                attr.odp_caps.per_transport_caps.rc_odp_caps;
3617        resp.odp_caps.per_transport_caps.uc_odp_caps =
3618                attr.odp_caps.per_transport_caps.uc_odp_caps;
3619        resp.odp_caps.per_transport_caps.ud_odp_caps =
3620                attr.odp_caps.per_transport_caps.ud_odp_caps;
3621        resp.xrc_odp_caps = attr.odp_caps.per_transport_caps.xrc_odp_caps;
3622
3623        resp.timestamp_mask = attr.timestamp_mask;
3624        resp.hca_core_clock = attr.hca_core_clock;
3625        resp.device_cap_flags_ex = attr.device_cap_flags;
3626        resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
3627        resp.rss_caps.max_rwq_indirection_tables =
3628                attr.rss_caps.max_rwq_indirection_tables;
3629        resp.rss_caps.max_rwq_indirection_table_size =
3630                attr.rss_caps.max_rwq_indirection_table_size;
3631        resp.max_wq_type_rq = attr.max_wq_type_rq;
3632        resp.raw_packet_caps = attr.raw_packet_caps;
3633        resp.tm_caps.max_rndv_hdr_size  = attr.tm_caps.max_rndv_hdr_size;
3634        resp.tm_caps.max_num_tags       = attr.tm_caps.max_num_tags;
3635        resp.tm_caps.max_ops            = attr.tm_caps.max_ops;
3636        resp.tm_caps.max_sge            = attr.tm_caps.max_sge;
3637        resp.tm_caps.flags              = attr.tm_caps.flags;
3638        resp.cq_moderation_caps.max_cq_moderation_count  =
3639                attr.cq_caps.max_cq_moderation_count;
3640        resp.cq_moderation_caps.max_cq_moderation_period =
3641                attr.cq_caps.max_cq_moderation_period;
3642        resp.max_dm_size = attr.max_dm_size;
3643        resp.response_length = uverbs_response_length(attrs, sizeof(resp));
3644
3645        return uverbs_response(attrs, &resp, sizeof(resp));
3646}
3647
3648static int ib_uverbs_ex_modify_cq(struct uverbs_attr_bundle *attrs)
3649{
3650        struct ib_uverbs_ex_modify_cq cmd;
3651        struct ib_cq *cq;
3652        int ret;
3653
3654        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3655        if (ret)
3656                return ret;
3657
3658        if (!cmd.attr_mask || cmd.reserved)
3659                return -EINVAL;
3660
3661        if (cmd.attr_mask > IB_CQ_MODERATE)
3662                return -EOPNOTSUPP;
3663
3664        cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
3665        if (!cq)
3666                return -EINVAL;
3667
3668        ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period);
3669
3670        rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
3671                                UVERBS_LOOKUP_READ);
3672        return ret;
3673}
3674
3675/*
3676 * Describe the input structs for write(). Some write methods have an
3677 * input-only struct; most have both input and output. If there is an output,
3678 * the 'response' u64 must be the first field in the request structure.
3679 *
3680 * If udata is present then both the request and response structs have a
3681 * trailing driver_data flex array. In this case the size of the base struct
3682 * cannot be changed.
3683 */
3684#define UAPI_DEF_WRITE_IO(req, resp)                                           \
3685        .write.has_resp = 1 +                                                  \
3686                          BUILD_BUG_ON_ZERO(offsetof(req, response) != 0) +    \
3687                          BUILD_BUG_ON_ZERO(sizeof_field(req, response) !=    \
3688                                            sizeof(u64)),                      \
3689        .write.req_size = sizeof(req), .write.resp_size = sizeof(resp)
3690
3691#define UAPI_DEF_WRITE_I(req) .write.req_size = sizeof(req)
3692
3693#define UAPI_DEF_WRITE_UDATA_IO(req, resp)                                     \
3694        UAPI_DEF_WRITE_IO(req, resp),                                          \
3695                .write.has_udata =                                             \
3696                        1 +                                                    \
3697                        BUILD_BUG_ON_ZERO(offsetof(req, driver_data) !=        \
3698                                          sizeof(req)) +                       \
3699                        BUILD_BUG_ON_ZERO(offsetof(resp, driver_data) !=       \
3700                                          sizeof(resp))
3701
3702#define UAPI_DEF_WRITE_UDATA_I(req)                                            \
3703        UAPI_DEF_WRITE_I(req),                                                 \
3704                .write.has_udata =                                             \
3705                        1 + BUILD_BUG_ON_ZERO(offsetof(req, driver_data) !=    \
3706                                              sizeof(req))
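
    /*
     * A minimal sketch (hypothetical structs, not part of the real uAPI) of
     * the layout UAPI_DEF_WRITE_UDATA_IO() enforces: the u64 'response'
     * pointer is the first field of the request, and each driver_data flex
     * array starts exactly at sizeof() of its base struct, which is what the
     * BUILD_BUG_ON_ZERO() checks above verify at compile time:
     *
     *	struct ib_uverbs_example_cmd {
     *		__u64 response;		(must be at offset 0, u64 sized)
     *		__u32 handle;
     *		__u32 reserved;
     *		__u64 driver_data[];	(must sit at sizeof(cmd))
     *	};
     *
     *	struct ib_uverbs_example_resp {
     *		__u32 id;
     *		__u32 reserved;
     *		__u64 driver_data[];	(must sit at sizeof(resp))
     *	};
     */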
3707
3708/*
3709 * The _EX versions are for use with WRITE_EX and name the last required
3710 * struct member; buffers that do not extend through that member are rejected.
3711 */
3712#define UAPI_DEF_WRITE_IO_EX(req, req_last_member, resp, resp_last_member)     \
3713        .write.has_resp = 1,                                                   \
3714        .write.req_size = offsetofend(req, req_last_member),                   \
3715        .write.resp_size = offsetofend(resp, resp_last_member)
3716
3717#define UAPI_DEF_WRITE_I_EX(req, req_last_member)                              \
3718        .write.req_size = offsetofend(req, req_last_member)
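
    /*
     * For example (hypothetical command): UAPI_DEF_WRITE_I_EX(struct
     * ib_uverbs_ex_foo, last_mandatory) sets .write.req_size to
     * offsetofend(struct ib_uverbs_ex_foo, last_mandatory), so a userspace
     * buffer is accepted once it reaches the end of 'last_mandatory' and any
     * trailing fields added later stay optional extensions.
     */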
3719
3720const struct uapi_definition uverbs_def_write_intf[] = {
3721        DECLARE_UVERBS_OBJECT(
3722                UVERBS_OBJECT_AH,
3723                DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_AH,
3724                                     ib_uverbs_create_ah,
3725                                     UAPI_DEF_WRITE_UDATA_IO(
3726                                             struct ib_uverbs_create_ah,
3727                                             struct ib_uverbs_create_ah_resp)),
3728                DECLARE_UVERBS_WRITE(
3729                        IB_USER_VERBS_CMD_DESTROY_AH,
3730                        ib_uverbs_destroy_ah,
3731                        UAPI_DEF_WRITE_I(struct ib_uverbs_destroy_ah)),
3732                UAPI_DEF_OBJ_NEEDS_FN(create_user_ah),
3733                UAPI_DEF_OBJ_NEEDS_FN(destroy_ah)),
3734
3735        DECLARE_UVERBS_OBJECT(
3736                UVERBS_OBJECT_COMP_CHANNEL,
3737                DECLARE_UVERBS_WRITE(
3738                        IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL,
3739                        ib_uverbs_create_comp_channel,
3740                        UAPI_DEF_WRITE_IO(
3741                                struct ib_uverbs_create_comp_channel,
3742                                struct ib_uverbs_create_comp_channel_resp))),
3743
3744        DECLARE_UVERBS_OBJECT(
3745                UVERBS_OBJECT_CQ,
3746                DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_CQ,
3747                                     ib_uverbs_create_cq,
3748                                     UAPI_DEF_WRITE_UDATA_IO(
3749                                             struct ib_uverbs_create_cq,
3750                                             struct ib_uverbs_create_cq_resp),
3751                                     UAPI_DEF_METHOD_NEEDS_FN(create_cq)),
3752                DECLARE_UVERBS_WRITE(
3753                        IB_USER_VERBS_CMD_DESTROY_CQ,
3754                        ib_uverbs_destroy_cq,
3755                        UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_cq,
3756                                          struct ib_uverbs_destroy_cq_resp),
3757                        UAPI_DEF_METHOD_NEEDS_FN(destroy_cq)),
3758                DECLARE_UVERBS_WRITE(
3759                        IB_USER_VERBS_CMD_POLL_CQ,
3760                        ib_uverbs_poll_cq,
3761                        UAPI_DEF_WRITE_IO(struct ib_uverbs_poll_cq,
3762                                          struct ib_uverbs_poll_cq_resp),
3763                        UAPI_DEF_METHOD_NEEDS_FN(poll_cq)),
3764                DECLARE_UVERBS_WRITE(
3765                        IB_USER_VERBS_CMD_REQ_NOTIFY_CQ,
3766                        ib_uverbs_req_notify_cq,
3767                        UAPI_DEF_WRITE_I(struct ib_uverbs_req_notify_cq),
3768                        UAPI_DEF_METHOD_NEEDS_FN(req_notify_cq)),
3769                DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_RESIZE_CQ,
3770                                     ib_uverbs_resize_cq,
3771                                     UAPI_DEF_WRITE_UDATA_IO(
3772                                             struct ib_uverbs_resize_cq,
3773                                             struct ib_uverbs_resize_cq_resp),
3774                                     UAPI_DEF_METHOD_NEEDS_FN(resize_cq)),
3775                DECLARE_UVERBS_WRITE_EX(
3776                        IB_USER_VERBS_EX_CMD_CREATE_CQ,
3777                        ib_uverbs_ex_create_cq,
3778                        UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_cq,
3779                                             reserved,
3780                                             struct ib_uverbs_ex_create_cq_resp,
3781                                             response_length),
3782                        UAPI_DEF_METHOD_NEEDS_FN(create_cq)),
3783                DECLARE_UVERBS_WRITE_EX(
3784                        IB_USER_VERBS_EX_CMD_MODIFY_CQ,
3785                        ib_uverbs_ex_modify_cq,
3786                        UAPI_DEF_WRITE_I(struct ib_uverbs_ex_modify_cq),
3787                        UAPI_DEF_METHOD_NEEDS_FN(modify_cq))),
3788
3789        DECLARE_UVERBS_OBJECT(
3790                UVERBS_OBJECT_DEVICE,
3791                DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_GET_CONTEXT,
3792                                     ib_uverbs_get_context,
3793                                     UAPI_DEF_WRITE_UDATA_IO(
3794                                             struct ib_uverbs_get_context,
3795                                             struct ib_uverbs_get_context_resp)),
3796                DECLARE_UVERBS_WRITE(
3797                        IB_USER_VERBS_CMD_QUERY_DEVICE,
3798                        ib_uverbs_query_device,
3799                        UAPI_DEF_WRITE_IO(struct ib_uverbs_query_device,
3800                                          struct ib_uverbs_query_device_resp)),
3801                DECLARE_UVERBS_WRITE(
3802                        IB_USER_VERBS_CMD_QUERY_PORT,
3803                        ib_uverbs_query_port,
3804                        UAPI_DEF_WRITE_IO(struct ib_uverbs_query_port,
3805                                          struct ib_uverbs_query_port_resp),
3806                        UAPI_DEF_METHOD_NEEDS_FN(query_port)),
3807                DECLARE_UVERBS_WRITE_EX(
3808                        IB_USER_VERBS_EX_CMD_QUERY_DEVICE,
3809                        ib_uverbs_ex_query_device,
3810                        UAPI_DEF_WRITE_IO_EX(
3811                                struct ib_uverbs_ex_query_device,
3812                                reserved,
3813                                struct ib_uverbs_ex_query_device_resp,
3814                                response_length),
3815                        UAPI_DEF_METHOD_NEEDS_FN(query_device)),
3816                UAPI_DEF_OBJ_NEEDS_FN(alloc_ucontext),
3817                UAPI_DEF_OBJ_NEEDS_FN(dealloc_ucontext)),
3818
3819        DECLARE_UVERBS_OBJECT(
3820                UVERBS_OBJECT_FLOW,
3821                DECLARE_UVERBS_WRITE_EX(
3822                        IB_USER_VERBS_EX_CMD_CREATE_FLOW,
3823                        ib_uverbs_ex_create_flow,
3824                        UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_create_flow,
3825                                             flow_attr,
3826                                             struct ib_uverbs_create_flow_resp,
3827                                             flow_handle),
3828                        UAPI_DEF_METHOD_NEEDS_FN(create_flow)),
3829                DECLARE_UVERBS_WRITE_EX(
3830                        IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
3831                        ib_uverbs_ex_destroy_flow,
3832                        UAPI_DEF_WRITE_I(struct ib_uverbs_destroy_flow),
3833                        UAPI_DEF_METHOD_NEEDS_FN(destroy_flow))),
3834
3835        DECLARE_UVERBS_OBJECT(
3836                UVERBS_OBJECT_MR,
3837                DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_DEREG_MR,
3838                                     ib_uverbs_dereg_mr,
3839                                     UAPI_DEF_WRITE_I(struct ib_uverbs_dereg_mr),
3840                                     UAPI_DEF_METHOD_NEEDS_FN(dereg_mr)),
3841                DECLARE_UVERBS_WRITE(
3842                        IB_USER_VERBS_CMD_REG_MR,
3843                        ib_uverbs_reg_mr,
3844                        UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_reg_mr,
3845                                                struct ib_uverbs_reg_mr_resp),
3846                        UAPI_DEF_METHOD_NEEDS_FN(reg_user_mr)),
3847                DECLARE_UVERBS_WRITE(
3848                        IB_USER_VERBS_CMD_REREG_MR,
3849                        ib_uverbs_rereg_mr,
3850                        UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_rereg_mr,
3851                                                struct ib_uverbs_rereg_mr_resp),
3852                        UAPI_DEF_METHOD_NEEDS_FN(rereg_user_mr))),
3853
3854        DECLARE_UVERBS_OBJECT(
3855                UVERBS_OBJECT_MW,
3856                DECLARE_UVERBS_WRITE(
3857                        IB_USER_VERBS_CMD_ALLOC_MW,
3858                        ib_uverbs_alloc_mw,
3859                        UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_alloc_mw,
3860                                                struct ib_uverbs_alloc_mw_resp),
3861                        UAPI_DEF_METHOD_NEEDS_FN(alloc_mw)),
3862                DECLARE_UVERBS_WRITE(
3863                        IB_USER_VERBS_CMD_DEALLOC_MW,
3864                        ib_uverbs_dealloc_mw,
3865                        UAPI_DEF_WRITE_I(struct ib_uverbs_dealloc_mw),
3866                        UAPI_DEF_METHOD_NEEDS_FN(dealloc_mw))),
3867
3868        DECLARE_UVERBS_OBJECT(
3869                UVERBS_OBJECT_PD,
3870                DECLARE_UVERBS_WRITE(
3871                        IB_USER_VERBS_CMD_ALLOC_PD,
3872                        ib_uverbs_alloc_pd,
3873                        UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_alloc_pd,
3874                                                struct ib_uverbs_alloc_pd_resp),
3875                        UAPI_DEF_METHOD_NEEDS_FN(alloc_pd)),
3876                DECLARE_UVERBS_WRITE(
3877                        IB_USER_VERBS_CMD_DEALLOC_PD,
3878                        ib_uverbs_dealloc_pd,
3879                        UAPI_DEF_WRITE_I(struct ib_uverbs_dealloc_pd),
3880                        UAPI_DEF_METHOD_NEEDS_FN(dealloc_pd))),
3881
3882        DECLARE_UVERBS_OBJECT(
3883                UVERBS_OBJECT_QP,
3884                DECLARE_UVERBS_WRITE(
3885                        IB_USER_VERBS_CMD_ATTACH_MCAST,
3886                        ib_uverbs_attach_mcast,
3887                        UAPI_DEF_WRITE_I(struct ib_uverbs_attach_mcast),
3888                        UAPI_DEF_METHOD_NEEDS_FN(attach_mcast),
3889                        UAPI_DEF_METHOD_NEEDS_FN(detach_mcast)),
3890                DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_QP,
3891                                     ib_uverbs_create_qp,
3892                                     UAPI_DEF_WRITE_UDATA_IO(
3893                                             struct ib_uverbs_create_qp,
3894                                             struct ib_uverbs_create_qp_resp),
3895                                     UAPI_DEF_METHOD_NEEDS_FN(create_qp)),
3896                DECLARE_UVERBS_WRITE(
3897                        IB_USER_VERBS_CMD_DESTROY_QP,
3898                        ib_uverbs_destroy_qp,
3899                        UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_qp,
3900                                          struct ib_uverbs_destroy_qp_resp),
3901                        UAPI_DEF_METHOD_NEEDS_FN(destroy_qp)),
3902                DECLARE_UVERBS_WRITE(
3903                        IB_USER_VERBS_CMD_DETACH_MCAST,
3904                        ib_uverbs_detach_mcast,
3905                        UAPI_DEF_WRITE_I(struct ib_uverbs_detach_mcast),
3906                        UAPI_DEF_METHOD_NEEDS_FN(detach_mcast)),
3907                DECLARE_UVERBS_WRITE(
3908                        IB_USER_VERBS_CMD_MODIFY_QP,
3909                        ib_uverbs_modify_qp,
3910                        UAPI_DEF_WRITE_I(struct ib_uverbs_modify_qp),
3911                        UAPI_DEF_METHOD_NEEDS_FN(modify_qp)),
3912                DECLARE_UVERBS_WRITE(
3913                        IB_USER_VERBS_CMD_POST_RECV,
3914                        ib_uverbs_post_recv,
3915                        UAPI_DEF_WRITE_IO(struct ib_uverbs_post_recv,
3916                                          struct ib_uverbs_post_recv_resp),
3917                        UAPI_DEF_METHOD_NEEDS_FN(post_recv)),
3918                DECLARE_UVERBS_WRITE(
3919                        IB_USER_VERBS_CMD_POST_SEND,
3920                        ib_uverbs_post_send,
3921                        UAPI_DEF_WRITE_IO(struct ib_uverbs_post_send,
3922                                          struct ib_uverbs_post_send_resp),
3923                        UAPI_DEF_METHOD_NEEDS_FN(post_send)),
3924                DECLARE_UVERBS_WRITE(
3925                        IB_USER_VERBS_CMD_QUERY_QP,
3926                        ib_uverbs_query_qp,
3927                        UAPI_DEF_WRITE_IO(struct ib_uverbs_query_qp,
3928                                          struct ib_uverbs_query_qp_resp),
3929                        UAPI_DEF_METHOD_NEEDS_FN(query_qp)),
3930                DECLARE_UVERBS_WRITE_EX(
3931                        IB_USER_VERBS_EX_CMD_CREATE_QP,
3932                        ib_uverbs_ex_create_qp,
3933                        UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_qp,
3934                                             comp_mask,
3935                                             struct ib_uverbs_ex_create_qp_resp,
3936                                             response_length),
3937                        UAPI_DEF_METHOD_NEEDS_FN(create_qp)),
3938                DECLARE_UVERBS_WRITE_EX(
3939                        IB_USER_VERBS_EX_CMD_MODIFY_QP,
3940                        ib_uverbs_ex_modify_qp,
3941                        UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_modify_qp,
3942                                             base,
3943                                             struct ib_uverbs_ex_modify_qp_resp,
3944                                             response_length),
3945                        UAPI_DEF_METHOD_NEEDS_FN(modify_qp))),
3946
3947        DECLARE_UVERBS_OBJECT(
3948                UVERBS_OBJECT_RWQ_IND_TBL,
3949                DECLARE_UVERBS_WRITE_EX(
3950                        IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL,
3951                        ib_uverbs_ex_create_rwq_ind_table,
3952                        UAPI_DEF_WRITE_IO_EX(
3953                                struct ib_uverbs_ex_create_rwq_ind_table,
3954                                log_ind_tbl_size,
3955                                struct ib_uverbs_ex_create_rwq_ind_table_resp,
3956                                ind_tbl_num),
3957                        UAPI_DEF_METHOD_NEEDS_FN(create_rwq_ind_table)),
3958                DECLARE_UVERBS_WRITE_EX(
3959                        IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL,
3960                        ib_uverbs_ex_destroy_rwq_ind_table,
3961                        UAPI_DEF_WRITE_I(
3962                                struct ib_uverbs_ex_destroy_rwq_ind_table),
3963                        UAPI_DEF_METHOD_NEEDS_FN(destroy_rwq_ind_table))),
3964
3965        DECLARE_UVERBS_OBJECT(
3966                UVERBS_OBJECT_WQ,
3967                DECLARE_UVERBS_WRITE_EX(
3968                        IB_USER_VERBS_EX_CMD_CREATE_WQ,
3969                        ib_uverbs_ex_create_wq,
3970                        UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_wq,
3971                                             max_sge,
3972                                             struct ib_uverbs_ex_create_wq_resp,
3973                                             wqn),
3974                        UAPI_DEF_METHOD_NEEDS_FN(create_wq)),
3975                DECLARE_UVERBS_WRITE_EX(
3976                        IB_USER_VERBS_EX_CMD_DESTROY_WQ,
3977                        ib_uverbs_ex_destroy_wq,
3978                        UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_destroy_wq,
3979                                             wq_handle,
3980                                             struct ib_uverbs_ex_destroy_wq_resp,
3981                                             reserved),
3982                        UAPI_DEF_METHOD_NEEDS_FN(destroy_wq)),
3983                DECLARE_UVERBS_WRITE_EX(
3984                        IB_USER_VERBS_EX_CMD_MODIFY_WQ,
3985                        ib_uverbs_ex_modify_wq,
3986                        UAPI_DEF_WRITE_I_EX(struct ib_uverbs_ex_modify_wq,
3987                                            curr_wq_state),
3988                        UAPI_DEF_METHOD_NEEDS_FN(modify_wq))),
3989
3990        DECLARE_UVERBS_OBJECT(
3991                UVERBS_OBJECT_SRQ,
3992                DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_SRQ,
3993                                     ib_uverbs_create_srq,
3994                                     UAPI_DEF_WRITE_UDATA_IO(
3995                                             struct ib_uverbs_create_srq,
3996                                             struct ib_uverbs_create_srq_resp),
3997                                     UAPI_DEF_METHOD_NEEDS_FN(create_srq)),
3998                DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_XSRQ,
3999                                     ib_uverbs_create_xsrq,
4000                                     UAPI_DEF_WRITE_UDATA_IO(
4001                                             struct ib_uverbs_create_xsrq,
4002                                             struct ib_uverbs_create_srq_resp),
4003                                     UAPI_DEF_METHOD_NEEDS_FN(create_srq)),
4004                DECLARE_UVERBS_WRITE(
4005                        IB_USER_VERBS_CMD_DESTROY_SRQ,
4006                        ib_uverbs_destroy_srq,
4007                        UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_srq,
4008                                          struct ib_uverbs_destroy_srq_resp),
4009                        UAPI_DEF_METHOD_NEEDS_FN(destroy_srq)),
4010                DECLARE_UVERBS_WRITE(
4011                        IB_USER_VERBS_CMD_MODIFY_SRQ,
4012                        ib_uverbs_modify_srq,
4013                        UAPI_DEF_WRITE_UDATA_I(struct ib_uverbs_modify_srq),
4014                        UAPI_DEF_METHOD_NEEDS_FN(modify_srq)),
4015                DECLARE_UVERBS_WRITE(
4016                        IB_USER_VERBS_CMD_POST_SRQ_RECV,
4017                        ib_uverbs_post_srq_recv,
4018                        UAPI_DEF_WRITE_IO(struct ib_uverbs_post_srq_recv,
4019                                          struct ib_uverbs_post_srq_recv_resp),
4020                        UAPI_DEF_METHOD_NEEDS_FN(post_srq_recv)),
4021                DECLARE_UVERBS_WRITE(
4022                        IB_USER_VERBS_CMD_QUERY_SRQ,
4023                        ib_uverbs_query_srq,
4024                        UAPI_DEF_WRITE_IO(struct ib_uverbs_query_srq,
4025                                          struct ib_uverbs_query_srq_resp),
4026                        UAPI_DEF_METHOD_NEEDS_FN(query_srq))),
4027
4028        DECLARE_UVERBS_OBJECT(
4029                UVERBS_OBJECT_XRCD,
4030                DECLARE_UVERBS_WRITE(
4031                        IB_USER_VERBS_CMD_CLOSE_XRCD,
4032                        ib_uverbs_close_xrcd,
4033                        UAPI_DEF_WRITE_I(struct ib_uverbs_close_xrcd)),
4034                DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_OPEN_QP,
4035                                     ib_uverbs_open_qp,
4036                                     UAPI_DEF_WRITE_UDATA_IO(
4037                                             struct ib_uverbs_open_qp,
4038                                             struct ib_uverbs_create_qp_resp)),
4039                DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_OPEN_XRCD,
4040                                     ib_uverbs_open_xrcd,
4041                                     UAPI_DEF_WRITE_UDATA_IO(
4042                                             struct ib_uverbs_open_xrcd,
4043                                             struct ib_uverbs_open_xrcd_resp)),
4044                UAPI_DEF_OBJ_NEEDS_FN(alloc_xrcd),
4045                UAPI_DEF_OBJ_NEEDS_FN(dealloc_xrcd)),
4046
4047        {},
4048};
4049