linux/net/sunrpc/xprtrdma/svc_rdma_transport.c
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2015-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/export.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svcxprt_rdma *svc_rdma_create_xprt(struct svc_serv *serv,
						 struct net *net);
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static void svc_rdma_secure_port(struct svc_rqst *);
static void svc_rdma_kill_temp_xprt(struct svc_xprt *);

static const struct svc_xprt_ops svc_rdma_ops = {
	.xpo_create = svc_rdma_create,
	.xpo_recvfrom = svc_rdma_recvfrom,
	.xpo_sendto = svc_rdma_sendto,
	.xpo_read_payload = svc_rdma_read_payload,
	.xpo_release_rqst = svc_rdma_release_rqst,
	.xpo_detach = svc_rdma_detach,
	.xpo_free = svc_rdma_free,
	.xpo_has_wspace = svc_rdma_has_wspace,
	.xpo_accept = svc_rdma_accept,
	.xpo_secure_port = svc_rdma_secure_port,
	.xpo_kill_temp_xprt = svc_rdma_kill_temp_xprt,
};

struct svc_xprt_class svc_rdma_class = {
	.xcl_name = "rdma",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_rdma_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_RDMA,
	.xcl_ident = XPRT_TRANSPORT_RDMA,
};

/* QP event handler */
static void qp_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	trace_svcrdma_qp_error(event, (struct sockaddr *)&xprt->xpt_remote);
	switch (event->event) {
	/* These are considered benign events */
	case IB_EVENT_PATH_MIG:
	case IB_EVENT_COMM_EST:
	case IB_EVENT_SQ_DRAINED:
	case IB_EVENT_QP_LAST_WQE_REACHED:
		break;

	/* These are considered fatal events */
	case IB_EVENT_PATH_MIG_ERR:
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_REQ_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
	case IB_EVENT_DEVICE_FATAL:
	default:
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_xprt_enqueue(xprt);
		break;
	}
}

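/* Allocate and minimally initialize an svcxprt_rdma. The caller is
 * responsible for attaching a CM ID and setting up the RDMA resources
 * (PD, CQs, QP). Returns NULL on allocation failure.
 */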
static struct svcxprt_rdma *svc_rdma_create_xprt(struct svc_serv *serv,
						 struct net *net)
{
	struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);

	if (!cma_xprt) {
		dprintk("svcrdma: failed to create new transport\n");
		return NULL;
	}
	svc_xprt_init(net, &svc_rdma_class, &cma_xprt->sc_xprt, serv);
	INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
	INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
	INIT_LIST_HEAD(&cma_xprt->sc_send_ctxts);
	init_llist_head(&cma_xprt->sc_recv_ctxts);
	INIT_LIST_HEAD(&cma_xprt->sc_rw_ctxts);
	init_waitqueue_head(&cma_xprt->sc_send_wait);

	spin_lock_init(&cma_xprt->sc_lock);
	spin_lock_init(&cma_xprt->sc_rq_dto_lock);
	spin_lock_init(&cma_xprt->sc_send_lock);
	spin_lock_init(&cma_xprt->sc_rw_ctxt_lock);

	/*
	 * Note that this implies that the underlying transport supports
	 * some form of congestion control (see RFC 7530 section 3.1
	 * paragraph 2). For now, we assume that all supported RDMA
	 * transports are suitable here.
	 */
	set_bit(XPT_CONG_CTRL, &cma_xprt->sc_xprt.xpt_flags);

	return cma_xprt;
}

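/* Examine the RPC-over-RDMA CM private message, if the client sent one.
 * It advertises the client's inline send and receive buffer sizes, and
 * whether the server may use Send With Invalidate when replying.
 */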
static void
svc_rdma_parse_connect_private(struct svcxprt_rdma *newxprt,
			       struct rdma_conn_param *param)
{
	const struct rpcrdma_connect_private *pmsg = param->private_data;

	if (pmsg &&
	    pmsg->cp_magic == rpcrdma_cmp_magic &&
	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
		newxprt->sc_snd_w_inv = pmsg->cp_flags &
					RPCRDMA_CMP_F_SND_W_INV_OK;

		dprintk("svcrdma: client send_size %u, recv_size %u "
			"remote inv %ssupported\n",
			rpcrdma_decode_buffer_size(pmsg->cp_send_size),
			rpcrdma_decode_buffer_size(pmsg->cp_recv_size),
			newxprt->sc_snd_w_inv ? "" : "un");
	}
}

/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listening xprt. When the listen thread is kicked,
 * it will call the recvfrom method on the listen xprt which will accept the
 * new connection.
 */
static void handle_connect_req(struct rdma_cm_id *new_cma_id,
			       struct rdma_conn_param *param)
{
	struct svcxprt_rdma *listen_xprt = new_cma_id->context;
	struct svcxprt_rdma *newxprt;
	struct sockaddr *sa;

	/* Create a new transport */
	newxprt = svc_rdma_create_xprt(listen_xprt->sc_xprt.xpt_server,
				       listen_xprt->sc_xprt.xpt_net);
	if (!newxprt)
		return;
	newxprt->sc_cm_id = new_cma_id;
	new_cma_id->context = newxprt;
	svc_rdma_parse_connect_private(newxprt, param);

	/* Save client advertised inbound read limit for use later in accept. */
	newxprt->sc_ord = param->initiator_depth;

	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
	newxprt->sc_xprt.xpt_remotelen = svc_addr_len(sa);
	memcpy(&newxprt->sc_xprt.xpt_remote, sa,
	       newxprt->sc_xprt.xpt_remotelen);
	snprintf(newxprt->sc_xprt.xpt_remotebuf,
		 sizeof(newxprt->sc_xprt.xpt_remotebuf) - 1, "%pISc", sa);

	/* The remote port is arbitrary and not under the control of the
	 * client ULP. Set it to a fixed value so that the DRC continues
	 * to be effective after a reconnect.
	 */
	rpc_set_port((struct sockaddr *)&newxprt->sc_xprt.xpt_remote, 0);

	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

	/*
	 * Enqueue the new transport on the accept queue of the listening
	 * transport
	 */
	spin_lock(&listen_xprt->sc_lock);
	list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
	spin_unlock(&listen_xprt->sc_lock);

	set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&listen_xprt->sc_xprt);
}

/**
 * svc_rdma_listen_handler - Handle CM events generated on a listening endpoint
 * @cma_id: the server's listener rdma_cm_id
 * @event: details of the event
 *
 * Return values:
 *     %0: Do not destroy @cma_id
 *     %1: Destroy @cma_id (never returned here)
 *
 * NB: There is never a DEVICE_REMOVAL event for INADDR_ANY listeners.
 */
static int svc_rdma_listen_handler(struct rdma_cm_id *cma_id,
				   struct rdma_cm_event *event)
{
	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		handle_connect_req(cma_id, &event->param.conn);
		break;
	default:
		break;
	}
	return 0;
}

/**
 * svc_rdma_cma_handler - Handle CM events on client connections
 * @cma_id: the server's rdma_cm_id for this connection
 * @event: details of the event
 *
 * Return values:
 *     %0: Do not destroy @cma_id
 *     %1: Destroy @cma_id (never returned here)
 */
static int svc_rdma_cma_handler(struct rdma_cm_id *cma_id,
				struct rdma_cm_event *event)
{
	struct svcxprt_rdma *rdma = cma_id->context;
	struct svc_xprt *xprt = &rdma->sc_xprt;

	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED:
		clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
		svc_xprt_enqueue(xprt);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_xprt_enqueue(xprt);
		break;
	default:
		break;
	}
	return 0;
}

/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags)
{
	struct rdma_cm_id *listen_id;
	struct svcxprt_rdma *cma_xprt;
	int ret;

	if (sa->sa_family != AF_INET && sa->sa_family != AF_INET6)
		return ERR_PTR(-EAFNOSUPPORT);
	cma_xprt = svc_rdma_create_xprt(serv, net);
	if (!cma_xprt)
		return ERR_PTR(-ENOMEM);
	set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);
	strcpy(cma_xprt->sc_xprt.xpt_remotebuf, "listener");

	listen_id = rdma_create_id(net, svc_rdma_listen_handler, cma_xprt,
				   RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(listen_id)) {
		ret = PTR_ERR(listen_id);
		goto err0;
	}

	/* Allow both IPv4 and IPv6 sockets to bind a single port
	 * at the same time.
	 */
#if IS_ENABLED(CONFIG_IPV6)
	ret = rdma_set_afonly(listen_id, 1);
	if (ret)
		goto err1;
#endif
	ret = rdma_bind_addr(listen_id, sa);
	if (ret)
		goto err1;
	cma_xprt->sc_cm_id = listen_id;

	ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
	if (ret)
		goto err1;

	/*
	 * We need to use the address from the cm_id in case the
	 * caller specified 0 for the port number.
	 */
	sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

	return &cma_xprt->sc_xprt;

 err1:
	rdma_destroy_id(listen_id);
 err0:
	kfree(cma_xprt);
	return ERR_PTR(ret);
}

/*
 * This is the xpo_recvfrom function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * has already created a new transport and attached it to the new CMA
 * ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structure. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *listen_rdma;
	struct svcxprt_rdma *newxprt = NULL;
	struct rdma_conn_param conn_param;
	struct rpcrdma_connect_private pmsg;
	struct ib_qp_init_attr qp_attr;
	unsigned int ctxts, rq_depth;
	struct ib_device *dev;
	int ret = 0;
	RPC_IFDEBUG(struct sockaddr *sap);

	listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
	clear_bit(XPT_CONN, &xprt->xpt_flags);
	/* Get the next entry off the accept list */
	spin_lock(&listen_rdma->sc_lock);
	if (!list_empty(&listen_rdma->sc_accept_q)) {
		newxprt = list_entry(listen_rdma->sc_accept_q.next,
				     struct svcxprt_rdma, sc_accept_q);
		list_del_init(&newxprt->sc_accept_q);
	}
	if (!list_empty(&listen_rdma->sc_accept_q))
		set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
	spin_unlock(&listen_rdma->sc_lock);
	if (!newxprt)
		return NULL;

	dev = newxprt->sc_cm_id->device;
	newxprt->sc_port_num = newxprt->sc_cm_id->port_num;

	/* Qualify the transport resource defaults with the
	 * capabilities of this particular device.
	 */
	/* Transport header, head iovec, tail iovec */
	newxprt->sc_max_send_sges = 3;
	/* Add one SGE per page list entry */
	newxprt->sc_max_send_sges += (svcrdma_max_req_size / PAGE_SIZE) + 1;
	if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge)
		newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
	newxprt->sc_max_req_size = svcrdma_max_req_size;
	newxprt->sc_max_requests = svcrdma_max_requests;
	newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
	rq_depth = newxprt->sc_max_requests + newxprt->sc_max_bc_requests;
	if (rq_depth > dev->attrs.max_qp_wr) {
		pr_warn("svcrdma: reducing receive depth to %d\n",
			dev->attrs.max_qp_wr);
		rq_depth = dev->attrs.max_qp_wr;
		newxprt->sc_max_requests = rq_depth - 2;
		newxprt->sc_max_bc_requests = 2;
	}
	newxprt->sc_fc_credits = cpu_to_be32(newxprt->sc_max_requests);
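	/* Each credit may need several rdma_rw contexts to move Read
	 * and Write chunks; rdma_rw_mr_factor() estimates how many this
	 * device needs for an RPCSVC_MAXPAGES-sized payload. The SQ must
	 * be deep enough for Send WRs plus all of those R/W contexts.
	 */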
	ctxts = rdma_rw_mr_factor(dev, newxprt->sc_port_num, RPCSVC_MAXPAGES);
	ctxts *= newxprt->sc_max_requests;
	newxprt->sc_sq_depth = rq_depth + ctxts;
	if (newxprt->sc_sq_depth > dev->attrs.max_qp_wr) {
		pr_warn("svcrdma: reducing send depth to %d\n",
			dev->attrs.max_qp_wr);
		newxprt->sc_sq_depth = dev->attrs.max_qp_wr;
	}
	atomic_set(&newxprt->sc_sq_avail, newxprt->sc_sq_depth);

	newxprt->sc_pd = ib_alloc_pd(dev, 0);
	if (IS_ERR(newxprt->sc_pd)) {
		trace_svcrdma_pd_err(newxprt, PTR_ERR(newxprt->sc_pd));
		goto errout;
	}
	newxprt->sc_sq_cq = ib_alloc_cq_any(dev, newxprt, newxprt->sc_sq_depth,
					    IB_POLL_WORKQUEUE);
	if (IS_ERR(newxprt->sc_sq_cq))
		goto errout;
	newxprt->sc_rq_cq =
		ib_alloc_cq_any(dev, newxprt, rq_depth, IB_POLL_WORKQUEUE);
	if (IS_ERR(newxprt->sc_rq_cq))
		goto errout;

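	/* The Send Queue is shared between plain Send WRs and rdma_rw
	 * contexts: max_rdma_ctxs reserves the R/W portion, so only the
	 * remainder is advertised as max_send_wr.
	 */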
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = &newxprt->sc_xprt;
	qp_attr.port_num = newxprt->sc_port_num;
	qp_attr.cap.max_rdma_ctxs = ctxts;
	qp_attr.cap.max_send_wr = newxprt->sc_sq_depth - ctxts;
	qp_attr.cap.max_recv_wr = rq_depth;
	qp_attr.cap.max_send_sge = newxprt->sc_max_send_sges;
	qp_attr.cap.max_recv_sge = 1;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = newxprt->sc_sq_cq;
	qp_attr.recv_cq = newxprt->sc_rq_cq;
	dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n",
		newxprt->sc_cm_id, newxprt->sc_pd);
	dprintk("    cap.max_send_wr = %d, cap.max_recv_wr = %d\n",
		qp_attr.cap.max_send_wr, qp_attr.cap.max_recv_wr);
	dprintk("    cap.max_send_sge = %d, cap.max_recv_sge = %d\n",
		qp_attr.cap.max_send_sge, qp_attr.cap.max_recv_sge);

	ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
	if (ret) {
		trace_svcrdma_qp_err(newxprt, ret);
		goto errout;
	}
	newxprt->sc_qp = newxprt->sc_cm_id->qp;

	if (!(dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		newxprt->sc_snd_w_inv = false;
	if (!rdma_protocol_iwarp(dev, newxprt->sc_port_num) &&
	    !rdma_ib_or_roce(dev, newxprt->sc_port_num)) {
		trace_svcrdma_fabric_err(newxprt, -EINVAL);
		goto errout;
	}

	if (!svc_rdma_post_recvs(newxprt))
		goto errout;

	/* Swap out the handler */
	newxprt->sc_cm_id->event_handler = svc_rdma_cma_handler;

	/* Construct RDMA-CM private message */
	pmsg.cp_magic = rpcrdma_cmp_magic;
	pmsg.cp_version = RPCRDMA_CMP_VERSION;
	pmsg.cp_flags = 0;
	pmsg.cp_send_size = pmsg.cp_recv_size =
		rpcrdma_encode_buffer_size(newxprt->sc_max_req_size);

	/* Accept Connection */
	set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
	memset(&conn_param, 0, sizeof conn_param);
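	/* An RPC-over-RDMA server never acts as the target of RDMA Read
	 * requests, so no responder resources are needed. The initiator
	 * depth is capped by both the client's advertised limit (sc_ord)
	 * and what this device supports.
	 */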
	conn_param.responder_resources = 0;
	conn_param.initiator_depth = min_t(int, newxprt->sc_ord,
					   dev->attrs.max_qp_init_rd_atom);
	if (!conn_param.initiator_depth) {
		ret = -EINVAL;
		trace_svcrdma_initdepth_err(newxprt, ret);
		goto errout;
	}
	conn_param.private_data = &pmsg;
	conn_param.private_data_len = sizeof(pmsg);
	ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
	if (ret) {
		trace_svcrdma_accept_err(newxprt, ret);
		goto errout;
	}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	dprintk("svcrdma: new connection %p accepted:\n", newxprt);
	sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
	dprintk("    local address   : %pIS:%u\n", sap, rpc_get_port(sap));
	sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
	dprintk("    remote address  : %pIS:%u\n", sap, rpc_get_port(sap));
	dprintk("    max_sge         : %d\n", newxprt->sc_max_send_sges);
	dprintk("    sq_depth        : %d\n", newxprt->sc_sq_depth);
	dprintk("    rdma_rw_ctxs    : %d\n", ctxts);
	dprintk("    max_requests    : %d\n", newxprt->sc_max_requests);
	dprintk("    ord             : %d\n", conn_param.initiator_depth);
#endif

	return &newxprt->sc_xprt;

 errout:
	/* Take a reference in case the DTO handler runs */
	svc_xprt_get(&newxprt->sc_xprt);
	if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
		ib_destroy_qp(newxprt->sc_qp);
	rdma_destroy_id(newxprt->sc_cm_id);
	/* This call to put will destroy the transport */
	svc_xprt_put(&newxprt->sc_xprt);
	return NULL;
}

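/* xpo_detach only initiates the disconnect; resource teardown waits
 * for the final svc_rdma_free().
 */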
static void svc_rdma_detach(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	rdma_disconnect(rdma->sc_cm_id);
}

static void __svc_rdma_free(struct work_struct *work)
{
	struct svcxprt_rdma *rdma =
		container_of(work, struct svcxprt_rdma, sc_work);
	struct svc_xprt *xprt = &rdma->sc_xprt;

	/* This blocks until the Completion Queues are empty */
	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_drain_qp(rdma->sc_qp);

	svc_rdma_flush_recv_queues(rdma);

	/* Final put of backchannel client transport */
	if (xprt->xpt_bc_xprt) {
		xprt_put(xprt->xpt_bc_xprt);
		xprt->xpt_bc_xprt = NULL;
	}

	svc_rdma_destroy_rw_ctxts(rdma);
	svc_rdma_send_ctxts_destroy(rdma);
	svc_rdma_recv_ctxts_destroy(rdma);

	/* Destroy the QP if present (not a listener) */
	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_destroy_qp(rdma->sc_qp);

	if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
		ib_free_cq(rdma->sc_sq_cq);

	if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
		ib_free_cq(rdma->sc_rq_cq);

	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
		ib_dealloc_pd(rdma->sc_pd);

	/* Destroy the CM ID */
	rdma_destroy_id(rdma->sc_cm_id);

	kfree(rdma);
}

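/* Teardown can block (ib_drain_qp waits for the CQs to empty), so the
 * actual release of resources is deferred to a workqueue.
 */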
static void svc_rdma_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	INIT_WORK(&rdma->sc_work, __svc_rdma_free);
	schedule_work(&rdma->sc_work);
}

static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	/*
	 * If there are already waiters on the SQ,
	 * return false.
	 */
	if (waitqueue_active(&rdma->sc_send_wait))
		return 0;

	/* Otherwise return true. */
	return 1;
}

static void svc_rdma_secure_port(struct svc_rqst *rqstp)
{
	set_bit(RQ_SECURE, &rqstp->rq_flags);
}

static void svc_rdma_kill_temp_xprt(struct svc_xprt *xprt)
{
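	/* Intentionally a no-op: svcrdma needs no extra handling when a
	 * temporary transport is closed.
	 */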
}