linux/net/sunrpc/xprtrdma/svc_rdma_transport.c
   1/*
   2 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the BSD-type
   8 * license below:
   9 *
  10 * Redistribution and use in source and binary forms, with or without
  11 * modification, are permitted provided that the following conditions
  12 * are met:
  13 *
  14 *      Redistributions of source code must retain the above copyright
  15 *      notice, this list of conditions and the following disclaimer.
  16 *
  17 *      Redistributions in binary form must reproduce the above
  18 *      copyright notice, this list of conditions and the following
  19 *      disclaimer in the documentation and/or other materials provided
  20 *      with the distribution.
  21 *
  22 *      Neither the name of the Network Appliance, Inc. nor the names of
  23 *      its contributors may be used to endorse or promote products
  24 *      derived from this software without specific prior written
  25 *      permission.
  26 *
  27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  38 *
  39 * Author: Tom Tucker <tom@opengridcomputing.com>
  40 */
  41
  42#include <linux/sunrpc/svc_xprt.h>
  43#include <linux/sunrpc/debug.h>
  44#include <linux/sunrpc/rpc_rdma.h>
  45#include <linux/sched.h>
  46#include <linux/spinlock.h>
  47#include <rdma/ib_verbs.h>
  48#include <rdma/rdma_cm.h>
  49#include <linux/sunrpc/svc_rdma.h>
  50
  51#define RPCDBG_FACILITY RPCDBG_SVCXPRT
  52
  53static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
  54                                        struct sockaddr *sa, int salen,
  55                                        int flags);
  56static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
  57static void svc_rdma_release_rqst(struct svc_rqst *);
  58static void dto_tasklet_func(unsigned long data);
  59static void svc_rdma_detach(struct svc_xprt *xprt);
  60static void svc_rdma_free(struct svc_xprt *xprt);
  61static int svc_rdma_has_wspace(struct svc_xprt *xprt);
  62static void rq_cq_reap(struct svcxprt_rdma *xprt);
  63static void sq_cq_reap(struct svcxprt_rdma *xprt);
  64
  65static DECLARE_TASKLET(dto_tasklet, dto_tasklet_func, 0UL);
  66static DEFINE_SPINLOCK(dto_lock);
  67static LIST_HEAD(dto_xprt_q);
  68
  69static struct svc_xprt_ops svc_rdma_ops = {
  70        .xpo_create = svc_rdma_create,
  71        .xpo_recvfrom = svc_rdma_recvfrom,
  72        .xpo_sendto = svc_rdma_sendto,
  73        .xpo_release_rqst = svc_rdma_release_rqst,
  74        .xpo_detach = svc_rdma_detach,
  75        .xpo_free = svc_rdma_free,
  76        .xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
  77        .xpo_has_wspace = svc_rdma_has_wspace,
  78        .xpo_accept = svc_rdma_accept,
  79};
  80
  81struct svc_xprt_class svc_rdma_class = {
  82        .xcl_name = "rdma",
  83        .xcl_owner = THIS_MODULE,
  84        .xcl_ops = &svc_rdma_ops,
  85        .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
  86};
  87
  88/* WR context cache. Created in svc_rdma.c  */
  89extern struct kmem_cache *svc_rdma_ctxt_cachep;
  90
  91struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
  92{
  93        struct svc_rdma_op_ctxt *ctxt;
  94
  95        while (1) {
  96                ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep, GFP_KERNEL);
  97                if (ctxt)
  98                        break;
  99                schedule_timeout_uninterruptible(msecs_to_jiffies(500));
 100        }
 101        ctxt->xprt = xprt;
 102        INIT_LIST_HEAD(&ctxt->dto_q);
 103        ctxt->count = 0;
 104        ctxt->frmr = NULL;
 105        atomic_inc(&xprt->sc_ctxt_used);
 106        return ctxt;
 107}
 108
 109void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
 110{
 111        struct svcxprt_rdma *xprt = ctxt->xprt;
 112        int i;
 113        for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
 114                /*
 115                 * Unmap the DMA addr in the SGE if the lkey matches
 116                 * the sc_dma_lkey, otherwise, ignore it since it is
 117                 * an FRMR lkey and will be unmapped later when the
 118                 * last WR that uses it completes.
 119                 */
 120                if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) {
 121                        atomic_dec(&xprt->sc_dma_used);
 122                        ib_dma_unmap_single(xprt->sc_cm_id->device,
 123                                            ctxt->sge[i].addr,
 124                                            ctxt->sge[i].length,
 125                                            ctxt->direction);
 126                }
 127        }
 128}
 129
 130void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
 131{
 132        struct svcxprt_rdma *xprt;
 133        int i;
 134
 135        BUG_ON(!ctxt);
 136        xprt = ctxt->xprt;
 137        if (free_pages)
 138                for (i = 0; i < ctxt->count; i++)
 139                        put_page(ctxt->pages[i]);
 140
 141        kmem_cache_free(svc_rdma_ctxt_cachep, ctxt);
 142        atomic_dec(&xprt->sc_ctxt_used);
 143}
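/*
 * Typical op_ctxt life cycle (illustrative sketch only, based on the
 * callers in this file; "build_and_post_wr" is a stand-in for code such
 * as svc_rdma_post_recv() above or the send paths in svc_rdma_sendto.c):
 *
 *	ctxt = svc_rdma_get_context(xprt);
 *	ctxt->direction = DMA_FROM_DEVICE;
 *	... map pages into ctxt->sge[] and bump ctxt->count ...
 *	build_and_post_wr(xprt, ctxt);	/. wr_id = (u64)(unsigned long)ctxt ./
 *	...
 *	/. on completion (rq_cq_reap/process_context): ./
 *	svc_rdma_unmap_dma(ctxt);
 *	svc_rdma_put_context(ctxt, free_pages);
 */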
 144
 145/* Temporary NFS request map cache. Created in svc_rdma.c  */
 146extern struct kmem_cache *svc_rdma_map_cachep;
 147
 148/*
 149 * Temporary NFS req mappings are shared across all transport
 150 * instances. These are short lived and should be bounded by the number
 151 * of concurrent server threads * depth of the SQ.
 152 */
 153struct svc_rdma_req_map *svc_rdma_get_req_map(void)
 154{
 155        struct svc_rdma_req_map *map;
 156        while (1) {
 157                map = kmem_cache_alloc(svc_rdma_map_cachep, GFP_KERNEL);
 158                if (map)
 159                        break;
 160                schedule_timeout_uninterruptible(msecs_to_jiffies(500));
 161        }
 162        map->count = 0;
 163        map->frmr = NULL;
 164        return map;
 165}
 166
 167void svc_rdma_put_req_map(struct svc_rdma_req_map *map)
 168{
 169        kmem_cache_free(svc_rdma_map_cachep, map);
 170}
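/*
 * Usage sketch (illustrative only; how the map is filled in depends on
 * the caller):
 *
 *	map = svc_rdma_get_req_map();
 *	... describe the RPC data to be transferred in the map ...
 *	svc_rdma_put_req_map(map);
 *
 * Gets and puts are expected to be strictly paired so the number of
 * live maps stays bounded as described above.
 */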
 171
 172/* ib_cq event handler */
 173static void cq_event_handler(struct ib_event *event, void *context)
 174{
 175        struct svc_xprt *xprt = context;
 176        dprintk("svcrdma: received CQ event id=%d, context=%p\n",
 177                event->event, context);
 178        set_bit(XPT_CLOSE, &xprt->xpt_flags);
 179}
 180
 181/* QP event handler */
 182static void qp_event_handler(struct ib_event *event, void *context)
 183{
 184        struct svc_xprt *xprt = context;
 185
 186        switch (event->event) {
 187        /* These are considered benign events */
 188        case IB_EVENT_PATH_MIG:
 189        case IB_EVENT_COMM_EST:
 190        case IB_EVENT_SQ_DRAINED:
 191        case IB_EVENT_QP_LAST_WQE_REACHED:
 192                dprintk("svcrdma: QP event %d received for QP=%p\n",
 193                        event->event, event->element.qp);
 194                break;
 195        /* These are considered fatal events */
 196        case IB_EVENT_PATH_MIG_ERR:
 197        case IB_EVENT_QP_FATAL:
 198        case IB_EVENT_QP_REQ_ERR:
 199        case IB_EVENT_QP_ACCESS_ERR:
 200        case IB_EVENT_DEVICE_FATAL:
 201        default:
 202                dprintk("svcrdma: QP ERROR event %d received for QP=%p, "
 203                        "closing transport\n",
 204                        event->event, event->element.qp);
 205                set_bit(XPT_CLOSE, &xprt->xpt_flags);
 206                break;
 207        }
 208}
 209
 210/*
 211 * Data Transfer Operation Tasklet
 212 *
 213 * Walks a list of transports with I/O pending, removing entries as
 214 * they are added to the server's I/O pending list. Two bits indicate
 215 * if SQ, RQ, or both have I/O pending. The dto_lock is an irqsave
 216 * spinlock that serializes access to the transport list with the RQ
 217 * and SQ interrupt handlers.
 218 */
 219static void dto_tasklet_func(unsigned long data)
 220{
 221        struct svcxprt_rdma *xprt;
 222        unsigned long flags;
 223
 224        spin_lock_irqsave(&dto_lock, flags);
 225        while (!list_empty(&dto_xprt_q)) {
 226                xprt = list_entry(dto_xprt_q.next,
 227                                  struct svcxprt_rdma, sc_dto_q);
 228                list_del_init(&xprt->sc_dto_q);
 229                spin_unlock_irqrestore(&dto_lock, flags);
 230
 231                rq_cq_reap(xprt);
 232                sq_cq_reap(xprt);
 233
 234                svc_xprt_put(&xprt->sc_xprt);
 235                spin_lock_irqsave(&dto_lock, flags);
 236        }
 237        spin_unlock_irqrestore(&dto_lock, flags);
 238}
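/*
 * Note: the tasklet drops dto_lock around the reap calls above so that
 * rq_comp_handler()/sq_comp_handler() can re-queue this transport (and
 * queue others) while reaping is in progress; the reference taken by
 * the completion handlers keeps the xprt alive until svc_xprt_put().
 */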
 239
/*
 * Receive Queue Completion Handler
 *
 * Since an RQ completion handler is called in interrupt context, we
 * need to defer the handling of the I/O to a tasklet.
 */
 246static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
 247{
 248        struct svcxprt_rdma *xprt = cq_context;
 249        unsigned long flags;
 250
 251        /* Guard against unconditional flush call for destroyed QP */
        if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
 253                return;
 254
 255        /*
 256         * Set the bit regardless of whether or not it's on the list
 257         * because it may be on the list already due to an SQ
 258         * completion.
 259         */
 260        set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags);
 261
 262        /*
 263         * If this transport is not already on the DTO transport queue,
 264         * add it
 265         */
 266        spin_lock_irqsave(&dto_lock, flags);
 267        if (list_empty(&xprt->sc_dto_q)) {
 268                svc_xprt_get(&xprt->sc_xprt);
 269                list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
 270        }
 271        spin_unlock_irqrestore(&dto_lock, flags);
 272
 273        /* Tasklet does all the work to avoid irqsave locks. */
 274        tasklet_schedule(&dto_tasklet);
 275}
 276
/*
 * rq_cq_reap - Process the RQ CQ.
 *
 * Take all completed WCs off the CQ and enqueue the associated DTO
 * context on the dto_q for the transport.
 *
 * Note that the caller must hold a transport reference.
 */
 285static void rq_cq_reap(struct svcxprt_rdma *xprt)
 286{
 287        int ret;
 288        struct ib_wc wc;
 289        struct svc_rdma_op_ctxt *ctxt = NULL;
 290
 291        if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
 292                return;
 293
 294        ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
 295        atomic_inc(&rdma_stat_rq_poll);
 296
 297        while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
 298                ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
 299                ctxt->wc_status = wc.status;
 300                ctxt->byte_len = wc.byte_len;
 301                svc_rdma_unmap_dma(ctxt);
 302                if (wc.status != IB_WC_SUCCESS) {
 303                        /* Close the transport */
 304                        dprintk("svcrdma: transport closing putting ctxt %p\n", ctxt);
 305                        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
 306                        svc_rdma_put_context(ctxt, 1);
 307                        svc_xprt_put(&xprt->sc_xprt);
 308                        continue;
 309                }
 310                spin_lock_bh(&xprt->sc_rq_dto_lock);
 311                list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
 312                spin_unlock_bh(&xprt->sc_rq_dto_lock);
 313                svc_xprt_put(&xprt->sc_xprt);
 314        }
 315
 316        if (ctxt)
 317                atomic_inc(&rdma_stat_rq_prod);
 318
 319        set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
 320        /*
 321         * If data arrived before established event,
 322         * don't enqueue. This defers RPC I/O until the
 323         * RDMA connection is complete.
 324         */
 325        if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
 326                svc_xprt_enqueue(&xprt->sc_xprt);
 327}
 328
/*
 * Process a completion context
 */
 332static void process_context(struct svcxprt_rdma *xprt,
 333                            struct svc_rdma_op_ctxt *ctxt)
 334{
 335        svc_rdma_unmap_dma(ctxt);
 336
 337        switch (ctxt->wr_op) {
 338        case IB_WR_SEND:
 339                if (test_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags))
 340                        svc_rdma_put_frmr(xprt, ctxt->frmr);
 341                svc_rdma_put_context(ctxt, 1);
 342                break;
 343
 344        case IB_WR_RDMA_WRITE:
 345                svc_rdma_put_context(ctxt, 0);
 346                break;
 347
 348        case IB_WR_RDMA_READ:
 349        case IB_WR_RDMA_READ_WITH_INV:
 350                if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
 351                        struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;
 352                        BUG_ON(!read_hdr);
 353                        if (test_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags))
 354                                svc_rdma_put_frmr(xprt, ctxt->frmr);
 355                        spin_lock_bh(&xprt->sc_rq_dto_lock);
 356                        set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
 357                        list_add_tail(&read_hdr->dto_q,
 358                                      &xprt->sc_read_complete_q);
 359                        spin_unlock_bh(&xprt->sc_rq_dto_lock);
 360                        svc_xprt_enqueue(&xprt->sc_xprt);
 361                }
 362                svc_rdma_put_context(ctxt, 0);
 363                break;
 364
 365        default:
 366                printk(KERN_ERR "svcrdma: unexpected completion type, "
 367                       "opcode=%d\n",
 368                       ctxt->wr_op);
 369                break;
 370        }
 371}
 372
/*
 * Send Queue Completion Handler - potentially called in interrupt context.
 *
 * Note that the caller must hold a transport reference.
 */
 378static void sq_cq_reap(struct svcxprt_rdma *xprt)
 379{
 380        struct svc_rdma_op_ctxt *ctxt = NULL;
 381        struct ib_wc wc;
 382        struct ib_cq *cq = xprt->sc_sq_cq;
 383        int ret;
 384
 385        if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
 386                return;
 387
 388        ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
 389        atomic_inc(&rdma_stat_sq_poll);
 390        while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
 391                if (wc.status != IB_WC_SUCCESS)
 392                        /* Close the transport */
 393                        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
 394
 395                /* Decrement used SQ WR count */
 396                atomic_dec(&xprt->sc_sq_count);
 397                wake_up(&xprt->sc_send_wait);
 398
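                /*
                 * A WR posted without a context (for example the FASTREG
                 * WR built in svc_rdma_fastreg(), which leaves wr_id at
                 * zero) shows up here with a NULL ctxt, hence the check
                 * below.
                 */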
 399                ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
 400                if (ctxt)
 401                        process_context(xprt, ctxt);
 402
 403                svc_xprt_put(&xprt->sc_xprt);
 404        }
 405
 406        if (ctxt)
 407                atomic_inc(&rdma_stat_sq_prod);
 408}
 409
 410static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
 411{
 412        struct svcxprt_rdma *xprt = cq_context;
 413        unsigned long flags;
 414
 415        /* Guard against unconditional flush call for destroyed QP */
        if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
 417                return;
 418
 419        /*
 420         * Set the bit regardless of whether or not it's on the list
 421         * because it may be on the list already due to an RQ
 422         * completion.
 423         */
 424        set_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags);
 425
 426        /*
 427         * If this transport is not already on the DTO transport queue,
 428         * add it
 429         */
 430        spin_lock_irqsave(&dto_lock, flags);
 431        if (list_empty(&xprt->sc_dto_q)) {
 432                svc_xprt_get(&xprt->sc_xprt);
 433                list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
 434        }
 435        spin_unlock_irqrestore(&dto_lock, flags);
 436
 437        /* Tasklet does all the work to avoid irqsave locks. */
 438        tasklet_schedule(&dto_tasklet);
 439}
 440
 441static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
 442                                             int listener)
 443{
 444        struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);
 445
 446        if (!cma_xprt)
 447                return NULL;
 448        svc_xprt_init(&svc_rdma_class, &cma_xprt->sc_xprt, serv);
 449        INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
 450        INIT_LIST_HEAD(&cma_xprt->sc_dto_q);
 451        INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
 452        INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
 453        INIT_LIST_HEAD(&cma_xprt->sc_frmr_q);
 454        init_waitqueue_head(&cma_xprt->sc_send_wait);
 455
 456        spin_lock_init(&cma_xprt->sc_lock);
 457        spin_lock_init(&cma_xprt->sc_rq_dto_lock);
 458        spin_lock_init(&cma_xprt->sc_frmr_q_lock);
 459
 460        cma_xprt->sc_ord = svcrdma_ord;
 461
 462        cma_xprt->sc_max_req_size = svcrdma_max_req_size;
 463        cma_xprt->sc_max_requests = svcrdma_max_requests;
 464        cma_xprt->sc_sq_depth = svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT;
 465        atomic_set(&cma_xprt->sc_sq_count, 0);
 466        atomic_set(&cma_xprt->sc_ctxt_used, 0);
 467
 468        if (listener)
 469                set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);
 470
 471        return cma_xprt;
 472}
 473
 474struct page *svc_rdma_get_page(void)
 475{
 476        struct page *page;
 477
 478        while ((page = alloc_page(GFP_KERNEL)) == NULL) {
 479                /* If we can't get memory, wait a bit and try again */
                printk(KERN_INFO "svcrdma: out of memory..."
                       "retrying in 1 second\n");
                schedule_timeout_uninterruptible(msecs_to_jiffies(1000));
 483        }
 484        return page;
 485}
 486
 487int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
 488{
 489        struct ib_recv_wr recv_wr, *bad_recv_wr;
 490        struct svc_rdma_op_ctxt *ctxt;
 491        struct page *page;
 492        dma_addr_t pa;
 493        int sge_no;
 494        int buflen;
 495        int ret;
 496
 497        ctxt = svc_rdma_get_context(xprt);
 498        buflen = 0;
 499        ctxt->direction = DMA_FROM_DEVICE;
 500        for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
 501                BUG_ON(sge_no >= xprt->sc_max_sge);
 502                page = svc_rdma_get_page();
 503                ctxt->pages[sge_no] = page;
 504                pa = ib_dma_map_single(xprt->sc_cm_id->device,
 505                                     page_address(page), PAGE_SIZE,
 506                                     DMA_FROM_DEVICE);
 507                if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
 508                        goto err_put_ctxt;
 509                atomic_inc(&xprt->sc_dma_used);
 510                ctxt->sge[sge_no].addr = pa;
 511                ctxt->sge[sge_no].length = PAGE_SIZE;
 512                ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey;
 513                buflen += PAGE_SIZE;
 514        }
 515        ctxt->count = sge_no;
 516        recv_wr.next = NULL;
 517        recv_wr.sg_list = &ctxt->sge[0];
 518        recv_wr.num_sge = ctxt->count;
 519        recv_wr.wr_id = (u64)(unsigned long)ctxt;
 520
 521        svc_xprt_get(&xprt->sc_xprt);
 522        ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
 523        if (ret) {
 524                svc_rdma_unmap_dma(ctxt);
 525                svc_rdma_put_context(ctxt, 1);
 526                svc_xprt_put(&xprt->sc_xprt);
 527        }
 528        return ret;
 529
 530 err_put_ctxt:
 531        svc_rdma_put_context(ctxt, 1);
 532        return -ENOMEM;
 533}
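/*
 * Illustrative sketch (mirrors the accept path below): the receive
 * queue is primed by posting one buffer per expected request before the
 * connection is accepted:
 *
 *	for (i = 0; i < newxprt->sc_max_requests; i++) {
 *		ret = svc_rdma_post_recv(newxprt);
 *		if (ret)
 *			goto errout;
 *	}
 */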
 534
/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listening xprt. When the listen thread is kicked, it
 * will call the recvfrom method on the listen xprt which will accept the new
 * connection.
 */
 546static void handle_connect_req(struct rdma_cm_id *new_cma_id, size_t client_ird)
 547{
 548        struct svcxprt_rdma *listen_xprt = new_cma_id->context;
 549        struct svcxprt_rdma *newxprt;
 550        struct sockaddr *sa;
 551
 552        /* Create a new transport */
 553        newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
 554        if (!newxprt) {
 555                dprintk("svcrdma: failed to create new transport\n");
 556                return;
 557        }
 558        newxprt->sc_cm_id = new_cma_id;
 559        new_cma_id->context = newxprt;
 560        dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
 561                newxprt, newxprt->sc_cm_id, listen_xprt);
 562
 563        /* Save client advertised inbound read limit for use later in accept. */
 564        newxprt->sc_ord = client_ird;
 565
 566        /* Set the local and remote addresses in the transport */
 567        sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
 568        svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
 569        sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
 570        svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));
 571
 572        /*
 573         * Enqueue the new transport on the accept queue of the listening
 574         * transport
 575         */
 576        spin_lock_bh(&listen_xprt->sc_lock);
 577        list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
 578        spin_unlock_bh(&listen_xprt->sc_lock);
 579
        /*
         * Can't use svc_xprt_received here because we are not on a
         * rqstp thread
         */
 584        set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
 585        svc_xprt_enqueue(&listen_xprt->sc_xprt);
 586}
 587
/*
 * Handles events generated on the listening endpoint. These events will
 * either be incoming connect requests or adapter removal events.
 */
 592static int rdma_listen_handler(struct rdma_cm_id *cma_id,
 593                               struct rdma_cm_event *event)
 594{
 595        struct svcxprt_rdma *xprt = cma_id->context;
 596        int ret = 0;
 597
 598        switch (event->event) {
 599        case RDMA_CM_EVENT_CONNECT_REQUEST:
 600                dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
 601                        "event=%d\n", cma_id, cma_id->context, event->event);
 602                handle_connect_req(cma_id,
 603                                   event->param.conn.initiator_depth);
 604                break;
 605
 606        case RDMA_CM_EVENT_ESTABLISHED:
 607                /* Accept complete */
 608                dprintk("svcrdma: Connection completed on LISTEN xprt=%p, "
 609                        "cm_id=%p\n", xprt, cma_id);
 610                break;
 611
 612        case RDMA_CM_EVENT_DEVICE_REMOVAL:
 613                dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
 614                        xprt, cma_id);
 615                if (xprt)
 616                        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
 617                break;
 618
 619        default:
 620                dprintk("svcrdma: Unexpected event on listening endpoint %p, "
 621                        "event=%d\n", cma_id, event->event);
 622                break;
 623        }
 624
 625        return ret;
 626}
 627
 628static int rdma_cma_handler(struct rdma_cm_id *cma_id,
 629                            struct rdma_cm_event *event)
 630{
 631        struct svc_xprt *xprt = cma_id->context;
 632        struct svcxprt_rdma *rdma =
 633                container_of(xprt, struct svcxprt_rdma, sc_xprt);
 634        switch (event->event) {
 635        case RDMA_CM_EVENT_ESTABLISHED:
 636                /* Accept complete */
 637                svc_xprt_get(xprt);
 638                dprintk("svcrdma: Connection completed on DTO xprt=%p, "
 639                        "cm_id=%p\n", xprt, cma_id);
 640                clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
 641                svc_xprt_enqueue(xprt);
 642                break;
 643        case RDMA_CM_EVENT_DISCONNECTED:
 644                dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
 645                        xprt, cma_id);
 646                if (xprt) {
 647                        set_bit(XPT_CLOSE, &xprt->xpt_flags);
 648                        svc_xprt_enqueue(xprt);
 649                        svc_xprt_put(xprt);
 650                }
 651                break;
 652        case RDMA_CM_EVENT_DEVICE_REMOVAL:
 653                dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
 654                        "event=%d\n", cma_id, xprt, event->event);
 655                if (xprt) {
 656                        set_bit(XPT_CLOSE, &xprt->xpt_flags);
 657                        svc_xprt_enqueue(xprt);
 658                }
 659                break;
 660        default:
 661                dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
 662                        "event=%d\n", cma_id, event->event);
 663                break;
 664        }
 665        return 0;
 666}
 667
 668/*
 669 * Create a listening RDMA service endpoint.
 670 */
 671static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
 672                                        struct sockaddr *sa, int salen,
 673                                        int flags)
 674{
 675        struct rdma_cm_id *listen_id;
 676        struct svcxprt_rdma *cma_xprt;
 677        struct svc_xprt *xprt;
 678        int ret;
 679
 680        dprintk("svcrdma: Creating RDMA socket\n");
 681
 682        cma_xprt = rdma_create_xprt(serv, 1);
 683        if (!cma_xprt)
 684                return ERR_PTR(-ENOMEM);
 685        xprt = &cma_xprt->sc_xprt;
 686
 687        listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP);
 688        if (IS_ERR(listen_id)) {
 689                ret = PTR_ERR(listen_id);
 690                dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
 691                goto err0;
 692        }
 693
 694        ret = rdma_bind_addr(listen_id, sa);
 695        if (ret) {
 696                dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
 697                goto err1;
 698        }
 699        cma_xprt->sc_cm_id = listen_id;
 700
 701        ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
 702        if (ret) {
 703                dprintk("svcrdma: rdma_listen failed = %d\n", ret);
 704                goto err1;
 705        }
 706
 707        /*
 708         * We need to use the address from the cm_id in case the
 709         * caller specified 0 for the port number.
 710         */
 711        sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
 712        svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);
 713
 714        return &cma_xprt->sc_xprt;
 715
 716 err1:
 717        rdma_destroy_id(listen_id);
 718 err0:
 719        kfree(cma_xprt);
 720        return ERR_PTR(ret);
 721}
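/*
 * Illustrative sketch (assumed caller, not part of this file): once the
 * svcrdma module has registered svc_rdma_class via svc_reg_xprt_class(),
 * the NFS server creates its listening endpoint with something like
 *
 *	svc_create_xprt(serv, "rdma", ...);
 *
 * which lands in svc_rdma_create() above with the requested bind address.
 */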
 722
 723static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt)
 724{
 725        struct ib_mr *mr;
 726        struct ib_fast_reg_page_list *pl;
 727        struct svc_rdma_fastreg_mr *frmr;
 728
 729        frmr = kmalloc(sizeof(*frmr), GFP_KERNEL);
 730        if (!frmr)
 731                goto err;
 732
 733        mr = ib_alloc_fast_reg_mr(xprt->sc_pd, RPCSVC_MAXPAGES);
 734        if (IS_ERR(mr))
 735                goto err_free_frmr;
 736
 737        pl = ib_alloc_fast_reg_page_list(xprt->sc_cm_id->device,
 738                                         RPCSVC_MAXPAGES);
 739        if (IS_ERR(pl))
 740                goto err_free_mr;
 741
 742        frmr->mr = mr;
 743        frmr->page_list = pl;
 744        INIT_LIST_HEAD(&frmr->frmr_list);
 745        return frmr;
 746
 747 err_free_mr:
 748        ib_dereg_mr(mr);
 749 err_free_frmr:
 750        kfree(frmr);
 751 err:
 752        return ERR_PTR(-ENOMEM);
 753}
 754
 755static void rdma_dealloc_frmr_q(struct svcxprt_rdma *xprt)
 756{
 757        struct svc_rdma_fastreg_mr *frmr;
 758
 759        while (!list_empty(&xprt->sc_frmr_q)) {
 760                frmr = list_entry(xprt->sc_frmr_q.next,
 761                                  struct svc_rdma_fastreg_mr, frmr_list);
 762                list_del_init(&frmr->frmr_list);
 763                ib_dereg_mr(frmr->mr);
 764                ib_free_fast_reg_page_list(frmr->page_list);
 765                kfree(frmr);
 766        }
 767}
 768
 769struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *rdma)
 770{
 771        struct svc_rdma_fastreg_mr *frmr = NULL;
 772
 773        spin_lock_bh(&rdma->sc_frmr_q_lock);
 774        if (!list_empty(&rdma->sc_frmr_q)) {
 775                frmr = list_entry(rdma->sc_frmr_q.next,
 776                                  struct svc_rdma_fastreg_mr, frmr_list);
 777                list_del_init(&frmr->frmr_list);
 778                frmr->map_len = 0;
 779                frmr->page_list_len = 0;
 780        }
 781        spin_unlock_bh(&rdma->sc_frmr_q_lock);
 782        if (frmr)
 783                return frmr;
 784
 785        return rdma_alloc_frmr(rdma);
 786}
 787
 788static void frmr_unmap_dma(struct svcxprt_rdma *xprt,
 789                           struct svc_rdma_fastreg_mr *frmr)
 790{
 791        int page_no;
 792        for (page_no = 0; page_no < frmr->page_list_len; page_no++) {
 793                dma_addr_t addr = frmr->page_list->page_list[page_no];
 794                if (ib_dma_mapping_error(frmr->mr->device, addr))
 795                        continue;
 796                atomic_dec(&xprt->sc_dma_used);
 797                ib_dma_unmap_single(frmr->mr->device, addr, PAGE_SIZE,
 798                                    frmr->direction);
 799        }
 800}
 801
 802void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
 803                       struct svc_rdma_fastreg_mr *frmr)
 804{
 805        if (frmr) {
 806                frmr_unmap_dma(rdma, frmr);
 807                spin_lock_bh(&rdma->sc_frmr_q_lock);
 808                BUG_ON(!list_empty(&frmr->frmr_list));
 809                list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
 810                spin_unlock_bh(&rdma->sc_frmr_q_lock);
 811        }
 812}
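/*
 * FRMR life cycle, as used by the fast-register paths (illustrative
 * sketch only):
 *
 *	frmr = svc_rdma_get_frmr(rdma);
 *	... dma-map pages into frmr->page_list, set kva/map_len/direction ...
 *	svc_rdma_fastreg(rdma, frmr);	/. posts the FAST_REG WR ./
 *	... post the WRs that reference the newly registered region ...
 *	svc_rdma_put_frmr(rdma, frmr);	/. unmaps and returns to sc_frmr_q ./
 */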
 813
 814/*
 815 * This is the xpo_recvfrom function for listening endpoints. Its
 816 * purpose is to accept incoming connections. The CMA callback handler
 817 * has already created a new transport and attached it to the new CMA
 818 * ID.
 819 *
 820 * There is a queue of pending connections hung on the listening
 821 * transport. This queue contains the new svc_xprt structure. This
 822 * function takes svc_xprt structures off the accept_q and completes
 823 * the connection.
 824 */
 825static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 826{
 827        struct svcxprt_rdma *listen_rdma;
 828        struct svcxprt_rdma *newxprt = NULL;
 829        struct rdma_conn_param conn_param;
 830        struct ib_qp_init_attr qp_attr;
 831        struct ib_device_attr devattr;
 832        int uninitialized_var(dma_mr_acc);
 833        int need_dma_mr;
 834        int ret;
 835        int i;
 836
 837        listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
 838        clear_bit(XPT_CONN, &xprt->xpt_flags);
 839        /* Get the next entry off the accept list */
 840        spin_lock_bh(&listen_rdma->sc_lock);
 841        if (!list_empty(&listen_rdma->sc_accept_q)) {
 842                newxprt = list_entry(listen_rdma->sc_accept_q.next,
 843                                     struct svcxprt_rdma, sc_accept_q);
 844                list_del_init(&newxprt->sc_accept_q);
 845        }
 846        if (!list_empty(&listen_rdma->sc_accept_q))
 847                set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
 848        spin_unlock_bh(&listen_rdma->sc_lock);
 849        if (!newxprt)
 850                return NULL;
 851
 852        dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
 853                newxprt, newxprt->sc_cm_id);
 854
 855        ret = ib_query_device(newxprt->sc_cm_id->device, &devattr);
 856        if (ret) {
 857                dprintk("svcrdma: could not query device attributes on "
 858                        "device %p, rc=%d\n", newxprt->sc_cm_id->device, ret);
 859                goto errout;
 860        }
 861
 862        /* Qualify the transport resource defaults with the
 863         * capabilities of this particular device */
 864        newxprt->sc_max_sge = min((size_t)devattr.max_sge,
 865                                  (size_t)RPCSVC_MAXPAGES);
 866        newxprt->sc_max_requests = min((size_t)devattr.max_qp_wr,
 867                                   (size_t)svcrdma_max_requests);
 868        newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests;
 869
 870        /*
 871         * Limit ORD based on client limit, local device limit, and
 872         * configured svcrdma limit.
 873         */
 874        newxprt->sc_ord = min_t(size_t, devattr.max_qp_rd_atom, newxprt->sc_ord);
 875        newxprt->sc_ord = min_t(size_t, svcrdma_ord, newxprt->sc_ord);
 876
 877        newxprt->sc_pd = ib_alloc_pd(newxprt->sc_cm_id->device);
 878        if (IS_ERR(newxprt->sc_pd)) {
 879                dprintk("svcrdma: error creating PD for connect request\n");
 880                goto errout;
 881        }
 882        newxprt->sc_sq_cq = ib_create_cq(newxprt->sc_cm_id->device,
 883                                         sq_comp_handler,
 884                                         cq_event_handler,
 885                                         newxprt,
 886                                         newxprt->sc_sq_depth,
 887                                         0);
 888        if (IS_ERR(newxprt->sc_sq_cq)) {
 889                dprintk("svcrdma: error creating SQ CQ for connect request\n");
 890                goto errout;
 891        }
 892        newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device,
 893                                         rq_comp_handler,
 894                                         cq_event_handler,
 895                                         newxprt,
 896                                         newxprt->sc_max_requests,
 897                                         0);
 898        if (IS_ERR(newxprt->sc_rq_cq)) {
 899                dprintk("svcrdma: error creating RQ CQ for connect request\n");
 900                goto errout;
 901        }
 902
 903        memset(&qp_attr, 0, sizeof qp_attr);
 904        qp_attr.event_handler = qp_event_handler;
 905        qp_attr.qp_context = &newxprt->sc_xprt;
 906        qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
 907        qp_attr.cap.max_recv_wr = newxprt->sc_max_requests;
 908        qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
 909        qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
 910        qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
 911        qp_attr.qp_type = IB_QPT_RC;
 912        qp_attr.send_cq = newxprt->sc_sq_cq;
 913        qp_attr.recv_cq = newxprt->sc_rq_cq;
 914        dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n"
 915                "    cm_id->device=%p, sc_pd->device=%p\n"
 916                "    cap.max_send_wr = %d\n"
 917                "    cap.max_recv_wr = %d\n"
 918                "    cap.max_send_sge = %d\n"
 919                "    cap.max_recv_sge = %d\n",
 920                newxprt->sc_cm_id, newxprt->sc_pd,
 921                newxprt->sc_cm_id->device, newxprt->sc_pd->device,
 922                qp_attr.cap.max_send_wr,
 923                qp_attr.cap.max_recv_wr,
 924                qp_attr.cap.max_send_sge,
 925                qp_attr.cap.max_recv_sge);
 926
 927        ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
 928        if (ret) {
 929                /*
 930                 * XXX: This is a hack. We need a xx_request_qp interface
 931                 * that will adjust the qp_attr's with a best-effort
 932                 * number
 933                 */
 934                qp_attr.cap.max_send_sge -= 2;
 935                qp_attr.cap.max_recv_sge -= 2;
 936                ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd,
 937                                     &qp_attr);
 938                if (ret) {
 939                        dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
 940                        goto errout;
 941                }
                newxprt->sc_max_sge = min(qp_attr.cap.max_send_sge,
                                          qp_attr.cap.max_recv_sge);
 944                newxprt->sc_sq_depth = qp_attr.cap.max_send_wr;
 945                newxprt->sc_max_requests = qp_attr.cap.max_recv_wr;
 946        }
 947        newxprt->sc_qp = newxprt->sc_cm_id->qp;
 948
 949        /*
 950         * Use the most secure set of MR resources based on the
 951         * transport type and available memory management features in
 952         * the device. Here's the table implemented below:
 953         *
 954         *              Fast    Global  DMA     Remote WR
 955         *              Reg     LKEY    MR      Access
 956         *              Sup'd   Sup'd   Needed  Needed
 957         *
 958         * IWARP        N       N       Y       Y
 959         *              N       Y       Y       Y
 960         *              Y       N       Y       N
 961         *              Y       Y       N       -
 962         *
 963         * IB           N       N       Y       N
 964         *              N       Y       N       -
 965         *              Y       N       Y       N
 966         *              Y       Y       N       -
 967         *
 968         * NB:  iWARP requires remote write access for the data sink
 969         *      of an RDMA_READ. IB does not.
 970         */
 971        if (devattr.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
 972                newxprt->sc_frmr_pg_list_len =
 973                        devattr.max_fast_reg_page_list_len;
 974                newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG;
 975        }
 976
 977        /*
 978         * Determine if a DMA MR is required and if so, what privs are required
 979         */
 980        switch (rdma_node_get_transport(newxprt->sc_cm_id->device->node_type)) {
 981        case RDMA_TRANSPORT_IWARP:
 982                newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;
 983                if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)) {
 984                        need_dma_mr = 1;
 985                        dma_mr_acc =
 986                                (IB_ACCESS_LOCAL_WRITE |
 987                                 IB_ACCESS_REMOTE_WRITE);
 988                } else if (!(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
 989                        need_dma_mr = 1;
 990                        dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
 991                } else
 992                        need_dma_mr = 0;
 993                break;
 994        case RDMA_TRANSPORT_IB:
 995                if (!(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
 996                        need_dma_mr = 1;
 997                        dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
 998                } else
 999                        need_dma_mr = 0;
1000                break;
1001        default:
1002                goto errout;
1003        }
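        /*
         * Example (derived from the table above): an IB device that
         * advertises both IB_DEVICE_MEM_MGT_EXTENSIONS and
         * IB_DEVICE_LOCAL_DMA_LKEY takes the RDMA_TRANSPORT_IB branch
         * with need_dma_mr = 0, so the device's global DMA lkey below is
         * used and no all-physical-memory MR is created.
         */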
1004
1005        /* Create the DMA MR if needed, otherwise, use the DMA LKEY */
1006        if (need_dma_mr) {
1007                /* Register all of physical memory */
1008                newxprt->sc_phys_mr =
1009                        ib_get_dma_mr(newxprt->sc_pd, dma_mr_acc);
                if (IS_ERR(newxprt->sc_phys_mr)) {
                        ret = PTR_ERR(newxprt->sc_phys_mr);
                        dprintk("svcrdma: Failed to create DMA MR ret=%d\n",
                                ret);
                        goto errout;
                }
1015                newxprt->sc_dma_lkey = newxprt->sc_phys_mr->lkey;
1016        } else
1017                newxprt->sc_dma_lkey =
1018                        newxprt->sc_cm_id->device->local_dma_lkey;
1019
1020        /* Post receive buffers */
1021        for (i = 0; i < newxprt->sc_max_requests; i++) {
1022                ret = svc_rdma_post_recv(newxprt);
1023                if (ret) {
1024                        dprintk("svcrdma: failure posting receive buffers\n");
1025                        goto errout;
1026                }
1027        }
1028
1029        /* Swap out the handler */
1030        newxprt->sc_cm_id->event_handler = rdma_cma_handler;
1031
1032        /*
1033         * Arm the CQs for the SQ and RQ before accepting so we can't
1034         * miss the first message
1035         */
1036        ib_req_notify_cq(newxprt->sc_sq_cq, IB_CQ_NEXT_COMP);
1037        ib_req_notify_cq(newxprt->sc_rq_cq, IB_CQ_NEXT_COMP);
1038
1039        /* Accept Connection */
1040        set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
1041        memset(&conn_param, 0, sizeof conn_param);
1042        conn_param.responder_resources = 0;
1043        conn_param.initiator_depth = newxprt->sc_ord;
1044        ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
1045        if (ret) {
1046                dprintk("svcrdma: failed to accept new connection, ret=%d\n",
1047                       ret);
1048                goto errout;
1049        }
1050
1051        dprintk("svcrdma: new connection %p accepted with the following "
1052                "attributes:\n"
1053                "    local_ip        : %pI4\n"
1054                "    local_port      : %d\n"
1055                "    remote_ip       : %pI4\n"
1056                "    remote_port     : %d\n"
1057                "    max_sge         : %d\n"
1058                "    sq_depth        : %d\n"
1059                "    max_requests    : %d\n"
1060                "    ord             : %d\n",
1061                newxprt,
1062                &((struct sockaddr_in *)&newxprt->sc_cm_id->
1063                         route.addr.src_addr)->sin_addr.s_addr,
1064                ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
1065                       route.addr.src_addr)->sin_port),
1066                &((struct sockaddr_in *)&newxprt->sc_cm_id->
1067                         route.addr.dst_addr)->sin_addr.s_addr,
1068                ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
1069                       route.addr.dst_addr)->sin_port),
1070                newxprt->sc_max_sge,
1071                newxprt->sc_sq_depth,
1072                newxprt->sc_max_requests,
1073                newxprt->sc_ord);
1074
1075        return &newxprt->sc_xprt;
1076
1077 errout:
1078        dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
1079        /* Take a reference in case the DTO handler runs */
1080        svc_xprt_get(&newxprt->sc_xprt);
1081        if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
1082                ib_destroy_qp(newxprt->sc_qp);
1083        rdma_destroy_id(newxprt->sc_cm_id);
1084        /* This call to put will destroy the transport */
1085        svc_xprt_put(&newxprt->sc_xprt);
1086        return NULL;
1087}
1088
1089static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
1090{
1091}
1092
1093/*
1094 * When connected, an svc_xprt has at least two references:
1095 *
1096 * - A reference held by the cm_id between the ESTABLISHED and
1097 *   DISCONNECTED events. If the remote peer disconnected first, this
1098 *   reference could be gone.
1099 *
1100 * - A reference held by the svc_recv code that called this function
1101 *   as part of close processing.
1102 *
 * At a minimum, one reference should still be held.
1104 */
1105static void svc_rdma_detach(struct svc_xprt *xprt)
1106{
1107        struct svcxprt_rdma *rdma =
1108                container_of(xprt, struct svcxprt_rdma, sc_xprt);
1109        dprintk("svc: svc_rdma_detach(%p)\n", xprt);
1110
1111        /* Disconnect and flush posted WQE */
1112        rdma_disconnect(rdma->sc_cm_id);
1113}
1114
1115static void __svc_rdma_free(struct work_struct *work)
1116{
1117        struct svcxprt_rdma *rdma =
1118                container_of(work, struct svcxprt_rdma, sc_work);
1119        dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);
1120
1121        /* We should only be called from kref_put */
1122        BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0);
1123
1124        /*
1125         * Destroy queued, but not processed read completions. Note
1126         * that this cleanup has to be done before destroying the
1127         * cm_id because the device ptr is needed to unmap the dma in
1128         * svc_rdma_put_context.
1129         */
1130        while (!list_empty(&rdma->sc_read_complete_q)) {
1131                struct svc_rdma_op_ctxt *ctxt;
1132                ctxt = list_entry(rdma->sc_read_complete_q.next,
1133                                  struct svc_rdma_op_ctxt,
1134                                  dto_q);
1135                list_del_init(&ctxt->dto_q);
1136                svc_rdma_put_context(ctxt, 1);
1137        }
1138
1139        /* Destroy queued, but not processed recv completions */
1140        while (!list_empty(&rdma->sc_rq_dto_q)) {
1141                struct svc_rdma_op_ctxt *ctxt;
1142                ctxt = list_entry(rdma->sc_rq_dto_q.next,
1143                                  struct svc_rdma_op_ctxt,
1144                                  dto_q);
1145                list_del_init(&ctxt->dto_q);
1146                svc_rdma_put_context(ctxt, 1);
1147        }
1148
1149        /* Warn if we leaked a resource or under-referenced */
1150        WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0);
1151        WARN_ON(atomic_read(&rdma->sc_dma_used) != 0);
1152
1153        /* De-allocate fastreg mr */
1154        rdma_dealloc_frmr_q(rdma);
1155
1156        /* Destroy the QP if present (not a listener) */
1157        if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
1158                ib_destroy_qp(rdma->sc_qp);
1159
1160        if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
1161                ib_destroy_cq(rdma->sc_sq_cq);
1162
1163        if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
1164                ib_destroy_cq(rdma->sc_rq_cq);
1165
1166        if (rdma->sc_phys_mr && !IS_ERR(rdma->sc_phys_mr))
1167                ib_dereg_mr(rdma->sc_phys_mr);
1168
1169        if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
1170                ib_dealloc_pd(rdma->sc_pd);
1171
1172        /* Destroy the CM ID */
1173        rdma_destroy_id(rdma->sc_cm_id);
1174
1175        kfree(rdma);
1176}
1177
1178static void svc_rdma_free(struct svc_xprt *xprt)
1179{
1180        struct svcxprt_rdma *rdma =
1181                container_of(xprt, struct svcxprt_rdma, sc_xprt);
1182        INIT_WORK(&rdma->sc_work, __svc_rdma_free);
1183        schedule_work(&rdma->sc_work);
1184}
1185
1186static int svc_rdma_has_wspace(struct svc_xprt *xprt)
1187{
1188        struct svcxprt_rdma *rdma =
1189                container_of(xprt, struct svcxprt_rdma, sc_xprt);
1190
1191        /*
1192         * If there are fewer SQ WR available than required to send a
1193         * simple response, return false.
1194         */
        if (rdma->sc_sq_depth - atomic_read(&rdma->sc_sq_count) < 3)
1196                return 0;
1197
1198        /*
1199         * ...or there are already waiters on the SQ,
1200         * return false.
1201         */
1202        if (waitqueue_active(&rdma->sc_send_wait))
1203                return 0;
1204
1205        /* Otherwise return true. */
1206        return 1;
1207}
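/*
 * Worked example (illustrative, assuming the defaults set up in
 * rdma_create_xprt()): with sc_sq_depth = sc_max_requests *
 * RPCRDMA_SQ_DEPTH_MULT, the transport reports write space as long as
 * at least 3 SQ WRs are free and nobody is already sleeping in
 * svc_rdma_send() waiting for SQ space.
 */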
1208
/*
 * Attempt to register the kvec representing the RPC memory with the
 * device by posting a FAST_REG work request for the given frmr.
 *
 * Returns:
 *   0 : The register request was successfully posted.
 *  <0 : An error was encountered attempting to post the request.
 */
1219int svc_rdma_fastreg(struct svcxprt_rdma *xprt,
1220                     struct svc_rdma_fastreg_mr *frmr)
1221{
1222        struct ib_send_wr fastreg_wr;
1223        u8 key;
1224
1225        /* Bump the key */
1226        key = (u8)(frmr->mr->lkey & 0x000000FF);
1227        ib_update_fast_reg_key(frmr->mr, ++key);
1228
1229        /* Prepare FASTREG WR */
1230        memset(&fastreg_wr, 0, sizeof fastreg_wr);
1231        fastreg_wr.opcode = IB_WR_FAST_REG_MR;
1232        fastreg_wr.send_flags = IB_SEND_SIGNALED;
1233        fastreg_wr.wr.fast_reg.iova_start = (unsigned long)frmr->kva;
1234        fastreg_wr.wr.fast_reg.page_list = frmr->page_list;
1235        fastreg_wr.wr.fast_reg.page_list_len = frmr->page_list_len;
1236        fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
1237        fastreg_wr.wr.fast_reg.length = frmr->map_len;
1238        fastreg_wr.wr.fast_reg.access_flags = frmr->access_flags;
1239        fastreg_wr.wr.fast_reg.rkey = frmr->mr->lkey;
1240        return svc_rdma_send(xprt, &fastreg_wr);
1241}
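/*
 * Caller sketch (illustrative only; the field values shown are
 * placeholders, not the exact settings used by the svcrdma read/write
 * paths): the frmr is obtained and described, then this routine posts
 * the FAST_REG WR ahead of the WRs that will use the region:
 *
 *	frmr = svc_rdma_get_frmr(xprt);
 *	frmr->kva = vaddr;
 *	frmr->direction = DMA_FROM_DEVICE;
 *	frmr->access_flags = IB_ACCESS_LOCAL_WRITE;
 *	... dma-map pages into frmr->page_list, set page_list_len/map_len ...
 *	ret = svc_rdma_fastreg(xprt, frmr);
 */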
1242
1243int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
1244{
1245        struct ib_send_wr *bad_wr, *n_wr;
1246        int wr_count;
1247        int i;
1248        int ret;
1249
1250        if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
1251                return -ENOTCONN;
1252
1253        BUG_ON(wr->send_flags != IB_SEND_SIGNALED);
1254        wr_count = 1;
1255        for (n_wr = wr->next; n_wr; n_wr = n_wr->next)
1256                wr_count++;
1257
1258        /* If the SQ is full, wait until an SQ entry is available */
1259        while (1) {
1260                spin_lock_bh(&xprt->sc_lock);
1261                if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
1262                        spin_unlock_bh(&xprt->sc_lock);
1263                        atomic_inc(&rdma_stat_sq_starve);
1264
1265                        /* See if we can opportunistically reap SQ WR to make room */
1266                        sq_cq_reap(xprt);
1267
1268                        /* Wait until SQ WR available if SQ still full */
1269                        wait_event(xprt->sc_send_wait,
1270                                   atomic_read(&xprt->sc_sq_count) <
1271                                   xprt->sc_sq_depth);
1272                        if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
1273                                return 0;
1274                        continue;
1275                }
1276                /* Take a transport ref for each WR posted */
1277                for (i = 0; i < wr_count; i++)
1278                        svc_xprt_get(&xprt->sc_xprt);
1279
1280                /* Bump used SQ WR count and post */
1281                atomic_add(wr_count, &xprt->sc_sq_count);
1282                ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
1283                if (ret) {
1284                        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
1285                        atomic_sub(wr_count, &xprt->sc_sq_count);
                        for (i = 0; i < wr_count; i++)
1287                                svc_xprt_put(&xprt->sc_xprt);
1288                        dprintk("svcrdma: failed to post SQ WR rc=%d, "
1289                               "sc_sq_count=%d, sc_sq_depth=%d\n",
1290                               ret, atomic_read(&xprt->sc_sq_count),
1291                               xprt->sc_sq_depth);
1292                }
1293                spin_unlock_bh(&xprt->sc_lock);
1294                if (ret)
1295                        wake_up(&xprt->sc_send_wait);
1296                break;
1297        }
1298        return ret;
1299}
1300
1301void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
1302                         enum rpcrdma_errcode err)
1303{
1304        struct ib_send_wr err_wr;
1305        struct ib_sge sge;
1306        struct page *p;
1307        struct svc_rdma_op_ctxt *ctxt;
1308        u32 *va;
1309        int length;
1310        int ret;
1311
1312        p = svc_rdma_get_page();
1313        va = page_address(p);
1314
1315        /* XDR encode error */
1316        length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);
1317
1318        /* Prepare SGE for local address */
        sge.addr = ib_dma_map_single(xprt->sc_cm_id->device,
                                     page_address(p), PAGE_SIZE, DMA_TO_DEVICE);
1321        if (ib_dma_mapping_error(xprt->sc_cm_id->device, sge.addr)) {
1322                put_page(p);
1323                return;
1324        }
1325        atomic_inc(&xprt->sc_dma_used);
1326        sge.lkey = xprt->sc_dma_lkey;
1327        sge.length = length;
1328
1329        ctxt = svc_rdma_get_context(xprt);
1330        ctxt->count = 1;
1331        ctxt->pages[0] = p;
1332
1333        /* Prepare SEND WR */
1334        memset(&err_wr, 0, sizeof err_wr);
1335        ctxt->wr_op = IB_WR_SEND;
1336        err_wr.wr_id = (unsigned long)ctxt;
1337        err_wr.sg_list = &sge;
1338        err_wr.num_sge = 1;
1339        err_wr.opcode = IB_WR_SEND;
1340        err_wr.send_flags = IB_SEND_SIGNALED;
1341
1342        /* Post It */
1343        ret = svc_rdma_send(xprt, &err_wr);
1344        if (ret) {
1345                dprintk("svcrdma: Error %d posting send for protocol error\n",
1346                        ret);
                ib_dma_unmap_single(xprt->sc_cm_id->device,
                                    sge.addr, PAGE_SIZE,
                                    DMA_TO_DEVICE);
1350                svc_rdma_put_context(ctxt, 1);
1351        }
1352}
1353