linux/net/sunrpc/xprtrdma/svc_rdma_sendto.c
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_sendto. This is called by the
 * RPC server when an RPC Reply is ready to be transmitted to a client.
 *
 * The passed-in svc_rqst contains a struct xdr_buf which holds an
 * XDR-encoded RPC Reply message. sendto must construct the RPC-over-RDMA
 * transport header, post all Write WRs needed for this Reply, then post
 * a Send WR conveying the transport header and the RPC message itself to
 * the client.
 *
 * svc_rdma_sendto must fully transmit the Reply before returning, as
 * the svc_rqst will be recycled as soon as sendto returns. Remaining
 * resources referred to by the svc_rqst are also recycled at that time.
 * Therefore any resources that must remain longer must be detached
 * from the svc_rqst and released later.
 *
 * Page Management
 *
 * The I/O that performs Reply transmission is asynchronous, and may
 * complete well after sendto returns. Thus pages under I/O must be
 * removed from the svc_rqst before sendto returns.
 *
 * The logic here depends on Send Queue and completion ordering. Since
 * the Send WR is always posted last, it will always complete last. Thus
 * when it completes, it is guaranteed that all previous Write WRs have
 * also completed.
 *
 * Write WRs are constructed and posted. Each Write segment gets its own
 * svc_rdma_rw_ctxt, allowing the Write completion handler to find and
 * DMA-unmap the pages under I/O for that Write segment. The Write
 * completion handler does not release any pages.
 *
 * When the Send WR is constructed, it also gets its own svc_rdma_send_ctxt.
 * The ownership of all of the Reply's pages is transferred into that
 * ctxt, the Send WR is posted, and sendto returns.
 *
 * The svc_rdma_send_ctxt is presented when the Send WR completes. The
 * Send completion handler finally releases the Reply's pages.
 *
 * This mechanism also assumes that completions on the transport's Send
 * Completion Queue do not run in parallel. Otherwise a Write completion
 * and a Send completion running at the same time could release pages that
 * are still DMA-mapped.
 *
 * Error Handling
 *
 * - If the Send WR is posted successfully, it will either complete
 *   successfully, or get flushed. Either way, the Send completion
 *   handler releases the Reply's pages.
 * - If the Send WR cannot be posted, the forward path releases
 *   the Reply's pages.
 *
 * This scheme handles the case where two different Write segments
 * transmit portions of the same page, without resorting to page
 * reference counting.
 */
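
/* To make the ordering above concrete, the lifetime of one Reply's
 * resources runs roughly as follows (an illustrative sketch, not code
 * from this file):
 *
 *	svc_rdma_sendto()
 *	    post Write WRs		(pages still owned by the svc_rqst)
 *	    svc_rdma_save_io_pages()	(page ownership -> send_ctxt)
 *	    post Send WR
 *	    return			(svc_rqst is recycled)
 *	    ...
 *	    Write completions		(DMA-unmap Write segments only)
 *	    Send completion		(svc_rdma_send_ctxt_put releases pages)
 *
 * Because the Send WR is posted last on the same Send Queue, its
 * completion is the point at which every page is both unmapped and no
 * longer referenced by any posted WR.
 */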

#include <linux/spinlock.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY RPCDBG_SVCXPRT

static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);

static inline struct svc_rdma_send_ctxt *
svc_rdma_next_send_ctxt(struct list_head *list)
{
        return list_first_entry_or_null(list, struct svc_rdma_send_ctxt,
                                        sc_list);
}

static void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma,
                                   struct rpc_rdma_cid *cid)
{
        cid->ci_queue_id = rdma->sc_sq_cq->res.id;
        cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
}

static struct svc_rdma_send_ctxt *
svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_send_ctxt *ctxt;
        dma_addr_t addr;
        void *buffer;
        size_t size;
        int i;

        size = sizeof(*ctxt);
        size += rdma->sc_max_send_sges * sizeof(struct ib_sge);
        ctxt = kmalloc(size, GFP_KERNEL);
        if (!ctxt)
                goto fail0;
        buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
        if (!buffer)
                goto fail1;
        addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
                                 rdma->sc_max_req_size, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
                goto fail2;

        svc_rdma_send_cid_init(rdma, &ctxt->sc_cid);

        ctxt->sc_send_wr.next = NULL;
        ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
        ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
        ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
        ctxt->sc_cqe.done = svc_rdma_wc_send;
        ctxt->sc_xprt_buf = buffer;
        xdr_buf_init(&ctxt->sc_hdrbuf, ctxt->sc_xprt_buf,
                     rdma->sc_max_req_size);
        ctxt->sc_sges[0].addr = addr;

        for (i = 0; i < rdma->sc_max_send_sges; i++)
                ctxt->sc_sges[i].lkey = rdma->sc_pd->local_dma_lkey;
        return ctxt;

fail2:
        kfree(buffer);
fail1:
        kfree(ctxt);
fail0:
        return NULL;
}

/**
 * svc_rdma_send_ctxts_destroy - Release all send_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_send_ctxt *ctxt;

        while ((ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts))) {
                list_del(&ctxt->sc_list);
                ib_dma_unmap_single(rdma->sc_pd->device,
                                    ctxt->sc_sges[0].addr,
                                    rdma->sc_max_req_size,
                                    DMA_TO_DEVICE);
                kfree(ctxt->sc_xprt_buf);
                kfree(ctxt);
        }
}

/**
 * svc_rdma_send_ctxt_get - Get a free send_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a ready-to-use send_ctxt, or NULL if none are
 * available and a fresh one cannot be allocated.
 */
struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_send_ctxt *ctxt;

        spin_lock(&rdma->sc_send_lock);
        ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts);
        if (!ctxt)
                goto out_empty;
        list_del(&ctxt->sc_list);
        spin_unlock(&rdma->sc_send_lock);

out:
        rpcrdma_set_xdrlen(&ctxt->sc_hdrbuf, 0);
        xdr_init_encode(&ctxt->sc_stream, &ctxt->sc_hdrbuf,
                        ctxt->sc_xprt_buf, NULL);

        ctxt->sc_send_wr.num_sge = 0;
        ctxt->sc_cur_sge_no = 0;
        ctxt->sc_page_count = 0;
        return ctxt;

out_empty:
        spin_unlock(&rdma->sc_send_lock);
        ctxt = svc_rdma_send_ctxt_alloc(rdma);
        if (!ctxt)
                return NULL;
        goto out;
}
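
/* A typical caller pairs these helpers like this (illustrative sketch
 * only; the real caller is svc_rdma_sendto below, whose error paths
 * are slightly richer):
 *
 *	sctxt = svc_rdma_send_ctxt_get(rdma);
 *	if (!sctxt)
 *		goto drop_connection;
 *	... encode the transport header via sctxt->sc_stream ...
 *	if (svc_rdma_send(rdma, sctxt))
 *		svc_rdma_send_ctxt_put(rdma, sctxt);
 *
 * After a successful post, the ctxt is returned to the free list by
 * the Send completion handler instead.
 */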

/**
 * svc_rdma_send_ctxt_put - Return send_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 * Pages left in sc_pages are DMA unmapped and released.
 */
void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
                            struct svc_rdma_send_ctxt *ctxt)
{
        struct ib_device *device = rdma->sc_cm_id->device;
        unsigned int i;

        /* The first SGE contains the transport header, which
         * remains mapped until @ctxt is destroyed.
         */
        for (i = 1; i < ctxt->sc_send_wr.num_sge; i++) {
                ib_dma_unmap_page(device,
                                  ctxt->sc_sges[i].addr,
                                  ctxt->sc_sges[i].length,
                                  DMA_TO_DEVICE);
                trace_svcrdma_dma_unmap_page(rdma,
                                             ctxt->sc_sges[i].addr,
                                             ctxt->sc_sges[i].length);
        }

        for (i = 0; i < ctxt->sc_page_count; ++i)
                put_page(ctxt->sc_pages[i]);

        spin_lock(&rdma->sc_send_lock);
        list_add(&ctxt->sc_list, &rdma->sc_send_ctxts);
        spin_unlock(&rdma->sc_send_lock);
}

/**
 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Send completion handler could be running.
 */
static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
        struct svcxprt_rdma *rdma = cq->cq_context;
        struct ib_cqe *cqe = wc->wr_cqe;
        struct svc_rdma_send_ctxt *ctxt =
                container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);

        trace_svcrdma_wc_send(wc, &ctxt->sc_cid);

        atomic_inc(&rdma->sc_sq_avail);
        wake_up(&rdma->sc_send_wait);

        svc_rdma_send_ctxt_put(rdma, ctxt);

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
                svc_xprt_enqueue(&rdma->sc_xprt);
        }
}

/**
 * svc_rdma_send - Post a single Send WR
 * @rdma: transport on which to post the WR
 * @ctxt: send ctxt with a Send WR ready to post
 *
 * Returns zero if the Send WR was posted successfully. Otherwise, a
 * negative errno is returned.
 */
int svc_rdma_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)
{
        struct ib_send_wr *wr = &ctxt->sc_send_wr;
        int ret;

        might_sleep();

        /* Sync the transport header buffer */
        ib_dma_sync_single_for_device(rdma->sc_pd->device,
                                      wr->sg_list[0].addr,
                                      wr->sg_list[0].length,
                                      DMA_TO_DEVICE);

        /* If the SQ is full, wait until an SQ entry is available */
        while (1) {
                if (atomic_dec_return(&rdma->sc_sq_avail) < 0) {
                        atomic_inc(&rdma_stat_sq_starve);
                        trace_svcrdma_sq_full(rdma);
                        atomic_inc(&rdma->sc_sq_avail);
                        wait_event(rdma->sc_send_wait,
                                   atomic_read(&rdma->sc_sq_avail) > 1);
                        if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
                                return -ENOTCONN;
                        trace_svcrdma_sq_retry(rdma);
                        continue;
                }

                trace_svcrdma_post_send(ctxt);
                ret = ib_post_send(rdma->sc_qp, wr, NULL);
                if (ret)
                        break;
                return 0;
        }

        trace_svcrdma_sq_post_err(rdma, ret);
        set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
        wake_up(&rdma->sc_send_wait);
        return ret;
}
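
/* Send Queue accounting in a nutshell (an illustrative sketch; the
 * authoritative logic is svc_rdma_send() above and svc_rdma_wc_send()):
 *
 *	post path:	if (atomic_dec_return(&sc_sq_avail) < 0)
 *				undo the decrement, then sleep on
 *				sc_send_wait until credits return;
 *			else
 *				ib_post_send()
 *	completion:	atomic_inc(&sc_sq_avail);
 *			wake_up(&sc_send_wait);
 *
 * sc_sq_avail thus counts free Send Queue entries; a transient
 * negative value simply means a poster lost the race and must wait.
 */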

/**
 * svc_rdma_encode_read_list - Encode RPC Reply's Read chunk list
 * @sctxt: Send context for the RPC Reply
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply Read list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_read_list(struct svc_rdma_send_ctxt *sctxt)
{
        /* RPC-over-RDMA version 1 replies never have a Read list. */
        return xdr_stream_encode_item_absent(&sctxt->sc_stream);
}

/**
 * svc_rdma_encode_write_segment - Encode one Write segment
 * @src: matching Write chunk in the RPC Call header
 * @sctxt: Send context for the RPC Reply
 * @remaining: remaining bytes of the payload left in the Write chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write segment
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_segment(__be32 *src,
                                             struct svc_rdma_send_ctxt *sctxt,
                                             unsigned int *remaining)
{
        __be32 *p;
        const size_t len = rpcrdma_segment_maxsz * sizeof(*p);
        u32 handle, length;
        u64 offset;

        p = xdr_reserve_space(&sctxt->sc_stream, len);
        if (!p)
                return -EMSGSIZE;

        xdr_decode_rdma_segment(src, &handle, &length, &offset);

        if (*remaining < length) {
                /* segment only partly filled */
                length = *remaining;
                *remaining = 0;
        } else {
                /* entire segment was consumed */
                *remaining -= length;
        }
        xdr_encode_rdma_segment(p, handle, length, offset);

        trace_svcrdma_encode_wseg(handle, length, offset);
        return len;
}

/**
 * svc_rdma_encode_write_chunk - Encode one Write chunk
 * @src: matching Write chunk in the RPC Call header
 * @sctxt: Send context for the RPC Reply
 * @remaining: size in bytes of the payload in the Write chunk
 *
 * Copy a Write chunk from the Call transport header to the
 * Reply transport header. Update each segment's length field
 * to reflect the number of bytes written in that segment.
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write chunk
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_chunk(__be32 *src,
                                           struct svc_rdma_send_ctxt *sctxt,
                                           unsigned int remaining)
{
        unsigned int i, nsegs;
        ssize_t len, ret;

        len = 0;
        trace_svcrdma_encode_write_chunk(remaining);

        src++;
        ret = xdr_stream_encode_item_present(&sctxt->sc_stream);
        if (ret < 0)
                return -EMSGSIZE;
        len += ret;

        nsegs = be32_to_cpup(src++);
        ret = xdr_stream_encode_u32(&sctxt->sc_stream, nsegs);
        if (ret < 0)
                return -EMSGSIZE;
        len += ret;

        for (i = nsegs; i; i--) {
                ret = svc_rdma_encode_write_segment(src, sctxt, &remaining);
                if (ret < 0)
                        return -EMSGSIZE;
                src += rpcrdma_segment_maxsz;
                len += ret;
        }

        return len;
}
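
/* On the wire, the Write chunk encoded above is laid out like this
 * (a sketch of the RPC-over-RDMA v1 XDR from RFC 8166; each row is
 * one XDR word unless noted):
 *
 *	1			- optional-data discriminator: present
 *	nsegs			- count of RDMA segments
 *	  handle		- \
 *	  length		-  > repeated nsegs times
 *	  offset (two words)	- /
 *
 * Only the length fields differ from the chunk the client sent; they
 * are rewritten to the number of bytes actually written.
 */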

/**
 * svc_rdma_encode_write_list - Encode RPC Reply's Write chunk list
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 * @length: size in bytes of the payload in the first Write chunk
 *
 * The client provides a Write chunk list in the Call message. Fill
 * in the segments in the first Write chunk in the Reply's transport
 * header with the number of bytes consumed in each segment.
 * Remaining chunks are returned unused.
 *
 * Assumptions:
 *  - Client has provided only one Write chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Write list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t
svc_rdma_encode_write_list(const struct svc_rdma_recv_ctxt *rctxt,
                           struct svc_rdma_send_ctxt *sctxt,
                           unsigned int length)
{
        ssize_t len, ret;

        ret = svc_rdma_encode_write_chunk(rctxt->rc_write_list, sctxt, length);
        if (ret < 0)
                return ret;
        len = ret;

        /* Terminate the Write list */
        ret = xdr_stream_encode_item_absent(&sctxt->sc_stream);
        if (ret < 0)
                return ret;

        return len + ret;
}

/**
 * svc_rdma_encode_reply_chunk - Encode RPC Reply's Reply chunk
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 * @length: size in bytes of the payload in the Reply chunk
 *
 * Assumptions:
 * - Reply can always fit in the client-provided Reply chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Reply chunk
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t
svc_rdma_encode_reply_chunk(const struct svc_rdma_recv_ctxt *rctxt,
                            struct svc_rdma_send_ctxt *sctxt,
                            unsigned int length)
{
        return svc_rdma_encode_write_chunk(rctxt->rc_reply_chunk, sctxt,
                                           length);
}

static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma,
                                 struct svc_rdma_send_ctxt *ctxt,
                                 struct page *page,
                                 unsigned long offset,
                                 unsigned int len)
{
        struct ib_device *dev = rdma->sc_cm_id->device;
        dma_addr_t dma_addr;

        dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
        trace_svcrdma_dma_map_page(rdma, dma_addr, len);
        if (ib_dma_mapping_error(dev, dma_addr))
                goto out_maperr;

        ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = dma_addr;
        ctxt->sc_sges[ctxt->sc_cur_sge_no].length = len;
        ctxt->sc_send_wr.num_sge++;
        return 0;

out_maperr:
        return -EIO;
}

/* ib_dma_map_page() is used here because svc_rdma_send_ctxt_put()
 * handles the DMA-unmap, and it uses ib_dma_unmap_page() exclusively.
 */
static int svc_rdma_dma_map_buf(struct svcxprt_rdma *rdma,
                                struct svc_rdma_send_ctxt *ctxt,
                                unsigned char *base,
                                unsigned int len)
{
        return svc_rdma_dma_map_page(rdma, ctxt, virt_to_page(base),
                                     offset_in_page(base), len);
}

/**
 * svc_rdma_pull_up_needed - Determine whether to use pull-up
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: xdr_buf containing RPC message to transmit
 *
 * Returns:
 *      %true if pull-up must be used
 *      %false otherwise
 */
static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
                                    struct svc_rdma_send_ctxt *sctxt,
                                    const struct svc_rdma_recv_ctxt *rctxt,
                                    struct xdr_buf *xdr)
{
        int elements;

        /* For small messages, copying bytes is cheaper than DMA mapping.
         */
        if (sctxt->sc_hdrbuf.len + xdr->len < RPCRDMA_PULLUP_THRESH)
                return true;

        /* Check whether the xdr_buf has more elements than can
         * fit in a single RDMA Send.
         */
        /* xdr->head */
        elements = 1;

        /* xdr->pages */
        if (!rctxt || !rctxt->rc_write_list) {
                unsigned int remaining;
                unsigned long pageoff;

                pageoff = xdr->page_base & ~PAGE_MASK;
                remaining = xdr->page_len;
                while (remaining) {
                        ++elements;
                        remaining -= min_t(u32, PAGE_SIZE - pageoff,
                                           remaining);
                        pageoff = 0;
                }
        }

        /* xdr->tail */
        if (xdr->tail[0].iov_len)
                ++elements;

        /* assume 1 SGE is needed for the transport header */
        return elements >= rdma->sc_max_send_sges;
}
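
/* A worked example of the element arithmetic above, assuming a
 * hypothetical device limit of sc_max_send_sges = 4: an xdr_buf with
 * a head, 16KB of page data (four pages, page_base 0), and a tail
 * counts 1 + 4 + 1 = 6 elements. Since the transport header needs one
 * more SGE on top of that, 6 >= 4 forces pull-up. The same message
 * under a Write chunk skips the page loop entirely and counts only
 * 2 elements, so it can be sent without pull-up.
 */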

/**
 * svc_rdma_pull_up_reply_msg - Copy Reply into a single buffer
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR; xprt hdr is already prepared
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * The device is not capable of sending the reply directly.
 * Assemble the elements of @xdr into the transport header buffer.
 *
 * Returns zero on success, or a negative errno on failure.
 */
static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
                                      struct svc_rdma_send_ctxt *sctxt,
                                      const struct svc_rdma_recv_ctxt *rctxt,
                                      const struct xdr_buf *xdr)
{
        unsigned char *dst, *tailbase;
        unsigned int taillen;

        dst = sctxt->sc_xprt_buf + sctxt->sc_hdrbuf.len;
        memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len);
        dst += xdr->head[0].iov_len;

        tailbase = xdr->tail[0].iov_base;
        taillen = xdr->tail[0].iov_len;
        if (rctxt && rctxt->rc_write_list) {
                u32 xdrpad;

                xdrpad = xdr_pad_size(xdr->page_len);
                if (taillen && xdrpad) {
                        tailbase += xdrpad;
                        taillen -= xdrpad;
                }
        } else {
                unsigned int len, remaining;
                unsigned long pageoff;
                struct page **ppages;

                ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
                pageoff = xdr->page_base & ~PAGE_MASK;
                remaining = xdr->page_len;
                while (remaining) {
                        len = min_t(u32, PAGE_SIZE - pageoff, remaining);

                        /* Honor the page offset on the first page, and
                         * advance to the next page once it is drained.
                         */
                        memcpy(dst, page_address(*ppages) + pageoff, len);
                        remaining -= len;
                        dst += len;
                        pageoff = 0;
                        ppages++;
                }
        }

        if (taillen)
                memcpy(dst, tailbase, taillen);

        sctxt->sc_sges[0].length += xdr->len;
        trace_svcrdma_send_pullup(sctxt->sc_sges[0].length);
        return 0;
}

/**
 * svc_rdma_map_reply_msg - DMA map the buffer holding RPC message
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * Load the xdr_buf into the ctxt's sge array, and DMA map each
 * element as it is added. The Send WR's num_sge field is set.
 *
 * Returns zero on success, or a negative errno on failure.
 */
int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
                           struct svc_rdma_send_ctxt *sctxt,
                           const struct svc_rdma_recv_ctxt *rctxt,
                           struct xdr_buf *xdr)
{
        unsigned int len, remaining;
        unsigned long page_off;
        struct page **ppages;
        unsigned char *base;
        u32 xdr_pad;
        int ret;

        /* Set up the (persistently-mapped) transport header SGE. */
        sctxt->sc_send_wr.num_sge = 1;
        sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;

        /* If there is a Reply chunk, nothing follows the transport
         * header, and we're done here.
         */
        if (rctxt && rctxt->rc_reply_chunk)
                return 0;

        /* For pull-up, svc_rdma_send() will sync the transport header.
         * No additional DMA mapping is necessary.
         */
        if (svc_rdma_pull_up_needed(rdma, sctxt, rctxt, xdr))
                return svc_rdma_pull_up_reply_msg(rdma, sctxt, rctxt, xdr);

        ++sctxt->sc_cur_sge_no;
        ret = svc_rdma_dma_map_buf(rdma, sctxt,
                                   xdr->head[0].iov_base,
                                   xdr->head[0].iov_len);
        if (ret < 0)
                return ret;

        /* If a Write chunk is present, the xdr_buf's page list
         * is not included inline. However the Upper Layer may
         * have added XDR padding in the tail buffer, and that
         * should not be included inline.
         */
        if (rctxt && rctxt->rc_write_list) {
                base = xdr->tail[0].iov_base;
                len = xdr->tail[0].iov_len;
                xdr_pad = xdr_pad_size(xdr->page_len);

                if (len && xdr_pad) {
                        base += xdr_pad;
                        len -= xdr_pad;
                }

                goto tail;
        }

        ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
        page_off = xdr->page_base & ~PAGE_MASK;
        remaining = xdr->page_len;
        while (remaining) {
                len = min_t(u32, PAGE_SIZE - page_off, remaining);

                ++sctxt->sc_cur_sge_no;
                ret = svc_rdma_dma_map_page(rdma, sctxt, *ppages++,
                                            page_off, len);
                if (ret < 0)
                        return ret;

                remaining -= len;
                page_off = 0;
        }

        base = xdr->tail[0].iov_base;
        len = xdr->tail[0].iov_len;
tail:
        if (len) {
                ++sctxt->sc_cur_sge_no;
                ret = svc_rdma_dma_map_buf(rdma, sctxt, base, len);
                if (ret < 0)
                        return ret;
        }

        return 0;
}
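
/* When no chunks are in play, the SGE array produced by
 * svc_rdma_map_reply_msg() looks roughly like this (illustrative):
 *
 *	sc_sges[0]	- transport header (persistently mapped)
 *	sc_sges[1]	- xdr->head
 *	sc_sges[2..n]	- one SGE per page of xdr->pages
 *	sc_sges[n+1]	- xdr->tail, if any
 *
 * With a Write chunk, the page-list SGEs are omitted; with a Reply
 * chunk, only sc_sges[0] is used.
 */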

/* The svc_rqst and all resources it owns are released as soon as
 * svc_rdma_sendto returns. Transfer pages under I/O to the ctxt
 * so they are released by the Send completion handler.
 */
static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
                                   struct svc_rdma_send_ctxt *ctxt)
{
        int i, pages = rqstp->rq_next_page - rqstp->rq_respages;

        ctxt->sc_page_count += pages;
        for (i = 0; i < pages; i++) {
                ctxt->sc_pages[i] = rqstp->rq_respages[i];
                rqstp->rq_respages[i] = NULL;
        }

        /* Prevent svc_xprt_release from releasing pages in rq_pages */
        rqstp->rq_next_page = rqstp->rq_respages;
}

/* Prepare the portion of the RPC Reply that will be transmitted
 * via RDMA Send. The RPC-over-RDMA transport header is prepared
 * in sc_sges[0], and the RPC xdr_buf is prepared in following sges.
 *
 * Depending on whether a Write list or Reply chunk is present,
 * the server may send all, a portion of, or none of the xdr_buf.
 * In the latter case, only the transport header (sc_sges[0]) is
 * transmitted.
 *
 * RDMA Send is the last step of transmitting an RPC reply. Pages
 * involved in the earlier RDMA Writes are here transferred out
 * of the rqstp and into the sctxt's page array. These pages are
 * DMA unmapped by each Write completion, but the subsequent Send
 * completion finally releases these pages.
 *
 * Assumptions:
 * - The Reply's transport header will never be larger than a page.
 */
static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
                                   struct svc_rdma_send_ctxt *sctxt,
                                   const struct svc_rdma_recv_ctxt *rctxt,
                                   struct svc_rqst *rqstp)
{
        int ret;

        ret = svc_rdma_map_reply_msg(rdma, sctxt, rctxt, &rqstp->rq_res);
        if (ret < 0)
                return ret;

        svc_rdma_save_io_pages(rqstp, sctxt);

        if (rctxt->rc_inv_rkey) {
                sctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV;
                sctxt->sc_send_wr.ex.invalidate_rkey = rctxt->rc_inv_rkey;
        } else {
                sctxt->sc_send_wr.opcode = IB_WR_SEND;
        }
        return svc_rdma_send(rdma, sctxt);
}

/**
 * svc_rdma_send_error_msg - Send an RPC/RDMA v1 error response
 * @rdma: controlling transport context
 * @sctxt: Send context for the response
 * @rctxt: Receive context for incoming bad message
 * @status: negative errno indicating error that occurred
 *
 * Given the client-provided Read, Write, and Reply chunks, the
 * server was not able to parse the Call or form a complete Reply.
 * Return an RDMA_ERROR message so the client can retire the RPC
 * transaction.
 *
 * The caller does not have to release @sctxt. It is released by
 * Send completion, or by this function on error.
 */
void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
                             struct svc_rdma_send_ctxt *sctxt,
                             struct svc_rdma_recv_ctxt *rctxt,
                             int status)
{
        __be32 *rdma_argp = rctxt->rc_recv_buf;
        __be32 *p;

        rpcrdma_set_xdrlen(&sctxt->sc_hdrbuf, 0);
        xdr_init_encode(&sctxt->sc_stream, &sctxt->sc_hdrbuf,
                        sctxt->sc_xprt_buf, NULL);

        p = xdr_reserve_space(&sctxt->sc_stream,
                              rpcrdma_fixed_maxsz * sizeof(*p));
        if (!p)
                goto put_ctxt;

        *p++ = *rdma_argp;
        *p++ = *(rdma_argp + 1);
        *p++ = rdma->sc_fc_credits;
        *p = rdma_error;

        switch (status) {
        case -EPROTONOSUPPORT:
                p = xdr_reserve_space(&sctxt->sc_stream, 3 * sizeof(*p));
                if (!p)
                        goto put_ctxt;

                *p++ = err_vers;
                *p++ = rpcrdma_version;
                *p = rpcrdma_version;
                trace_svcrdma_err_vers(*rdma_argp);
                break;
        default:
                p = xdr_reserve_space(&sctxt->sc_stream, sizeof(*p));
                if (!p)
                        goto put_ctxt;

                *p = err_chunk;
                trace_svcrdma_err_chunk(*rdma_argp);
        }

        /* Remote Invalidation is skipped for simplicity. */
        sctxt->sc_send_wr.num_sge = 1;
        sctxt->sc_send_wr.opcode = IB_WR_SEND;
        sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;
        if (svc_rdma_send(rdma, sctxt))
                goto put_ctxt;
        return;

put_ctxt:
        svc_rdma_send_ctxt_put(rdma, sctxt);
}
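
/* The RDMA_ERROR message built above is tiny (a sketch of the v1 XDR):
 *
 *	xid, vers, credits, RDMA_ERROR	- fixed transport header
 *	ERR_VERS, low, high		- for -EPROTONOSUPPORT, or
 *	ERR_CHUNK			- for everything else
 *
 * Note that both "low" and "high" are rpcrdma_version here, because
 * this implementation speaks only RPC-over-RDMA version 1.
 */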

/**
 * svc_rdma_sendto - Transmit an RPC reply
 * @rqstp: processed RPC request, reply XDR already in ::rq_res
 *
 * Any resources still associated with @rqstp are released upon return.
 * If no reply message was possible, the connection is closed.
 *
 * Returns:
 *      %0 if an RPC reply has been successfully posted,
 *      %-ENOMEM if a resource shortage occurred (connection is lost),
 *      %-ENOTCONN if posting failed (connection is lost).
 */
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
        struct svc_xprt *xprt = rqstp->rq_xprt;
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
        __be32 *rdma_argp = rctxt->rc_recv_buf;
        __be32 *wr_lst = rctxt->rc_write_list;
        __be32 *rp_ch = rctxt->rc_reply_chunk;
        struct xdr_buf *xdr = &rqstp->rq_res;
        struct svc_rdma_send_ctxt *sctxt;
        __be32 *p;
        int ret;

        ret = -ENOTCONN;
        if (svc_xprt_is_dead(xprt))
                goto err0;

        ret = -ENOMEM;
        sctxt = svc_rdma_send_ctxt_get(rdma);
        if (!sctxt)
                goto err0;

        /* Once sctxt is acquired, encoding failures must release it
         * via err1; jumping straight to err0 would leak the ctxt.
         */
        p = xdr_reserve_space(&sctxt->sc_stream,
                              rpcrdma_fixed_maxsz * sizeof(*p));
        if (!p)
                goto err1;
        *p++ = *rdma_argp;
        *p++ = *(rdma_argp + 1);
        *p++ = rdma->sc_fc_credits;
        *p   = rp_ch ? rdma_nomsg : rdma_msg;

        if (svc_rdma_encode_read_list(sctxt) < 0)
                goto err1;
        if (wr_lst) {
                /* XXX: Presume the client sent only one Write chunk */
                unsigned long offset;
                unsigned int length;

                if (rctxt->rc_read_payload_length) {
                        offset = rctxt->rc_read_payload_offset;
                        length = rctxt->rc_read_payload_length;
                } else {
                        offset = xdr->head[0].iov_len;
                        length = xdr->page_len;
                }
                ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr, offset,
                                                length);
                if (ret < 0)
                        goto err2;
                if (svc_rdma_encode_write_list(rctxt, sctxt, length) < 0)
                        goto err1;
        } else {
                if (xdr_stream_encode_item_absent(&sctxt->sc_stream) < 0)
                        goto err1;
        }
        if (rp_ch) {
                ret = svc_rdma_send_reply_chunk(rdma, rctxt, &rqstp->rq_res);
                if (ret < 0)
                        goto err2;
                if (svc_rdma_encode_reply_chunk(rctxt, sctxt, ret) < 0)
                        goto err1;
        } else {
                if (xdr_stream_encode_item_absent(&sctxt->sc_stream) < 0)
                        goto err1;
        }

        ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp);
        if (ret < 0)
                goto err1;
        return 0;

 err2:
        if (ret != -E2BIG && ret != -EINVAL)
                goto err1;

        /* Send completion releases payload pages that were part
         * of previously posted RDMA Writes.
         */
        svc_rdma_save_io_pages(rqstp, sctxt);
        svc_rdma_send_error_msg(rdma, sctxt, rctxt, ret);
        return 0;

 err1:
        svc_rdma_send_ctxt_put(rdma, sctxt);
 err0:
        trace_svcrdma_send_err(rqstp, ret);
        set_bit(XPT_CLOSE, &xprt->xpt_flags);
        return -ENOTCONN;
}

/**
 * svc_rdma_read_payload - special processing for a READ payload
 * @rqstp: svc_rqst to operate on
 * @offset: payload's byte offset in rq_res
 * @length: size of payload, in bytes
 *
 * Returns zero on success.
 *
 * For the moment, just record the xdr_buf location of the READ
 * payload. svc_rdma_sendto will use that location later when
 * we actually send the payload.
 */
int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset,
                          unsigned int length)
{
        struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;

        /* XXX: Just one READ payload slot for now, since our
         * transport implementation currently supports only one
         * Write chunk.
         */
        rctxt->rc_read_payload_offset = offset;
        rctxt->rc_read_payload_length = length;

        return 0;
}