linux/net/sunrpc/xprtrdma/rpc_rdma.c
/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and does
 * marshaling/unmarshaling, etc. It also contains the interfaces to
 * the Linux RPC framework.
 */

#include "xprt_rdma.h"

#include <linux/highmem.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

enum rpcrdma_chunktype {
	rpcrdma_noch = 0,
	rpcrdma_readch,
	rpcrdma_areadch,
	rpcrdma_writech,
	rpcrdma_replych
};

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static const char transfertypes[][12] = {
	"pure inline",	/* no chunks */
	" read chunk",	/* some argument via rdma read */
	"*read chunk",	/* entire request via rdma read */
	"write chunk",	/* some result via rdma write */
	"reply chunk"	/* entire reply via rdma write */
};
#endif

/* The client can send a request inline as long as the RPCRDMA header
 * plus the RPC call fit under the transport's inline limit. If the
 * combined call message size exceeds that limit, the client must use
 * the read chunk list for this operation.
 */
static bool rpcrdma_args_inline(struct rpc_rqst *rqst)
{
	unsigned int callsize = RPCRDMA_HDRLEN_MIN + rqst->rq_snd_buf.len;

	return callsize <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst);
}
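
/* For illustration (the specific values here are assumptions, not taken
 * from this file): RPCRDMA_HDRLEN_MIN is the 28-byte fixed header (xid,
 * vers, credit, type, plus three empty chunk lists). With a typical
 * 1024-byte inline threshold, a call whose rq_snd_buf.len is 996 bytes
 * just fits: 28 + 996 = 1024. One byte more and the arguments must move
 * into a read chunk list.
 */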

/* The client can't know how large the actual reply will be. Thus it
 * plans for the largest possible reply for that particular ULP
 * operation. If the maximum combined reply message size exceeds the
 * transport's inline limit, the client must provide a write list or
 * a reply chunk for this request.
 */
static bool rpcrdma_results_inline(struct rpc_rqst *rqst)
{
	unsigned int repsize = RPCRDMA_HDRLEN_MIN + rqst->rq_rcv_buf.buflen;

	return repsize <= RPCRDMA_INLINE_READ_THRESHOLD(rqst);
}

static int
rpcrdma_tail_pullup(struct xdr_buf *buf)
{
	size_t tlen = buf->tail[0].iov_len;
	size_t skip = tlen & 3;

	/* Do not include the tail if it is only an XDR pad */
	if (tlen < 4)
		return 0;

	/* xdr_write_pages() adds a pad at the beginning of the tail
	 * if the content in "buf->pages" is unaligned. Force the
	 * tail's actual content to land at the next XDR position
	 * after the head instead.
	 */
	if (skip) {
		unsigned char *src, *dst;
		unsigned int count;

		src = buf->tail[0].iov_base;
		dst = buf->head[0].iov_base;
		dst += buf->head[0].iov_len;

		src += skip;
		tlen -= skip;

		dprintk("RPC:       %s: skip=%zu, memmove(%p, %p, %zu)\n",
			__func__, skip, dst, src, tlen);

		for (count = tlen; count; count--)
			*dst++ = *src++;
	}

	return tlen;
}
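
/* Worked example (illustrative): if the tail is 7 bytes long, skip =
 * 7 & 3 = 3, so the three leading XDR pad bytes are dropped and the
 * remaining 4 bytes of real tail content are copied to the first byte
 * past the head. The return value, 4, is the number of tail bytes the
 * caller should now count as inline data following the head.
 */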

/* Split "vec" on page boundaries into segments. FMR registers pages,
 * not a byte range. Other modes coalesce these segments into a single
 * MR when they can.
 */
static int
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
		     int n, int nsegs)
{
	size_t page_offset;
	u32 remaining;
	char *base;

	base = vec->iov_base;
	page_offset = offset_in_page(base);
	remaining = vec->iov_len;
	while (remaining && n < nsegs) {
		seg[n].mr_page = NULL;
		seg[n].mr_offset = base;
		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
		remaining -= seg[n].mr_len;
		base += seg[n].mr_len;
		++n;
		page_offset = 0;
	}
	return n;
}
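
/* Worked example (illustrative, assuming 4KB pages): a kvec of 10000
 * bytes whose iov_base begins 3000 bytes into a page converts to four
 * segments of 1096, 4096, 4096, and 712 bytes; only the first segment
 * starts at a non-zero page offset.
 */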

/*
 * Chunk assembly from upper layer xdr_buf.
 *
 * Convert the passed-in xdr_buf into a representation of RPC/RDMA
 * chunk elements. Segments are then coalesced when registered, if
 * possible within the selected memreg mode.
 *
 * Returns positive number of segments converted, or a negative errno.
 */

static int
rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
	enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs)
{
	int len, n = 0, p;
	int page_base;
	struct page **ppages;

	if (pos == 0) {
		n = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, n, nsegs);
		if (n == nsegs)
			return -EIO;
	}

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = xdrbuf->page_base & ~PAGE_MASK;
	p = 0;
	while (len && n < nsegs) {
		if (!ppages[p]) {
			/* alloc a page for the receive buffer */
			ppages[p] = alloc_page(GFP_ATOMIC);
			if (!ppages[p])
				return -ENOMEM;
		}
		seg[n].mr_page = ppages[p];
		seg[n].mr_offset = (void *)(unsigned long) page_base;
		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		if (seg[n].mr_len > PAGE_SIZE)
			return -EIO;
		len -= seg[n].mr_len;
		++n;
		++p;
		page_base = 0;	/* page offset only applies to first page */
	}

	/* Message overflows the seg array */
	if (len && n == nsegs)
		return -EIO;

	/* When encoding the read list, the tail is always sent inline */
	if (type == rpcrdma_readch)
		return n;

	if (xdrbuf->tail[0].iov_len) {
		/* the rpcrdma protocol allows us to omit any trailing
		 * xdr pad bytes, saving the server an RDMA operation. */
		if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize)
			return n;
		n = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, n, nsegs);
		if (n == nsegs)
			return -EIO;
	}

	return n;
}

/*
 * Create read/write chunk lists, and reply chunks, for RDMA
 *
 *   Assume check against THRESHOLD has been done, and chunks are required.
 *   Assume only encoding one list entry for read|write chunks. The NFSv3
 *     protocol is simple enough to allow this as it only has a single "bulk
 *     result" in each procedure - complicated NFSv4 COMPOUNDs are not
 *     simple enough. (The RDMA/Sessions NFSv4 proposal addresses this for
 *     future v4 revs.)
 *
 * The reply chunk is a special write chunk used for the entire reply,
 * rather than just the data. It is used primarily for READDIR and
 * READLINK, which would otherwise be severely size-limited by a small
 * rdma inline read max. The server response will come back as an RDMA
 * Write, followed by a message of type RDMA_NOMSG carrying the xid and
 * length. As a result, reply chunks do not provide data alignment;
 * however, they do not require "fixup" (moving the response to the
 * upper layer buffer) either.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns positive RPC/RDMA header size, or negative errno.
 */
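
/* Worked example (illustrative): a read list carrying two segments at
 * XDR position 36 is encoded as the thirteen XDR words
 *
 *    1 36 H1 L1 O1 O1' 1 36 H2 L2 O2 O2' 0
 *
 * (each 64-bit offset occupies two words), followed by two more zero
 * words encoding the empty write list and the empty reply chunk.
 */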

static ssize_t
rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
		struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	int n, nsegs, nchunks = 0;
	unsigned int pos;
	struct rpcrdma_mr_seg *seg = req->rl_segments;
	struct rpcrdma_read_chunk *cur_rchunk = NULL;
	struct rpcrdma_write_array *warray = NULL;
	struct rpcrdma_write_chunk *cur_wchunk = NULL;
	__be32 *iptr = headerp->rm_body.rm_chunks;
	int (*map)(struct rpcrdma_xprt *, struct rpcrdma_mr_seg *, int, bool);

	if (type == rpcrdma_readch || type == rpcrdma_areadch) {
		/* a read chunk - server will RDMA Read our memory */
		cur_rchunk = (struct rpcrdma_read_chunk *) iptr;
	} else {
		/* a write or reply chunk - server will RDMA Write our memory */
		*iptr++ = xdr_zero;	/* encode a NULL read chunk list */
		if (type == rpcrdma_replych)
			*iptr++ = xdr_zero;	/* a NULL write chunk list */
		warray = (struct rpcrdma_write_array *) iptr;
		cur_wchunk = (struct rpcrdma_write_chunk *) (warray + 1);
	}

	if (type == rpcrdma_replych || type == rpcrdma_areadch)
		pos = 0;
	else
		pos = target->head[0].iov_len;

	nsegs = rpcrdma_convert_iovs(target, pos, type, seg, RPCRDMA_MAX_SEGS);
	if (nsegs < 0)
		return nsegs;

	map = r_xprt->rx_ia.ri_ops->ro_map;
	do {
		n = map(r_xprt, seg, nsegs, cur_wchunk != NULL);
		if (n <= 0)
			goto out;
		if (cur_rchunk) {	/* read */
			cur_rchunk->rc_discrim = xdr_one;
			/* all read chunks have the same "position" */
			cur_rchunk->rc_position = cpu_to_be32(pos);
			cur_rchunk->rc_target.rs_handle =
						cpu_to_be32(seg->mr_rkey);
			cur_rchunk->rc_target.rs_length =
						cpu_to_be32(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_rchunk->rc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC:       %s: read chunk "
				"elem %d@0x%llx:0x%x pos %u (%s)\n", __func__,
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, pos, n < nsegs ? "more" : "last");
			cur_rchunk++;
			r_xprt->rx_stats.read_chunk_count++;
		} else {		/* write/reply */
			cur_wchunk->wc_target.rs_handle =
						cpu_to_be32(seg->mr_rkey);
			cur_wchunk->wc_target.rs_length =
						cpu_to_be32(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_wchunk->wc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC:       %s: %s chunk "
				"elem %d@0x%llx:0x%x (%s)\n", __func__,
				(type == rpcrdma_replych) ? "reply" : "write",
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, n < nsegs ? "more" : "last");
			cur_wchunk++;
			if (type == rpcrdma_replych)
				r_xprt->rx_stats.reply_chunk_count++;
			else
				r_xprt->rx_stats.write_chunk_count++;
			r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		}
		nchunks++;
		seg   += n;
		nsegs -= n;
	} while (nsegs);

	/* success. all failures return above */
	req->rl_nchunks = nchunks;

	/*
	 * finish off header. If write, marshal discrim and nchunks.
	 */
	if (cur_rchunk) {
		iptr = (__be32 *) cur_rchunk;
		*iptr++ = xdr_zero;	/* finish the read chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL write chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
	} else {
		warray->wc_discrim = xdr_one;
		warray->wc_nchunks = cpu_to_be32(nchunks);
		iptr = (__be32 *) cur_wchunk;
		if (type == rpcrdma_writech) {
			*iptr++ = xdr_zero; /* finish the write chunk list */
			*iptr++ = xdr_zero; /* encode a NULL reply chunk */
		}
	}

	/*
	 * Return header size.
	 */
	return (unsigned char *)iptr - (unsigned char *)headerp;

out:
	for (pos = 0; nchunks--;)
		pos += r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt,
						      &req->rl_segments[pos]);
	return n;
}

/*
 * Copy write data inline.
 * This function is used for "small" requests. Data which is passed
 * to RPC via iovecs (or page list) is copied directly into the
 * pre-registered memory buffer for this request. For small amounts
 * of data, this is efficient. The cutoff value is tunable.
 */
static void rpcrdma_inline_pullup(struct rpc_rqst *rqst)
{
	int i, npages, curlen;
	int copy_len;
	unsigned char *srcp, *destp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	int page_base;
	struct page **ppages;

	destp = rqst->rq_svec[0].iov_base;
	curlen = rqst->rq_svec[0].iov_len;
	destp += curlen;

	dprintk("RPC:       %s: destp 0x%p len %d hdrlen %d\n",
		__func__, destp, rqst->rq_slen, curlen);

	copy_len = rqst->rq_snd_buf.page_len;

	if (rqst->rq_snd_buf.tail[0].iov_len) {
		curlen = rqst->rq_snd_buf.tail[0].iov_len;
		if (destp + copy_len != rqst->rq_snd_buf.tail[0].iov_base) {
			memmove(destp + copy_len,
				rqst->rq_snd_buf.tail[0].iov_base, curlen);
			r_xprt->rx_stats.pullup_copy_count += curlen;
		}
		dprintk("RPC:       %s: tail destp 0x%p len %d\n",
			__func__, destp + copy_len, curlen);
		rqst->rq_svec[0].iov_len += curlen;
	}
	r_xprt->rx_stats.pullup_copy_count += copy_len;

	page_base = rqst->rq_snd_buf.page_base;
	ppages = rqst->rq_snd_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;
	npages = PAGE_ALIGN(page_base+copy_len) >> PAGE_SHIFT;
	for (i = 0; copy_len && i < npages; i++) {
		curlen = PAGE_SIZE - page_base;
		if (curlen > copy_len)
			curlen = copy_len;
		dprintk("RPC:       %s: page %d destp 0x%p len %d curlen %d\n",
			__func__, i, destp, copy_len, curlen);
		srcp = kmap_atomic(ppages[i]);
		memcpy(destp, srcp+page_base, curlen);
		kunmap_atomic(srcp);
		rqst->rq_svec[0].iov_len += curlen;
		destp += curlen;
		copy_len -= curlen;
		page_base = 0;
	}
	/* header now contains entire send message */
}
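
/* For example (illustrative): a small NFS WRITE whose 200-byte payload
 * sits in rq_snd_buf.pages is simply copied here to follow the RPC
 * header in the pre-registered send buffer, so the whole call goes out
 * as one inline RDMA Send with no memory registration at all.
 */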

/*
 * Marshal a request: the primary job of this routine is to choose
 * the transfer modes. See comments below.
 *
 * Uses multiple RDMA IOVs for a request:
 *  [0] -- RPC RDMA header, which uses memory from the *start* of the
 *         preregistered buffer that already holds the RPC data in
 *         its middle.
 *  [1] -- the RPC header/data, marshaled by RPC and the NFS protocol.
 *  [2] -- optional padding.
 *  [3] -- if padded, header only in [1] and data here.
 *
 * Returns zero on success, otherwise a negative errno.
 */

int
rpcrdma_marshal_req(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	char *base;
	size_t rpclen;
	ssize_t hdrlen;
	enum rpcrdma_chunktype rtype, wtype;
	struct rpcrdma_msg *headerp;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	if (test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state))
		return rpcrdma_bc_marshal_reply(rqst);
#endif

	/*
	 * rpclen gets amount of data in first buffer, which is the
	 * pre-registered buffer.
	 */
	base = rqst->rq_svec[0].iov_base;
	rpclen = rqst->rq_svec[0].iov_len;

	headerp = rdmab_to_msg(req->rl_rdmabuf);
	/* don't byte-swap XID, it's already done in request */
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = rpcrdma_version;
	headerp->rm_credit = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);
	headerp->rm_type = rdma_msg;

	/*
	 * Chunks needed for results?
	 *
	 * o Read ops return data as write chunk(s), header as inline.
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large non-read ops return as a single reply chunk.
	 */
	if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else if (rpcrdma_results_inline(rqst))
		wtype = rpcrdma_noch;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 *
	 * This assumes that the upper layer does not present a request
	 * that both has a data payload, and whose non-data arguments
	 * by themselves are larger than the inline threshold.
	 */
	if (rpcrdma_args_inline(rqst)) {
		rtype = rpcrdma_noch;
	} else if (rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
		rtype = rpcrdma_readch;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		headerp->rm_type = rdma_nomsg;
		rtype = rpcrdma_areadch;
		rpclen = 0;
	}

	/* The following simplification is not true forever */
	if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
		wtype = rpcrdma_noch;
	if (rtype != rpcrdma_noch && wtype != rpcrdma_noch) {
		dprintk("RPC:       %s: cannot marshal multiple chunk lists\n",
			__func__);
		return -EIO;
	}

	hdrlen = RPCRDMA_HDRLEN_MIN;

	/*
	 * Pull up any extra send data into the preregistered buffer.
	 * When padding is in use and applies to the transfer, insert
	 * it and change the message type.
	 */
	if (rtype == rpcrdma_noch) {

		rpcrdma_inline_pullup(rqst);

		headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
		headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
		headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
		/* new length after pullup */
		rpclen = rqst->rq_svec[0].iov_len;
	} else if (rtype == rpcrdma_readch)
		rpclen += rpcrdma_tail_pullup(&rqst->rq_snd_buf);
	if (rtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
					       headerp, rtype);
		wtype = rtype;	/* simplify dprintk */

	} else if (wtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf,
					       headerp, wtype);
	}
	if (hdrlen < 0)
		return hdrlen;

	dprintk("RPC:       %s: %s: hdrlen %zd rpclen %zd"
		" headerp 0x%p base 0x%p lkey 0x%x\n",
		__func__, transfertypes[wtype], hdrlen, rpclen,
		headerp, base, rdmab_lkey(req->rl_rdmabuf));

	/*
	 * initialize send_iov's - normally only two: rdma chunk header and
	 * single preregistered RPC header buffer, but if padding is present,
	 * then use a preregistered (and zeroed) pad buffer between the RPC
	 * header and any write data. In all non-rdma cases, any following
	 * data has been copied into the RPC header buffer.
	 */
	req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
	req->rl_send_iov[0].length = hdrlen;
	req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);

	req->rl_niovs = 1;
	if (rtype == rpcrdma_areadch)
		return 0;

	req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf);
	req->rl_send_iov[1].length = rpclen;
	req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);

	req->rl_niovs = 2;
	return 0;
}

/*
 * Chase down a received write or reply chunklist to get length
 * RDMA'd by server. See map at rpcrdma_create_chunks()! :-)
 */
static int
rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk,
		     __be32 **iptrp)
{
	unsigned int i, total_len;
	struct rpcrdma_write_chunk *cur_wchunk;
	char *base = (char *)rdmab_to_msg(rep->rr_rdmabuf);

	i = be32_to_cpu(**iptrp);
	if (i > max)
		return -1;
	cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
	total_len = 0;
	while (i--) {
		struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
		ifdebug(FACILITY) {
			u64 off;
			xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
			dprintk("RPC:       %s: chunk %d@0x%llx:0x%x\n",
				__func__,
				be32_to_cpu(seg->rs_length),
				(unsigned long long)off,
				be32_to_cpu(seg->rs_handle));
		}
		total_len += be32_to_cpu(seg->rs_length);
		++cur_wchunk;
	}
	/* check and adjust for properly terminated write chunk */
	if (wrchunk) {
		__be32 *w = (__be32 *) cur_wchunk;
		if (*w++ != xdr_zero)
			return -1;
		cur_wchunk = (struct rpcrdma_write_chunk *) w;
	}
	if ((char *)cur_wchunk > base + rep->rr_len)
		return -1;

	*iptrp = (__be32 *) cur_wchunk;
	return total_len;
}
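
/* Worked example (illustrative): for a returned write list of two
 * segments, *iptrp points on entry at the XDR words
 *
 *    2 H1 L1 O1 O1' H2 L2 O2 O2' 0
 *
 * (the caller has already consumed the list discriminator). The
 * function returns L1 + L2 and, with wrchunk set, leaves *iptrp
 * pointing just past the terminating zero word.
 */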

/*
 * Scatter inline received data back into provided iov's.
 */
static void
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	int i, npages, curlen, olen;
	char *destp;
	struct page **ppages;
	int page_base;

	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len) {	/* write chunk header fixup */
		curlen = copy_len;
		rqst->rq_rcv_buf.head[0].iov_len = curlen;
	}

	dprintk("RPC:       %s: srcp 0x%p len %d hdrlen %d\n",
		__func__, srcp, copy_len, curlen);

	/* Shift pointer for first receive segment only */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	srcp += curlen;
	copy_len -= curlen;

	olen = copy_len;
	i = 0;
	rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen;
	page_base = rqst->rq_rcv_buf.page_base;
	ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;

	if (copy_len && rqst->rq_rcv_buf.page_len) {
		npages = PAGE_ALIGN(page_base +
			rqst->rq_rcv_buf.page_len) >> PAGE_SHIFT;
		for (; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > copy_len)
				curlen = copy_len;
			dprintk("RPC:       %s: page %d"
				" srcp 0x%p len %d curlen %d\n",
				__func__, i, srcp, copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			if (copy_len == 0)
				break;
			page_base = 0;
		}
	}

	if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) {
		curlen = copy_len;
		if (curlen > rqst->rq_rcv_buf.tail[0].iov_len)
			curlen = rqst->rq_rcv_buf.tail[0].iov_len;
		if (rqst->rq_rcv_buf.tail[0].iov_base != srcp)
			memmove(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen);
		dprintk("RPC:       %s: tail srcp 0x%p len %d curlen %d\n",
			__func__, srcp, copy_len, curlen);
		rqst->rq_rcv_buf.tail[0].iov_len = curlen;
		copy_len -= curlen; ++i;
	} else
		rqst->rq_rcv_buf.tail[0].iov_len = 0;

	if (pad) {
		/* implicit padding on terminal chunk */
		unsigned char *p = rqst->rq_rcv_buf.tail[0].iov_base;
		while (pad--)
			p[rqst->rq_rcv_buf.tail[0].iov_len++] = 0;
	}

	if (copy_len)
		dprintk("RPC:       %s: %d bytes in"
			" %d extra segments (%d lost)\n",
			__func__, olen, i, copy_len);

	/* TBD avoid a warning from call_decode() */
	rqst->rq_private_buf = rqst->rq_rcv_buf;
}

void
rpcrdma_connect_worker(struct work_struct *work)
{
	struct rpcrdma_ep *ep =
		container_of(work, struct rpcrdma_ep, rep_connect_worker.work);
	struct rpcrdma_xprt *r_xprt =
		container_of(ep, struct rpcrdma_xprt, rx_ep);
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;

	spin_lock_bh(&xprt->transport_lock);
	if (++xprt->connect_cookie == 0)	/* maintain a reserved value */
		++xprt->connect_cookie;
	if (ep->rep_connected > 0) {
		if (!xprt_test_and_set_connected(xprt))
			xprt_wake_pending_tasks(xprt, 0);
	} else {
		if (xprt_test_and_clear_connected(xprt))
			xprt_wake_pending_tasks(xprt, -ENOTCONN);
	}
	spin_unlock_bh(&xprt->transport_lock);
}

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool
rpcrdma_is_bcall(struct rpcrdma_msg *headerp)
{
	__be32 *p = (__be32 *)headerp;

	if (headerp->rm_type != rdma_msg)
		return false;
	if (headerp->rm_body.rm_chunks[0] != xdr_zero)
		return false;
	if (headerp->rm_body.rm_chunks[1] != xdr_zero)
		return false;
	if (headerp->rm_body.rm_chunks[2] != xdr_zero)
		return false;

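	/* Words p[0]..p[6] hold the fixed header just checked (xid,
	 * vers, credit, type, and three empty chunk lists), so the
	 * embedded RPC call header begins at p[7]: first its XID,
	 * which by convention equals rm_xid, then its call direction.
	 */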
	/* sanity */
	if (p[7] != headerp->rm_xid)
		return false;
	/* call direction */
	if (p[8] != cpu_to_be32(RPC_CALL))
		return false;

	return true;
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

/*
 * This function is called when an async event is posted to
 * the connection which changes the connection state. All it
 * does at this point is mark the connection up/down; the rpc
 * timers do the rest.
 */
void
rpcrdma_conn_func(struct rpcrdma_ep *ep)
{
	schedule_delayed_work(&ep->rep_connect_worker, 0);
}

/* Process received RPC/RDMA messages.
 *
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void
rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_msg *headerp;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	__be32 *iptr;
	int rdmalen, status, rmerr;
	unsigned long cwnd;

	dprintk("RPC:       %s: incoming rep %p\n", __func__, rep);

	if (rep->rr_len == RPCRDMA_BAD_LEN)
		goto out_badstatus;
	if (rep->rr_len < RPCRDMA_HDRLEN_ERR)
		goto out_shortreply;

	headerp = rdmab_to_msg(rep->rr_rdmabuf);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	if (rpcrdma_is_bcall(headerp))
		goto out_bcall;
#endif

	/* Match incoming rpcrdma_rep to an rpcrdma_req to
	 * get context for handling any incoming chunks.
	 */
	spin_lock_bh(&xprt->transport_lock);
	rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
	if (!rqst)
		goto out_nomatch;

	req = rpcr_to_rdmar(rqst);
	if (req->rl_reply)
		goto out_duplicate;

	/* Sanity checking has passed. We are now committed
	 * to complete this transaction.
	 */
	list_del_init(&rqst->rq_list);
	spin_unlock_bh(&xprt->transport_lock);
	dprintk("RPC:       %s: reply %p completes request %p (xid 0x%08x)\n",
		__func__, rep, req, be32_to_cpu(headerp->rm_xid));

	/* from here on, the reply is no longer an orphan */
	req->rl_reply = rep;
	xprt->reestablish_timeout = 0;

	if (headerp->rm_vers != rpcrdma_version)
		goto out_badversion;

	/* check for expected message types */
	/* The order of some of these tests is important. */
	switch (headerp->rm_type) {
	case rdma_msg:
		/* never expect read chunks */
		/* never expect reply chunks (two ways to check) */
		/* never expect write chunks without having offered RDMA */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    (headerp->rm_body.rm_chunks[1] == xdr_zero &&
		     headerp->rm_body.rm_chunks[2] != xdr_zero) ||
		    (headerp->rm_body.rm_chunks[1] != xdr_zero &&
		     req->rl_nchunks == 0))
			goto badheader;
		if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
			/* count any expected write chunks in read reply */
			/* start at write chunk array count */
			iptr = &headerp->rm_body.rm_chunks[2];
			rdmalen = rpcrdma_count_chunks(rep,
						req->rl_nchunks, 1, &iptr);
			/* check for validity, and no reply chunk after */
			if (rdmalen < 0 || *iptr++ != xdr_zero)
				goto badheader;
			rep->rr_len -=
			    ((unsigned char *)iptr - (unsigned char *)headerp);
			status = rep->rr_len + rdmalen;
			r_xprt->rx_stats.total_rdma_reply += rdmalen;
			/* special case - last chunk may omit padding */
			if (rdmalen &= 3) {
				rdmalen = 4 - rdmalen;
				status += rdmalen;
			}
		} else {
			/* else ordinary inline */
			rdmalen = 0;
			iptr = (__be32 *)((unsigned char *)headerp +
							RPCRDMA_HDRLEN_MIN);
			rep->rr_len -= RPCRDMA_HDRLEN_MIN;
			status = rep->rr_len;
		}
		/* Fix up the rpc results for upper layer */
		rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen);
		break;

	case rdma_nomsg:
		/* never expect read or write chunks, always reply chunks */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    headerp->rm_body.rm_chunks[1] != xdr_zero ||
		    headerp->rm_body.rm_chunks[2] != xdr_one ||
		    req->rl_nchunks == 0)
			goto badheader;
		iptr = (__be32 *)((unsigned char *)headerp +
							RPCRDMA_HDRLEN_MIN);
		rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
		if (rdmalen < 0)
			goto badheader;
		r_xprt->rx_stats.total_rdma_reply += rdmalen;
		/* Reply chunk buffer already is the reply vector - no fixup. */
		status = rdmalen;
		break;

	case rdma_error:
		goto out_rdmaerr;

badheader:
	default:
		dprintk("%s: invalid rpcrdma reply header (type %d):"
				" chunks[012] == %d %d %d"
				" expected chunks <= %d\n",
				__func__, be32_to_cpu(headerp->rm_type),
				headerp->rm_body.rm_chunks[0],
				headerp->rm_body.rm_chunks[1],
				headerp->rm_body.rm_chunks[2],
				req->rl_nchunks);
		status = -EIO;
		r_xprt->rx_stats.bad_reply_count++;
		break;
	}

out:
	/* Invalidate and flush the data payloads before waking the
	 * waiting application. This guarantees the memory region is
	 * properly fenced from the server before the application
	 * accesses the data. It also ensures proper send flow
	 * control: waking the next RPC waits until this RPC has
	 * relinquished all its Send Queue entries.
	 */
	if (req->rl_nchunks)
		r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, req);

	spin_lock_bh(&xprt->transport_lock);
	cwnd = xprt->cwnd;
	xprt->cwnd = atomic_read(&r_xprt->rx_buf.rb_credits) << RPC_CWNDSHIFT;
	if (xprt->cwnd > cwnd)
		xprt_release_rqst_cong(rqst->rq_task);

	xprt_complete_rqst(rqst->rq_task, status);
	spin_unlock_bh(&xprt->transport_lock);
	dprintk("RPC:       %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
			__func__, xprt, rqst, status);
	return;

out_badstatus:
	rpcrdma_recv_buffer_put(rep);
	if (r_xprt->rx_ep.rep_connected == 1) {
		r_xprt->rx_ep.rep_connected = -EIO;
		rpcrdma_conn_func(&r_xprt->rx_ep);
	}
	return;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
out_bcall:
	rpcrdma_bc_receive_call(r_xprt, rep);
	return;
#endif

/* If the incoming reply terminated a pending RPC, the next
 * RPC call will post a replacement receive buffer as it is
 * being marshaled.
 */
out_badversion:
	dprintk("RPC:       %s: invalid version %d\n",
		__func__, be32_to_cpu(headerp->rm_vers));
	status = -EIO;
	r_xprt->rx_stats.bad_reply_count++;
	goto out;

out_rdmaerr:
	rmerr = be32_to_cpu(headerp->rm_body.rm_error.rm_err);
	switch (rmerr) {
	case ERR_VERS:
		pr_err("%s: server reports header version error (%u-%u)\n",
		       __func__,
		       be32_to_cpu(headerp->rm_body.rm_error.rm_vers_low),
		       be32_to_cpu(headerp->rm_body.rm_error.rm_vers_high));
		break;
	case ERR_CHUNK:
		pr_err("%s: server reports header decoding error\n",
		       __func__);
		break;
	default:
		pr_err("%s: server reports unknown error %d\n",
		       __func__, rmerr);
	}
	status = -EREMOTEIO;
	r_xprt->rx_stats.bad_reply_count++;
	goto out;

/* If no pending RPC transaction was matched, post a replacement
 * receive buffer before returning.
 */
out_shortreply:
	dprintk("RPC:       %s: short/invalid reply\n", __func__);
	goto repost;

out_nomatch:
	spin_unlock_bh(&xprt->transport_lock);
	dprintk("RPC:       %s: no match for incoming xid 0x%08x len %d\n",
		__func__, be32_to_cpu(headerp->rm_xid),
		rep->rr_len);
	goto repost;

out_duplicate:
	spin_unlock_bh(&xprt->transport_lock);
	dprintk("RPC:       %s: "
		"duplicate reply %p to RPC request %p: xid 0x%08x\n",
		__func__, rep, req, be32_to_cpu(headerp->rm_xid));

repost:
	r_xprt->rx_stats.bad_reply_count++;
	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
		rpcrdma_recv_buffer_put(rep);
}