linux/net/sunrpc/xprtrdma/rpc_rdma.c
/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */

#include "xprt_rdma.h"

#include <linux/highmem.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

enum rpcrdma_chunktype {
	rpcrdma_noch = 0,
	rpcrdma_readch,
	rpcrdma_areadch,
	rpcrdma_writech,
	rpcrdma_replych
};
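
/*
 * rpcrdma_noch:    no chunks; request and reply fit inline.
 * rpcrdma_readch:  some of the argument is moved via RDMA Read.
 * rpcrdma_areadch: the entire request is moved via RDMA Read.
 * rpcrdma_writech: some of the result is returned via RDMA Write.
 * rpcrdma_replych: the entire reply is returned via RDMA Write.
 */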

#ifdef RPC_DEBUG
static const char transfertypes[][12] = {
	"pure inline",	/* no chunks */
	" read chunk",	/* some argument via rdma read */
	"*read chunk",	/* entire request via rdma read */
	"write chunk",	/* some result via rdma write */
	"reply chunk"	/* entire reply via rdma write */
};
#endif

/*
 * Chunk assembly from upper layer xdr_buf.
 *
 * Convert the passed-in xdr_buf into a representation as RPC/RDMA chunk
 * elements. Segments are then coalesced when registered, if possible
 * within the selected memreg mode.
 *
 * Note, this routine is never called if the connection's memory
 * registration strategy is 0 (bounce buffers).
 */

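/*
 * Returns the number of chunk segments filled in, or 0 if @nsegs was
 * too small to describe the xdr_buf. A @pos of zero indicates that
 * the head iovec is included in the chunk data.
 */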
static int
rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
	enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs)
{
	int len, n = 0, p;

	if (pos == 0 && xdrbuf->head[0].iov_len) {
		seg[n].mr_page = NULL;
		seg[n].mr_offset = xdrbuf->head[0].iov_base;
		seg[n].mr_len = xdrbuf->head[0].iov_len;
		++n;
	}

	if (xdrbuf->page_len && (xdrbuf->pages[0] != NULL)) {
		if (n == nsegs)
			return 0;
		seg[n].mr_page = xdrbuf->pages[0];
		seg[n].mr_offset = (void *)(unsigned long) xdrbuf->page_base;
		seg[n].mr_len = min_t(u32,
			PAGE_SIZE - xdrbuf->page_base, xdrbuf->page_len);
		len = xdrbuf->page_len - seg[n].mr_len;
		++n;
		p = 1;
		while (len > 0) {
			if (n == nsegs)
				return 0;
			seg[n].mr_page = xdrbuf->pages[p];
			seg[n].mr_offset = NULL;
			seg[n].mr_len = min_t(u32, PAGE_SIZE, len);
			len -= seg[n].mr_len;
			++n;
			++p;
		}
	}

	if (xdrbuf->tail[0].iov_len) {
		/* the rpcrdma protocol allows us to omit any trailing
		 * xdr pad bytes, saving the server an RDMA operation. */
		if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize)
			return n;
		if (n == nsegs)
			return 0;
		seg[n].mr_page = NULL;
		seg[n].mr_offset = xdrbuf->tail[0].iov_base;
		seg[n].mr_len = xdrbuf->tail[0].iov_len;
		++n;
	}

	return n;
}

/*
 * Create read/write chunk lists, and reply chunks, for RDMA
 *
 *   Assume check against THRESHOLD has been done, and chunks are required.
 *   Assume only encoding one list entry for read|write chunks. The NFSv3
 *     protocol is simple enough to allow this because each procedure has
 *     only a single "bulk result"; complicated NFSv4 COMPOUNDs are not.
 *     (The RDMA/Sessions NFSv4 proposal addresses this for future v4 revs.)
 *
 * A single reply chunk (a special write chunk used for the entire
 * reply, rather than just the data) is used primarily for READDIR
 * and READLINK, which would otherwise be severely size-limited by a
 * small rdma inline read max. The server response will come back as
 * an RDMA Write, followed by a message of type RDMA_NOMSG carrying
 * the xid and length. As a result, reply chunks do not provide data
 * alignment; however, they do not require "fixup" (moving the
 * response to the upper layer buffer) either.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 */

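/*
 * For example, a two-segment read chunk list at XDR position P would
 * be laid out on the wire as (one 32-bit XDR word per item, except
 * the 64-bit offsets O1 and O2):
 *
 *    1 P H1 L1 O1  1 P H2 L2 O2  0
 *
 * followed by the NULL write chunk list and NULL reply chunk that this
 * routine appends. The return value is the number of RPC/RDMA header
 * bytes consumed, or 0 if registering a segment fails.
 */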
static unsigned int
rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
		struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_task->tk_xprt);
	int nsegs, nchunks = 0;
	unsigned int pos;
	struct rpcrdma_mr_seg *seg = req->rl_segments;
	struct rpcrdma_read_chunk *cur_rchunk = NULL;
	struct rpcrdma_write_array *warray = NULL;
	struct rpcrdma_write_chunk *cur_wchunk = NULL;
	__be32 *iptr = headerp->rm_body.rm_chunks;

	if (type == rpcrdma_readch || type == rpcrdma_areadch) {
		/* a read chunk - server will RDMA Read our memory */
		cur_rchunk = (struct rpcrdma_read_chunk *) iptr;
	} else {
		/* a write or reply chunk - server will RDMA Write our memory */
		*iptr++ = xdr_zero;	/* encode a NULL read chunk list */
		if (type == rpcrdma_replych)
			*iptr++ = xdr_zero;	/* a NULL write chunk list */
		warray = (struct rpcrdma_write_array *) iptr;
		cur_wchunk = (struct rpcrdma_write_chunk *) (warray + 1);
	}

	if (type == rpcrdma_replych || type == rpcrdma_areadch)
		pos = 0;
	else
		pos = target->head[0].iov_len;

	nsegs = rpcrdma_convert_iovs(target, pos, type, seg, RPCRDMA_MAX_SEGS);
	if (nsegs == 0)
		return 0;

	do {
		/* bind/register the memory, then build chunk from result. */
		int n = rpcrdma_register_external(seg, nsegs,
						cur_wchunk != NULL, r_xprt);
		if (n <= 0)
			goto out;
		if (cur_rchunk) {	/* read */
			cur_rchunk->rc_discrim = xdr_one;
			/* all read chunks have the same "position" */
			cur_rchunk->rc_position = htonl(pos);
			cur_rchunk->rc_target.rs_handle = htonl(seg->mr_rkey);
			cur_rchunk->rc_target.rs_length = htonl(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_rchunk->rc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC:       %s: read chunk "
				"elem %d@0x%llx:0x%x pos %u (%s)\n", __func__,
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, pos, n < nsegs ? "more" : "last");
			cur_rchunk++;
			r_xprt->rx_stats.read_chunk_count++;
		} else {		/* write/reply */
			cur_wchunk->wc_target.rs_handle = htonl(seg->mr_rkey);
			cur_wchunk->wc_target.rs_length = htonl(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_wchunk->wc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC:       %s: %s chunk "
				"elem %d@0x%llx:0x%x (%s)\n", __func__,
				(type == rpcrdma_replych) ? "reply" : "write",
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, n < nsegs ? "more" : "last");
			cur_wchunk++;
			if (type == rpcrdma_replych)
				r_xprt->rx_stats.reply_chunk_count++;
			else
				r_xprt->rx_stats.write_chunk_count++;
			r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		}
		nchunks++;
		seg   += n;
		nsegs -= n;
	} while (nsegs);

	/* success. all failures return above */
	req->rl_nchunks = nchunks;

	BUG_ON(nchunks == 0);

	/*
	 * finish off header. If write, marshal discrim and nchunks.
	 */
	if (cur_rchunk) {
		iptr = (__be32 *) cur_rchunk;
		*iptr++ = xdr_zero;	/* finish the read chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL write chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
	} else {
		warray->wc_discrim = xdr_one;
		warray->wc_nchunks = htonl(nchunks);
		iptr = (__be32 *) cur_wchunk;
		if (type == rpcrdma_writech) {
			*iptr++ = xdr_zero; /* finish the write chunk list */
			*iptr++ = xdr_zero; /* encode a NULL reply chunk */
		}
	}

	/*
	 * Return header size.
	 */
	return (unsigned char *)iptr - (unsigned char *)headerp;

out:
	for (pos = 0; nchunks--;)
		pos += rpcrdma_deregister_external(
				&req->rl_segments[pos], r_xprt, NULL);
	return 0;
}

/*
 * Copy write data inline.
 * This function is used for "small" requests. Data which is passed
 * to RPC via iovecs (or page list) is copied directly into the
 * pre-registered memory buffer for this request. For small amounts
 * of data, this is efficient. The cutoff value is tunable.
 */
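/*
 * Returns the number of bytes of optional XDR padding to insert after
 * the pulled-up RPC message, or 0 when padding is not used.
 */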
static int
rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
{
	int i, npages, curlen;
	int copy_len;
	unsigned char *srcp, *destp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);

	destp = rqst->rq_svec[0].iov_base;
	curlen = rqst->rq_svec[0].iov_len;
	destp += curlen;
	/*
	 * Do optional padding where it makes sense. Alignment of write
	 * payload can help the server, if our setting is accurate.
	 */
	pad -= (curlen + 36/*sizeof(struct rpcrdma_msg_padded)*/);
	if (pad < 0 || rqst->rq_slen - curlen < RPCRDMA_INLINE_PAD_THRESH)
		pad = 0;	/* don't pad this request */

	dprintk("RPC:       %s: pad %d destp 0x%p len %d hdrlen %d\n",
		__func__, pad, destp, rqst->rq_slen, curlen);

	copy_len = rqst->rq_snd_buf.page_len;

	if (rqst->rq_snd_buf.tail[0].iov_len) {
		curlen = rqst->rq_snd_buf.tail[0].iov_len;
		if (destp + copy_len != rqst->rq_snd_buf.tail[0].iov_base) {
			memmove(destp + copy_len,
				rqst->rq_snd_buf.tail[0].iov_base, curlen);
			r_xprt->rx_stats.pullup_copy_count += curlen;
		}
		dprintk("RPC:       %s: tail destp 0x%p len %d\n",
			__func__, destp + copy_len, curlen);
		rqst->rq_svec[0].iov_len += curlen;
	}

	r_xprt->rx_stats.pullup_copy_count += copy_len;
	npages = PAGE_ALIGN(rqst->rq_snd_buf.page_base+copy_len) >> PAGE_SHIFT;
	for (i = 0; copy_len && i < npages; i++) {
		if (i == 0)
			curlen = PAGE_SIZE - rqst->rq_snd_buf.page_base;
		else
			curlen = PAGE_SIZE;
		if (curlen > copy_len)
			curlen = copy_len;
		dprintk("RPC:       %s: page %d destp 0x%p len %d curlen %d\n",
			__func__, i, destp, copy_len, curlen);
		srcp = kmap_atomic(rqst->rq_snd_buf.pages[i],
					KM_SKB_SUNRPC_DATA);
		if (i == 0)
			memcpy(destp, srcp+rqst->rq_snd_buf.page_base, curlen);
		else
			memcpy(destp, srcp, curlen);
		kunmap_atomic(srcp, KM_SKB_SUNRPC_DATA);
		rqst->rq_svec[0].iov_len += curlen;
		destp += curlen;
		copy_len -= curlen;
	}
	/* header now contains entire send message */
	return pad;
}

/*
 * Marshal a request: the primary job of this routine is to choose
 * the transfer modes. See comments below.
 *
 * Uses multiple RDMA IOVs for a request:
 *  [0] -- RPC RDMA header, which uses memory from the *start* of the
 *         preregistered buffer that already holds the RPC data in
 *         its middle.
 *  [1] -- the RPC header/data, marshaled by RPC and the NFS protocol.
 *  [2] -- optional padding.
 *  [3] -- if padded, header only in [1] and data here.
 */

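/*
 * Returns 0 on success, or -1 if the request cannot be marshaled
 * (for example, chunks are required but the memory registration
 * strategy is bounce buffers, or chunk creation fails).
 */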
int
rpcrdma_marshal_req(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_task->tk_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	char *base;
	size_t hdrlen, rpclen, padlen;
	enum rpcrdma_chunktype rtype, wtype;
	struct rpcrdma_msg *headerp;

	/*
	 * rpclen gets the amount of data in the first buffer, which is
	 * the pre-registered buffer.
	 */
	base = rqst->rq_svec[0].iov_base;
	rpclen = rqst->rq_svec[0].iov_len;

	/* build RDMA header in private area at front */
	headerp = (struct rpcrdma_msg *) req->rl_base;
	/* don't htonl XID, it's already done in request */
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = xdr_one;
	headerp->rm_credit = htonl(r_xprt->rx_buf.rb_max_requests);
	headerp->rm_type = htonl(RDMA_MSG);

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline (but see later).
	 * o Large non-read ops return as a single reply chunk.
	 * o Large read ops return data as write chunk(s), header as inline.
	 *
	 * Note: the NFS code sending down multiple result segments implies
	 * the op is one of read, readdir[plus], readlink or NFSv4 getacl.
	 */

	/*
	 * This code can handle read chunks, write chunks OR reply
	 * chunks -- only one type. If the request is too big to fit
	 * inline, then we will choose read chunks. If the request is
	 * a READ, then use write chunks to separate the file data
	 * into pages; otherwise use reply chunks.
	 */
	if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst))
		wtype = rpcrdma_noch;
	else if (rqst->rq_rcv_buf.page_len == 0)
		wtype = rpcrdma_replych;
	else if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 *
	 * Note: the NFS code sending down multiple argument segments
	 * implies the op is a write.
	 * TBD check NFSv4 setacl
	 */
	if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
		rtype = rpcrdma_noch;
	else if (rqst->rq_snd_buf.page_len == 0)
		rtype = rpcrdma_areadch;
	else
		rtype = rpcrdma_readch;

	/* The following simplification is not true forever */
	if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
		wtype = rpcrdma_noch;
	BUG_ON(rtype != rpcrdma_noch && wtype != rpcrdma_noch);

	if (r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_BOUNCEBUFFERS &&
	    (rtype != rpcrdma_noch || wtype != rpcrdma_noch)) {
		/* forced to "pure inline"? */
		dprintk("RPC:       %s: too much data (%d/%d) for inline\n",
			__func__, rqst->rq_rcv_buf.len, rqst->rq_snd_buf.len);
		return -1;
	}

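	/*
	 * 28 bytes covers the fixed RPC/RDMA header: xid, vers, credit
	 * and type (4 words) plus three empty chunk lists (3 words).
	 * The padded variant below adds two more words (align, thresh).
	 */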
	hdrlen = 28; /*sizeof *headerp;*/
	padlen = 0;

	/*
	 * Pull up any extra send data into the preregistered buffer.
	 * When padding is in use and applies to the transfer, insert
	 * it and change the message type.
	 */
	if (rtype == rpcrdma_noch) {

		padlen = rpcrdma_inline_pullup(rqst,
						RPCRDMA_INLINE_PAD_VALUE(rqst));

		if (padlen) {
			headerp->rm_type = htonl(RDMA_MSGP);
			headerp->rm_body.rm_padded.rm_align =
				htonl(RPCRDMA_INLINE_PAD_VALUE(rqst));
			headerp->rm_body.rm_padded.rm_thresh =
				htonl(RPCRDMA_INLINE_PAD_THRESH);
			headerp->rm_body.rm_padded.rm_pempty[0] = xdr_zero;
			headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero;
			headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero;
			hdrlen += 2 * sizeof(u32); /* extra words in padhdr */
			BUG_ON(wtype != rpcrdma_noch);

		} else {
			headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
			headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
			headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
			/* new length after pullup */
			rpclen = rqst->rq_svec[0].iov_len;
			/*
			 * Currently we try not to use read inline.
			 * Reply chunks have the desirable property that
			 * they land, packed, directly in the target buffers
			 * without headers, so they require no fixup. The
			 * additional RDMA Write op sends the same amount
			 * of data, streams on the wire, and adds no overhead
			 * on receive. Therefore, we request a reply chunk
			 * for non-writes wherever feasible and efficient.
			 */
			if (wtype == rpcrdma_noch &&
			    r_xprt->rx_ia.ri_memreg_strategy > RPCRDMA_REGISTER)
				wtype = rpcrdma_replych;
		}
	}

	/*
	 * Marshal chunks. This routine will return the header length
	 * consumed by marshaling.
	 */
	if (rtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst,
					&rqst->rq_snd_buf, headerp, rtype);
		wtype = rtype;	/* simplify dprintk */

	} else if (wtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst,
					&rqst->rq_rcv_buf, headerp, wtype);
	}

	if (hdrlen == 0)
		return -1;

	dprintk("RPC:       %s: %s: hdrlen %zd rpclen %zd padlen %zd"
		" headerp 0x%p base 0x%p lkey 0x%x\n",
		__func__, transfertypes[wtype], hdrlen, rpclen, padlen,
		headerp, base, req->rl_iov.lkey);

	/*
	 * initialize send_iov's - normally only two: rdma chunk header and
	 * single preregistered RPC header buffer, but if padding is present,
	 * then use a preregistered (and zeroed) pad buffer between the RPC
	 * header and any write data. In all non-rdma cases, any following
	 * data has been copied into the RPC header buffer.
	 */
	req->rl_send_iov[0].addr = req->rl_iov.addr;
	req->rl_send_iov[0].length = hdrlen;
	req->rl_send_iov[0].lkey = req->rl_iov.lkey;

	req->rl_send_iov[1].addr = req->rl_iov.addr + (base - req->rl_base);
	req->rl_send_iov[1].length = rpclen;
	req->rl_send_iov[1].lkey = req->rl_iov.lkey;

	req->rl_niovs = 2;

	if (padlen) {
		struct rpcrdma_ep *ep = &r_xprt->rx_ep;

		req->rl_send_iov[2].addr = ep->rep_pad.addr;
		req->rl_send_iov[2].length = padlen;
		req->rl_send_iov[2].lkey = ep->rep_pad.lkey;

		req->rl_send_iov[3].addr = req->rl_send_iov[1].addr + rpclen;
		req->rl_send_iov[3].length = rqst->rq_slen - rpclen;
		req->rl_send_iov[3].lkey = req->rl_iov.lkey;

		req->rl_niovs = 4;
	}

	return 0;
}

/*
 * Chase down a received write or reply chunklist to get length
 * RDMA'd by server. See map at rpcrdma_create_chunks()! :-)
 */
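/*
 * Returns the total number of bytes the server moved by RDMA Write,
 * or -1 if the chunk list has more than @max elements, is not
 * properly terminated, or runs past the end of the receive buffer.
 * On return, *iptrp points just past the chunk list.
 */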
static int
rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __be32 **iptrp)
{
	unsigned int i, total_len;
	struct rpcrdma_write_chunk *cur_wchunk;

	i = ntohl(**iptrp);	/* get array count */
	if (i > max)
		return -1;
	cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
	total_len = 0;
	while (i--) {
		struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
		ifdebug(FACILITY) {
			u64 off;
			xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
			dprintk("RPC:       %s: chunk %d@0x%llx:0x%x\n",
				__func__,
				ntohl(seg->rs_length),
				(unsigned long long)off,
				ntohl(seg->rs_handle));
		}
		total_len += ntohl(seg->rs_length);
		++cur_wchunk;
	}
	/* check and adjust for properly terminated write chunk */
	if (wrchunk) {
		__be32 *w = (__be32 *) cur_wchunk;
		if (*w++ != xdr_zero)
			return -1;
		cur_wchunk = (struct rpcrdma_write_chunk *) w;
	}
	if ((char *) cur_wchunk > rep->rr_base + rep->rr_len)
		return -1;

	*iptrp = (__be32 *) cur_wchunk;
	return total_len;
}

/*
 * Scatter inline received data back into provided iov's.
 */
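/*
 * @srcp and @copy_len describe the inline portion of the reply that
 * follows the RPC/RDMA header; @pad is the number of implicit zero
 * bytes to append to the tail to restore XDR padding the sender
 * omitted from the final chunk.
 */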
static void
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	int i, npages, curlen, olen;
	char *destp;

	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len) {	/* write chunk header fixup */
		curlen = copy_len;
		rqst->rq_rcv_buf.head[0].iov_len = curlen;
	}

	dprintk("RPC:       %s: srcp 0x%p len %d hdrlen %d\n",
		__func__, srcp, copy_len, curlen);

	/* Shift pointer for first receive segment only */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	srcp += curlen;
	copy_len -= curlen;

	olen = copy_len;
	i = 0;
	rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		npages = PAGE_ALIGN(rqst->rq_rcv_buf.page_base +
			rqst->rq_rcv_buf.page_len) >> PAGE_SHIFT;
		for (; i < npages; i++) {
			if (i == 0)
				curlen = PAGE_SIZE - rqst->rq_rcv_buf.page_base;
			else
				curlen = PAGE_SIZE;
			if (curlen > copy_len)
				curlen = copy_len;
			dprintk("RPC:       %s: page %d"
				" srcp 0x%p len %d curlen %d\n",
				__func__, i, srcp, copy_len, curlen);
			destp = kmap_atomic(rqst->rq_rcv_buf.pages[i],
						KM_SKB_SUNRPC_DATA);
			if (i == 0)
				memcpy(destp + rqst->rq_rcv_buf.page_base,
						srcp, curlen);
			else
				memcpy(destp, srcp, curlen);
			flush_dcache_page(rqst->rq_rcv_buf.pages[i]);
			kunmap_atomic(destp, KM_SKB_SUNRPC_DATA);
			srcp += curlen;
			copy_len -= curlen;
			if (copy_len == 0)
				break;
		}
		rqst->rq_rcv_buf.page_len = olen - copy_len;
	} else
		rqst->rq_rcv_buf.page_len = 0;

	if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) {
		curlen = copy_len;
		if (curlen > rqst->rq_rcv_buf.tail[0].iov_len)
			curlen = rqst->rq_rcv_buf.tail[0].iov_len;
		if (rqst->rq_rcv_buf.tail[0].iov_base != srcp)
			memmove(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen);
		dprintk("RPC:       %s: tail srcp 0x%p len %d curlen %d\n",
			__func__, srcp, copy_len, curlen);
		rqst->rq_rcv_buf.tail[0].iov_len = curlen;
		copy_len -= curlen; ++i;
	} else
		rqst->rq_rcv_buf.tail[0].iov_len = 0;

	if (pad) {
		/* implicit padding on terminal chunk */
		unsigned char *p = rqst->rq_rcv_buf.tail[0].iov_base;
		while (pad--)
			p[rqst->rq_rcv_buf.tail[0].iov_len++] = 0;
	}

	if (copy_len)
		dprintk("RPC:       %s: %d bytes in"
			" %d extra segments (%d lost)\n",
			__func__, olen, i, copy_len);

	/* TBD avoid a warning from call_decode() */
	rqst->rq_private_buf = rqst->rq_rcv_buf;
}

/*
 * This function is called when an async event that changes the
 * connection state is posted to the connection. All it does at this
 * point is mark the connection up/down; the RPC timers do the rest.
 */
void
rpcrdma_conn_func(struct rpcrdma_ep *ep)
{
	struct rpc_xprt *xprt = ep->rep_xprt;

	spin_lock_bh(&xprt->transport_lock);
	if (++xprt->connect_cookie == 0)	/* maintain a reserved value */
		++xprt->connect_cookie;
	if (ep->rep_connected > 0) {
		if (!xprt_test_and_set_connected(xprt))
			xprt_wake_pending_tasks(xprt, 0);
	} else {
		if (xprt_test_and_clear_connected(xprt))
			xprt_wake_pending_tasks(xprt, -ENOTCONN);
	}
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * This function is called when a memory window unbind we are waiting
 * for completes. Just use rr_func (zeroed by the upcall) to signal
 * completion.
 */
static void
rpcrdma_unbind_func(struct rpcrdma_rep *rep)
{
	wake_up(&rep->rr_unbind);
}

/*
 * Called as a tasklet to do req/reply match and complete a request.
 * Errors must result in the RPC task either being awakened or allowed
 * to time out, so that the error is discovered at that time.
 */
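/*
 * Processing: sanity-check the incoming RPC/RDMA header, match its
 * XID to a pending request, count any chunks the server wrote, fix
 * up inline data into the receive buffers, then complete the RPC.
 */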
void
rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_msg *headerp;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct rpc_xprt *xprt = rep->rr_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	__be32 *iptr;
	int i, rdmalen, status;

	/* Check status. If bad, signal disconnect and return rep to pool */
	if (rep->rr_len == ~0U) {
		rpcrdma_recv_buffer_put(rep);
		if (r_xprt->rx_ep.rep_connected == 1) {
			r_xprt->rx_ep.rep_connected = -EIO;
			rpcrdma_conn_func(&r_xprt->rx_ep);
		}
		return;
	}
	if (rep->rr_len < 28) {
		dprintk("RPC:       %s: short/invalid reply\n", __func__);
		goto repost;
	}
	headerp = (struct rpcrdma_msg *) rep->rr_base;
	if (headerp->rm_vers != xdr_one) {
		dprintk("RPC:       %s: invalid version %d\n",
			__func__, ntohl(headerp->rm_vers));
		goto repost;
	}

	/* Get XID and try for a match. */
	spin_lock(&xprt->transport_lock);
	rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
	if (rqst == NULL) {
		spin_unlock(&xprt->transport_lock);
		dprintk("RPC:       %s: reply 0x%p failed "
			"to match any request xid 0x%08x len %d\n",
			__func__, rep, headerp->rm_xid, rep->rr_len);
repost:
		r_xprt->rx_stats.bad_reply_count++;
		rep->rr_func = rpcrdma_reply_handler;
		if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
			rpcrdma_recv_buffer_put(rep);

		return;
	}

	/* get request object */
	req = rpcr_to_rdmar(rqst);

	dprintk("RPC:       %s: reply 0x%p completes request 0x%p\n"
		"                   RPC request 0x%p xid 0x%08x\n",
			__func__, rep, req, rqst, headerp->rm_xid);

	BUG_ON(!req || req->rl_reply);

	/* from here on, the reply is no longer an orphan */
	req->rl_reply = rep;

	/* check for expected message types */
	/* The order of some of these tests is important. */
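	/*
	 * Each case label is a compile-time htonl() of the message type,
	 * so rm_type can be compared without byte-swapping at run time.
	 */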
	switch (headerp->rm_type) {
	case htonl(RDMA_MSG):
		/* never expect read chunks */
		/* never expect reply chunks (two ways to check) */
		/* never expect write chunks without having offered RDMA */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    (headerp->rm_body.rm_chunks[1] == xdr_zero &&
		     headerp->rm_body.rm_chunks[2] != xdr_zero) ||
		    (headerp->rm_body.rm_chunks[1] != xdr_zero &&
		     req->rl_nchunks == 0))
			goto badheader;
		if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
			/* count any expected write chunks in read reply */
			/* start at write chunk array count */
			iptr = &headerp->rm_body.rm_chunks[2];
			rdmalen = rpcrdma_count_chunks(rep,
						req->rl_nchunks, 1, &iptr);
			/* check for validity, and no reply chunk after */
			if (rdmalen < 0 || *iptr++ != xdr_zero)
				goto badheader;
			rep->rr_len -=
			    ((unsigned char *)iptr - (unsigned char *)headerp);
			status = rep->rr_len + rdmalen;
			r_xprt->rx_stats.total_rdma_reply += rdmalen;
			/* special case - last chunk may omit padding */
			if (rdmalen &= 3) {
				rdmalen = 4 - rdmalen;
				status += rdmalen;
			}
		} else {
			/* else ordinary inline */
			rdmalen = 0;
			iptr = (__be32 *)((unsigned char *)headerp + 28);
			rep->rr_len -= 28; /*sizeof *headerp;*/
			status = rep->rr_len;
		}
		/* Fix up the rpc results for upper layer */
		rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen);
		break;

	case htonl(RDMA_NOMSG):
		/* never expect read or write chunks, always reply chunks */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    headerp->rm_body.rm_chunks[1] != xdr_zero ||
		    headerp->rm_body.rm_chunks[2] != xdr_one ||
		    req->rl_nchunks == 0)
			goto badheader;
		iptr = (__be32 *)((unsigned char *)headerp + 28);
		rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
		if (rdmalen < 0)
			goto badheader;
		r_xprt->rx_stats.total_rdma_reply += rdmalen;
		/* Reply chunk buffer already is the reply vector - no fixup. */
		status = rdmalen;
		break;

badheader:
	default:
		dprintk("%s: invalid rpcrdma reply header (type %d):"
				" chunks[012] == %d %d %d"
				" expected chunks <= %d\n",
				__func__, ntohl(headerp->rm_type),
				headerp->rm_body.rm_chunks[0],
				headerp->rm_body.rm_chunks[1],
				headerp->rm_body.rm_chunks[2],
				req->rl_nchunks);
		status = -EIO;
		r_xprt->rx_stats.bad_reply_count++;
		break;
	}

	/* If using memory window binding, start the deregister process now. */
	/* (Note: when mr_free() is required, it cannot be done here, in tasklet context.) */
	if (req->rl_nchunks) switch (r_xprt->rx_ia.ri_memreg_strategy) {
	case RPCRDMA_MEMWINDOWS:
		for (i = 0; req->rl_nchunks-- > 1;)
			i += rpcrdma_deregister_external(
				&req->rl_segments[i], r_xprt, NULL);
		/* Optionally wait (not here) for unbinds to complete */
		rep->rr_func = rpcrdma_unbind_func;
		(void) rpcrdma_deregister_external(&req->rl_segments[i],
						   r_xprt, rep);
		break;
	case RPCRDMA_MEMWINDOWS_ASYNC:
		for (i = 0; req->rl_nchunks--;)
			i += rpcrdma_deregister_external(&req->rl_segments[i],
							 r_xprt, NULL);
		break;
	default:
		break;
	}

	dprintk("RPC:       %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
			__func__, xprt, rqst, status);
	xprt_complete_rqst(rqst->rq_task, status);
	spin_unlock(&xprt->transport_lock);
}