linux/net/rds/rdma.c
/*
 * Copyright (c) 2007 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/pagemap.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */

#include "rdma.h"

/*
 * XXX
 *  - build with sparse
 *  - should we limit the size of an MR region?  let the transport return failure?
 *  - should we detect duplicate keys on a socket?  hmm.
 *  - an RDMA MR pins pages much like mlock(); apply an rlimit?
 */

/*
 * get the number of pages by looking at the page indices that the start and
 * end addresses fall in.
 *
 * Returns 0 if the vec is invalid.  It is invalid if the number of bytes
 * causes the address to wrap or overflows an unsigned int.  This comes
 * from being stored in the 'length' member of 'struct scatterlist'.
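 *
 * For example (an illustrative case, assuming 4K pages): addr 0x1ff8 and
 * bytes 0x20 end at 0x2018, so the vec touches page indices 1 and 2 and
 * this returns ((0x2018 + 0xfff) >> 12) - (0x1ff8 >> 12) = 3 - 1 = 2.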
 */
static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
{
        if ((vec->addr + vec->bytes <= vec->addr) ||
            (vec->bytes > (u64)UINT_MAX))
                return 0;

        return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
                (vec->addr >> PAGE_SHIFT);
}

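/*
 * Look up the MR with the given R_Key in a socket's rbtree.  If 'insert'
 * is non-NULL and the key is not already present, link it into the tree
 * and take a reference on it.  Returns the existing MR on a key match,
 * NULL otherwise.
 */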
static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
                                       struct rds_mr *insert)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct rds_mr *mr;

        while (*p) {
                parent = *p;
                mr = rb_entry(parent, struct rds_mr, r_rb_node);

                if (key < mr->r_key)
                        p = &(*p)->rb_left;
                else if (key > mr->r_key)
                        p = &(*p)->rb_right;
                else
                        return mr;
        }

        if (insert) {
                rb_link_node(&insert->r_rb_node, parent, p);
                rb_insert_color(&insert->r_rb_node, root);
                atomic_inc(&insert->r_refcount);
        }
        return NULL;
}

/*
 * Destroy the transport-specific part of an MR.
 */
static void rds_destroy_mr(struct rds_mr *mr)
{
        struct rds_sock *rs = mr->r_sock;
        void *trans_private = NULL;
        unsigned long flags;

        rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
                        mr->r_key, atomic_read(&mr->r_refcount));

        if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state))
                return;

        spin_lock_irqsave(&rs->rs_rdma_lock, flags);
        if (!RB_EMPTY_NODE(&mr->r_rb_node))
                rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
        trans_private = mr->r_trans_private;
        mr->r_trans_private = NULL;
        spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

        if (trans_private)
                mr->r_trans->free_mr(trans_private, mr->r_invalidate);
}

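/*
 * Called by rds_mr_put() when the last reference is dropped: tear down
 * the transport state and free the MR itself.
 */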
void __rds_put_mr_final(struct rds_mr *mr)
{
        rds_destroy_mr(mr);
        kfree(mr);
}

/*
 * By the time this is called we can't have any more ioctls called on
 * the socket so we don't need to worry about racing with others.
 */
void rds_rdma_drop_keys(struct rds_sock *rs)
{
        struct rds_mr *mr;
        struct rb_node *node;

        /* Release any MRs associated with this socket */
        while ((node = rb_first(&rs->rs_rdma_keys))) {
                mr = container_of(node, struct rds_mr, r_rb_node);
                if (mr->r_trans == rs->rs_transport)
                        mr->r_invalidate = 0;
                rds_mr_put(mr);
        }

        if (rs->rs_transport && rs->rs_transport->flush_mrs)
                rs->rs_transport->flush_mrs();
}

/*
 * Helper function to pin user pages.
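 *
 * Returns the number of pages pinned, or a negative errno.  A partial
 * pin is treated as a failure: any pages that were pinned are released
 * and -EFAULT is returned.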
 */
static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
                        struct page **pages, int write)
{
        int ret;

        ret = get_user_pages_fast(user_addr, nr_pages, write, pages);

        if (ret >= 0 && ret < nr_pages) {
                while (ret--)
                        put_page(pages[ret]);
                ret = -EFAULT;
        }

        return ret;
}

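/*
 * Pin the user buffer described by args->vec, hand the resulting page
 * list to the transport's get_mr() to obtain an R_Key, and insert the
 * new MR into the socket's rbtree.  The <R_Key, offset> cookie is
 * returned through *cookie_ret and/or written to args->cookie_addr.
 */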
static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
                                u64 *cookie_ret, struct rds_mr **mr_ret)
{
        struct rds_mr *mr = NULL, *found;
        unsigned int nr_pages;
        struct page **pages = NULL;
        struct scatterlist *sg;
        void *trans_private;
        unsigned long flags;
        rds_rdma_cookie_t cookie;
        unsigned int nents;
        long i;
        int ret;

        if (rs->rs_bound_addr == 0) {
                ret = -ENOTCONN; /* XXX not a great errno */
                goto out;
        }

        if (rs->rs_transport->get_mr == NULL) {
                ret = -EOPNOTSUPP;
                goto out;
        }

        nr_pages = rds_pages_in_vec(&args->vec);
        if (nr_pages == 0) {
                ret = -EINVAL;
                goto out;
        }

        rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
                args->vec.addr, args->vec.bytes, nr_pages);

        /* XXX clamp nr_pages to limit the size of this alloc? */
        pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
                ret = -ENOMEM;
                goto out;
        }

        mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
        if (mr == NULL) {
                ret = -ENOMEM;
                goto out;
        }

        atomic_set(&mr->r_refcount, 1);
        RB_CLEAR_NODE(&mr->r_rb_node);
        mr->r_trans = rs->rs_transport;
        mr->r_sock = rs;

        if (args->flags & RDS_RDMA_USE_ONCE)
                mr->r_use_once = 1;
        if (args->flags & RDS_RDMA_INVALIDATE)
                mr->r_invalidate = 1;
        if (args->flags & RDS_RDMA_READWRITE)
                mr->r_write = 1;

        /*
         * Pin the pages that make up the user buffer and transfer the page
         * pointers to the mr's sg array.  We check to see if we've mapped
         * the whole region after transferring the partial page references
         * to the sg array so that we can have one page ref cleanup path.
         *
         * For now we have no flag that tells us whether the mapping is
         * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to
         * the zero page.
         */
        ret = rds_pin_pages(args->vec.addr & PAGE_MASK, nr_pages, pages, 1);
        if (ret < 0)
                goto out;

        nents = ret;
        sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);
        if (sg == NULL) {
                ret = -ENOMEM;
                goto out;
        }
        WARN_ON(!nents);
        sg_init_table(sg, nents);

        /* Stick all pages into the scatterlist */
        for (i = 0; i < nents; i++)
                sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

        rdsdebug("RDS: trans_private nents is %u\n", nents);

        /* Obtain a transport specific MR. If this succeeds, the
         * s/g list is now owned by the MR.
         * Note that dma_map() implies that pending writes are
         * flushed to RAM, so no dma_sync is needed here. */
        trans_private = rs->rs_transport->get_mr(sg, nents, rs,
                                                 &mr->r_key);

        if (IS_ERR(trans_private)) {
                for (i = 0; i < nents; i++)
                        put_page(sg_page(&sg[i]));
                kfree(sg);
                ret = PTR_ERR(trans_private);
                goto out;
        }

        mr->r_trans_private = trans_private;

        rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n",
               mr->r_key, (void *)(unsigned long) args->cookie_addr);

        /* The user may pass us an unaligned address, but we can only
         * map page aligned regions. So we keep the offset, and build
         * a 64bit cookie containing <R_Key, offset> and pass that
         * around. */
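        /* rds_rdma_make_cookie() packs the R_Key into the low 32 bits of
         * the cookie and the page offset into the high 32 bits. */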
        cookie = rds_rdma_make_cookie(mr->r_key, args->vec.addr & ~PAGE_MASK);
        if (cookie_ret)
                *cookie_ret = cookie;

        if (args->cookie_addr && put_user(cookie, (u64 __user *)(unsigned long) args->cookie_addr)) {
                ret = -EFAULT;
                goto out;
        }

        /* Inserting the new MR into the rbtree bumps its
         * reference count. */
        spin_lock_irqsave(&rs->rs_rdma_lock, flags);
        found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
        spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

        BUG_ON(found && found != mr);

        rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
        if (mr_ret) {
                atomic_inc(&mr->r_refcount);
                *mr_ret = mr;
        }

        ret = 0;
out:
        kfree(pages);
        if (mr)
                rds_mr_put(mr);
        return ret;
}

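/*
 * setsockopt(SOL_RDS, RDS_GET_MR) entry point.  A minimal userspace
 * sketch, for illustration only (assuming the rds.h uapi definitions
 * and a bound RDS socket 'fd'):
 *
 *      struct rds_get_mr_args args = {
 *              .vec         = { .addr = (u64) buf, .bytes = len },
 *              .cookie_addr = (u64) &cookie,
 *              .flags       = RDS_RDMA_USE_ONCE,
 *      };
 *      setsockopt(fd, SOL_RDS, RDS_GET_MR, &args, sizeof(args));
 */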
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
        struct rds_get_mr_args args;

        if (optlen != sizeof(struct rds_get_mr_args))
                return -EINVAL;

        if (copy_from_user(&args, (struct rds_get_mr_args __user *)optval,
                           sizeof(struct rds_get_mr_args)))
                return -EFAULT;

        return __rds_rdma_map(rs, &args, NULL, NULL);
}

/*
 * Free the MR indicated by the given R_Key
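 * (reached via setsockopt(SOL_RDS, RDS_FREE_MR)).  A null cookie is a
 * special case that flushes all unused MRs instead.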
 */
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
        struct rds_free_mr_args args;
        struct rds_mr *mr;
        unsigned long flags;

        if (optlen != sizeof(struct rds_free_mr_args))
                return -EINVAL;

        if (copy_from_user(&args, (struct rds_free_mr_args __user *)optval,
                           sizeof(struct rds_free_mr_args)))
                return -EFAULT;

        /* Special case - a null cookie means flush all unused MRs */
        if (args.cookie == 0) {
                if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
                        return -EINVAL;
                rs->rs_transport->flush_mrs();
                return 0;
        }

        /* Look up the MR given its R_key and remove it from the rbtree
         * so nobody else finds it.
         * This should also prevent races with rds_rdma_unuse.
         */
        spin_lock_irqsave(&rs->rs_rdma_lock, flags);
        mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
        if (mr) {
                rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
                RB_CLEAR_NODE(&mr->r_rb_node);
                if (args.flags & RDS_RDMA_INVALIDATE)
                        mr->r_invalidate = 1;
        }
        spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

        if (!mr)
                return -EINVAL;

        /*
         * Call rds_destroy_mr() ourselves so that we're sure it's done by the
         * time we return.  If we left it to rds_mr_put(), it might not happen
         * until someone else drops their ref.
         */
        rds_destroy_mr(mr);
        rds_mr_put(mr);
        return 0;
}

/*
 * This is called when we receive an extension header that
 * tells us this MR was used. It allows us to implement
 * use_once semantics.
 */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
{
        struct rds_mr *mr;
        unsigned long flags;
        int zot_me = 0;

        spin_lock_irqsave(&rs->rs_rdma_lock, flags);
        mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
        if (mr && (mr->r_use_once || force)) {
                rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
                RB_CLEAR_NODE(&mr->r_rb_node);
                zot_me = 1;
        } else if (mr)
                atomic_inc(&mr->r_refcount);
        spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

        /* May have to issue a dma_sync on this memory region.
         * Note we could avoid this if the operation was an RDMA READ,
         * but at this point we can't tell. */
        if (mr != NULL) {
                if (mr->r_trans->sync_mr)
                        mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);

                /* If the MR was marked as invalidate, this will
                 * trigger an async flush. */
                if (zot_me)
                        rds_destroy_mr(mr);
                rds_mr_put(mr);
        }
}

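/*
 * Release the pages pinned for an rdma op and free the op itself.
 * Pages that an RDMA READ may have written into are marked dirty
 * before release.
 */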
void rds_rdma_free_op(struct rds_rdma_op *ro)
{
        unsigned int i;

        for (i = 0; i < ro->r_nents; i++) {
                struct page *page = sg_page(&ro->r_sg[i]);

                /* Mark page dirty if it was possibly modified, which
                 * is the case for an RDMA_READ which copies from remote
                 * to local memory. */
                if (!ro->r_write)
                        set_page_dirty(page);
                put_page(page);
        }

        kfree(ro->r_notifier);
        kfree(ro);
}

/*
 * args is a pointer to an in-kernel copy in the sendmsg cmsg.
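 *
 * This makes two passes over the user's iovec: the first sizes the page
 * array and the sg allocation, the second re-copies each vec and pins
 * its pages.  Since userspace could rewrite the iovec between the two
 * passes, the second pass re-checks the counts against the first.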
 */
static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
                                            struct rds_rdma_args *args)
{
        struct rds_iovec vec;
        struct rds_rdma_op *op = NULL;
        unsigned int nr_pages;
        unsigned int max_pages;
        unsigned int nr_bytes;
        struct page **pages = NULL;
        struct rds_iovec __user *local_vec;
        struct scatterlist *sg;
        unsigned int nr;
        unsigned int i, j;
        int ret;

        if (rs->rs_bound_addr == 0) {
                ret = -ENOTCONN; /* XXX not a great errno */
                goto out;
        }

        if (args->nr_local > (u64)UINT_MAX) {
                ret = -EMSGSIZE;
                goto out;
        }

        nr_pages = 0;
        max_pages = 0;

        local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;

        /* figure out the number of pages in the vector */
        for (i = 0; i < args->nr_local; i++) {
                if (copy_from_user(&vec, &local_vec[i],
                                   sizeof(struct rds_iovec))) {
                        ret = -EFAULT;
                        goto out;
                }

                nr = rds_pages_in_vec(&vec);
                if (nr == 0) {
                        ret = -EINVAL;
                        goto out;
                }

                max_pages = max(nr, max_pages);
                nr_pages += nr;
        }

        pages = kcalloc(max_pages, sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
                ret = -ENOMEM;
                goto out;
        }

        op = kzalloc(offsetof(struct rds_rdma_op, r_sg[nr_pages]), GFP_KERNEL);
        if (op == NULL) {
                ret = -ENOMEM;
                goto out;
        }

        op->r_write = !!(args->flags & RDS_RDMA_READWRITE);
        op->r_fence = !!(args->flags & RDS_RDMA_FENCE);
        op->r_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
        op->r_recverr = rs->rs_recverr;
        WARN_ON(!nr_pages);
        sg_init_table(op->r_sg, nr_pages);

        if (op->r_notify || op->r_recverr) {
                /* We allocate an uninitialized notifier here, because
                 * we don't want to do that in the completion handler. We
                 * would have to use GFP_ATOMIC there, and don't want to deal
                 * with failed allocations.
                 */
                op->r_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
                if (!op->r_notifier) {
                        ret = -ENOMEM;
                        goto out;
                }
                op->r_notifier->n_user_token = args->user_token;
                op->r_notifier->n_status = RDS_RDMA_SUCCESS;
        }

        /* The cookie contains the R_Key of the remote memory region, and
         * optionally an offset into it. This is how we implement RDMA into
         * unaligned memory.
         * When setting up the RDMA, we need to add that offset to the
         * destination address (which is really an offset into the MR)
         * FIXME: We may want to move this into ib_rdma.c
         */
        op->r_key = rds_rdma_cookie_key(args->cookie);
        op->r_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);

        nr_bytes = 0;

        rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
               (unsigned long long)args->nr_local,
               (unsigned long long)args->remote_vec.addr,
               op->r_key);

        for (i = 0; i < args->nr_local; i++) {
                if (copy_from_user(&vec, &local_vec[i],
                                   sizeof(struct rds_iovec))) {
                        ret = -EFAULT;
                        goto out;
                }

                nr = rds_pages_in_vec(&vec);
                if (nr == 0) {
                        ret = -EINVAL;
                        goto out;
                }

                rs->rs_user_addr = vec.addr;
                rs->rs_user_bytes = vec.bytes;

                /* did the user change the vec under us? */
                if (nr > max_pages || op->r_nents + nr > nr_pages) {
                        ret = -EINVAL;
                        goto out;
                }
                /* If it's a WRITE operation, we want to pin the pages for reading.
                 * If it's a READ operation, we need to pin the pages for writing.
                 */
                ret = rds_pin_pages(vec.addr & PAGE_MASK, nr, pages, !op->r_write);
                if (ret < 0)
                        goto out;

                rdsdebug("RDS: nr_bytes %u nr %u vec.bytes %llu vec.addr %llx\n",
                       nr_bytes, nr, vec.bytes, vec.addr);

                nr_bytes += vec.bytes;

                for (j = 0; j < nr; j++) {
                        unsigned int offset = vec.addr & ~PAGE_MASK;

                        sg = &op->r_sg[op->r_nents + j];
                        sg_set_page(sg, pages[j],
                                        min_t(unsigned int, vec.bytes, PAGE_SIZE - offset),
                                        offset);

                        rdsdebug("RDS: sg->offset %x sg->len %x vec.addr %llx vec.bytes %llu\n",
                               sg->offset, sg->length, vec.addr, vec.bytes);

                        vec.addr += sg->length;
                        vec.bytes -= sg->length;
                }

                op->r_nents += nr;
        }

        if (nr_bytes > args->remote_vec.bytes) {
                rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
                                nr_bytes,
                                (unsigned int) args->remote_vec.bytes);
                ret = -EINVAL;
                goto out;
        }
        op->r_bytes = nr_bytes;

        ret = 0;
out:
        kfree(pages);
        if (ret) {
                if (op)
                        rds_rdma_free_op(op);
                op = ERR_PTR(ret);
        }
        return op;
}

/*
 * The application asks for an RDMA transfer.
 * Extract all arguments and set up the rdma_op.
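 *
 * An illustrative userspace sketch (assuming the rds.h uapi; the args
 * travel as ancillary data on sendmsg()):
 *
 *      struct rds_rdma_args rargs = {
 *              .cookie         = cookie,
 *              .remote_vec     = { .addr = 0, .bytes = len },
 *              .local_vec_addr = (u64) &iov,
 *              .nr_local       = 1,
 *              .flags          = RDS_RDMA_READWRITE | RDS_RDMA_NOTIFY_ME,
 *              .user_token     = token,
 *      };
 *      cmsg->cmsg_level = SOL_RDS;
 *      cmsg->cmsg_type  = RDS_CMSG_RDMA_ARGS;
 *      cmsg->cmsg_len   = CMSG_LEN(sizeof(rargs));
 *      memcpy(CMSG_DATA(cmsg), &rargs, sizeof(rargs));
 *      sendmsg(fd, &msg, 0);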
 */
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
                          struct cmsghdr *cmsg)
{
        struct rds_rdma_op *op;

        if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args)) ||
            rm->m_rdma_op != NULL)
                return -EINVAL;

        op = rds_rdma_prepare(rs, CMSG_DATA(cmsg));
        if (IS_ERR(op))
                return PTR_ERR(op);
        rds_stats_inc(s_send_rdma);
        rm->m_rdma_op = op;
        return 0;
}

/*
 * The application wants us to pass an RDMA destination (aka MR)
 * to the remote
 */
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
                          struct cmsghdr *cmsg)
{
        unsigned long flags;
        struct rds_mr *mr;
        u32 r_key;
        int err = 0;

        if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
            rm->m_rdma_cookie != 0)
                return -EINVAL;

        memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));

        /* We are reusing a previously mapped MR here. Most likely, the
         * application has written to the buffer, so we need to explicitly
         * flush those writes to RAM. Otherwise the HCA may not see them
         * when doing a DMA from that buffer.
         */
        r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);

        spin_lock_irqsave(&rs->rs_rdma_lock, flags);
        mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
        if (mr == NULL)
                err = -EINVAL;  /* invalid r_key */
        else
                atomic_inc(&mr->r_refcount);
        spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

        if (mr) {
                mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
                rm->m_rdma_mr = mr;
        }
        return err;
}

/*
 * The application passes us an address range it wants to enable RDMA
 * to/from. We map the area, and save the <R_Key,offset> pair
 * in rm->m_rdma_cookie. This causes it to be sent along to the peer
 * in an extension header.
 */
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
                          struct cmsghdr *cmsg)
{
        if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
            rm->m_rdma_cookie != 0)
                return -EINVAL;

        return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->m_rdma_mr);
}