linux/net/rds/rdma.c
/*
 * Copyright (c) 2007, 2017 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */

#include "rds.h"

/*
 * XXX
 *  - build with sparse
 *  - should we detect duplicate keys on a socket?  hmm.
 *  - an rdma is an mlock, apply rlimit?
 */

/*
 * get the number of pages by looking at the page indices that the start and
 * end addresses fall in.
 *
 * Returns 0 if the vec is invalid.  It is invalid if the number of bytes
 * causes the address to wrap or overflows an unsigned int.  This comes
 * from being stored in the 'length' member of 'struct scatterlist'.
 */
static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
{
        if ((vec->addr + vec->bytes <= vec->addr) ||
            (vec->bytes > (u64)UINT_MAX))
                return 0;

        return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
                (vec->addr >> PAGE_SHIFT);
}
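
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096): a vec with
 * addr == 0x1ff0 and bytes == 0x20 starts in page 1 and ends in page 2,
 * so rds_pages_in_vec() returns ((0x2010 + 0xfff) >> 12) - (0x1ff0 >> 12),
 * i.e. 3 - 1 == 2 pages, even though only 32 bytes are described.
 */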
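/*
 * Look up the MR with the given R_Key in the socket's rbtree.  If @insert
 * is non-NULL and no MR with that key exists, link @insert into the tree
 * and take a reference on it.  Returns the MR that was already in the
 * tree, or NULL (including when @insert was just added).
 */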
static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
                                       struct rds_mr *insert)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct rds_mr *mr;

        while (*p) {
                parent = *p;
                mr = rb_entry(parent, struct rds_mr, r_rb_node);

                if (key < mr->r_key)
                        p = &(*p)->rb_left;
                else if (key > mr->r_key)
                        p = &(*p)->rb_right;
                else
                        return mr;
        }

        if (insert) {
                rb_link_node(&insert->r_rb_node, parent, p);
                rb_insert_color(&insert->r_rb_node, root);
                refcount_inc(&insert->r_refcount);
        }
        return NULL;
}

/*
 * Destroy the transport-specific part of an MR.
 */
static void rds_destroy_mr(struct rds_mr *mr)
{
        struct rds_sock *rs = mr->r_sock;
        void *trans_private = NULL;
        unsigned long flags;

        rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
                        mr->r_key, refcount_read(&mr->r_refcount));

        if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state))
                return;

        spin_lock_irqsave(&rs->rs_rdma_lock, flags);
        if (!RB_EMPTY_NODE(&mr->r_rb_node))
                rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
        trans_private = mr->r_trans_private;
        mr->r_trans_private = NULL;
        spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

        if (trans_private)
                mr->r_trans->free_mr(trans_private, mr->r_invalidate);
}

void __rds_put_mr_final(struct rds_mr *mr)
{
        rds_destroy_mr(mr);
        kfree(mr);
}

/*
 * By the time this is called, we can't have any more ioctls called on
 * the socket, so we don't need to worry about racing with others.
 */
void rds_rdma_drop_keys(struct rds_sock *rs)
{
        struct rds_mr *mr;
        struct rb_node *node;
        unsigned long flags;

        /* Release any MRs associated with this socket */
        spin_lock_irqsave(&rs->rs_rdma_lock, flags);
        while ((node = rb_first(&rs->rs_rdma_keys))) {
                mr = rb_entry(node, struct rds_mr, r_rb_node);
                if (mr->r_trans == rs->rs_transport)
                        mr->r_invalidate = 0;
                rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
                RB_CLEAR_NODE(&mr->r_rb_node);
                spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
                rds_destroy_mr(mr);
                rds_mr_put(mr);
                spin_lock_irqsave(&rs->rs_rdma_lock, flags);
        }
        spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

        if (rs->rs_transport && rs->rs_transport->flush_mrs)
                rs->rs_transport->flush_mrs();
}

/*
 * Helper function to pin user pages: either pins all nr_pages pages and
 * returns their count, or pins none.  A partial pin is undone and
 * reported as -EFAULT; other failures return the negative errno from
 * get_user_pages_fast().
 */
static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
                        struct page **pages, int write)
{
        int ret;

        ret = get_user_pages_fast(user_addr, nr_pages, write ? FOLL_WRITE : 0,
                                  pages);

        if (ret >= 0 && ret < nr_pages) {
                while (ret--)
                        put_page(pages[ret]);
                ret = -EFAULT;
        }

        return ret;
}

static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
                          u64 *cookie_ret, struct rds_mr **mr_ret,
                          struct rds_conn_path *cp)
{
        struct rds_mr *mr = NULL, *found;
        unsigned int nr_pages;
        struct page **pages = NULL;
        struct scatterlist *sg;
        void *trans_private;
        unsigned long flags;
        rds_rdma_cookie_t cookie;
        unsigned int nents;
        long i;
        int ret;

        if (ipv6_addr_any(&rs->rs_bound_addr) || !rs->rs_transport) {
                ret = -ENOTCONN; /* XXX not a great errno */
                goto out;
        }

        if (!rs->rs_transport->get_mr) {
                ret = -EOPNOTSUPP;
                goto out;
        }

        nr_pages = rds_pages_in_vec(&args->vec);
        if (nr_pages == 0) {
                ret = -EINVAL;
                goto out;
        }

        /* Restrict the size of mr irrespective of underlying transport.
         * To account for unaligned mr regions, subtract one from nr_pages.
         */
        if ((nr_pages - 1) > (RDS_MAX_MSG_SIZE >> PAGE_SHIFT)) {
                ret = -EMSGSIZE;
                goto out;
        }

        rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
                args->vec.addr, args->vec.bytes, nr_pages);

        /* XXX clamp nr_pages to limit the size of this alloc? */
        pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
        if (!pages) {
                ret = -ENOMEM;
                goto out;
        }

        mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
        if (!mr) {
                ret = -ENOMEM;
                goto out;
        }

        refcount_set(&mr->r_refcount, 1);
        RB_CLEAR_NODE(&mr->r_rb_node);
        mr->r_trans = rs->rs_transport;
        mr->r_sock = rs;

        if (args->flags & RDS_RDMA_USE_ONCE)
                mr->r_use_once = 1;
        if (args->flags & RDS_RDMA_INVALIDATE)
                mr->r_invalidate = 1;
        if (args->flags & RDS_RDMA_READWRITE)
                mr->r_write = 1;

        /*
         * Pin the pages that make up the user buffer and transfer the page
         * pointers to the mr's sg array.  We check to see if we've mapped
         * the whole region after transferring the partial page references
         * to the sg array so that we can have one page ref cleanup path.
         *
         * For now we have no flag that tells us whether the mapping is
         * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to
         * the zero page.
         */
        ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1);
        if (ret < 0)
                goto out;

        nents = ret;
        sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);
        if (!sg) {
                ret = -ENOMEM;
                goto out;
        }
        WARN_ON(!nents);
        sg_init_table(sg, nents);

        /* Stick all pages into the scatterlist */
        for (i = 0 ; i < nents; i++)
                sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

        rdsdebug("RDS: trans_private nents is %u\n", nents);

        /* Obtain a transport-specific MR. If this succeeds, the
         * s/g list is now owned by the MR.
         * Note that dma_map() implies that pending writes are
         * flushed to RAM, so no dma_sync is needed here. */
        trans_private = rs->rs_transport->get_mr(sg, nents, rs,
                                                 &mr->r_key,
                                                 cp ? cp->cp_conn : NULL);

        if (IS_ERR(trans_private)) {
                for (i = 0 ; i < nents; i++)
                        put_page(sg_page(&sg[i]));
                kfree(sg);
                ret = PTR_ERR(trans_private);
                goto out;
        }

        mr->r_trans_private = trans_private;

        rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n",
               mr->r_key, (void *)(unsigned long) args->cookie_addr);

        /* The user may pass us an unaligned address, but we can only
         * map page aligned regions. So we keep the offset, and build
         * a 64-bit cookie containing <R_Key, offset> and pass that
         * around. */
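        /* Sketch of the cookie layout as implemented by the
         * rds_rdma_make_cookie() helpers (illustrative; see their
         * definitions for the authoritative encoding): the R_Key sits in
         * the low 32 bits and the in-page offset in the high 32 bits,
         * roughly cookie = (u64)offset << 32 | r_key, with
         * rds_rdma_cookie_key() and rds_rdma_cookie_offset() extracting
         * the two halves. */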
        cookie = rds_rdma_make_cookie(mr->r_key, args->vec.addr & ~PAGE_MASK);
        if (cookie_ret)
                *cookie_ret = cookie;

        if (args->cookie_addr && put_user(cookie, (u64 __user *)(unsigned long) args->cookie_addr)) {
                ret = -EFAULT;
                goto out;
        }

        /* Inserting the new MR into the rbtree bumps its
         * reference count. */
        spin_lock_irqsave(&rs->rs_rdma_lock, flags);
        found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
        spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

        BUG_ON(found && found != mr);

        rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
        if (mr_ret) {
                refcount_inc(&mr->r_refcount);
                *mr_ret = mr;
        }

        ret = 0;
out:
        kfree(pages);
        if (mr)
                rds_mr_put(mr);
        return ret;
}
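
/*
 * Entry point for the RDS_GET_MR socket option.  A hypothetical caller in
 * userspace might look like this (illustrative only, declarations and
 * error handling omitted):
 *
 *      struct rds_get_mr_args args = {
 *              .vec         = { .addr = (u64) buf, .bytes = len },
 *              .cookie_addr = (u64) &cookie,
 *              .flags       = RDS_RDMA_USE_ONCE,
 *      };
 *      setsockopt(fd, SOL_RDS, RDS_GET_MR, &args, sizeof(args));
 */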

int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
        struct rds_get_mr_args args;

        if (optlen != sizeof(struct rds_get_mr_args))
                return -EINVAL;

        if (copy_from_user(&args, (struct rds_get_mr_args __user *)optval,
                           sizeof(struct rds_get_mr_args)))
                return -EFAULT;

        return __rds_rdma_map(rs, &args, NULL, NULL, NULL);
}

int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
{
        struct rds_get_mr_for_dest_args args;
        struct rds_get_mr_args new_args;

        if (optlen != sizeof(struct rds_get_mr_for_dest_args))
                return -EINVAL;

        if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval,
                           sizeof(struct rds_get_mr_for_dest_args)))
                return -EFAULT;

        /*
         * Initially, just behave like get_mr().
         * TODO: Implement get_mr as wrapper around this
         *       and deprecate it.
         */
        new_args.vec = args.vec;
        new_args.cookie_addr = args.cookie_addr;
        new_args.flags = args.flags;

        return __rds_rdma_map(rs, &new_args, NULL, NULL, NULL);
}

/*
 * Free the MR indicated by the given R_Key
 */
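/*
 * Illustrative userspace usage of the RDS_FREE_MR socket option
 * (hypothetical sketch; a cookie of 0 instead flushes all unused MRs):
 *
 *      struct rds_free_mr_args args = {
 *              .cookie = cookie,
 *              .flags  = RDS_RDMA_INVALIDATE,
 *      };
 *      setsockopt(fd, SOL_RDS, RDS_FREE_MR, &args, sizeof(args));
 */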
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
        struct rds_free_mr_args args;
        struct rds_mr *mr;
        unsigned long flags;

        if (optlen != sizeof(struct rds_free_mr_args))
                return -EINVAL;

        if (copy_from_user(&args, (struct rds_free_mr_args __user *)optval,
                           sizeof(struct rds_free_mr_args)))
                return -EFAULT;

        /* Special case - a null cookie means flush all unused MRs */
        if (args.cookie == 0) {
                if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
                        return -EINVAL;
                rs->rs_transport->flush_mrs();
                return 0;
        }

        /* Look up the MR given its R_Key and remove it from the rbtree
         * so nobody else finds it.
         * This should also prevent races with rds_rdma_unuse.
         */
        spin_lock_irqsave(&rs->rs_rdma_lock, flags);
        mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
        if (mr) {
                rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
                RB_CLEAR_NODE(&mr->r_rb_node);
                if (args.flags & RDS_RDMA_INVALIDATE)
                        mr->r_invalidate = 1;
        }
        spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

        if (!mr)
                return -EINVAL;

        /*
         * Call rds_destroy_mr() ourselves so that we're sure it's done by the
         * time we return.  If we let rds_mr_put() do it, it might not happen
         * until someone else drops their ref.
         */
        rds_destroy_mr(mr);
        rds_mr_put(mr);
        return 0;
}

/*
 * This is called when we receive an extension header that
 * tells us this MR was used. It allows us to implement
 * use_once semantics.
 */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
{
        struct rds_mr *mr;
        unsigned long flags;
        int zot_me = 0;

        spin_lock_irqsave(&rs->rs_rdma_lock, flags);
        mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
        if (!mr) {
                pr_debug("rds: trying to unuse MR with unknown r_key %u!\n",
                         r_key);
                spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
                return;
        }

        if (mr->r_use_once || force) {
                rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
                RB_CLEAR_NODE(&mr->r_rb_node);
                zot_me = 1;
        }
        spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

        /* May have to issue a dma_sync on this memory region.
         * Note we could avoid this if the operation was an RDMA READ,
         * but at this point we can't tell. */
        if (mr->r_trans->sync_mr)
                mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);

        /* If the MR was marked as invalidate, this will
         * trigger an async flush. */
        if (zot_me) {
                rds_destroy_mr(mr);
                rds_mr_put(mr);
        }
}

void rds_rdma_free_op(struct rm_rdma_op *ro)
{
        unsigned int i;

        for (i = 0; i < ro->op_nents; i++) {
                struct page *page = sg_page(&ro->op_sg[i]);

                /* Mark page dirty if it was possibly modified, which
                 * is the case for an RDMA_READ, which copies from remote
                 * to local memory */
                if (!ro->op_write) {
                        WARN_ON(!page->mapping && irqs_disabled());
                        set_page_dirty(page);
                }
                put_page(page);
        }

        kfree(ro->op_notifier);
        ro->op_notifier = NULL;
        ro->op_active = 0;
}

void rds_atomic_free_op(struct rm_atomic_op *ao)
{
        struct page *page = sg_page(ao->op_sg);

        /* Mark page dirty if it was possibly modified, which is always
         * the case for atomics: the old remote value is written back
         * to local memory */
        set_page_dirty(page);
        put_page(page);

        kfree(ao->op_notifier);
        ao->op_notifier = NULL;
        ao->op_active = 0;
}


/*
 * Count the number of pages needed to describe an incoming iovec array.
 */
static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
{
        int tot_pages = 0;
        unsigned int nr_pages;
        unsigned int i;

        /* figure out the number of pages in the vector */
        for (i = 0; i < nr_iovecs; i++) {
                nr_pages = rds_pages_in_vec(&iov[i]);
                if (nr_pages == 0)
                        return -EINVAL;

                tot_pages += nr_pages;

                /*
                 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
                 * so tot_pages cannot overflow without first going negative.
                 */
                if (tot_pages < 0)
                        return -EINVAL;
        }

        return tot_pages;
}

int rds_rdma_extra_size(struct rds_rdma_args *args,
                        struct rds_iov_vector *iov)
{
        struct rds_iovec *vec;
        struct rds_iovec __user *local_vec;
        int tot_pages = 0;
        unsigned int nr_pages;
        unsigned int i;

        local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;

        if (args->nr_local == 0)
                return -EINVAL;

        iov->iov = kcalloc(args->nr_local,
                           sizeof(struct rds_iovec),
                           GFP_KERNEL);
        if (!iov->iov)
                return -ENOMEM;

        vec = &iov->iov[0];

        if (copy_from_user(vec, local_vec, args->nr_local *
                           sizeof(struct rds_iovec)))
                return -EFAULT;
        iov->len = args->nr_local;

        /* figure out the number of pages in the vector */
        for (i = 0; i < args->nr_local; i++, vec++) {

                nr_pages = rds_pages_in_vec(vec);
                if (nr_pages == 0)
                        return -EINVAL;

                tot_pages += nr_pages;

                /*
                 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
                 * so tot_pages cannot overflow without first going negative.
                 */
                if (tot_pages < 0)
                        return -EINVAL;
        }

        return tot_pages * sizeof(struct scatterlist);
}

/*
 * The application asks for an RDMA transfer.
 * Extract all arguments and set up the rdma_op.
 */
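/*
 * A hypothetical sender would attach the arguments below as a control
 * message on sendmsg(), roughly like this (illustrative only; address
 * setup and error handling omitted):
 *
 *      char ctl[CMSG_SPACE(sizeof(struct rds_rdma_args))];
 *      struct msghdr msg = {
 *              .msg_control    = ctl,
 *              .msg_controllen = sizeof(ctl),
 *      };
 *      struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *      cmsg->cmsg_level = SOL_RDS;
 *      cmsg->cmsg_type  = RDS_CMSG_RDMA_ARGS;
 *      cmsg->cmsg_len   = CMSG_LEN(sizeof(struct rds_rdma_args));
 *      memcpy(CMSG_DATA(cmsg), &rdma_args, sizeof(rdma_args));
 *      sendmsg(fd, &msg, 0);
 */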
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
                       struct cmsghdr *cmsg,
                       struct rds_iov_vector *vec)
{
        struct rds_rdma_args *args;
        struct rm_rdma_op *op = &rm->rdma;
        int nr_pages;
        unsigned int nr_bytes;
        struct page **pages = NULL;
        struct rds_iovec *iovs;
        unsigned int i, j;
        int ret = 0;

        if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
            || rm->rdma.op_active)
                return -EINVAL;

        args = CMSG_DATA(cmsg);

        if (ipv6_addr_any(&rs->rs_bound_addr)) {
                ret = -ENOTCONN; /* XXX not a great errno */
                goto out_ret;
        }

        if (args->nr_local > UIO_MAXIOV) {
                ret = -EMSGSIZE;
                goto out_ret;
        }

        if (vec->len != args->nr_local) {
                ret = -EINVAL;
                goto out_ret;
        }

        iovs = vec->iov;

        nr_pages = rds_rdma_pages(iovs, args->nr_local);
        if (nr_pages < 0) {
                ret = -EINVAL;
                goto out_ret;
        }

        pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
        if (!pages) {
                ret = -ENOMEM;
                goto out_ret;
        }

        op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
        op->op_fence = !!(args->flags & RDS_RDMA_FENCE);
        op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
        op->op_silent = !!(args->flags & RDS_RDMA_SILENT);
        op->op_active = 1;
        op->op_recverr = rs->rs_recverr;
        WARN_ON(!nr_pages);
        op->op_sg = rds_message_alloc_sgs(rm, nr_pages, &ret);
        if (!op->op_sg)
                goto out_pages;

        if (op->op_notify || op->op_recverr) {
                /* We allocate an uninitialized notifier here, because
                 * we don't want to do that in the completion handler. We
                 * would have to use GFP_ATOMIC there, and don't want to deal
                 * with failed allocations.
                 */
                op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
                if (!op->op_notifier) {
                        ret = -ENOMEM;
                        goto out_pages;
                }
                op->op_notifier->n_user_token = args->user_token;
                op->op_notifier->n_status = RDS_RDMA_SUCCESS;
        }

        /* The cookie contains the R_Key of the remote memory region, and
         * optionally an offset into it. This is how we implement RDMA into
         * unaligned memory.
         * When setting up the RDMA, we need to add that offset to the
         * destination address (which is really an offset into the MR).
         * FIXME: We may want to move this into ib_rdma.c
         */
        op->op_rkey = rds_rdma_cookie_key(args->cookie);
        op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);

        nr_bytes = 0;

        rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
               (unsigned long long)args->nr_local,
               (unsigned long long)args->remote_vec.addr,
               op->op_rkey);

        for (i = 0; i < args->nr_local; i++) {
                struct rds_iovec *iov = &iovs[i];
                /* no need to check; rds_rdma_pages() verified nr is nonzero */
                unsigned int nr = rds_pages_in_vec(iov);

                rs->rs_user_addr = iov->addr;
                rs->rs_user_bytes = iov->bytes;

                /* If it's a WRITE operation, we want to pin the pages for reading.
                 * If it's a READ operation, we need to pin the pages for writing.
                 */
                ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
                if (ret < 0)
                        goto out_pages;
                else
                        ret = 0;

                rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n",
                         nr_bytes, nr, iov->bytes, iov->addr);

                nr_bytes += iov->bytes;

                for (j = 0; j < nr; j++) {
                        unsigned int offset = iov->addr & ~PAGE_MASK;
                        struct scatterlist *sg;

                        sg = &op->op_sg[op->op_nents + j];
                        sg_set_page(sg, pages[j],
                                        min_t(unsigned int, iov->bytes, PAGE_SIZE - offset),
                                        offset);

                        rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n",
                               sg->offset, sg->length, iov->addr, iov->bytes);

                        iov->addr += sg->length;
                        iov->bytes -= sg->length;
                }

                op->op_nents += nr;
        }

        if (nr_bytes > args->remote_vec.bytes) {
                rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
                                nr_bytes,
                                (unsigned int) args->remote_vec.bytes);
                ret = -EINVAL;
                goto out_pages;
        }
        op->op_bytes = nr_bytes;

out_pages:
        kfree(pages);
out_ret:
        if (ret)
                rds_rdma_free_op(op);
        else
                rds_stats_inc(s_send_rdma);

        return ret;
}

/*
 * The application wants us to pass an RDMA destination (aka MR)
 * to the remote.
 */
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
                          struct cmsghdr *cmsg)
{
        unsigned long flags;
        struct rds_mr *mr;
        u32 r_key;
        int err = 0;

        if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
            rm->m_rdma_cookie != 0)
                return -EINVAL;

        memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));

        /* We are reusing a previously mapped MR here. Most likely, the
         * application has written to the buffer, so we need to explicitly
         * flush those writes to RAM. Otherwise the HCA may not see them
         * when doing a DMA from that buffer.
         */
        r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);

        spin_lock_irqsave(&rs->rs_rdma_lock, flags);
        mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
        if (!mr)
                err = -EINVAL;  /* invalid r_key */
        else
                refcount_inc(&mr->r_refcount);
        spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

        if (mr) {
                mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
                rm->rdma.op_rdma_mr = mr;
        }
        return err;
}

/*
 * The application passes us an address range it wants to enable RDMA
 * to/from. We map the area, and save the <R_Key,offset> pair
 * in rm->m_rdma_cookie. This causes it to be sent along to the peer
 * in an extension header.
 */
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
                          struct cmsghdr *cmsg)
{
        if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
            rm->m_rdma_cookie != 0)
                return -EINVAL;

        return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie,
                              &rm->rdma.op_rdma_mr, rm->m_conn_path);
}

/*
 * Fill in rds_message for an atomic request.
 */
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
                    struct cmsghdr *cmsg)
{
        struct page *page = NULL;
        struct rds_atomic_args *args;
        int ret = 0;

        if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args))
         || rm->atomic.op_active)
                return -EINVAL;

        args = CMSG_DATA(cmsg);

        /* Nonmasked & masked cmsg ops converted to masked hw ops */
        switch (cmsg->cmsg_type) {
        case RDS_CMSG_ATOMIC_FADD:
                rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
                rm->atomic.op_m_fadd.add = args->fadd.add;
                rm->atomic.op_m_fadd.nocarry_mask = 0;
                break;
        case RDS_CMSG_MASKED_ATOMIC_FADD:
                rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
                rm->atomic.op_m_fadd.add = args->m_fadd.add;
                rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask;
                break;
        case RDS_CMSG_ATOMIC_CSWP:
                rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
                rm->atomic.op_m_cswp.compare = args->cswp.compare;
                rm->atomic.op_m_cswp.swap = args->cswp.swap;
                rm->atomic.op_m_cswp.compare_mask = ~0;
                rm->atomic.op_m_cswp.swap_mask = ~0;
                break;
        case RDS_CMSG_MASKED_ATOMIC_CSWP:
                rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
                rm->atomic.op_m_cswp.compare = args->m_cswp.compare;
                rm->atomic.op_m_cswp.swap = args->m_cswp.swap;
                rm->atomic.op_m_cswp.compare_mask = args->m_cswp.compare_mask;
                rm->atomic.op_m_cswp.swap_mask = args->m_cswp.swap_mask;
                break;
        default:
                BUG(); /* should never happen */
        }

        rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
        rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
        rm->atomic.op_active = 1;
        rm->atomic.op_recverr = rs->rs_recverr;
        rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1, &ret);
        if (!rm->atomic.op_sg)
                goto err;

        /* verify the local address is 8-byte aligned */
        if (args->local_addr & 0x7) {
                ret = -EFAULT;
                goto err;
        }

        ret = rds_pin_pages(args->local_addr, 1, &page, 1);
        if (ret != 1)
                goto err;
        ret = 0;

        sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr));

        if (rm->atomic.op_notify || rm->atomic.op_recverr) {
                /* We allocate an uninitialized notifier here, because
                 * we don't want to do that in the completion handler. We
                 * would have to use GFP_ATOMIC there, and don't want to deal
                 * with failed allocations.
                 */
                rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL);
                if (!rm->atomic.op_notifier) {
                        ret = -ENOMEM;
                        goto err;
                }

                rm->atomic.op_notifier->n_user_token = args->user_token;
                rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS;
        }

        rm->atomic.op_rkey = rds_rdma_cookie_key(args->cookie);
        rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie);

        return ret;
err:
        if (page)
                put_page(page);
        rm->atomic.op_active = 0;
        kfree(rm->atomic.op_notifier);

        return ret;
}