linux/drivers/infiniband/hw/mlx4/mr.c
/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <rdma/ib_user_verbs.h>

#include "mlx4_ib.h"

static u32 convert_access(int acc)
{
        return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX4_PERM_ATOMIC       : 0) |
               (acc & IB_ACCESS_REMOTE_WRITE  ? MLX4_PERM_REMOTE_WRITE : 0) |
               (acc & IB_ACCESS_REMOTE_READ   ? MLX4_PERM_REMOTE_READ  : 0) |
               (acc & IB_ACCESS_LOCAL_WRITE   ? MLX4_PERM_LOCAL_WRITE  : 0) |
               (acc & IB_ACCESS_MW_BIND       ? MLX4_PERM_BIND_MW      : 0) |
               MLX4_PERM_LOCAL_READ;
}
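
/*
 * Worked example (illustrative): convert_access() ORs in the mlx4 permission
 * bit for every IB access flag that is set and always adds
 * MLX4_PERM_LOCAL_READ, so for instance
 *
 *	convert_access(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ)
 *		== MLX4_PERM_LOCAL_WRITE | MLX4_PERM_REMOTE_READ |
 *		   MLX4_PERM_LOCAL_READ
 */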

static enum mlx4_mw_type to_mlx4_type(enum ib_mw_type type)
{
        switch (type) {
        case IB_MW_TYPE_1:      return MLX4_MW_TYPE_1;
        case IB_MW_TYPE_2:      return MLX4_MW_TYPE_2;
        default:                return -1;
        }
}

struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct mlx4_ib_mr *mr;
        int err;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0,
                            ~0ull, convert_access(acc), 0, 0, &mr->mmr);
        if (err)
                goto err_free;

        err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr);
        if (err)
                goto err_mr;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
        mr->umem = NULL;

        return &mr->ibmr;

err_mr:
        (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_free:
        kfree(mr);

        return ERR_PTR(err);
}
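
/*
 * Illustrative caller sketch (hypothetical, for clarity only): the MR created
 * here covers the whole address space (iova 0, size ~0ull), so its lkey/rkey
 * can be used with any DMA-mapped buffer in the PD.
 *
 *	struct ib_mr *mr = mlx4_ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE |
 *						  IB_ACCESS_REMOTE_READ);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *	lkey = mr->lkey;
 */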

enum {
        MLX4_MAX_MTT_SHIFT = 31
};

static int mlx4_ib_umem_write_mtt_block(struct mlx4_ib_dev *dev,
                                        struct mlx4_mtt *mtt,
                                        u64 mtt_size, u64 mtt_shift, u64 len,
                                        u64 cur_start_addr, u64 *pages,
                                        int *start_index, int *npages)
{
        u64 cur_end_addr = cur_start_addr + len;
        u64 cur_end_addr_aligned = 0;
        u64 mtt_entries;
        int err = 0;
        int k;

        len += (cur_start_addr & (mtt_size - 1ULL));
        cur_end_addr_aligned = round_up(cur_end_addr, mtt_size);
        len += (cur_end_addr_aligned - cur_end_addr);
        if (len & (mtt_size - 1ULL)) {
                pr_warn("write_block: len %llx is not aligned to mtt_size %llx\n",
                        len, mtt_size);
                return -EINVAL;
        }

        mtt_entries = (len >> mtt_shift);

        /*
         * Align the MTT start address to the mtt_size.
         * Required to handle cases when the MR starts in the middle of an MTT
         * record. Was not required in old code since the physical addresses
         * provided by the dma subsystem were page aligned, which was also the
         * MTT size.
         */
        cur_start_addr = round_down(cur_start_addr, mtt_size);
        /* A new block is started ... */
        for (k = 0; k < mtt_entries; ++k) {
                pages[*npages] = cur_start_addr + (mtt_size * k);
                (*npages)++;
                /*
                 * Be friendly to mlx4_write_mtt() and pass it chunks of
                 * appropriate size.
                 */
                if (*npages == PAGE_SIZE / sizeof(u64)) {
                        err = mlx4_write_mtt(dev->dev, mtt, *start_index,
                                             *npages, pages);
                        if (err)
                                return err;

                        (*start_index) += *npages;
                        *npages = 0;
                }
        }

        return 0;
}
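
/*
 * Worked example (illustrative): with 4 KB pages, PAGE_SIZE / sizeof(u64) is
 * 512, so mlx4_ib_umem_write_mtt_block() hands mlx4_write_mtt() the scratch
 * page in batches of 512 MTT entries and resets *npages so the same page can
 * be refilled; any partial remainder is flushed by mlx4_ib_umem_write_mtt()
 * once all blocks have been collected.
 */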

static inline u64 alignment_of(u64 ptr)
{
        return ilog2(ptr & (~(ptr - 1)));
}
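
/*
 * Worked example (illustrative): ptr & ~(ptr - 1) isolates the lowest set
 * bit, so for a non-zero ptr alignment_of() returns the number of trailing
 * zero bits, i.e. the alignment of ptr expressed as a shift:
 *
 *	alignment_of(0x12346000) == 13		(lowest set bit is 0x2000)
 *	alignment_of(0x12348000) == 15		(lowest set bit is 0x8000)
 */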

static int mlx4_ib_umem_calc_block_mtt(u64 next_block_start,
                                       u64 current_block_end,
                                       u64 block_shift)
{
        /*
         * Check whether the start of the new block is aligned at least as
         * well as the previous block: its address must have zeros in all
         * bits below block_shift.
         */
        if ((next_block_start & ((1ULL << block_shift) - 1ULL)) != 0)
                /*
                 * It is not as well aligned as the previous block - reduce
                 * the mtt size accordingly.  Here we take the lowest bit
                 * that is set.
                 */
                block_shift = alignment_of(next_block_start);

        /*
         * Check whether the end of the previous block is aligned as well as
         * the start of that block.
         */
        if (((current_block_end) & ((1ULL << block_shift) - 1ULL)) != 0)
                /*
                 * It is not as well aligned as the start of the block -
                 * reduce the mtt size accordingly.
                 */
                block_shift = alignment_of(current_block_end);

        return block_shift;
}
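
/*
 * Worked example (illustrative): suppose the running block_shift is 16, the
 * previous block ends at 0x12346000 and the next scatter entry starts at
 * 0x12348000.  The new start has a non-zero bit below bit 16, so block_shift
 * drops to alignment_of(0x12348000) == 15; the old end then still has a
 * non-zero bit below bit 15, so block_shift drops again to
 * alignment_of(0x12346000) == 13, i.e. 8 KB MTT pages.
 */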

int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
                           struct ib_umem *umem)
{
        u64 *pages;
        u64 len = 0;
        int err = 0;
        u64 mtt_size;
        u64 cur_start_addr = 0;
        u64 mtt_shift;
        int start_index = 0;
        int npages = 0;
        struct scatterlist *sg;
        int i;

        pages = (u64 *) __get_free_page(GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        mtt_shift = mtt->page_shift;
        mtt_size = 1ULL << mtt_shift;

        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
                if (cur_start_addr + len == sg_dma_address(sg)) {
                        /* still the same block */
                        len += sg_dma_len(sg);
                        continue;
                }
                /*
                 * A new block is started ...
                 * If len is misaligned, write an extra mtt entry to cover the
                 * misaligned area (round up the division).
                 */
                err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,
                                                   mtt_shift, len,
                                                   cur_start_addr,
                                                   pages, &start_index,
                                                   &npages);
                if (err)
                        goto out;

                cur_start_addr = sg_dma_address(sg);
                len = sg_dma_len(sg);
        }

        /* Handle the last block */
        if (len > 0) {
                /*
                 * If len is misaligned, write an extra mtt entry to cover
                 * the misaligned area (round up the division).
                 */
                err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,
                                                   mtt_shift, len,
                                                   cur_start_addr, pages,
                                                   &start_index, &npages);
                if (err)
                        goto out;
        }

        if (npages)
                err = mlx4_write_mtt(dev->dev, mtt, start_index, npages, pages);

out:
        free_page((unsigned long) pages);
        return err;
}

/*
 * Calculate the optimal MTT size (as a shift) based on the contiguous chunks
 * of the mapping, and return via *num_of_mtts the number of MTT entries
 * needed to cover the region.  The first and last chunk may not be aligned
 * to the calculated mtt_size, so their misaligned parts are added to the
 * total as extra entries; chunks in the middle are already accounted for by
 * the shift calculation, which looks at both their start and end addresses.
 * A worked example follows the function.
 */
int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
                                       int *num_of_mtts)
{
        u64 block_shift = MLX4_MAX_MTT_SHIFT;
        u64 min_shift = umem->page_shift;
        u64 last_block_aligned_end = 0;
        u64 current_block_start = 0;
        u64 first_block_start = 0;
        u64 current_block_len = 0;
        u64 last_block_end = 0;
        struct scatterlist *sg;
        u64 current_block_end;
        u64 misalignment_bits;
        u64 next_block_start;
        u64 total_len = 0;
        int i;

        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
                /*
                 * Initialization - save the first chunk start as the
                 * current_block_start - a block means contiguous pages.
                 */
                if (current_block_len == 0 && current_block_start == 0) {
                        current_block_start = sg_dma_address(sg);
                        first_block_start = current_block_start;
                        /*
                         * Find the bits that are different between the
                         * physical address and the virtual address for the
                         * start of the MR.
                         * umem_get aligned the start_va to a page boundary.
                         * Therefore, we need to align the start va to the
                         * same boundary.
                         * misalignment_bits is needed to handle the case of
                         * a single memory region.  In this case, the rest of
                         * the logic will not reduce the block size.  If we
                         * use a block size which is bigger than the alignment
                         * of the misalignment bits, we might use the virtual
                         * page number instead of the physical page number,
                         * resulting in access to the wrong data.
                         */
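                        /*
                         * Illustrative example: if start_va (page aligned) is
                         * 0x7f0012340000 and the first DMA address is
                         * 0x876540000, their XOR is 0x7f0864600000, whose
                         * lowest set bit is bit 21; block_shift is therefore
                         * capped at 21 (2 MB MTT pages) even if the mapping
                         * is physically contiguous beyond that.
                         */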
                        misalignment_bits =
                        (start_va & (~(((u64)(BIT(umem->page_shift))) - 1ULL)))
                        ^ current_block_start;
                        block_shift = min(alignment_of(misalignment_bits),
                                          block_shift);
                }

                /*
                 * Go over the scatter entries and check if they continue the
                 * previous scatter entry.
                 */
                next_block_start = sg_dma_address(sg);
                current_block_end = current_block_start + current_block_len;
                /* If we have a split (non-contig.) between two blocks */
                if (current_block_end != next_block_start) {
                        block_shift = mlx4_ib_umem_calc_block_mtt
                                        (next_block_start,
                                         current_block_end,
                                         block_shift);

                        /*
                         * If we reached the minimum possible shift (the umem
                         * page shift), stop the loop.
                         */
                        if (block_shift <= min_shift)
                                goto end;

                        /*
                         * Add the length of the block that just ended to the
                         * running total; it is used to compute the number of
                         * MTT entries at the end.
                         */
                        total_len += current_block_len;

                        /* Start a new block */
                        current_block_start = next_block_start;
                        current_block_len = sg_dma_len(sg);
                        continue;
                }
                /*
                 * The scatter entry is another part of the current block:
                 * increase the block size.
                 * An entry in the scatterlist can be larger than 4k (a page)
                 * because the dma mapping may have merged some blocks
                 * together.
                 */
                current_block_len += sg_dma_len(sg);
        }

        /* Account for the last block in the total len */
        total_len += current_block_len;
        /* Add to the first block the misalignment that it suffers from. */
        total_len += (first_block_start & ((1ULL << block_shift) - 1ULL));
        last_block_end = current_block_start + current_block_len;
        last_block_aligned_end = round_up(last_block_end, 1ULL << block_shift);
        total_len += (last_block_aligned_end - last_block_end);

        if (total_len & ((1ULL << block_shift) - 1ULL))
                pr_warn("misaligned total length detected (%llu, %llu)!\n",
                        total_len, block_shift);

        *num_of_mtts = total_len >> block_shift;
end:
        if (block_shift < min_shift) {
                /*
                 * If the shift is less than the minimum, warn and return the
                 * minimum shift.
                 */
                pr_warn("umem_calc_optimal_mtt_size - unexpected shift %llu\n",
                        block_shift);

                block_shift = min_shift;
        }
        return block_shift;
}
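
/*
 * Worked example (illustrative): a single contiguous DMA chunk of 0x300000
 * bytes starting at 0x876400000, assuming the VA/PA misalignment check above
 * capped block_shift at 21 (2 MB).  The first block adds no misalignment
 * (0x876400000 is 2 MB aligned), the last block is padded from 0x876700000
 * up to 0x876800000, so total_len becomes 0x400000, *num_of_mtts is set to 2
 * and the function returns 21.
 */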

struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                  u64 virt_addr, int access_flags,
                                  struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_mr *mr;
        int shift;
        int err;
        int n;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        /*
         * Force registering the memory as writable; this is used for memory
         * re-registration.  The HCA still protects the actual access.
         */
        mr->umem = ib_umem_get(pd->uobject->context, start, length,
                               access_flags | IB_ACCESS_LOCAL_WRITE, 0);
        if (IS_ERR(mr->umem)) {
                err = PTR_ERR(mr->umem);
                goto err_free;
        }

        n = ib_umem_page_count(mr->umem);
        shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);

        err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
                            convert_access(access_flags), n, shift, &mr->mmr);
        if (err)
                goto err_umem;

        err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
        if (err)
                goto err_mr;

        err = mlx4_mr_enable(dev->dev, &mr->mmr);
        if (err)
                goto err_mr;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
        mr->ibmr.length = length;
        mr->ibmr.iova = virt_addr;
        mr->ibmr.page_size = 1U << shift;

        return &mr->ibmr;

err_mr:
        (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_umem:
        ib_umem_release(mr->umem);

err_free:
        kfree(mr);

        return ERR_PTR(err);
}

int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
                          u64 start, u64 length, u64 virt_addr,
                          int mr_access_flags, struct ib_pd *pd,
                          struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(mr->device);
        struct mlx4_ib_mr *mmr = to_mmr(mr);
        struct mlx4_mpt_entry *mpt_entry;
        struct mlx4_mpt_entry **pmpt_entry = &mpt_entry;
        int err;

        /* Since we synchronize this call and mlx4_ib_dereg_mr via uverbs,
         * we assume that the calls can't run concurrently. Otherwise, a
         * race exists.
         */
        err = mlx4_mr_hw_get_mpt(dev->dev, &mmr->mmr, &pmpt_entry);
        if (err)
                return err;

        if (flags & IB_MR_REREG_PD) {
                err = mlx4_mr_hw_change_pd(dev->dev, *pmpt_entry,
                                           to_mpd(pd)->pdn);
                if (err)
                        goto release_mpt_entry;
        }

        if (flags & IB_MR_REREG_ACCESS) {
                err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
                                               convert_access(mr_access_flags));
                if (err)
                        goto release_mpt_entry;
        }

        if (flags & IB_MR_REREG_TRANS) {
                int shift;
                int n;

                mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
                ib_umem_release(mmr->umem);
                mmr->umem = ib_umem_get(mr->uobject->context, start, length,
                                        mr_access_flags |
                                        IB_ACCESS_LOCAL_WRITE,
                                        0);
                if (IS_ERR(mmr->umem)) {
                        err = PTR_ERR(mmr->umem);
                        /* Prevent mlx4_ib_dereg_mr from freeing an invalid pointer */
                        mmr->umem = NULL;
                        goto release_mpt_entry;
                }
                n = ib_umem_page_count(mmr->umem);
                shift = mmr->umem->page_shift;

                err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
                                              virt_addr, length, n, shift,
                                              *pmpt_entry);
                if (err) {
                        ib_umem_release(mmr->umem);
                        goto release_mpt_entry;
                }
                mmr->mmr.iova       = virt_addr;
                mmr->mmr.size       = length;

                err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem);
                if (err) {
                        mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
                        ib_umem_release(mmr->umem);
                        goto release_mpt_entry;
                }
        }

        /* If we couldn't transfer the MR to the HCA, just remember to
         * return a failure. But dereg_mr will free the resources.
         */
        err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry);
        if (!err && flags & IB_MR_REREG_ACCESS)
                mmr->mmr.access = mr_access_flags;

release_mpt_entry:
        mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry);

        return err;
}

static int
mlx4_alloc_priv_pages(struct ib_device *device,
                      struct mlx4_ib_mr *mr,
                      int max_pages)
{
        int ret;

        /* Ensure that size is aligned to DMA cacheline
         * requirements.
         * max_pages is limited to MLX4_MAX_FAST_REG_PAGES
         * so page_map_size will never cross PAGE_SIZE.
         */
        mr->page_map_size = roundup(max_pages * sizeof(u64),
                                    MLX4_MR_PAGES_ALIGN);

        /* Prevent cross page boundary allocation. */
        mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
        if (!mr->pages)
                return -ENOMEM;

        mr->page_map = dma_map_single(device->dev.parent, mr->pages,
                                      mr->page_map_size, DMA_TO_DEVICE);

        if (dma_mapping_error(device->dev.parent, mr->page_map)) {
                ret = -ENOMEM;
                goto err;
        }

        return 0;

err:
        free_page((unsigned long)mr->pages);
        return ret;
}
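
/*
 * Sizing sketch (illustrative, assuming MLX4_MAX_FAST_REG_PAGES is 511,
 * MLX4_MR_PAGES_ALIGN is 64 and 4 KB pages): the largest page list is
 * roundup(511 * 8, 64) == 4096 bytes, so the single zeroed page allocated
 * above always has room for it and the DMA mapping never straddles a page
 * boundary.
 */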

static void
mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
{
        if (mr->pages) {
                struct ib_device *device = mr->ibmr.device;

                dma_unmap_single(device->dev.parent, mr->page_map,
                                 mr->page_map_size, DMA_TO_DEVICE);
                free_page((unsigned long)mr->pages);
                mr->pages = NULL;
        }
}

int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
{
        struct mlx4_ib_mr *mr = to_mmr(ibmr);
        int ret;

        mlx4_free_priv_pages(mr);

        ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
        if (ret)
                return ret;
        if (mr->umem)
                ib_umem_release(mr->umem);
        kfree(mr);

        return 0;
}

struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
                               struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_mw *mw;
        int err;

        mw = kmalloc(sizeof(*mw), GFP_KERNEL);
        if (!mw)
                return ERR_PTR(-ENOMEM);

        err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn,
                            to_mlx4_type(type), &mw->mmw);
        if (err)
                goto err_free;

        err = mlx4_mw_enable(dev->dev, &mw->mmw);
        if (err)
                goto err_mw;

        mw->ibmw.rkey = mw->mmw.key;

        return &mw->ibmw;

err_mw:
        mlx4_mw_free(dev->dev, &mw->mmw);

err_free:
        kfree(mw);

        return ERR_PTR(err);
}

int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
{
        struct mlx4_ib_mw *mw = to_mmw(ibmw);

        mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw);
        kfree(mw);

        return 0;
}

struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
                               enum ib_mr_type mr_type,
                               u32 max_num_sg)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_mr *mr;
        int err;

        if (mr_type != IB_MR_TYPE_MEM_REG ||
            max_num_sg > MLX4_MAX_FAST_REG_PAGES)
                return ERR_PTR(-EINVAL);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, 0, 0, 0,
                            max_num_sg, 0, &mr->mmr);
        if (err)
                goto err_free;

        err = mlx4_alloc_priv_pages(pd->device, mr, max_num_sg);
        if (err)
                goto err_free_mr;

        mr->max_pages = max_num_sg;
        err = mlx4_mr_enable(dev->dev, &mr->mmr);
        if (err)
                goto err_free_pl;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
        mr->umem = NULL;

        return &mr->ibmr;

err_free_pl:
        mr->ibmr.device = pd->device;
        mlx4_free_priv_pages(mr);
err_free_mr:
        (void) mlx4_mr_free(dev->dev, &mr->mmr);
err_free:
        kfree(mr);
        return ERR_PTR(err);
}

struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
                                 struct ib_fmr_attr *fmr_attr)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_fmr *fmr;
        int err = -ENOMEM;

        fmr = kmalloc(sizeof(*fmr), GFP_KERNEL);
        if (!fmr)
                return ERR_PTR(-ENOMEM);

        err = mlx4_fmr_alloc(dev->dev, to_mpd(pd)->pdn, convert_access(acc),
                             fmr_attr->max_pages, fmr_attr->max_maps,
                             fmr_attr->page_shift, &fmr->mfmr);
        if (err)
                goto err_free;

        err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr);
        if (err)
                goto err_mr;

        fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key;

        return &fmr->ibfmr;

err_mr:
        (void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);

err_free:
        kfree(fmr);

        return ERR_PTR(err);
}

int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
                         int npages, u64 iova)
{
        struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
        struct mlx4_ib_dev *dev = to_mdev(ifmr->ibfmr.device);

        return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova,
                                 &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
}

int mlx4_ib_unmap_fmr(struct list_head *fmr_list)
{
        struct ib_fmr *ibfmr;
        int err;
        struct mlx4_dev *mdev = NULL;

        list_for_each_entry(ibfmr, fmr_list, list) {
                if (mdev && to_mdev(ibfmr->device)->dev != mdev)
                        return -EINVAL;
                mdev = to_mdev(ibfmr->device)->dev;
        }

        if (!mdev)
                return 0;

        list_for_each_entry(ibfmr, fmr_list, list) {
                struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);

                mlx4_fmr_unmap(mdev, &ifmr->mfmr, &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
        }

        /*
         * Make sure all MPT status updates are visible before issuing
         * SYNC_TPT firmware command.
         */
        wmb();

        err = mlx4_SYNC_TPT(mdev);
        if (err)
                pr_warn("SYNC_TPT error %d when unmapping FMRs\n", err);

        return 0;
}

int mlx4_ib_fmr_dealloc(struct ib_fmr *ibfmr)
{
        struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
        struct mlx4_ib_dev *dev = to_mdev(ibfmr->device);
        int err;

        err = mlx4_fmr_free(dev->dev, &ifmr->mfmr);

        if (!err)
                kfree(ifmr);

        return err;
}

static int mlx4_set_page(struct ib_mr *ibmr, u64 addr)
{
        struct mlx4_ib_mr *mr = to_mmr(ibmr);

        if (unlikely(mr->npages == mr->max_pages))
                return -ENOMEM;

        mr->pages[mr->npages++] = cpu_to_be64(addr | MLX4_MTT_FLAG_PRESENT);

        return 0;
}

int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
                      unsigned int *sg_offset)
{
        struct mlx4_ib_mr *mr = to_mmr(ibmr);
        int rc;

        mr->npages = 0;

        ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
                                   mr->page_map_size, DMA_TO_DEVICE);

        rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);

        ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
                                      mr->page_map_size, DMA_TO_DEVICE);

        return rc;
}
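
/*
 * Illustrative fast-registration flow (hypothetical ULP code, not specific
 * to this driver): the verbs core reaches mlx4_ib_alloc_mr() and
 * mlx4_ib_map_mr_sg() through ib_alloc_mr()/ib_map_mr_sg(), and the page
 * list filled in by mlx4_set_page() is then posted to the HCA with an
 * IB_WR_REG_MR work request.
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
 *	struct ib_reg_wr wr = {};
 *	int n;
 *
 *	n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
 *	if (n < nents)
 *		goto err;
 *	wr.wr.opcode = IB_WR_REG_MR;
 *	wr.mr = mr;
 *	wr.key = mr->rkey;
 *	wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
 *	post &wr.wr on the QP, then use mr->rkey in RDMA requests
 */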