linux/drivers/infiniband/hw/mlx4/mr.c
/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <rdma/ib_user_verbs.h>

#include "mlx4_ib.h"

static u32 convert_access(int acc)
{
        return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX4_PERM_ATOMIC       : 0) |
               (acc & IB_ACCESS_REMOTE_WRITE  ? MLX4_PERM_REMOTE_WRITE : 0) |
               (acc & IB_ACCESS_REMOTE_READ   ? MLX4_PERM_REMOTE_READ  : 0) |
               (acc & IB_ACCESS_LOCAL_WRITE   ? MLX4_PERM_LOCAL_WRITE  : 0) |
               (acc & IB_ACCESS_MW_BIND       ? MLX4_PERM_BIND_MW      : 0) |
               MLX4_PERM_LOCAL_READ;
}

static enum mlx4_mw_type to_mlx4_type(enum ib_mw_type type)
{
        switch (type) {
        case IB_MW_TYPE_1:      return MLX4_MW_TYPE_1;
        case IB_MW_TYPE_2:      return MLX4_MW_TYPE_2;
        default:                return -1;
        }
}

struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct mlx4_ib_mr *mr;
        int err;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0,
                            ~0ull, convert_access(acc), 0, 0, &mr->mmr);
        if (err)
                goto err_free;

        err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr);
        if (err)
                goto err_mr;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
        mr->umem = NULL;

        return &mr->ibmr;

err_mr:
        (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_free:
        kfree(mr);

        return ERR_PTR(err);
}

enum {
        MLX4_MAX_MTT_SHIFT = 31
};

static int mlx4_ib_umem_write_mtt_block(struct mlx4_ib_dev *dev,
                                        struct mlx4_mtt *mtt,
                                        u64 mtt_size, u64 mtt_shift, u64 len,
                                        u64 cur_start_addr, u64 *pages,
                                        int *start_index, int *npages)
{
        u64 cur_end_addr = cur_start_addr + len;
        u64 cur_end_addr_aligned = 0;
        u64 mtt_entries;
        int err = 0;
        int k;

        len += (cur_start_addr & (mtt_size - 1ULL));
        cur_end_addr_aligned = round_up(cur_end_addr, mtt_size);
        len += (cur_end_addr_aligned - cur_end_addr);
        if (len & (mtt_size - 1ULL)) {
                pr_warn("write_block: len %llx is not aligned to mtt_size %llx\n",
                        len, mtt_size);
                return -EINVAL;
        }

        mtt_entries = (len >> mtt_shift);

        /*
         * Align the MTT start address to the mtt_size.
         * Required to handle cases when the MR starts in the middle of an MTT
         * record. Was not required in old code since the physical addresses
         * provided by the dma subsystem were page aligned, which was also the
         * MTT size.
         */
        cur_start_addr = round_down(cur_start_addr, mtt_size);
        /* A new block is started ... */
        for (k = 0; k < mtt_entries; ++k) {
                pages[*npages] = cur_start_addr + (mtt_size * k);
                (*npages)++;
                /*
                 * Be friendly to mlx4_write_mtt() and pass it chunks of
                 * appropriate size.
                 */
                if (*npages == PAGE_SIZE / sizeof(u64)) {
                        err = mlx4_write_mtt(dev->dev, mtt, *start_index,
                                             *npages, pages);
                        if (err)
                                return err;

                        (*start_index) += *npages;
                        *npages = 0;
                }
        }

        return 0;
}
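
/*
 * Illustrative example: with a 4 KB mtt_size, a block starting at dma
 * address 0x1200 with len 0x800 is widened above to the aligned range
 * [0x1000, 0x2000), i.e. len becomes 0x1000 and a single MTT entry
 * pointing at 0x1000 is queued in pages[].
 */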

static inline u64 alignment_of(u64 ptr)
{
        return ilog2(ptr & (~(ptr - 1)));
}
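
/*
 * alignment_of() returns the index of the lowest set bit of ptr
 * (ptr & ~(ptr - 1) is equivalent to ptr & -ptr), e.g.
 * alignment_of(0x3000) == 12 and alignment_of(0x10000) == 16.
 * The result is only meaningful for a non-zero ptr.
 */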

static int mlx4_ib_umem_calc_block_mtt(u64 next_block_start,
                                       u64 current_block_end,
                                       u64 block_shift)
{
        /* Check whether the new block is aligned as well as the previous
         * block was: the block address must have zeros in its low
         * block_shift bits.
         */
        if ((next_block_start & ((1ULL << block_shift) - 1ULL)) != 0)
                /*
                 * It is not as well aligned as the previous block - reduce
                 * the mtt size accordingly. Here we take the lowest bit
                 * which is 1.
                 */
                block_shift = alignment_of(next_block_start);

        /*
         * Check whether the end of the previous block is aligned as well as
         * the start of the block.
         */
        if (((current_block_end) & ((1ULL << block_shift) - 1ULL)) != 0)
                /*
                 * It is not as well aligned as the start of the block -
                 * reduce the mtt size accordingly.
                 */
                block_shift = alignment_of(current_block_end);

        return block_shift;
}
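
/*
 * For example, with a current block_shift of 16, a next_block_start of
 * 0x7f241000 is only 4 KB aligned, so mlx4_ib_umem_calc_block_mtt() drops
 * the shift to 12; a current_block_end of 0x7f242000 then leaves it at 12.
 */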

int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
                           struct ib_umem *umem)
{
        u64 *pages;
        u64 len = 0;
        int err = 0;
        u64 mtt_size;
        u64 cur_start_addr = 0;
        u64 mtt_shift;
        int start_index = 0;
        int npages = 0;
        struct scatterlist *sg;
        int i;

        pages = (u64 *) __get_free_page(GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        mtt_shift = mtt->page_shift;
        mtt_size = 1ULL << mtt_shift;

        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
                if (cur_start_addr + len == sg_dma_address(sg)) {
                        /* still the same block */
                        len += sg_dma_len(sg);
                        continue;
                }
                /*
                 * A new block is started ...
                 * If len is misaligned, write an extra mtt entry to cover the
                 * misaligned area (round up the division).
                 */
                err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,
                                                   mtt_shift, len,
                                                   cur_start_addr,
                                                   pages, &start_index,
                                                   &npages);
                if (err)
                        goto out;

                cur_start_addr = sg_dma_address(sg);
                len = sg_dma_len(sg);
        }

        /* Handle the last block */
        if (len > 0) {
                /*
                 * If len is misaligned, write an extra mtt entry to cover
                 * the misaligned area (round up the division).
                 */
                err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,
                                                   mtt_shift, len,
                                                   cur_start_addr, pages,
                                                   &start_index, &npages);
                if (err)
                        goto out;
        }

        if (npages)
                err = mlx4_write_mtt(dev->dev, mtt, start_index, npages, pages);

out:
        free_page((unsigned long) pages);
        return err;
}
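
/*
 * Note: with a 4 KB PAGE_SIZE the scratch page above holds 512 u64
 * addresses, so mlx4_write_mtt() is called once per 512 accumulated MTT
 * entries and once more for the final partial chunk, if any.
 */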

/*
 * Calculate the optimal mtt size based on contiguous pages.
 * The function also returns, via *num_of_mtts, the number of MTT entries
 * needed, accounting for the parts of the first and last chunks that are
 * not aligned to the calculated mtt_size. All chunks in the middle are
 * already handled as part of the mtt shift calculation for both their
 * start and end addresses.
 */
int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
                                       int *num_of_mtts)
{
        u64 block_shift = MLX4_MAX_MTT_SHIFT;
        u64 min_shift = PAGE_SHIFT;
        u64 last_block_aligned_end = 0;
        u64 current_block_start = 0;
        u64 first_block_start = 0;
        u64 current_block_len = 0;
        u64 last_block_end = 0;
        struct scatterlist *sg;
        u64 current_block_end;
        u64 misalignment_bits;
        u64 next_block_start;
        u64 total_len = 0;
        int i;

        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
                /*
                 * Initialization - save the first chunk start as the
                 * current_block_start - block means contiguous pages.
                 */
                if (current_block_len == 0 && current_block_start == 0) {
                        current_block_start = sg_dma_address(sg);
                        first_block_start = current_block_start;
                        /*
                         * Find the bits that are different between the physical
                         * address and the virtual address for the start of the
                         * MR.
                         * umem_get aligned the start_va to a page boundary.
                         * Therefore, we need to align the start va to the same
                         * boundary.
                         * misalignment_bits is needed to handle the case of a
                         * single memory region. In this case, the rest of the
                         * logic will not reduce the block size.  If we use a
                         * block size which is bigger than the alignment of the
                         * misalignment bits, we might use the virtual page
                         * number instead of the physical page number, resulting
                         * in access to the wrong data.
                         */
                        misalignment_bits =
                                (start_va & (~(((u64)(PAGE_SIZE)) - 1ULL))) ^
                                current_block_start;
                        block_shift = min(alignment_of(misalignment_bits),
                                          block_shift);
                }

                /*
                 * Go over the scatter entries and check if they continue the
                 * previous scatter entry.
                 */
                next_block_start = sg_dma_address(sg);
                current_block_end = current_block_start + current_block_len;
                /* If we have a split (non-contig.) between two blocks */
                if (current_block_end != next_block_start) {
                        block_shift = mlx4_ib_umem_calc_block_mtt(next_block_start,
                                                                  current_block_end,
                                                                  block_shift);

                        /*
                         * If we reached the minimum shift for a 4k page, stop
                         * the loop.
                         */
                        if (block_shift <= min_shift)
                                goto end;

                        /*
                         * Add the length of the block that just ended to the
                         * total; the misalignment of the first block is
                         * accounted for after the loop.
                         */
                        total_len += current_block_len;

                        /* Start a new block */
                        current_block_start = next_block_start;
                        current_block_len = sg_dma_len(sg);
                        continue;
                }
                /* The scatter entry is another part of the current block,
                 * increase the block size.
                 * An entry in the scatter list can be larger than 4k (a page)
                 * because the dma mapping may have merged some blocks together.
                 */
                current_block_len += sg_dma_len(sg);
        }

        /* Account for the last block in the total len */
        total_len += current_block_len;
        /* Add to the first block the misalignment that it suffers from. */
        total_len += (first_block_start & ((1ULL << block_shift) - 1ULL));
        last_block_end = current_block_start + current_block_len;
        last_block_aligned_end = round_up(last_block_end, 1ULL << block_shift);
        total_len += (last_block_aligned_end - last_block_end);

        if (total_len & ((1ULL << block_shift) - 1ULL))
                pr_warn("misaligned total length detected (%llu, %llu)!\n",
                        total_len, block_shift);

        *num_of_mtts = total_len >> block_shift;
end:
        if (block_shift < min_shift) {
                /*
                 * If the shift is less than the minimum, print a warning and
                 * return the minimum shift.
                 */
                pr_warn("umem_calc_optimal_mtt_size - unexpected shift %lld\n", block_shift);

                block_shift = min_shift;
        }
        return block_shift;
}
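
/*
 * Worked example: a single physically contiguous 256 KB chunk at dma
 * address 0x7f240000 mapped at a page-aligned start_va of 0x10000000 gives
 * misalignment_bits = 0x6f240000, whose lowest set bit is bit 18, so
 * block_shift is capped at 18; total_len stays 256 KB, *num_of_mtts
 * becomes 1 and the function returns 18.
 */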

static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start,
                                        u64 length, int access_flags)
{
        /*
         * Force registering the memory as writable if the underlying pages
         * are writable.  This is so rereg can change the access permissions
         * from readable to writable without having to run through ib_umem_get
         * again
         */
        if (!ib_access_writable(access_flags)) {
                unsigned long untagged_start = untagged_addr(start);
                struct vm_area_struct *vma;

                down_read(&current->mm->mmap_sem);
                /*
                 * FIXME: Ideally this would iterate over all the vmas that
                 * cover the memory, but for now it requires a single vma to
                 * entirely cover the MR to support RO mappings.
                 */
                vma = find_vma(current->mm, untagged_start);
                if (vma && vma->vm_end >= untagged_start + length &&
                    vma->vm_start <= untagged_start) {
                        if (vma->vm_flags & VM_WRITE)
                                access_flags |= IB_ACCESS_LOCAL_WRITE;
                } else {
                        access_flags |= IB_ACCESS_LOCAL_WRITE;
                }

                up_read(&current->mm->mmap_sem);
        }

        return ib_umem_get(udata, start, length, access_flags, 0);
}
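
/*
 * For example, a read-only registration of a buffer covered by a single
 * writable (VM_WRITE) vma is quietly upgraded with IB_ACCESS_LOCAL_WRITE
 * above, so a later rereg from read-only to writable access does not have
 * to go through ib_umem_get() again.
 */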

struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                  u64 virt_addr, int access_flags,
                                  struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_mr *mr;
        int shift;
        int err;
        int n;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        mr->umem = mlx4_get_umem_mr(udata, start, length, access_flags);
        if (IS_ERR(mr->umem)) {
                err = PTR_ERR(mr->umem);
                goto err_free;
        }

        n = ib_umem_page_count(mr->umem);
        shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);

        err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
                            convert_access(access_flags), n, shift, &mr->mmr);
        if (err)
                goto err_umem;

        err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
        if (err)
                goto err_mr;

        err = mlx4_mr_enable(dev->dev, &mr->mmr);
        if (err)
                goto err_mr;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
        mr->ibmr.length = length;
        mr->ibmr.iova = virt_addr;
        mr->ibmr.page_size = 1U << shift;

        return &mr->ibmr;

err_mr:
        (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_umem:
        ib_umem_release(mr->umem);

err_free:
        kfree(mr);

        return ERR_PTR(err);
}

int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
                          u64 start, u64 length, u64 virt_addr,
                          int mr_access_flags, struct ib_pd *pd,
                          struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(mr->device);
        struct mlx4_ib_mr *mmr = to_mmr(mr);
        struct mlx4_mpt_entry *mpt_entry;
        struct mlx4_mpt_entry **pmpt_entry = &mpt_entry;
        int err;

        /* Since we synchronize this call and mlx4_ib_dereg_mr via uverbs,
         * we assume that the calls can't run concurrently. Otherwise, a
         * race exists.
         */
        err = mlx4_mr_hw_get_mpt(dev->dev, &mmr->mmr, &pmpt_entry);

        if (err)
                return err;

        if (flags & IB_MR_REREG_PD) {
                err = mlx4_mr_hw_change_pd(dev->dev, *pmpt_entry,
                                           to_mpd(pd)->pdn);

                if (err)
                        goto release_mpt_entry;
        }

        if (flags & IB_MR_REREG_ACCESS) {
                if (ib_access_writable(mr_access_flags) &&
                    !mmr->umem->writable) {
                        err = -EPERM;
                        goto release_mpt_entry;
                }

                err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
                                               convert_access(mr_access_flags));

                if (err)
                        goto release_mpt_entry;
        }

        if (flags & IB_MR_REREG_TRANS) {
                int shift;
                int n;

                mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
                ib_umem_release(mmr->umem);
                mmr->umem = mlx4_get_umem_mr(udata, start, length,
                                             mr_access_flags);
                if (IS_ERR(mmr->umem)) {
                        err = PTR_ERR(mmr->umem);
                        /* Prevent mlx4_ib_dereg_mr from freeing an invalid pointer */
                        mmr->umem = NULL;
                        goto release_mpt_entry;
                }
                n = ib_umem_page_count(mmr->umem);
                shift = PAGE_SHIFT;

                err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
                                              virt_addr, length, n, shift,
                                              *pmpt_entry);
                if (err) {
                        ib_umem_release(mmr->umem);
                        goto release_mpt_entry;
                }
                mmr->mmr.iova       = virt_addr;
                mmr->mmr.size       = length;

                err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem);
                if (err) {
                        mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
                        ib_umem_release(mmr->umem);
                        goto release_mpt_entry;
                }
        }

        /* If we couldn't transfer the MR to the HCA, just remember to
         * return a failure. But dereg_mr will free the resources.
         */
        err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry);
        if (!err && flags & IB_MR_REREG_ACCESS)
                mmr->mmr.access = mr_access_flags;

release_mpt_entry:
        mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry);

        return err;
}

static int
mlx4_alloc_priv_pages(struct ib_device *device,
                      struct mlx4_ib_mr *mr,
                      int max_pages)
{
        int ret;

        /* Ensure that size is aligned to DMA cacheline
         * requirements.
         * max_pages is limited to MLX4_MAX_FAST_REG_PAGES
         * so page_map_size will never cross PAGE_SIZE.
         */
        mr->page_map_size = roundup(max_pages * sizeof(u64),
                                    MLX4_MR_PAGES_ALIGN);

        /* Prevent cross page boundary allocation. */
        mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
        if (!mr->pages)
                return -ENOMEM;

        mr->page_map = dma_map_single(device->dev.parent, mr->pages,
                                      mr->page_map_size, DMA_TO_DEVICE);

        if (dma_mapping_error(device->dev.parent, mr->page_map)) {
                ret = -ENOMEM;
                goto err;
        }

        return 0;

err:
        free_page((unsigned long)mr->pages);
        return ret;
}

static void
mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
{
        if (mr->pages) {
                struct ib_device *device = mr->ibmr.device;

                dma_unmap_single(device->dev.parent, mr->page_map,
                                 mr->page_map_size, DMA_TO_DEVICE);
                free_page((unsigned long)mr->pages);
                mr->pages = NULL;
        }
}

int mlx4_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
        struct mlx4_ib_mr *mr = to_mmr(ibmr);
        int ret;

        mlx4_free_priv_pages(mr);

        ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
        if (ret)
                return ret;
        if (mr->umem)
                ib_umem_release(mr->umem);
        kfree(mr);

        return 0;
}

struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
                               struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_mw *mw;
        int err;

        mw = kmalloc(sizeof(*mw), GFP_KERNEL);
        if (!mw)
                return ERR_PTR(-ENOMEM);

        err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn,
                            to_mlx4_type(type), &mw->mmw);
        if (err)
                goto err_free;

        err = mlx4_mw_enable(dev->dev, &mw->mmw);
        if (err)
                goto err_mw;

        mw->ibmw.rkey = mw->mmw.key;

        return &mw->ibmw;

err_mw:
        mlx4_mw_free(dev->dev, &mw->mmw);

err_free:
        kfree(mw);

        return ERR_PTR(err);
}

int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
{
        struct mlx4_ib_mw *mw = to_mmw(ibmw);

        mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw);
        kfree(mw);

        return 0;
}

struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
                               u32 max_num_sg, struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_mr *mr;
        int err;

        if (mr_type != IB_MR_TYPE_MEM_REG ||
            max_num_sg > MLX4_MAX_FAST_REG_PAGES)
                return ERR_PTR(-EINVAL);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, 0, 0, 0,
                            max_num_sg, 0, &mr->mmr);
        if (err)
                goto err_free;

        err = mlx4_alloc_priv_pages(pd->device, mr, max_num_sg);
        if (err)
                goto err_free_mr;

        mr->max_pages = max_num_sg;
        err = mlx4_mr_enable(dev->dev, &mr->mmr);
        if (err)
                goto err_free_pl;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
        mr->umem = NULL;

        return &mr->ibmr;

err_free_pl:
        mr->ibmr.device = pd->device;
        mlx4_free_priv_pages(mr);
err_free_mr:
        (void) mlx4_mr_free(dev->dev, &mr->mmr);
err_free:
        kfree(mr);
        return ERR_PTR(err);
}

struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
                                 struct ib_fmr_attr *fmr_attr)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_fmr *fmr;
        int err = -ENOMEM;

        fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
        if (!fmr)
                return ERR_PTR(-ENOMEM);

        err = mlx4_fmr_alloc(dev->dev, to_mpd(pd)->pdn, convert_access(acc),
                             fmr_attr->max_pages, fmr_attr->max_maps,
                             fmr_attr->page_shift, &fmr->mfmr);
        if (err)
                goto err_free;

        err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr);
        if (err)
                goto err_mr;

        fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key;

        return &fmr->ibfmr;

err_mr:
        (void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);

err_free:
        kfree(fmr);

        return ERR_PTR(err);
}

int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
                         int npages, u64 iova)
{
        struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
        struct mlx4_ib_dev *dev = to_mdev(ifmr->ibfmr.device);

        return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova,
                                 &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
}

int mlx4_ib_unmap_fmr(struct list_head *fmr_list)
{
        struct ib_fmr *ibfmr;
        int err;
        struct mlx4_dev *mdev = NULL;

        list_for_each_entry(ibfmr, fmr_list, list) {
                if (mdev && to_mdev(ibfmr->device)->dev != mdev)
                        return -EINVAL;
                mdev = to_mdev(ibfmr->device)->dev;
        }

        if (!mdev)
                return 0;

        list_for_each_entry(ibfmr, fmr_list, list) {
                struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);

                mlx4_fmr_unmap(mdev, &ifmr->mfmr, &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
        }

        /*
         * Make sure all MPT status updates are visible before issuing
         * SYNC_TPT firmware command.
         */
        wmb();

        err = mlx4_SYNC_TPT(mdev);
        if (err)
                pr_warn("SYNC_TPT error %d when unmapping FMRs\n", err);

        return 0;
}

int mlx4_ib_fmr_dealloc(struct ib_fmr *ibfmr)
{
        struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
        struct mlx4_ib_dev *dev = to_mdev(ibfmr->device);
        int err;

        err = mlx4_fmr_free(dev->dev, &ifmr->mfmr);

        if (!err)
                kfree(ifmr);

        return err;
}

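/*
 * mlx4_set_page() is the per-page callback handed to ib_sg_to_pages() in
 * mlx4_ib_map_mr_sg() below: each dma address is stored big-endian with
 * MLX4_MTT_FLAG_PRESENT set, up to the max_pages the MR was allocated with.
 */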
static int mlx4_set_page(struct ib_mr *ibmr, u64 addr)
{
        struct mlx4_ib_mr *mr = to_mmr(ibmr);

        if (unlikely(mr->npages == mr->max_pages))
                return -ENOMEM;

        mr->pages[mr->npages++] = cpu_to_be64(addr | MLX4_MTT_FLAG_PRESENT);

        return 0;
}

int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
                      unsigned int *sg_offset)
{
        struct mlx4_ib_mr *mr = to_mmr(ibmr);
        int rc;

        mr->npages = 0;

        ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
                                   mr->page_map_size, DMA_TO_DEVICE);

        rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);

        ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
                                      mr->page_map_size, DMA_TO_DEVICE);

        return rc;
}