linux/drivers/infiniband/hw/mlx4/mr.c
/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <rdma/ib_user_verbs.h>

#include "mlx4_ib.h"

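/*
 * Translate IB access flags into mlx4 MPT permission bits; local read
 * access is always granted.
 */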
static u32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX4_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX4_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX4_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX4_PERM_LOCAL_WRITE  : 0) |
	       (acc & IB_ACCESS_MW_BIND       ? MLX4_PERM_BIND_MW      : 0) |
	       MLX4_PERM_LOCAL_READ;
}

static enum mlx4_mw_type to_mlx4_type(enum ib_mw_type type)
{
	switch (type) {
	case IB_MW_TYPE_1:	return MLX4_MW_TYPE_1;
	case IB_MW_TYPE_2:	return MLX4_MW_TYPE_2;
	default:		return -1;
	}
}

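/*
 * Register a DMA memory region that covers the whole address space
 * (iova 0, length ~0ull), i.e. the classic "DMA MR" for local access.
 */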
struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx4_ib_mr *mr;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0,
			    ~0ull, convert_access(acc), 0, 0, &mr->mmr);
	if (err)
		goto err_free;

	err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr);
	if (err)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

enum {
	MLX4_MAX_MTT_SHIFT = 31
};

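/*
 * Write the MTT entries that cover one contiguous block of DMA memory.
 * The block is expanded so that both its start and its end are aligned to
 * mtt_size, and the entries are batched into a page-sized buffer that is
 * flushed to the device with mlx4_write_mtt() whenever it fills up.
 */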
static int mlx4_ib_umem_write_mtt_block(struct mlx4_ib_dev *dev,
					struct mlx4_mtt *mtt,
					u64 mtt_size, u64 mtt_shift, u64 len,
					u64 cur_start_addr, u64 *pages,
					int *start_index, int *npages)
{
	u64 cur_end_addr = cur_start_addr + len;
	u64 cur_end_addr_aligned = 0;
	u64 mtt_entries;
	int err = 0;
	int k;
	len += (cur_start_addr & (mtt_size - 1ULL));
	cur_end_addr_aligned = round_up(cur_end_addr, mtt_size);
	len += (cur_end_addr_aligned - cur_end_addr);
	if (len & (mtt_size - 1ULL)) {
		pr_warn("write_block: len %llx is not aligned to mtt_size %llx\n",
			len, mtt_size);
		return -EINVAL;
	}

	mtt_entries = (len >> mtt_shift);

	/*
	 * Align the MTT start address to the mtt_size.
	 * Required to handle cases when the MR starts in the middle of an MTT
	 * record. Was not required in old code since the physical addresses
	 * provided by the dma subsystem were page aligned, which was also the
	 * MTT size.
	 */
	cur_start_addr = round_down(cur_start_addr, mtt_size);
	/* A new block is started ... */
	for (k = 0; k < mtt_entries; ++k) {
		pages[*npages] = cur_start_addr + (mtt_size * k);
		(*npages)++;
		/*
		 * Be friendly to mlx4_write_mtt() and pass it chunks of
		 * appropriate size.
		 */
		if (*npages == PAGE_SIZE / sizeof(u64)) {
			err = mlx4_write_mtt(dev->dev, mtt, *start_index,
					     *npages, pages);
			if (err)
				return err;

			(*start_index) += *npages;
			*npages = 0;
		}
	}

	return 0;
}

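/*
 * Return the log2 of the largest power of two that divides ptr, i.e. the
 * index of its lowest set bit; for example alignment_of(0x3000) is 12.
 * The result for ptr == 0 is undefined (as ilog2(0) is).
 */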
static inline u64 alignment_of(u64 ptr)
{
	return ilog2(ptr & (~(ptr - 1)));
}

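/*
 * Return the largest block shift (MTT page shift) that is compatible with
 * both the start address of the next block and the end address of the
 * current block, never larger than the shift passed in.
 */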
static int mlx4_ib_umem_calc_block_mtt(u64 next_block_start,
				       u64 current_block_end,
				       u64 block_shift)
{
	/* Check whether the new block is aligned as well as the previous
	 * block: its address must have zeros in the low block_shift bits.
	 */
	if ((next_block_start & ((1ULL << block_shift) - 1ULL)) != 0)
		/*
		 * It is not as well aligned as the previous block - reduce
		 * the mtt size accordingly. Here we take the lowest bit
		 * which is 1.
		 */
		block_shift = alignment_of(next_block_start);

	/*
	 * Check whether the end of the previous block is aligned as well
	 * as the start of the block.
	 */
	if ((current_block_end & ((1ULL << block_shift) - 1ULL)) != 0)
		/*
		 * It is not as well aligned as the start of the block -
		 * reduce the mtt size accordingly.
		 */
		block_shift = alignment_of(current_block_end);

	return block_shift;
}

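/*
 * Write the MTT entries for a whole umem: walk the DMA scatterlist, merge
 * physically contiguous entries into blocks, and emit the MTT entries for
 * each block through mlx4_ib_umem_write_mtt_block().
 */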
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
			   struct ib_umem *umem)
{
	u64 *pages;
	u64 len = 0;
	int err = 0;
	u64 mtt_size;
	u64 cur_start_addr = 0;
	u64 mtt_shift;
	int start_index = 0;
	int npages = 0;
	struct scatterlist *sg;
	int i;

	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	mtt_shift = mtt->page_shift;
	mtt_size = 1ULL << mtt_shift;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
		if (cur_start_addr + len == sg_dma_address(sg)) {
			/* still the same block */
			len += sg_dma_len(sg);
			continue;
		}
		/*
		 * A new block is started ...
		 * If len is misaligned, write an extra mtt entry to cover the
		 * misaligned area (round up the division)
		 */
		err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,
						   mtt_shift, len,
						   cur_start_addr,
						   pages, &start_index,
						   &npages);
		if (err)
			goto out;

		cur_start_addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
	}

	/* Handle the last block */
	if (len > 0) {
		/*
		 * If len is misaligned, write an extra mtt entry to cover
		 * the misaligned area (round up the division)
		 */
		err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,
						   mtt_shift, len,
						   cur_start_addr, pages,
						   &start_index, &npages);
		if (err)
			goto out;
	}

	if (npages)
		err = mlx4_write_mtt(dev->dev, mtt, start_index, npages, pages);

out:
	free_page((unsigned long) pages);
	return err;
}

/*
 * Calculate the optimal mtt size based on contiguous pages.
 * The function also accounts for pages that are not aligned to the
 * calculated mtt_size: if the first or the last chunk is not aligned to
 * mtt_size, the non-aligned part is added to the total length before the
 * number of MTT entries is computed.  Chunks in the middle are already
 * handled as part of the mtt shift calculation for both their start and
 * end addresses.
 * The function returns the resulting block shift and stores the number of
 * required MTT entries in *num_of_mtts.
 */
int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
				       int *num_of_mtts)
{
	u64 block_shift = MLX4_MAX_MTT_SHIFT;
	u64 min_shift = PAGE_SHIFT;
	u64 last_block_aligned_end = 0;
	u64 current_block_start = 0;
	u64 first_block_start = 0;
	u64 current_block_len = 0;
	u64 last_block_end = 0;
	struct scatterlist *sg;
	u64 current_block_end;
	u64 misalignment_bits;
	u64 next_block_start;
	u64 total_len = 0;
	int i;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
		/*
		 * Initialization - save the first chunk start as the
		 * current_block_start - block means contiguous pages.
		 */
		if (current_block_len == 0 && current_block_start == 0) {
			current_block_start = sg_dma_address(sg);
			first_block_start = current_block_start;
			/*
			 * Find the bits that are different between the physical
			 * address and the virtual address for the start of the
			 * MR.
			 * umem_get aligned the start_va to a page boundary.
			 * Therefore, we need to align the start va to the same
			 * boundary.
			 * misalignment_bits is needed to handle the case of a
			 * single memory region. In this case, the rest of the
			 * logic will not reduce the block size.  If we use a
			 * block size which is bigger than the alignment of the
			 * misalignment bits, we might use the virtual page
			 * number instead of the physical page number, resulting
			 * in access to the wrong data.
			 */
			misalignment_bits =
				(start_va & (~(((u64)(PAGE_SIZE)) - 1ULL))) ^
				current_block_start;
			block_shift = min(alignment_of(misalignment_bits),
					  block_shift);
		}

		/*
		 * Go over the scatter entries and check if they continue the
		 * previous scatter entry.
		 */
		next_block_start = sg_dma_address(sg);
		current_block_end = current_block_start + current_block_len;
		/* If we have a split (non-contig.) between two blocks */
		if (current_block_end != next_block_start) {
			block_shift = mlx4_ib_umem_calc_block_mtt
					(next_block_start,
					 current_block_end,
					 block_shift);

			/*
			 * If we reached the minimum shift for a 4k page, stop
			 * the loop.
			 */
			if (block_shift <= min_shift)
				goto end;

			/*
			 * Add the length of the block that just ended to the
			 * total; the misaligned parts of the first and last
			 * blocks are accounted for after the loop.
			 */
			total_len += current_block_len;

			/* Start a new block */
			current_block_start = next_block_start;
			current_block_len = sg_dma_len(sg);
			continue;
		}
		/* The scatter entry is another part of the current block,
		 * increase the block size.
		 * An entry in the scatterlist can be larger than 4k (page)
		 * because the dma mapping can merge some blocks together.
		 */
		current_block_len += sg_dma_len(sg);
	}

	/* Account for the last block in the total len */
	total_len += current_block_len;
	/* Add to the first block the misalignment that it suffers from. */
	total_len += (first_block_start & ((1ULL << block_shift) - 1ULL));
	last_block_end = current_block_start + current_block_len;
	last_block_aligned_end = round_up(last_block_end, 1ULL << block_shift);
	total_len += (last_block_aligned_end - last_block_end);

	if (total_len & ((1ULL << block_shift) - 1ULL))
		pr_warn("misaligned total length detected (%llu, %llu)!\n",
			total_len, block_shift);

	*num_of_mtts = total_len >> block_shift;
end:
	if (block_shift < min_shift) {
		/*
		 * If the block shift is less than the minimum, warn and
		 * return the minimum shift.
		 */
		pr_warn("umem_calc_optimal_mtt_size - unexpected shift %lld\n",
			block_shift);

		block_shift = min_shift;
	}
	return block_shift;
}

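/*
 * Pin the userspace memory for an MR.  A thin wrapper around ib_umem_get()
 * that may add IB_ACCESS_LOCAL_WRITE when the underlying VMA is writable,
 * so that a later rereg can upgrade the access rights without pinning the
 * memory again.
 */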
static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start,
					u64 length, int access_flags)
{
	/*
	 * Force registering the memory as writable if the underlying pages
	 * are writable.  This is so rereg can change the access permissions
	 * from readable to writable without having to run through ib_umem_get
	 * again.
	 */
	if (!ib_access_writable(access_flags)) {
		struct vm_area_struct *vma;

		down_read(&current->mm->mmap_sem);
		/*
		 * FIXME: Ideally this would iterate over all the vmas that
		 * cover the memory, but for now it requires a single vma to
		 * entirely cover the MR to support RO mappings.
		 */
		vma = find_vma(current->mm, start);
		if (vma && vma->vm_end >= start + length &&
		    vma->vm_start <= start) {
			if (vma->vm_flags & VM_WRITE)
				access_flags |= IB_ACCESS_LOCAL_WRITE;
		} else {
			access_flags |= IB_ACCESS_LOCAL_WRITE;
		}

		up_read(&current->mm->mmap_sem);
	}

	return ib_umem_get(udata, start, length, access_flags, 0);
}

struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mr *mr;
	int shift;
	int err;
	int n;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = mlx4_get_umem_mr(udata, start, length, access_flags);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		goto err_free;
	}

	n = ib_umem_page_count(mr->umem);
	shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);

	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
			    convert_access(access_flags), n, shift, &mr->mmr);
	if (err)
		goto err_umem;

	err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
	if (err)
		goto err_mr;

	err = mlx4_mr_enable(dev->dev, &mr->mmr);
	if (err)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	mr->ibmr.length = length;
	mr->ibmr.iova = virt_addr;
	mr->ibmr.page_size = 1U << shift;

	return &mr->ibmr;

err_mr:
	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
			  u64 start, u64 length, u64 virt_addr,
			  int mr_access_flags, struct ib_pd *pd,
			  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(mr->device);
	struct mlx4_ib_mr *mmr = to_mmr(mr);
	struct mlx4_mpt_entry *mpt_entry;
	struct mlx4_mpt_entry **pmpt_entry = &mpt_entry;
	int err;

	/* Since we synchronize this call and mlx4_ib_dereg_mr via uverbs,
	 * we assume that the calls can't run concurrently. Otherwise, a
	 * race exists.
	 */
	err = mlx4_mr_hw_get_mpt(dev->dev, &mmr->mmr, &pmpt_entry);

	if (err)
		return err;

	if (flags & IB_MR_REREG_PD) {
		err = mlx4_mr_hw_change_pd(dev->dev, *pmpt_entry,
					   to_mpd(pd)->pdn);

		if (err)
			goto release_mpt_entry;
	}

	if (flags & IB_MR_REREG_ACCESS) {
		if (ib_access_writable(mr_access_flags) &&
		    !mmr->umem->writable) {
			err = -EPERM;
			goto release_mpt_entry;
		}

		err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
					       convert_access(mr_access_flags));

		if (err)
			goto release_mpt_entry;
	}

	if (flags & IB_MR_REREG_TRANS) {
		int shift;
		int n;

		mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
		ib_umem_release(mmr->umem);
		mmr->umem = mlx4_get_umem_mr(udata, start, length,
					     mr_access_flags);
		if (IS_ERR(mmr->umem)) {
			err = PTR_ERR(mmr->umem);
			/* Prevent mlx4_ib_dereg_mr from freeing an invalid pointer */
			mmr->umem = NULL;
			goto release_mpt_entry;
		}
		n = ib_umem_page_count(mmr->umem);
		shift = PAGE_SHIFT;

		err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
					      virt_addr, length, n, shift,
					      *pmpt_entry);
		if (err) {
			ib_umem_release(mmr->umem);
			goto release_mpt_entry;
		}
		mmr->mmr.iova       = virt_addr;
		mmr->mmr.size       = length;

		err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem);
		if (err) {
			mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
			ib_umem_release(mmr->umem);
			goto release_mpt_entry;
		}
	}

	/* If we couldn't transfer the MR to the HCA, just remember to
	 * return a failure. But dereg_mr will free the resources.
	 */
	err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry);
	if (!err && flags & IB_MR_REREG_ACCESS)
		mmr->mmr.access = mr_access_flags;

release_mpt_entry:
	mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry);

	return err;
}

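/*
 * Allocate the page list used by a fast-registration MR.  A whole zeroed
 * page is used so the list cannot cross a page boundary, and it is DMA
 * mapped towards the device.
 */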
static int
mlx4_alloc_priv_pages(struct ib_device *device,
		      struct mlx4_ib_mr *mr,
		      int max_pages)
{
	int ret;

	/* Ensure that size is aligned to DMA cacheline
	 * requirements.
	 * max_pages is limited to MLX4_MAX_FAST_REG_PAGES
	 * so page_map_size will never cross PAGE_SIZE.
	 */
	mr->page_map_size = roundup(max_pages * sizeof(u64),
				    MLX4_MR_PAGES_ALIGN);

	/* Prevent cross page boundary allocation. */
	mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
	if (!mr->pages)
		return -ENOMEM;

	mr->page_map = dma_map_single(device->dev.parent, mr->pages,
				      mr->page_map_size, DMA_TO_DEVICE);

	if (dma_mapping_error(device->dev.parent, mr->page_map)) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;

err:
	free_page((unsigned long)mr->pages);
	return ret;
}

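/* Unmap and free the fast-registration page list, if one was allocated. */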
static void
mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
{
	if (mr->pages) {
		struct ib_device *device = mr->ibmr.device;

		dma_unmap_single(device->dev.parent, mr->page_map,
				 mr->page_map_size, DMA_TO_DEVICE);
		free_page((unsigned long)mr->pages);
		mr->pages = NULL;
	}
}

int mlx4_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct mlx4_ib_mr *mr = to_mmr(ibmr);
	int ret;

	mlx4_free_priv_pages(mr);

	ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
	if (ret)
		return ret;
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);

	return 0;
}

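/* Allocate a type 1 or type 2 memory window on the given PD. */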
struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mw *mw;
	int err;

	mw = kmalloc(sizeof(*mw), GFP_KERNEL);
	if (!mw)
		return ERR_PTR(-ENOMEM);

	err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn,
			    to_mlx4_type(type), &mw->mmw);
	if (err)
		goto err_free;

	err = mlx4_mw_enable(dev->dev, &mw->mmw);
	if (err)
		goto err_mw;

	mw->ibmw.rkey = mw->mmw.key;

	return &mw->ibmw;

err_mw:
	mlx4_mw_free(dev->dev, &mw->mmw);

err_free:
	kfree(mw);

	return ERR_PTR(err);
}

int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
{
	struct mlx4_ib_mw *mw = to_mmw(ibmw);

	mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw);
	kfree(mw);

	return 0;
}

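/*
 * Allocate a fast-registration MR.  Only IB_MR_TYPE_MEM_REG is supported,
 * with at most MLX4_MAX_FAST_REG_PAGES pages.
 */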
struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mr *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > MLX4_MAX_FAST_REG_PAGES)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, 0, 0, 0,
			    max_num_sg, 0, &mr->mmr);
	if (err)
		goto err_free;

	err = mlx4_alloc_priv_pages(pd->device, mr, max_num_sg);
	if (err)
		goto err_free_mr;

	mr->max_pages = max_num_sg;
	err = mlx4_mr_enable(dev->dev, &mr->mmr);
	if (err)
		goto err_free_pl;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_free_pl:
	mr->ibmr.device = pd->device;
	mlx4_free_priv_pages(mr);
err_free_mr:
	(void) mlx4_mr_free(dev->dev, &mr->mmr);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}

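/* Allocate an FMR (fast memory region) according to the given attributes. */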
struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
				 struct ib_fmr_attr *fmr_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_fmr *fmr;
	int err = -ENOMEM;

	fmr = kmalloc(sizeof(*fmr), GFP_KERNEL);
	if (!fmr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_fmr_alloc(dev->dev, to_mpd(pd)->pdn, convert_access(acc),
			     fmr_attr->max_pages, fmr_attr->max_maps,
			     fmr_attr->page_shift, &fmr->mfmr);
	if (err)
		goto err_free;

	err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr);
	if (err)
		goto err_mr;

	fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key;

	return &fmr->ibfmr;

err_mr:
	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);

err_free:
	kfree(fmr);

	return ERR_PTR(err);
}

int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
			 int npages, u64 iova)
{
	struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
	struct mlx4_ib_dev *dev = to_mdev(ifmr->ibfmr.device);

	return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova,
				 &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
}

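/*
 * Unmap a list of FMRs.  All FMRs on the list must belong to the same mlx4
 * device; their MPTs are invalidated and then flushed with a single
 * SYNC_TPT firmware command.
 */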
int mlx4_ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *ibfmr;
	int err;
	struct mlx4_dev *mdev = NULL;

	list_for_each_entry(ibfmr, fmr_list, list) {
		if (mdev && to_mdev(ibfmr->device)->dev != mdev)
			return -EINVAL;
		mdev = to_mdev(ibfmr->device)->dev;
	}

	if (!mdev)
		return 0;

	list_for_each_entry(ibfmr, fmr_list, list) {
		struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);

		mlx4_fmr_unmap(mdev, &ifmr->mfmr, &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
	}

	/*
	 * Make sure all MPT status updates are visible before issuing
	 * SYNC_TPT firmware command.
	 */
	wmb();

	err = mlx4_SYNC_TPT(mdev);
	if (err)
		pr_warn("SYNC_TPT error %d when unmapping FMRs\n", err);

	return 0;
}

int mlx4_ib_fmr_dealloc(struct ib_fmr *ibfmr)
{
	struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
	struct mlx4_ib_dev *dev = to_mdev(ibfmr->device);
	int err;

	err = mlx4_fmr_free(dev->dev, &ifmr->mfmr);

	if (!err)
		kfree(ifmr);

	return err;
}

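/*
 * Callback for ib_sg_to_pages(): store one page address, tagged with the
 * MTT "present" flag, in the MR's page list.
 */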
static int mlx4_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx4_ib_mr *mr = to_mmr(ibmr);

	if (unlikely(mr->npages == mr->max_pages))
		return -ENOMEM;

	mr->pages[mr->npages++] = cpu_to_be64(addr | MLX4_MTT_FLAG_PRESENT);

	return 0;
}

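/*
 * Map a scatterlist onto the MR's page list.  The page list is synced for
 * CPU access, filled via ib_sg_to_pages()/mlx4_set_page(), and then synced
 * back for device access.
 */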
int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct mlx4_ib_mr *mr = to_mmr(ibmr);
	int rc;

	mr->npages = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
				   mr->page_map_size, DMA_TO_DEVICE);

	rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);

	ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
				      mr->page_map_size, DMA_TO_DEVICE);

	return rc;
}