linux/drivers/infiniband/hw/hns/hns_roce_mr.c
/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"

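/*
 * An MR key is an 8-bit left rotation of its 32-bit MPT table index;
 * key_to_hw_index() is the inverse rotation, so the two forms are
 * freely convertible.
 */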
static u32 hw_index_to_key(unsigned long ind)
{
        return (u32)(ind >> 24) | (ind << 8);
}

unsigned long key_to_hw_index(u32 key)
{
        return (key << 24) | (key >> 8);
}

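/*
 * SW2HW_MPT hands an MPT entry over to the hardware, with the mailbox
 * carrying the context to install; HW2SW_MPT reclaims the entry.  For
 * HW2SW a NULL mailbox is allowed: the !mailbox op modifier then
 * presumably signals that no context needs to be returned.
 */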
static int hns_roce_sw2hw_mpt(struct hns_roce_dev *hr_dev,
                              struct hns_roce_cmd_mailbox *mailbox,
                              unsigned long mpt_index)
{
        return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,
                                 HNS_ROCE_CMD_SW2HW_MPT,
                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
                       struct hns_roce_cmd_mailbox *mailbox,
                       unsigned long mpt_index)
{
        return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,
                                 mpt_index, !mailbox, HNS_ROCE_CMD_HW2SW_MPT,
                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

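/*
 * Buddy allocator for MTT segments.  bits[o] is a bitmap of free blocks
 * of 2^o contiguous segments, so an order-o allocation scans bits[o]
 * upwards, takes the first larger free block it finds, and splits it
 * down, marking the unused halves (the "buddies") free at lower orders.
 */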
static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
                                unsigned long *seg)
{
        int o;
        u32 m;

        spin_lock(&buddy->lock);

        for (o = order; o <= buddy->max_order; ++o) {
                if (buddy->num_free[o]) {
                        m = 1 << (buddy->max_order - o);
                        *seg = find_first_bit(buddy->bits[o], m);
                        if (*seg < m)
                                goto found;
                }
        }
        spin_unlock(&buddy->lock);
        return -1;

found:
        clear_bit(*seg, buddy->bits[o]);
        --buddy->num_free[o];

        while (o > order) {
                --o;
                *seg <<= 1;
                set_bit(*seg ^ 1, buddy->bits[o]);
                ++buddy->num_free[o];
        }

        spin_unlock(&buddy->lock);

        *seg <<= order;
        return 0;
}

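/*
 * Free an order-sized block and greedily merge it with its buddy
 * (seg ^ 1 at the current order) for as long as that buddy is also
 * free, pushing the merged block up one order each time.
 */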
static void hns_roce_buddy_free(struct hns_roce_buddy *buddy, unsigned long seg,
                                int order)
{
        seg >>= order;

        spin_lock(&buddy->lock);

        while (test_bit(seg ^ 1, buddy->bits[order])) {
                clear_bit(seg ^ 1, buddy->bits[order]);
                --buddy->num_free[order];
                seg >>= 1;
                ++order;
        }

        set_bit(seg, buddy->bits[order]);
        ++buddy->num_free[order];

        spin_unlock(&buddy->lock);
}

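/*
 * Order i needs 2^(max_order - i) bits, so the low-order bitmaps can be
 * large; kcalloc() is tried first with vzalloc() as a fallback.  The
 * initial state is a single free block at the top order.
 */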
static int hns_roce_buddy_init(struct hns_roce_buddy *buddy, int max_order)
{
        int i, s;

        buddy->max_order = max_order;
        spin_lock_init(&buddy->lock);
        buddy->bits = kcalloc(buddy->max_order + 1,
                              sizeof(*buddy->bits),
                              GFP_KERNEL);
        buddy->num_free = kcalloc(buddy->max_order + 1,
                                  sizeof(*buddy->num_free),
                                  GFP_KERNEL);
        if (!buddy->bits || !buddy->num_free)
                goto err_out;

        for (i = 0; i <= buddy->max_order; ++i) {
                s = BITS_TO_LONGS(1 << (buddy->max_order - i));
                buddy->bits[i] = kcalloc(s, sizeof(long), GFP_KERNEL |
                                         __GFP_NOWARN);
                if (!buddy->bits[i]) {
                        buddy->bits[i] = vzalloc(s * sizeof(long));
                        if (!buddy->bits[i])
                                goto err_out_free;
                }
        }

        set_bit(0, buddy->bits[buddy->max_order]);
        buddy->num_free[buddy->max_order] = 1;

        return 0;

err_out_free:
        for (i = 0; i <= buddy->max_order; ++i)
                kvfree(buddy->bits[i]);

err_out:
        kfree(buddy->bits);
        kfree(buddy->num_free);
        return -ENOMEM;
}

static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy)
{
        int i;

        for (i = 0; i <= buddy->max_order; ++i)
                kvfree(buddy->bits[i]);

        kfree(buddy->bits);
        kfree(buddy->num_free);
}

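/*
 * Reserve a run of 2^order MTT segments: take them from the buddy
 * allocator, then pin the backing HEM pages for the whole range,
 * rolling the buddy allocation back if that fails.
 */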
static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
                                    unsigned long *seg)
{
        struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
        int ret = 0;

        ret = hns_roce_buddy_alloc(&mr_table->mtt_buddy, order, seg);
        if (ret == -1)
                return -1;

        if (hns_roce_table_get_range(hr_dev, &mr_table->mtt_table, *seg,
                                     *seg + (1 << order) - 1)) {
                hns_roce_buddy_free(&mr_table->mtt_buddy, *seg, order);
                return -1;
        }

        return 0;
}

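/*
 * mtt->order below is the number of doublings of one MTT segment needed
 * to cover npages, i.e. HNS_ROCE_MTT_ENTRY_PER_SEG << order >= npages.
 * For example, assuming 8 entries per segment, npages = 1024 gives
 * order = 7 (128 segments).
 */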
int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
                      struct hns_roce_mtt *mtt)
{
        int ret = 0;
        int i;

        /* A page count of zero corresponds to a DMA memory registration */
        if (!npages) {
                mtt->order = -1;
                mtt->page_shift = HNS_ROCE_HEM_PAGE_SHIFT;
                return 0;
        }

        /* Note: a page_shift of zero indicates a fast memory registration */
        mtt->page_shift = page_shift;

        /* Compute the number of MTT entries necessary */
        for (mtt->order = 0, i = HNS_ROCE_MTT_ENTRY_PER_SEG; i < npages;
             i <<= 1)
                ++mtt->order;

        /* Allocate MTT entries */
        ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg);
        if (ret == -1)
                return -ENOMEM;

        return 0;
}

void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt)
{
        struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

        if (mtt->order < 0)
                return;

        hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
        hns_roce_table_put_range(hr_dev, &mr_table->mtt_table, mtt->first_seg,
                                 mtt->first_seg + (1 << mtt->order) - 1);
}

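/*
 * Reserve an MPT index (and thus the key) for a new MR.  A normal MR
 * also gets a DMA-coherent page-buffer list (PBL) of npages 8-byte
 * entries to hold its page addresses; a DMA MR (size == ~0ULL) needs
 * no PBL.
 */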
static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
                             u64 size, u32 access, int npages,
                             struct hns_roce_mr *mr)
{
        unsigned long index = 0;
        int ret = 0;
        struct device *dev = &hr_dev->pdev->dev;

        /* Allocate a key for mr from mr_table */
        ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
        if (ret == -1)
                return -ENOMEM;

        mr->iova = iova;                        /* MR va starting addr */
        mr->size = size;                        /* MR addr range */
        mr->pd = pd;                            /* MR num */
        mr->access = access;                    /* MR access permit */
        mr->enabled = 0;                        /* MR active status */
        mr->key = hw_index_to_key(index);       /* MR key */

        if (size == ~0ull) {
                mr->type = MR_TYPE_DMA;
                mr->pbl_buf = NULL;
                mr->pbl_dma_addr = 0;
        } else {
                mr->type = MR_TYPE_MR;
                mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
                                                 &(mr->pbl_dma_addr),
                                                 GFP_KERNEL);
                if (!mr->pbl_buf) {
                        /* Don't leak the key index reserved above */
                        hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
                                             index, BITMAP_NO_RR);
                        return -ENOMEM;
                }
        }

        return 0;
}

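/*
 * Tear-down mirror of hns_roce_mr_alloc()/hns_roce_mr_enable(): reclaim
 * the MPT entry from hardware if it was enabled, free the PBL of a
 * normal MR, then return the key index to the bitmap.
 */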
static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
                             struct hns_roce_mr *mr)
{
        struct device *dev = &hr_dev->pdev->dev;
        int npages = 0;
        int ret;

        if (mr->enabled) {
                ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
                                         & (hr_dev->caps.num_mtpts - 1));
                if (ret)
                        dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);
        }

        if (mr->size != ~0ULL) {
                npages = ib_umem_page_count(mr->umem);
                dma_free_coherent(dev, (unsigned int)(npages * 8), mr->pbl_buf,
                                  mr->pbl_dma_addr);
        }

        hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
                             key_to_hw_index(mr->key), BITMAP_NO_RR);
}

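/*
 * Make an MPT entry live: pin the HEM page backing the entry, have the
 * hw-specific write_mtpt() fill the MPT context into a mailbox, then
 * post SW2HW_MPT to hand the entry to the hardware.
 */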
static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
                              struct hns_roce_mr *mr)
{
        int ret;
        unsigned long mtpt_idx = key_to_hw_index(mr->key);
        struct device *dev = &hr_dev->pdev->dev;
        struct hns_roce_cmd_mailbox *mailbox;
        struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

        /* Prepare HEM entry memory */
        ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
        if (ret)
                return ret;

        /* Allocate mailbox memory */
        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
        if (IS_ERR(mailbox)) {
                ret = PTR_ERR(mailbox);
                goto err_table;
        }

        ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
        if (ret) {
                dev_err(dev, "Write mtpt failed!\n");
                goto err_page;
        }

        ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
                                 mtpt_idx & (hr_dev->caps.num_mtpts - 1));
        if (ret) {
                dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
                goto err_page;
        }

        mr->enabled = 1;
        hns_roce_free_cmd_mailbox(hr_dev, mailbox);

        return 0;

err_page:
        hns_roce_free_cmd_mailbox(hr_dev, mailbox);

err_table:
        hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);
        return ret;
}

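/*
 * Write npages MTT entries starting at start_index.  A chunk must lie
 * within a single page of the MTT table and start on a segment
 * boundary; hns_roce_write_mtt() below splits requests into page-sized
 * chunks.
 */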
static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
                                    struct hns_roce_mtt *mtt, u32 start_index,
                                    u32 npages, u64 *page_list)
{
        u32 i = 0;
        __le64 *mtts = NULL;
        dma_addr_t dma_handle;
        u32 s = start_index * sizeof(u64);

        /* All MTTs must fit in the same page */
        if (start_index / (PAGE_SIZE / sizeof(u64)) !=
                (start_index + npages - 1) / (PAGE_SIZE / sizeof(u64)))
                return -EINVAL;

        if (start_index & (HNS_ROCE_MTT_ENTRY_PER_SEG - 1))
                return -EINVAL;

        mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
                                mtt->first_seg + s / hr_dev->caps.mtt_entry_sz,
                                &dma_handle);
        if (!mtts)
                return -ENOMEM;

        /* Save page addr, low 12 bits : 0.  Shift before converting to
         * little endian so the result is also correct on big-endian
         * machines.
         */
        for (i = 0; i < npages; ++i)
                mtts[i] = cpu_to_le64(page_list[i] >> PAGE_ADDR_SHIFT);

        return 0;
}

static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev,
                              struct hns_roce_mtt *mtt, u32 start_index,
                              u32 npages, u64 *page_list)
{
        int chunk;
        int ret;

        if (mtt->order < 0)
                return -EINVAL;

        while (npages > 0) {
                chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);

                ret = hns_roce_write_mtt_chunk(hr_dev, mtt, start_index, chunk,
                                               page_list);
                if (ret)
                        return ret;

                npages -= chunk;
                start_index += chunk;
                page_list += chunk;
        }

        return 0;
}

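/*
 * Flatten a hns_roce_buf into a page-address array, whether it was
 * mapped as one direct chunk or as separate per-page allocations, and
 * write the array into the MTT.
 */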
int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
                           struct hns_roce_mtt *mtt, struct hns_roce_buf *buf)
{
        u32 i = 0;
        int ret = 0;
        u64 *page_list = NULL;

        page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL);
        if (!page_list)
                return -ENOMEM;

        for (i = 0; i < buf->npages; ++i) {
                if (buf->nbufs == 1)
                        page_list[i] = buf->direct.map + (i << buf->page_shift);
                else
                        page_list[i] = buf->page_list[i].map;
        }

        ret = hns_roce_write_mtt(hr_dev, mtt, 0, buf->npages, page_list);

        kfree(page_list);

        return ret;
}

int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
        int ret = 0;

        ret = hns_roce_bitmap_init(&mr_table->mtpt_bitmap,
                                   hr_dev->caps.num_mtpts,
                                   hr_dev->caps.num_mtpts - 1,
                                   hr_dev->caps.reserved_mrws, 0);
        if (ret)
                return ret;

        ret = hns_roce_buddy_init(&mr_table->mtt_buddy,
                                  ilog2(hr_dev->caps.num_mtt_segs));
        if (ret)
                goto err_buddy;

        return 0;

err_buddy:
        hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
        return ret;
}

void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

        hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
        hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
}

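/*
 * A DMA MR covers the whole address space: passing size = ~0ULL makes
 * hns_roce_mr_alloc() use MR_TYPE_DMA, which needs no PBL.
 */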
struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
{
        int ret = 0;
        struct hns_roce_mr *mr = NULL;

        mr = kmalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        /* Allocate memory region key */
        ret = hns_roce_mr_alloc(to_hr_dev(pd->device), to_hr_pd(pd)->pdn, 0,
                                ~0ULL, acc, 0, mr);
        if (ret)
                goto err_free;

        ret = hns_roce_mr_enable(to_hr_dev(pd->device), mr);
        if (ret)
                goto err_mr;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
        mr->umem = NULL;

        return &mr->ibmr;

err_mr:
        hns_roce_mr_free(to_hr_dev(pd->device), mr);

err_free:
        kfree(mr);
        return ERR_PTR(ret);
}

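/*
 * Copy the DMA addresses of a pinned umem into the MTT, buffering one
 * kernel page worth of u64 entries at a time so each flush is a single
 * hns_roce_write_mtt() call.
 */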
int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
                               struct hns_roce_mtt *mtt, struct ib_umem *umem)
{
        struct scatterlist *sg;
        int i, k, entry;
        int ret = 0;
        u64 *pages;
        u32 n;
        int len;

        pages = (u64 *) __get_free_page(GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        i = n = 0;

        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                len = sg_dma_len(sg) >> mtt->page_shift;
                for (k = 0; k < len; ++k) {
                        pages[i++] = sg_dma_address(sg) +
                                (k << umem->page_shift);
                        if (i == PAGE_SIZE / sizeof(u64)) {
                                ret = hns_roce_write_mtt(hr_dev, mtt, n, i,
                                                         pages);
                                if (ret)
                                        goto out;
                                n += i;
                                i = 0;
                        }
                }
        }

        if (i)
                ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages);

out:
        free_page((unsigned long) pages);
        return ret;
}

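/*
 * Fill an MR's PBL straight from the umem scatterlist.  Entries store
 * page addresses shifted right by PAGE_ADDR_SHIFT (the low 12 bits
 * dropped), matching the 4kB page size enforced in
 * hns_roce_reg_user_mr().
 */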
static int hns_roce_ib_umem_write_mr(struct hns_roce_mr *mr,
                                     struct ib_umem *umem)
{
        int i = 0;
        int entry;
        struct scatterlist *sg;

        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                mr->pbl_buf[i] = ((u64)sg_dma_address(sg)) >> PAGE_ADDR_SHIFT;
                i++;
        }

        /* Ensure the PBL writes are visible before the MPT context
         * referencing them is handed to the hardware.
         */
        mb();

        return 0;
}

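/*
 * Register a user MR: pin the user buffer with ib_umem_get(), check the
 * 4kB page size and PBL capacity limits, then allocate, fill and enable
 * the MPT entry for it.
 */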
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                   u64 virt_addr, int access_flags,
                                   struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
        struct device *dev = &hr_dev->pdev->dev;
        struct hns_roce_mr *mr = NULL;
        int ret = 0;
        int n = 0;

        mr = kmalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        mr->umem = ib_umem_get(pd->uobject->context, start, length,
                               access_flags, 0);
        if (IS_ERR(mr->umem)) {
                ret = PTR_ERR(mr->umem);
                goto err_free;
        }

        n = ib_umem_page_count(mr->umem);
        if (mr->umem->page_shift != HNS_ROCE_HEM_PAGE_SHIFT) {
                dev_err(dev, "Only 4kB page size is supported, but got 0x%lx!\n",
                        BIT(mr->umem->page_shift));
                ret = -EINVAL;
                goto err_umem;
        }

        if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
                dev_err(dev, "MR len %lld error: MR is limited to 4G at most!\n",
                        length);
                ret = -EINVAL;
                goto err_umem;
        }

        ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length,
                                access_flags, n, mr);
        if (ret)
                goto err_umem;

        ret = hns_roce_ib_umem_write_mr(mr, mr->umem);
        if (ret)
                goto err_mr;

        ret = hns_roce_mr_enable(hr_dev, mr);
        if (ret)
                goto err_mr;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->key;

        return &mr->ibmr;

err_mr:
        hns_roce_mr_free(hr_dev, mr);

err_umem:
        ib_umem_release(mr->umem);

err_free:
        kfree(mr);
        return ERR_PTR(ret);
}

int hns_roce_dereg_mr(struct ib_mr *ibmr)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
        struct hns_roce_mr *mr = to_hr_mr(ibmr);
        int ret = 0;

        if (hr_dev->hw->dereg_mr) {
                ret = hr_dev->hw->dereg_mr(hr_dev, mr);
        } else {
                hns_roce_mr_free(hr_dev, mr);

                if (mr->umem)
                        ib_umem_release(mr->umem);

                kfree(mr);
        }

        return ret;
}