linux/drivers/infiniband/hw/qib/qib_mr.c
/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>

#include "qib.h"

/* Fast memory region */
struct qib_fmr {
        struct ib_fmr ibfmr;
        struct qib_mregion mr;        /* must be last; mr.map[] is a flexible array */
};

static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr)
{
        return container_of(ibfmr, struct qib_fmr, ibfmr);
}

static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd,
        int count)
{
        int m, i = 0;
        int rval = 0;

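        /* round count up to the number of per-segment map blocks needed */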
        m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
        for (; i < m; i++) {
                mr->map[i] = kzalloc(sizeof *mr->map[0], GFP_KERNEL);
                if (!mr->map[i])
                        goto bail;
        }
        mr->mapsz = m;
        init_completion(&mr->comp);
        /* one reference for the pointer returned to the caller */
        atomic_set(&mr->refcount, 1);
        mr->pd = pd;
        mr->max_segs = count;
out:
        return rval;
bail:
        while (i)
                kfree(mr->map[--i]);
        rval = -ENOMEM;
        goto out;
}

static void deinit_qib_mregion(struct qib_mregion *mr)
{
        int i = mr->mapsz;

        mr->mapsz = 0;
        while (i)
                kfree(mr->map[--i]);
}

/**
 * qib_get_dma_mr - get a DMA memory region
 * @pd: protection domain for this memory region
 * @acc: access flags
 *
 * Returns the memory region on success, otherwise returns an errno.
 * Note that all DMA addresses should be created via the
 * struct ib_dma_mapping_ops functions (see qib_dma.c).
 */
struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct qib_mr *mr = NULL;
        struct ib_mr *ret;
        int rval;

        if (to_ipd(pd)->user) {
                ret = ERR_PTR(-EPERM);
                goto bail;
        }

        mr = kzalloc(sizeof *mr, GFP_KERNEL);
        if (!mr) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
        }

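        /* count == 0: a DMA MR covers all memory and needs no segment table */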
        rval = init_qib_mregion(&mr->mr, pd, 0);
        if (rval) {
                ret = ERR_PTR(rval);
                goto bail;
        }

        rval = qib_alloc_lkey(&mr->mr, 1);
        if (rval) {
                ret = ERR_PTR(rval);
                goto bail_mregion;
        }

        mr->mr.access_flags = acc;
        ret = &mr->ibmr;
done:
        return ret;

bail_mregion:
        deinit_qib_mregion(&mr->mr);
bail:
        kfree(mr);
        goto done;
}

static struct qib_mr *alloc_mr(int count, struct ib_pd *pd)
{
        struct qib_mr *mr;
        int rval = -ENOMEM;
        int m;

        /* Allocate struct plus pointers to first level page tables. */
        m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
        mr = kzalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
        if (!mr)
                goto bail;

        rval = init_qib_mregion(&mr->mr, pd, count);
        if (rval)
                goto bail;
        /*
         * The caller (e.g. ib_reg_phys_mr()) will initialize mr->ibmr
         * except for lkey and rkey.
         */
        rval = qib_alloc_lkey(&mr->mr, 0);
        if (rval)
                goto bail_mregion;
        mr->ibmr.lkey = mr->mr.lkey;
        mr->ibmr.rkey = mr->mr.lkey;
done:
        return mr;

bail_mregion:
        deinit_qib_mregion(&mr->mr);
bail:
        kfree(mr);
        mr = ERR_PTR(rval);
        goto done;
}

/**
 * qib_reg_phys_mr - register a physical memory region
 * @pd: protection domain for this memory region
 * @buffer_list: pointer to the list of physical buffers to register
 * @num_phys_buf: the number of physical buffers to register
 * @acc: access flags for this memory region
 * @iova_start: the starting address passed over IB which maps to this MR
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
                              struct ib_phys_buf *buffer_list,
                              int num_phys_buf, int acc, u64 *iova_start)
{
        struct qib_mr *mr;
        int n, m, i;
        struct ib_mr *ret;

        mr = alloc_mr(num_phys_buf, pd);
        if (IS_ERR(mr)) {
                ret = (struct ib_mr *)mr;
                goto bail;
        }

        mr->mr.user_base = *iova_start;
        mr->mr.iova = *iova_start;
        mr->mr.access_flags = acc;

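        /* pack the buffer list into the two-level map[]->segs[] arrays */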
        m = 0;
        n = 0;
        for (i = 0; i < num_phys_buf; i++) {
                mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
                mr->mr.map[m]->segs[n].length = buffer_list[i].size;
                mr->mr.length += buffer_list[i].size;
                n++;
                if (n == QIB_SEGSZ) {
                        m++;
                        n = 0;
                }
        }

        ret = &mr->ibmr;

bail:
        return ret;
}

/**
 * qib_reg_user_mr - register a userspace memory region
 * @pd: protection domain for this memory region
 * @start: starting userspace address
 * @length: length of region to register
 * @virt_addr: the virtual address passed over IB which maps to this MR
 * @mr_access_flags: access flags for this memory region
 * @udata: unused by the QLogic_IB driver
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                              u64 virt_addr, int mr_access_flags,
                              struct ib_udata *udata)
{
        struct qib_mr *mr;
        struct ib_umem *umem;
        struct ib_umem_chunk *chunk;
        int n, m, i;
        struct ib_mr *ret;

        if (length == 0) {
                ret = ERR_PTR(-EINVAL);
                goto bail;
        }

        umem = ib_umem_get(pd->uobject->context, start, length,
                           mr_access_flags, 0);
        if (IS_ERR(umem))
                return (void *) umem;

        n = 0;
        list_for_each_entry(chunk, &umem->chunk_list, list)
                n += chunk->nents;

        mr = alloc_mr(n, pd);
        if (IS_ERR(mr)) {
                ret = (struct ib_mr *)mr;
                ib_umem_release(umem);
                goto bail;
        }

        mr->mr.user_base = start;
        mr->mr.iova = virt_addr;
        mr->mr.length = length;
        mr->mr.offset = umem->offset;
        mr->mr.access_flags = mr_access_flags;
        mr->umem = umem;

        if (is_power_of_2(umem->page_size))
                mr->mr.page_shift = ilog2(umem->page_size);
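        /* walk the umem chunk list, recording each page as one segment */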
        m = 0;
        n = 0;
        list_for_each_entry(chunk, &umem->chunk_list, list) {
                for (i = 0; i < chunk->nents; i++) {
                        void *vaddr;

                        vaddr = page_address(sg_page(&chunk->page_list[i]));
                        if (!vaddr) {
                                /*
                                 * Unwind the partially built MR so it is
                                 * not leaked; this also releases the umem.
                                 */
                                qib_dereg_mr(&mr->ibmr);
                                ret = ERR_PTR(-EINVAL);
                                goto bail;
                        }
                        mr->mr.map[m]->segs[n].vaddr = vaddr;
                        mr->mr.map[m]->segs[n].length = umem->page_size;
                        n++;
                        if (n == QIB_SEGSZ) {
                                m++;
                                n = 0;
                        }
                }
        }
        ret = &mr->ibmr;

bail:
        return ret;
}

/**
 * qib_dereg_mr - unregister and free a memory region
 * @ibmr: the memory region to free
 *
 * Returns 0 on success.
 *
 * Note that this is called to free MRs created by qib_get_dma_mr(),
 * qib_reg_phys_mr(), qib_reg_user_mr() or qib_alloc_fast_reg_mr().
 */
int qib_dereg_mr(struct ib_mr *ibmr)
{
        struct qib_mr *mr = to_imr(ibmr);
        int ret = 0;
        unsigned long timeout;

        qib_free_lkey(&mr->mr);

        qib_put_mr(&mr->mr); /* will set completion if last */
        timeout = wait_for_completion_timeout(&mr->mr.comp,
                5 * HZ);
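        /*
         * The last reference triggers mr_rcu_callback() to signal the
         * completion; if that does not happen within 5 seconds the MR
         * is still referenced, so take the reference back and fail
         * rather than free a region that may still be in use.
         */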
        if (!timeout) {
                qib_get_mr(&mr->mr);
                ret = -EBUSY;
                goto out;
        }
        deinit_qib_mregion(&mr->mr);
        if (mr->umem)
                ib_umem_release(mr->umem);
        kfree(mr);
out:
        return ret;
}

/*
 * Allocate a memory region usable with the
 * IB_WR_FAST_REG_MR send work request.
 *
 * Return the memory region on success, otherwise return an errno.
 */
struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
{
        struct qib_mr *mr;

        mr = alloc_mr(max_page_list_len, pd);
        if (IS_ERR(mr))
                return (struct ib_mr *)mr;

        return &mr->ibmr;
}

struct ib_fast_reg_page_list *
qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
{
        unsigned size = page_list_len * sizeof(u64);
        struct ib_fast_reg_page_list *pl;

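        /* the whole page list must fit within a single page */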
        if (size > PAGE_SIZE)
                return ERR_PTR(-EINVAL);

        pl = kzalloc(sizeof *pl, GFP_KERNEL);
        if (!pl)
                return ERR_PTR(-ENOMEM);

        pl->page_list = kzalloc(size, GFP_KERNEL);
        if (!pl->page_list)
                goto err_free;

        return pl;

err_free:
        kfree(pl);
        return ERR_PTR(-ENOMEM);
}

void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl)
{
        kfree(pl->page_list);
        kfree(pl);
}

/**
 * qib_alloc_fmr - allocate a fast memory region
 * @pd: the protection domain for this memory region
 * @mr_access_flags: access flags for this memory region
 * @fmr_attr: fast memory region attributes
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
                             struct ib_fmr_attr *fmr_attr)
{
        struct qib_fmr *fmr;
        int m;
        struct ib_fmr *ret;
        int rval = -ENOMEM;

        /* Allocate struct plus pointers to first level page tables. */
        m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
        fmr = kzalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
        if (!fmr)
                goto bail;

        rval = init_qib_mregion(&fmr->mr, pd, fmr_attr->max_pages);
        if (rval)
                goto bail;

        /*
         * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
         * rkey.
         */
        rval = qib_alloc_lkey(&fmr->mr, 0);
        if (rval)
                goto bail_mregion;
        fmr->ibfmr.rkey = fmr->mr.lkey;
        fmr->ibfmr.lkey = fmr->mr.lkey;
        /*
         * Resources are allocated but no valid mapping (RKEY can't be
         * used).
         */
        fmr->mr.access_flags = mr_access_flags;
        fmr->mr.max_segs = fmr_attr->max_pages;
        fmr->mr.page_shift = fmr_attr->page_shift;

        ret = &fmr->ibfmr;
done:
        return ret;

bail_mregion:
        deinit_qib_mregion(&fmr->mr);
bail:
        kfree(fmr);
        ret = ERR_PTR(rval);
        goto done;
}

/**
 * qib_map_phys_fmr - set up a fast memory region
 * @ibfmr: the fast memory region to set up
 * @page_list: the list of pages to associate with the fast memory region
 * @list_len: the number of pages to associate with the fast memory region
 * @iova: the virtual address of the start of the fast memory region
 *
 * This may be called from interrupt context.
 */
int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
                     int list_len, u64 iova)
{
        struct qib_fmr *fmr = to_ifmr(ibfmr);
        struct qib_lkey_table *rkt;
        unsigned long flags;
        int m, n, i;
        u32 ps;
        int ret;

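        /*
         * Two references are expected when idle: one from allocation
         * and one held by the lkey table; more means the FMR is in use.
         */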
        i = atomic_read(&fmr->mr.refcount);
        if (i > 2)
                return -EBUSY;

        if (list_len > fmr->mr.max_segs) {
                ret = -EINVAL;
                goto bail;
        }
        rkt = &to_idev(ibfmr->device)->lk_table;
        spin_lock_irqsave(&rkt->lock, flags);
        fmr->mr.user_base = iova;
        fmr->mr.iova = iova;
        ps = 1 << fmr->mr.page_shift;
        fmr->mr.length = list_len * ps;
        m = 0;
        n = 0;
        for (i = 0; i < list_len; i++) {
                fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
                fmr->mr.map[m]->segs[n].length = ps;
                if (++n == QIB_SEGSZ) {
                        m++;
                        n = 0;
                }
        }
        spin_unlock_irqrestore(&rkt->lock, flags);
        ret = 0;

bail:
        return ret;
}

/**
 * qib_unmap_fmr - unmap fast memory regions
 * @fmr_list: the list of fast memory regions to unmap
 *
 * Returns 0 on success.
 */
int qib_unmap_fmr(struct list_head *fmr_list)
{
        struct qib_fmr *fmr;
        struct qib_lkey_table *rkt;
        unsigned long flags;

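        /*
         * Invalidate each mapping under the lkey table lock; the keys
         * themselves stay allocated.
         */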
        list_for_each_entry(fmr, fmr_list, ibfmr.list) {
                rkt = &to_idev(fmr->ibfmr.device)->lk_table;
                spin_lock_irqsave(&rkt->lock, flags);
                fmr->mr.user_base = 0;
                fmr->mr.iova = 0;
                fmr->mr.length = 0;
                spin_unlock_irqrestore(&rkt->lock, flags);
        }
        return 0;
}

/**
 * qib_dealloc_fmr - deallocate a fast memory region
 * @ibfmr: the fast memory region to deallocate
 *
 * Returns 0 on success.
 */
int qib_dealloc_fmr(struct ib_fmr *ibfmr)
{
        struct qib_fmr *fmr = to_ifmr(ibfmr);
        int ret = 0;
        unsigned long timeout;

        qib_free_lkey(&fmr->mr);
        qib_put_mr(&fmr->mr); /* will set completion if last */
        timeout = wait_for_completion_timeout(&fmr->mr.comp,
                5 * HZ);
        if (!timeout) {
                qib_get_mr(&fmr->mr);
                ret = -EBUSY;
                goto out;
        }
        deinit_qib_mregion(&fmr->mr);
        kfree(fmr);
out:
        return ret;
}

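/*
 * Runs after an RCU grace period once the last reference to the
 * mregion is dropped; wakes the completion that qib_dereg_mr() and
 * qib_dealloc_fmr() wait on.
 */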
void mr_rcu_callback(struct rcu_head *list)
{
        struct qib_mregion *mr = container_of(list, struct qib_mregion, list);

        complete(&mr->comp);
}