linux/drivers/infiniband/hw/qib/qib_mr.c
/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>

#include "qib.h"

/* Fast memory region */
struct qib_fmr {
	struct ib_fmr ibfmr;
	struct qib_mregion mr;        /* must be last */
};
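/*
 * Note: struct qib_mregion ends in a variable-length array of
 * segment-map pointers (map[]), which is why it must be the last
 * member of struct qib_fmr (and of struct qib_mr in qib_verbs.h).
 */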

static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct qib_fmr, ibfmr);
}

/**
 * qib_get_dma_mr - get a DMA memory region
 * @pd: protection domain for this memory region
 * @acc: access flags
 *
 * Returns the memory region on success, otherwise returns an errno.
 * Note that all DMA addresses should be created via the
 * struct ib_dma_mapping_ops functions (see qib_dma.c).
 */
struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct qib_ibdev *dev = to_idev(pd->device);
	struct qib_mr *mr;
	struct ib_mr *ret;
	unsigned long flags;

	if (to_ipd(pd)->user) {
		ret = ERR_PTR(-EPERM);
		goto bail;
	}

	mr = kzalloc(sizeof *mr, GFP_KERNEL);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	mr->mr.access_flags = acc;
	atomic_set(&mr->mr.refcount, 0);

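	/*
	 * The device keeps at most one DMA MR: lkey 0 in a work request
	 * is resolved to dev->dma_mr by the key-checking code (see
	 * qib_keys.c). If a DMA MR is already installed, the new region
	 * is still returned to the caller but does not replace it.
	 */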
	spin_lock_irqsave(&dev->lk_table.lock, flags);
	if (!dev->dma_mr)
		dev->dma_mr = &mr->mr;
	spin_unlock_irqrestore(&dev->lk_table.lock, flags);

	ret = &mr->ibmr;

bail:
	return ret;
}

static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table)
{
	struct qib_mr *mr;
	int m, i = 0;

	/* Allocate struct plus pointers to first level page tables. */
	m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
	mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
	if (!mr)
		goto done;

	/* Allocate first level page tables. */
	for (; i < m; i++) {
		mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL);
		if (!mr->mr.map[i])
			goto bail;
	}
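	/*
	 * The region is described by a two-level table: m first-level
	 * pointers, each to an array of QIB_SEGSZ (vaddr, length)
	 * segment entries.
	 */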
	mr->mr.mapsz = m;
	mr->mr.page_shift = 0;
	mr->mr.max_segs = count;

	/*
	 * ib_reg_phys_mr() will initialize mr->ibmr except for
	 * lkey and rkey.
	 */
	if (!qib_alloc_lkey(lk_table, &mr->mr))
		goto bail;
	mr->ibmr.lkey = mr->mr.lkey;
	mr->ibmr.rkey = mr->mr.lkey;

	atomic_set(&mr->mr.refcount, 0);
	goto done;

bail:
	while (i)
		kfree(mr->mr.map[--i]);
	kfree(mr);
	mr = NULL;

done:
	return mr;
}

/**
 * qib_reg_phys_mr - register a physical memory region
 * @pd: protection domain for this memory region
 * @buffer_list: pointer to the list of physical buffers to register
 * @num_phys_buf: the number of physical buffers to register
 * @acc: access flags
 * @iova_start: the starting address passed over IB which maps to this MR
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
			      struct ib_phys_buf *buffer_list,
			      int num_phys_buf, int acc, u64 *iova_start)
{
	struct qib_mr *mr;
	int n, m, i;
	struct ib_mr *ret;

	mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table);
	if (mr == NULL) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	mr->mr.pd = pd;
	mr->mr.user_base = *iova_start;
	mr->mr.iova = *iova_start;
	mr->mr.length = 0;
	mr->mr.offset = 0;
	mr->mr.access_flags = acc;
	mr->umem = NULL;

	m = 0;
	n = 0;
	for (i = 0; i < num_phys_buf; i++) {
		mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
		mr->mr.map[m]->segs[n].length = buffer_list[i].size;
		mr->mr.length += buffer_list[i].size;
		n++;
		if (n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}

	ret = &mr->ibmr;

bail:
	return ret;
}

/**
 * qib_reg_user_mr - register a userspace memory region
 * @pd: protection domain for this memory region
 * @start: starting userspace address
 * @length: length of region to register
 * @virt_addr: virtual address to use (from HCA's point of view)
 * @mr_access_flags: access flags for this memory region
 * @udata: unused by the QLogic_IB driver
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int mr_access_flags,
			      struct ib_udata *udata)
{
	struct qib_mr *mr;
	struct ib_umem *umem;
	struct ib_umem_chunk *chunk;
	int n, m, i;
	struct ib_mr *ret;

	if (length == 0) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	umem = ib_umem_get(pd->uobject->context, start, length,
			   mr_access_flags, 0);
	if (IS_ERR(umem))
		return ERR_CAST(umem);

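	/*
	 * Count the pages pinned by ib_umem_get() so that alloc_mr()
	 * can size the two-level segment table.
	 */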
	n = 0;
	list_for_each_entry(chunk, &umem->chunk_list, list)
		n += chunk->nents;

	mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		ib_umem_release(umem);
		goto bail;
	}

	mr->mr.pd = pd;
	mr->mr.user_base = start;
	mr->mr.iova = virt_addr;
	mr->mr.length = length;
	mr->mr.offset = umem->offset;
	mr->mr.access_flags = mr_access_flags;
	mr->umem = umem;

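	/*
	 * A power-of-2 page size lets the key-checking code locate a
	 * segment by shifting; page_shift stays 0 for non-uniform or
	 * non-power-of-2 sizes.
	 */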
	if (is_power_of_2(umem->page_size))
		mr->mr.page_shift = ilog2(umem->page_size);
	m = 0;
	n = 0;
	list_for_each_entry(chunk, &umem->chunk_list, list) {
		for (i = 0; i < chunk->nents; i++) {
			void *vaddr;

			vaddr = page_address(sg_page(&chunk->page_list[i]));
			if (!vaddr) {
				ret = ERR_PTR(-EINVAL);
				goto bail_map;
			}
			mr->mr.map[m]->segs[n].vaddr = vaddr;
			mr->mr.map[m]->segs[n].length = umem->page_size;
			n++;
			if (n == QIB_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	ret = &mr->ibmr;
	goto bail;

bail_map:
	/*
	 * A page without a kernel mapping (highmem) can't be used;
	 * undo alloc_mr() and release the pinned pages instead of
	 * leaking mr and umem.
	 */
	qib_free_lkey(to_idev(pd->device), &mr->mr);
	while (mr->mr.mapsz)
		kfree(mr->mr.map[--mr->mr.mapsz]);
	ib_umem_release(umem);
	kfree(mr);
bail:
	return ret;
}

/**
 * qib_dereg_mr - unregister and free a memory region
 * @ibmr: the memory region to free
 *
 * Returns 0 on success, or -EBUSY if the memory region is still in use.
 *
 * Note that this is called to free MRs created by qib_get_dma_mr(),
 * qib_reg_phys_mr(), qib_reg_user_mr() or qib_alloc_fast_reg_mr().
 */
int qib_dereg_mr(struct ib_mr *ibmr)
{
	struct qib_mr *mr = to_imr(ibmr);
	struct qib_ibdev *dev = to_idev(ibmr->device);
	int ret;
	int i;

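	/* Fails (-EBUSY) while the region is still referenced. */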
	ret = qib_free_lkey(dev, &mr->mr);
	if (ret)
		return ret;

	i = mr->mr.mapsz;
	while (i)
		kfree(mr->mr.map[--i]);
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);
	return 0;
}

/**
 * qib_alloc_fast_reg_mr - allocate a memory region usable with the
 *	IB_WR_FAST_REG_MR send work request
 * @pd: protection domain for this memory region
 * @max_page_list_len: maximum number of pages mappable with a single
 *	fast register work request
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
{
	struct qib_mr *mr;

	mr = alloc_mr(max_page_list_len, &to_idev(pd->device)->lk_table);
	if (mr == NULL)
		return ERR_PTR(-ENOMEM);

	mr->mr.pd = pd;
	mr->mr.user_base = 0;
	mr->mr.iova = 0;
	mr->mr.length = 0;
	mr->mr.offset = 0;
	mr->mr.access_flags = 0;
	mr->umem = NULL;

	return &mr->ibmr;
}

struct ib_fast_reg_page_list *
qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
{
	unsigned size = page_list_len * sizeof(u64);
	struct ib_fast_reg_page_list *pl;

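	/* Limit the page list to one page worth of addresses. */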
	if (size > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	pl = kmalloc(sizeof *pl, GFP_KERNEL);
	if (!pl)
		return ERR_PTR(-ENOMEM);

	pl->page_list = kmalloc(size, GFP_KERNEL);
	if (!pl->page_list)
		goto err_free;

	return pl;

err_free:
	kfree(pl);
	return ERR_PTR(-ENOMEM);
}

void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl)
{
	kfree(pl->page_list);
	kfree(pl);
}

/**
 * qib_alloc_fmr - allocate a fast memory region
 * @pd: the protection domain for this memory region
 * @mr_access_flags: access flags for this memory region
 * @fmr_attr: fast memory region attributes
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			     struct ib_fmr_attr *fmr_attr)
{
	struct qib_fmr *fmr;
	int m, i = 0;
	struct ib_fmr *ret;

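	/* This mirrors alloc_mr(), but embeds the region in a struct qib_fmr. */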
	/* Allocate struct plus pointers to first level page tables. */
	m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
	fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
	if (!fmr)
		goto bail;

	/* Allocate first level page tables. */
	for (; i < m; i++) {
		fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
					 GFP_KERNEL);
		if (!fmr->mr.map[i])
			goto bail;
	}
	fmr->mr.mapsz = m;

	/*
	 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
	 * rkey.
	 */
	if (!qib_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr))
		goto bail;
	fmr->ibfmr.rkey = fmr->mr.lkey;
	fmr->ibfmr.lkey = fmr->mr.lkey;
	/*
	 * Resources are allocated but no valid mapping (RKEY can't be
	 * used).
	 */
	fmr->mr.pd = pd;
	fmr->mr.user_base = 0;
	fmr->mr.iova = 0;
	fmr->mr.length = 0;
	fmr->mr.offset = 0;
	fmr->mr.access_flags = mr_access_flags;
	fmr->mr.max_segs = fmr_attr->max_pages;
	fmr->mr.page_shift = fmr_attr->page_shift;

	atomic_set(&fmr->mr.refcount, 0);
	ret = &fmr->ibfmr;
	goto done;

bail:
	while (i)
		kfree(fmr->mr.map[--i]);
	kfree(fmr);
	ret = ERR_PTR(-ENOMEM);

done:
	return ret;
}

/**
 * qib_map_phys_fmr - set up a fast memory region
 * @ibfmr: the fast memory region to set up
 * @page_list: the list of pages to associate with the fast memory region
 * @list_len: the number of pages to associate with the fast memory region
 * @iova: the virtual address of the start of the fast memory region
 *
 * Returns 0 on success, otherwise returns an errno.
 *
 * This may be called from interrupt context.
 */
int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		     int list_len, u64 iova)
{
	struct qib_fmr *fmr = to_ifmr(ibfmr);
	struct qib_lkey_table *rkt;
	unsigned long flags;
	int m, n, i;
	u32 ps;
	int ret;

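	/*
	 * Refuse to remap while outstanding work requests still
	 * reference the region.
	 */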
	if (atomic_read(&fmr->mr.refcount))
		return -EBUSY;

	if (list_len > fmr->mr.max_segs) {
		ret = -EINVAL;
		goto bail;
	}
	rkt = &to_idev(ibfmr->device)->lk_table;
	spin_lock_irqsave(&rkt->lock, flags);
	fmr->mr.user_base = iova;
	fmr->mr.iova = iova;
	ps = 1 << fmr->mr.page_shift;
	fmr->mr.length = list_len * ps;
	m = 0;
	n = 0;
	for (i = 0; i < list_len; i++) {
		fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
		fmr->mr.map[m]->segs[n].length = ps;
		if (++n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}
	spin_unlock_irqrestore(&rkt->lock, flags);
	ret = 0;

bail:
	return ret;
}

/**
 * qib_unmap_fmr - unmap fast memory regions
 * @fmr_list: the list of fast memory regions to unmap
 *
 * Returns 0 on success.
 */
int qib_unmap_fmr(struct list_head *fmr_list)
{
	struct qib_fmr *fmr;
	struct qib_lkey_table *rkt;
	unsigned long flags;

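	/*
	 * Zeroing the length invalidates the mapping (subsequent key
	 * checks fail) while the lkey/rkey themselves stay allocated
	 * for reuse by the next qib_map_phys_fmr().
	 */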
	list_for_each_entry(fmr, fmr_list, ibfmr.list) {
		rkt = &to_idev(fmr->ibfmr.device)->lk_table;
		spin_lock_irqsave(&rkt->lock, flags);
		fmr->mr.user_base = 0;
		fmr->mr.iova = 0;
		fmr->mr.length = 0;
		spin_unlock_irqrestore(&rkt->lock, flags);
	}
	return 0;
}

/**
 * qib_dealloc_fmr - deallocate a fast memory region
 * @ibfmr: the fast memory region to deallocate
 *
 * Returns 0 on success, or -EBUSY if the memory region is still in use.
 */
int qib_dealloc_fmr(struct ib_fmr *ibfmr)
{
	struct qib_fmr *fmr = to_ifmr(ibfmr);
	int ret;
	int i;

	ret = qib_free_lkey(to_idev(ibfmr->device), &fmr->mr);
	if (ret)
		return ret;

	i = fmr->mr.mapsz;
	while (i)
		kfree(fmr->mr.map[--i]);
	kfree(fmr);
	return 0;
}