linux/drivers/infiniband/hw/cxgb3/iwch_mem.c
/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/slab.h>
#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>

#include "cxio_hal.h"
#include "cxio_resource.h"
#include "iwch.h"
#include "iwch_provider.h"

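/*
 * Finish a memory registration: mark the MR valid, record the new stag,
 * mirror it into the ib_mr's lkey/rkey, and insert the MR into the
 * device's mmid table under its memory-map id (the upper 24 bits of
 * the stag).
 */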
static int iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
{
        u32 mmid;

        mhp->attr.state = 1;
        mhp->attr.stag = stag;
        mmid = stag >> 8;
        mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
        PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
        return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}

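/*
 * Register the physical memory described by mhp with the adapter and,
 * on success, publish the resulting stag.  If inserting the MR into
 * the mmid table fails, the hardware registration is torn down again.
 */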
int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
                      struct iwch_mr *mhp, int shift)
{
        u32 stag;
        int ret;

        if (cxio_register_phys_mem(&rhp->rdev,
                                   &stag, mhp->attr.pdid,
                                   mhp->attr.perms,
                                   mhp->attr.zbva,
                                   mhp->attr.va_fbo,
                                   mhp->attr.len,
                                   shift - 12,
                                   mhp->attr.pbl_size, mhp->attr.pbl_addr))
                return -ENOMEM;

        ret = iwch_finish_mem_reg(mhp, stag);
        if (ret)
                cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
                       mhp->attr.pbl_addr);
        return ret;
}

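/*
 * Re-register an existing MR in place, reusing its stag.  Growing the
 * PBL is not supported: the new mapping must fit within the PBL that
 * was allocated at registration time.
 */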
int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
                                        struct iwch_mr *mhp,
                                        int shift,
                                        int npages)
{
        u32 stag;
        int ret;

        /* We could support this... */
        if (npages > mhp->attr.pbl_size)
                return -ENOMEM;

        stag = mhp->attr.stag;
        if (cxio_reregister_phys_mem(&rhp->rdev,
                                   &stag, mhp->attr.pdid,
                                   mhp->attr.perms,
                                   mhp->attr.zbva,
                                   mhp->attr.va_fbo,
                                   mhp->attr.len,
                                   shift - 12,
                                   mhp->attr.pbl_size, mhp->attr.pbl_addr))
                return -ENOMEM;

        ret = iwch_finish_mem_reg(mhp, stag);
        if (ret)
                cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
                       mhp->attr.pbl_addr);

        return ret;
}

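/*
 * Allocate adapter memory from the PBL pool for a page buffer list of
 * npages entries (8 bytes per page address).
 */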
int iwch_alloc_pbl(struct iwch_mr *mhp, int npages)
{
        mhp->attr.pbl_addr = cxio_hal_pblpool_alloc(&mhp->rhp->rdev,
                                                    npages << 3);

        if (!mhp->attr.pbl_addr)
                return -ENOMEM;

        mhp->attr.pbl_size = npages;

        return 0;
}

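/* Return an MR's page buffer list to the PBL pool. */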
void iwch_free_pbl(struct iwch_mr *mhp)
{
        cxio_hal_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
                              mhp->attr.pbl_size << 3);
}

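/*
 * Write npages big-endian page addresses into the MR's page buffer
 * list, starting offset entries into it.
 */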
int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset)
{
        return cxio_write_pbl(&mhp->rhp->rdev, pages,
                              mhp->attr.pbl_addr + (offset << 3), npages);
}

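/*
 * Translate an array of physical buffers into a single page list the
 * adapter can consume: validate alignment, pick the largest page size
 * consistent with every buffer boundary, and emit one big-endian
 * address per page.  Returns the total length, page count, page shift
 * and a kmalloc'd page list the caller must free.
 */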
int build_phys_page_list(struct ib_phys_buf *buffer_list,
                                        int num_phys_buf,
                                        u64 *iova_start,
                                        u64 *total_size,
                                        int *npages,
                                        int *shift,
                                        __be64 **page_list)
{
        u64 mask;
        int i, j, n;

        mask = 0;
        *total_size = 0;
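        /*
         * Scan the buffers: every buffer after the first must start on
         * a page boundary, and interior buffers must be a whole number
         * of pages long.  Accumulate the start and end addresses into
         * 'mask' so its lowest set bit bounds the usable page size.
         */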
        for (i = 0; i < num_phys_buf; ++i) {
                if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
                        return -EINVAL;
                if (i != 0 && i != num_phys_buf - 1 &&
                    (buffer_list[i].size & ~PAGE_MASK))
                        return -EINVAL;
                *total_size += buffer_list[i].size;
                if (i > 0)
                        mask |= buffer_list[i].addr;
                else
                        mask |= buffer_list[i].addr & PAGE_MASK;
                if (i != num_phys_buf - 1)
                        mask |= buffer_list[i].addr + buffer_list[i].size;
                else
                        mask |= (buffer_list[i].addr + buffer_list[i].size +
                                PAGE_SIZE - 1) & PAGE_MASK;
        }

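        /* The total region length must fit in 32 bits. */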
        if (*total_size > 0xFFFFFFFFULL)
                return -ENOMEM;

        /* Find largest page shift we can use to cover buffers */
        for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
                if ((1ULL << *shift) & mask)
                        break;

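        /*
         * Round the first buffer's start down to a shift boundary,
         * growing its size by the same amount to compensate.
         */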
        buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
        buffer_list[0].addr &= ~0ull << *shift;

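        /* Count the pages of size (1ULL << *shift) needed per buffer. */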
        *npages = 0;
        for (i = 0; i < num_phys_buf; ++i)
                *npages += (buffer_list[i].size +
                        (1ULL << *shift) - 1) >> *shift;

        if (!*npages)
                return -EINVAL;

        *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
        if (!*page_list)
                return -ENOMEM;

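        /* Emit one big-endian page address per page of each buffer. */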
        n = 0;
        for (i = 0; i < num_phys_buf; ++i)
                for (j = 0;
                     j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
                     ++j)
                        (*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
                            ((u64) j << *shift));

        PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n",
             __func__, (unsigned long long) *iova_start,
             (unsigned long long) mask, *shift, (unsigned long long) *total_size,
             *npages);

        return 0;
}