linux/drivers/infiniband/hw/hns/hns_roce_pd.c
/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <linux/pci.h>
#include "hns_roce_device.h"

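/*
 * Initialize the PD IDA so protection domain numbers are handed out from
 * the range [reserved_pds, num_pds - 1].
 */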
void hns_roce_init_pd_table(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_ida *pd_ida = &hr_dev->pd_ida;

        ida_init(&pd_ida->ida);
        pd_ida->max = hr_dev->caps.num_pds - 1;
        pd_ida->min = hr_dev->caps.reserved_pds;
}

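/*
 * Allocate a protection domain number from the PD IDA. For a userspace
 * caller the PDN is returned through udata; the IDA entry is released
 * again if the copy to userspace fails.
 */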
int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
        struct ib_device *ib_dev = ibpd->device;
        struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
        struct hns_roce_ida *pd_ida = &hr_dev->pd_ida;
        struct hns_roce_pd *pd = to_hr_pd(ibpd);
        int ret = 0;
        int id;

        id = ida_alloc_range(&pd_ida->ida, pd_ida->min, pd_ida->max,
                             GFP_KERNEL);
        if (id < 0) {
                ibdev_err(ib_dev, "failed to alloc pd, id = %d.\n", id);
                return -ENOMEM;
        }
        pd->pdn = (unsigned long)id;

        if (udata) {
                struct hns_roce_ib_alloc_pd_resp resp = {.pdn = pd->pdn};

                ret = ib_copy_to_udata(udata, &resp,
                                       min(udata->outlen, sizeof(resp)));
                if (ret) {
                        ida_free(&pd_ida->ida, id);
                        ibdev_err(ib_dev, "failed to copy to udata, ret = %d\n", ret);
                }
        }

        return ret;
}

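/* Return the protection domain number to the PD IDA. */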
int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);

        ida_free(&hr_dev->pd_ida.ida, (int)to_hr_pd(pd)->pdn);

        return 0;
}

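/*
 * Allocate a logical UAR index from the UAR IDA, map it onto one of the
 * physical UARs, and derive the page frame number of the UAR from either
 * the platform memory resource or PCI BAR 2, depending on the bus the
 * device sits on.
 */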
int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
{
        struct hns_roce_ida *uar_ida = &hr_dev->uar_ida;
        struct resource *res;
        int id;

        /* Use the IDA to manage the UAR index */
        id = ida_alloc_range(&uar_ida->ida, uar_ida->min, uar_ida->max,
                             GFP_KERNEL);
        if (id < 0) {
                ibdev_err(&hr_dev->ib_dev, "failed to alloc uar id(%d).\n", id);
                return -ENOMEM;
        }
        uar->logic_idx = (unsigned long)id;

        if (uar->logic_idx > 0 && hr_dev->caps.phy_num_uars > 1)
                uar->index = (uar->logic_idx - 1) %
                             (hr_dev->caps.phy_num_uars - 1) + 1;
        else
                uar->index = 0;

        if (!dev_is_pci(hr_dev->dev)) {
                res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
                if (!res) {
                        ida_free(&uar_ida->ida, id);
                        dev_err(&hr_dev->pdev->dev, "memory resource not found!\n");
                        return -EINVAL;
                }
                uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;
        } else {
                uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2))
                           >> PAGE_SHIFT);
        }

        return 0;
}

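/* Initialize the UAR IDA over the range [reserved_uars, num_uars - 1]. */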
void hns_roce_init_uar_table(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_ida *uar_ida = &hr_dev->uar_ida;

        ida_init(&uar_ida->ida);
        uar_ida->max = hr_dev->caps.num_uars - 1;
        uar_ida->min = hr_dev->caps.reserved_uars;
}

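/* Allocate an XRC domain number from the XRCD IDA. */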
static int hns_roce_xrcd_alloc(struct hns_roce_dev *hr_dev, u32 *xrcdn)
{
        struct hns_roce_ida *xrcd_ida = &hr_dev->xrcd_ida;
        int id;

        id = ida_alloc_range(&xrcd_ida->ida, xrcd_ida->min, xrcd_ida->max,
                             GFP_KERNEL);
        if (id < 0) {
                ibdev_err(&hr_dev->ib_dev, "failed to alloc xrcdn(%d).\n", id);
                return -ENOMEM;
        }
        *xrcdn = (u32)id;

        return 0;
}

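/* Initialize the XRCD IDA over the range [reserved_xrcds, num_xrcds - 1]. */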
void hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_ida *xrcd_ida = &hr_dev->xrcd_ida;

        ida_init(&xrcd_ida->ida);
        xrcd_ida->max = hr_dev->caps.num_xrcds - 1;
        xrcd_ida->min = hr_dev->caps.reserved_xrcds;
}

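/*
 * Verbs hook for XRC domain allocation: reject the request if the device
 * does not advertise XRC support, otherwise allocate an xrcdn for it.
 */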
int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ib_xrcd->device);
        struct hns_roce_xrcd *xrcd = to_hr_xrcd(ib_xrcd);
        int ret;

        if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC))
                return -EINVAL;

        ret = hns_roce_xrcd_alloc(hr_dev, &xrcd->xrcdn);
        if (ret)
                return ret;

        return 0;
}

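/* Return the XRC domain number to the XRCD IDA. */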
int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ib_xrcd->device);
        u32 xrcdn = to_hr_xrcd(ib_xrcd)->xrcdn;

        ida_free(&hr_dev->xrcd_ida.ida, (int)xrcdn);

        return 0;
}