linux/drivers/infiniband/hw/hns/hns_roce_alloc.c
/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"

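/*
 * Free a buffer allocated by hns_roce_buf_alloc().
 *
 * @hr_dev: pointer to the hns_roce device
 * @buf: the buffer to free; a NULL pointer is silently ignored
 *
 * Clears buf->trunk_list before releasing each dma-coherent trunk, then
 * frees the trunk list and the buffer descriptor itself.
 */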
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf)
{
        struct hns_roce_buf_list *trunks;
        u32 i;

        if (!buf)
                return;

        trunks = buf->trunk_list;
        if (trunks) {
                buf->trunk_list = NULL;
                for (i = 0; i < buf->ntrunks; i++)
                        dma_free_coherent(hr_dev->dev, 1 << buf->trunk_shift,
                                          trunks[i].buf, trunks[i].map);

                kfree(trunks);
        }

        kfree(buf);
}

/*
 * Allocate a DMA buffer for storing ROCEE table entries.
 *
 * @hr_dev: pointer to the hns_roce device
 * @size: required buffer size, in bytes
 * @page_shift: log2 of the unit size within one contiguous DMA address range
 * @flags: HNS_ROCE_BUF_ flags that control the allocation flow
 *
 * Returns the new buffer on success, or an ERR_PTR()-encoded error code.
 */
struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
                                        u32 page_shift, u32 flags)
{
        u32 trunk_size, page_size, alloced_size;
        struct hns_roce_buf_list *trunks;
        struct hns_roce_buf *buf;
        gfp_t gfp_flags;
        u32 ntrunk, i;

        /* The minimum shift of the page accessed by hw is HNS_HW_PAGE_SHIFT */
        if (WARN_ON(page_shift < HNS_HW_PAGE_SHIFT))
                return ERR_PTR(-EINVAL);

        gfp_flags = (flags & HNS_ROCE_BUF_NOSLEEP) ? GFP_ATOMIC : GFP_KERNEL;
        buf = kzalloc(sizeof(*buf), gfp_flags);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->page_shift = page_shift;
        page_size = 1 << buf->page_shift;

        /*
         * Calculate the trunk size and count from the required size and
         * page_shift: HNS_ROCE_BUF_DIRECT requests a single physically
         * contiguous trunk covering the whole buffer, otherwise the buffer
         * is split into page-sized trunks of at least PAGE_SIZE each.
         */
        if (flags & HNS_ROCE_BUF_DIRECT) {
                buf->trunk_shift = order_base_2(ALIGN(size, PAGE_SIZE));
                ntrunk = 1;
        } else {
                buf->trunk_shift = order_base_2(ALIGN(page_size, PAGE_SIZE));
                ntrunk = DIV_ROUND_UP(size, 1 << buf->trunk_shift);
        }
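
        /*
         * Worked example (illustrative numbers only): for size = 64 KB,
         * page_shift = 12 and PAGE_SIZE = 4 KB, the non-DIRECT branch
         * yields trunk_shift = 12, so ntrunk = DIV_ROUND_UP(65536, 4096)
         * = 16 trunks of 4 KB each, while the DIRECT branch would yield
         * a single 64 KB trunk (trunk_shift = 16, ntrunk = 1).
         */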

        trunks = kcalloc(ntrunk, sizeof(*trunks), gfp_flags);
        if (!trunks) {
                kfree(buf);
                return ERR_PTR(-ENOMEM);
        }

        trunk_size = 1 << buf->trunk_shift;
        alloced_size = 0;
        for (i = 0; i < ntrunk; i++) {
                trunks[i].buf = dma_alloc_coherent(hr_dev->dev, trunk_size,
                                                   &trunks[i].map, gfp_flags);
                if (!trunks[i].buf)
                        break;

                alloced_size += trunk_size;
        }

        buf->ntrunks = i;

        /*
         * In NOFAIL mode the allocation fails only when no trunk at all
         * could be allocated; otherwise every trunk must have succeeded.
         */
        if ((flags & HNS_ROCE_BUF_NOFAIL) ? i == 0 : i != ntrunk) {
                for (i = 0; i < buf->ntrunks; i++)
                        dma_free_coherent(hr_dev->dev, trunk_size,
                                          trunks[i].buf, trunks[i].map);

                kfree(trunks);
                kfree(buf);
                return ERR_PTR(-ENOMEM);
        }

        buf->npages = DIV_ROUND_UP(alloced_size, page_size);
        buf->trunk_list = trunks;

        return buf;
}
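
/*
 * A minimal usage sketch (a hypothetical caller, not part of this file):
 * allocate a buffer of hardware pages, export the per-page DMA addresses
 * via hns_roce_get_kmem_bufs(), and hand the buffer back to the caller,
 * which must keep it alive while the addresses are in use and later
 * release it with hns_roce_buf_free(). The helper name and error policy
 * below are illustrative assumptions.
 *
 *      static struct hns_roce_buf *
 *      example_alloc_and_map(struct hns_roce_dev *hr_dev,
 *                            dma_addr_t *pages, int npages)
 *      {
 *              struct hns_roce_buf *buf;
 *              int ret;
 *
 *              buf = hns_roce_buf_alloc(hr_dev, npages << HNS_HW_PAGE_SHIFT,
 *                                       HNS_HW_PAGE_SHIFT, 0);
 *              if (IS_ERR(buf))
 *                      return buf;
 *
 *              ret = hns_roce_get_kmem_bufs(hr_dev, pages, npages, buf,
 *                                           HNS_HW_PAGE_SHIFT);
 *              if (ret < npages) {
 *                      hns_roce_buf_free(hr_dev, buf);
 *                      return ERR_PTR(ret < 0 ? ret : -ENOBUFS);
 *              }
 *
 *              return buf;     // caller later calls hns_roce_buf_free()
 *      }
 */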
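
/*
 * Collect the DMA address of every hw page in a kernel-space buffer.
 *
 * @hr_dev: pointer to the hns_roce device
 * @bufs: array that receives the per-page DMA addresses
 * @buf_cnt: capacity of @bufs
 * @buf: buffer previously allocated with hns_roce_buf_alloc()
 * @page_shift: log2 of the hw page size to step by
 *
 * Returns the number of addresses written to @bufs, or -EINVAL when
 * @page_shift exceeds the buffer's trunk shift.
 */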
int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
                           int buf_cnt, struct hns_roce_buf *buf,
                           unsigned int page_shift)
{
        unsigned int offset, max_size;
        int total = 0;
        int i;

        if (page_shift > buf->trunk_shift) {
                dev_err(hr_dev->dev, "failed to check kmem buf shift %u > %u\n",
                        page_shift, buf->trunk_shift);
                return -EINVAL;
        }

        offset = 0;
        max_size = buf->ntrunks << buf->trunk_shift;
        for (i = 0; i < buf_cnt && offset < max_size; i++) {
                bufs[total++] = hns_roce_buf_dma_addr(buf, offset);
                offset += (1 << page_shift);
        }

        return total;
}

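/*
 * Collect the DMA address of every hw page in a user-space memory region.
 *
 * @hr_dev: pointer to the hns_roce device
 * @bufs: array that receives the per-block DMA addresses
 * @buf_cnt: capacity of @bufs
 * @umem: pinned user memory region
 * @page_shift: log2 of the hw page size to step by
 *
 * Returns the number of addresses written to @bufs.
 */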
int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
                           int buf_cnt, struct ib_umem *umem,
                           unsigned int page_shift)
{
        struct ib_block_iter biter;
        int total = 0;

        /* Walk the umem in hw-page-sized DMA blocks, not system pages */
        rdma_umem_for_each_dma_block(umem, &biter, 1 << page_shift) {
                bufs[total++] = rdma_block_iter_dma_address(&biter);
                if (total >= buf_cnt)
                        break;
        }

        return total;
}

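/*
 * Release the software resource ID allocators.
 *
 * @hr_dev: pointer to the hns_roce device
 *
 * Tears down the QP and CQ tables and destroys every IDA used for
 * resource numbering; the XRCD and SRQ allocators are destroyed only
 * when the corresponding capability flag is set.
 */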
void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
{
        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
                ida_destroy(&hr_dev->xrcd_ida.ida);

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
                ida_destroy(&hr_dev->srq_table.srq_ida.ida);
        hns_roce_cleanup_qp_table(hr_dev);
        hns_roce_cleanup_cq_table(hr_dev);
        ida_destroy(&hr_dev->mr_table.mtpt_ida.ida);
        ida_destroy(&hr_dev->pd_ida.ida);
        ida_destroy(&hr_dev->uar_ida.ida);
}