linux/drivers/infiniband/hw/hns/hns_roce_alloc.c
/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include "hns_roce_device.h"

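/*
 * Allocate one free index from the bitmap. The search starts at the last
 * allocated position and, on failure, wraps around from bit 0 after bumping
 * bitmap->top, whose bits are OR'd into the returned object so handles
 * differ across wrap-arounds. Returns 0 on success, -1 when the bitmap is
 * exhausted.
 */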
int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj)
{
        int ret = 0;

        spin_lock(&bitmap->lock);
        *obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
        if (*obj >= bitmap->max) {
                bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
                               & bitmap->mask;
                *obj = find_first_zero_bit(bitmap->table, bitmap->max);
        }

        if (*obj < bitmap->max) {
                set_bit(*obj, bitmap->table);
                bitmap->last = (*obj + 1);
                if (bitmap->last == bitmap->max)
                        bitmap->last = 0;
                *obj |= bitmap->top;
        } else {
                ret = -1;
        }

        spin_unlock(&bitmap->lock);

        return ret;
}

void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
                          int rr)
{
        hns_roce_bitmap_free_range(bitmap, obj, 1, rr);
}
EXPORT_SYMBOL_GPL(hns_roce_bitmap_free);

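/*
 * Allocate a contiguous run of 'cnt' bits aligned to 'align'. Single-bit,
 * unaligned requests fall through to hns_roce_bitmap_alloc(); otherwise the
 * same last-position/wrap-around search is performed with
 * bitmap_find_next_zero_area(). Returns 0 on success, -1 if no suitable
 * range is free.
 */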
int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
                                int align, unsigned long *obj)
{
        int ret = 0;
        int i;

        if (likely(cnt == 1 && align == 1))
                return hns_roce_bitmap_alloc(bitmap, obj);

        spin_lock(&bitmap->lock);

        *obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
                                          bitmap->last, cnt, align - 1);
        if (*obj >= bitmap->max) {
                bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
                               & bitmap->mask;
                *obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, 0,
                                                  cnt, align - 1);
        }

        if (*obj < bitmap->max) {
                for (i = 0; i < cnt; i++)
                        set_bit(*obj + i, bitmap->table);

                if (*obj == bitmap->last) {
                        bitmap->last = (*obj + cnt);
                        if (bitmap->last >= bitmap->max)
                                bitmap->last = 0;
                }
                *obj |= bitmap->top;
        } else {
                ret = -1;
        }

        spin_unlock(&bitmap->lock);

        return ret;
}

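/*
 * Release 'cnt' bits starting at 'obj'. The generation bits carried in the
 * handle are masked off before the table bits are cleared. If 'rr' is zero,
 * bitmap->last is pulled back to the freed position so the index can be
 * reused sooner.
 */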
void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
                                unsigned long obj, int cnt,
                                int rr)
{
        int i;

        obj &= bitmap->max + bitmap->reserved_top - 1;

        spin_lock(&bitmap->lock);
        for (i = 0; i < cnt; i++)
                clear_bit(obj + i, bitmap->table);

        if (!rr)
                bitmap->last = min(bitmap->last, obj);
        bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
                       & bitmap->mask;
        spin_unlock(&bitmap->lock);
}

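/*
 * Initialize a bitmap of 'num' bits; 'num' must be a power of two. The top
 * 'reserved_top' bits are excluded from the table, and the bottom
 * 'reserved_bot' bits are marked busy up front so they are never handed out.
 */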
int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
                         u32 reserved_bot, u32 reserved_top)
{
        u32 i;

        if (num != roundup_pow_of_two(num))
                return -EINVAL;

        bitmap->last = 0;
        bitmap->top = 0;
        bitmap->max = num - reserved_top;
        bitmap->mask = mask;
        bitmap->reserved_top = reserved_top;
        spin_lock_init(&bitmap->lock);
        bitmap->table = kcalloc(BITS_TO_LONGS(bitmap->max), sizeof(long),
                                GFP_KERNEL);
        if (!bitmap->table)
                return -ENOMEM;

        for (i = 0; i < reserved_bot; ++i)
                set_bit(i, bitmap->table);

        return 0;
}

void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap)
{
        kfree(bitmap->table);
}

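/*
 * Free a buffer obtained from hns_roce_buf_alloc(): either the single direct
 * coherent region or every page in the page list, followed by the list
 * itself.
 */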
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
                       struct hns_roce_buf *buf)
{
        int i;
        struct device *dev = hr_dev->dev;

        if (buf->nbufs == 1) {
                dma_free_coherent(dev, size, buf->direct.buf, buf->direct.map);
        } else {
                for (i = 0; i < buf->nbufs; ++i)
                        if (buf->page_list[i].buf)
                                dma_free_coherent(dev, 1 << buf->page_shift,
                                                  buf->page_list[i].buf,
                                                  buf->page_list[i].map);
                kfree(buf->page_list);
        }
}
EXPORT_SYMBOL_GPL(hns_roce_buf_free);

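/*
 * Allocate a queue buffer. Buffers up to 'max_direct' bytes are allocated as
 * one physically contiguous coherent region; larger buffers are built from a
 * list of page-sized coherent allocations, one per page_list entry.
 */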
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
                       struct hns_roce_buf *buf, u32 page_shift)
{
        int i = 0;
        dma_addr_t t;
        struct device *dev = hr_dev->dev;
        u32 page_size = 1 << page_shift;
        u32 order;

        /* SQ/RQ buf less than one page, SQ + RQ = 8K */
        if (size <= max_direct) {
                buf->nbufs = 1;
                /* npages calculated from page_size */
                order = get_order(size);
                if (order <= page_shift - PAGE_SHIFT)
                        order = 0;
                else
                        order -= page_shift - PAGE_SHIFT;
                buf->npages = 1 << order;
                buf->page_shift = page_shift;
                /* MTT PA must be recorded in 4k alignment, t is 4k aligned */
                buf->direct.buf = dma_alloc_coherent(dev, size, &t,
                                                     GFP_KERNEL);
                if (!buf->direct.buf)
                        return -ENOMEM;

                buf->direct.map = t;

                while (t & ((1 << buf->page_shift) - 1)) {
                        --buf->page_shift;
                        buf->npages *= 2;
                }
        } else {
                buf->nbufs = (size + page_size - 1) / page_size;
                buf->npages = buf->nbufs;
                buf->page_shift = page_shift;
                buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
                                         GFP_KERNEL);

                if (!buf->page_list)
                        return -ENOMEM;

                for (i = 0; i < buf->nbufs; ++i) {
                        buf->page_list[i].buf = dma_alloc_coherent(dev,
                                                                   page_size,
                                                                   &t,
                                                                   GFP_KERNEL);

                        if (!buf->page_list[i].buf)
                                goto err_free;

                        buf->page_list[i].map = t;
                }
        }

        return 0;

err_free:
        hns_roce_buf_free(hr_dev, size, buf);
        return -ENOMEM;
}

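/*
 * Tear down the per-resource tables in the reverse order of their setup;
 * the SRQ table is only cleaned up when the SRQ capability flag is set.
 */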
void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
{
        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
                hns_roce_cleanup_srq_table(hr_dev);
        hns_roce_cleanup_qp_table(hr_dev);
        hns_roce_cleanup_cq_table(hr_dev);
        hns_roce_cleanup_mr_table(hr_dev);
        hns_roce_cleanup_pd_table(hr_dev);
        hns_roce_cleanup_uar_table(hr_dev);
}