linux/drivers/net/mlx4/alloc.c
/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>

#include "mlx4.h"

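/*
 * Object-number allocator.  Each mlx4_bitmap tracks a power-of-two range
 * of object numbers: 'last' is a round-robin scan hint, and 'top' is a
 * counter that is stepped by the bitmap size (and wrapped with 'mask')
 * each time the scan wraps, then OR'ed into returned handles.  For
 * callers whose 'mask' is wider than the bitmap itself, this makes a
 * reused slot hand out a different handle value each time around, which
 * helps avoid confusing freshly allocated objects with stale references.
 */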
u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
{
        u32 obj;

        spin_lock(&bitmap->lock);

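        /*
         * Resume scanning where the previous allocation stopped; if no
         * free bit is left above 'last', step the 'top' rotation and
         * retry from the beginning of the table.
         */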
        obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
        if (obj >= bitmap->max) {
                bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
                                & bitmap->mask;
                obj = find_first_zero_bit(bitmap->table, bitmap->max);
        }

        if (obj < bitmap->max) {
                set_bit(obj, bitmap->table);
                bitmap->last = (obj + 1);
                if (bitmap->last == bitmap->max)
                        bitmap->last = 0;
                obj |= bitmap->top;
        } else
                obj = -1;

        spin_unlock(&bitmap->lock);

        return obj;
}

void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
{
        mlx4_bitmap_free_range(bitmap, obj, 1);
}

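/*
 * Allocate 'cnt' contiguous object numbers aligned to 'align'.  The
 * alignment is passed to bitmap_find_next_zero_area() as the mask
 * (align - 1), so 'align' is expected to be a power of two.
 */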
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
{
        u32 obj;

        if (likely(cnt == 1 && align == 1))
                return mlx4_bitmap_alloc(bitmap);

        spin_lock(&bitmap->lock);

        obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
                                bitmap->last, cnt, align - 1);
        if (obj >= bitmap->max) {
                bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
                                & bitmap->mask;
                obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
                                                0, cnt, align - 1);
        }

        if (obj < bitmap->max) {
                bitmap_set(bitmap->table, obj, cnt);
                if (obj == bitmap->last) {
                        bitmap->last = (obj + cnt);
                        if (bitmap->last >= bitmap->max)
                                bitmap->last = 0;
                }
                obj |= bitmap->top;
        } else
                obj = -1;

        spin_unlock(&bitmap->lock);

        return obj;
}

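/*
 * Strip any 'top' rotation bits off 'obj' to recover the bitmap index,
 * then clear the range and pull the scan hint back so the freed objects
 * are found again quickly.  'top' is stepped on free as well as on wrap.
 */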
void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
{
        obj &= bitmap->max + bitmap->reserved_top - 1;

        spin_lock(&bitmap->lock);
        bitmap_clear(bitmap->table, obj, cnt);
        bitmap->last = min(bitmap->last, obj);
        bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
                        & bitmap->mask;
        spin_unlock(&bitmap->lock);
}

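/*
 * Set up a bitmap for 'num' objects ('num' must be a power of two).  The
 * lowest 'reserved_bot' objects are marked in-use permanently, and the
 * 'reserved_top' highest objects are excluded from the table altogether.
 * 'mask' bounds the 'top' handle rotation described above.
 */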
int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
                     u32 reserved_bot, u32 reserved_top)
{
        /* num must be a power of 2 */
        if (num != roundup_pow_of_two(num))
                return -EINVAL;

        bitmap->last = 0;
        bitmap->top  = 0;
        bitmap->max  = num - reserved_top;
        bitmap->mask = mask;
        bitmap->reserved_top = reserved_top;
        spin_lock_init(&bitmap->lock);
        bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
                                sizeof (long), GFP_KERNEL);
        if (!bitmap->table)
                return -ENOMEM;

        bitmap_set(bitmap->table, 0, reserved_bot);

        return 0;
}

void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
{
        kfree(bitmap->table);
}

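/*
 * Sketch of typical bitmap usage, with purely illustrative numbers (a
 * table of 256 objects, the low 8 reserved, none reserved at the top,
 * and num - 1 as the rotation mask); not taken from any real caller:
 *
 *      struct mlx4_bitmap bitmap;
 *      u32 obj;
 *
 *      if (mlx4_bitmap_init(&bitmap, 256, 255, 8, 0))
 *              return -ENOMEM;
 *      obj = mlx4_bitmap_alloc(&bitmap);       first free object: 8
 *      if (obj != (u32) -1)
 *              mlx4_bitmap_free(&bitmap, obj);
 *      mlx4_bitmap_cleanup(&bitmap);
 */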
/*
 * Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0.  If the
 * requested size is > max_direct, we split the allocation into
 * multiple pages, so we don't require too much contiguous memory.
 */

int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
                   struct mlx4_buf *buf)
{
        dma_addr_t t;

        if (size <= max_direct) {
                buf->nbufs        = 1;
                buf->npages       = 1;
                buf->page_shift   = get_order(size) + PAGE_SHIFT;
                buf->direct.buf   = dma_alloc_coherent(&dev->pdev->dev,
                                                       size, &t, GFP_KERNEL);
                if (!buf->direct.buf)
                        return -ENOMEM;

                buf->direct.map = t;

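                /*
                 * The buffer will be described to the HCA as npages
                 * pages of 2^page_shift bytes; shrink page_shift
                 * (doubling npages) until the DMA address is aligned
                 * to the page size being claimed.
                 */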
                while (t & ((1 << buf->page_shift) - 1)) {
                        --buf->page_shift;
                        buf->npages *= 2;
                }

                memset(buf->direct.buf, 0, size);
        } else {
                int i;

                buf->direct.buf  = NULL;
                buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE;
                buf->npages      = buf->nbufs;
                buf->page_shift  = PAGE_SHIFT;
                buf->page_list   = kcalloc(buf->nbufs, sizeof(*buf->page_list),
                                           GFP_KERNEL);
                if (!buf->page_list)
                        return -ENOMEM;

                for (i = 0; i < buf->nbufs; ++i) {
                        buf->page_list[i].buf =
                                dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
                                                   &t, GFP_KERNEL);
                        if (!buf->page_list[i].buf)
                                goto err_free;

                        buf->page_list[i].map = t;

                        memset(buf->page_list[i].buf, 0, PAGE_SIZE);
                }

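                /*
                 * On 64-bit kernels, also vmap() the scattered pages
                 * into one contiguous kernel mapping so the buffer can
                 * be addressed linearly via direct.buf; 32-bit kernels
                 * skip this, presumably to conserve scarce vmalloc
                 * address space.
                 */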
                if (BITS_PER_LONG == 64) {
                        struct page **pages;
                        pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
                        if (!pages)
                                goto err_free;
                        for (i = 0; i < buf->nbufs; ++i)
                                pages[i] = virt_to_page(buf->page_list[i].buf);
                        buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
                        kfree(pages);
                        if (!buf->direct.buf)
                                goto err_free;
                }
        }

        return 0;

err_free:
        mlx4_buf_free(dev, size, buf);

        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mlx4_buf_alloc);

void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
{
        int i;

        if (buf->nbufs == 1)
                dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
                                  buf->direct.map);
        else {
                if (BITS_PER_LONG == 64 && buf->direct.buf)
                        vunmap(buf->direct.buf);

                for (i = 0; i < buf->nbufs; ++i)
                        if (buf->page_list[i].buf)
                                dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
                                                  buf->page_list[i].buf,
                                                  buf->page_list[i].map);
                kfree(buf->page_list);
        }
}
EXPORT_SYMBOL_GPL(mlx4_buf_free);

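/*
 * Doorbell records are carved out of shared DMA pages: each pgdir covers
 * one PAGE_SIZE page of 4-byte records, managed by a tiny two-level
 * buddy allocator.  bits[0] (order0) tracks free single records, bits[1]
 * (order1) tracks free aligned pairs; a freshly allocated page starts
 * with every pair free and no odd singles.
 */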
static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
{
        struct mlx4_db_pgdir *pgdir;

        pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
        if (!pgdir)
                return NULL;

        bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2);
        pgdir->bits[0] = pgdir->order0;
        pgdir->bits[1] = pgdir->order1;
        pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
                                            &pgdir->db_dma, GFP_KERNEL);
        if (!pgdir->db_page) {
                kfree(pgdir);
                return NULL;
        }

        return pgdir;
}

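/*
 * Two-order buddy allocation: look for a free block at the requested
 * order first, then one order up.  When an order-1 pair is split for an
 * order-0 request, the other record of the pair (index i ^ 1) is marked
 * free in the order-0 bitmap.
 */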
static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
                                    struct mlx4_db *db, int order)
{
        int o;
        int i;

        for (o = order; o <= 1; ++o) {
                i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o);
                if (i < MLX4_DB_PER_PAGE >> o)
                        goto found;
        }

        return -ENOMEM;

found:
        clear_bit(i, pgdir->bits[o]);

        i <<= o;

        if (o > order)
                set_bit(i ^ 1, pgdir->bits[order]);

        db->u.pgdir = pgdir;
        db->index   = i;
        db->db      = pgdir->db_page + db->index;
        db->dma     = pgdir->db_dma  + db->index * 4;
        db->order   = order;

        return 0;
}

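/*
 * Allocate a doorbell record, preferring free slots in already-allocated
 * pages; only when every existing pgdir is full is a new DMA page added.
 * pgdir_mutex serializes both the list walk and list updates.
 */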
int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_db_pgdir *pgdir;
        int ret = 0;

        mutex_lock(&priv->pgdir_mutex);

        list_for_each_entry(pgdir, &priv->pgdir_list, list)
                if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
                        goto out;

        pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev));
        if (!pgdir) {
                ret = -ENOMEM;
                goto out;
        }

        list_add(&pgdir->list, &priv->pgdir_list);

        /* This should never fail -- we just allocated an empty page: */
        WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order));

out:
        mutex_unlock(&priv->pgdir_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(mlx4_db_alloc);

void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int o;
        int i;

        mutex_lock(&priv->pgdir_mutex);

        o = db->order;
        i = db->index;

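        /*
         * Coalesce: if this order-0 record's buddy is also free, clear
         * the buddy and release the pair at order 1 instead.  When every
         * pair in the page is free again, the whole page is returned.
         */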
        if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
                clear_bit(i ^ 1, db->u.pgdir->order0);
                ++o;
        }
        i >>= o;
        set_bit(i, db->u.pgdir->bits[o]);

        if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
                dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
                                  db->u.pgdir->db_page, db->u.pgdir->db_dma);
                list_del(&db->u.pgdir->list);
                kfree(db->u.pgdir);
        }

        mutex_unlock(&priv->pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx4_db_free);

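/*
 * Convenience bundle for a hardware work queue: allocate a doorbell
 * record and the queue buffer, then create MTT entries and point them at
 * the buffer pages so the HCA can address the queue.
 */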
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
                       int size, int max_direct)
{
        int err;

        err = mlx4_db_alloc(dev, &wqres->db, 1);
        if (err)
                return err;

        *wqres->db.db = 0;

        err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf);
        if (err)
                goto err_db;

        err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,
                            &wqres->mtt);
        if (err)
                goto err_buf;

        err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
        if (err)
                goto err_mtt;

        return 0;

err_mtt:
        mlx4_mtt_cleanup(dev, &wqres->mtt);
err_buf:
        mlx4_buf_free(dev, size, &wqres->buf);
err_db:
        mlx4_db_free(dev, &wqres->db);

        return err;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res);

void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
                       int size)
{
        mlx4_mtt_cleanup(dev, &wqres->mtt);
        mlx4_buf_free(dev, size, &wqres->buf);
        mlx4_db_free(dev, &wqres->db);
}
EXPORT_SYMBOL_GPL(mlx4_free_hwq_res);
