linux/drivers/net/mlx4/alloc.c
/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>

#include "mlx4.h"

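/*
 * Allocate one object from the bitmap, scanning round-robin from the
 * position of the last allocation.  If the scan runs off the end, the
 * rolling "top" prefix is advanced (modulo the mask) and the search
 * restarts from bit 0.  Returns the object number with the top bits
 * OR'ed in, or -1 (i.e. 0xffffffff, since the return type is u32)
 * when the bitmap is full.
 */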
u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
{
        u32 obj;

        spin_lock(&bitmap->lock);

        obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
        if (obj >= bitmap->max) {
                bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
                                & bitmap->mask;
                obj = find_first_zero_bit(bitmap->table, bitmap->max);
        }

        if (obj < bitmap->max) {
                set_bit(obj, bitmap->table);
                bitmap->last = (obj + 1);
                if (bitmap->last == bitmap->max)
                        bitmap->last = 0;
                obj |= bitmap->top;
        } else
                obj = -1;

        spin_unlock(&bitmap->lock);

        return obj;
}

void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
{
        mlx4_bitmap_free_range(bitmap, obj, 1);
}

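/*
 * Find a run of 'len' consecutive zero bits starting at a multiple of
 * 'align', searching from 'start' within a bitmap of 'nbits' bits.
 * Returns the first bit of the run, or -1 if no such run exists.
 * Caller must hold the bitmap lock.
 */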
static unsigned long find_aligned_range(unsigned long *bitmap,
                                        u32 start, u32 nbits,
                                        int len, int align)
{
        unsigned long end, i;

again:
        start = ALIGN(start, align);

        while ((start < nbits) && test_bit(start, bitmap))
                start += align;

        if (start >= nbits)
                return -1;

        end = start + len;
        if (end > nbits)
                return -1;

        for (i = start + 1; i < end; i++) {
                if (test_bit(i, bitmap)) {
                        start = i + 1;
                        goto again;
                }
        }

        return start;
}

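/*
 * Allocate 'cnt' contiguous objects aligned to a multiple of 'align',
 * falling back to mlx4_bitmap_alloc() for the common single-object
 * case.  Returns the first object number (with the top bits OR'ed in),
 * or -1 as a u32 if no suitable free range exists.
 */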
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
{
        u32 obj, i;

        if (likely(cnt == 1 && align == 1))
                return mlx4_bitmap_alloc(bitmap);

        spin_lock(&bitmap->lock);

        obj = find_aligned_range(bitmap->table, bitmap->last,
                                 bitmap->max, cnt, align);
        if (obj >= bitmap->max) {
                bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
                                & bitmap->mask;
                obj = find_aligned_range(bitmap->table, 0, bitmap->max,
                                         cnt, align);
        }

        if (obj < bitmap->max) {
                for (i = 0; i < cnt; i++)
                        set_bit(obj + i, bitmap->table);
                if (obj == bitmap->last) {
                        bitmap->last = (obj + cnt);
                        if (bitmap->last >= bitmap->max)
                                bitmap->last = 0;
                }
                obj |= bitmap->top;
        } else
                obj = -1;

        spin_unlock(&bitmap->lock);

        return obj;
}

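/*
 * Free 'cnt' contiguous objects starting at 'obj'.  The rolling top
 * bits are masked off first; 'last' is pulled back so the freed range
 * is rediscovered quickly, and 'top' is advanced so that reused object
 * numbers come back with a different prefix.
 */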
void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
{
        u32 i;

        obj &= bitmap->max + bitmap->reserved_top - 1;

        spin_lock(&bitmap->lock);
        for (i = 0; i < cnt; i++)
                clear_bit(obj + i, bitmap->table);
        bitmap->last = min(bitmap->last, obj);
        bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
                        & bitmap->mask;
        spin_unlock(&bitmap->lock);
}

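/*
 * Initialize a bitmap of 'num' objects ('num' must be a power of 2).
 * The bottom 'reserved_bot' objects are marked in-use and never handed
 * out; the top 'reserved_top' objects are excluded from the table
 * entirely.  'mask' selects which bits of returned object numbers the
 * rolling "top" prefix may touch.
 *
 * A minimal usage sketch -- the sizes and reserved counts below are
 * illustrative only, not values taken from the driver:
 *
 *	struct mlx4_bitmap bm;
 *	u32 obj;
 *
 *	if (!mlx4_bitmap_init(&bm, 1 << 10, (1 << 10) - 1, 16, 0))
 *		obj = mlx4_bitmap_alloc(&bm);
 */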
int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
                     u32 reserved_bot, u32 reserved_top)
{
        int i;

        /* num must be a power of 2 */
        if (num != roundup_pow_of_two(num))
                return -EINVAL;

        bitmap->last = 0;
        bitmap->top  = 0;
        bitmap->max  = num - reserved_top;
        bitmap->mask = mask;
        bitmap->reserved_top = reserved_top;
        spin_lock_init(&bitmap->lock);
        bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
                                sizeof (long), GFP_KERNEL);
        if (!bitmap->table)
                return -ENOMEM;

        for (i = 0; i < reserved_bot; ++i)
                set_bit(i, bitmap->table);

        return 0;
}

void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
{
        kfree(bitmap->table);
}

/*
 * Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0.  If the
 * requested size is > max_direct, we split the allocation into
 * multiple pages, so we don't require too much contiguous memory.
 */

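/*
 * On 64-bit architectures the multi-page case below additionally
 * vmap()s the pages into one contiguous kernel virtual mapping, so
 * buf->direct.buf remains usable even when the buffer is split across
 * pages.
 */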
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
                   struct mlx4_buf *buf)
{
        dma_addr_t t;

        if (size <= max_direct) {
                buf->nbufs        = 1;
                buf->npages       = 1;
                buf->page_shift   = get_order(size) + PAGE_SHIFT;
                buf->direct.buf   = dma_alloc_coherent(&dev->pdev->dev,
                                                       size, &t, GFP_KERNEL);
                if (!buf->direct.buf)
                        return -ENOMEM;

                buf->direct.map = t;

                while (t & ((1 << buf->page_shift) - 1)) {
                        --buf->page_shift;
                        buf->npages *= 2;
                }

                memset(buf->direct.buf, 0, size);
        } else {
                int i;

                buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE;
                buf->npages      = buf->nbufs;
                buf->page_shift  = PAGE_SHIFT;
                buf->page_list   = kzalloc(buf->nbufs * sizeof *buf->page_list,
                                           GFP_KERNEL);
                if (!buf->page_list)
                        return -ENOMEM;

                for (i = 0; i < buf->nbufs; ++i) {
                        buf->page_list[i].buf =
                                dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
                                                   &t, GFP_KERNEL);
                        if (!buf->page_list[i].buf)
                                goto err_free;

                        buf->page_list[i].map = t;

                        memset(buf->page_list[i].buf, 0, PAGE_SIZE);
                }

                if (BITS_PER_LONG == 64) {
                        struct page **pages;
                        pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
                        if (!pages)
                                goto err_free;
                        for (i = 0; i < buf->nbufs; ++i)
                                pages[i] = virt_to_page(buf->page_list[i].buf);
                        buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
                        kfree(pages);
                        if (!buf->direct.buf)
                                goto err_free;
                }
        }

        return 0;

err_free:
        mlx4_buf_free(dev, size, buf);

        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mlx4_buf_alloc);

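/*
 * Free a buffer allocated with mlx4_buf_alloc().  'size' must match
 * the size passed at allocation time; the direct and page-list cases
 * are told apart by nbufs.
 */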
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
{
        int i;

        if (buf->nbufs == 1)
                dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
                                  buf->direct.map);
        else {
                if (BITS_PER_LONG == 64)
                        vunmap(buf->direct.buf);

                for (i = 0; i < buf->nbufs; ++i)
                        if (buf->page_list[i].buf)
                                dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
                                                  buf->page_list[i].buf,
                                                  buf->page_list[i].map);
                kfree(buf->page_list);
        }
}
EXPORT_SYMBOL_GPL(mlx4_buf_free);

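/*
 * Doorbell records are allocated in page-sized chunks ("pgdirs") and
 * handed out in units of 1 (order 0) or 2 (order 1) records, buddy
 * style.  In these bitmaps a set bit means "free": order1 starts with
 * every pair free, order0 collects single records left over when a
 * pair is split.
 */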
static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
{
        struct mlx4_db_pgdir *pgdir;

        pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
        if (!pgdir)
                return NULL;

        bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2);
        pgdir->bits[0] = pgdir->order0;
        pgdir->bits[1] = pgdir->order1;
        pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
                                            &pgdir->db_dma, GFP_KERNEL);
        if (!pgdir->db_page) {
                kfree(pgdir);
                return NULL;
        }

        return pgdir;
}

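/*
 * Try to take a doorbell record of the given order from one pgdir.
 * A free chunk of the smallest sufficient order is claimed; if it is
 * larger than requested, the unused buddy half is returned to the
 * lower-order bitmap.  Returns -ENOMEM if this page has no suitable
 * free chunk.
 */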
static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
                                    struct mlx4_db *db, int order)
{
        int o;
        int i;

        for (o = order; o <= 1; ++o) {
                i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o);
                if (i < MLX4_DB_PER_PAGE >> o)
                        goto found;
        }

        return -ENOMEM;

found:
        clear_bit(i, pgdir->bits[o]);

        i <<= o;

        if (o > order)
                set_bit(i ^ 1, pgdir->bits[order]);

        db->u.pgdir = pgdir;
        db->index   = i;
        db->db      = pgdir->db_page + db->index;
        db->dma     = pgdir->db_dma  + db->index * 4;
        db->order   = order;

        return 0;
}

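/*
 * Allocate a doorbell record of the given order, first from the
 * existing pgdirs and, failing that, from a freshly allocated page.
 * Serialized by priv->pgdir_mutex.
 */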
int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_db_pgdir *pgdir;
        int ret = 0;

        mutex_lock(&priv->pgdir_mutex);

        list_for_each_entry(pgdir, &priv->pgdir_list, list)
                if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
                        goto out;

        pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev));
        if (!pgdir) {
                ret = -ENOMEM;
                goto out;
        }

        list_add(&pgdir->list, &priv->pgdir_list);

        /* This should never fail -- we just allocated an empty page: */
        WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order));

out:
        mutex_unlock(&priv->pgdir_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(mlx4_db_alloc);

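/*
 * Return a doorbell record to its pgdir, merging it with its buddy
 * when both halves of an order-1 pair become free again.  Once every
 * record in the page is free (order1 completely set), the page itself
 * is freed and unlinked.
 */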
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int o;
        int i;

        mutex_lock(&priv->pgdir_mutex);

        o = db->order;
        i = db->index;

        if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
                clear_bit(i ^ 1, db->u.pgdir->order0);
                ++o;
        }
        i >>= o;
        set_bit(i, db->u.pgdir->bits[o]);

        if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
                dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
                                  db->u.pgdir->db_page, db->u.pgdir->db_dma);
                list_del(&db->u.pgdir->list);
                kfree(db->u.pgdir);
        }

        mutex_unlock(&priv->pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx4_db_free);

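/*
 * Convenience helper that sets up everything a hardware work queue
 * needs: a zeroed doorbell record, the queue buffer itself, an MTT
 * for it, and the buffer-to-MTT mapping, unwinding in reverse order
 * on failure.
 */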
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
                       int size, int max_direct)
{
        int err;

        err = mlx4_db_alloc(dev, &wqres->db, 1);
        if (err)
                return err;

        *wqres->db.db = 0;

        err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf);
        if (err)
                goto err_db;

        err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,
                            &wqres->mtt);
        if (err)
                goto err_buf;

        err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
        if (err)
                goto err_mtt;

        return 0;

err_mtt:
        mlx4_mtt_cleanup(dev, &wqres->mtt);
err_buf:
        mlx4_buf_free(dev, size, &wqres->buf);
err_db:
        mlx4_db_free(dev, &wqres->db);

        return err;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res);

void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
                       int size)
{
        mlx4_mtt_cleanup(dev, &wqres->mtt);
        mlx4_buf_free(dev, size, &wqres->buf);
        mlx4_db_free(dev, &wqres->db);
}
EXPORT_SYMBOL_GPL(mlx4_free_hwq_res);