linux/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/mlx5/driver.h>

#include "mlx5_core.h"

/* Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0.  If the
 * requested size is > max_direct, we split the allocation into
 * multiple pages, so we don't require too much contiguous memory.
 */

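/*
 * Usage sketch (illustrative, not from this file; assumes a probed
 * device "dev" and a 16 KB queue that is allowed to fall back to
 * per-page allocations):
 *
 *      struct mlx5_buf buf;
 *      int err;
 *
 *      err = mlx5_buf_alloc(dev, 16 * 1024, PAGE_SIZE, &buf);
 *      if (err)
 *              return err;
 *      ...
 *      mlx5_buf_free(dev, &buf);
 */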
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
                   struct mlx5_buf *buf)
{
        dma_addr_t t;

        buf->size = size;
        if (size <= max_direct) {
                buf->nbufs        = 1;
                buf->npages       = 1;
                buf->page_shift   = (u8)get_order(size) + PAGE_SHIFT;
                buf->direct.buf   = dma_zalloc_coherent(&dev->pdev->dev,
                                                        size, &t, GFP_KERNEL);
                if (!buf->direct.buf)
                        return -ENOMEM;

                buf->direct.map = t;

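                /*
                 * Shrink page_shift until the DMA address is aligned
                 * to it, doubling npages each time, so the buffer is
                 * always described as naturally aligned pages.
                 */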
                while (t & ((1 << buf->page_shift) - 1)) {
                        --buf->page_shift;
                        buf->npages *= 2;
                }
        } else {
                int i;

                buf->direct.buf  = NULL;
                buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE;
                buf->npages      = buf->nbufs;
                buf->page_shift  = PAGE_SHIFT;
                buf->page_list   = kcalloc(buf->nbufs, sizeof(*buf->page_list),
                                           GFP_KERNEL);
                if (!buf->page_list)
                        return -ENOMEM;

                for (i = 0; i < buf->nbufs; i++) {
                        buf->page_list[i].buf =
                                dma_zalloc_coherent(&dev->pdev->dev, PAGE_SIZE,
                                                    &t, GFP_KERNEL);
                        if (!buf->page_list[i].buf)
                                goto err_free;

                        buf->page_list[i].map = t;
                }

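                /*
                 * On 64-bit, stitch the scattered pages into one
                 * virtually contiguous mapping so callers can also use
                 * buf->direct.buf; skipped on 32-bit, where vmalloc
                 * address space is scarce.
                 */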
                if (BITS_PER_LONG == 64) {
                        struct page **pages;

                        pages = kmalloc_array(buf->nbufs, sizeof(*pages),
                                              GFP_KERNEL);
                        if (!pages)
                                goto err_free;
                        for (i = 0; i < buf->nbufs; i++)
                                pages[i] = virt_to_page(buf->page_list[i].buf);
                        buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP,
                                               PAGE_KERNEL);
                        kfree(pages);
                        if (!buf->direct.buf)
                                goto err_free;
                }
        }

        return 0;

err_free:
        mlx5_buf_free(dev, buf);

        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mlx5_buf_alloc);

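/*
 * Free a buffer allocated with mlx5_buf_alloc(): drop the vmap cover
 * (64-bit only) and release the coherent pages, or the single direct
 * allocation.
 */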
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
{
        int i;

        if (buf->nbufs == 1) {
                dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
                                  buf->direct.map);
        } else {
                if (BITS_PER_LONG == 64)
                        vunmap(buf->direct.buf);

                for (i = 0; i < buf->nbufs; i++)
                        if (buf->page_list[i].buf)
                                dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
                                                  buf->page_list[i].buf,
                                                  buf->page_list[i].map);
                kfree(buf->page_list);
        }
}
EXPORT_SYMBOL_GPL(mlx5_buf_free);

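/*
 * Allocate one page of doorbell records plus a bitmap in which a set
 * bit marks a free record slot.
 */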
static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device)
{
        struct mlx5_db_pgdir *pgdir;

        pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
        if (!pgdir)
                return NULL;

        bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);
        pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
                                            &pgdir->db_dma, GFP_KERNEL);
        if (!pgdir->db_page) {
                kfree(pgdir);
                return NULL;
        }

        return pgdir;
}

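/*
 * Carve a doorbell record out of @pgdir, or return -ENOMEM if every
 * slot in the page is already taken.
 */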
static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
                                    struct mlx5_db *db)
{
        int offset;
        int i;

        i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE);
        if (i >= MLX5_DB_PER_PAGE)
                return -ENOMEM;

        __clear_bit(i, pgdir->bitmap);

        db->u.pgdir = pgdir;
        db->index   = i;
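        /* Doorbell records are spaced one cacheline apart. */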
        offset = db->index * L1_CACHE_BYTES;
        db->db      = pgdir->db_page + offset / sizeof(*pgdir->db_page);
        db->dma     = pgdir->db_dma  + offset;

        return 0;
}

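/*
 * Allocate a doorbell record, reusing a partly used page of records
 * when one exists and allocating a fresh page otherwise.
 *
 * Usage sketch (illustrative, not from this file; assumes a probed
 * device "dev"):
 *
 *      struct mlx5_db db;
 *      int err;
 *
 *      err = mlx5_db_alloc(dev, &db);
 *      if (err)
 *              return err;
 *      ...
 *      mlx5_db_free(dev, &db);
 */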
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
        struct mlx5_db_pgdir *pgdir;
        int ret = 0;

        mutex_lock(&dev->priv.pgdir_mutex);

        list_for_each_entry(pgdir, &dev->priv.pgdir_list, list)
                if (!mlx5_alloc_db_from_pgdir(pgdir, db))
                        goto out;

        pgdir = mlx5_alloc_db_pgdir(&dev->pdev->dev);
        if (!pgdir) {
                ret = -ENOMEM;
                goto out;
        }

        list_add(&pgdir->list, &dev->priv.pgdir_list);

        /* This should never fail -- we just allocated an empty page: */
        WARN_ON(mlx5_alloc_db_from_pgdir(pgdir, db));

out:
        mutex_unlock(&dev->priv.pgdir_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc);

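/*
 * Return a doorbell record to its page; once every slot in the page
 * is free again, the whole page is released.
 */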
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
        mutex_lock(&dev->priv.pgdir_mutex);

        __set_bit(db->index, db->u.pgdir->bitmap);

        if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) {
                dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
                                  db->u.pgdir->db_page, db->u.pgdir->db_dma);
                list_del(&db->u.pgdir->list);
                kfree(db->u.pgdir);
        }

        mutex_unlock(&dev->priv.pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx5_db_free);

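/*
 * Write the buffer's page addresses into @pas in big-endian form for
 * the HCA: one address per page list entry, or, for a direct buffer,
 * addresses derived from the single mapping at page_shift strides.
 */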
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
{
        u64 addr;
        int i;

        for (i = 0; i < buf->npages; i++) {
                if (buf->nbufs == 1)
                        addr = buf->direct.map + (i << buf->page_shift);
                else
                        addr = buf->page_list[i].map;

                pas[i] = cpu_to_be64(addr);
        }
}
EXPORT_SYMBOL_GPL(mlx5_fill_page_array);