linux/drivers/net/mlx4/icm.c
/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"
#include "fw.h"

/*
 * We allocate ICM in chunks that are as large as possible, up to a
 * maximum of 256 KB per chunk.
 */
enum {
        MLX4_ICM_ALLOC_SIZE     = 1 << 18,
        MLX4_TABLE_CHUNK_SIZE   = 1 << 18
};
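/*
 * Tear down a chunk of non-coherent ICM: undo the streaming DMA
 * mapping (if the chunk was ever mapped) and return each scatterlist
 * entry's pages to the page allocator.
 */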
static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
        int i;

        if (chunk->nsg > 0)
                pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
                             PCI_DMA_BIDIRECTIONAL);

        for (i = 0; i < chunk->npages; ++i)
                __free_pages(sg_page(&chunk->mem[i]),
                             get_order(chunk->mem[i].length));
}

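/*
 * Tear down a chunk of coherent ICM: each scatterlist entry came from
 * dma_alloc_coherent(), so it must go back through dma_free_coherent()
 * with the same length and DMA address.
 */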
static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
        int i;

        for (i = 0; i < chunk->npages; ++i)
                dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
                                  lowmem_page_address(sg_page(&chunk->mem[i])),
                                  sg_dma_address(&chunk->mem[i]));
}

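/*
 * Free an entire ICM area: release every chunk on the list using
 * whichever method matches how it was allocated, then free the chunk
 * structures and the mlx4_icm itself.
 */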
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
{
        struct mlx4_icm_chunk *chunk, *tmp;

        if (!icm)
                return;

        list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
                if (coherent)
                        mlx4_free_icm_coherent(dev, chunk);
                else
                        mlx4_free_icm_pages(dev, chunk);

                kfree(chunk);
        }

        kfree(icm);
}

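/*
 * Fill one scatterlist entry with a single high-order page allocation;
 * the caller DMA-maps the whole scatterlist later via pci_map_sg().
 */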
static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
{
        struct page *page;

        page = alloc_pages(gfp_mask, order);
        if (!page)
                return -ENOMEM;

        sg_set_page(mem, page, PAGE_SIZE << order, 0);
        return 0;
}

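/*
 * Fill one scatterlist entry from dma_alloc_coherent(), which hands
 * back the CPU and DMA addresses together, so no later mapping step is
 * needed.  sg_dma_len() must be set by hand because this entry never
 * passes through pci_map_sg().
 */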
static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
                                    int order, gfp_t gfp_mask)
{
        void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
                                       &sg_dma_address(mem), gfp_mask);
        if (!buf)
                return -ENOMEM;

        sg_set_buf(mem, buf, PAGE_SIZE << order);
        BUG_ON(mem->offset);
        sg_dma_len(mem) = PAGE_SIZE << order;
        return 0;
}

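/*
 * Allocate an ICM area of npages pages as a chain of chunks.  Each
 * chunk holds up to MLX4_ICM_CHUNK_LEN scatterlist entries, and each
 * entry is filled with the largest allocation order that still fits
 * the remaining page count, dropping to smaller orders as the
 * allocator fails.  Non-coherent chunks are DMA-mapped as they fill
 * up, with a final mapping for the last, partially filled chunk.
 */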
struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
                                gfp_t gfp_mask, int coherent)
{
        struct mlx4_icm *icm;
        struct mlx4_icm_chunk *chunk = NULL;
        int cur_order;
        int ret;

        /* We use sg_set_buf for coherent allocs, which assumes low memory */
        BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

        icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
        if (!icm)
                return NULL;

        icm->refcount = 0;
        INIT_LIST_HEAD(&icm->chunk_list);

        cur_order = get_order(MLX4_ICM_ALLOC_SIZE);

        while (npages > 0) {
                if (!chunk) {
                        chunk = kmalloc(sizeof *chunk,
                                        gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
                        if (!chunk)
                                goto fail;

                        sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
                        chunk->npages = 0;
                        chunk->nsg    = 0;
                        list_add_tail(&chunk->list, &icm->chunk_list);
                }

                while (1 << cur_order > npages)
                        --cur_order;

                if (coherent)
                        ret = mlx4_alloc_icm_coherent(&dev->pdev->dev,
                                                      &chunk->mem[chunk->npages],
                                                      cur_order, gfp_mask);
                else
                        ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
                                                   cur_order, gfp_mask);

                if (!ret) {
                        ++chunk->npages;

                        if (coherent)
                                ++chunk->nsg;
                        else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
                                chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
                                                        chunk->npages,
                                                        PCI_DMA_BIDIRECTIONAL);

                                if (chunk->nsg <= 0)
                                        goto fail;

                                chunk = NULL;
                        }

                        npages -= 1 << cur_order;
                } else {
                        --cur_order;
                        if (cur_order < 0)
                                goto fail;
                }
        }

        if (!coherent && chunk) {
                chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
                                        chunk->npages,
                                        PCI_DMA_BIDIRECTIONAL);

                if (chunk->nsg <= 0)
                        goto fail;
        }

        return icm;

fail:
        mlx4_free_icm(dev, icm, coherent);
        return NULL;
}

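/*
 * Thin wrappers around the firmware mailbox commands: the MAP_ICM
 * variants pass the chunks' DMA addresses to the device via
 * mlx4_map_cmd(), while the UNMAP variants only need a virtual start
 * address and a page count.
 */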
static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
{
        return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
}

int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
        return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
                        MLX4_CMD_TIME_CLASS_B);
}

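/*
 * Map a single page into the device's ICM address space.  The mailbox
 * carries one (virtual address, DMA address) pair in big-endian
 * format, and the input modifier of 1 tells the firmware that exactly
 * one entry follows.
 */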
int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt)
{
        struct mlx4_cmd_mailbox *mailbox;
        __be64 *inbox;
        int err;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        inbox = mailbox->buf;

        inbox[0] = cpu_to_be64(virt);
        inbox[1] = cpu_to_be64(dma_addr);

        err = mlx4_cmd(dev, mailbox->dma, 1, 0, MLX4_CMD_MAP_ICM,
                       MLX4_CMD_TIME_CLASS_B);

        mlx4_free_cmd_mailbox(dev, mailbox);

        if (!err)
                mlx4_dbg(dev, "Mapped page at %llx to %llx for ICM.\n",
                          (unsigned long long) dma_addr, (unsigned long long) virt);

        return err;
}

int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
        return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
}

int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
{
        return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX, MLX4_CMD_TIME_CLASS_B);
}

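/*
 * Take a reference on the ICM chunk backing object number obj,
 * allocating and mapping that chunk on first use.  One chunk covers
 * MLX4_TABLE_CHUNK_SIZE / obj_size consecutive objects.
 */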
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
{
        int i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
        int ret = 0;

        mutex_lock(&table->mutex);

        if (table->icm[i]) {
                ++table->icm[i]->refcount;
                goto out;
        }

        table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
                                       (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
                                       __GFP_NOWARN, table->coherent);
        if (!table->icm[i]) {
                ret = -ENOMEM;
                goto out;
        }

        if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
                         (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
                mlx4_free_icm(dev, table->icm[i], table->coherent);
                table->icm[i] = NULL;
                ret = -ENOMEM;
                goto out;
        }

        ++table->icm[i]->refcount;

out:
        mutex_unlock(&table->mutex);
        return ret;
}

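/*
 * Drop a reference on the chunk backing object number obj; the last
 * put unmaps the chunk from the device and frees its memory.
 */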
void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
{
        int i;

        i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);

        mutex_lock(&table->mutex);

        if (--table->icm[i]->refcount == 0) {
                /* Cast as in mlx4_table_get() so the offset can't
                 * overflow int arithmetic for large tables. */
                mlx4_UNMAP_ICM(dev, table->virt + (u64) i * MLX4_TABLE_CHUNK_SIZE,
                               MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
                mlx4_free_icm(dev, table->icm[i], table->coherent);
                table->icm[i] = NULL;
        }

        mutex_unlock(&table->mutex);
}

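/*
 * Translate an object index into a CPU virtual address (and,
 * optionally, a DMA address) by walking the backing chunk's
 * scatterlist.  Only valid for lowmem tables, since the result relies
 * on lowmem_page_address().
 */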
void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle)
{
        int idx, offset, dma_offset, i;
        struct mlx4_icm_chunk *chunk;
        struct mlx4_icm *icm;
        struct page *page = NULL;

        if (!table->lowmem)
                return NULL;

        mutex_lock(&table->mutex);

        idx = (obj & (table->num_obj - 1)) * table->obj_size;
        icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
        dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE;

        if (!icm)
                goto out;

        list_for_each_entry(chunk, &icm->chunk_list, list) {
                for (i = 0; i < chunk->npages; ++i) {
                        if (dma_handle && dma_offset >= 0) {
                                if (sg_dma_len(&chunk->mem[i]) > dma_offset)
                                        *dma_handle = sg_dma_address(&chunk->mem[i]) +
                                                dma_offset;
                                dma_offset -= sg_dma_len(&chunk->mem[i]);
                        }
                        /*
                         * DMA mapping can merge pages but not split them,
                         * so if we found the page, dma_handle has already
                         * been assigned to.
                         */
                        if (chunk->mem[i].length > offset) {
                                page = sg_page(&chunk->mem[i]);
                                goto out;
                        }
                        offset -= chunk->mem[i].length;
                }
        }

out:
        mutex_unlock(&table->mutex);
        return page ? lowmem_page_address(page) + offset : NULL;
}

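/*
 * Range helpers: take or drop one reference for every chunk backing
 * objects in [start, end], stepping by objects-per-chunk; get unwinds
 * already-taken references on failure.
 */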
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
                         int start, int end)
{
        int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
        int i, err;

        for (i = start; i <= end; i += inc) {
                err = mlx4_table_get(dev, table, i);
                if (err)
                        goto fail;
        }

        return 0;

fail:
        while (i > start) {
                i -= inc;
                mlx4_table_put(dev, table, i);
        }

        return err;
}

void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
                          int start, int end)
{
        int i;

        for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
                mlx4_table_put(dev, table, i);
}

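/*
 * Initialize a table: size the chunk-pointer array from the object
 * count, record the table geometry, and pre-allocate, map, and
 * permanently reference the chunks that cover the first 'reserved'
 * objects, which belong to the firmware and must never be freed.
 */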
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
                        u64 virt, int obj_size, int nobj, int reserved,
                        int use_lowmem, int use_coherent)
{
        int obj_per_chunk;
        int num_icm;
        unsigned chunk_size;
        int i;

        obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
        num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;

        table->icm      = kcalloc(num_icm, sizeof *table->icm, GFP_KERNEL);
        if (!table->icm)
                return -ENOMEM;
        table->virt     = virt;
        table->num_icm  = num_icm;
        table->num_obj  = nobj;
        table->obj_size = obj_size;
        table->lowmem   = use_lowmem;
        table->coherent = use_coherent;
        mutex_init(&table->mutex);

        for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
                chunk_size = MLX4_TABLE_CHUNK_SIZE;
                if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > nobj * obj_size)
                        chunk_size = PAGE_ALIGN(nobj * obj_size - i * MLX4_TABLE_CHUNK_SIZE);

                table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
                                               (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
                                               __GFP_NOWARN, use_coherent);
                if (!table->icm[i])
                        goto err;
                if (mlx4_MAP_ICM(dev, table->icm[i],
                                 virt + (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
                        mlx4_free_icm(dev, table->icm[i], use_coherent);
                        table->icm[i] = NULL;
                        goto err;
                }

                /*
                 * Add a reference to this ICM chunk so that it never
                 * gets freed (since it contains reserved firmware objects).
                 */
                ++table->icm[i]->refcount;
        }

        return 0;

err:
        for (i = 0; i < num_icm; ++i)
                if (table->icm[i]) {
                        mlx4_UNMAP_ICM(dev, virt + (u64) i * MLX4_TABLE_CHUNK_SIZE,
                                       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
                        mlx4_free_icm(dev, table->icm[i], use_coherent);
                }

        /* Don't leak the chunk-pointer array on failure. */
        kfree(table->icm);

        return -ENOMEM;
}

void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
{
        int i;

        for (i = 0; i < table->num_icm; ++i)
                if (table->icm[i]) {
                        mlx4_UNMAP_ICM(dev, table->virt + (u64) i * MLX4_TABLE_CHUNK_SIZE,
                                       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
                        mlx4_free_icm(dev, table->icm[i], table->coherent);
                }

        kfree(table->icm);
}
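
/*
 * Typical lifecycle of an ICM table, as a rough sketch; the variable
 * names and parameter values below are purely illustrative:
 *
 *      struct mlx4_icm_table table;
 *      dma_addr_t dma;
 *      void *addr;
 *      int err;
 *
 *      err = mlx4_init_icm_table(dev, &table, icm_virt, obj_size,
 *                                nobj, nreserved, 1, 0);
 *      err = mlx4_table_get(dev, &table, obj);
 *      addr = mlx4_table_find(&table, obj, &dma);
 *      ...
 *      mlx4_table_put(dev, &table, obj);
 *      mlx4_cleanup_icm_table(dev, &table);
 */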