linux/drivers/infiniband/core/fmr_pool.c
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <linux/kthread.h>

#include <rdma/ib_fmr_pool.h>

#include "core_priv.h"

#define PFX "fmr_pool: "

enum {
        IB_FMR_MAX_REMAPS = 32,

        IB_FMR_HASH_BITS  = 8,
        IB_FMR_HASH_SIZE  = 1 << IB_FMR_HASH_BITS,
        IB_FMR_HASH_MASK  = IB_FMR_HASH_SIZE - 1
};

/*
 * If an FMR is not in use, then the list member will point to either
 * its pool's free_list (if the FMR can be mapped again; that is,
 * remap_count < pool->max_remaps) or its pool's dirty_list (if the
 * FMR needs to be unmapped before being remapped).  In either of
 * these cases it is a bug if the ref_count is not 0.  In other words,
 * if ref_count is > 0, then the list member must not be linked into
 * either free_list or dirty_list.
 *
 * The cache_node member is used to link the FMR into a cache bucket
 * (if caching is enabled).  This is independent of the reference
 * count of the FMR.  When a valid FMR is released, its ref_count is
 * decremented, and if ref_count reaches 0, the FMR is placed in
 * either free_list or dirty_list as appropriate.  However, it is not
 * removed from the cache and may be "revived" if a call to
 * ib_fmr_pool_map_phys() occurs before the FMR is remapped.  In
 * this case we just increment the ref_count and remove the FMR from
 * free_list/dirty_list.
 *
 * Before we remap an FMR from free_list, we remove it from the cache
 * (to prevent another user from obtaining a stale FMR).  When an FMR
 * is released, we add it to the tail of the free list, so that our
 * cache eviction policy is "least recently used."
 *
 * All manipulation of ref_count, list and cache_node is protected by
 * pool_lock to maintain consistency.
 */

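/*
 * Typical consumer flow (a minimal sketch; the field values below are
 * illustrative only, not recommendations):
 *
 *      struct ib_fmr_pool_param params = {
 *              .max_pages_per_fmr = 64,
 *              .page_shift        = PAGE_SHIFT,
 *              .access            = IB_ACCESS_LOCAL_WRITE,
 *              .pool_size         = 32,
 *              .dirty_watermark   = 8,
 *              .cache             = 1,
 *      };
 *      struct ib_fmr_pool *pool = ib_create_fmr_pool(pd, &params);
 *      struct ib_pool_fmr *fmr;
 *
 *      fmr = ib_fmr_pool_map_phys(pool, page_list, list_len, io_addr);
 *      if (!IS_ERR(fmr)) {
 *              ... post work requests using fmr->fmr->lkey/rkey ...
 *              ib_fmr_pool_unmap(fmr);
 *      }
 *      ib_destroy_fmr_pool(pool);
 */
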
struct ib_fmr_pool {
        spinlock_t                pool_lock;

        int                       pool_size;
        int                       max_pages;
        int                       max_remaps;
        int                       dirty_watermark;
        int                       dirty_len;
        struct list_head          free_list;
        struct list_head          dirty_list;
        struct hlist_head        *cache_bucket;

        void                     (*flush_function)(struct ib_fmr_pool *pool,
                                                   void *              arg);
        void                     *flush_arg;

        struct task_struct       *thread;

        atomic_t                  req_ser;
        atomic_t                  flush_ser;

        wait_queue_head_t         force_wait;
};

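/*
 * Cache buckets are selected by hashing the first page address of a
 * mapping; both halves of the 64-bit address feed the hash.
 */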
static inline u32 ib_fmr_hash(u64 first_page)
{
        return jhash_2words((u32) first_page, (u32) (first_page >> 32), 0) &
                IB_FMR_HASH_MASK;
}

/* Caller must hold pool_lock */
static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
                                                      u64 *page_list,
                                                      int  page_list_len,
                                                      u64  io_virtual_address)
{
        struct hlist_head *bucket;
        struct ib_pool_fmr *fmr;

        if (!pool->cache_bucket)
                return NULL;

        bucket = pool->cache_bucket + ib_fmr_hash(*page_list);

        hlist_for_each_entry(fmr, bucket, cache_node)
                if (io_virtual_address == fmr->io_virtual_address &&
                    page_list_len      == fmr->page_list_len      &&
                    !memcmp(page_list, fmr->page_list,
                            page_list_len * sizeof *page_list))
                        return fmr;

        return NULL;
}

static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
{
        int                 ret;
        struct ib_pool_fmr *fmr;
        LIST_HEAD(unmap_list);
        LIST_HEAD(fmr_list);

        spin_lock_irq(&pool->pool_lock);

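        /*
         * Collect the low-level ib_fmr handles on fmr_list for the
         * ib_unmap_fmr() call below, while unmap_list takes the pool
         * entries themselves so they can be spliced back onto the
         * free list afterwards.
         */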
        list_for_each_entry(fmr, &pool->dirty_list, list) {
                hlist_del_init(&fmr->cache_node);
                fmr->remap_count = 0;
                list_add_tail(&fmr->fmr->list, &fmr_list);

#ifdef DEBUG
                if (fmr->ref_count != 0) {
                        pr_warn(PFX "Unmapping FMR %p with ref count %d\n",
                                fmr, fmr->ref_count);
                }
#endif
        }

        list_splice_init(&pool->dirty_list, &unmap_list);
        pool->dirty_len = 0;

        spin_unlock_irq(&pool->pool_lock);

        if (list_empty(&unmap_list))
                return;

        ret = ib_unmap_fmr(&fmr_list);
        if (ret)
                pr_warn(PFX "ib_unmap_fmr returned %d\n", ret);

        spin_lock_irq(&pool->pool_lock);
        list_splice(&unmap_list, &pool->free_list);
        spin_unlock_irq(&pool->pool_lock);
}

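/*
 * The cleanup thread and ib_flush_fmr_pool() synchronize through two
 * serial counters: req_ser is bumped to request a flush, and flush_ser
 * is bumped after each completed batch release.  The thread runs while
 * flush_ser lags behind req_ser, and waiters in ib_flush_fmr_pool()
 * sleep on force_wait until flush_ser catches up to their request.
 */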
static int ib_fmr_cleanup_thread(void *pool_ptr)
{
        struct ib_fmr_pool *pool = pool_ptr;

        do {
                if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
                        ib_fmr_batch_release(pool);

                        atomic_inc(&pool->flush_ser);
                        wake_up_interruptible(&pool->force_wait);

                        if (pool->flush_function)
                                pool->flush_function(pool, pool->flush_arg);
                }

                set_current_state(TASK_INTERRUPTIBLE);
                if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
                    !kthread_should_stop())
                        schedule();
                __set_current_state(TASK_RUNNING);
        } while (!kthread_should_stop());

        return 0;
}

/**
 * ib_create_fmr_pool - Create an FMR pool
 * @pd: Protection domain for FMRs
 * @params: FMR pool parameters
 *
 * Create a pool of FMRs.  Return value is pointer to new pool or
 * error code if creation failed.
 */
struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd             *pd,
                                       struct ib_fmr_pool_param *params)
{
        struct ib_device   *device;
        struct ib_fmr_pool *pool;
        int i;
        int ret;
        int max_remaps;

        if (!params)
                return ERR_PTR(-EINVAL);

        device = pd->device;
        if (!device->alloc_fmr    || !device->dealloc_fmr  ||
            !device->map_phys_fmr || !device->unmap_fmr) {
                pr_info(PFX "Device %s does not support FMRs\n", device->name);
                return ERR_PTR(-ENOSYS);
        }

        if (!device->attrs.max_map_per_fmr)
                max_remaps = IB_FMR_MAX_REMAPS;
        else
                max_remaps = device->attrs.max_map_per_fmr;

        pool = kmalloc(sizeof *pool, GFP_KERNEL);
        if (!pool)
                return ERR_PTR(-ENOMEM);

        pool->cache_bucket   = NULL;
        pool->flush_function = params->flush_function;
        pool->flush_arg      = params->flush_arg;

        INIT_LIST_HEAD(&pool->free_list);
        INIT_LIST_HEAD(&pool->dirty_list);

        if (params->cache) {
                pool->cache_bucket =
                        kmalloc_array(IB_FMR_HASH_SIZE,
                                      sizeof(*pool->cache_bucket),
                                      GFP_KERNEL);
                if (!pool->cache_bucket) {
                        ret = -ENOMEM;
                        goto out_free_pool;
                }

                for (i = 0; i < IB_FMR_HASH_SIZE; ++i)
                        INIT_HLIST_HEAD(pool->cache_bucket + i);
        }

        pool->pool_size       = 0;
        pool->max_pages       = params->max_pages_per_fmr;
        pool->max_remaps      = max_remaps;
        pool->dirty_watermark = params->dirty_watermark;
        pool->dirty_len       = 0;
        spin_lock_init(&pool->pool_lock);
        atomic_set(&pool->req_ser,   0);
        atomic_set(&pool->flush_ser, 0);
        init_waitqueue_head(&pool->force_wait);

        pool->thread = kthread_run(ib_fmr_cleanup_thread,
                                   pool,
                                   "ib_fmr(%s)",
                                   device->name);
        if (IS_ERR(pool->thread)) {
                pr_warn(PFX "couldn't start cleanup thread\n");
                ret = PTR_ERR(pool->thread);
                goto out_free_pool;
        }

        {
                struct ib_pool_fmr *fmr;
                struct ib_fmr_attr fmr_attr = {
                        .max_pages  = params->max_pages_per_fmr,
                        .max_maps   = pool->max_remaps,
                        .page_shift = params->page_shift
                };
                int bytes_per_fmr = sizeof *fmr;

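                /*
                 * When caching is enabled, each mapping's page list is
                 * stored inline, immediately after the struct
                 * ib_pool_fmr itself, so one allocation covers both.
                 */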
                if (pool->cache_bucket)
                        bytes_per_fmr += params->max_pages_per_fmr * sizeof(u64);

                for (i = 0; i < params->pool_size; ++i) {
                        fmr = kmalloc(bytes_per_fmr, GFP_KERNEL);
                        if (!fmr)
                                goto out_fail;

                        fmr->pool             = pool;
                        fmr->remap_count      = 0;
                        fmr->ref_count        = 0;
                        INIT_HLIST_NODE(&fmr->cache_node);

                        fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr);
                        if (IS_ERR(fmr->fmr)) {
                                pr_warn(PFX "fmr_create failed for FMR %d\n",
                                        i);
                                kfree(fmr);
                                goto out_fail;
                        }

                        list_add_tail(&fmr->list, &pool->free_list);
                        ++pool->pool_size;
                }
        }

        return pool;

 out_free_pool:
        kfree(pool->cache_bucket);
        kfree(pool);

        return ERR_PTR(ret);

 out_fail:
        ib_destroy_fmr_pool(pool);

        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_fmr_pool);

/**
 * ib_destroy_fmr_pool - Free FMR pool
 * @pool: FMR pool to free
 *
 * Destroy an FMR pool and free all associated resources.
 */
void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
{
        struct ib_pool_fmr *fmr;
        struct ib_pool_fmr *tmp;
        LIST_HEAD(fmr_list);
        int                 i;

        kthread_stop(pool->thread);
        ib_fmr_batch_release(pool);

        i = 0;
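        /*
         * An FMR that has been mapped at least once must be unmapped
         * before its underlying ib_fmr can be deallocated.
         */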
        list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
                if (fmr->remap_count) {
                        INIT_LIST_HEAD(&fmr_list);
                        list_add_tail(&fmr->fmr->list, &fmr_list);
                        ib_unmap_fmr(&fmr_list);
                }
                ib_dealloc_fmr(fmr->fmr);
                list_del(&fmr->list);
                kfree(fmr);
                ++i;
        }

        if (i < pool->pool_size)
                pr_warn(PFX "pool still has %d regions registered\n",
                        pool->pool_size - i);

        kfree(pool->cache_bucket);
        kfree(pool);
}
EXPORT_SYMBOL(ib_destroy_fmr_pool);

/**
 * ib_flush_fmr_pool - Invalidate all unmapped FMRs
 * @pool: FMR pool to flush
 *
 * Ensure that all unmapped FMRs are fully invalidated.
 */
int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
{
        int serial;
        struct ib_pool_fmr *fmr, *next;

        /*
         * The free_list holds FMRs that may have been used
         * but have not been remapped enough times to be dirty.
         * Put them on the dirty list now so that the cleanup
         * thread will reap them too.
         */
        spin_lock_irq(&pool->pool_lock);
        list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
                if (fmr->remap_count > 0)
                        list_move(&fmr->list, &pool->dirty_list);
        }
        spin_unlock_irq(&pool->pool_lock);

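        /*
         * Request a flush by advancing req_ser, then wait until
         * flush_ser catches up to our serial number; the signed
         * difference keeps the comparison correct across wraparound.
         */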
        serial = atomic_inc_return(&pool->req_ser);
        wake_up_process(pool->thread);

        if (wait_event_interruptible(pool->force_wait,
                                     atomic_read(&pool->flush_ser) - serial >= 0))
                return -EINTR;

        return 0;
}
EXPORT_SYMBOL(ib_flush_fmr_pool);

/**
 * ib_fmr_pool_map_phys - Map an FMR from an FMR pool
 * @pool_handle: FMR pool to allocate FMR from
 * @page_list: List of pages to map
 * @list_len: Number of pages in @page_list
 * @io_virtual_address: I/O virtual address for new FMR
 *
 * Map an FMR from an FMR pool.
 */
struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
                                         u64                *page_list,
                                         int                 list_len,
                                         u64                 io_virtual_address)
{
        struct ib_fmr_pool *pool = pool_handle;
        struct ib_pool_fmr *fmr;
        unsigned long       flags;
        int                 result;

        if (list_len < 1 || list_len > pool->max_pages)
                return ERR_PTR(-EINVAL);

        spin_lock_irqsave(&pool->pool_lock, flags);
        fmr = ib_fmr_cache_lookup(pool,
                                  page_list,
                                  list_len,
                                  io_virtual_address);
        if (fmr) {
                /* found in cache */
                ++fmr->ref_count;
                if (fmr->ref_count == 1)
                        list_del(&fmr->list);

                spin_unlock_irqrestore(&pool->pool_lock, flags);

                return fmr;
        }

        if (list_empty(&pool->free_list)) {
                spin_unlock_irqrestore(&pool->pool_lock, flags);
                return ERR_PTR(-EAGAIN);
        }

        fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list);
        list_del(&fmr->list);
        hlist_del_init(&fmr->cache_node);
        spin_unlock_irqrestore(&pool->pool_lock, flags);

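        /*
         * Map outside pool_lock: the FMR has already been removed from
         * the free list and the cache, so no other thread can claim it
         * while the verb call is in flight.
         */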
        result = ib_map_phys_fmr(fmr->fmr, page_list, list_len,
                                 io_virtual_address);

        if (result) {
                spin_lock_irqsave(&pool->pool_lock, flags);
                list_add(&fmr->list, &pool->free_list);
                spin_unlock_irqrestore(&pool->pool_lock, flags);

                pr_warn(PFX "fmr_map returns %d\n", result);

                return ERR_PTR(result);
        }

        ++fmr->remap_count;
        fmr->ref_count = 1;

        if (pool->cache_bucket) {
                fmr->io_virtual_address = io_virtual_address;
                fmr->page_list_len      = list_len;
                memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list));

                spin_lock_irqsave(&pool->pool_lock, flags);
                hlist_add_head(&fmr->cache_node,
                               pool->cache_bucket + ib_fmr_hash(fmr->page_list[0]));
                spin_unlock_irqrestore(&pool->pool_lock, flags);
        }

        return fmr;
}
EXPORT_SYMBOL(ib_fmr_pool_map_phys);


/**
 * ib_fmr_pool_unmap - Unmap FMR
 * @fmr: FMR to unmap
 *
 * Unmap an FMR.  The FMR mapping may remain valid until the FMR is
 * reused (or until ib_flush_fmr_pool() is called).
 */
int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
{
        struct ib_fmr_pool *pool;
        unsigned long flags;

        pool = fmr->pool;

        spin_lock_irqsave(&pool->pool_lock, flags);

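        /*
         * Drop the caller's reference; once it reaches zero, the FMR
         * goes back on the free list if it can still be remapped, or
         * on the dirty list (waking the cleanup thread once the
         * watermark is reached) if it must be unmapped first.
         */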
        --fmr->ref_count;
        if (!fmr->ref_count) {
                if (fmr->remap_count < pool->max_remaps) {
                        list_add_tail(&fmr->list, &pool->free_list);
                } else {
                        list_add_tail(&fmr->list, &pool->dirty_list);
                        if (++pool->dirty_len >= pool->dirty_watermark) {
                                atomic_inc(&pool->req_ser);
                                wake_up_process(pool->thread);
                        }
                }
        }

#ifdef DEBUG
        if (fmr->ref_count < 0)
                pr_warn(PFX "FMR %p has ref count %d < 0\n",
                        fmr, fmr->ref_count);
#endif

        spin_unlock_irqrestore(&pool->pool_lock, flags);

        return 0;
}
EXPORT_SYMBOL(ib_fmr_pool_unmap);