linux/drivers/infiniband/core/fmr_pool.c
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <linux/kthread.h>

#include <rdma/ib_fmr_pool.h>

#include "core_priv.h"

#define PFX "fmr_pool: "

enum {
        IB_FMR_MAX_REMAPS = 32,

        IB_FMR_HASH_BITS  = 8,
        IB_FMR_HASH_SIZE  = 1 << IB_FMR_HASH_BITS,
        IB_FMR_HASH_MASK  = IB_FMR_HASH_SIZE - 1
};

/*
 * If an FMR is not in use, then the list member will point to either
 * its pool's free_list (if the FMR can be mapped again; that is,
 * remap_count < pool->max_remaps) or its pool's dirty_list (if the
 * FMR needs to be unmapped before being remapped).  In either of
 * these cases it is a bug if the ref_count is not 0.  In other words,
 * if ref_count is > 0, then the list member must not be linked into
 * either free_list or dirty_list.
 *
 * The cache_node member is used to link the FMR into a cache bucket
 * (if caching is enabled).  This is independent of the reference
 * count of the FMR.  When a valid FMR is released, its ref_count is
 * decremented, and if ref_count reaches 0, the FMR is placed in
 * either free_list or dirty_list as appropriate.  However, it is not
 * removed from the cache and may be "revived" if a call to
 * ib_fmr_pool_map_phys() hits it in the cache before the FMR is
 * remapped.  In this case we just increment the ref_count and remove
 * the FMR from free_list/dirty_list.
 *
 * Before we remap an FMR from free_list, we remove it from the cache
 * (to prevent another user from obtaining a stale FMR).  When an FMR
 * is released, we add it to the tail of the free list, so that our
 * cache eviction policy is "least recently used."
 *
 * All manipulation of ref_count, list and cache_node is protected by
 * pool_lock to maintain consistency.
 */
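
/*
 * For example (an illustrative sequence, not additional machinery): a
 * mapping request that hits the cache while an FMR still sits on
 * free_list bumps ref_count from 0 to 1 and unlinks the FMR from
 * free_list; the matching ib_fmr_pool_unmap() drops ref_count back to
 * 0 and returns the FMR to the tail of free_list, or to dirty_list
 * once remap_count has reached pool->max_remaps.
 */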

struct ib_fmr_pool {
        spinlock_t                pool_lock;

        int                       pool_size;
        int                       max_pages;
        int                       max_remaps;
        int                       dirty_watermark;
        int                       dirty_len;
        struct list_head          free_list;
        struct list_head          dirty_list;
        struct hlist_head        *cache_bucket;

        void                     (*flush_function)(struct ib_fmr_pool *pool,
                                                   void *              arg);
        void                     *flush_arg;

        struct task_struct       *thread;

        atomic_t                  req_ser;
        atomic_t                  flush_ser;

        wait_queue_head_t         force_wait;
};
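
/*
 * Hash only the first page of a mapping; ib_fmr_cache_lookup() below
 * compares the I/O virtual address and the full page list, so a
 * collision here costs only a longer bucket walk, never a false
 * match.
 */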
static inline u32 ib_fmr_hash(u64 first_page)
{
        return jhash_2words((u32) first_page, (u32) (first_page >> 32), 0) &
                IB_FMR_HASH_MASK;
}

/* Caller must hold pool_lock */
static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
                                                      u64 *page_list,
                                                      int  page_list_len,
                                                      u64  io_virtual_address)
{
        struct hlist_head *bucket;
        struct ib_pool_fmr *fmr;

        if (!pool->cache_bucket)
                return NULL;

        bucket = pool->cache_bucket + ib_fmr_hash(*page_list);

        hlist_for_each_entry(fmr, bucket, cache_node)
                if (io_virtual_address == fmr->io_virtual_address &&
                    page_list_len      == fmr->page_list_len      &&
                    !memcmp(page_list, fmr->page_list,
                            page_list_len * sizeof *page_list))
                        return fmr;

        return NULL;
}

static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
{
        int                 ret;
        struct ib_pool_fmr *fmr;
        LIST_HEAD(unmap_list);
        LIST_HEAD(fmr_list);

        spin_lock_irq(&pool->pool_lock);
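
        /*
         * Two lists are built here: unmap_list collects the pool's
         * ib_pool_fmr entries, while fmr_list chains the underlying
         * struct ib_fmr (through fmr->fmr->list) in the form that
         * ib_unmap_fmr() expects.
         */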
        list_for_each_entry(fmr, &pool->dirty_list, list) {
                hlist_del_init(&fmr->cache_node);
                fmr->remap_count = 0;
                list_add_tail(&fmr->fmr->list, &fmr_list);

#ifdef DEBUG
                if (fmr->ref_count != 0) {
                        printk(KERN_WARNING PFX "Unmapping FMR %p with ref count %d\n",
                               fmr, fmr->ref_count);
                }
#endif
        }

        list_splice_init(&pool->dirty_list, &unmap_list);
        pool->dirty_len = 0;

        spin_unlock_irq(&pool->pool_lock);

        if (list_empty(&unmap_list))
                return;

        ret = ib_unmap_fmr(&fmr_list);
        if (ret)
                printk(KERN_WARNING PFX "ib_unmap_fmr returned %d\n", ret);

        spin_lock_irq(&pool->pool_lock);
        list_splice(&unmap_list, &pool->free_list);
        spin_unlock_irq(&pool->pool_lock);
}

static int ib_fmr_cleanup_thread(void *pool_ptr)
{
        struct ib_fmr_pool *pool = pool_ptr;

        do {
                if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
                        ib_fmr_batch_release(pool);

                        atomic_inc(&pool->flush_ser);
                        wake_up_interruptible(&pool->force_wait);

                        if (pool->flush_function)
                                pool->flush_function(pool, pool->flush_arg);
                }

                set_current_state(TASK_INTERRUPTIBLE);
                if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
                    !kthread_should_stop())
                        schedule();
                __set_current_state(TASK_RUNNING);
        } while (!kthread_should_stop());

        return 0;
}
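
/*
 * Note on the req_ser/flush_ser pair: req_ser counts flush requests
 * and flush_ser counts completed flushes.  ib_flush_fmr_pool() (and
 * the dirty-watermark path in ib_fmr_pool_unmap()) bumps req_ser, and
 * the cleanup thread keeps calling ib_fmr_batch_release(), bumping
 * flush_ser once per pass, until the signed difference is no longer
 * negative; comparing differences rather than raw values lets the
 * counters wrap safely.  For example, with flush_ser == 3 and
 * req_ser == 5, the thread runs two release passes before going back
 * to sleep.
 */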

/**
 * ib_create_fmr_pool - Create an FMR pool
 * @pd: Protection domain for FMRs
 * @params: FMR pool parameters
 *
 * Create a pool of FMRs.  Returns a pointer to the new pool on
 * success, or an ERR_PTR-encoded error code if creation failed.
 */
struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd             *pd,
                                       struct ib_fmr_pool_param *params)
{
        struct ib_device   *device;
        struct ib_fmr_pool *pool;
        struct ib_device_attr *attr;
        int i;
        int ret;
        int max_remaps;

        if (!params)
                return ERR_PTR(-EINVAL);

        device = pd->device;
        if (!device->alloc_fmr    || !device->dealloc_fmr  ||
            !device->map_phys_fmr || !device->unmap_fmr) {
                printk(KERN_INFO PFX "Device %s does not support FMRs\n",
                       device->name);
                return ERR_PTR(-ENOSYS);
        }

        attr = kmalloc(sizeof *attr, GFP_KERNEL);
        if (!attr) {
                printk(KERN_WARNING PFX "couldn't allocate device attr struct\n");
                return ERR_PTR(-ENOMEM);
        }

        ret = ib_query_device(device, attr);
        if (ret) {
                printk(KERN_WARNING PFX "couldn't query device: %d\n", ret);
                kfree(attr);
                return ERR_PTR(ret);
        }

        if (!attr->max_map_per_fmr)
                max_remaps = IB_FMR_MAX_REMAPS;
        else
                max_remaps = attr->max_map_per_fmr;

        kfree(attr);

        pool = kmalloc(sizeof *pool, GFP_KERNEL);
        if (!pool) {
                printk(KERN_WARNING PFX "couldn't allocate pool struct\n");
                return ERR_PTR(-ENOMEM);
        }

        pool->cache_bucket   = NULL;

        pool->flush_function = params->flush_function;
        pool->flush_arg      = params->flush_arg;

        INIT_LIST_HEAD(&pool->free_list);
        INIT_LIST_HEAD(&pool->dirty_list);

        if (params->cache) {
                pool->cache_bucket =
                        kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
                                GFP_KERNEL);
                if (!pool->cache_bucket) {
                        printk(KERN_WARNING PFX "Failed to allocate cache in pool\n");
                        ret = -ENOMEM;
                        goto out_free_pool;
                }

                for (i = 0; i < IB_FMR_HASH_SIZE; ++i)
                        INIT_HLIST_HEAD(pool->cache_bucket + i);
        }

        pool->pool_size       = 0;
        pool->max_pages       = params->max_pages_per_fmr;
        pool->max_remaps      = max_remaps;
        pool->dirty_watermark = params->dirty_watermark;
        pool->dirty_len       = 0;
        spin_lock_init(&pool->pool_lock);
        atomic_set(&pool->req_ser,   0);
        atomic_set(&pool->flush_ser, 0);
        init_waitqueue_head(&pool->force_wait);

        pool->thread = kthread_run(ib_fmr_cleanup_thread,
                                   pool,
                                   "ib_fmr(%s)",
                                   device->name);
        if (IS_ERR(pool->thread)) {
                printk(KERN_WARNING PFX "couldn't start cleanup thread\n");
                ret = PTR_ERR(pool->thread);
                goto out_free_pool;
        }

        {
                struct ib_pool_fmr *fmr;
                struct ib_fmr_attr fmr_attr = {
                        .max_pages  = params->max_pages_per_fmr,
                        .max_maps   = pool->max_remaps,
                        .page_shift = params->page_shift
                };
                int bytes_per_fmr = sizeof *fmr;

                if (pool->cache_bucket)
                        bytes_per_fmr += params->max_pages_per_fmr * sizeof (u64);

                for (i = 0; i < params->pool_size; ++i) {
                        fmr = kmalloc(bytes_per_fmr, GFP_KERNEL);
                        if (!fmr) {
                                printk(KERN_WARNING PFX "failed to allocate fmr "
                                       "struct for FMR %d\n", i);
                                goto out_fail;
                        }

                        fmr->pool             = pool;
                        fmr->remap_count      = 0;
                        fmr->ref_count        = 0;
                        INIT_HLIST_NODE(&fmr->cache_node);

                        fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr);
                        if (IS_ERR(fmr->fmr)) {
                                printk(KERN_WARNING PFX "fmr_create failed "
                                       "for FMR %d\n", i);
                                kfree(fmr);
                                goto out_fail;
                        }

                        list_add_tail(&fmr->list, &pool->free_list);
                        ++pool->pool_size;
                }
        }

        return pool;

 out_free_pool:
        kfree(pool->cache_bucket);
        kfree(pool);

        return ERR_PTR(ret);

 out_fail:
        ib_destroy_fmr_pool(pool);

        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_fmr_pool);
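
/*
 * Example usage (a minimal sketch; the protection domain "pd" and the
 * sizes and access flags below are hypothetical, modeled on typical
 * ULP usage rather than taken from this file):
 *
 *      struct ib_fmr_pool_param params = {
 *              .max_pages_per_fmr = 64,
 *              .page_shift        = PAGE_SHIFT,
 *              .access            = IB_ACCESS_LOCAL_WRITE |
 *                                   IB_ACCESS_REMOTE_READ |
 *                                   IB_ACCESS_REMOTE_WRITE,
 *              .pool_size         = 32,
 *              .dirty_watermark   = 8,
 *              .cache             = 1,
 *      };
 *      struct ib_fmr_pool *pool;
 *
 *      pool = ib_create_fmr_pool(pd, &params);
 *      if (IS_ERR(pool))
 *              return PTR_ERR(pool);
 *      ...
 *      ib_destroy_fmr_pool(pool);
 */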

/**
 * ib_destroy_fmr_pool - Free FMR pool
 * @pool: FMR pool to free
 *
 * Destroy an FMR pool and free all associated resources.
 */
void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
{
        struct ib_pool_fmr *fmr;
        struct ib_pool_fmr *tmp;
        LIST_HEAD(fmr_list);
        int                 i;

        kthread_stop(pool->thread);
        ib_fmr_batch_release(pool);

        i = 0;
        list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
                if (fmr->remap_count) {
                        INIT_LIST_HEAD(&fmr_list);
                        list_add_tail(&fmr->fmr->list, &fmr_list);
                        ib_unmap_fmr(&fmr_list);
                }
                ib_dealloc_fmr(fmr->fmr);
                list_del(&fmr->list);
                kfree(fmr);
                ++i;
        }

        if (i < pool->pool_size)
                printk(KERN_WARNING PFX "pool still has %d regions registered\n",
                       pool->pool_size - i);

        kfree(pool->cache_bucket);
        kfree(pool);
}
EXPORT_SYMBOL(ib_destroy_fmr_pool);

/**
 * ib_flush_fmr_pool - Invalidate all unmapped FMRs
 * @pool: FMR pool to flush
 *
 * Ensure that all unmapped FMRs are fully invalidated.
 */
int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
{
        int serial;
        struct ib_pool_fmr *fmr, *next;

        /*
         * The free_list holds FMRs that may have been used
         * but have not been remapped enough times to be dirty.
         * Put them on the dirty list now so that the cleanup
         * thread will reap them too.
         */
        spin_lock_irq(&pool->pool_lock);
        list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
                if (fmr->remap_count > 0)
                        list_move(&fmr->list, &pool->dirty_list);
        }
        spin_unlock_irq(&pool->pool_lock);

        serial = atomic_inc_return(&pool->req_ser);
        wake_up_process(pool->thread);

        if (wait_event_interruptible(pool->force_wait,
                                     atomic_read(&pool->flush_ser) - serial >= 0))
                return -EINTR;

        return 0;
}
EXPORT_SYMBOL(ib_flush_fmr_pool);
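
/*
 * Example usage (a sketch): a consumer that must guarantee that stale
 * mappings are invalid before reusing buffers, e.g. across a
 * reconnect, can force a flush; the only failure mode is -EINTR, if a
 * signal arrives while waiting for the cleanup thread:
 *
 *      ret = ib_flush_fmr_pool(pool);
 *      if (ret)
 *              return ret;
 */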

/**
 * ib_fmr_pool_map_phys - Map an FMR from an FMR pool
 * @pool_handle: FMR pool to allocate FMR from
 * @page_list: List of pages to map
 * @list_len: Number of pages in @page_list
 * @io_virtual_address: I/O virtual address for new FMR
 *
 * Map an FMR from an FMR pool.  Returns the FMR on success, or an
 * ERR_PTR-encoded error code (-EAGAIN if no unmapped FMR is
 * available) on failure.
 */
struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
                                         u64                *page_list,
                                         int                 list_len,
                                         u64                 io_virtual_address)
{
        struct ib_fmr_pool *pool = pool_handle;
        struct ib_pool_fmr *fmr;
        unsigned long       flags;
        int                 result;

        if (list_len < 1 || list_len > pool->max_pages)
                return ERR_PTR(-EINVAL);

        spin_lock_irqsave(&pool->pool_lock, flags);
        fmr = ib_fmr_cache_lookup(pool,
                                  page_list,
                                  list_len,
                                  io_virtual_address);
        if (fmr) {
                /* found in cache */
                ++fmr->ref_count;
                if (fmr->ref_count == 1)
                        list_del(&fmr->list);

                spin_unlock_irqrestore(&pool->pool_lock, flags);

                return fmr;
        }

        if (list_empty(&pool->free_list)) {
                spin_unlock_irqrestore(&pool->pool_lock, flags);
                return ERR_PTR(-EAGAIN);
        }

        fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list);
        list_del(&fmr->list);
        hlist_del_init(&fmr->cache_node);
        spin_unlock_irqrestore(&pool->pool_lock, flags);

        result = ib_map_phys_fmr(fmr->fmr, page_list, list_len,
                                 io_virtual_address);

        if (result) {
                spin_lock_irqsave(&pool->pool_lock, flags);
                list_add(&fmr->list, &pool->free_list);
                spin_unlock_irqrestore(&pool->pool_lock, flags);

                printk(KERN_WARNING PFX "fmr_map returns %d\n", result);

                return ERR_PTR(result);
        }

        ++fmr->remap_count;
        fmr->ref_count = 1;

        if (pool->cache_bucket) {
                fmr->io_virtual_address = io_virtual_address;
                fmr->page_list_len      = list_len;
                memcpy(fmr->page_list, page_list, list_len * sizeof *page_list);

                spin_lock_irqsave(&pool->pool_lock, flags);
                hlist_add_head(&fmr->cache_node,
                               pool->cache_bucket + ib_fmr_hash(fmr->page_list[0]));
                spin_unlock_irqrestore(&pool->pool_lock, flags);
        }

        return fmr;
}
EXPORT_SYMBOL(ib_fmr_pool_map_phys);
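
/*
 * Example usage (a sketch; "dma_addrs" stands for an array of
 * page-aligned DMA addresses obtained elsewhere, e.g. via
 * ib_dma_map_sg(), and "npages" for its length):
 *
 *      struct ib_pool_fmr *pfmr;
 *
 *      pfmr = ib_fmr_pool_map_phys(pool, dma_addrs, npages,
 *                                  dma_addrs[0]);
 *      if (IS_ERR(pfmr))
 *              return PTR_ERR(pfmr);
 *
 *      ... post work requests using pfmr->fmr->lkey / rkey ...
 *
 *      ib_fmr_pool_unmap(pfmr);
 */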

/**
 * ib_fmr_pool_unmap - Unmap FMR
 * @fmr: FMR to unmap
 *
 * Unmap an FMR.  The FMR mapping may remain valid until the FMR is
 * reused (or until ib_flush_fmr_pool() is called).
 */
int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
{
        struct ib_fmr_pool *pool;
        unsigned long flags;

        pool = fmr->pool;

        spin_lock_irqsave(&pool->pool_lock, flags);

        --fmr->ref_count;
        if (!fmr->ref_count) {
                if (fmr->remap_count < pool->max_remaps) {
                        list_add_tail(&fmr->list, &pool->free_list);
                } else {
                        list_add_tail(&fmr->list, &pool->dirty_list);
                        if (++pool->dirty_len >= pool->dirty_watermark) {
                                atomic_inc(&pool->req_ser);
                                wake_up_process(pool->thread);
                        }
                }
        }

#ifdef DEBUG
        if (fmr->ref_count < 0)
                printk(KERN_WARNING PFX "FMR %p has ref count %d < 0\n",
                       fmr, fmr->ref_count);
#endif

        spin_unlock_irqrestore(&pool->pool_lock, flags);

        return 0;
}
EXPORT_SYMBOL(ib_fmr_pool_unmap);