linux/drivers/infiniband/hw/hfi1/mmu_rb.c
/*
 * Copyright(c) 2016 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree_generic.h>

#include "mmu_rb.h"
#include "trace.h"

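/*
 * Per-registration state: an mmu_notifier paired with an interval RB
 * tree of cached buffer nodes.  The spinlock protects the tree and both
 * lists.  Nodes unlinked from notifier context are parked on del_list
 * and handed to the ops->remove callback later, from del_work running
 * on the caller-supplied workqueue.
 */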
struct mmu_rb_handler {
        struct mmu_notifier mn;
        struct rb_root_cached root;
        void *ops_arg;
        spinlock_t lock;        /* protect the RB tree */
        struct mmu_rb_ops *ops;
        struct mm_struct *mm;
        struct list_head lru_list;
        struct work_struct del_work;
        struct list_head del_list;
        struct workqueue_struct *wq;
};

static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
static inline void mmu_notifier_range_start(struct mmu_notifier *,
                                            struct mm_struct *,
                                            unsigned long, unsigned long);
static void mmu_notifier_mem_invalidate(struct mmu_notifier *,
                                        struct mm_struct *,
                                        unsigned long, unsigned long);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
                                           unsigned long, unsigned long);
static void do_remove(struct mmu_rb_handler *handler,
                      struct list_head *del_list);
static void handle_remove(struct work_struct *work);

static const struct mmu_notifier_ops mn_opts = {
        .invalidate_range_start = mmu_notifier_range_start,
};

INTERVAL_TREE_DEFINE(struct mmu_rb_node, node, unsigned long, __last,
                     mmu_node_start, mmu_node_last, static, __mmu_int_rb);

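/*
 * Nodes are indexed at page granularity: the start address is rounded
 * down and the inclusive last address is rounded up to a page boundary,
 * as expected by the INTERVAL_TREE_DEFINE() template above.
 */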
static unsigned long mmu_node_start(struct mmu_rb_node *node)
{
        return node->addr & PAGE_MASK;
}

static unsigned long mmu_node_last(struct mmu_rb_node *node)
{
        return PAGE_ALIGN(node->addr + node->len) - 1;
}

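/*
 * Allocate a handler for @mm and register its mmu notifier.  On success
 * the new handler is returned through @handler; on failure the
 * allocation is undone and an errno is returned.
 *
 * A minimal caller sketch (hypothetical names, for illustration only):
 *
 *      static struct mmu_rb_ops my_ops = {
 *              .insert = my_insert,
 *              .remove = my_remove,
 *              .invalidate = my_invalidate,
 *              .evict = my_evict,
 *      };
 *
 *      struct mmu_rb_handler *h;
 *      int ret = hfi1_mmu_rb_register(ctx, current->mm, &my_ops, wq, &h);
 */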
int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
                         struct mmu_rb_ops *ops,
                         struct workqueue_struct *wq,
                         struct mmu_rb_handler **handler)
{
        struct mmu_rb_handler *handlr;
        int ret;

        handlr = kmalloc(sizeof(*handlr), GFP_KERNEL);
        if (!handlr)
                return -ENOMEM;

        handlr->root = RB_ROOT_CACHED;
        handlr->ops = ops;
        handlr->ops_arg = ops_arg;
        INIT_HLIST_NODE(&handlr->mn.hlist);
        spin_lock_init(&handlr->lock);
        handlr->mn.ops = &mn_opts;
        handlr->mm = mm;
        INIT_WORK(&handlr->del_work, handle_remove);
        INIT_LIST_HEAD(&handlr->del_list);
        INIT_LIST_HEAD(&handlr->lru_list);
        handlr->wq = wq;

        ret = mmu_notifier_register(&handlr->mn, handlr->mm);
        if (ret) {
                kfree(handlr);
                return ret;
        }

        *handler = handlr;
        return 0;
}

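/*
 * Tear down @handler: detach the mmu notifier first so no new
 * invalidations arrive, flush any deferred del_work, then hand every
 * remaining node to the ops->remove callback before freeing the handler
 * itself.
 */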
void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
{
        struct mmu_rb_node *rbnode;
        struct rb_node *node;
        unsigned long flags;
        struct list_head del_list;

        /* Unregister first so we don't get any more notifications. */
        mmu_notifier_unregister(&handler->mn, handler->mm);

        /*
         * Make sure the wq delete handler is finished running.  It will not
         * be triggered once the mmu notifiers are unregistered above.
         */
        flush_work(&handler->del_work);

        INIT_LIST_HEAD(&del_list);

        spin_lock_irqsave(&handler->lock, flags);
        while ((node = rb_first_cached(&handler->root))) {
                rbnode = rb_entry(node, struct mmu_rb_node, node);
                rb_erase_cached(node, &handler->root);
                /* move from LRU list to delete list */
                list_move(&rbnode->list, &del_list);
        }
        spin_unlock_irqrestore(&handler->lock, flags);

        do_remove(handler, &del_list);

        kfree(handler);
}

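/*
 * Add @mnode to the interval tree and to the MRU end of the LRU list.
 * Fails with -EINVAL if the range overlaps an existing node.  Note that
 * the ops->insert callback runs under the handler spinlock and so must
 * not sleep.
 */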
int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
                       struct mmu_rb_node *mnode)
{
        struct mmu_rb_node *node;
        unsigned long flags;
        int ret = 0;

        trace_hfi1_mmu_rb_insert(mnode->addr, mnode->len);
        spin_lock_irqsave(&handler->lock, flags);
        node = __mmu_rb_search(handler, mnode->addr, mnode->len);
        if (node) {
                ret = -EINVAL;
                goto unlock;
        }
        __mmu_int_rb_insert(mnode, &handler->root);
        list_add(&mnode->list, &handler->lru_list);

        ret = handler->ops->insert(handler->ops_arg, mnode);
        if (ret) {
                __mmu_int_rb_remove(mnode, &handler->root);
                list_del(&mnode->list); /* remove from LRU list */
        }
unlock:
        spin_unlock_irqrestore(&handler->lock, flags);
        return ret;
}

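/*
 * Without a filter callback this returns the first node overlapping
 * [addr, addr + len); with one, it returns the first overlapping node
 * that the filter accepts.
 */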
/* Caller must hold handler lock */
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
                                           unsigned long addr,
                                           unsigned long len)
{
        struct mmu_rb_node *node = NULL;

        trace_hfi1_mmu_rb_search(addr, len);
        if (!handler->ops->filter) {
                node = __mmu_int_rb_iter_first(&handler->root, addr,
                                               (addr + len) - 1);
        } else {
                for (node = __mmu_int_rb_iter_first(&handler->root, addr,
                                                    (addr + len) - 1);
                     node;
                     node = __mmu_int_rb_iter_next(node, addr,
                                                   (addr + len) - 1)) {
                        if (handler->ops->filter(node, addr, len))
                                return node;
                }
        }
        return node;
}

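/*
 * Look up [addr, addr + len).  A node matching (addr, len) exactly is
 * left in place and returned through @rb_node with a false return.  Any
 * other matching node is unlinked from the tree and LRU list and
 * returned with a true return; the caller is then responsible for it.
 * @rb_node is set to NULL when nothing matches.
 */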
bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
                                     unsigned long addr, unsigned long len,
                                     struct mmu_rb_node **rb_node)
{
        struct mmu_rb_node *node;
        unsigned long flags;
        bool ret = false;

        spin_lock_irqsave(&handler->lock, flags);
        node = __mmu_rb_search(handler, addr, len);
        if (node) {
                if (node->addr == addr && node->len == len)
                        goto unlock;
                __mmu_int_rb_remove(node, &handler->root);
                list_del(&node->list); /* remove from LRU list */
                ret = true;
        }
unlock:
        spin_unlock_irqrestore(&handler->lock, flags);
        *rb_node = node;
        return ret;
}

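/*
 * Walk the LRU list from its least-recently-used end, asking ops->evict
 * whether each node may be evicted.  Approved nodes are unlinked under
 * the lock; their ops->remove callbacks run only after the lock is
 * dropped.  ops->evict can end the walk early by setting @stop.
 */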
void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
{
        struct mmu_rb_node *rbnode, *ptr;
        struct list_head del_list;
        unsigned long flags;
        bool stop = false;

        INIT_LIST_HEAD(&del_list);

        spin_lock_irqsave(&handler->lock, flags);
        list_for_each_entry_safe_reverse(rbnode, ptr, &handler->lru_list,
                                         list) {
                if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
                                        &stop)) {
                        __mmu_int_rb_remove(rbnode, &handler->root);
                        /* move from LRU list to delete list */
                        list_move(&rbnode->list, &del_list);
                }
                if (stop)
                        break;
        }
        spin_unlock_irqrestore(&handler->lock, flags);

        while (!list_empty(&del_list)) {
                rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);
                list_del(&rbnode->list);
                handler->ops->remove(handler->ops_arg, rbnode);
        }
}

/*
 * It is up to the caller to ensure that this function does not race with
 * the mmu invalidate notifier, which may be calling the user's remove
 * callback on 'node'.
 */
void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
                        struct mmu_rb_node *node)
{
        unsigned long flags;

        /* Validity of handler and node pointers has been checked by caller. */
        trace_hfi1_mmu_rb_remove(node->addr, node->len);
        spin_lock_irqsave(&handler->lock, flags);
        __mmu_int_rb_remove(node, &handler->root);
        list_del(&node->list); /* remove from LRU list */
        spin_unlock_irqrestore(&handler->lock, flags);

        handler->ops->remove(handler->ops_arg, node);
}

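/*
 * Notifier-side invalidation: for each node overlapping the invalidated
 * range, ops->invalidate decides whether the node must go.  Removal is
 * only queued here; the actual ops->remove calls happen later in
 * handle_remove(), where mm->mmap_sem is no longer held and the
 * callbacks are free to sleep.
 */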
static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
                                            struct mm_struct *mm,
                                            unsigned long start,
                                            unsigned long end)
{
        mmu_notifier_mem_invalidate(mn, mm, start, end);
}

static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn,
                                        struct mm_struct *mm,
                                        unsigned long start, unsigned long end)
{
        struct mmu_rb_handler *handler =
                container_of(mn, struct mmu_rb_handler, mn);
        struct rb_root_cached *root = &handler->root;
        struct mmu_rb_node *node, *ptr = NULL;
        unsigned long flags;
        bool added = false;

        spin_lock_irqsave(&handler->lock, flags);
        for (node = __mmu_int_rb_iter_first(root, start, end - 1);
             node; node = ptr) {
                /* Guard against node removal. */
                ptr = __mmu_int_rb_iter_next(node, start, end - 1);
                trace_hfi1_mmu_mem_invalidate(node->addr, node->len);
                if (handler->ops->invalidate(handler->ops_arg, node)) {
                        __mmu_int_rb_remove(node, root);
                        /* move from LRU list to delete list */
                        list_move(&node->list, &handler->del_list);
                        added = true;
                }
        }
        spin_unlock_irqrestore(&handler->lock, flags);

        if (added)
                queue_work(handler->wq, &handler->del_work);
}

/*
 * Call the remove function for the given handler and the list.  This
 * is expected to be called with a delete list extracted from the
 * handler.  The caller should not be holding the handler lock.
 */
static void do_remove(struct mmu_rb_handler *handler,
                      struct list_head *del_list)
{
        struct mmu_rb_node *node;

        while (!list_empty(del_list)) {
                node = list_first_entry(del_list, struct mmu_rb_node, list);
                list_del(&node->list);
                handler->ops->remove(handler->ops_arg, node);
        }
}

/*
 * Work queue function to remove all nodes that have been queued up to
 * be removed.  The key feature is that mm->mmap_sem is not being held
 * and the remove callback can sleep while taking it, if needed.
 */
static void handle_remove(struct work_struct *work)
{
        struct mmu_rb_handler *handler = container_of(work,
                                                struct mmu_rb_handler,
                                                del_work);
        struct list_head del_list;
        unsigned long flags;

        /* remove anything that is queued to get removed */
        spin_lock_irqsave(&handler->lock, flags);
        list_replace_init(&handler->del_list, &del_list);
        spin_unlock_irqrestore(&handler->lock, flags);

        do_remove(handler, &del_list);
}