// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <cl@linux.com>
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
DEFINE_STATIC_SRCU(srcu);

/*
 * This function allows the mmu_notifier::release callback to delay a call
 * to a function that will free the appropriate resources. The queued
 * function must be quick and must not block.
 */
void mmu_notifier_call_srcu(struct rcu_head *rcu,
                            void (*func)(struct rcu_head *rcu))
{
        call_srcu(&srcu, rcu, func);
}
EXPORT_SYMBOL_GPL(mmu_notifier_call_srcu);
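
/*
 * Hedged usage sketch (not part of the original file): a driver whose
 * ->release callback must not block can tear down its sptes quickly and
 * defer the actual freeing of its per-mm state through
 * mmu_notifier_call_srcu(). "struct example_notifier" and every
 * "example_*" name below are hypothetical, for illustration only.
 */
#if 0	/* illustrative sketch, not compiled */
struct example_notifier {
        struct mmu_notifier mn;
        struct mutex lock;	/* protects the driver's sptes */
        struct rcu_head rcu;
};

static void example_free_notifier(struct rcu_head *rcu)
{
        /* Runs once all SRCU readers have drained; freeing is safe now. */
        kfree(container_of(rcu, struct example_notifier, rcu));
}

static void example_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
        struct example_notifier *en = container_of(mn, struct example_notifier, mn);

        /* ->release must be quick: flush the sptes, then defer the free. */
        mmu_notifier_call_srcu(&en->rcu, example_free_notifier);
}
#endif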

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the mmu_notifier_mm->lock in addition to SRCU, and against the
 * other mmu notifiers with SRCU alone. struct mmu_notifier_mm can't
 * go away from under us as exit_mmap holds an mm_count pin itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
        struct mmu_notifier *mn;
        int id;

        /*
         * SRCU here will block mmu_notifier_unregister until
         * ->release returns.
         */
        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
                /*
                 * If ->release runs before mmu_notifier_unregister it must be
                 * handled, as it's the only way for the driver to flush all
                 * existing sptes and stop the driver from establishing any more
                 * sptes before all the pages in the mm are freed.
                 */
                if (mn->ops->release)
                        mn->ops->release(mn, mm);

        spin_lock(&mm->mmu_notifier_mm->lock);
        while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
                mn = hlist_entry(mm->mmu_notifier_mm->list.first,
                                 struct mmu_notifier,
                                 hlist);
                /*
                 * We arrived before mmu_notifier_unregister so
                 * mmu_notifier_unregister will do nothing other than to wait
                 * for ->release to finish and for mmu_notifier_unregister to
                 * return.
                 */
                hlist_del_init_rcu(&mn->hlist);
        }
        spin_unlock(&mm->mmu_notifier_mm->lock);
        srcu_read_unlock(&srcu, id);

        /*
         * synchronize_srcu here prevents mmu_notifier_release from returning to
         * exit_mmap (which would proceed with freeing all pages in the mm)
         * until the ->release method returns, if it was invoked by
         * mmu_notifier_unregister.
         *
         * The mmu_notifier_mm can't go away from under us because one mm_count
         * is held by exit_mmap.
         */
        synchronize_srcu(&srcu);
}

/*
 * If the hardware does not support a young bitflag, ->clear_flush_young
 * can unmap the address and return 1 or 0 depending on whether the
 * mapping previously existed.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
                                        unsigned long start,
                                        unsigned long end)
{
        struct mmu_notifier *mn;
        int young = 0, id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->clear_flush_young)
                        young |= mn->ops->clear_flush_young(mn, mm, start, end);
        }
        srcu_read_unlock(&srcu, id);

        return young;
}
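
/*
 * Hedged sketch of a secondary-MMU ->clear_flush_young implementation:
 * test-and-clear the accessed state of any secondary ptes covering
 * [start, end) and flush the secondary TLB, so the hardware has to mark
 * the pages accessed again on the next touch. The "example_hw_*" helpers
 * are hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
static int example_clear_flush_young(struct mmu_notifier *mn,
                                     struct mm_struct *mm,
                                     unsigned long start,
                                     unsigned long end)
{
        int young;

        /* Clear the accessed bits in the device page table... */
        young = example_hw_test_and_clear_accessed(mn, start, end);
        /* ...and flush so stale accessed state can't linger in the TLB. */
        example_hw_flush_tlb_range(mn, start, end);

        return young;
}
#endif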

int __mmu_notifier_clear_young(struct mm_struct *mm,
                               unsigned long start,
                               unsigned long end)
{
        struct mmu_notifier *mn;
        int young = 0, id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->clear_young)
                        young |= mn->ops->clear_young(mn, mm, start, end);
        }
        srcu_read_unlock(&srcu, id);

        return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
                              unsigned long address)
{
        struct mmu_notifier *mn;
        int young = 0, id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->test_young) {
                        young = mn->ops->test_young(mn, mm, address);
                        if (young)
                                break;
                }
        }
        srcu_read_unlock(&srcu, id);

        return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
                               pte_t pte)
{
        struct mmu_notifier *mn;
        int id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->change_pte)
                        mn->ops->change_pte(mn, mm, address, pte);
        }
        srcu_read_unlock(&srcu, id);
}

int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
        struct mmu_notifier *mn;
        int ret = 0;
        int id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->invalidate_range_start) {
                        int _ret = mn->ops->invalidate_range_start(mn, range);
                        if (_ret) {
                                pr_info("%pS callback failed with %d in %sblockable context.\n",
                                        mn->ops->invalidate_range_start, _ret,
                                        !mmu_notifier_range_blockable(range) ? "non-" : "");
                                ret = _ret;
                        }
                }
        }
        srcu_read_unlock(&srcu, id);

        return ret;
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_start);
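
/*
 * Hedged sketch of an ->invalidate_range_start callback that honours the
 * blockable flag reported above: when mmu_notifier_range_blockable() is
 * false (e.g. when called from the OOM reaper), a callback that would
 * have to sleep must return -EAGAIN instead. It reuses the hypothetical
 * struct example_notifier from the earlier sketch.
 */
#if 0	/* illustrative sketch, not compiled */
static int example_invalidate_range_start(struct mmu_notifier *mn,
                                          const struct mmu_notifier_range *range)
{
        struct example_notifier *en = container_of(mn, struct example_notifier, mn);

        if (!mutex_trylock(&en->lock)) {
                if (!mmu_notifier_range_blockable(range))
                        return -EAGAIN;	/* can't sleep; caller may retry */
                mutex_lock(&en->lock);
        }
        example_unmap_sptes(en, range->start, range->end);
        mutex_unlock(&en->lock);

        return 0;
}
#endif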

void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
                                         bool only_end)
{
        struct mmu_notifier *mn;
        int id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) {
                /*
                 * Call invalidate_range() here too, to spare the subsystem
                 * from having to register an invalidate_range_end()
                 * call-back when it already has invalidate_range(). Usually
                 * a subsystem registers either invalidate_range_start()/end()
                 * or invalidate_range(), so this adds no overhead besides
                 * the pointer check.
                 *
                 * We skip the call to invalidate_range() when we know it is
                 * safe, i.e. the call site used
                 * mmu_notifier_invalidate_range_only_end(), which is correct
                 * when the call to invalidate_range() already happened under
                 * the page table lock.
                 */
                if (!only_end && mn->ops->invalidate_range)
                        mn->ops->invalidate_range(mn, range->mm,
                                                  range->start,
                                                  range->end);
                if (mn->ops->invalidate_range_end)
                        mn->ops->invalidate_range_end(mn, range);
        }
        srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);
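
/*
 * Hedged caller-side sketch of the bracketing these two hooks expect,
 * loosely modelled on how core mm code zaps a range (the surrounding
 * function is invented; the mmu_notifier_* calls follow this kernel's
 * mmu_notifier.h API):
 */
#if 0	/* illustrative sketch, not compiled */
static void example_zap_range(struct vm_area_struct *vma,
                              unsigned long start, unsigned long end)
{
        struct mmu_notifier_range range;

        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma,
                                vma->vm_mm, start, end);
        mmu_notifier_invalidate_range_start(&range);
        /*
         * ... tear down the primary ptes here. If ptep_clear_flush()
         * already issued mmu_notifier_invalidate_range() under the page
         * table lock, the "only_end" variant below skips the redundant
         * ->invalidate_range call, as described in the comment above.
         */
        mmu_notifier_invalidate_range_only_end(&range);
}
#endif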

void __mmu_notifier_invalidate_range(struct mm_struct *mm,
                                  unsigned long start, unsigned long end)
{
        struct mmu_notifier *mn;
        int id;

        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->invalidate_range)
                        mn->ops->invalidate_range(mn, mm, start, end);
        }
        srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range);
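
/*
 * Hedged sketch of a ->invalidate_range callback. This hook can be called
 * under the page table lock, so it fits hardware such as an IOMMU that
 * shares the CPU page tables and can flush its TLB without sleeping. The
 * "example_hw_*" helper is hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
static void example_invalidate_range(struct mmu_notifier *mn,
                                     struct mm_struct *mm,
                                     unsigned long start, unsigned long end)
{
        /* Must not sleep: just kick a TLB invalidation on the device. */
        example_hw_flush_tlb_range(mn, start, end);
}
#endif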

static int do_mmu_notifier_register(struct mmu_notifier *mn,
                                    struct mm_struct *mm,
                                    int take_mmap_sem)
{
        struct mmu_notifier_mm *mmu_notifier_mm;
        int ret;

        BUG_ON(atomic_read(&mm->mm_users) <= 0);

        ret = -ENOMEM;
        mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
        if (unlikely(!mmu_notifier_mm))
                goto out;

        if (take_mmap_sem)
                down_write(&mm->mmap_sem);
        ret = mm_take_all_locks(mm);
        if (unlikely(ret))
                goto out_clean;

        if (!mm_has_notifiers(mm)) {
                INIT_HLIST_HEAD(&mmu_notifier_mm->list);
                spin_lock_init(&mmu_notifier_mm->lock);

                mm->mmu_notifier_mm = mmu_notifier_mm;
                mmu_notifier_mm = NULL;
        }
        mmgrab(mm);

        /*
         * Serialize the update against mmu_notifier_unregister. A
         * side note: mmu_notifier_release can't run concurrently with
         * us because we hold the mm_users pin (either implicitly as
         * current->mm or explicitly with get_task_mm() or similar).
         * We can't race against any other mmu notifier method either
         * thanks to mm_take_all_locks().
         */
        spin_lock(&mm->mmu_notifier_mm->lock);
        hlist_add_head_rcu(&mn->hlist, &mm->mmu_notifier_mm->list);
        spin_unlock(&mm->mmu_notifier_mm->lock);

        mm_drop_all_locks(mm);
out_clean:
        if (take_mmap_sem)
                up_write(&mm->mmap_sem);
        kfree(mmu_notifier_mm);
out:
        BUG_ON(atomic_read(&mm->mm_users) <= 0);
        return ret;
}

/*
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs, to avoid races with mmu_notifier_release:
 * mm has to be current->mm, or the mm must be pinned safely such as
 * with get_task_mm(). If the mm is not current->mm, the mm_users pin
 * should be released by calling mmput() after mmu_notifier_register()
 * returns. mmu_notifier_unregister() must always be called to
 * unregister the notifier. mm_count is automatically pinned to allow
 * mmu_notifier_unregister() to safely run at any time later, before or
 * after exit_mmap. ->release will always be called before exit_mmap
 * frees the pages.
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
        return do_mmu_notifier_register(mn, mm, 1);
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
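
/*
 * Hedged registration sketch tying the earlier hypothetical callbacks
 * together: allocate the notifier, point it at an ops table and register
 * it against current->mm, which satisfies the mm_users requirement spelled
 * out above. All "example_*" names are hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
static const struct mmu_notifier_ops example_ops = {
        .release                = example_release,
        .clear_flush_young      = example_clear_flush_young,
        .invalidate_range_start = example_invalidate_range_start,
        .invalidate_range       = example_invalidate_range,
};

static int example_attach_to_current_mm(void)
{
        struct example_notifier *en;
        int ret;

        en = kzalloc(sizeof(*en), GFP_KERNEL);
        if (!en)
                return -ENOMEM;
        mutex_init(&en->lock);
        en->mn.ops = &example_ops;

        /* current->mm keeps mm_users > 0 for the duration of the call. */
        ret = mmu_notifier_register(&en->mn, current->mm);
        if (ret)
                kfree(en);

        return ret;
}
#endif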

/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
        return do_mmu_notifier_register(mn, mm, 0);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/* this is called after the last mmu_notifier_unregister() has returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
        BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
        kfree(mm->mmu_notifier_mm);
        mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister has returned are we
 * guaranteed that ->release or any other method can't run any more.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
        BUG_ON(atomic_read(&mm->mm_count) <= 0);

        if (!hlist_unhashed(&mn->hlist)) {
                /*
                 * SRCU here will force exit_mmap to wait for ->release to
                 * finish before freeing the pages.
                 */
                int id;

                id = srcu_read_lock(&srcu);
                /*
                 * exit_mmap will block in mmu_notifier_release to guarantee
                 * that ->release is called before freeing the pages.
                 */
                if (mn->ops->release)
                        mn->ops->release(mn, mm);
                srcu_read_unlock(&srcu, id);

                spin_lock(&mm->mmu_notifier_mm->lock);
                /*
                 * Cannot use hlist_del_rcu() since __mmu_notifier_release
                 * can delete it before we hold the lock.
                 */
                hlist_del_init_rcu(&mn->hlist);
                spin_unlock(&mm->mmu_notifier_mm->lock);
        }

        /*
         * Wait for any running method to finish, of course including
         * ->release if it was run by mmu_notifier_release instead of us.
         */
        synchronize_srcu(&srcu);

        BUG_ON(atomic_read(&mm->mm_count) <= 0);

        mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
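
/*
 * Hedged teardown sketch matching the ordering rules above: drop all sptes
 * first, then unregister; only once mmu_notifier_unregister() has returned
 * (or from an SRCU callback, as in the ->release sketch earlier) is it
 * safe to free the notifier. "example_*" names are hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
static void example_detach(struct example_notifier *en, struct mm_struct *mm)
{
        example_drop_all_sptes(en);             /* no sptes may outlive this */
        mmu_notifier_unregister(&en->mn, mm);   /* no method can run after */
        kfree(en);                              /* now safe to free */
}
#endif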

/*
 * Same as mmu_notifier_unregister but no callback and no SRCU
 * synchronization.
 */
void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
                                        struct mm_struct *mm)
{
        spin_lock(&mm->mmu_notifier_mm->lock);
        /*
         * Cannot use hlist_del_rcu() since __mmu_notifier_release
         * can delete it before we hold the lock.
         */
        hlist_del_init_rcu(&mn->hlist);
        spin_unlock(&mm->mmu_notifier_mm->lock);

        BUG_ON(atomic_read(&mm->mm_count) <= 0);
        mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister_no_release);

bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range)
{
        if (!range->vma || range->event != MMU_NOTIFY_PROTECTION_VMA)
                return false;
        /* Return true if the vma still has the read flag set. */
        return range->vma->vm_flags & VM_READ;
}
EXPORT_SYMBOL_GPL(mmu_notifier_range_update_to_read_only);
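
/*
 * Hedged sketch of how a driver might use the helper above from its
 * ->invalidate_range_start callback: a protection change that leaves the
 * vma readable can downgrade secondary mappings to read-only instead of
 * tearing them down. The "example_*" helpers are hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
static int example_wp_or_invalidate(struct mmu_notifier *mn,
                                    const struct mmu_notifier_range *range)
{
        if (mmu_notifier_range_update_to_read_only(range))
                example_downgrade_sptes_to_readonly(mn, range->start,
                                                    range->end);
        else
                example_unmap_sptes_range(mn, range->start, range->end);

        return 0;
}
#endif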