linux/fs/dax.c
/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset.  */
#define PG_PMD_COLOUR   ((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR       (PMD_SIZE >> PAGE_SHIFT)
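/*
 * For example, with 4k pages and 2MiB PMDs (as on x86-64), PG_PMD_COLOUR
 * is 511 and PG_PMD_NR is 512.
 */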

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
        int i;

        for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
                init_waitqueue_head(wait_table + i);
        return 0;
}
fs_initcall(init_dax_wait_table);

/*
 * We use the lowest available bit in an exceptional entry for locking, one
 * bit for the entry size (PMD) and two more to tell us if the entry is a
 * zero page or an empty entry that is just used for locking.  In total four
 * special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define RADIX_DAX_SHIFT         (RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
#define RADIX_DAX_ENTRY_LOCK    (1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
#define RADIX_DAX_PMD           (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
#define RADIX_DAX_ZERO_PAGE     (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
#define RADIX_DAX_EMPTY         (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))

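/*
 * Illustrative sketch: assuming RADIX_TREE_EXCEPTIONAL_SHIFT is 2, a locked
 * PMD entry for pfn 0x100 is built by dax_radix_locked_entry(0x100,
 * RADIX_DAX_PMD) as
 *
 *        (0x100 << RADIX_DAX_SHIFT) | RADIX_DAX_PMD |
 *                RADIX_TREE_EXCEPTIONAL_ENTRY | RADIX_DAX_ENTRY_LOCK
 *
 * and dax_radix_pfn() below recovers 0x100 by shifting the flag bits back
 * out.
 */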
static unsigned long dax_radix_pfn(void *entry)
{
        return (unsigned long)entry >> RADIX_DAX_SHIFT;
}

static void *dax_radix_locked_entry(unsigned long pfn, unsigned long flags)
{
        return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
                        (pfn << RADIX_DAX_SHIFT) | RADIX_DAX_ENTRY_LOCK);
}

static unsigned int dax_radix_order(void *entry)
{
        if ((unsigned long)entry & RADIX_DAX_PMD)
                return PMD_SHIFT - PAGE_SHIFT;
        return 0;
}

static int dax_is_pmd_entry(void *entry)
{
        return (unsigned long)entry & RADIX_DAX_PMD;
}

static int dax_is_pte_entry(void *entry)
{
        return !((unsigned long)entry & RADIX_DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
        return (unsigned long)entry & RADIX_DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
        return (unsigned long)entry & RADIX_DAX_EMPTY;
}

/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
        struct address_space *mapping;
        pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
        wait_queue_entry_t wait;
        struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
                pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
        unsigned long hash;

        /*
         * If 'entry' is a PMD, align the 'index' that we use for the wait
         * queue to the start of that PMD.  This ensures that all offsets in
         * the range covered by the PMD map to the same bit lock.
         */
        if (dax_is_pmd_entry(entry))
                index &= ~PG_PMD_COLOUR;

        key->mapping = mapping;
        key->entry_start = index;

        hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
        return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode,
                                       int sync, void *keyp)
{
        struct exceptional_entry_key *key = keyp;
        struct wait_exceptional_entry_queue *ewait =
                container_of(wait, struct wait_exceptional_entry_queue, wait);

        if (key->mapping != ewait->key.mapping ||
            key->entry_start != ewait->key.entry_start)
                return 0;
        return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * @entry may no longer be the entry at the index in the mapping.
 * The important information it's conveying is whether the entry at
 * this index used to be a PMD entry.
 */
static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
                pgoff_t index, void *entry, bool wake_all)
{
        struct exceptional_entry_key key;
        wait_queue_head_t *wq;

        wq = dax_entry_waitqueue(mapping, index, entry, &key);

        /*
         * Checking for locked entry and prepare_to_wait_exclusive() happens
         * under the i_pages lock, ditto for entry handling in our callers.
         * So at this point all tasks that could have seen our entry locked
         * must be in the waitqueue and the following check will see them.
         */
        if (waitqueue_active(wq))
                __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

/*
 * Check whether the given slot is locked.  Must be called with the i_pages
 * lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
        unsigned long entry = (unsigned long)
                radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
        return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot as locked.  Must be called with the i_pages lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
        unsigned long entry = (unsigned long)
                radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);

        entry |= RADIX_DAX_ENTRY_LOCK;
        radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
        return (void *)entry;
}

/*
 * Mark the given slot as unlocked.  Must be called with the i_pages lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
        unsigned long entry = (unsigned long)
                radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);

        entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
        radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
        return (void *)entry;
}

/*
 * Look up an entry in the radix tree and, if it is an exceptional entry, wait
 * for it to become unlocked before returning it. The caller must call
 * put_unlocked_mapping_entry() if it decides not to lock the entry, or
 * put_locked_mapping_entry() once it has locked the entry and wants to
 * unlock it again.
 *
 * Must be called with the i_pages lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
                                        pgoff_t index, void ***slotp)
{
        void *entry, **slot;
        struct wait_exceptional_entry_queue ewait;
        wait_queue_head_t *wq;

        init_wait(&ewait.wait);
        ewait.wait.func = wake_exceptional_entry_func;

        for (;;) {
                entry = __radix_tree_lookup(&mapping->i_pages, index, NULL,
                                          &slot);
                if (!entry ||
                    WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)) ||
                    !slot_locked(mapping, slot)) {
                        if (slotp)
                                *slotp = slot;
                        return entry;
                }

                wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
                prepare_to_wait_exclusive(wq, &ewait.wait,
                                          TASK_UNINTERRUPTIBLE);
                xa_unlock_irq(&mapping->i_pages);
                schedule();
                finish_wait(wq, &ewait.wait);
                xa_lock_irq(&mapping->i_pages);
        }
}

static void dax_unlock_mapping_entry(struct address_space *mapping,
                                     pgoff_t index)
{
        void *entry, **slot;

        xa_lock_irq(&mapping->i_pages);
        entry = __radix_tree_lookup(&mapping->i_pages, index, NULL, &slot);
        if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
                         !slot_locked(mapping, slot))) {
                xa_unlock_irq(&mapping->i_pages);
                return;
        }
        unlock_slot(mapping, slot);
        xa_unlock_irq(&mapping->i_pages);
        dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

static void put_locked_mapping_entry(struct address_space *mapping,
                pgoff_t index)
{
        dax_unlock_mapping_entry(mapping, index);
}

/*
 * Called when we are done with a radix tree entry that we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
                                       pgoff_t index, void *entry)
{
        if (!entry)
                return;

        /* We have to wake up the next waiter for the radix tree entry lock */
        dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}
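
/*
 * Illustrative usage of the locking helpers above, distilled from the
 * callers below:
 *
 *        xa_lock_irq(&mapping->i_pages);
 *        entry = get_unlocked_mapping_entry(mapping, index, &slot);
 *        if (entry)
 *                entry = lock_slot(mapping, slot);   // take the lock bit
 *        xa_unlock_irq(&mapping->i_pages);
 *        ... operate on the entry ...
 *        put_locked_mapping_entry(mapping, index);   // unlock, wake waiters
 */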

static unsigned long dax_entry_size(void *entry)
{
        if (dax_is_zero_entry(entry))
                return 0;
        else if (dax_is_empty_entry(entry))
                return 0;
        else if (dax_is_pmd_entry(entry))
                return PMD_SIZE;
        else
                return PAGE_SIZE;
}

static unsigned long dax_radix_end_pfn(void *entry)
{
        return dax_radix_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}

/*
 * Iterate through all mapped pfns represented by an entry, i.e. skip
 * 'empty' and 'zero' entries.
 */
#define for_each_mapped_pfn(entry, pfn) \
        for (pfn = dax_radix_pfn(entry); \
                        pfn < dax_radix_end_pfn(entry); pfn++)

static void dax_associate_entry(void *entry, struct address_space *mapping)
{
        unsigned long pfn;

        if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
                return;

        for_each_mapped_pfn(entry, pfn) {
                struct page *page = pfn_to_page(pfn);

                WARN_ON_ONCE(page->mapping);
                page->mapping = mapping;
        }
}

static void dax_disassociate_entry(void *entry, struct address_space *mapping,
                bool trunc)
{
        unsigned long pfn;

        if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
                return;

        for_each_mapped_pfn(entry, pfn) {
                struct page *page = pfn_to_page(pfn);

                WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
                WARN_ON_ONCE(page->mapping && page->mapping != mapping);
                page->mapping = NULL;
        }
}

/*
 * Find the radix tree entry at the given index. If it points to an
 * exceptional entry, return it with the radix tree entry locked. If the
 * radix tree doesn't contain the given index, create an empty exceptional
 * entry for the index and return with it locked.
 *
 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return an error.  This error will
 * happen if there are any 4k entries within the 2MiB range that we are
 * requesting.
 *
 * We always favor 4k entries over 2MiB entries. There isn't a flow where we
 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 * insertion will fail if it finds any 4k entries already in the tree, and a
 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 * downgraded to 4k entries.  This happens for 2MiB huge zero pages as well
 * as for 2MiB empty entries.
 *
 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 * real storage backing them.  We will leave these real 2MiB DAX entries in
 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
                unsigned long size_flag)
{
        bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
        void *entry, **slot;

restart:
        xa_lock_irq(&mapping->i_pages);
        entry = get_unlocked_mapping_entry(mapping, index, &slot);

        if (WARN_ON_ONCE(entry && !radix_tree_exceptional_entry(entry))) {
                entry = ERR_PTR(-EIO);
                goto out_unlock;
        }

        if (entry) {
                if (size_flag & RADIX_DAX_PMD) {
                        if (dax_is_pte_entry(entry)) {
                                put_unlocked_mapping_entry(mapping, index,
                                                entry);
                                entry = ERR_PTR(-EEXIST);
                                goto out_unlock;
                        }
                } else { /* trying to grab a PTE entry */
                        if (dax_is_pmd_entry(entry) &&
                            (dax_is_zero_entry(entry) ||
                             dax_is_empty_entry(entry))) {
                                pmd_downgrade = true;
                        }
                }
        }

        /* No entry for given index? Make sure radix tree is big enough. */
        if (!entry || pmd_downgrade) {
                int err;

                if (pmd_downgrade) {
                        /*
                         * Make sure 'entry' remains valid while we drop
                         * the i_pages lock.
                         */
                        entry = lock_slot(mapping, slot);
                }

                xa_unlock_irq(&mapping->i_pages);
                /*
                 * Besides huge zero pages the only other thing that gets
                 * downgraded are empty entries which don't need to be
                 * unmapped.
                 */
                if (pmd_downgrade && dax_is_zero_entry(entry))
                        unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
                                                        PG_PMD_NR, false);

                err = radix_tree_preload(
                                mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
                if (err) {
                        if (pmd_downgrade)
                                put_locked_mapping_entry(mapping, index);
                        return ERR_PTR(err);
                }
                xa_lock_irq(&mapping->i_pages);

                if (!entry) {
                        /*
                         * We needed to drop the i_pages lock while calling
                         * radix_tree_preload() and we didn't have an entry to
                         * lock.  See if another thread inserted an entry at
                         * our index during this time.
                         */
                        entry = __radix_tree_lookup(&mapping->i_pages, index,
                                        NULL, &slot);
                        if (entry) {
                                radix_tree_preload_end();
                                xa_unlock_irq(&mapping->i_pages);
                                goto restart;
                        }
                }

                if (pmd_downgrade) {
                        dax_disassociate_entry(entry, mapping, false);
                        radix_tree_delete(&mapping->i_pages, index);
                        mapping->nrexceptional--;
                        dax_wake_mapping_entry_waiter(mapping, index, entry,
                                        true);
                }

                entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);

                err = __radix_tree_insert(&mapping->i_pages, index,
                                dax_radix_order(entry), entry);
                radix_tree_preload_end();
                if (err) {
                        xa_unlock_irq(&mapping->i_pages);
                        /*
                         * Our insertion of a DAX entry failed, most likely
                         * because we were inserting a PMD entry and it
                         * collided with a PTE sized entry at a different
                         * index in the PMD range.  We haven't inserted
                         * anything into the radix tree and have no waiters to
                         * wake.
                         */
                        return ERR_PTR(err);
                }
                /* Good, we have inserted empty locked entry into the tree. */
                mapping->nrexceptional++;
                xa_unlock_irq(&mapping->i_pages);
                return entry;
        }
        entry = lock_slot(mapping, slot);
 out_unlock:
        xa_unlock_irq(&mapping->i_pages);
        return entry;
}

static int __dax_invalidate_mapping_entry(struct address_space *mapping,
                                          pgoff_t index, bool trunc)
{
        int ret = 0;
        void *entry;
        struct radix_tree_root *pages = &mapping->i_pages;

        xa_lock_irq(pages);
        entry = get_unlocked_mapping_entry(mapping, index, NULL);
        if (!entry || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)))
                goto out;
        if (!trunc &&
            (radix_tree_tag_get(pages, index, PAGECACHE_TAG_DIRTY) ||
             radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE)))
                goto out;
        dax_disassociate_entry(entry, mapping, trunc);
        radix_tree_delete(pages, index);
        mapping->nrexceptional--;
        ret = 1;
out:
        put_unlocked_mapping_entry(mapping, index, entry);
        xa_unlock_irq(pages);
        return ret;
}

/*
 * Delete the exceptional DAX entry at @index from @mapping.  Wait for the
 * radix tree entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
        int ret = __dax_invalidate_mapping_entry(mapping, index, true);

        /*
         * This gets called from the truncate / punch_hole path. As such, the
         * caller must hold locks protecting against concurrent modifications
         * of the radix tree (usually fs-private i_mmap_sem for writing).
         * Since the caller has seen an exceptional entry for this index, we'd
         * better find it at that index as well...
         */
        WARN_ON_ONCE(!ret);
        return ret;
}

/*
 * Invalidate exceptional DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
                                      pgoff_t index)
{
        return __dax_invalidate_mapping_entry(mapping, index, false);
}

static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
                sector_t sector, size_t size, struct page *to,
                unsigned long vaddr)
{
        void *vto, *kaddr;
        pgoff_t pgoff;
        pfn_t pfn;
        long rc;
        int id;

        rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
        if (rc)
                return rc;

        id = dax_read_lock();
        rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
        if (rc < 0) {
                dax_read_unlock(id);
                return rc;
        }
        vto = kmap_atomic(to);
        copy_user_page(vto, (void __force *)kaddr, vaddr, to);
        kunmap_atomic(vto);
        dax_read_unlock(id);
        return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_mapping_entry(struct address_space *mapping,
                                      struct vm_fault *vmf,
                                      void *entry, pfn_t pfn_t,
                                      unsigned long flags, bool dirty)
{
        struct radix_tree_root *pages = &mapping->i_pages;
        unsigned long pfn = pfn_t_to_pfn(pfn_t);
        pgoff_t index = vmf->pgoff;
        void *new_entry;

        if (dirty)
                __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

        if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
                /* we are replacing a zero page with block mapping */
                if (dax_is_pmd_entry(entry))
                        unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
                                                        PG_PMD_NR, false);
                else /* pte entry */
                        unmap_mapping_pages(mapping, vmf->pgoff, 1, false);
        }

        xa_lock_irq(pages);
        new_entry = dax_radix_locked_entry(pfn, flags);
        if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
                dax_disassociate_entry(entry, mapping, false);
                dax_associate_entry(new_entry, mapping);
        }

        if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
                /*
                 * Only swap our new entry into the radix tree if the current
                 * entry is a zero page or an empty entry.  If a normal PTE or
                 * PMD entry is already in the tree, we leave it alone.  This
                 * means that if we are trying to insert a PTE and the
                 * existing entry is a PMD, we will just leave the PMD in the
                 * tree and dirty it if necessary.
                 */
                struct radix_tree_node *node;
                void **slot;
                void *ret;

                ret = __radix_tree_lookup(pages, index, &node, &slot);
                WARN_ON_ONCE(ret != entry);
                __radix_tree_replace(pages, node, slot,
                                     new_entry, NULL);
                entry = new_entry;
        }

        if (dirty)
                radix_tree_tag_set(pages, index, PAGECACHE_TAG_DIRTY);

        xa_unlock_irq(pages);
        return entry;
}

static inline unsigned long
pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
        unsigned long address;

        address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
        VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
        return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_mapping_entry_mkclean(struct address_space *mapping,
                                      pgoff_t index, unsigned long pfn)
{
        struct vm_area_struct *vma;
        pte_t pte, *ptep = NULL;
        pmd_t *pmdp = NULL;
        spinlock_t *ptl;

        i_mmap_lock_read(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
                unsigned long address, start, end;

                cond_resched();

                if (!(vma->vm_flags & VM_SHARED))
                        continue;

                address = pgoff_address(index, vma);

                /*
                 * Note because we provide start/end to follow_pte_pmd it will
                 * call mmu_notifier_invalidate_range_start() on our behalf
                 * before taking any lock.
                 */
                if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
                        continue;

                /*
                 * No need to call mmu_notifier_invalidate_range() as we are
                 * downgrading page table protection not changing it to point
                 * to a new page.
                 *
                 * See Documentation/vm/mmu_notifier.txt
                 */
                if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
                        pmd_t pmd;

                        if (pfn != pmd_pfn(*pmdp))
                                goto unlock_pmd;
                        if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
                                goto unlock_pmd;

                        flush_cache_page(vma, address, pfn);
                        pmd = pmdp_huge_clear_flush(vma, address, pmdp);
                        pmd = pmd_wrprotect(pmd);
                        pmd = pmd_mkclean(pmd);
                        set_pmd_at(vma->vm_mm, address, pmdp, pmd);
unlock_pmd:
#endif
                        spin_unlock(ptl);
                } else {
                        if (pfn != pte_pfn(*ptep))
                                goto unlock_pte;
                        if (!pte_dirty(*ptep) && !pte_write(*ptep))
                                goto unlock_pte;

                        flush_cache_page(vma, address, pfn);
                        pte = ptep_clear_flush(vma, address, ptep);
                        pte = pte_wrprotect(pte);
                        pte = pte_mkclean(pte);
                        set_pte_at(vma->vm_mm, address, ptep, pte);
unlock_pte:
                        pte_unmap_unlock(ptep, ptl);
                }

                mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
        }
        i_mmap_unlock_read(mapping);
}

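/*
 * Flush a single dirty DAX entry to the persistent domain: write-protect all
 * mappings of the entry's pfns so that further writes re-fault and re-dirty
 * the entry, flush the CPU caches for the whole entry, and only then clear
 * the radix tree dirty tag.
 */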
static int dax_writeback_one(struct dax_device *dax_dev,
                struct address_space *mapping, pgoff_t index, void *entry)
{
        struct radix_tree_root *pages = &mapping->i_pages;
        void *entry2, **slot;
        unsigned long pfn;
        long ret = 0;
        size_t size;

        /*
         * A page got tagged dirty in DAX mapping? Something is seriously
         * wrong.
         */
        if (WARN_ON(!radix_tree_exceptional_entry(entry)))
                return -EIO;

        xa_lock_irq(pages);
        entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
        /* Entry got punched out / reallocated? */
        if (!entry2 || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry2)))
                goto put_unlocked;
        /*
         * Entry got reallocated elsewhere? No need to writeback. We have to
         * compare pfns as we must not bail out due to difference in lockbit
         * or entry type.
         */
        if (dax_radix_pfn(entry2) != dax_radix_pfn(entry))
                goto put_unlocked;
        if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
                                dax_is_zero_entry(entry))) {
                ret = -EIO;
                goto put_unlocked;
        }

        /* Another fsync thread may have already written back this entry */
        if (!radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE))
                goto put_unlocked;
        /* Lock the entry to serialize with page faults */
        entry = lock_slot(mapping, slot);
        /*
         * We can clear the tag now but we have to be careful so that concurrent
         * dax_writeback_one() calls for the same index cannot finish before we
         * actually flush the caches. This is achieved as the calls will look
         * at the entry only under the i_pages lock and once they do that
         * they will see the entry locked and wait for it to unlock.
         */
        radix_tree_tag_clear(pages, index, PAGECACHE_TAG_TOWRITE);
        xa_unlock_irq(pages);

        /*
         * Even if dax_writeback_mapping_range() was given a wbc->range_start
         * in the middle of a PMD, the 'index' we are given will be aligned to
         * the start index of the PMD, as will the pfn we pull from 'entry'.
         * This allows us to flush for PMD_SIZE and not have to worry about
         * partial PMD writebacks.
         */
        pfn = dax_radix_pfn(entry);
        size = PAGE_SIZE << dax_radix_order(entry);

        dax_mapping_entry_mkclean(mapping, index, pfn);
        dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size);
        /*
         * After we have flushed the cache, we can clear the dirty tag. There
         * cannot be new dirty data in the pfn after the flush has completed as
         * the pfn mappings are writeprotected and fault waits for mapping
         * entry lock.
         */
        xa_lock_irq(pages);
        radix_tree_tag_clear(pages, index, PAGECACHE_TAG_DIRTY);
        xa_unlock_irq(pages);
        trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
        put_locked_mapping_entry(mapping, index);
        return ret;

 put_unlocked:
        put_unlocked_mapping_entry(mapping, index, entry2);
        xa_unlock_irq(pages);
        return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
                struct block_device *bdev, struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        pgoff_t start_index, end_index;
        pgoff_t indices[PAGEVEC_SIZE];
        struct dax_device *dax_dev;
        struct pagevec pvec;
        bool done = false;
        int i, ret = 0;

        if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
                return -EIO;

        if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
                return 0;

        dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
        if (!dax_dev)
                return -EIO;

        start_index = wbc->range_start >> PAGE_SHIFT;
        end_index = wbc->range_end >> PAGE_SHIFT;

        trace_dax_writeback_range(inode, start_index, end_index);

        tag_pages_for_writeback(mapping, start_index, end_index);

        pagevec_init(&pvec);
        while (!done) {
                pvec.nr = find_get_entries_tag(mapping, start_index,
                                PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
                                pvec.pages, indices);

                if (pvec.nr == 0)
                        break;

                for (i = 0; i < pvec.nr; i++) {
                        if (indices[i] > end_index) {
                                done = true;
                                break;
                        }

                        ret = dax_writeback_one(dax_dev, mapping, indices[i],
                                        pvec.pages[i]);
                        if (ret < 0) {
                                mapping_set_error(mapping, ret);
                                goto out;
                        }
                }
                start_index = indices[pvec.nr - 1] + 1;
        }
out:
        put_dax(dax_dev);
        trace_dax_writeback_range_done(inode, start_index, end_index);
        return (ret < 0 ? ret : 0);
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

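/*
 * Translate a file position to a 512-byte sector on the backing device:
 * iomap->offset is the file offset where the extent starts and iomap->addr
 * is its byte address on disk, so the expression below yields a disk byte
 * offset which ">> 9" converts to a sector number.
 */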
static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
        return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
}

static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
                         pfn_t *pfnp)
{
        const sector_t sector = dax_iomap_sector(iomap, pos);
        pgoff_t pgoff;
        void *kaddr;
        int id, rc;
        long length;

        rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
        if (rc)
                return rc;
        id = dax_read_lock();
        length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
                                   &kaddr, pfnp);
        if (length < 0) {
                rc = length;
                goto out;
        }
        rc = -EINVAL;
        if (PFN_PHYS(length) < size)
                goto out;
        if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
                goto out;
        /* For larger pages we need devmap */
        if (length > 1 && !pfn_t_devmap(*pfnp))
                goto out;
        rc = 0;
out:
        dax_read_unlock(id);
        return rc;
}

/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static int dax_load_hole(struct address_space *mapping, void *entry,
                         struct vm_fault *vmf)
{
        struct inode *inode = mapping->host;
        unsigned long vaddr = vmf->address;
        int ret = VM_FAULT_NOPAGE;
        struct page *zero_page;
        void *entry2;
        pfn_t pfn;

        zero_page = ZERO_PAGE(0);
        if (unlikely(!zero_page)) {
                ret = VM_FAULT_OOM;
                goto out;
        }

        pfn = page_to_pfn_t(zero_page);
        entry2 = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
                        RADIX_DAX_ZERO_PAGE, false);
        if (IS_ERR(entry2)) {
                ret = VM_FAULT_SIGBUS;
                goto out;
        }

        vm_insert_mixed(vmf->vma, vaddr, pfn);
out:
        trace_dax_load_hole(inode, vmf, ret);
        return ret;
}

static bool dax_range_is_aligned(struct block_device *bdev,
                                 unsigned int offset, unsigned int length)
{
        unsigned short sector_size = bdev_logical_block_size(bdev);

        if (!IS_ALIGNED(offset, sector_size))
                return false;
        if (!IS_ALIGNED(length, sector_size))
                return false;

        return true;
}

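/*
 * Zero a sub-page range.  When the range covers whole logical blocks the
 * block layer can do the zeroing; otherwise we must zero through the direct
 * mapping and flush the result so the zeroes reach the persistent domain.
 */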
int __dax_zero_page_range(struct block_device *bdev,
                struct dax_device *dax_dev, sector_t sector,
                unsigned int offset, unsigned int size)
{
        if (dax_range_is_aligned(bdev, offset, size)) {
                sector_t start_sector = sector + (offset >> 9);

                return blkdev_issue_zeroout(bdev, start_sector,
                                size >> 9, GFP_NOFS, 0);
        } else {
                pgoff_t pgoff;
                long rc, id;
                void *kaddr;
                pfn_t pfn;

                rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
                if (rc)
                        return rc;

                id = dax_read_lock();
                rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr,
                                &pfn);
                if (rc < 0) {
                        dax_read_unlock(id);
                        return rc;
                }
                memset(kaddr + offset, 0, size);
                dax_flush(dax_dev, kaddr + offset, size);
                dax_read_unlock(id);
        }
        return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);

static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                struct iomap *iomap)
{
        struct block_device *bdev = iomap->bdev;
        struct dax_device *dax_dev = iomap->dax_dev;
        struct iov_iter *iter = data;
        loff_t end = pos + length, done = 0;
        ssize_t ret = 0;
        int id;

        if (iov_iter_rw(iter) == READ) {
                end = min(end, i_size_read(inode));
                if (pos >= end)
                        return 0;

                if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
                        return iov_iter_zero(min(length, end - pos), iter);
        }

        if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
                return -EIO;

        /*
         * A write can allocate a block for an area which has a hole page
         * mapped into the page tables. We have to tear down these mappings so
         * that data written by write(2) is visible in mmap.
         */
        if (iomap->flags & IOMAP_F_NEW) {
                invalidate_inode_pages2_range(inode->i_mapping,
                                              pos >> PAGE_SHIFT,
                                              (end - 1) >> PAGE_SHIFT);
        }

        id = dax_read_lock();
        while (pos < end) {
                unsigned offset = pos & (PAGE_SIZE - 1);
                const size_t size = ALIGN(length + offset, PAGE_SIZE);
                const sector_t sector = dax_iomap_sector(iomap, pos);
                ssize_t map_len;
                pgoff_t pgoff;
                void *kaddr;
                pfn_t pfn;

                if (fatal_signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }

                ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
                if (ret)
                        break;

                map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
                                &kaddr, &pfn);
                if (map_len < 0) {
                        ret = map_len;
                        break;
                }

                map_len = PFN_PHYS(map_len);
                kaddr += offset;
                map_len -= offset;
                if (map_len > end - pos)
                        map_len = end - pos;

                /*
                 * The userspace address for the memory copy has already been
                 * validated via access_ok() in either vfs_read() or
                 * vfs_write(), depending on which operation we are doing.
                 */
                if (iov_iter_rw(iter) == WRITE)
                        map_len = dax_copy_from_iter(dax_dev, pgoff, kaddr,
                                        map_len, iter);
                else
                        map_len = copy_to_iter(kaddr, map_len, iter);
                if (map_len <= 0) {
                        ret = map_len ? map_len : -EFAULT;
                        break;
                }

                pos += map_len;
                length -= map_len;
                done += map_len;
        }
        dax_read_unlock(id);

        return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:       The control block for this I/O
 * @iter:       The addresses to do I/O from or to
 * @ops:        iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
                const struct iomap_ops *ops)
{
        struct address_space *mapping = iocb->ki_filp->f_mapping;
        struct inode *inode = mapping->host;
        loff_t pos = iocb->ki_pos, ret = 0, done = 0;
        unsigned flags = 0;

        if (iov_iter_rw(iter) == WRITE) {
                lockdep_assert_held_exclusive(&inode->i_rwsem);
                flags |= IOMAP_WRITE;
        } else {
                lockdep_assert_held(&inode->i_rwsem);
        }

        while (iov_iter_count(iter)) {
                ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
                                iter, dax_iomap_actor);
                if (ret <= 0)
                        break;
                pos += ret;
                done += ret;
        }

        iocb->ki_pos += done;
        return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);
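
/*
 * Illustrative caller sketch (hypothetical, modelled on the pattern
 * dax-aware filesystems use): a filesystem's ->read_iter() might do
 *
 *        if (IS_DAX(inode))
 *                return dax_iomap_rw(iocb, to, &fs_iomap_ops);
 *
 * where 'fs_iomap_ops' stands in for that filesystem's struct iomap_ops.
 */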

static int dax_fault_return(int error)
{
        if (error == 0)
                return VM_FAULT_NOPAGE;
        if (error == -ENOMEM)
                return VM_FAULT_OOM;
        return VM_FAULT_SIGBUS;
}

/*
 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 * flushed on write-faults (non-cow), but not read-faults.
 */
static bool dax_fault_is_synchronous(unsigned long flags,
                struct vm_area_struct *vma, struct iomap *iomap)
{
        return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
                && (iomap->flags & IOMAP_F_DIRTY);
}
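
/*
 * Userspace requests such a mapping with, e.g. (illustrative only):
 *
 *        mmap(NULL, len, PROT_READ | PROT_WRITE,
 *             MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
 *
 * which is what sets the VM_SYNC flag tested above.
 */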

static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
                               int *iomap_errp, const struct iomap_ops *ops)
{
        struct vm_area_struct *vma = vmf->vma;
        struct address_space *mapping = vma->vm_file->f_mapping;
        struct inode *inode = mapping->host;
        unsigned long vaddr = vmf->address;
        loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
        struct iomap iomap = { 0 };
        unsigned flags = IOMAP_FAULT;
        int error, major = 0;
        bool write = vmf->flags & FAULT_FLAG_WRITE;
        bool sync;
        int vmf_ret = 0;
        void *entry;
        pfn_t pfn;

        trace_dax_pte_fault(inode, vmf, vmf_ret);
        /*
         * Check now whether the offset is beyond the end of the file.  The
         * caller is supposed to hold locks serializing us with truncate /
         * punch hole, so this is a reliable test.
         */
        if (pos >= i_size_read(inode)) {
                vmf_ret = VM_FAULT_SIGBUS;
                goto out;
        }

        if (write && !vmf->cow_page)
                flags |= IOMAP_WRITE;

        entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
        if (IS_ERR(entry)) {
                vmf_ret = dax_fault_return(PTR_ERR(entry));
                goto out;
        }

        /*
         * It is possible, particularly with mixed reads & writes to private
         * mappings, that we have raced with a PMD fault that overlaps with
         * the PTE we need to set up.  If so just return and the fault will be
         * retried.
         */
        if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
                vmf_ret = VM_FAULT_NOPAGE;
                goto unlock_entry;
        }

        /*
         * Note that we don't bother to use iomap_apply here: DAX requires
         * the filesystem block size to be equal to the page size, which means
         * that we never have to deal with more than a single extent here.
         */
        error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
        if (iomap_errp)
                *iomap_errp = error;
        if (error) {
                vmf_ret = dax_fault_return(error);
                goto unlock_entry;
        }
        if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
                error = -EIO;   /* fs corruption? */
                goto error_finish_iomap;
        }

        if (vmf->cow_page) {
                sector_t sector = dax_iomap_sector(&iomap, pos);

                switch (iomap.type) {
                case IOMAP_HOLE:
                case IOMAP_UNWRITTEN:
                        clear_user_highpage(vmf->cow_page, vaddr);
                        break;
                case IOMAP_MAPPED:
                        error = copy_user_dax(iomap.bdev, iomap.dax_dev,
                                        sector, PAGE_SIZE, vmf->cow_page, vaddr);
                        break;
                default:
                        WARN_ON_ONCE(1);
                        error = -EIO;
                        break;
                }

                if (error)
                        goto error_finish_iomap;

                __SetPageUptodate(vmf->cow_page);
                vmf_ret = finish_fault(vmf);
                if (!vmf_ret)
                        vmf_ret = VM_FAULT_DONE_COW;
                goto finish_iomap;
        }

        sync = dax_fault_is_synchronous(flags, vma, &iomap);

        switch (iomap.type) {
        case IOMAP_MAPPED:
                if (iomap.flags & IOMAP_F_NEW) {
                        count_vm_event(PGMAJFAULT);
                        count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
                        major = VM_FAULT_MAJOR;
                }
                error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
                if (error < 0)
                        goto error_finish_iomap;

                entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
                                                 0, write && !sync);
                if (IS_ERR(entry)) {
                        error = PTR_ERR(entry);
                        goto error_finish_iomap;
                }

                /*
                 * If we are doing synchronous page fault and inode needs fsync,
                 * we can insert PTE into page tables only after that happens.
                 * Skip insertion for now and return the pfn so that caller can
                 * insert it after fsync is done.
                 */
                if (sync) {
                        if (WARN_ON_ONCE(!pfnp)) {
                                error = -EIO;
                                goto error_finish_iomap;
                        }
                        *pfnp = pfn;
                        vmf_ret = VM_FAULT_NEEDDSYNC | major;
                        goto finish_iomap;
                }
                trace_dax_insert_mapping(inode, vmf, entry);
                if (write)
                        error = vm_insert_mixed_mkwrite(vma, vaddr, pfn);
                else
                        error = vm_insert_mixed(vma, vaddr, pfn);

                /* -EBUSY is fine, somebody else faulted on the same PTE */
                if (error == -EBUSY)
                        error = 0;
                break;
        case IOMAP_UNWRITTEN:
        case IOMAP_HOLE:
                if (!write) {
                        vmf_ret = dax_load_hole(mapping, entry, vmf);
                        goto finish_iomap;
                }
                /*FALLTHRU*/
        default:
                WARN_ON_ONCE(1);
                error = -EIO;
                break;
        }

 error_finish_iomap:
        vmf_ret = dax_fault_return(error) | major;
 finish_iomap:
        if (ops->iomap_end) {
                int copied = PAGE_SIZE;

                if (vmf_ret & VM_FAULT_ERROR)
                        copied = 0;
                /*
                 * The fault is done by now and there's no way back (other
                 * thread may be already happily using PTE we have installed).
                 * Just ignore error from ->iomap_end since we cannot do much
                 * with it.
                 */
                ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
        }
 unlock_entry:
        put_locked_mapping_entry(mapping, vmf->pgoff);
 out:
        trace_dax_pte_fault_done(inode, vmf, vmf_ret);
        return vmf_ret;
}

#ifdef CONFIG_FS_DAX_PMD
static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
                void *entry)
{
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        unsigned long pmd_addr = vmf->address & PMD_MASK;
        struct inode *inode = mapping->host;
        struct page *zero_page;
        void *ret = NULL;
        spinlock_t *ptl;
        pmd_t pmd_entry;
        pfn_t pfn;

        zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

        if (unlikely(!zero_page))
                goto fallback;

        pfn = page_to_pfn_t(zero_page);
        ret = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
                        RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE, false);
        if (IS_ERR(ret))
                goto fallback;

        ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
        if (!pmd_none(*(vmf->pmd))) {
                spin_unlock(ptl);
                goto fallback;
        }

        pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
        pmd_entry = pmd_mkhuge(pmd_entry);
        set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
        spin_unlock(ptl);
        trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
        return VM_FAULT_NOPAGE;

fallback:
        trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
        return VM_FAULT_FALLBACK;
}

static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
                               const struct iomap_ops *ops)
{
        struct vm_area_struct *vma = vmf->vma;
        struct address_space *mapping = vma->vm_file->f_mapping;
        unsigned long pmd_addr = vmf->address & PMD_MASK;
        bool write = vmf->flags & FAULT_FLAG_WRITE;
        bool sync;
        unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
        struct inode *inode = mapping->host;
        int result = VM_FAULT_FALLBACK;
        struct iomap iomap = { 0 };
        pgoff_t max_pgoff, pgoff;
        void *entry;
        loff_t pos;
        int error;
        pfn_t pfn;

        /*
         * Check now whether the offset is beyond the end of the file.  The
         * caller is supposed to hold locks serializing us with truncate /
         * punch hole, so this is a reliable test.
         */
1374        pgoff = linear_page_index(vma, pmd_addr);
1375        max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1376
1377        trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
1378
1379        /*
1380         * Make sure that the faulting address's PMD offset (color) matches
1381         * the PMD offset from the start of the file.  This is necessary so
1382         * that a PMD range in the page table overlaps exactly with a PMD
1383         * range in the radix tree.
1384         */
1385        if ((vmf->pgoff & PG_PMD_COLOUR) !=
1386            ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
1387                goto fallback;
1388
1389        /* Fall back to PTEs if we're going to COW */
1390        if (write && !(vma->vm_flags & VM_SHARED))
1391                goto fallback;
1392
1393        /* If the PMD would extend outside the VMA */
1394        if (pmd_addr < vma->vm_start)
1395                goto fallback;
1396        if ((pmd_addr + PMD_SIZE) > vma->vm_end)
1397                goto fallback;
1398
1399        if (pgoff >= max_pgoff) {
1400                result = VM_FAULT_SIGBUS;
1401                goto out;
1402        }
1403
1404        /* If the PMD would extend beyond the file size */
1405        if ((pgoff | PG_PMD_COLOUR) >= max_pgoff)
1406                goto fallback;
1407
1408        /*
1409         * grab_mapping_entry() will make sure we get a 2MiB empty entry, a
1410         * 2MiB zero page entry or a DAX PMD.  If it can't (because a 4k page
1411         * is already in the tree, for instance), it will return -EEXIST and
1412         * we just fall back to 4k entries.
1413         */
1414        entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
1415        if (IS_ERR(entry))
1416                goto fallback;
1417
1418        /*
1419         * It is possible, particularly with mixed reads & writes to private
1420         * mappings, that we have raced with a PTE fault that overlaps with
1421         * the PMD we need to set up.  If so just return and the fault will be
1422         * retried.
1423         */
1424        if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
1425                        !pmd_devmap(*vmf->pmd)) {
1426                result = 0;
1427                goto unlock_entry;
1428        }
1429
1430        /*
1431         * Note that we don't use iomap_apply here.  We aren't doing I/O, only
1432         * setting up a mapping, so really we're using iomap_begin() as a way
1433         * to look up our filesystem block.
1434         */
1435        pos = (loff_t)pgoff << PAGE_SHIFT;
1436        error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
1437        if (error)
1438                goto unlock_entry;
1439
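        /*
         * Note (added for clarity): if the extent returned by
         * ->iomap_begin() does not cover the whole 2MiB range, we cannot
         * map it with a PMD; result is still VM_FAULT_FALLBACK, so we
         * unwind through finish_iomap and split to PTEs below.
         */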
1440        if (iomap.offset + iomap.length < pos + PMD_SIZE)
1441                goto finish_iomap;
1442
1443        sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);
1444
1445        switch (iomap.type) {
1446        case IOMAP_MAPPED:
1447                error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
1448                if (error < 0)
1449                        goto finish_iomap;
1450
1451                entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
1452                                                RADIX_DAX_PMD, write && !sync);
1453                if (IS_ERR(entry))
1454                        goto finish_iomap;
1455
1456                /*
1457                 * If we are doing a synchronous page fault and the inode needs
1458                 * fsync, we can insert the PMD into the page tables only after
1459                 * that happens. Skip insertion for now and return the pfn so
1460                 * that the caller can insert it after fsync is done.
1461                 */
1462                if (sync) {
1463                        if (WARN_ON_ONCE(!pfnp))
1464                                goto finish_iomap;
1465                        *pfnp = pfn;
1466                        result = VM_FAULT_NEEDDSYNC;
1467                        goto finish_iomap;
1468                }
1469
1470                trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
1471                result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
1472                                            write);
1473                break;
1474        case IOMAP_UNWRITTEN:
1475        case IOMAP_HOLE:
1476                if (WARN_ON_ONCE(write))
1477                        break;
1478                result = dax_pmd_load_hole(vmf, &iomap, entry);
1479                break;
1480        default:
1481                WARN_ON_ONCE(1);
1482                break;
1483        }
1484
1485 finish_iomap:
1486        if (ops->iomap_end) {
1487                int copied = PMD_SIZE;
1488
1489                if (result == VM_FAULT_FALLBACK)
1490                        copied = 0;
1491                /*
1492                 * The fault is done by now and there's no way back (another
1493                 * thread may already be happily using the PMD we have
1494                 * installed). Just ignore errors from ->iomap_end since we
1495                 * cannot do much with them.
1496                 */
1497                ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
1498                                &iomap);
1499        }
1500 unlock_entry:
1501        put_locked_mapping_entry(mapping, pgoff);
1502 fallback:
1503        if (result == VM_FAULT_FALLBACK) {
1504                split_huge_pmd(vma, vmf->pmd, vmf->address);
1505                count_vm_event(THP_FAULT_FALLBACK);
1506        }
1507out:
1508        trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
1509        return result;
1510}
1511#else
1512static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1513                               const struct iomap_ops *ops)
1514{
1515        return VM_FAULT_FALLBACK;
1516}
1517#endif /* CONFIG_FS_DAX_PMD */
1518
1519/**
1520 * dax_iomap_fault - handle a page fault on a DAX file
1521 * @vmf: The description of the fault
1522 * @pe_size: Size of the page to fault in
1523 * @pfnp: PFN to insert for synchronous faults if fsync is required
1524 * @iomap_errp: Storage for detailed error code in case of error
1525 * @ops: Iomap ops passed from the file system
1526 *
1527 * When a page fault occurs, filesystems may call this helper in
1528 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1529 * has done all the necessary locking for the page fault to proceed
1530 * successfully.
1531 */
1532int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1533                    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
1534{
1535        switch (pe_size) {
1536        case PE_SIZE_PTE:
1537                return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
1538        case PE_SIZE_PMD:
1539                return dax_iomap_pmd_fault(vmf, pfnp, ops);
1540        default:
1541                return VM_FAULT_FALLBACK;
1542        }
1543}
1544EXPORT_SYMBOL_GPL(dax_iomap_fault);
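
/*
 * Illustrative sketch (not part of the original file): the minimal way a
 * filesystem wires dax_iomap_fault() into its ->fault handler, modelled
 * loosely on the ext2-style PTE-only usage.  "example_iomap_ops" and
 * "example_dax_sem" are hypothetical names; a real filesystem supplies its
 * own iomap_ops and whatever lock it uses to serialize against truncate.
 */
#if 0	/* example only, not compiled */
static int example_dax_fault(struct vm_fault *vmf)
{
        struct inode *inode = file_inode(vmf->vma->vm_file);
        int ret;

        if (vmf->flags & FAULT_FLAG_WRITE) {
                sb_start_pagefault(inode->i_sb);
                file_update_time(vmf->vma->vm_file);
        }
        /* Hold off truncate / punch hole, as the kerneldoc above requires. */
        down_read(&example_dax_sem);
        ret = dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, NULL,
                              &example_iomap_ops);
        up_read(&example_dax_sem);
        if (vmf->flags & FAULT_FLAG_WRITE)
                sb_end_pagefault(inode->i_sb);

        return ret;
}
#endif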
1545
1546/**
1547 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
1548 * @vmf: The description of the fault
1549 * @pe_size: Size of entry to be inserted
1550 * @pfn: PFN to insert
1551 *
1552 * This function inserts a writeable PTE or PMD entry into the page tables
1553 * for an mmapped DAX file.  It also takes care of marking the corresponding
1554 * radix tree entry as dirty.
1555 */
1556static int dax_insert_pfn_mkwrite(struct vm_fault *vmf,
1557                                  enum page_entry_size pe_size,
1558                                  pfn_t pfn)
1559{
1560        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1561        void *entry, **slot;
1562        pgoff_t index = vmf->pgoff;
1563        int vmf_ret, error;
1564
1565        xa_lock_irq(&mapping->i_pages);
1566        entry = get_unlocked_mapping_entry(mapping, index, &slot);
1567        /* Did we race with someone splitting the entry or similar? */
1568        if (!entry ||
1569            (pe_size == PE_SIZE_PTE && !dax_is_pte_entry(entry)) ||
1570            (pe_size == PE_SIZE_PMD && !dax_is_pmd_entry(entry))) {
1571                put_unlocked_mapping_entry(mapping, index, entry);
1572                xa_unlock_irq(&mapping->i_pages);
1573                trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
1574                                                      VM_FAULT_NOPAGE);
1575                return VM_FAULT_NOPAGE;
1576        }
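        /*
         * Mark the radix tree entry dirty before installing the writeable
         * PTE/PMD: once userspace can store through the mapping again, a
         * later fsync/writeback must know to flush this range.
         */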
1577        radix_tree_tag_set(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY);
1578        entry = lock_slot(mapping, slot);
1579        xa_unlock_irq(&mapping->i_pages);
1580        switch (pe_size) {
1581        case PE_SIZE_PTE:
1582                error = vm_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1583                vmf_ret = dax_fault_return(error);
1584                break;
1585#ifdef CONFIG_FS_DAX_PMD
1586        case PE_SIZE_PMD:
1587                vmf_ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
1588                        pfn, true);
1589                break;
1590#endif
1591        default:
1592                vmf_ret = VM_FAULT_FALLBACK;
1593        }
1594        put_locked_mapping_entry(mapping, index);
1595        trace_dax_insert_pfn_mkwrite(mapping->host, vmf, vmf_ret);
1596        return vmf_ret;
1597}
1598
1599/**
1600 * dax_finish_sync_fault - finish synchronous page fault
1601 * @vmf: The description of the fault
1602 * @pe_size: Size of entry to be inserted
1603 * @pfn: PFN to insert
1604 *
1605 * This function ensures that the file range touched by the page fault is
1606 * stored persistently on the media and then inserts the appropriate page
1607 * table entry.
1608 */
1609int dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1610                          pfn_t pfn)
1611{
1612        int err;
1613        loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
1614        size_t len = 0;
1615
1616        if (pe_size == PE_SIZE_PTE)
1617                len = PAGE_SIZE;
1618        else if (pe_size == PE_SIZE_PMD)
1619                len = PMD_SIZE;
1620        else
1621                WARN_ON_ONCE(1);
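        /* The final "1" asks vfs_fsync_range() for a datasync-style flush. */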
1622        err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
1623        if (err)
1624                return VM_FAULT_SIGBUS;
1625        return dax_insert_pfn_mkwrite(vmf, pe_size, pfn);
1626}
1627EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
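
/*
 * Illustrative sketch (not part of the original file): how a MAP_SYNC
 * capable filesystem combines dax_iomap_fault() with
 * dax_finish_sync_fault(), modelled loosely on the ext4/xfs pattern.
 * "example_iomap_ops" is again a hypothetical name for the filesystem's
 * iomap_ops; the fs-specific locking around the calls is elided.
 */
#if 0	/* example only, not compiled */
static int example_dax_huge_fault(struct vm_fault *vmf,
                                  enum page_entry_size pe_size)
{
        pfn_t pfn;
        int result;

        /*
         * Passing &pfn lets dax_iomap_fault() hand back the PFN when it
         * returns VM_FAULT_NEEDDSYNC for a synchronous fault.
         */
        result = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
                                 &example_iomap_ops);
        if (result & VM_FAULT_NEEDDSYNC) {
                /* Data is mapped but not yet durable: fsync, then install. */
                result = dax_finish_sync_fault(vmf, pe_size, pfn);
        }
        return result;
}
#endif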
1628