/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

static inline unsigned int pe_order(enum page_entry_size pe_size)
{
	if (pe_size == PE_SIZE_PTE)
		return PAGE_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PUD)
		return PUD_SHIFT - PAGE_SHIFT;
	return ~0;
}
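
/*
 * Illustrative values only (not part of the original source): on x86-64
 * with 4K pages, PAGE_SHIFT = 12, PMD_SHIFT = 21 and PUD_SHIFT = 30, so
 * pe_order() maps PE_SIZE_PTE -> 0 (a single 4K page), PE_SIZE_PMD -> 9
 * (2^9 = 512 pages, one 2M huge page) and PE_SIZE_PUD -> 18 (2^18 pages,
 * one 1G huge page).
 */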

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset.  */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)

/* The order of a PMD entry */
#define PMD_ORDER	(PMD_SHIFT - PAGE_SHIFT)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

/*
 * DAX pagecache entries use XArray value entries so they can't be mistaken
 * for pages.  We use one bit for locking, one bit for the entry size (PMD)
 * and two more to tell us if the entry is a zero page or an empty entry that
 * is just used for locking.  In total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define DAX_SHIFT	(4)
#define DAX_LOCKED	(1UL << 0)
#define DAX_PMD		(1UL << 1)
#define DAX_ZERO_PAGE	(1UL << 2)
#define DAX_EMPTY	(1UL << 3)
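
/*
 * Worked example (illustrative, not from the original source): a locked
 * PMD entry for pfn 0x1000 is built as
 *
 *	dax_make_entry(pfn, DAX_PMD | DAX_LOCKED)
 *		== xa_mk_value((DAX_PMD | DAX_LOCKED) | (0x1000 << DAX_SHIFT))
 *		== xa_mk_value(0x10003)
 *
 * and dax_to_pfn() recovers 0x10003 >> DAX_SHIFT == 0x1000.  xa_mk_value()
 * itself shifts the value left by one and sets the low bit, which is how
 * the XArray tells value entries apart from pointers.
 */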

static unsigned long dax_to_pfn(void *entry)
{
	return xa_to_value(entry) >> DAX_SHIFT;
}

static void *dax_make_entry(pfn_t pfn, unsigned long flags)
{
	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
}

static bool dax_is_locked(void *entry)
{
	return xa_to_value(entry) & DAX_LOCKED;
}

static unsigned int dax_entry_order(void *entry)
{
	if (xa_to_value(entry) & DAX_PMD)
		return PMD_ORDER;
	return 0;
}

static unsigned long dax_is_pmd_entry(void *entry)
{
	return xa_to_value(entry) & DAX_PMD;
}

static bool dax_is_pte_entry(void *entry)
{
	return !(xa_to_value(entry) & DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return xa_to_value(entry) & DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return xa_to_value(entry) & DAX_EMPTY;
}

/*
 * DAX page cache entry locking
 */
struct exceptional_entry_key {
	struct xarray *xa;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
		void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;
	unsigned long index = xas->xa_index;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;
	key->xa = xas->xa;
	key->entry_start = index;

	hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}
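
/*
 * Example (illustrative, not from the original source): with 2M PMDs and
 * 4K pages PG_PMD_COLOUR is 511 (0x1ff), so for a PMD entry the indices
 * 0x200..0x3ff all wait on the queue keyed by entry_start == 0x200, while
 * PTE entries at those indices each hash with their own index.
 */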

static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
		unsigned int mode, int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->xa != ewait->key.xa ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * @entry may no longer be the entry at the index in the mapping.
 * The important information it's conveying is whether the entry at
 * this index used to be a PMD entry.
 */
static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(xas, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under the i_pages lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

/*
 * Look up entry in page cache, wait for it to become unlocked if it
 * is a DAX entry and return it.  The caller must subsequently call
 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
 * if it did.
 *
 * Must be called with the i_pages lock held.
 */
static void *get_unlocked_entry(struct xa_state *xas)
{
	void *entry;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = xas_find_conflict(xas);
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)) ||
				!dax_is_locked(entry))
			return entry;

		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		xas_unlock_irq(xas);
		xas_reset(xas);
		schedule();
		finish_wait(wq, &ewait.wait);
		xas_lock_irq(xas);
	}
}

/*
 * The only thing keeping the address space around is the i_pages lock
 * (it's cycled in clear_inode() after removing the entries from i_pages).
 * After we call xas_unlock_irq(), we cannot touch xas->xa.
 */
static void wait_entry_unlocked(struct xa_state *xas, void *entry)
{
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
	/*
	 * Unlike get_unlocked_entry() there is no guarantee that this
	 * path ever successfully retrieves an unlocked entry before an
	 * inode dies. Perform a non-exclusive wait in case this path
	 * never successfully performs its own wake up.
	 */
	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
	xas_unlock_irq(xas);
	schedule();
	finish_wait(wq, &ewait.wait);
}

static void put_unlocked_entry(struct xa_state *xas, void *entry)
{
	/* If we were the only waiter woken, wake the next one */
	if (entry)
		dax_wake_entry(xas, entry, false);
}

/*
 * We used the xa_state to get the entry, but then we locked the entry and
 * dropped the xa_lock, so we know the xa_state is stale and must be reset
 * before use.
 */
static void dax_unlock_entry(struct xa_state *xas, void *entry)
{
	void *old;

	BUG_ON(dax_is_locked(entry));
	xas_reset(xas);
	xas_lock_irq(xas);
	old = xas_store(xas, entry);
	xas_unlock_irq(xas);
	BUG_ON(!dax_is_locked(old));
	dax_wake_entry(xas, entry, false);
}

/*
 * Return: The entry stored at this location before it was locked.
 */
static void *dax_lock_entry(struct xa_state *xas, void *entry)
{
	unsigned long v = xa_to_value(entry);
	return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
}

static unsigned long dax_entry_size(void *entry)
{
	if (dax_is_zero_entry(entry))
		return 0;
	else if (dax_is_empty_entry(entry))
		return 0;
	else if (dax_is_pmd_entry(entry))
		return PMD_SIZE;
	else
		return PAGE_SIZE;
}

static unsigned long dax_end_pfn(void *entry)
{
	return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}

/*
 * Iterate through all mapped pfns represented by an entry, i.e. skip
 * 'empty' and 'zero' entries.
 */
#define for_each_mapped_pfn(entry, pfn) \
	for (pfn = dax_to_pfn(entry); \
			pfn < dax_end_pfn(entry); pfn++)

/*
 * TODO: for reflink+dax we need a way to associate a single page with
 * multiple address_space instances at different linear_page_index()
 * offsets.
 */
static void dax_associate_entry(void *entry, struct address_space *mapping,
		struct vm_area_struct *vma, unsigned long address)
{
	unsigned long size = dax_entry_size(entry), pfn, index;
	int i = 0;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	index = linear_page_index(vma, address & ~(size - 1));
	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(page->mapping);
		page->mapping = mapping;
		page->index = index + i++;
	}
}

static void dax_disassociate_entry(void *entry, struct address_space *mapping,
		bool trunc)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
		page->mapping = NULL;
		page->index = 0;
	}
}

static struct page *dax_busy_page(void *entry)
{
	unsigned long pfn;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (page_ref_count(page) > 1)
			return page;
	}
	return NULL;
}

/*
 * dax_lock_page - Lock the DAX entry corresponding to a page
 * @page: The page whose entry we want to lock
 *
 * Context: Process context.
 * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
 * not be locked.
 */
dax_entry_t dax_lock_page(struct page *page)
{
	XA_STATE(xas, NULL, 0);
	void *entry;

	/* Ensure page->mapping isn't freed while we look at it */
	rcu_read_lock();
	for (;;) {
		struct address_space *mapping = READ_ONCE(page->mapping);

		entry = NULL;
		if (!mapping || !dax_mapping(mapping))
			break;

		/*
		 * In the device-dax case there's no need to lock, a
		 * struct dev_pagemap pin is sufficient to keep the
		 * inode alive, and we assume we have dev_pagemap pin
		 * otherwise we would not have a valid pfn_to_page()
		 * translation.
		 */
		entry = (void *)~0UL;
		if (S_ISCHR(mapping->host->i_mode))
			break;

		xas.xa = &mapping->i_pages;
		xas_lock_irq(&xas);
		if (mapping != page->mapping) {
			xas_unlock_irq(&xas);
			continue;
		}
		xas_set(&xas, page->index);
		entry = xas_load(&xas);
		if (dax_is_locked(entry)) {
			rcu_read_unlock();
			wait_entry_unlocked(&xas, entry);
			rcu_read_lock();
			continue;
		}
		dax_lock_entry(&xas, entry);
		xas_unlock_irq(&xas);
		break;
	}
	rcu_read_unlock();
	return (dax_entry_t)entry;
}

void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
	struct address_space *mapping = page->mapping;
	XA_STATE(xas, &mapping->i_pages, page->index);

	if (S_ISCHR(mapping->host->i_mode))
		return;

	dax_unlock_entry(&xas, (void *)cookie);
}
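
/*
 * Hedged usage sketch (not part of the original file): the memory-failure
 * handler is the sort of caller these helpers exist for.  It holds the
 * entry lock across its inspection of the page so the fault path cannot
 * install or tear down the mapping underneath it.  The function name is
 * illustrative.
 */
#if 0	/* illustrative only */
static int example_poison_dax_page(struct page *page)
{
	dax_entry_t cookie = dax_lock_page(page);

	if (!cookie)
		return -EBUSY;	/* entry could not be locked */
	/* ... examine page->mapping / page->index while they are stable ... */
	dax_unlock_page(page, cookie);
	return 0;
}
#endif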

/*
 * Find page cache entry at given index. If it is a DAX entry, return it
 * with the entry locked. If the page cache doesn't contain an entry at
 * that index, add a locked empty entry.
 *
 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return VM_FAULT_FALLBACK.
 * This will happen if there are any PTE entries within the PMD range
 * that we are requesting.
 *
 * We always favor PTE entries over PMD entries. There isn't a flow where we
 * evict PTE entries in order to 'upgrade' them to a PMD entry.  A PMD
 * insertion will fail if it finds any PTE entries already in the tree, and a
 * PTE insertion will cause an existing PMD entry to be unmapped and
 * downgraded to PTE entries.  This happens for both PMD zero pages as
 * well as PMD empty entries.
 *
 * The exception to this downgrade path is for PMD entries that have
 * real storage backing them.  We will leave these real PMD entries in
 * the tree, and PTE writes will simply dirty the entire PMD entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 *
 * On error, this function does not return an ERR_PTR.  Instead it returns
 * a VM_FAULT code, encoded as an xarray internal entry.  The ERR_PTR values
 * overlap with xarray value entries.
 */
static void *grab_mapping_entry(struct xa_state *xas,
		struct address_space *mapping, unsigned long size_flag)
{
	unsigned long index = xas->xa_index;
	bool pmd_downgrade = false; /* splitting PMD entry into PTE entries? */
	void *entry;

retry:
	xas_lock_irq(xas);
	entry = get_unlocked_entry(xas);

	if (entry) {
		if (!xa_is_value(entry)) {
			xas_set_err(xas, EIO);
			goto out_unlock;
		}

		if (size_flag & DAX_PMD) {
			if (dax_is_pte_entry(entry)) {
				put_unlocked_entry(xas, entry);
				goto fallback;
			}
		} else { /* trying to grab a PTE entry */
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	if (pmd_downgrade) {
		/*
		 * Make sure 'entry' remains valid while we drop
		 * the i_pages lock.
		 */
		dax_lock_entry(xas, entry);

		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (dax_is_zero_entry(entry)) {
			xas_unlock_irq(xas);
			unmap_mapping_pages(mapping,
					xas->xa_index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
			xas_reset(xas);
			xas_lock_irq(xas);
		}

		dax_disassociate_entry(entry, mapping, false);
		xas_store(xas, NULL);	/* undo the PMD join */
		dax_wake_entry(xas, entry, true);
		mapping->nrexceptional--;
		entry = NULL;
		xas_set(xas, index);
	}

	if (entry) {
		dax_lock_entry(xas, entry);
	} else {
		entry = dax_make_entry(pfn_to_pfn_t(0), size_flag | DAX_EMPTY);
		dax_lock_entry(xas, entry);
		if (xas_error(xas))
			goto out_unlock;
		mapping->nrexceptional++;
	}

out_unlock:
	xas_unlock_irq(xas);
	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
		goto retry;
	if (xas->xa_node == XA_ERROR(-ENOMEM))
		return xa_mk_internal(VM_FAULT_OOM);
	if (xas_error(xas))
		return xa_mk_internal(VM_FAULT_SIGBUS);
	return entry;
fallback:
	xas_unlock_irq(xas);
	return xa_mk_internal(VM_FAULT_FALLBACK);
}

/**
 * dax_layout_busy_page - find first pinned page in @mapping
 * @mapping: address space to scan for a page with ref count > 1
 *
 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 * 'onlined' to the page allocator so they are considered idle when
 * page->count == 1. A filesystem uses this interface to determine if
 * any page in the mapping is busy, i.e. for DMA, or other
 * get_user_pages() usages.
 *
 * It is expected that the filesystem is holding locks to block the
 * establishment of new mappings in this address_space. I.e. it expects
 * to be able to run unmap_mapping_range() and subsequently not race
 * mapping_mapped() becoming true.
 */
struct page *dax_layout_busy_page(struct address_space *mapping)
{
	XA_STATE(xas, &mapping->i_pages, 0);
	void *entry;
	unsigned int scanned = 0;
	struct page *page = NULL;

	/*
	 * In the 'limited' case get_user_pages() for dax is disabled.
	 */
	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return NULL;

	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
		return NULL;

	/*
	 * If we race get_user_pages_fast() here either we'll see the
	 * elevated page count in the iteration and wait, or
	 * get_user_pages_fast() will see that the page it took a reference
	 * against is no longer mapped in the page tables and bail to the
	 * get_user_pages() slow path.  The slow path is protected by
	 * pte_lock() and pmd_lock(). New references are not taken without
	 * holding those locks, and unmap_mapping_range() will not zero the
	 * pte or pmd without holding the respective lock, so we are
	 * guaranteed to either see new references or prevent new
	 * references from being established.
	 */
	unmap_mapping_range(mapping, 0, 0, 1);

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (WARN_ON_ONCE(!xa_is_value(entry)))
			continue;
		if (unlikely(dax_is_locked(entry)))
			entry = get_unlocked_entry(&xas);
		if (entry)
			page = dax_busy_page(entry);
		put_unlocked_entry(&xas, entry);
		if (page)
			break;
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	return page;
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page);
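
/*
 * Hedged usage sketch (not part of the original file): a filesystem
 * typically loops on dax_layout_busy_page() before truncating, waiting
 * for the pinned page's reference count to drop back to one, in the
 * style of XFS's xfs_break_dax_layouts().  This simplified version omits
 * the lock dropping/retaking a real filesystem does around the wait;
 * the function name is illustrative.
 */
#if 0	/* illustrative only */
static int example_break_dax_layouts(struct inode *inode)
{
	struct page *page;

	for (;;) {
		page = dax_layout_busy_page(inode->i_mapping);
		if (!page)
			return 0;
		/*
		 * Wait for the extra reference (e.g. DMA via
		 * get_user_pages()) to go away before retrying.
		 */
		wait_var_event(&page->_refcount,
			       page_ref_count(page) == 1);
	}
}
#endif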

static int __dax_invalidate_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	XA_STATE(xas, &mapping->i_pages, index);
	int ret = 0;
	void *entry;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas);
	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
		goto out;
	if (!trunc &&
	    (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
	     xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
		goto out;
	dax_disassociate_entry(entry, mapping, trunc);
	xas_store(&xas, NULL);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_entry(&xas, entry);
	xas_unlock_irq(&xas);
	return ret;
}

/*
 * Delete DAX entry at @index from @mapping.  Wait for it
 * to be unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * page cache (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen a DAX entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_entry(mapping, index, false);
}

static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, struct page *to,
		unsigned long vaddr)
{
	void *vto, *kaddr;
	pgoff_t pgoff;
	long rc;
	int id;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, NULL);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_entry(struct xa_state *xas,
		struct address_space *mapping, struct vm_fault *vmf,
		void *entry, pfn_t pfn, unsigned long flags, bool dirty)
{
	void *new_entry = dax_make_entry(pfn, flags);

	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) {
		unsigned long index = xas->xa_index;
		/* we are replacing a zero page with a block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
		else /* pte entry */
			unmap_mapping_pages(mapping, index, 1, false);
	}

	xas_reset(xas);
	xas_lock_irq(xas);
	if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
		dax_disassociate_entry(entry, mapping, false);
		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
	}

	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		/*
		 * Only swap our new entry into the page cache if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the cache, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		void *old = dax_lock_entry(xas, new_entry);
		WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
					DAX_LOCKED));
		entry = new_entry;
	} else {
		xas_load(xas);	/* Walk the xa_state */
	}

	if (dirty)
		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);

	xas_unlock_irq(xas);
	return entry;
}

static inline
unsigned long pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
		unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		struct mmu_notifier_range range;
		unsigned long address;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);

		/*
		 * Note because we provide start/end to follow_pte_pmd it will
		 * call mmu_notifier_invalidate_range_start() on our behalf
		 * before taking any lock.
		 */
		if (follow_pte_pmd(vma->vm_mm, address, &range,
				   &ptep, &pmdp, &ptl))
			continue;

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
unlock_pmd:
#endif
			spin_unlock(ptl);
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		mmu_notifier_invalidate_range_end(&range);
	}
	i_mmap_unlock_read(mapping);
}

static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
		struct address_space *mapping, void *entry)
{
	unsigned long pfn;
	long ret = 0;
	size_t size;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!xa_is_value(entry)))
		return -EIO;

	if (unlikely(dax_is_locked(entry))) {
		void *old_entry = entry;

		entry = get_unlocked_entry(xas);

		/* Entry got punched out / reallocated? */
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			goto put_unlocked;
		/*
		 * Entry got reallocated elsewhere? No need to writeback.
		 * We have to compare pfns as we must not bail out due to
		 * difference in lockbit or entry type.
		 */
		if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
			goto put_unlocked;
		if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
					dax_is_zero_entry(entry))) {
			ret = -EIO;
			goto put_unlocked;
		}

		/* Another fsync thread may have already done this entry */
		if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
			goto put_unlocked;
	}

	/* Lock the entry to serialize with page faults */
	dax_lock_entry(xas, entry);

	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under the i_pages lock and once they do that
	 * they will see the entry locked and wait for it to unlock.
	 */
	xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
	xas_unlock_irq(xas);

	/*
	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we are given will be aligned to
	 * the start index of the PMD, as will the pfn we pull from 'entry'.
	 * This allows us to flush for PMD_SIZE and not have to worry about
	 * partial PMD writebacks.
	 */
	pfn = dax_to_pfn(entry);
	size = PAGE_SIZE << dax_entry_order(entry);

	dax_entry_mkclean(mapping, xas->xa_index, pfn);
	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	xas_reset(xas);
	xas_lock_irq(xas);
	xas_store(xas, entry);
	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
	dax_wake_entry(xas, entry, false);

	trace_dax_writeback_one(mapping->host, xas->xa_index,
			size >> PAGE_SHIFT);
	return ret;

 put_unlocked:
	put_unlocked_entry(xas, entry);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
	struct inode *inode = mapping->host;
	pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
	struct dax_device *dax_dev;
	void *entry;
	int ret = 0;
	unsigned int scanned = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev)
		return -EIO;

	trace_dax_writeback_range(inode, xas.xa_index, end_index);

	tag_pages_for_writeback(mapping, xas.xa_index, end_index);

	xas_lock_irq(&xas);
	xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
		ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
		if (ret < 0) {
			mapping_set_error(mapping, ret);
			break;
		}
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	put_dax(dax_dev);
	trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
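
/*
 * Hedged usage sketch (not part of the original file): filesystems wire
 * this up as their ->writepages() for DAX inodes, in the style of XFS's
 * xfs_dax_writepages().  example_find_bdev() stands in for whatever
 * fs-specific helper resolves the inode's block device; both names are
 * illustrative.
 */
#if 0	/* illustrative only */
static int example_dax_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct block_device *bdev = example_find_bdev(mapping->host);

	return dax_writeback_mapping_range(mapping, bdev, wbc);
}
#endif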

static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
}
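
/*
 * Worked example (illustrative, not from the original source): with
 * iomap->offset == 0x200000, iomap->addr == 0x10000000 and pos == 0x201234,
 * the page-aligned file offset is 0x201000, so the disk address is
 * 0x10000000 + 0x1000 == 0x10001000, i.e. sector 0x10001000 >> 9 ==
 * 0x80008 in 512-byte units.
 */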

static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
			 pfn_t *pfnp)
{
	const sector_t sector = dax_iomap_sector(iomap, pos);
	pgoff_t pgoff;
	int id, rc;
	long length;

	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
	if (rc)
		return rc;
	id = dax_read_lock();
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   NULL, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	rc = -EINVAL;
	if (PFN_PHYS(length) < size)
		goto out;
	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
		goto out;
	/* For larger pages we need devmap */
	if (length > 1 && !pfn_t_devmap(*pfnp))
		goto out;
	rc = 0;
out:
	dax_read_unlock(id);
	return rc;
}

/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static vm_fault_t dax_load_hole(struct xa_state *xas,
		struct address_space *mapping, void **entry,
		struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
	vm_fault_t ret;

	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_ZERO_PAGE, false);

	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}

static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}

int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int size)
{
	if (dax_range_is_aligned(bdev, offset, size)) {
		sector_t start_sector = sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				size >> 9, GFP_NOFS, 0);
	} else {
		pgoff_t pgoff;
		long rc, id;
		void *kaddr;

		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
		if (rc)
			return rc;

		id = dax_read_lock();
		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
		if (rc < 0) {
			dax_read_unlock(id);
			return rc;
		}
		memset(kaddr + offset, 0, size);
		dax_flush(dax_dev, kaddr + offset, size);
		dax_read_unlock(id);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);
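
/*
 * Hedged usage sketch (not part of the original file): the iomap zeroing
 * path calls this for the DAX case, roughly as in fs/iomap.c's DAX zeroing
 * helper of this era; iomap_sector() is assumed to be the iomap byte-to-
 * sector helper, and the function name is illustrative.
 */
#if 0	/* illustrative only */
static int example_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
		struct iomap *iomap)
{
	return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
			iomap_sector(iomap, pos & PAGE_MASK), offset, bytes);
}
#endif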

static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct block_device *bdev = iomap->bdev;
	struct dax_device *dax_dev = iomap->dax_dev;
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;
	size_t xfer;
	int id;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * A write can allocate blocks for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if (iomap->flags & IOMAP_F_NEW) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		const sector_t sector = dax_iomap_sector(iomap, pos);
		ssize_t map_len;
		pgoff_t pgoff;
		void *kaddr;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
		if (ret)
			break;

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				&kaddr, NULL);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		/*
		 * The userspace address for the memory copy has already been
		 * validated via access_ok() in either vfs_read() or
		 * vfs_write(), depending on which operation we are doing.
		 */
		if (iov_iter_rw(iter) == WRITE)
			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
					map_len, iter);
		else
			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
					map_len, iter);

		pos += xfer;
		length -= xfer;
		done += xfer;

		if (xfer == 0)
			ret = -EFAULT;
		if (xfer < map_len)
			break;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_exclusive(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);
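
/*
 * Hedged usage sketch (not part of the original file): a filesystem's
 * ->read_iter() for a DAX inode takes the inode lock shared and delegates
 * here with its own iomap_ops, in the style of ext4/XFS.
 * example_iomap_ops and the function name are illustrative.
 */
#if 0	/* illustrative only */
static ssize_t example_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock_shared(inode);
	ret = dax_iomap_rw(iocb, to, &example_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}
#endif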

static vm_fault_t dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

/*
 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 * flushed on write-faults (non-cow), but not read-faults.
 */
static bool dax_fault_is_synchronous(unsigned long flags,
		struct vm_area_struct *vma, struct iomap *iomap)
{
	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
		&& (iomap->flags & IOMAP_F_DIRTY);
}
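
/*
 * Hedged usage sketch (not part of the original file): when a synchronous
 * fault returns VM_FAULT_NEEDDSYNC, the filesystem fault handler must
 * fsync the range and only then install the pfn, in the style of the
 * ext4/XFS handlers built on dax_iomap_fault() and dax_finish_sync_fault().
 * example_iomap_ops and the function name are illustrative.
 */
#if 0	/* illustrative only */
static vm_fault_t example_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	pfn_t pfn;
	vm_fault_t ret;

	ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &example_iomap_ops);
	if (ret & VM_FAULT_NEEDDSYNC)
		ret = dax_finish_sync_fault(vmf, pe_size, pfn);
	return ret;
}
#endif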
1238
1239static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
1240                               int *iomap_errp, const struct iomap_ops *ops)
1241{
1242        struct vm_area_struct *vma = vmf->vma;
1243        struct address_space *mapping = vma->vm_file->f_mapping;
1244        XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
1245        struct inode *inode = mapping->host;
1246        unsigned long vaddr = vmf->address;
1247        loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
1248        struct iomap iomap = { 0 };
1249        unsigned flags = IOMAP_FAULT;
1250        int error, major = 0;
1251        bool write = vmf->flags & FAULT_FLAG_WRITE;
1252        bool sync;
1253        vm_fault_t ret = 0;
1254        void *entry;
1255        pfn_t pfn;
1256
1257        trace_dax_pte_fault(inode, vmf, ret);
1258        /*
1259         * Check whether offset isn't beyond end of file now. Caller is supposed
1260         * to hold locks serializing us with truncate / punch hole so this is
1261         * a reliable test.
1262         */
1263        if (pos >= i_size_read(inode)) {
1264                ret = VM_FAULT_SIGBUS;
1265                goto out;
1266        }
1267
1268        if (write && !vmf->cow_page)
1269                flags |= IOMAP_WRITE;
1270
1271        entry = grab_mapping_entry(&xas, mapping, 0);
1272        if (xa_is_internal(entry)) {
1273                ret = xa_to_internal(entry);
1274                goto out;
1275        }
1276
1277        /*
1278         * It is possible, particularly with mixed reads & writes to private
1279         * mappings, that we have raced with a PMD fault that overlaps with
1280         * the PTE we need to set up.  If so just return and the fault will be
1281         * retried.
1282         */
1283        if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
1284                ret = VM_FAULT_NOPAGE;
1285                goto unlock_entry;
1286        }
1287
1288        /*
1289         * Note that we don't bother to use iomap_apply here: DAX required
1290         * the file system block size to be equal the page size, which means
1291         * that we never have to deal with more than a single extent here.
1292         */
1293        error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
1294        if (iomap_errp)
1295                *iomap_errp = error;
1296        if (error) {
1297                ret = dax_fault_return(error);
1298                goto unlock_entry;
1299        }
1300        if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
1301                error = -EIO;   /* fs corruption? */
1302                goto error_finish_iomap;
1303        }
1304
1305        if (vmf->cow_page) {
1306                sector_t sector = dax_iomap_sector(&iomap, pos);
1307
1308                switch (iomap.type) {
1309                case IOMAP_HOLE:
1310                case IOMAP_UNWRITTEN:
1311                        clear_user_highpage(vmf->cow_page, vaddr);
1312                        break;
1313                case IOMAP_MAPPED:
1314                        error = copy_user_dax(iomap.bdev, iomap.dax_dev,
1315                                        sector, PAGE_SIZE, vmf->cow_page, vaddr);
1316                        break;
1317                default:
1318                        WARN_ON_ONCE(1);
1319                        error = -EIO;
1320                        break;
1321                }
1322
1323                if (error)
1324                        goto error_finish_iomap;
1325
1326                __SetPageUptodate(vmf->cow_page);
1327                ret = finish_fault(vmf);
1328                if (!ret)
1329                        ret = VM_FAULT_DONE_COW;
1330                goto finish_iomap;
1331        }
1332
1333        sync = dax_fault_is_synchronous(flags, vma, &iomap);
1334
1335        switch (iomap.type) {
1336        case IOMAP_MAPPED:
1337                if (iomap.flags & IOMAP_F_NEW) {
1338                        count_vm_event(PGMAJFAULT);
1339                        count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
1340                        major = VM_FAULT_MAJOR;
1341                }
1342                error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
1343                if (error < 0)
1344                        goto error_finish_iomap;
1345
1346                entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
1347                                                 0, write && !sync);
1348
1349                /*
1350                 * If we are doing synchronous page fault and inode needs fsync,
1351                 * we can insert PTE into page tables only after that happens.
1352                 * Skip insertion for now and return the pfn so that caller can
1353                 * insert it after fsync is done.
1354                 */
1355                if (sync) {
1356                        if (WARN_ON_ONCE(!pfnp)) {
1357                                error = -EIO;
1358                                goto error_finish_iomap;
1359                        }
1360                        *pfnp = pfn;
1361                        ret = VM_FAULT_NEEDDSYNC | major;
1362                        goto finish_iomap;
1363                }
1364                trace_dax_insert_mapping(inode, vmf, entry);
1365                if (write)
1366                        ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
1367                else
1368                        ret = vmf_insert_mixed(vma, vaddr, pfn);
1369
1370                goto finish_iomap;
1371        case IOMAP_UNWRITTEN:
1372        case IOMAP_HOLE:
1373                if (!write) {
1374                        ret = dax_load_hole(&xas, mapping, &entry, vmf);
1375                        goto finish_iomap;
1376                }
1377                /*FALLTHRU*/
1378        default:
1379                WARN_ON_ONCE(1);
1380                error = -EIO;
1381                break;
1382        }
1383
1384 error_finish_iomap:
1385        ret = dax_fault_return(error);
1386 finish_iomap:
1387        if (ops->iomap_end) {
1388                int copied = PAGE_SIZE;
1389
1390                if (ret & VM_FAULT_ERROR)
1391                        copied = 0;
1392                /*
1393                 * The fault is done by now and there's no way back (other
1394                 * thread may be already happily using PTE we have installed).
1395                 * Just ignore error from ->iomap_end since we cannot do much
1396                 * with it.
1397                 */
1398                ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
1399        }
1400 unlock_entry:
1401        dax_unlock_entry(&xas, entry);
1402 out:
1403        trace_dax_pte_fault_done(inode, vmf, ret);
1404        return ret | major;
1405}
1406
1407#ifdef CONFIG_FS_DAX_PMD
1408static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1409                struct iomap *iomap, void **entry)
1410{
1411        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1412        unsigned long pmd_addr = vmf->address & PMD_MASK;
1413        struct inode *inode = mapping->host;
1414        struct page *zero_page;
1415        spinlock_t *ptl;
1416        pmd_t pmd_entry;
1417        pfn_t pfn;
1418
1419        zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
1420
1421        if (unlikely(!zero_page))
1422                goto fallback;
1423
1424        pfn = page_to_pfn_t(zero_page);
1425        *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
1426                        DAX_PMD | DAX_ZERO_PAGE, false);
1427
1428        ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1429        if (!pmd_none(*(vmf->pmd))) {
1430                spin_unlock(ptl);
1431                goto fallback;
1432        }
1433
1434        pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1435        pmd_entry = pmd_mkhuge(pmd_entry);
1436        set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1437        spin_unlock(ptl);
1438        trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
1439        return VM_FAULT_NOPAGE;
1440
1441fallback:
1442        trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
1443        return VM_FAULT_FALLBACK;
1444}

static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	vm_fault_t result = VM_FAULT_FALLBACK;
	struct iomap iomap = { 0 };
	pgoff_t max_pgoff;
	void *entry;
	loff_t pos;
	int error;
	pfn_t pfn;

	/*
	 * Check now whether the faulting offset is beyond the end of the
	 * file.  The caller is supposed to hold locks serializing us with
	 * truncate / punch hole, so this is a reliable test.
	 */
	max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

	/*
	 * Make sure that the faulting address's PMD offset (colour) matches
	 * the PMD offset from the start of the file.  This is necessary so
	 * that a PMD range in the page table overlaps exactly with a PMD
	 * range in the page cache.
	 */
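	/*
	 * For example, with 4KiB pages and 2MiB PMDs (PG_PMD_COLOUR == 511):
	 * if the faulting address is page 0x201 within its 2MiB region
	 * (colour 0x001) while the faulting file offset is page 0x200
	 * (colour 0x000), a PMD would map file pages 0x1ff..0x3fe -- not a
	 * PMD-aligned file range -- so we must fall back to PTEs.  The
	 * colours match only when the virtual address and the file offset
	 * are congruent modulo PMD_SIZE.
	 */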
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		goto fallback;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	if (xas.xa_index >= max_pgoff) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/* If the PMD would extend beyond the file size */
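	/*
	 * For example, with 4KiB pages and 2MiB PMDs: for a 3MiB file,
	 * max_pgoff is 768.  A PMD fault with xa_index 512 would cover file
	 * pages 512..1023, and (512 | 511) == 1023 >= 768, so we fall back
	 * rather than map pages beyond EOF with a huge entry.
	 */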
	if ((xas.xa_index | PG_PMD_COLOUR) >= max_pgoff)
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get an empty PMD entry,
	 * a zero PMD entry or a DAX PMD.  If it can't (because a PTE
	 * entry is already in the array, for instance), it will return
	 * VM_FAULT_FALLBACK.
	 */
	entry = grab_mapping_entry(&xas, mapping, DAX_PMD);
	if (xa_is_internal(entry)) {
		result = xa_to_internal(entry);
		goto fallback;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
			!pmd_devmap(*vmf->pmd)) {
		result = 0;
		goto unlock_entry;
	}

	/*
	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)xas.xa_index << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
	if (error)
		goto unlock_entry;

	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
		if (error < 0)
			goto finish_iomap;

		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
						DAX_PMD, write && !sync);

		/*
		 * If we are doing a synchronous page fault and the inode
		 * needs fsync, we can insert the PMD into the page tables
		 * only after that happens.  Skip the insertion for now and
		 * return the pfn so that the caller can insert it after
		 * fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp))
				goto finish_iomap;
			*pfnp = pfn;
			result = VM_FAULT_NEEDDSYNC;
			goto finish_iomap;
		}

		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
		result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
					    write);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			break;
		result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

 finish_iomap:
	if (ops->iomap_end) {
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (another
		 * thread may already be happily using the PMD we have
		 * installed).  Just ignore any error from ->iomap_end since
		 * we cannot do much with it.
		 */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
				&iomap);
	}
 unlock_entry:
	dax_unlock_entry(&xas, entry);
 fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
	return result;
}
#else
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in
 * @pfnp: PFN to insert for synchronous faults if fsync is required
 * @iomap_errp: Storage for the detailed error code in case of error
 * @ops: Iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for the page fault to proceed
 * successfully.
 */
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, pfnp, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);
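
/*
 * Example: a minimal sketch of how a filesystem might wire
 * dax_iomap_fault() into its vm_operations_struct, loosely modelled on
 * what ext4 and XFS do.  This is illustrative only -- the "myfs" names,
 * MYFS_I() and its mmap_sem are stand-ins for the filesystem's own
 * structures, iomap ops and fault-path locking:
 *
 *	static vm_fault_t myfs_huge_fault(struct vm_fault *vmf,
 *					  enum page_entry_size pe_size)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		vm_fault_t ret;
 *		pfn_t pfn;
 *
 *		// Serialize against truncate / hole punch, as
 *		// dax_iomap_fault() expects its caller to do.
 *		down_read(&MYFS_I(inode)->mmap_sem);
 *		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
 *				      &myfs_iomap_ops);
 *		up_read(&MYFS_I(inode)->mmap_sem);
 *
 *		return ret;
 *	}
 *
 *	static vm_fault_t myfs_fault(struct vm_fault *vmf)
 *	{
 *		return myfs_huge_fault(vmf, PE_SIZE_PTE);
 *	}
 *
 *	static const struct vm_operations_struct myfs_dax_vm_ops = {
 *		.fault		= myfs_fault,
 *		.huge_fault	= myfs_huge_fault,
 *		.page_mkwrite	= myfs_fault,
 *		.pfn_mkwrite	= myfs_fault,
 *	};
 */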

/*
 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
 * @vmf: The description of the fault
 * @pfn: PFN to insert
 * @order: Order of the entry to insert.
 *
 * This function inserts a writeable PTE or PMD entry into the page tables
 * for an mmapped DAX file.  It also marks the page cache entry as dirty.
 */
static vm_fault_t
dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
	void *entry;
	vm_fault_t ret;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas);
	/* Did we race with someone splitting the entry or similar? */
	if (!entry ||
	    (order == 0 && !dax_is_pte_entry(entry)) ||
	    (order == PMD_ORDER && !dax_is_pmd_entry(entry))) {
		put_unlocked_entry(&xas, entry);
		xas_unlock_irq(&xas);
		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
						      VM_FAULT_NOPAGE);
		return VM_FAULT_NOPAGE;
	}
	xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
	dax_lock_entry(&xas, entry);
	xas_unlock_irq(&xas);
	if (order == 0)
		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
#ifdef CONFIG_FS_DAX_PMD
	else if (order == PMD_ORDER)
		ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
			pfn, true);
#endif
	else
		ret = VM_FAULT_FALLBACK;
	dax_unlock_entry(&xas, entry);
	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
	return ret;
}

/**
 * dax_finish_sync_fault - finish synchronous page fault
 * @vmf: The description of the fault
 * @pe_size: Size of the entry to be inserted
 * @pfn: PFN to insert
 *
 * This function ensures that the file range touched by the page fault is
 * stored persistently on the media and handles inserting the appropriate
 * page table entry.
 */
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn)
{
	int err;
	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
	unsigned int order = pe_order(pe_size);
	size_t len = PAGE_SIZE << order;

	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
	if (err)
		return VM_FAULT_SIGBUS;
	return dax_insert_pfn_mkwrite(vmf, pfn, order);
}
EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
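
/*
 * Example: a minimal sketch (again with hypothetical "myfs" names) of the
 * intended calling sequence for synchronous (MAP_SYNC) faults: call
 * dax_iomap_fault(), and if it reports that the page table insertion was
 * deferred until fsync, commit the metadata and finish the fault via
 * dax_finish_sync_fault():
 *
 *	static vm_fault_t myfs_sync_huge_fault(struct vm_fault *vmf,
 *					       enum page_entry_size pe_size)
 *	{
 *		vm_fault_t ret;
 *		pfn_t pfn;
 *
 *		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
 *				      &myfs_iomap_ops);
 *		if (ret & VM_FAULT_NEEDDSYNC)
 *			// dax_finish_sync_fault() fsyncs the faulted range
 *			// and only then installs a writeable entry for @pfn.
 *			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
 *
 *		return ret;
 *	}
 */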