linux/mm/hmm.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Copyright 2013 Red Hat Inc.
   4 *
   5 * Authors: Jérôme Glisse <jglisse@redhat.com>
   6 */
   7/*
   8 * Refer to include/linux/hmm.h for information about heterogeneous memory
   9 * management or HMM for short.
  10 */
  11#include <linux/pagewalk.h>
  12#include <linux/hmm.h>
  13#include <linux/init.h>
  14#include <linux/rmap.h>
  15#include <linux/swap.h>
  16#include <linux/slab.h>
  17#include <linux/sched.h>
  18#include <linux/mmzone.h>
  19#include <linux/pagemap.h>
  20#include <linux/swapops.h>
  21#include <linux/hugetlb.h>
  22#include <linux/memremap.h>
  23#include <linux/sched/mm.h>
  24#include <linux/jump_label.h>
  25#include <linux/dma-mapping.h>
  26#include <linux/mmu_notifier.h>
  27#include <linux/memory_hotplug.h>
  28
  29#include "internal.h"
  30
  31struct hmm_vma_walk {
  32        struct hmm_range        *range;
  33        unsigned long           last;
  34};
  35
  36enum {
  37        HMM_NEED_FAULT = 1 << 0,
  38        HMM_NEED_WRITE_FAULT = 1 << 1,
  39        HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
  40};
  41
  42static int hmm_pfns_fill(unsigned long addr, unsigned long end,
  43                         struct hmm_range *range, unsigned long cpu_flags)
  44{
  45        unsigned long i = (addr - range->start) >> PAGE_SHIFT;
  46
  47        for (; addr < end; addr += PAGE_SIZE, i++)
  48                range->hmm_pfns[i] = cpu_flags;
  49        return 0;
  50}
  51
  52/*
  53 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
  54 * @addr: range virtual start address (inclusive)
  55 * @end: range virtual end address (exclusive)
  56 * @required_fault: HMM_NEED_* flags
  57 * @walk: mm_walk structure
  58 * Return: -EBUSY after page fault, or page fault error
  59 *
  60 * This function will be called whenever pmd_none() or pte_none() returns true,
  61 * or whenever there is no page directory covering the virtual address range.
  62 */
  63static int hmm_vma_fault(unsigned long addr, unsigned long end,
  64                         unsigned int required_fault, struct mm_walk *walk)
  65{
  66        struct hmm_vma_walk *hmm_vma_walk = walk->private;
  67        struct vm_area_struct *vma = walk->vma;
  68        unsigned int fault_flags = FAULT_FLAG_REMOTE;
  69
  70        WARN_ON_ONCE(!required_fault);
  71        hmm_vma_walk->last = addr;
  72
  73        if (required_fault & HMM_NEED_WRITE_FAULT) {
  74                if (!(vma->vm_flags & VM_WRITE))
  75                        return -EPERM;
  76                fault_flags |= FAULT_FLAG_WRITE;
  77        }
  78
  79        for (; addr < end; addr += PAGE_SIZE)
  80                if (handle_mm_fault(vma, addr, fault_flags, NULL) &
  81                    VM_FAULT_ERROR)
  82                        return -EFAULT;
  83        return -EBUSY;
  84}
  85
  86static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
  87                                       unsigned long pfn_req_flags,
  88                                       unsigned long cpu_flags)
  89{
  90        struct hmm_range *range = hmm_vma_walk->range;
  91
   92        /*
   93         * We consider not only the individual per-page request but also
   94         * the default flags requested for the whole range. The API can be
   95         * used in two ways: in the first, the HMM user coalesces multiple
   96         * page faults into one request and sets the flags per pfn for
   97         * those faults; in the second, the HMM user wants to pre-fault a
   98         * range with specific flags. For the latter it would be a waste
   99         * to have the user pre-fill the pfn array with a default flags
  100         * value.
  101         */
 102        pfn_req_flags &= range->pfn_flags_mask;
 103        pfn_req_flags |= range->default_flags;
 104
  105        /* We aren't asked to do anything ... */
 106        if (!(pfn_req_flags & HMM_PFN_REQ_FAULT))
 107                return 0;
 108
  109        /* Do we need a write fault? */
 110        if ((pfn_req_flags & HMM_PFN_REQ_WRITE) &&
 111            !(cpu_flags & HMM_PFN_WRITE))
 112                return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT;
 113
 114        /* If CPU page table is not valid then we need to fault */
 115        if (!(cpu_flags & HMM_PFN_VALID))
 116                return HMM_NEED_FAULT;
 117        return 0;
 118}
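/*
 * Illustrative sketch, not part of this file's logic and not built: the two
 * ways a caller can drive the faulting decision above through default_flags
 * and pfn_flags_mask. The helper name and the index "i" are hypothetical;
 * the HMM_PFN_REQ_* flags and the struct hmm_range fields are the real API
 * from include/linux/hmm.h.
 */
#if 0
static void hmm_example_request_modes(struct hmm_range *range, unsigned long i)
{
	/* 1) Pre-fault the whole range writable; per-pfn input is ignored. */
	range->default_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
	range->pfn_flags_mask = 0;

	/*
	 * 2) Fault only selected pages: clear default_flags, let the mask
	 * pass the per-pfn request bits through, and mark individual input
	 * entries in the pfn array.
	 */
	range->default_flags = 0;
	range->pfn_flags_mask = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
	range->hmm_pfns[i] = HMM_PFN_REQ_FAULT;
}
#endif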
 119
 120static unsigned int
 121hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
 122                     const unsigned long hmm_pfns[], unsigned long npages,
 123                     unsigned long cpu_flags)
 124{
 125        struct hmm_range *range = hmm_vma_walk->range;
 126        unsigned int required_fault = 0;
 127        unsigned long i;
 128
 129        /*
 130         * If the default flags do not request to fault pages, and the mask does
 131         * not allow for individual pages to be faulted, then
 132         * hmm_pte_need_fault() will always return 0.
 133         */
 134        if (!((range->default_flags | range->pfn_flags_mask) &
 135              HMM_PFN_REQ_FAULT))
 136                return 0;
 137
 138        for (i = 0; i < npages; ++i) {
 139                required_fault |= hmm_pte_need_fault(hmm_vma_walk, hmm_pfns[i],
 140                                                     cpu_flags);
 141                if (required_fault == HMM_NEED_ALL_BITS)
 142                        return required_fault;
 143        }
 144        return required_fault;
 145}
 146
 147static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
 148                             __always_unused int depth, struct mm_walk *walk)
 149{
 150        struct hmm_vma_walk *hmm_vma_walk = walk->private;
 151        struct hmm_range *range = hmm_vma_walk->range;
 152        unsigned int required_fault;
 153        unsigned long i, npages;
 154        unsigned long *hmm_pfns;
 155
 156        i = (addr - range->start) >> PAGE_SHIFT;
 157        npages = (end - addr) >> PAGE_SHIFT;
 158        hmm_pfns = &range->hmm_pfns[i];
 159        required_fault =
 160                hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0);
 161        if (!walk->vma) {
 162                if (required_fault)
 163                        return -EFAULT;
 164                return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
 165        }
 166        if (required_fault)
 167                return hmm_vma_fault(addr, end, required_fault, walk);
 168        return hmm_pfns_fill(addr, end, range, 0);
 169}
 170
 171static inline unsigned long hmm_pfn_flags_order(unsigned long order)
 172{
 173        return order << HMM_PFN_ORDER_SHIFT;
 174}
 175
 176static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
 177                                                 pmd_t pmd)
 178{
 179        if (pmd_protnone(pmd))
 180                return 0;
 181        return (pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
 182                                 HMM_PFN_VALID) |
 183               hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
 184}
 185
 186#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 187static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
 188                              unsigned long end, unsigned long hmm_pfns[],
 189                              pmd_t pmd)
 190{
 191        struct hmm_vma_walk *hmm_vma_walk = walk->private;
 192        struct hmm_range *range = hmm_vma_walk->range;
 193        unsigned long pfn, npages, i;
 194        unsigned int required_fault;
 195        unsigned long cpu_flags;
 196
 197        npages = (end - addr) >> PAGE_SHIFT;
 198        cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
 199        required_fault =
 200                hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags);
 201        if (required_fault)
 202                return hmm_vma_fault(addr, end, required_fault, walk);
 203
 204        pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
 205        for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
 206                hmm_pfns[i] = pfn | cpu_flags;
 207        return 0;
 208}
 209#else /* CONFIG_TRANSPARENT_HUGEPAGE */
 210/* stub to allow the code below to compile */
 211int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
 212                unsigned long end, unsigned long hmm_pfns[], pmd_t pmd);
 213#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 214
 215static inline bool hmm_is_device_private_entry(struct hmm_range *range,
 216                swp_entry_t entry)
 217{
 218        return is_device_private_entry(entry) &&
 219                pfn_swap_entry_to_page(entry)->pgmap->owner ==
 220                range->dev_private_owner;
 221}
 222
 223static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
 224                                                 pte_t pte)
 225{
 226        if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
 227                return 0;
 228        return pte_write(pte) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
 229}
 230
 231static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 232                              unsigned long end, pmd_t *pmdp, pte_t *ptep,
 233                              unsigned long *hmm_pfn)
 234{
 235        struct hmm_vma_walk *hmm_vma_walk = walk->private;
 236        struct hmm_range *range = hmm_vma_walk->range;
 237        unsigned int required_fault;
 238        unsigned long cpu_flags;
 239        pte_t pte = *ptep;
 240        uint64_t pfn_req_flags = *hmm_pfn;
 241
 242        if (pte_none(pte)) {
 243                required_fault =
 244                        hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
 245                if (required_fault)
 246                        goto fault;
 247                *hmm_pfn = 0;
 248                return 0;
 249        }
 250
 251        if (!pte_present(pte)) {
 252                swp_entry_t entry = pte_to_swp_entry(pte);
 253
 254                /*
 255                 * Never fault in device private pages, but just report
 256                 * the PFN even if not present.
 257                 */
 258                if (hmm_is_device_private_entry(range, entry)) {
 259                        cpu_flags = HMM_PFN_VALID;
 260                        if (is_writable_device_private_entry(entry))
 261                                cpu_flags |= HMM_PFN_WRITE;
 262                        *hmm_pfn = swp_offset(entry) | cpu_flags;
 263                        return 0;
 264                }
 265
 266                required_fault =
 267                        hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
 268                if (!required_fault) {
 269                        *hmm_pfn = 0;
 270                        return 0;
 271                }
 272
 273                if (!non_swap_entry(entry))
 274                        goto fault;
 275
 276                if (is_device_exclusive_entry(entry))
 277                        goto fault;
 278
 279                if (is_migration_entry(entry)) {
 280                        pte_unmap(ptep);
 281                        hmm_vma_walk->last = addr;
 282                        migration_entry_wait(walk->mm, pmdp, addr);
 283                        return -EBUSY;
 284                }
 285
 286                /* Report error for everything else */
 287                pte_unmap(ptep);
 288                return -EFAULT;
 289        }
 290
 291        cpu_flags = pte_to_hmm_pfn_flags(range, pte);
 292        required_fault =
 293                hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
 294        if (required_fault)
 295                goto fault;
 296
  297        /*
  298         * Bypass devmap ptes such as DAX pages when all requested pfn
  299         * flags (pfn_req_flags) are fulfilled.
  300         * Since each architecture defines a struct page for the zero page,
  301         * just fall through and treat it like a normal page.
  302         */
 303        if (pte_special(pte) && !pte_devmap(pte) &&
 304            !is_zero_pfn(pte_pfn(pte))) {
 305                if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
 306                        pte_unmap(ptep);
 307                        return -EFAULT;
 308                }
 309                *hmm_pfn = HMM_PFN_ERROR;
 310                return 0;
 311        }
 312
 313        *hmm_pfn = pte_pfn(pte) | cpu_flags;
 314        return 0;
 315
 316fault:
 317        pte_unmap(ptep);
 318        /* Fault any virtual address we were asked to fault */
 319        return hmm_vma_fault(addr, end, required_fault, walk);
 320}
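/*
 * Illustrative sketch, not built: how a caller that owns the device private
 * pages (range->dev_private_owner) might decode an output entry produced by
 * hmm_vma_handle_pte(). The helper name is hypothetical; hmm_pfn_to_page()
 * and the HMM_PFN_* output bits come from include/linux/hmm.h.
 */
#if 0
static struct page *hmm_example_decode(unsigned long hmm_pfn)
{
	/* Not mapped, or reported as an error: nothing to use. */
	if (!(hmm_pfn & HMM_PFN_VALID) || (hmm_pfn & HMM_PFN_ERROR))
		return NULL;

	/*
	 * For device private entries the pfn bits hold the device page's
	 * pfn, so this yields the ZONE_DEVICE struct page; HMM_PFN_WRITE
	 * tells the caller whether a writable mapping may be set up.
	 */
	return hmm_pfn_to_page(hmm_pfn);
}
#endif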
 321
 322static int hmm_vma_walk_pmd(pmd_t *pmdp,
 323                            unsigned long start,
 324                            unsigned long end,
 325                            struct mm_walk *walk)
 326{
 327        struct hmm_vma_walk *hmm_vma_walk = walk->private;
 328        struct hmm_range *range = hmm_vma_walk->range;
 329        unsigned long *hmm_pfns =
 330                &range->hmm_pfns[(start - range->start) >> PAGE_SHIFT];
 331        unsigned long npages = (end - start) >> PAGE_SHIFT;
 332        unsigned long addr = start;
 333        pte_t *ptep;
 334        pmd_t pmd;
 335
 336again:
 337        pmd = READ_ONCE(*pmdp);
 338        if (pmd_none(pmd))
 339                return hmm_vma_walk_hole(start, end, -1, walk);
 340
 341        if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
 342                if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
 343                        hmm_vma_walk->last = addr;
 344                        pmd_migration_entry_wait(walk->mm, pmdp);
 345                        return -EBUSY;
 346                }
 347                return hmm_pfns_fill(start, end, range, 0);
 348        }
 349
 350        if (!pmd_present(pmd)) {
 351                if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
 352                        return -EFAULT;
 353                return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
 354        }
 355
 356        if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
  357                /*
  358                 * No need to take the pmd_lock here: even if some other
  359                 * thread is splitting the huge pmd, we will get that event
  360                 * through the mmu_notifier callback.
  361                 *
  362                 * So just re-read the pmd value, check again that it is a
  363                 * transparent huge or device mapping, and compute the
  364                 * corresponding pfn values.
  365                 */
 366                pmd = pmd_read_atomic(pmdp);
 367                barrier();
 368                if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
 369                        goto again;
 370
 371                return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
 372        }
 373
  374        /*
  375         * We have handled all the valid cases above, i.e. none, migration,
  376         * huge or transparent huge. At this point it is either a valid pmd
  377         * entry pointing to a pte directory, or a bad pmd that will not
  378         * recover.
  379         */
 380        if (pmd_bad(pmd)) {
 381                if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
 382                        return -EFAULT;
 383                return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
 384        }
 385
 386        ptep = pte_offset_map(pmdp, addr);
 387        for (; addr < end; addr += PAGE_SIZE, ptep++, hmm_pfns++) {
 388                int r;
 389
 390                r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, hmm_pfns);
 391                if (r) {
 392                        /* hmm_vma_handle_pte() did pte_unmap() */
 393                        return r;
 394                }
 395        }
 396        pte_unmap(ptep - 1);
 397        return 0;
 398}
 399
 400#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
 401    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
 402static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
 403                                                 pud_t pud)
 404{
 405        if (!pud_present(pud))
 406                return 0;
 407        return (pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
 408                                 HMM_PFN_VALID) |
 409               hmm_pfn_flags_order(PUD_SHIFT - PAGE_SHIFT);
 410}
 411
 412static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
 413                struct mm_walk *walk)
 414{
 415        struct hmm_vma_walk *hmm_vma_walk = walk->private;
 416        struct hmm_range *range = hmm_vma_walk->range;
 417        unsigned long addr = start;
 418        pud_t pud;
 419        int ret = 0;
 420        spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);
 421
 422        if (!ptl)
 423                return 0;
 424
 425        /* Normally we don't want to split the huge page */
 426        walk->action = ACTION_CONTINUE;
 427
 428        pud = READ_ONCE(*pudp);
 429        if (pud_none(pud)) {
 430                spin_unlock(ptl);
 431                return hmm_vma_walk_hole(start, end, -1, walk);
 432        }
 433
 434        if (pud_huge(pud) && pud_devmap(pud)) {
 435                unsigned long i, npages, pfn;
 436                unsigned int required_fault;
 437                unsigned long *hmm_pfns;
 438                unsigned long cpu_flags;
 439
 440                if (!pud_present(pud)) {
 441                        spin_unlock(ptl);
 442                        return hmm_vma_walk_hole(start, end, -1, walk);
 443                }
 444
 445                i = (addr - range->start) >> PAGE_SHIFT;
 446                npages = (end - addr) >> PAGE_SHIFT;
 447                hmm_pfns = &range->hmm_pfns[i];
 448
 449                cpu_flags = pud_to_hmm_pfn_flags(range, pud);
 450                required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns,
 451                                                      npages, cpu_flags);
 452                if (required_fault) {
 453                        spin_unlock(ptl);
 454                        return hmm_vma_fault(addr, end, required_fault, walk);
 455                }
 456
 457                pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
 458                for (i = 0; i < npages; ++i, ++pfn)
 459                        hmm_pfns[i] = pfn | cpu_flags;
 460                goto out_unlock;
 461        }
 462
 463        /* Ask for the PUD to be split */
 464        walk->action = ACTION_SUBTREE;
 465
 466out_unlock:
 467        spin_unlock(ptl);
 468        return ret;
 469}
 470#else
 471#define hmm_vma_walk_pud        NULL
 472#endif
 473
 474#ifdef CONFIG_HUGETLB_PAGE
 475static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
 476                                      unsigned long start, unsigned long end,
 477                                      struct mm_walk *walk)
 478{
 479        unsigned long addr = start, i, pfn;
 480        struct hmm_vma_walk *hmm_vma_walk = walk->private;
 481        struct hmm_range *range = hmm_vma_walk->range;
 482        struct vm_area_struct *vma = walk->vma;
 483        unsigned int required_fault;
 484        unsigned long pfn_req_flags;
 485        unsigned long cpu_flags;
 486        spinlock_t *ptl;
 487        pte_t entry;
 488
 489        ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
 490        entry = huge_ptep_get(pte);
 491
 492        i = (start - range->start) >> PAGE_SHIFT;
 493        pfn_req_flags = range->hmm_pfns[i];
 494        cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
 495                    hmm_pfn_flags_order(huge_page_order(hstate_vma(vma)));
 496        required_fault =
 497                hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
 498        if (required_fault) {
 499                spin_unlock(ptl);
 500                return hmm_vma_fault(addr, end, required_fault, walk);
 501        }
 502
 503        pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
 504        for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
 505                range->hmm_pfns[i] = pfn | cpu_flags;
 506
 507        spin_unlock(ptl);
 508        return 0;
 509}
 510#else
 511#define hmm_vma_walk_hugetlb_entry NULL
 512#endif /* CONFIG_HUGETLB_PAGE */
 513
 514static int hmm_vma_walk_test(unsigned long start, unsigned long end,
 515                             struct mm_walk *walk)
 516{
 517        struct hmm_vma_walk *hmm_vma_walk = walk->private;
 518        struct hmm_range *range = hmm_vma_walk->range;
 519        struct vm_area_struct *vma = walk->vma;
 520
 521        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP)) &&
 522            vma->vm_flags & VM_READ)
 523                return 0;
 524
 525        /*
 526         * vma ranges that don't have struct page backing them or map I/O
 527         * devices directly cannot be handled by hmm_range_fault().
 528         *
 529         * If the vma does not allow read access, then assume that it does not
 530         * allow write access either. HMM does not support architectures that
 531         * allow write without read.
 532         *
 533         * If a fault is requested for an unsupported range then it is a hard
 534         * failure.
 535         */
 536        if (hmm_range_need_fault(hmm_vma_walk,
 537                                 range->hmm_pfns +
 538                                         ((start - range->start) >> PAGE_SHIFT),
 539                                 (end - start) >> PAGE_SHIFT, 0))
 540                return -EFAULT;
 541
 542        hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
 543
 544        /* Skip this vma and continue processing the next vma. */
 545        return 1;
 546}
 547
 548static const struct mm_walk_ops hmm_walk_ops = {
 549        .pud_entry      = hmm_vma_walk_pud,
 550        .pmd_entry      = hmm_vma_walk_pmd,
 551        .pte_hole       = hmm_vma_walk_hole,
 552        .hugetlb_entry  = hmm_vma_walk_hugetlb_entry,
 553        .test_walk      = hmm_vma_walk_test,
 554};
 555
 556/**
 557 * hmm_range_fault - try to fault some address in a virtual address range
 558 * @range:      argument structure
 559 *
 560 * Returns 0 on success or one of the following error codes:
 561 *
 562 * -EINVAL:     Invalid arguments or mm or virtual address is in an invalid vma
 563 *              (e.g., device file vma).
 564 * -ENOMEM:     Out of memory.
 565 * -EPERM:      Invalid permission (e.g., asking for write and range is read
 566 *              only).
 567 * -EBUSY:      The range has been invalidated and the caller needs to wait for
 568 *              the invalidation to finish.
  569 * -EFAULT:     A page was requested to be valid and could not be made valid,
  570 *              i.e. it has no backing VMA or it is illegal to access.
  571 *
  572 * This is similar to get_user_pages(), except that it can read the page tables
  573 * without mutating them (i.e. without causing faults).
 574 */
 575int hmm_range_fault(struct hmm_range *range)
 576{
 577        struct hmm_vma_walk hmm_vma_walk = {
 578                .range = range,
 579                .last = range->start,
 580        };
 581        struct mm_struct *mm = range->notifier->mm;
 582        int ret;
 583
 584        mmap_assert_locked(mm);
 585
 586        do {
  587                /* If the range is no longer valid, force a retry. */
 588                if (mmu_interval_check_retry(range->notifier,
 589                                             range->notifier_seq))
 590                        return -EBUSY;
 591                ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
 592                                      &hmm_walk_ops, &hmm_vma_walk);
  593                /*
  594                 * When -EBUSY is returned the loop restarts with
  595                 * hmm_vma_walk.last set to an address that has not been stored
  596                 * in pfns. Entries < last in the pfn array hold their output
  597                 * values, and entries >= last still hold their input values.
  598                 */
 599        } while (ret == -EBUSY);
 600        return ret;
 601}
 602EXPORT_SYMBOL(hmm_range_fault);
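/*
 * Illustrative sketch, not built: the usual retry pattern around
 * hmm_range_fault() under an mmu_interval_notifier, as described in the HMM
 * documentation (Documentation/vm/hmm.rst). "driver_lock" and
 * "driver_update_device_ptes()" are hypothetical driver-side names; the
 * notifier, locking and retry calls are the real API. Assumes the caller
 * already holds a reference on the mm (e.g. via mmget_not_zero()).
 */
#if 0
static int hmm_example_snapshot(struct mmu_interval_notifier *notifier,
				unsigned long start, unsigned long end,
				unsigned long *pfns)
{
	struct mm_struct *mm = notifier->mm;
	struct hmm_range range = {
		.notifier = notifier,
		.start = start,
		.end = end,
		.hmm_pfns = pfns,
		.default_flags = HMM_PFN_REQ_FAULT,
	};
	int ret;

again:
	range.notifier_seq = mmu_interval_read_begin(notifier);
	mmap_read_lock(mm);
	ret = hmm_range_fault(&range);
	mmap_read_unlock(mm);
	if (ret) {
		if (ret == -EBUSY)
			goto again;
		return ret;
	}

	/*
	 * Take the driver lock that serializes against the notifier's
	 * invalidate callback, then check that the snapshot is still valid
	 * before using the pfns to program device page tables.
	 */
	mutex_lock(&driver_lock);
	if (mmu_interval_read_retry(notifier, range.notifier_seq)) {
		mutex_unlock(&driver_lock);
		goto again;
	}
	driver_update_device_ptes(&range);
	mutex_unlock(&driver_lock);
	return 0;
}
#endif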
 603