linux/mm/hmm.c
/*
 * Copyright 2013 Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

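/*
 * Per-walk private state handed to the page table walk callbacks through
 * walk->private: the range being resolved and the last address handled,
 * which hmm_range_fault() uses to restart the walk after -EBUSY.
 */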
struct hmm_vma_walk {
        struct hmm_range        *range;
        unsigned long           last;
};

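/*
 * Fault requirements computed by hmm_pte_need_fault() and
 * hmm_range_need_fault(), and acted upon by hmm_vma_fault().
 */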
enum {
        HMM_NEED_FAULT = 1 << 0,
        HMM_NEED_WRITE_FAULT = 1 << 1,
        HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
};

static int hmm_pfns_fill(unsigned long addr, unsigned long end,
                         struct hmm_range *range, unsigned long cpu_flags)
{
        unsigned long i = (addr - range->start) >> PAGE_SHIFT;

        for (; addr < end; addr += PAGE_SIZE, i++)
                range->hmm_pfns[i] = cpu_flags;
        return 0;
}

/*
 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @required_fault: HMM_NEED_* flags
 * @walk: mm_walk structure
 * Return: -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_fault(unsigned long addr, unsigned long end,
                         unsigned int required_fault, struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct vm_area_struct *vma = walk->vma;
        unsigned int fault_flags = FAULT_FLAG_REMOTE;

        WARN_ON_ONCE(!required_fault);
        hmm_vma_walk->last = addr;

        if (required_fault & HMM_NEED_WRITE_FAULT) {
                if (!(vma->vm_flags & VM_WRITE))
                        return -EPERM;
                fault_flags |= FAULT_FLAG_WRITE;
        }

        for (; addr < end; addr += PAGE_SIZE)
                if (handle_mm_fault(vma, addr, fault_flags) & VM_FAULT_ERROR)
                        return -EFAULT;
        return -EBUSY;
}

static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
                                       unsigned long pfn_req_flags,
                                       unsigned long cpu_flags)
{
        struct hmm_range *range = hmm_vma_walk->range;

        /*
         * Consider not only the individual per-page request but also the
         * default flags requested for the whole range. The API can be used
         * in two ways: in the first, the HMM user coalesces multiple page
         * faults into one request and sets flags per pfn for those faults;
         * in the second, the HMM user wants to pre-fault a range with
         * specific flags. For the latter it would be a waste to have the
         * user pre-fill the pfn array with a default flags value.
         */
        pfn_req_flags &= range->pfn_flags_mask;
        pfn_req_flags |= range->default_flags;

        /* We aren't asked to do anything ... */
        if (!(pfn_req_flags & HMM_PFN_REQ_FAULT))
                return 0;

        /* Do we need a write fault? */
        if ((pfn_req_flags & HMM_PFN_REQ_WRITE) &&
            !(cpu_flags & HMM_PFN_WRITE))
                return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT;

        /* If the CPU page table is not valid then we need to fault */
        if (!(cpu_flags & HMM_PFN_VALID))
                return HMM_NEED_FAULT;
        return 0;
}
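
/*
 * Illustrative sketch of the two request styles described in
 * hmm_pte_need_fault() above. The exact values are only examples of what a
 * caller might choose, not requirements of the API:
 *
 *      // pre-fault the whole range writable; per-pfn input values are ignored
 *      range->default_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
 *      range->pfn_flags_mask = 0;
 *
 *      // fault only the pages the caller flagged in the hmm_pfns[] array
 *      range->default_flags = 0;
 *      range->pfn_flags_mask = ~0UL;
 *      range->hmm_pfns[i] = HMM_PFN_REQ_FAULT;
 */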

static unsigned int
hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
                     const unsigned long hmm_pfns[], unsigned long npages,
                     unsigned long cpu_flags)
{
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned int required_fault = 0;
        unsigned long i;

        /*
         * If the default flags do not request to fault pages, and the mask does
         * not allow for individual pages to be faulted, then
         * hmm_pte_need_fault() will always return 0.
         */
        if (!((range->default_flags | range->pfn_flags_mask) &
              HMM_PFN_REQ_FAULT))
                return 0;

        for (i = 0; i < npages; ++i) {
                required_fault |= hmm_pte_need_fault(hmm_vma_walk, hmm_pfns[i],
                                                     cpu_flags);
                if (required_fault == HMM_NEED_ALL_BITS)
                        return required_fault;
        }
        return required_fault;
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
                             __always_unused int depth, struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned int required_fault;
        unsigned long i, npages;
        unsigned long *hmm_pfns;

        i = (addr - range->start) >> PAGE_SHIFT;
        npages = (end - addr) >> PAGE_SHIFT;
        hmm_pfns = &range->hmm_pfns[i];
        required_fault =
                hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0);
        if (!walk->vma) {
                if (required_fault)
                        return -EFAULT;
                return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
        }
        if (required_fault)
                return hmm_vma_fault(addr, end, required_fault, walk);
        return hmm_pfns_fill(addr, end, range, 0);
}

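/*
 * Encode the mapping order (log2 of the number of base pages covered by the
 * entry) into the high bits of an hmm_pfn output value via
 * HMM_PFN_ORDER_SHIFT; e.g. a PMD-sized mapping with 4KiB base pages is
 * reported as order 9.
 */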
static inline unsigned long hmm_pfn_flags_order(unsigned long order)
{
        return order << HMM_PFN_ORDER_SHIFT;
}

static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
                                                 pmd_t pmd)
{
        if (pmd_protnone(pmd))
                return 0;
        return (pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
                                 HMM_PFN_VALID) |
               hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
                              unsigned long end, unsigned long hmm_pfns[],
                              pmd_t pmd)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned long pfn, npages, i;
        unsigned int required_fault;
        unsigned long cpu_flags;

        npages = (end - addr) >> PAGE_SHIFT;
        cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
        required_fault =
                hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags);
        if (required_fault)
                return hmm_vma_fault(addr, end, required_fault, walk);

        pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
        for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
                hmm_pfns[i] = pfn | cpu_flags;
        return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
                unsigned long end, unsigned long hmm_pfns[], pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline bool hmm_is_device_private_entry(struct hmm_range *range,
                swp_entry_t entry)
{
        return is_device_private_entry(entry) &&
                device_private_entry_to_page(entry)->pgmap->owner ==
                range->dev_private_owner;
}

static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
                                                 pte_t pte)
{
        if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
                return 0;
        return pte_write(pte) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
                              unsigned long end, pmd_t *pmdp, pte_t *ptep,
                              unsigned long *hmm_pfn)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned int required_fault;
        unsigned long cpu_flags;
        pte_t pte = *ptep;
        uint64_t pfn_req_flags = *hmm_pfn;

        if (pte_none(pte)) {
                required_fault =
                        hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
                if (required_fault)
                        goto fault;
                *hmm_pfn = 0;
                return 0;
        }

        if (!pte_present(pte)) {
                swp_entry_t entry = pte_to_swp_entry(pte);

                /*
                 * Never fault in device private pages, but just report
                 * the PFN even if not present.
                 */
                if (hmm_is_device_private_entry(range, entry)) {
                        cpu_flags = HMM_PFN_VALID;
                        if (is_write_device_private_entry(entry))
                                cpu_flags |= HMM_PFN_WRITE;
                        *hmm_pfn = device_private_entry_to_pfn(entry) |
                                        cpu_flags;
                        return 0;
                }

                required_fault =
                        hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
                if (!required_fault) {
                        *hmm_pfn = 0;
                        return 0;
                }

                if (!non_swap_entry(entry))
                        goto fault;

                if (is_migration_entry(entry)) {
                        pte_unmap(ptep);
                        hmm_vma_walk->last = addr;
                        migration_entry_wait(walk->mm, pmdp, addr);
                        return -EBUSY;
                }

                /* Report error for everything else */
                pte_unmap(ptep);
                return -EFAULT;
        }

        cpu_flags = pte_to_hmm_pfn_flags(range, pte);
        required_fault =
                hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
        if (required_fault)
                goto fault;

        /*
         * Since each architecture defines a struct page for the zero page, just
         * fall through and treat it like a normal page.
         */
        if (pte_special(pte) && !is_zero_pfn(pte_pfn(pte))) {
                if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
                        pte_unmap(ptep);
                        return -EFAULT;
                }
                *hmm_pfn = HMM_PFN_ERROR;
                return 0;
        }

        *hmm_pfn = pte_pfn(pte) | cpu_flags;
        return 0;

fault:
        pte_unmap(ptep);
        /* Fault any virtual address we were asked to fault */
        return hmm_vma_fault(addr, end, required_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
                            unsigned long start,
                            unsigned long end,
                            struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned long *hmm_pfns =
                &range->hmm_pfns[(start - range->start) >> PAGE_SHIFT];
        unsigned long npages = (end - start) >> PAGE_SHIFT;
        unsigned long addr = start;
        pte_t *ptep;
        pmd_t pmd;

again:
        pmd = READ_ONCE(*pmdp);
        if (pmd_none(pmd))
                return hmm_vma_walk_hole(start, end, -1, walk);

        if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
                if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
                        hmm_vma_walk->last = addr;
                        pmd_migration_entry_wait(walk->mm, pmdp);
                        return -EBUSY;
                }
                return hmm_pfns_fill(start, end, range, 0);
        }

        if (!pmd_present(pmd)) {
                if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
                        return -EFAULT;
                return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
        }

        if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
                /*
                 * No need to take the pmd_lock here; even if some other
                 * thread is splitting the huge pmd, we will get that event
                 * through the mmu_notifier callback.
                 *
                 * So just read the pmd value and check again that it is a
                 * transparent huge or device mapping entry, then compute the
                 * corresponding pfn values.
                 */
                pmd = pmd_read_atomic(pmdp);
                barrier();
                if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
                        goto again;

                return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
        }

        /*
         * We have handled all the valid cases above, i.e. either none,
         * migration, huge or transparent huge. At this point the pmd is
         * either a valid entry pointing to a pte directory or a bad pmd
         * that will not recover.
         */
        if (pmd_bad(pmd)) {
                if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
                        return -EFAULT;
                return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
        }

        ptep = pte_offset_map(pmdp, addr);
        for (; addr < end; addr += PAGE_SIZE, ptep++, hmm_pfns++) {
                int r;

                r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, hmm_pfns);
                if (r) {
                        /* hmm_vma_handle_pte() did pte_unmap() */
                        return r;
                }
        }
        pte_unmap(ptep - 1);
        return 0;
}

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
                                                 pud_t pud)
{
        if (!pud_present(pud))
                return 0;
        return (pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
                                 HMM_PFN_VALID) |
               hmm_pfn_flags_order(PUD_SHIFT - PAGE_SHIFT);
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
                struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned long addr = start;
        pud_t pud;
        int ret = 0;
        spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

        if (!ptl)
                return 0;

        /* Normally we don't want to split the huge page */
        walk->action = ACTION_CONTINUE;

        pud = READ_ONCE(*pudp);
        if (pud_none(pud)) {
                spin_unlock(ptl);
                return hmm_vma_walk_hole(start, end, -1, walk);
        }

        if (pud_huge(pud) && pud_devmap(pud)) {
                unsigned long i, npages, pfn;
                unsigned int required_fault;
                unsigned long *hmm_pfns;
                unsigned long cpu_flags;

                if (!pud_present(pud)) {
                        spin_unlock(ptl);
                        return hmm_vma_walk_hole(start, end, -1, walk);
                }

                i = (addr - range->start) >> PAGE_SHIFT;
                npages = (end - addr) >> PAGE_SHIFT;
                hmm_pfns = &range->hmm_pfns[i];

                cpu_flags = pud_to_hmm_pfn_flags(range, pud);
                required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns,
                                                      npages, cpu_flags);
                if (required_fault) {
                        spin_unlock(ptl);
                        return hmm_vma_fault(addr, end, required_fault, walk);
                }

                pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
                for (i = 0; i < npages; ++i, ++pfn)
                        hmm_pfns[i] = pfn | cpu_flags;
                goto out_unlock;
        }

        /* Ask for the PUD to be split */
        walk->action = ACTION_SUBTREE;

out_unlock:
        spin_unlock(ptl);
        return ret;
}
#else
#define hmm_vma_walk_pud        NULL
#endif

#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
                                      unsigned long start, unsigned long end,
                                      struct mm_walk *walk)
{
        unsigned long addr = start, i, pfn;
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;
        unsigned int required_fault;
        unsigned long pfn_req_flags;
        unsigned long cpu_flags;
        spinlock_t *ptl;
        pte_t entry;

        ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
        entry = huge_ptep_get(pte);

        i = (start - range->start) >> PAGE_SHIFT;
        pfn_req_flags = range->hmm_pfns[i];
        cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
                    hmm_pfn_flags_order(huge_page_order(hstate_vma(vma)));
        required_fault =
                hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
        if (required_fault) {
                spin_unlock(ptl);
                return hmm_vma_fault(addr, end, required_fault, walk);
        }

        pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
        for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
                range->hmm_pfns[i] = pfn | cpu_flags;

        spin_unlock(ptl);
        return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static int hmm_vma_walk_test(unsigned long start, unsigned long end,
                             struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;

        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP)) &&
            vma->vm_flags & VM_READ)
                return 0;

        /*
         * vma ranges that don't have struct page backing them, or that map
         * I/O devices directly, cannot be handled by hmm_range_fault().
         *
         * If the vma does not allow read access, then assume that it does not
         * allow write access either. HMM does not support architectures that
         * allow write without read.
         *
         * If a fault is requested for an unsupported range then it is a hard
         * failure.
         */
        if (hmm_range_need_fault(hmm_vma_walk,
                                 range->hmm_pfns +
                                         ((start - range->start) >> PAGE_SHIFT),
                                 (end - start) >> PAGE_SHIFT, 0))
                return -EFAULT;

        hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);

        /* Skip this vma and continue processing the next vma. */
        return 1;
}

static const struct mm_walk_ops hmm_walk_ops = {
        .pud_entry      = hmm_vma_walk_pud,
        .pmd_entry      = hmm_vma_walk_pmd,
        .pte_hole       = hmm_vma_walk_hole,
        .hugetlb_entry  = hmm_vma_walk_hugetlb_entry,
        .test_walk      = hmm_vma_walk_test,
};

/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range:      argument structure
 *
 * Returns 0 on success or one of the following error codes:
 *
 * -EINVAL:     Invalid arguments, or the mm or a virtual address is in an
 *              invalid vma (e.g., a device file vma).
 * -ENOMEM:     Out of memory.
 * -EPERM:      Invalid permission (e.g., asking for write and range is read
 *              only).
 * -EBUSY:      The range has been invalidated and the caller needs to wait for
 *              the invalidation to finish.
 * -EFAULT:     A page was requested to be valid and could not be made valid,
 *              i.e. it has no backing VMA or it is illegal to access.
 *
 * This is similar to get_user_pages(), except that it can read the page tables
 * without mutating them (i.e. without causing faults).
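 *
 * A minimal usage sketch, assuming the caller has already registered an
 * mmu_interval_notifier (interval_sub) against the target mm; NPAGES,
 * take_driver_lock(), release_driver_lock() and driver_update_page_table()
 * are illustrative driver-side placeholders, not part of this API:
 *
 *      unsigned long hmm_pfns[NPAGES];
 *      struct hmm_range range = {
 *              .notifier = &interval_sub,
 *              .start = start,
 *              .end = start + NPAGES * PAGE_SIZE,
 *              .hmm_pfns = hmm_pfns,
 *              .default_flags = HMM_PFN_REQ_FAULT,
 *      };
 *      int ret;
 *
 * again:
 *      range.notifier_seq = mmu_interval_read_begin(&interval_sub);
 *      mmap_read_lock(mm);
 *      ret = hmm_range_fault(&range);
 *      mmap_read_unlock(mm);
 *      if (ret) {
 *              if (ret == -EBUSY)
 *                      goto again;
 *              return ret;
 *      }
 *
 *      take_driver_lock();
 *      if (mmu_interval_read_retry(&interval_sub, range.notifier_seq)) {
 *              release_driver_lock();
 *              goto again;
 *      }
 *      // hmm_pfns[] is stable until the driver lock is released
 *      driver_update_page_table(hmm_pfns);
 *      release_driver_lock();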
 */
int hmm_range_fault(struct hmm_range *range)
{
        struct hmm_vma_walk hmm_vma_walk = {
                .range = range,
                .last = range->start,
        };
        struct mm_struct *mm = range->notifier->mm;
        int ret;

        mmap_assert_locked(mm);

        do {
                /* If the range is no longer valid, force a retry. */
                if (mmu_interval_check_retry(range->notifier,
                                             range->notifier_seq))
                        return -EBUSY;
                ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
                                      &hmm_walk_ops, &hmm_vma_walk);
                /*
                 * When -EBUSY is returned, the loop restarts with
                 * hmm_vma_walk.last set to an address that has not been stored
                 * in pfns. All entries < last in the pfn array are set to
                 * their output values, and all entries >= last still hold
                 * their input values.
                 */
        } while (ret == -EBUSY);
        return ret;
}
EXPORT_SYMBOL(hmm_range_fault);