linux/mm/page_vma_mapped.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
        page_vma_mapped_walk_done(pvmw);
        return false;
}

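/*
 * Map the PTE at pvmw->address and take its lock.  Unless PVMW_SYNC is
 * set, entries that cannot be of interest are rejected before any lock
 * is taken: non-swap entries when looking for a migration entry, and
 * otherwise non-present entries other than device-private or
 * device-exclusive swap entries.
 */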
static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
        pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
        if (!(pvmw->flags & PVMW_SYNC)) {
                if (pvmw->flags & PVMW_MIGRATION) {
                        if (!is_swap_pte(*pvmw->pte))
                                return false;
                } else {
                        /*
                         * We get here when we are trying to unmap a private
                         * device page from the process address space.  Such a
                         * page is not CPU accessible and thus is mapped as a
                         * special swap entry; nonetheless it still counts as
                         * a valid regular mapping for the page (and is
                         * accounted as such in the page map counts).
                         *
                         * So handle this special case as if it were a normal
                         * page mapping, i.e. lock the CPU page table and
                         * return true.
                         *
                         * For more details on device private memory see HMM
                         * (include/linux/hmm.h or mm/hmm.c).
                         */
                        if (is_swap_pte(*pvmw->pte)) {
                                swp_entry_t entry;

                                /* Handle un-addressable ZONE_DEVICE memory */
                                entry = pte_to_swp_entry(*pvmw->pte);
                                if (!is_device_private_entry(entry) &&
                                    !is_device_exclusive_entry(entry))
                                        return false;
                        } else if (!pte_present(*pvmw->pte))
                                return false;
                }
        }
        pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
        spin_lock(pvmw->ptl);
        return true;
}

/**
 * check_pte - check if [pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages) is mapped
 * at the @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, includes the pte and the pfn range
 *        to check
 *
 * page_vma_mapped_walk() found a place where the pfn range is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * pvmw->pte may point to an empty PTE, a swap PTE, or a PTE pointing to an
 * arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points into the pfn range (any subpage of a THP).
 *
 * If the PVMW_MIGRATION flag is not set, returns true if pvmw->pte points
 * into the pfn range (any subpage of a THP).
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
        unsigned long pfn;

        if (pvmw->flags & PVMW_MIGRATION) {
                swp_entry_t entry;
                if (!is_swap_pte(*pvmw->pte))
                        return false;
                entry = pte_to_swp_entry(*pvmw->pte);

                if (!is_migration_entry(entry) &&
                    !is_device_exclusive_entry(entry))
                        return false;

                pfn = swp_offset(entry);
        } else if (is_swap_pte(*pvmw->pte)) {
                swp_entry_t entry;

                /* Handle un-addressable ZONE_DEVICE memory */
                entry = pte_to_swp_entry(*pvmw->pte);
                if (!is_device_private_entry(entry) &&
                    !is_device_exclusive_entry(entry))
                        return false;

                pfn = swp_offset(entry);
        } else {
                if (!pte_present(*pvmw->pte))
                        return false;

                pfn = pte_pfn(*pvmw->pte);
        }

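        /*
         * Unsigned arithmetic: if pfn is below pvmw->pfn, the subtraction
         * wraps to a huge value, so this single comparison checks both
         * ends of the [pvmw->pfn, pvmw->pfn + pvmw->nr_pages) range.
         */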
        return (pfn - pvmw->pfn) < pvmw->nr_pages;
}

/* Returns true if the two ranges overlap.  Careful not to overflow. */
static bool check_pmd(unsigned long pfn, struct page_vma_mapped_walk *pvmw)
{
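        /* [pfn, pfn + HPAGE_PMD_NR) vs [pvmw->pfn, pvmw->pfn + pvmw->nr_pages) */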
        if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn)
                return false;
        if (pfn > pvmw->pfn + pvmw->nr_pages - 1)
                return false;
        return true;
}

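/*
 * Round pvmw->address up to the next boundary of @size (a page table
 * unit such as PMD_SIZE).  If the addition wraps past the top of the
 * address space, park the address at ULONG_MAX so the caller's
 * "address < end" check terminates the walk.
 */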
static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
{
        pvmw->address = (pvmw->address + size) & ~(size - 1);
        if (!pvmw->address)
                pvmw->address = ULONG_MAX;
}

/**
 * page_vma_mapped_walk - check if @pvmw->pfn is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. pfn, nr_pages, vma, address
 * and flags must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually a THP). For a PTE-mapped THP, you should run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returns false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
        struct vm_area_struct *vma = pvmw->vma;
        struct mm_struct *mm = vma->vm_mm;
        unsigned long end;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t pmde;

        /* The only possible pmd mapping was handled on the last iteration */
        if (pvmw->pmd && !pvmw->pte)
                return not_found(pvmw);

        if (unlikely(is_vm_hugetlb_page(vma))) {
                struct hstate *hstate = hstate_vma(vma);
                unsigned long size = huge_page_size(hstate);
                /* The only possible mapping was handled on the last iteration */
                if (pvmw->pte)
                        return not_found(pvmw);

                /* When the pud is not present, the pte will be NULL */
                pvmw->pte = huge_pte_offset(mm, pvmw->address, size);
                if (!pvmw->pte)
                        return false;

                pvmw->ptl = huge_pte_lockptr(hstate, mm, pvmw->pte);
                spin_lock(pvmw->ptl);
                if (!check_pte(pvmw))
                        return not_found(pvmw);
                return true;
        }

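        /* First address past the range the page can be mapped at in this vma */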
        end = vma_address_end(pvmw);
        if (pvmw->pte)
                goto next_pte;
restart:
        do {
                pgd = pgd_offset(mm, pvmw->address);
                if (!pgd_present(*pgd)) {
                        step_forward(pvmw, PGDIR_SIZE);
                        continue;
                }
                p4d = p4d_offset(pgd, pvmw->address);
                if (!p4d_present(*p4d)) {
                        step_forward(pvmw, P4D_SIZE);
                        continue;
                }
                pud = pud_offset(p4d, pvmw->address);
                if (!pud_present(*pud)) {
                        step_forward(pvmw, PUD_SIZE);
                        continue;
                }

                pvmw->pmd = pmd_offset(pud, pvmw->address);
                /*
                 * Make sure the pmd value isn't cached in a register by the
                 * compiler and used as a stale value after we've observed a
                 * subsequent update.
                 */
                pmde = READ_ONCE(*pvmw->pmd);

                if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde) ||
                    (pmd_present(pmde) && pmd_devmap(pmde))) {
                        pvmw->ptl = pmd_lock(mm, pvmw->pmd);
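                        /*
                         * Re-read the pmd now that its lock is held: it may
                         * have changed since the unlocked READ_ONCE() above.
                         */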
                        pmde = *pvmw->pmd;
                        if (!pmd_present(pmde)) {
                                swp_entry_t entry;

                                if (!thp_migration_supported() ||
                                    !(pvmw->flags & PVMW_MIGRATION))
                                        return not_found(pvmw);
                                entry = pmd_to_swp_entry(pmde);
                                if (!is_migration_entry(entry) ||
                                    !check_pmd(swp_offset(entry), pvmw))
                                        return not_found(pvmw);
                                return true;
                        }
                        if (likely(pmd_trans_huge(pmde) || pmd_devmap(pmde))) {
                                if (pvmw->flags & PVMW_MIGRATION)
                                        return not_found(pvmw);
                                if (!check_pmd(pmd_pfn(pmde), pvmw))
                                        return not_found(pvmw);
                                return true;
                        }
                        /* THP pmd was split under us: handle on pte level */
                        spin_unlock(pvmw->ptl);
                        pvmw->ptl = NULL;
                } else if (!pmd_present(pmde)) {
                        /*
                         * If PVMW_SYNC, take and drop THP pmd lock so that we
                         * cannot return prematurely, while zap_huge_pmd() has
                         * cleared *pmd but not decremented compound_mapcount().
                         */
                        if ((pvmw->flags & PVMW_SYNC) &&
                            transparent_hugepage_active(vma) &&
                            (pvmw->nr_pages >= HPAGE_PMD_NR)) {
                                spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

                                spin_unlock(ptl);
                        }
                        step_forward(pvmw, PMD_SIZE);
                        continue;
                }
                if (!map_pte(pvmw))
                        goto next_pte;
this_pte:
                if (check_pte(pvmw))
                        return true;
next_pte:
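                /* Skip empty PTEs; at a PMD boundary, drop locks and restart */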
                do {
                        pvmw->address += PAGE_SIZE;
                        if (pvmw->address >= end)
                                return not_found(pvmw);
                        /* Did we cross page table boundary? */
                        if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
                                if (pvmw->ptl) {
                                        spin_unlock(pvmw->ptl);
                                        pvmw->ptl = NULL;
                                }
                                pte_unmap(pvmw->pte);
                                pvmw->pte = NULL;
                                goto restart;
                        }
                        pvmw->pte++;
                        if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) {
                                pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
                                spin_lock(pvmw->ptl);
                        }
                } while (pte_none(*pvmw->pte));

                if (!pvmw->ptl) {
                        pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
                        spin_lock(pvmw->ptl);
                }
                goto this_pte;
        } while (pvmw->address < end);

        return false;
}
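
/*
 * Example (an illustrative sketch, not part of this file): the typical
 * caller pattern runs page_vma_mapped_walk() in a loop, doing its work
 * under pvmw.ptl each time the walk returns true.  The function below
 * is hypothetical; real callers live in mm/rmap.c and do real work
 * instead of counting.
 */
static int example_count_mappings(struct page *page,
                                  struct vm_area_struct *vma)
{
        struct page_vma_mapped_walk pvmw = {
                .pfn = page_to_pfn(page),
                .nr_pages = compound_nr(page),
                .vma = vma,
        };
        int count = 0;

        pvmw.address = vma_address(page, vma);
        if (pvmw.address == -EFAULT)
                return 0;       /* page cannot be mapped in this vma */

        while (page_vma_mapped_walk(&pvmw)) {
                /* pvmw.ptl is held; pvmw.pte or (PMD-mapped) pvmw.pmd is set */
                count++;
        }
        /* On a false return, the ptl is unlocked and the pte unmapped */
        return count;
}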

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
        struct page_vma_mapped_walk pvmw = {
                .pfn = page_to_pfn(page),
                .nr_pages = 1,
                .vma = vma,
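                /* PVMW_SYNC: map_pte() always takes the PTE lock */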
                .flags = PVMW_SYNC,
        };

        pvmw.address = vma_address(page, vma);
        if (pvmw.address == -EFAULT)
                return 0;
        if (!page_vma_mapped_walk(&pvmw))
                return 0;
        page_vma_mapped_walk_done(&pvmw);
        return 1;
}
