linux/arch/x86/mm/gup.c
/*
 * Lockless get_user_pages_fast for x86
 *
 * Copyright (C) 2008 Nick Piggin
 * Copyright (C) 2008 Novell Inc.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/memremap.h>

#include <asm/pgtable.h>

static inline pte_t gup_get_pte(pte_t *ptep)
{
#ifndef CONFIG_X86_PAE
        return ACCESS_ONCE(*ptep);
#else
        /*
         * With get_user_pages_fast, we walk down the pagetables without taking
         * any locks.  For this we would like to load the pointers atomically,
         * but that is not possible (without expensive cmpxchg8b) on PAE.  What
         * we do have is the guarantee that a pte will only either go from not
         * present to present, or present to not present or both -- it will not
         * switch to a completely different present page without a TLB flush in
         * between; something that we are blocking by holding interrupts off.
         *
         * Setting ptes from not present to present goes:
         * ptep->pte_high = h;
         * smp_wmb();
         * ptep->pte_low = l;
         *
         * And present to not present goes:
         * ptep->pte_low = 0;
         * smp_wmb();
         * ptep->pte_high = 0;
         *
         * We must ensure here that the load of pte_low sees l iff pte_high
         * sees h. We load pte_high *after* loading pte_low, which ensures we
         * don't see an older value of pte_high.  *Then* we recheck pte_low,
         * which ensures that we haven't picked up a changed pte high. We might
         * have got rubbish values from pte_low and pte_high, but we are
         * guaranteed that pte_low will not have the present bit set *unless*
         * it is 'l'. And get_user_pages_fast only operates on present ptes, so
         * we're safe.
         *
         * gup_get_pte should not be used or copied outside gup.c without being
         * very careful -- it does not atomically load the pte or anything that
         * is likely to be useful for you.
         */
        pte_t pte;

retry:
        pte.pte_low = ptep->pte_low;
        smp_rmb();
        pte.pte_high = ptep->pte_high;
        smp_rmb();
        if (unlikely(pte.pte_low != ptep->pte_low))
                goto retry;

        return pte;
#endif
}

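/*
 * Drop the references taken so far in gup_pte_range() when a dev_pagemap
 * lookup fails part-way through: rewind *nr back to nr_start, clearing
 * PageReferenced and putting each page as we go.
 */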
static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
{
        while ((*nr) - nr_start) {
                struct page *page = pages[--(*nr)];

                ClearPageReferenced(page);
                put_page(page);
        }
}

/*
 * 'pteval' can come from a pte, pmd or pud.  We only check
 * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
 * same value on all 3 types.
 */
static inline int pte_allows_gup(unsigned long pteval, int write)
{
        unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;

        if (write)
                need_pte_bits |= _PAGE_RW;

        if ((pteval & need_pte_bits) != need_pte_bits)
                return 0;

        return 1;
}

/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
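/*
 * Walk the pte level of one pmd from addr to end, taking a reference on
 * every present page.  Returns 1 if the whole range was pinned, 0 to make
 * the caller fall back to the slow path (NUMA hinting faults, protection
 * mismatch, special mappings, or a dev_pagemap that is going away).
 */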
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
                unsigned long end, int write, struct page **pages, int *nr)
{
        struct dev_pagemap *pgmap = NULL;
        int nr_start = *nr;
        pte_t *ptep;

        ptep = pte_offset_map(&pmd, addr);
        do {
                pte_t pte = gup_get_pte(ptep);
                struct page *page;

                /* Similar to the PMD case, NUMA hinting must take slow path */
                if (pte_numa(pte)) {
                        pte_unmap(ptep);
                        return 0;
                }

                if (!pte_allows_gup(pte_val(pte), write)) {
                        pte_unmap(ptep);
                        return 0;
                }

                if (pte_devmap(pte)) {
                        pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
                        if (unlikely(!pgmap)) {
                                undo_dev_pagemap(nr, nr_start, pages);
                                pte_unmap(ptep);
                                return 0;
                        }
                } else if (pte_special(pte)) {
                        pte_unmap(ptep);
                        return 0;
                }
                VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
                page = pte_page(pte);
                get_page(page);
                put_dev_pagemap(pgmap);
                SetPageReferenced(page);
                pages[*nr] = page;
                (*nr)++;

        } while (ptep++, addr += PAGE_SIZE, addr != end);
        pte_unmap(ptep - 1);

        return 1;
}

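/*
 * Take @nr references on a compound page in one go: the huge-page walkers
 * below record one small page per PAGE_SIZE step, and all of those
 * references land on the head page.
 */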
static inline void get_head_page_multiple(struct page *page, int nr)
{
        VM_BUG_ON_PAGE(page != compound_head(page), page);
        VM_BUG_ON_PAGE(page_count(page) == 0, page);
        page_ref_add(page, nr);
        SetPageReferenced(page);
}

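/*
 * Pin the subpages of a huge pmd mapping: record each small page covered
 * by [addr, end), bump tail-page counts where needed, then take all of the
 * head references in a single page_ref_add().
 */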
static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
                unsigned long end, int write, struct page **pages, int *nr)
{
        pte_t pte = *(pte_t *)&pmd;
        struct page *head, *page;
        int refs;

        if (!pte_allows_gup(pte_val(pte), write))
                return 0;
        /* hugepages are never "special" */
        VM_BUG_ON(pte_flags(pte) & _PAGE_SPECIAL);
        VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

        refs = 0;
        head = pte_page(pte);
        page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
        do {
                VM_BUG_ON_PAGE(compound_head(page) != head, page);
                pages[*nr] = page;
                if (PageTail(page))
                        get_huge_page_tail(page);
                (*nr)++;
                page++;
                refs++;
        } while (addr += PAGE_SIZE, addr != end);
        get_head_page_multiple(head, refs);

        return 1;
}

static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
                int write, struct page **pages, int *nr)
{
        unsigned long next;
        pmd_t *pmdp;

        pmdp = pmd_offset(&pud, addr);
        do {
                pmd_t pmd = *pmdp;

                next = pmd_addr_end(addr, end);
                /*
                 * The pmd_trans_splitting() check below explains why
                 * pmdp_splitting_flush has to flush the tlb, to stop
                 * this gup-fast code from running while we set the
                 * splitting bit in the pmd. Returning zero will take
                 * the slow path that will call wait_split_huge_page()
                 * if the pmd is still in splitting state. gup-fast
                 * can't because it has irq disabled and
                 * wait_split_huge_page() would never return as the
                 * tlb flush IPI wouldn't run.
                 */
                if (pmd_none(pmd) || pmd_trans_splitting(pmd))
                        return 0;
                if (unlikely(pmd_large(pmd) || !pmd_present(pmd))) {
                        /*
                         * NUMA hinting faults need to be handled in the GUP
                         * slowpath for accounting purposes and so that they
                         * can be serialised against THP migration.
                         */
                        if (pmd_numa(pmd))
                                return 0;
                        if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
                                return 0;
                } else {
                        if (!gup_pte_range(pmd, addr, next, write, pages, nr))
                                return 0;
                }
        } while (pmdp++, addr = next, addr != end);

        return 1;
}

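/*
 * The PUD-sized (1GB on x86) analogue of gup_huge_pmd() above.
 */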
static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
                unsigned long end, int write, struct page **pages, int *nr)
{
        pte_t pte = *(pte_t *)&pud;
        struct page *head, *page;
        int refs;

        if (!pte_allows_gup(pte_val(pte), write))
                return 0;
        /* hugepages are never "special" */
        VM_BUG_ON(pte_flags(pte) & _PAGE_SPECIAL);
        VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

        refs = 0;
        head = pte_page(pte);
        page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
        do {
                VM_BUG_ON_PAGE(compound_head(page) != head, page);
                pages[*nr] = page;
                if (PageTail(page))
                        get_huge_page_tail(page);
                (*nr)++;
                page++;
                refs++;
        } while (addr += PAGE_SIZE, addr != end);
        get_head_page_multiple(head, refs);

        return 1;
}

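/*
 * Walk the pud level of one pgd, dispatching to gup_huge_pud() for huge
 * mappings and to gup_pmd_range() otherwise.
 */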
static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
                        int write, struct page **pages, int *nr)
{
        unsigned long next;
        pud_t *pudp;

        pudp = pud_offset(&pgd, addr);
        do {
                pud_t pud = *pudp;

                next = pud_addr_end(addr, end);
                if (pud_none(pud))
                        return 0;
                if (unlikely(pud_large(pud))) {
                        if (!gup_huge_pud(pud, addr, next, write, pages, nr))
                                return 0;
                } else {
                        if (!gup_pmd_range(pud, addr, next, write, pages, nr))
                                return 0;
                }
        } while (pudp++, addr = next, addr != end);

        return 1;
}

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
                          struct page **pages)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr, len, end;
        unsigned long next;
        unsigned long flags;
        pgd_t *pgdp;
        int nr = 0;

        start &= PAGE_MASK;
        addr = start;
        len = (unsigned long) nr_pages << PAGE_SHIFT;
        end = start + len;
        if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
                                        (void __user *)start, len)))
                return 0;

        /*
         * XXX: batch / limit 'nr', to avoid large irq off latency
         * needs some instrumenting to determine the common sizes used by
         * important workloads (eg. DB2), and whether limiting the batch size
         * will decrease performance.
         *
         * It seems like we're in the clear for the moment. Direct-IO is
         * the main guy that batches up lots of get_user_pages, and even
         * they are limited to 64-at-a-time which is not so many.
         */
        /*
         * This doesn't prevent pagetable teardown, but does prevent
         * the pagetables and pages from being freed on x86.
         *
         * So long as we atomically load page table pointers versus teardown
         * (which we do on x86, with the above PAE exception), we can follow the
         * address down to the page and take a ref on it.
         */
        local_irq_save(flags);
        pgdp = pgd_offset(mm, addr);
        do {
                pgd_t pgd = *pgdp;

                next = pgd_addr_end(addr, end);
                if (pgd_none(pgd))
                        break;
                if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
                        break;
        } while (pgdp++, addr = next, addr != end);
        local_irq_restore(flags);

        return nr;
}

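/*
 * Usage sketch: because __get_user_pages_fast() never sleeps and never
 * takes mm->mmap_sem, it can be called from atomic contexts such as an
 * interrupt-level profiler.  The caller below is hypothetical, for
 * illustration only; it assumes a word-aligned user address and maps the
 * pinned page with kmap_atomic().
 */
static int example_peek_user_word(unsigned long uaddr, unsigned long *val)
{
        struct page *page;
        unsigned long *kaddr;

        /* Pin exactly one page for reading; returns the number pinned. */
        if (__get_user_pages_fast(uaddr & PAGE_MASK, 1, 0, &page) != 1)
                return -EFAULT;

        kaddr = kmap_atomic(page);
        *val = kaddr[(uaddr & ~PAGE_MASK) / sizeof(unsigned long)];
        kunmap_atomic(kaddr);

        put_page(page);         /* drop the reference gup took */
        return 0;
}
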
/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:      starting user address
 * @nr_pages:   number of pages from start to pin
 * @write:      whether pages will be written to
 * @pages:      array that receives pointers to the pages pinned.
 *              Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                        struct page **pages)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr, len, end;
        unsigned long next;
        pgd_t *pgdp;
        int nr = 0;

        start &= PAGE_MASK;
        addr = start;
        len = (unsigned long) nr_pages << PAGE_SHIFT;

        end = start + len;
        if (end < start)
                goto slow_irqon;

#ifdef CONFIG_X86_64
        if (end >> __VIRTUAL_MASK_SHIFT)
                goto slow_irqon;
#endif

        /*
         * XXX: batch / limit 'nr', to avoid large irq off latency
         * needs some instrumenting to determine the common sizes used by
         * important workloads (eg. DB2), and whether limiting the batch size
         * will decrease performance.
         *
         * It seems like we're in the clear for the moment. Direct-IO is
         * the main guy that batches up lots of get_user_pages, and even
         * they are limited to 64-at-a-time which is not so many.
         */
        /*
         * This doesn't prevent pagetable teardown, but does prevent
         * the pagetables and pages from being freed on x86.
         *
         * So long as we atomically load page table pointers versus teardown
         * (which we do on x86, with the above PAE exception), we can follow the
         * address down to the page and take a ref on it.
         */
        local_irq_disable();
        pgdp = pgd_offset(mm, addr);
        do {
                pgd_t pgd = *pgdp;

                next = pgd_addr_end(addr, end);
                if (pgd_none(pgd))
                        goto slow;
                if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
                        goto slow;
        } while (pgdp++, addr = next, addr != end);
        local_irq_enable();

        VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
        return nr;

        {
                int ret;

slow:
                local_irq_enable();
slow_irqon:
                /* Try to get the remaining pages with get_user_pages */
                start += nr << PAGE_SHIFT;
                pages += nr;

                ret = get_user_pages_unlocked(current, mm, start,
                                              (end - start) >> PAGE_SHIFT,
                                              write, 0, pages);

                /* Have to be a bit careful with return values */
                if (nr > 0) {
                        if (ret < 0)
                                ret = nr;
                        else
                                ret += nr;
                }

                return ret;
        }
}

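/*
 * Usage sketch: a typical caller pins a whole user buffer up front,
 * performs its I/O against the page array, and drops each reference with
 * put_page() when done.  The function below is hypothetical, for
 * illustration only.
 */
static int example_pin_user_buffer(unsigned long uaddr, int nr_pages,
                                   struct page **pages)
{
        int i, pinned;

        pinned = get_user_pages_fast(uaddr, nr_pages, 1, pages);
        if (pinned < 0)
                return pinned;          /* nothing was pinned: -errno */

        /* ... hand pages[0..pinned-1] to DMA or direct-IO here ... */

        for (i = 0; i < pinned; i++)
                put_page(pages[i]);

        return pinned == nr_pages ? 0 : -EFAULT;
}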